From dc7334766097e7c94a1bbc542d6d00bcfb14dd76 Mon Sep 17 00:00:00 2001 From: Nitish Malhotra Date: Fri, 21 Jan 2022 22:03:44 +0000 Subject: [PATCH 01/11] Restructure code and add deploy manifests - Moved files from top level npm/ folder to their own respective folders for controller and daemon components - Generated new deployment manifests for each components and moved into the respective folders under npm/deploy Signed-off-by: Nitish Malhotra --- npm/cmd/start.go | 3 +- npm/cmd/start_daemon.go | 7 +- npm/cmd/start_server.go | 3 +- npm/{ => controller}/server.go | 89 ++++++++------- npm/{ => daemon}/daemon.go | 7 +- npm/deploy/common/npm-configmap.yaml | 24 ++++ npm/deploy/common/npm-serviceaccount.yaml | 7 ++ npm/deploy/common/rbac.yaml | 44 ++++++++ npm/deploy/controller/azure-npm.yaml | 82 ++++++++++++++ npm/deploy/daemon/azure-npm.yaml | 94 ++++++++++++++++ npm/{ => deploy/npm}/azure-npm.yaml | 8 +- npm/npm.go | 130 ++++++++++++---------- npm/{ => pkg/models}/consts.go | 9 +- npm/pkg/models/types.go | 53 +++++++++ npm/types.go | 78 ------------- 15 files changed, 447 insertions(+), 191 deletions(-) rename npm/{ => controller}/server.go (52%) rename npm/{ => daemon}/daemon.go (91%) create mode 100644 npm/deploy/common/npm-configmap.yaml create mode 100644 npm/deploy/common/npm-serviceaccount.yaml create mode 100644 npm/deploy/common/rbac.yaml create mode 100644 npm/deploy/controller/azure-npm.yaml create mode 100644 npm/deploy/daemon/azure-npm.yaml rename npm/{ => deploy/npm}/azure-npm.yaml (95%) rename npm/{ => pkg/models}/consts.go (76%) create mode 100644 npm/pkg/models/types.go delete mode 100644 npm/types.go diff --git a/npm/cmd/start.go b/npm/cmd/start.go index 00e177dc9e..7cdd2f4b42 100644 --- a/npm/cmd/start.go +++ b/npm/cmd/start.go @@ -18,6 +18,7 @@ import ( "github.com/Azure/azure-container-networking/npm/pkg/dataplane" "github.com/Azure/azure-container-networking/npm/pkg/dataplane/ipsets" 
"github.com/Azure/azure-container-networking/npm/pkg/dataplane/policies" + "github.com/Azure/azure-container-networking/npm/pkg/models" "github.com/Azure/azure-container-networking/npm/util" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -144,7 +145,7 @@ func start(config npmconfig.Config, flags npmconfig.Flags) error { var dp dataplane.GenericDataplane stopChannel := wait.NeverStop if config.Toggles.EnableV2NPM { - dp, err = dataplane.NewDataPlane(npm.GetNodeName(), common.NewIOShim(), npmV2DataplaneCfg, stopChannel) + dp, err = dataplane.NewDataPlane(models.GetNodeName(), common.NewIOShim(), npmV2DataplaneCfg, stopChannel) if err != nil { return fmt.Errorf("failed to create dataplane with error %w", err) } diff --git a/npm/cmd/start_daemon.go b/npm/cmd/start_daemon.go index 08ce1b423e..cb40f6df62 100644 --- a/npm/cmd/start_daemon.go +++ b/npm/cmd/start_daemon.go @@ -7,10 +7,11 @@ import ( "strconv" "github.com/Azure/azure-container-networking/common" - "github.com/Azure/azure-container-networking/npm" npmconfig "github.com/Azure/azure-container-networking/npm/config" + "github.com/Azure/azure-container-networking/npm/daemon" "github.com/Azure/azure-container-networking/npm/pkg/controlplane/goalstateprocessor" "github.com/Azure/azure-container-networking/npm/pkg/dataplane" + "github.com/Azure/azure-container-networking/npm/pkg/models" "github.com/Azure/azure-container-networking/npm/pkg/transport" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -58,7 +59,7 @@ func startDaemon(config npmconfig.Config) error { var dp dataplane.GenericDataplane - dp, err = dataplane.NewDataPlane(npm.GetNodeName(), common.NewIOShim(), npmV2DataplaneCfg, wait.NeverStop) + dp, err = dataplane.NewDataPlane(models.GetNodeName(), common.NewIOShim(), npmV2DataplaneCfg, wait.NeverStop) if err != nil { klog.Errorf("failed to create dataplane: %v", err) return fmt.Errorf("failed to create dataplane with error %w", err) @@ -76,7 +77,7 @@ func startDaemon(config npmconfig.Config) 
error { return fmt.Errorf("failed to create goalstate processor: %w", err) } - n, err := npm.NewNetworkPolicyDaemon(ctx, config, dp, gsp, client, version) + n, err := daemon.NewNetworkPolicyDaemon(ctx, config, dp, gsp, client, version) if err != nil { klog.Errorf("failed to create dataplane : %v", err) return fmt.Errorf("failed to create dataplane: %w", err) diff --git a/npm/cmd/start_server.go b/npm/cmd/start_server.go index 3d3f9742f7..ffc5499b76 100644 --- a/npm/cmd/start_server.go +++ b/npm/cmd/start_server.go @@ -8,6 +8,7 @@ import ( "github.com/Azure/azure-container-networking/npm" npmconfig "github.com/Azure/azure-container-networking/npm/config" + "github.com/Azure/azure-container-networking/npm/controller" restserver "github.com/Azure/azure-container-networking/npm/http/server" "github.com/Azure/azure-container-networking/npm/metrics" "github.com/Azure/azure-container-networking/npm/pkg/dataplane" @@ -105,7 +106,7 @@ func startControlplane(config npmconfig.Config, flags npmconfig.Flags) error { return fmt.Errorf("failed to create dataplane with error: %w", err) } - npMgr, err := npm.NewNetworkPolicyServer(config, factory, mgr, dp, version, k8sServerVersion) + npMgr, err := controller.NewNetworkPolicyServer(config, factory, mgr, dp, version, k8sServerVersion) if err != nil { klog.Errorf("failed to create NPM controlplane manager with error: %v", err) return fmt.Errorf("failed to create NPM controlplane manager: %w", err) diff --git a/npm/server.go b/npm/controller/server.go similarity index 52% rename from npm/server.go rename to npm/controller/server.go index edd2a9b6f3..2fb7050853 100644 --- a/npm/server.go +++ b/npm/controller/server.go @@ -1,6 +1,6 @@ // Copyright 2018 Microsoft. All rights reserved. 
// MIT License -package npm +package controller import ( "encoding/json" @@ -9,6 +9,7 @@ import ( npmconfig "github.com/Azure/azure-container-networking/npm/config" controllersv2 "github.com/Azure/azure-container-networking/npm/pkg/controlplane/controllers/v2" "github.com/Azure/azure-container-networking/npm/pkg/dataplane" + "github.com/Azure/azure-container-networking/npm/pkg/models" "github.com/Azure/azure-container-networking/npm/pkg/transport" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/version" @@ -17,6 +18,8 @@ import ( "k8s.io/klog" ) +var aiMetadata string + type NetworkPolicyServer struct { config npmconfig.Config @@ -25,20 +28,20 @@ type NetworkPolicyServer struct { // Informers are the Kubernetes Informer // https://pkg.go.dev/k8s.io/client-go/informers - Informers + models.Informers // Controllers for handling Kubernetes resource watcher events - K8SControllersV2 + models.K8SControllersV2 // Azure-specific variables - AzureConfig + models.AzureConfig } var ( - ErrInformerFactoryNil = errors.New("informer factory is nil") - ErrTransportManagerNil = errors.New("transport manager is nil") - ErrK8SServerVersionNil = errors.New("k8s server version is nil") - ErrInformerSyncFailure = errors.New("informer sync failure") + ErrInformerFactoryNil = errors.New("informer factory is nil") + ErrTransportManagerNil = errors.New("transport manager is nil") + ErrK8SServerVersionNil = errors.New("k8s server version is nil") + ErrDataplaneNotInitialized = errors.New("dataplane is not initialized") ) func NewNetworkPolicyServer( @@ -70,87 +73,87 @@ func NewNetworkPolicyServer( n := &NetworkPolicyServer{ config: config, tm: mgr, - Informers: Informers{ - informerFactory: informerFactory, - podInformer: informerFactory.Core().V1().Pods(), - nsInformer: informerFactory.Core().V1().Namespaces(), - npInformer: informerFactory.Networking().V1().NetworkPolicies(), + Informers: models.Informers{ + InformerFactory: informerFactory, + PodInformer: 
informerFactory.Core().V1().Pods(), + NsInformer: informerFactory.Core().V1().Namespaces(), + NpInformer: informerFactory.Networking().V1().NetworkPolicies(), }, - AzureConfig: AzureConfig{ - k8sServerVersion: k8sServerVersion, - NodeName: GetNodeName(), - version: npmVersion, + AzureConfig: models.AzureConfig{ + K8sServerVersion: k8sServerVersion, + NodeName: models.GetNodeName(), + Version: npmVersion, TelemetryEnabled: true, }, } - n.npmNamespaceCacheV2 = &controllersv2.NpmNamespaceCache{NsMap: make(map[string]*controllersv2.Namespace)} - n.podControllerV2 = controllersv2.NewPodController(n.podInformer, dp, n.npmNamespaceCacheV2) - n.namespaceControllerV2 = controllersv2.NewNamespaceController(n.nsInformer, dp, n.npmNamespaceCacheV2) - n.netPolControllerV2 = controllersv2.NewNetworkPolicyController(n.npInformer, dp) + n.NpmNamespaceCacheV2 = &controllersv2.NpmNamespaceCache{NsMap: make(map[string]*controllersv2.Namespace)} + n.PodControllerV2 = controllersv2.NewPodController(n.PodInformer, dp, n.NpmNamespaceCacheV2) + n.NamespaceControllerV2 = controllersv2.NewNamespaceController(n.NsInformer, dp, n.NpmNamespaceCacheV2) + n.NetPolControllerV2 = controllersv2.NewNetworkPolicyController(n.NpInformer, dp) return n, nil } func (n *NetworkPolicyServer) MarshalJSON() ([]byte, error) { - m := map[CacheKey]json.RawMessage{} + m := map[models.CacheKey]json.RawMessage{} var npmNamespaceCacheRaw []byte var err error - npmNamespaceCacheRaw, err = json.Marshal(n.npmNamespaceCacheV2) + npmNamespaceCacheRaw, err = json.Marshal(n.NpmNamespaceCacheV2) if err != nil { - return nil, errors.Errorf("%s: %v", errMarshalNPMCache, err) + return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, err) } - m[NsMap] = npmNamespaceCacheRaw + m[models.NsMap] = npmNamespaceCacheRaw var podControllerRaw []byte - podControllerRaw, err = json.Marshal(n.podControllerV2) + podControllerRaw, err = json.Marshal(n.PodControllerV2) if err != nil { - return nil, errors.Errorf("%s: %v", 
errMarshalNPMCache, err) + return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, err) } - m[PodMap] = podControllerRaw + m[models.PodMap] = podControllerRaw nodeNameRaw, err := json.Marshal(n.NodeName) if err != nil { - return nil, errors.Errorf("%s: %v", errMarshalNPMCache, err) + return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, err) } - m[NodeName] = nodeNameRaw + m[models.NodeName] = nodeNameRaw npmCacheRaw, err := json.Marshal(m) if err != nil { - return nil, errors.Errorf("%s: %v", errMarshalNPMCache, err) + return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, err) } return npmCacheRaw, nil } func (n *NetworkPolicyServer) GetAppVersion() string { - return n.version + return n.Version } func (n *NetworkPolicyServer) Start(config npmconfig.Config, stopCh <-chan struct{}) error { - // Starts all informers manufactured by n's informerFactory. - n.informerFactory.Start(stopCh) + // Starts all informers manufactured by n's InformerFactory. + n.InformerFactory.Start(stopCh) // Wait for the initial sync of local cache. 
- if !cache.WaitForCacheSync(stopCh, n.podInformer.Informer().HasSynced) { - return fmt.Errorf("Pod informer error: %w", ErrInformerSyncFailure) + if !cache.WaitForCacheSync(stopCh, n.PodInformer.Informer().HasSynced) { + return fmt.Errorf("Pod informer error: %w", models.ErrInformerSyncFailure) } - if !cache.WaitForCacheSync(stopCh, n.nsInformer.Informer().HasSynced) { - return fmt.Errorf("Namespace informer error: %w", ErrInformerSyncFailure) + if !cache.WaitForCacheSync(stopCh, n.NsInformer.Informer().HasSynced) { + return fmt.Errorf("Namespace informer error: %w", models.ErrInformerSyncFailure) } - if !cache.WaitForCacheSync(stopCh, n.npInformer.Informer().HasSynced) { - return fmt.Errorf("NetworkPolicy informer error: %w", ErrInformerSyncFailure) + if !cache.WaitForCacheSync(stopCh, n.NpInformer.Informer().HasSynced) { + return fmt.Errorf("NetworkPolicy informer error: %w", models.ErrInformerSyncFailure) } // start v2 NPM controllers after synced - go n.podControllerV2.Run(stopCh) - go n.namespaceControllerV2.Run(stopCh) - go n.netPolControllerV2.Run(stopCh) + go n.PodControllerV2.Run(stopCh) + go n.NamespaceControllerV2.Run(stopCh) + go n.NetPolControllerV2.Run(stopCh) // start the transport layer (gRPC) server // We block the main thread here until the server is stopped. diff --git a/npm/daemon.go b/npm/daemon/daemon.go similarity index 91% rename from npm/daemon.go rename to npm/daemon/daemon.go index a06c7971e9..a4ed9bb202 100644 --- a/npm/daemon.go +++ b/npm/daemon/daemon.go @@ -1,9 +1,10 @@ // Copyright 2018 Microsoft. All rights reserved. 
// MIT License -package npm +package daemon import ( "context" + "errors" "fmt" npmconfig "github.com/Azure/azure-container-networking/npm/config" @@ -12,6 +13,10 @@ import ( "github.com/Azure/azure-container-networking/npm/pkg/transport" ) +var aiMetadata string + +var ErrDataplaneNotInitialized = errors.New("dataplane is not initialized") + type NetworkPolicyDaemon struct { ctx context.Context config npmconfig.Config diff --git a/npm/deploy/common/npm-configmap.yaml b/npm/deploy/common/npm-configmap.yaml new file mode 100644 index 0000000000..4d8bd0d389 --- /dev/null +++ b/npm/deploy/common/npm-configmap.yaml @@ -0,0 +1,24 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: azure-npm-config + namespace: kube-system +data: + azure-npm.json: | + { + "ResyncPeriodInMinutes": 15, + "ListeningPort": 10091, + "ListeningAddress": "0.0.0.0", + "Toggles": { + "EnablePrometheusMetrics": true, + "EnablePprof": true, + "EnableHTTPDebugAPI": true, + "EnableV2NPM": false, + "PlaceAzureChainFirst": false + }, + "Transport": { + "Address": "azure-npm.kube-system.svc.cluster.local", + "Port": 10092 + } + } diff --git a/npm/deploy/common/npm-serviceaccount.yaml b/npm/deploy/common/npm-serviceaccount.yaml new file mode 100644 index 0000000000..97a508c1bf --- /dev/null +++ b/npm/deploy/common/npm-serviceaccount.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: azure-npm + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists diff --git a/npm/deploy/common/rbac.yaml b/npm/deploy/common/rbac.yaml new file mode 100644 index 0000000000..c1a2565e3b --- /dev/null +++ b/npm/deploy/common/rbac.yaml @@ -0,0 +1,44 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: azure-npm + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - 
networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: azure-npm-binding + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +subjects: + - kind: ServiceAccount + name: azure-npm + namespace: kube-system +roleRef: + kind: ClusterRole + name: azure-npm + apiGroup: rbac.authorization.k8s.io +--- diff --git a/npm/deploy/controller/azure-npm.yaml b/npm/deploy/controller/azure-npm.yaml new file mode 100644 index 0000000000..60a2f4d6ed --- /dev/null +++ b/npm/deploy/controller/azure-npm.yaml @@ -0,0 +1,82 @@ + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: azure-npm-controller + namespace: kube-system + labels: + app: azure-npm + components: azure-npm-controller + addonmanager.kubernetes.io/mode: EnsureExists +spec: + selector: + matchLabels: + k8s-app: azure-npm + components: azure-npm-controller + template: + metadata: + labels: + k8s-app: azure-npm + components: azure-npm-controller + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + azure.npm/scrapeable: '' + spec: + priorityClassName: system-node-critical + tolerations: + - operator: "Exists" + effect: NoExecute + - operator: "Exists" + effect: NoSchedule + - key: CriticalAddonsOnly + operator: Exists + containers: + - name: azure-npm + image: mcr.microsoft.com/containernetworking/azure-npm:v1.4.1 + command: ["azure-npm"] + args: ["start", "controller"] + resources: + limits: + cpu: 250m + memory: 300Mi + requests: + cpu: 250m + securityContext: + privileged: true + env: + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: NPM_CONFIG + value: /etc/azure-npm/azure-npm.json + volumeMounts: + - name: protocols + mountPath: /etc/protocols + - name: azure-npm-config + mountPath: /etc/azure-npm + volumes: + - name: protocols + hostPath: + path: /etc/protocols + type: File + - name: azure-npm-config + 
configMap: + name: azure-npm-config + serviceAccountName: azure-npm --- +apiVersion: v1 +kind: Service +metadata: + name: npm-controller-metrics-cluster-service + namespace: kube-system + labels: + app: npm-controller-metrics +spec: + selector: + k8s-app: azure-npm + components: azure-npm-controller + ports: + - port: 9000 + targetPort: 10091 diff --git a/npm/deploy/daemon/azure-npm.yaml b/npm/deploy/daemon/azure-npm.yaml new file mode 100644 index 0000000000..ed2e3dce51 --- /dev/null +++ b/npm/deploy/daemon/azure-npm.yaml @@ -0,0 +1,94 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: azure-npm-daemon + namespace: kube-system + labels: + app: azure-npm-daemon + addonmanager.kubernetes.io/mode: EnsureExists +spec: + selector: + matchLabels: + k8s-app: azure-npm-daemon + components: azure-npm-daemon + template: + metadata: + labels: + k8s-app: azure-npm-daemon + components: azure-npm-daemon + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + azure.npm/scrapeable: '' + spec: + priorityClassName: system-node-critical + tolerations: + - operator: "Exists" + effect: NoExecute + - operator: "Exists" + effect: NoSchedule + - key: CriticalAddonsOnly + operator: Exists + containers: + - name: azure-npm + image: mcr.microsoft.com/containernetworking/azure-npm:v1.4.1 + command: ["azure-npm"] + args: ["start", "daemon"] + resources: + limits: + cpu: 250m + memory: 300Mi + requests: + cpu: 250m + securityContext: + privileged: true + env: + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: NPM_CONFIG + value: /etc/azure-npm/azure-npm.json + - name: DEAMON_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: DEAMON_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: xtables-lock + mountPath: /run/xtables.lock + - name: protocols + mountPath: /etc/protocols + - name: azure-npm-config + mountPath: /etc/azure-npm + hostNetwork: true + volumes: + - name: 
xtables-lock + hostPath: + path: /run/xtables.lock + type: File + - name: protocols + hostPath: + path: /etc/protocols + type: File + - name: azure-npm-config + configMap: + name: azure-npm-config + serviceAccountName: azure-npm +--- +apiVersion: v1 +kind: Service +metadata: + name: npm-metrics-cluster-service + namespace: kube-system + labels: + app: npm-metrics +spec: + selector: + k8s-app: azure-npm + ports: + - port: 9000 + targetPort: 10091 diff --git a/npm/azure-npm.yaml b/npm/deploy/npm/azure-npm.yaml similarity index 95% rename from npm/azure-npm.yaml rename to npm/deploy/npm/azure-npm.yaml index dfdcac0c8c..73e2bb2548 100644 --- a/npm/azure-npm.yaml +++ b/npm/deploy/npm/azure-npm.yaml @@ -99,18 +99,12 @@ spec: volumeMounts: - name: xtables-lock mountPath: /run/xtables.lock - - name: log - mountPath: /var/log - name: protocols mountPath: /etc/protocols - name: azure-npm-config mountPath: /etc/azure-npm hostNetwork: true volumes: - - name: log - hostPath: - path: /var/log - type: Directory - name: xtables-lock hostPath: path: /run/xtables.lock @@ -156,4 +150,4 @@ data: "EnableV2NPM": false, "PlaceAzureChainFirst": false } - } \ No newline at end of file + } diff --git a/npm/npm.go b/npm/npm.go index bcb3d7354f..4af4a0834d 100644 --- a/npm/npm.go +++ b/npm/npm.go @@ -5,13 +5,13 @@ package npm import ( "encoding/json" "fmt" - "os" npmconfig "github.com/Azure/azure-container-networking/npm/config" "github.com/Azure/azure-container-networking/npm/ipsm" controllersv1 "github.com/Azure/azure-container-networking/npm/pkg/controlplane/controllers/v1" controllersv2 "github.com/Azure/azure-container-networking/npm/pkg/controlplane/controllers/v2" "github.com/Azure/azure-container-networking/npm/pkg/dataplane" + "github.com/Azure/azure-container-networking/npm/pkg/models" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/version" "k8s.io/client-go/informers" @@ -20,7 +20,30 @@ import ( utilexec "k8s.io/utils/exec" ) -var ErrDataplaneNotInitialized = 
errors.New("dataplane is not initialized") +var aiMetadata string + +// NetworkPolicyManager contains informers for pod, namespace and networkpolicy. +type NetworkPolicyManager struct { + config npmconfig.Config + + // ipsMgr are shared in all controllers. Thus, only one ipsMgr is created for simple management + // and uses lock to avoid unintentional race condictions in IpsetManager. + ipsMgr *ipsm.IpsetManager + + // Informers are the Kubernetes Informer + // https://pkg.go.dev/k8s.io/client-go/informers + models.Informers + + // Legacy controllers for handling Kubernetes resource watcher events + // To be deprecated + models.K8SControllersV1 + + // Controllers for handling Kubernetes resource watcher events + models.K8SControllersV2 + + // Azure-specific variables + models.AzureConfig +} // NewNetworkPolicyManager creates a NetworkPolicyManager func NewNetworkPolicyManager(config npmconfig.Config, @@ -33,93 +56,93 @@ func NewNetworkPolicyManager(config npmconfig.Config, npMgr := &NetworkPolicyManager{ config: config, - Informers: Informers{ - informerFactory: informerFactory, - podInformer: informerFactory.Core().V1().Pods(), - nsInformer: informerFactory.Core().V1().Namespaces(), - npInformer: informerFactory.Networking().V1().NetworkPolicies(), + Informers: models.Informers{ + InformerFactory: informerFactory, + PodInformer: informerFactory.Core().V1().Pods(), + NsInformer: informerFactory.Core().V1().Namespaces(), + NpInformer: informerFactory.Networking().V1().NetworkPolicies(), }, - AzureConfig: AzureConfig{ - k8sServerVersion: k8sServerVersion, - NodeName: GetNodeName(), - version: npmVersion, + AzureConfig: models.AzureConfig{ + K8sServerVersion: k8sServerVersion, + NodeName: models.GetNodeName(), + Version: npmVersion, TelemetryEnabled: true, }, } // create v2 NPM specific components. 
if npMgr.config.Toggles.EnableV2NPM { - npMgr.npmNamespaceCacheV2 = &controllersv2.NpmNamespaceCache{NsMap: make(map[string]*controllersv2.Namespace)} - npMgr.podControllerV2 = controllersv2.NewPodController(npMgr.podInformer, dp, npMgr.npmNamespaceCacheV2) - npMgr.namespaceControllerV2 = controllersv2.NewNamespaceController(npMgr.nsInformer, dp, npMgr.npmNamespaceCacheV2) + npMgr.NpmNamespaceCacheV2 = &controllersv2.NpmNamespaceCache{NsMap: make(map[string]*controllersv2.Namespace)} + npMgr.PodControllerV2 = controllersv2.NewPodController(npMgr.PodInformer, dp, npMgr.NpmNamespaceCacheV2) + npMgr.NamespaceControllerV2 = controllersv2.NewNamespaceController(npMgr.NsInformer, dp, npMgr.NpmNamespaceCacheV2) // Question(jungukcho): Is config.Toggles.PlaceAzureChainFirst needed for v2? - npMgr.netPolControllerV2 = controllersv2.NewNetworkPolicyController(npMgr.npInformer, dp) + npMgr.NetPolControllerV2 = controllersv2.NewNetworkPolicyController(npMgr.NpInformer, dp) return npMgr } // create v1 NPM specific components. 
npMgr.ipsMgr = ipsm.NewIpsetManager(exec) - npMgr.npmNamespaceCacheV1 = &controllersv1.NpmNamespaceCache{NsMap: make(map[string]*controllersv1.Namespace)} - npMgr.podControllerV1 = controllersv1.NewPodController(npMgr.podInformer, npMgr.ipsMgr, npMgr.npmNamespaceCacheV1) - npMgr.namespaceControllerV1 = controllersv1.NewNameSpaceController(npMgr.nsInformer, npMgr.ipsMgr, npMgr.npmNamespaceCacheV1) - npMgr.netPolControllerV1 = controllersv1.NewNetworkPolicyController(npMgr.npInformer, npMgr.ipsMgr, config.Toggles.PlaceAzureChainFirst) + npMgr.NpmNamespaceCacheV1 = &controllersv1.NpmNamespaceCache{NsMap: make(map[string]*controllersv1.Namespace)} + npMgr.PodControllerV1 = controllersv1.NewPodController(npMgr.PodInformer, npMgr.ipsMgr, npMgr.NpmNamespaceCacheV1) + npMgr.NamespaceControllerV1 = controllersv1.NewNameSpaceController(npMgr.NsInformer, npMgr.ipsMgr, npMgr.NpmNamespaceCacheV1) + npMgr.NetPolControllerV1 = controllersv1.NewNetworkPolicyController(npMgr.NpInformer, npMgr.ipsMgr, config.Toggles.PlaceAzureChainFirst) return npMgr } func (npMgr *NetworkPolicyManager) MarshalJSON() ([]byte, error) { - m := map[CacheKey]json.RawMessage{} + m := map[models.CacheKey]json.RawMessage{} var npmNamespaceCacheRaw []byte var err error if npMgr.config.Toggles.EnableV2NPM { - npmNamespaceCacheRaw, err = json.Marshal(npMgr.npmNamespaceCacheV2) + npmNamespaceCacheRaw, err = json.Marshal(npMgr.NpmNamespaceCacheV2) } else { - npmNamespaceCacheRaw, err = json.Marshal(npMgr.npmNamespaceCacheV1) + npmNamespaceCacheRaw, err = json.Marshal(npMgr.NpmNamespaceCacheV1) } if err != nil { - return nil, errors.Errorf("%s: %v", errMarshalNPMCache, err) + return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, err) } - m[NsMap] = npmNamespaceCacheRaw + m[models.NsMap] = npmNamespaceCacheRaw var podControllerRaw []byte if npMgr.config.Toggles.EnableV2NPM { - podControllerRaw, err = json.Marshal(npMgr.podControllerV2) + podControllerRaw, err = json.Marshal(npMgr.PodControllerV2) } else 
{ - podControllerRaw, err = json.Marshal(npMgr.podControllerV1) + podControllerRaw, err = json.Marshal(npMgr.PodControllerV1) } if err != nil { - return nil, errors.Errorf("%s: %v", errMarshalNPMCache, err) + return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, err) } - m[PodMap] = podControllerRaw + m[models.PodMap] = podControllerRaw // TODO(jungukcho): NPM debug may be broken. // Will fix it later after v2 controller and linux test if it is broken. if !npMgr.config.Toggles.EnableV2NPM && npMgr.ipsMgr != nil { listMapRaw, listMapMarshalErr := npMgr.ipsMgr.MarshalListMapJSON() if listMapMarshalErr != nil { - return nil, errors.Errorf("%s: %v", errMarshalNPMCache, listMapMarshalErr) + return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, listMapMarshalErr) } - m[ListMap] = listMapRaw + m[models.ListMap] = listMapRaw setMapRaw, setMapMarshalErr := npMgr.ipsMgr.MarshalSetMapJSON() if setMapMarshalErr != nil { - return nil, errors.Errorf("%s: %v", errMarshalNPMCache, setMapMarshalErr) + return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, setMapMarshalErr) } - m[SetMap] = setMapRaw + m[models.SetMap] = setMapRaw } nodeNameRaw, err := json.Marshal(npMgr.NodeName) if err != nil { - return nil, errors.Errorf("%s: %v", errMarshalNPMCache, err) + return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, err) } - m[NodeName] = nodeNameRaw + m[models.NodeName] = nodeNameRaw npmCacheRaw, err := json.Marshal(m) if err != nil { - return nil, errors.Errorf("%s: %v", errMarshalNPMCache, err) + return nil, errors.Errorf("%s: %v", models.ErrMarshalNPMCache, err) } return npmCacheRaw, nil @@ -127,47 +150,47 @@ func (npMgr *NetworkPolicyManager) MarshalJSON() ([]byte, error) { // GetAppVersion returns network policy manager app version func (npMgr *NetworkPolicyManager) GetAppVersion() string { - return npMgr.version + return npMgr.Version } // Start starts shared informers and waits for the shared informer cache to sync. 
func (npMgr *NetworkPolicyManager) Start(config npmconfig.Config, stopCh <-chan struct{}) error { if !config.Toggles.EnableV2NPM { // Do initialization of data plane before starting syncup of each controller to avoid heavy call to api-server - if err := npMgr.netPolControllerV1.ResetDataPlane(); err != nil { + if err := npMgr.NetPolControllerV1.ResetDataPlane(); err != nil { return fmt.Errorf("Failed to initialized data plane with err %w", err) } } // Starts all informers manufactured by npMgr's informerFactory. - npMgr.informerFactory.Start(stopCh) + npMgr.InformerFactory.Start(stopCh) // Wait for the initial sync of local cache. - if !cache.WaitForCacheSync(stopCh, npMgr.podInformer.Informer().HasSynced) { - return fmt.Errorf("Pod informer error: %w", ErrInformerSyncFailure) + if !cache.WaitForCacheSync(stopCh, npMgr.PodInformer.Informer().HasSynced) { + return fmt.Errorf("Pod informer error: %w", models.ErrInformerSyncFailure) } - if !cache.WaitForCacheSync(stopCh, npMgr.nsInformer.Informer().HasSynced) { - return fmt.Errorf("Namespace informer error: %w", ErrInformerSyncFailure) + if !cache.WaitForCacheSync(stopCh, npMgr.NsInformer.Informer().HasSynced) { + return fmt.Errorf("Namespace informer error: %w", models.ErrInformerSyncFailure) } - if !cache.WaitForCacheSync(stopCh, npMgr.npInformer.Informer().HasSynced) { - return fmt.Errorf("NetworkPolicy informer error: %w", ErrInformerSyncFailure) + if !cache.WaitForCacheSync(stopCh, npMgr.NpInformer.Informer().HasSynced) { + return fmt.Errorf("NetworkPolicy informer error: %w", models.ErrInformerSyncFailure) } // start v2 NPM controllers after synced if config.Toggles.EnableV2NPM { - go npMgr.podControllerV2.Run(stopCh) - go npMgr.namespaceControllerV2.Run(stopCh) - go npMgr.netPolControllerV2.Run(stopCh) + go npMgr.PodControllerV2.Run(stopCh) + go npMgr.NamespaceControllerV2.Run(stopCh) + go npMgr.NetPolControllerV2.Run(stopCh) return nil } // start v1 NPM controllers after synced - go 
npMgr.podControllerV1.Run(stopCh) - go npMgr.namespaceControllerV1.Run(stopCh) - go npMgr.netPolControllerV1.Run(stopCh) - go npMgr.netPolControllerV1.RunPeriodicTasks(stopCh) + go npMgr.PodControllerV1.Run(stopCh) + go npMgr.NamespaceControllerV1.Run(stopCh) + go npMgr.NetPolControllerV1.Run(stopCh) + go npMgr.NetPolControllerV1.RunPeriodicTasks(stopCh) return nil } @@ -176,8 +199,3 @@ func (npMgr *NetworkPolicyManager) Start(config npmconfig.Config, stopCh <-chan func GetAIMetadata() string { return aiMetadata } - -func GetNodeName() string { - nodeName := os.Getenv(EnvNodeName) - return nodeName -} diff --git a/npm/consts.go b/npm/pkg/models/consts.go similarity index 76% rename from npm/consts.go rename to npm/pkg/models/consts.go index 9202eaf8dc..403bfb9c93 100644 --- a/npm/consts.go +++ b/npm/pkg/models/consts.go @@ -1,4 +1,6 @@ -package npm +package models + +import "os" const ( heartbeatIntervalInMinutes = 30 //nolint:unused,deadcode,varcheck // ignore this error @@ -14,3 +16,8 @@ const ( EnvNodeName = "HOSTNAME" ) + +func GetNodeName() string { + nodeName := os.Getenv(EnvNodeName) + return nodeName +} diff --git a/npm/pkg/models/types.go b/npm/pkg/models/types.go new file mode 100644 index 0000000000..28d3ffb80e --- /dev/null +++ b/npm/pkg/models/types.go @@ -0,0 +1,53 @@ +// Copyright 2018 Microsoft. All rights reserved. 
+// MIT License +package models + +import ( + controllersv1 "github.com/Azure/azure-container-networking/npm/pkg/controlplane/controllers/v1" + controllersv2 "github.com/Azure/azure-container-networking/npm/pkg/controlplane/controllers/v2" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/informers" + coreinformers "k8s.io/client-go/informers/core/v1" + networkinginformers "k8s.io/client-go/informers/networking/v1" +) + +var ( + ErrMarshalNPMCache = errors.New("failed to marshal NPM Cache") + ErrInformerSyncFailure = errors.New("informer sync failure") +) + +// Cache is the cache lookup key for the NPM cache +type CacheKey string + +// K8SControllerV1 are the legacy k8s controllers +type K8SControllersV1 struct { + PodControllerV1 *controllersv1.PodController //nolint:structcheck //ignore this error + NamespaceControllerV1 *controllersv1.NamespaceController //nolint:structcheck // false lint error + NpmNamespaceCacheV1 *controllersv1.NpmNamespaceCache //nolint:structcheck // false lint error + NetPolControllerV1 *controllersv1.NetworkPolicyController //nolint:structcheck // false lint error +} + +// K8SControllerV2 are the optimized k8s controllers that replace the legacy controllers +type K8SControllersV2 struct { + PodControllerV2 *controllersv2.PodController //nolint:structcheck //ignore this error + NamespaceControllerV2 *controllersv2.NamespaceController //nolint:structcheck // false lint error + NpmNamespaceCacheV2 *controllersv2.NpmNamespaceCache //nolint:structcheck // false lint error + NetPolControllerV2 *controllersv2.NetworkPolicyController //nolint:structcheck // false lint error +} + +// Informers are the informers for the k8s controllers +type Informers struct { + InformerFactory informers.SharedInformerFactory //nolint:structcheck //ignore this error + PodInformer coreinformers.PodInformer //nolint:structcheck // false lint error + NsInformer coreinformers.NamespaceInformer //nolint:structcheck // false lint error + 
NpInformer networkinginformers.NetworkPolicyInformer //nolint:structcheck // false lint error +} + +// AzureConfig captures the Azure specific configurations and fields +type AzureConfig struct { + K8sServerVersion *version.Info + NodeName string + Version string + TelemetryEnabled bool +} diff --git a/npm/types.go b/npm/types.go deleted file mode 100644 index 1de1e025d1..0000000000 --- a/npm/types.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2018 Microsoft. All rights reserved. -// MIT License -package npm - -import ( - npmconfig "github.com/Azure/azure-container-networking/npm/config" - "github.com/Azure/azure-container-networking/npm/ipsm" - controllersv1 "github.com/Azure/azure-container-networking/npm/pkg/controlplane/controllers/v1" - controllersv2 "github.com/Azure/azure-container-networking/npm/pkg/controlplane/controllers/v2" - "github.com/pkg/errors" - "k8s.io/apimachinery/pkg/version" - "k8s.io/client-go/informers" - coreinformers "k8s.io/client-go/informers/core/v1" - networkinginformers "k8s.io/client-go/informers/networking/v1" -) - -var ( - aiMetadata string - errMarshalNPMCache = errors.New("failed to marshal NPM Cache") -) - -// NetworkPolicyManager contains informers for pod, namespace and networkpolicy. -type NetworkPolicyManager struct { - config npmconfig.Config - - // ipsMgr are shared in all controllers. Thus, only one ipsMgr is created for simple management - // and uses lock to avoid unintentional race condictions in IpsetManager. 
- ipsMgr *ipsm.IpsetManager - - // Informers are the Kubernetes Informer - // https://pkg.go.dev/k8s.io/client-go/informers - Informers - - // Legacy controllers for handling Kubernetes resource watcher events - // To be deprecated - K8SControllersV1 - - // Controllers for handling Kubernetes resource watcher events - K8SControllersV2 - - // Azure-specific variables - AzureConfig -} - -// Cache is the cache lookup key for the NPM cache -type CacheKey string - -// K8SControllerV1 are the legacy k8s controllers -type K8SControllersV1 struct { - podControllerV1 *controllersv1.PodController //nolint:structcheck //ignore this error - namespaceControllerV1 *controllersv1.NamespaceController //nolint:structcheck // false lint error - npmNamespaceCacheV1 *controllersv1.NpmNamespaceCache //nolint:structcheck // false lint error - netPolControllerV1 *controllersv1.NetworkPolicyController //nolint:structcheck // false lint error -} - -// K8SControllerV2 are the optimized k8s controllers that replace the legacy controllers -type K8SControllersV2 struct { - podControllerV2 *controllersv2.PodController //nolint:structcheck //ignore this error - namespaceControllerV2 *controllersv2.NamespaceController //nolint:structcheck // false lint error - npmNamespaceCacheV2 *controllersv2.NpmNamespaceCache //nolint:structcheck // false lint error - netPolControllerV2 *controllersv2.NetworkPolicyController //nolint:structcheck // false lint error -} - -// Informers are the informers for the k8s controllers -type Informers struct { - informerFactory informers.SharedInformerFactory //nolint:structcheck //ignore this error - podInformer coreinformers.PodInformer //nolint:structcheck // false lint error - nsInformer coreinformers.NamespaceInformer //nolint:structcheck // false lint error - npInformer networkinginformers.NetworkPolicyInformer //nolint:structcheck // false lint error -} - -// AzureConfig captures the Azure specific configurations and fields -type AzureConfig struct { - 
k8sServerVersion *version.Info - NodeName string - version string - TelemetryEnabled bool -} From 9bd79df974aff33b00ddfc863e3c78db6567037b Mon Sep 17 00:00:00 2001 From: Nitish Malhotra Date: Mon, 24 Jan 2022 21:16:16 +0000 Subject: [PATCH 02/11] Add kustomize manifests Signed-off-by: Nitish Malhotra --- npm/deploy/kustomize/README.md | 36 +++++++++ .../base/configmap.yaml} | 0 npm/deploy/kustomize/base/kustomization.yaml | 7 ++ .../{common => kustomize/base}/rbac.yaml | 0 .../base/serviceaccount.yaml} | 0 .../overlays/controller/deployment.yaml | 67 +++++++++++++++ .../overlays/controller/kustomization.yaml | 7 ++ .../overlays/controller/service.yaml | 16 ++++ .../kustomize/overlays/daemon/deployment.yaml | 81 +++++++++++++++++++ .../overlays/daemon/kustomization.yaml | 7 ++ .../kustomize/overlays/daemon/service.yaml | 15 ++++ .../manifests/common/npm-configmap.yaml | 24 ++++++ .../manifests/common/npm-serviceaccount.yaml | 7 ++ npm/deploy/manifests/common/rbac.yaml | 44 ++++++++++ .../{ => manifests}/controller/azure-npm.yaml | 0 .../{ => manifests}/daemon/azure-npm.yaml | 0 16 files changed, 311 insertions(+) create mode 100644 npm/deploy/kustomize/README.md rename npm/deploy/{common/npm-configmap.yaml => kustomize/base/configmap.yaml} (100%) create mode 100644 npm/deploy/kustomize/base/kustomization.yaml rename npm/deploy/{common => kustomize/base}/rbac.yaml (100%) rename npm/deploy/{common/npm-serviceaccount.yaml => kustomize/base/serviceaccount.yaml} (100%) create mode 100644 npm/deploy/kustomize/overlays/controller/deployment.yaml create mode 100644 npm/deploy/kustomize/overlays/controller/kustomization.yaml create mode 100644 npm/deploy/kustomize/overlays/controller/service.yaml create mode 100644 npm/deploy/kustomize/overlays/daemon/deployment.yaml create mode 100644 npm/deploy/kustomize/overlays/daemon/kustomization.yaml create mode 100644 npm/deploy/kustomize/overlays/daemon/service.yaml create mode 100644 npm/deploy/manifests/common/npm-configmap.yaml 
 create mode 100644 npm/deploy/manifests/common/npm-serviceaccount.yaml
 create mode 100644 npm/deploy/manifests/common/rbac.yaml
 rename npm/deploy/{ => manifests}/controller/azure-npm.yaml (100%)
 rename npm/deploy/{ => manifests}/daemon/azure-npm.yaml (100%)

diff --git a/npm/deploy/kustomize/README.md b/npm/deploy/kustomize/README.md
new file mode 100644
index 0000000000..f83ca04c3a
--- /dev/null
+++ b/npm/deploy/kustomize/README.md
@@ -0,0 +1,42 @@
+# Kustomize based deployment
+
+## Prerequisites
+
+- [Kustomize](https://kustomize.io/) - Follow the instructions below to install it.
+
+  ```terminal
+  curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | bash
+  ```
+
+  For other installation options refer to https://kubectl.docs.kubernetes.io/installation/kustomize.
+
+## Deploying to the cluster
+
+> `kustomize` is not required for the `kubectl apply` steps below, since it is already bundled in the `kubectl` binary.
+
+### NPM Controller
+
+To generate the resources for the **controller**, run the following command:
+
+```terminal
+kustomize build overlays/controller > /tmp/controller.yaml
+```
+
+To deploy the controller to your cluster, run the following command:
+
+```terminal
+kubectl apply -k overlays/controller
+```
+
+### NPM Daemon
+
+To generate the resources for the **daemon**, run the following command:
+
+```terminal
+kustomize build overlays/daemon > /tmp/daemon.yaml
+```
+ +To deploy the daemon to your cluster, run the following command: +```terminal +kubectl apply -k overlays/daemon +``` diff --git a/npm/deploy/common/npm-configmap.yaml b/npm/deploy/kustomize/base/configmap.yaml similarity index 100% rename from npm/deploy/common/npm-configmap.yaml rename to npm/deploy/kustomize/base/configmap.yaml diff --git a/npm/deploy/kustomize/base/kustomization.yaml b/npm/deploy/kustomize/base/kustomization.yaml new file mode 100644 index 0000000000..cf4b75aa8e --- /dev/null +++ b/npm/deploy/kustomize/base/kustomization.yaml @@ -0,0 +1,7 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - configmap.yaml + - serviceaccount.yaml + - rbac.yaml diff --git a/npm/deploy/common/rbac.yaml b/npm/deploy/kustomize/base/rbac.yaml similarity index 100% rename from npm/deploy/common/rbac.yaml rename to npm/deploy/kustomize/base/rbac.yaml diff --git a/npm/deploy/common/npm-serviceaccount.yaml b/npm/deploy/kustomize/base/serviceaccount.yaml similarity index 100% rename from npm/deploy/common/npm-serviceaccount.yaml rename to npm/deploy/kustomize/base/serviceaccount.yaml diff --git a/npm/deploy/kustomize/overlays/controller/deployment.yaml b/npm/deploy/kustomize/overlays/controller/deployment.yaml new file mode 100644 index 0000000000..e2baf9e4b2 --- /dev/null +++ b/npm/deploy/kustomize/overlays/controller/deployment.yaml @@ -0,0 +1,67 @@ + +apiVersion: apps/v1 +kind: Deployment +metadata: + name: azure-npm-controller + namespace: kube-system + labels: + app: azure-npm + component: controller + addonmanager.kubernetes.io/mode: EnsureExists +spec: + selector: + matchLabels: + k8s-app: azure-npm + component: controller + template: + metadata: + labels: + k8s-app: azure-npm + component: controller + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + azure.npm/scrapeable: '' + spec: + priorityClassName: system-node-critical + tolerations: + - operator: "Exists" + effect: NoExecute + - operator: "Exists" + effect: 
NoSchedule + - key: CriticalAddonsOnly + operator: Exists + containers: + - name: azure-npm + image: mcr.microsoft.com/containernetworking/azure-npm:v1.4.1 + command: ["azure-npm"] + args: ["start", "controller"] + resources: + limits: + cpu: 250m + memory: 300Mi + requests: + cpu: 250m + securityContext: + privileged: true + env: + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: NPM_CONFIG + value: /etc/azure-npm/azure-npm.json + volumeMounts: + - name: protocols + mountPath: /etc/protocols + - name: azure-npm-config + mountPath: /etc/azure-npm + volumes: + - name: protocols + hostPath: + path: /etc/protocols + type: File + - name: azure-npm-config + configMap: + name: azure-npm-config + serviceAccountName: azure-npm diff --git a/npm/deploy/kustomize/overlays/controller/kustomization.yaml b/npm/deploy/kustomize/overlays/controller/kustomization.yaml new file mode 100644 index 0000000000..03c002324a --- /dev/null +++ b/npm/deploy/kustomize/overlays/controller/kustomization.yaml @@ -0,0 +1,7 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +bases: +- ../../base +resources: + - deployment.yaml + - service.yaml diff --git a/npm/deploy/kustomize/overlays/controller/service.yaml b/npm/deploy/kustomize/overlays/controller/service.yaml new file mode 100644 index 0000000000..bb21504658 --- /dev/null +++ b/npm/deploy/kustomize/overlays/controller/service.yaml @@ -0,0 +1,16 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: npm-controller-metrics-cluster-service + namespace: kube-system + labels: + app: azure-npm + component: controller +spec: + selector: + k8s-app: azure-npm + component: controller + ports: + - port: 9000 + targetPort: 10091 diff --git a/npm/deploy/kustomize/overlays/daemon/deployment.yaml b/npm/deploy/kustomize/overlays/daemon/deployment.yaml new file mode 100644 index 0000000000..ba410728df --- /dev/null +++ b/npm/deploy/kustomize/overlays/daemon/deployment.yaml @@ -0,0 +1,81 @@ 
+apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: azure-npm-deamon + namespace: kube-system + labels: + app: azure-npm + component: daemon + addonmanager.kubernetes.io/mode: EnsureExists +spec: + selector: + matchLabels: + k8s-app: azure-npm + component: daemon + template: + metadata: + labels: + k8s-app: azure-npm + component: daemon + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + azure.npm/scrapeable: '' + spec: + priorityClassName: system-node-critical + tolerations: + - operator: "Exists" + effect: NoExecute + - operator: "Exists" + effect: NoSchedule + - key: CriticalAddonsOnly + operator: Exists + containers: + - name: azure-npm + image: mcr.microsoft.com/containernetworking/azure-npm:v1.4.1 + command: ["azure-npm"] + args: ["start", "daemon"] + resources: + limits: + cpu: 250m + memory: 300Mi + requests: + cpu: 250m + securityContext: + privileged: true + env: + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: NPM_CONFIG + value: /etc/azure-npm/azure-npm.json + - name: DEAMON_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: DEAMON_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: xtables-lock + mountPath: /run/xtables.lock + - name: protocols + mountPath: /etc/protocols + - name: azure-npm-config + mountPath: /etc/azure-npm + hostNetwork: true + volumes: + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: File + - name: protocols + hostPath: + path: /etc/protocols + type: File + - name: azure-npm-config + configMap: + name: azure-npm-config + serviceAccountName: azure-npm diff --git a/npm/deploy/kustomize/overlays/daemon/kustomization.yaml b/npm/deploy/kustomize/overlays/daemon/kustomization.yaml new file mode 100644 index 0000000000..03c002324a --- /dev/null +++ b/npm/deploy/kustomize/overlays/daemon/kustomization.yaml @@ -0,0 +1,7 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +bases: +- 
../../base
+resources:
+  - deployment.yaml
+  - service.yaml
diff --git a/npm/deploy/kustomize/overlays/daemon/service.yaml b/npm/deploy/kustomize/overlays/daemon/service.yaml
new file mode 100644
index 0000000000..8d5f796035
--- /dev/null
+++ b/npm/deploy/kustomize/overlays/daemon/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: npm-deamon-metrics-cluster-service
+  namespace: kube-system
+  labels:
+    app: azure-npm
+    component: daemon
+spec:
+  selector:
+    k8s-app: azure-npm
+    component: daemon
+  ports:
+  - port: 9000
+    targetPort: 10091
diff --git a/npm/deploy/manifests/common/npm-configmap.yaml b/npm/deploy/manifests/common/npm-configmap.yaml
new file mode 100644
index 0000000000..4d8bd0d389
--- /dev/null
+++ b/npm/deploy/manifests/common/npm-configmap.yaml
@@ -0,0 +1,24 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: azure-npm-config
+  namespace: kube-system
+data:
+  azure-npm.json: |
+    {
+      "ResyncPeriodInMinutes": 15,
+      "ListeningPort": 10091,
+      "ListeningAddress": "0.0.0.0",
+      "Toggles": {
+        "EnablePrometheusMetrics": true,
+        "EnablePprof": true,
+        "EnableHTTPDebugAPI": true,
+        "EnableV2NPM": false,
+        "PlaceAzureChainFirst": false
+      },
+      "Transport": {
+        "Address": "azure-npm.kube-system.svc.cluster.local",
+        "Port": 10092
+      }
+    }
diff --git a/npm/deploy/manifests/common/npm-serviceaccount.yaml b/npm/deploy/manifests/common/npm-serviceaccount.yaml
new file mode 100644
index 0000000000..97a508c1bf
--- /dev/null
+++ b/npm/deploy/manifests/common/npm-serviceaccount.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: azure-npm
+  namespace: kube-system
+  labels:
+    addonmanager.kubernetes.io/mode: EnsureExists
diff --git a/npm/deploy/manifests/common/rbac.yaml b/npm/deploy/manifests/common/rbac.yaml
new file mode 100644
index 0000000000..c1a2565e3b
--- /dev/null
+++ b/npm/deploy/manifests/common/rbac.yaml
@@ -0,0 +1,44 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  
name: azure-npm + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: azure-npm-binding + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +subjects: + - kind: ServiceAccount + name: azure-npm + namespace: kube-system +roleRef: + kind: ClusterRole + name: azure-npm + apiGroup: rbac.authorization.k8s.io +--- diff --git a/npm/deploy/controller/azure-npm.yaml b/npm/deploy/manifests/controller/azure-npm.yaml similarity index 100% rename from npm/deploy/controller/azure-npm.yaml rename to npm/deploy/manifests/controller/azure-npm.yaml diff --git a/npm/deploy/daemon/azure-npm.yaml b/npm/deploy/manifests/daemon/azure-npm.yaml similarity index 100% rename from npm/deploy/daemon/azure-npm.yaml rename to npm/deploy/manifests/daemon/azure-npm.yaml From 5360c307eae4aa1dc2a03691290291a6afcd26d9 Mon Sep 17 00:00:00 2001 From: Nitish Malhotra Date: Mon, 24 Jan 2022 21:30:00 +0000 Subject: [PATCH 03/11] Address lint errors Signed-off-by: Nitish Malhotra --- npm/controller/server.go | 2 +- npm/daemon/daemon.go | 2 +- npm/npm.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/npm/controller/server.go b/npm/controller/server.go index 2fb7050853..672b4f3057 100644 --- a/npm/controller/server.go +++ b/npm/controller/server.go @@ -18,7 +18,7 @@ import ( "k8s.io/klog" ) -var aiMetadata string +var aiMetadata string //nolint // aiMetadata is set in Makefile type NetworkPolicyServer struct { config npmconfig.Config diff --git a/npm/daemon/daemon.go b/npm/daemon/daemon.go index a4ed9bb202..b261dbc308 100644 --- a/npm/daemon/daemon.go +++ b/npm/daemon/daemon.go @@ -13,7 +13,7 @@ import ( 
"github.com/Azure/azure-container-networking/npm/pkg/transport" ) -var aiMetadata string +var aiMetadata string //nolint // aiMetadata is set in Makefile var ErrDataplaneNotInitialized = errors.New("dataplane is not initialized") diff --git a/npm/npm.go b/npm/npm.go index 4af4a0834d..e116a99fac 100644 --- a/npm/npm.go +++ b/npm/npm.go @@ -20,7 +20,7 @@ import ( utilexec "k8s.io/utils/exec" ) -var aiMetadata string +var aiMetadata string //nolint // aiMetadata is set in Makefile // NetworkPolicyManager contains informers for pod, namespace and networkpolicy. type NetworkPolicyManager struct { From d10798db79ed9f6b9ef45976cd1744f80ce04b41 Mon Sep 17 00:00:00 2001 From: Nitish Malhotra Date: Fri, 28 Jan 2022 22:09:25 +0000 Subject: [PATCH 04/11] Deployment manifests Signed-off-by: Nitish Malhotra --- npm/deploy/kustomize/base/configmap.yaml | 3 +- .../overlays/controller/deployment.yaml | 7 +- .../overlays/controller/service.yaml | 18 ++ .../kustomize/overlays/daemon/deployment.yaml | 6 + .../manifests/controller/azure-npm.yaml | 223 ++++++++++++----- npm/deploy/manifests/daemon/azure-npm.yaml | 232 ++++++++++++------ npm/deploy/npm/azure-npm.yaml | 6 + 7 files changed, 361 insertions(+), 134 deletions(-) diff --git a/npm/deploy/kustomize/base/configmap.yaml b/npm/deploy/kustomize/base/configmap.yaml index 4d8bd0d389..ae7e0ed936 100644 --- a/npm/deploy/kustomize/base/configmap.yaml +++ b/npm/deploy/kustomize/base/configmap.yaml @@ -19,6 +19,7 @@ data: }, "Transport": { "Address": "azure-npm.kube-system.svc.cluster.local" - "Port": 10092 + # "Port": 10092 + "Port": 9001 } } diff --git a/npm/deploy/kustomize/overlays/controller/deployment.yaml b/npm/deploy/kustomize/overlays/controller/deployment.yaml index e2baf9e4b2..fb613182e2 100644 --- a/npm/deploy/kustomize/overlays/controller/deployment.yaml +++ b/npm/deploy/kustomize/overlays/controller/deployment.yaml @@ -1,4 +1,3 @@ - apiVersion: apps/v1 kind: Deployment metadata: @@ -52,11 +51,17 @@ spec: - name: NPM_CONFIG 
value: /etc/azure-npm/azure-npm.json volumeMounts: + - name: log + mountPath: /var/log - name: protocols mountPath: /etc/protocols - name: azure-npm-config mountPath: /etc/azure-npm volumes: + - name: log + hostPath: + path: /var/log + type: Directory - name: protocols hostPath: path: /etc/protocols diff --git a/npm/deploy/kustomize/overlays/controller/service.yaml b/npm/deploy/kustomize/overlays/controller/service.yaml index bb21504658..d603091f3e 100644 --- a/npm/deploy/kustomize/overlays/controller/service.yaml +++ b/npm/deploy/kustomize/overlays/controller/service.yaml @@ -13,4 +13,22 @@ spec: component: controller ports: - port: 9000 + name: metrics targetPort: 10091 +--- +apiVersion: v1 +kind: Service +metadata: + name: azure-npm + namespace: kube-system + labels: + app: azure-npm + component: controller +spec: + selector: + k8s-app: azure-npm + component: controller + ports: + - port: 9001 + name: metrics + targetPort: 10092 diff --git a/npm/deploy/kustomize/overlays/daemon/deployment.yaml b/npm/deploy/kustomize/overlays/daemon/deployment.yaml index ba410728df..f2a452c0f6 100644 --- a/npm/deploy/kustomize/overlays/daemon/deployment.yaml +++ b/npm/deploy/kustomize/overlays/daemon/deployment.yaml @@ -59,6 +59,8 @@ spec: fieldRef: fieldPath: spec.nodeName volumeMounts: + - name: log + mountPath: /var/log - name: xtables-lock mountPath: /run/xtables.lock - name: protocols @@ -67,6 +69,10 @@ spec: mountPath: /etc/azure-npm hostNetwork: true volumes: + - name: log + hostPath: + path: /var/log + type: Directory - name: xtables-lock hostPath: path: /run/xtables.lock diff --git a/npm/deploy/manifests/controller/azure-npm.yaml b/npm/deploy/manifests/controller/azure-npm.yaml index 60a2f4d6ed..fdc052978c 100644 --- a/npm/deploy/manifests/controller/azure-npm.yaml +++ b/npm/deploy/manifests/controller/azure-npm.yaml @@ -1,82 +1,185 @@ - -apiVersion: apps/v1 -kind: Deployment +apiVersion: v1 +kind: ServiceAccount metadata: - name: azure-npm-controller + labels: + 
addonmanager.kubernetes.io/mode: EnsureExists + name: azure-npm + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + addonmanager.kubernetes.io/mode: EnsureExists + name: azure-npm namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - pods + - nodes + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + addonmanager.kubernetes.io/mode: EnsureExists + name: azure-npm-binding + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: azure-npm +subjects: +- kind: ServiceAccount + name: azure-npm + namespace: kube-system +--- +apiVersion: v1 +data: + azure-npm.json: | + { + "ResyncPeriodInMinutes": 15, + "ListeningPort": 10091, + "ListeningAddress": "0.0.0.0", + "Toggles": { + "EnablePrometheusMetrics": true, + "EnablePprof": true, + "EnableHTTPDebugAPI": true, + "EnableV2NPM": false, + "PlaceAzureChainFirst": false + }, + "Transport": { + "Address": "azure-npm.kube-system.svc.cluster.local" + # "Port": 10092 + "Port": 9001 + } + } +kind: ConfigMap +metadata: + name: azure-npm-config + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: azure-npm + component: controller + name: azure-npm + namespace: kube-system +spec: + ports: + - name: metrics + port: 9001 + targetPort: 10092 + selector: + component: controller + k8s-app: azure-npm +--- +apiVersion: v1 +kind: Service +metadata: labels: app: azure-npm - components: azure-npm-controller + component: controller + name: npm-controller-metrics-cluster-service + namespace: kube-system +spec: + ports: + - name: metrics + port: 9000 + targetPort: 10091 + selector: + component: controller + k8s-app: azure-npm +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: 
addonmanager.kubernetes.io/mode: EnsureExists + app: azure-npm + component: controller + name: azure-npm-controller + namespace: kube-system spec: selector: matchLabels: + component: controller k8s-app: azure-npm - components: azure-npm-controller template: metadata: + annotations: + azure.npm/scrapeable: "" + scheduler.alpha.kubernetes.io/critical-pod: "" labels: + component: controller k8s-app: azure-npm - components: azure-npm-controller - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - azure.npm/scrapeable: '' spec: + containers: + - args: + - start + - controller + command: + - azure-npm + env: + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: NPM_CONFIG + value: /etc/azure-npm/azure-npm.json + image: mcr.microsoft.com/containernetworking/azure-npm:v1.4.1 + name: azure-npm + resources: + limits: + cpu: 250m + memory: 300Mi + requests: + cpu: 250m + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/log + name: log + - mountPath: /etc/protocols + name: protocols + - mountPath: /etc/azure-npm + name: azure-npm-config priorityClassName: system-node-critical + serviceAccountName: azure-npm tolerations: - - operator: "Exists" - effect: NoExecute - - operator: "Exists" - effect: NoSchedule + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists - key: CriticalAddonsOnly operator: Exists - containers: - - name: azure-npm - image: mcr.microsoft.com/containernetworking/azure-npm:v1.4.1 - command: ["azure-npm"] - args: ["start", "controller"] - resources: - limits: - cpu: 250m - memory: 300Mi - requests: - cpu: 250m - securityContext: - privileged: true - env: - - name: HOSTNAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - - name: NPM_CONFIG - value: /etc/azure-npm/azure-npm.json - volumeMounts: - - name: protocols - mountPath: /etc/protocols - - name: azure-npm-config - mountPath: /etc/azure-npm volumes: - - name: protocols - 
hostPath: + - hostPath: + path: /var/log + type: Directory + name: log + - hostPath: path: /etc/protocols type: File - - name: azure-npm-config - configMap: + name: protocols + - configMap: name: azure-npm-config - serviceAccountName: azure-npm ---- -apiVersion: v1 -kind: Service -metadata: - name: npm-controller-metrics-cluster-service - namespace: kube-system - labels: - app: npm-controller-metrics -spec: - selector: - k8s-app: azure-npm - components: azure-npm-controller - ports: - - port: 9000 - targetPort: 10091 + name: azure-npm-config diff --git a/npm/deploy/manifests/daemon/azure-npm.yaml b/npm/deploy/manifests/daemon/azure-npm.yaml index ed2e3dce51..233c3428c2 100644 --- a/npm/deploy/manifests/daemon/azure-npm.yaml +++ b/npm/deploy/manifests/daemon/azure-npm.yaml @@ -1,94 +1,182 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + addonmanager.kubernetes.io/mode: EnsureExists + name: azure-npm + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + addonmanager.kubernetes.io/mode: EnsureExists + name: azure-npm + namespace: kube-system +rules: +- apiGroups: + - "" + resources: + - pods + - nodes + - namespaces + verbs: + - get + - list + - watch +- apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + addonmanager.kubernetes.io/mode: EnsureExists + name: azure-npm-binding + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: azure-npm +subjects: +- kind: ServiceAccount + name: azure-npm + namespace: kube-system +--- +apiVersion: v1 +data: + azure-npm.json: | + { + "ResyncPeriodInMinutes": 15, + "ListeningPort": 10091, + "ListeningAddress": "0.0.0.0", + "Toggles": { + "EnablePrometheusMetrics": true, + "EnablePprof": true, + "EnableHTTPDebugAPI": true, + "EnableV2NPM": false, + "PlaceAzureChainFirst": 
false + }, + "Transport": { + "Address": "azure-npm.kube-system.svc.cluster.local" + # "Port": 10092 + "Port": 9001 + } + } +kind: ConfigMap +metadata: + name: azure-npm-config + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: azure-npm + component: daemon + name: npm-deamon-metrics-cluster-service + namespace: kube-system +spec: + ports: + - port: 9000 + targetPort: 10091 + selector: + component: deamon + k8s-app: azure-npm +--- apiVersion: apps/v1 kind: DaemonSet metadata: - name: azure-npm-deamon - namespace: kube-system labels: - app: azure-npm-daemon addonmanager.kubernetes.io/mode: EnsureExists + app: azure-npm + component: daemon + name: azure-npm-deamon + namespace: kube-system spec: selector: matchLabels: - k8s-app: azure-npm-daemon - components: azure-npm-daemon + component: daemon + k8s-app: azure-npm template: metadata: - labels: - k8s-app: azure-npm-deamon - components: azure-npm-daemon annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - azure.npm/scrapeable: '' + azure.npm/scrapeable: "" + scheduler.alpha.kubernetes.io/critical-pod: "" + labels: + component: daemon + k8s-app: azure-npm spec: + containers: + - args: + - start + - daemon + command: + - azure-npm + env: + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: NPM_CONFIG + value: /etc/azure-npm/azure-npm.json + - name: DEAMON_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: DEAMON_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + image: mcr.microsoft.com/containernetworking/azure-npm:v1.4.1 + name: azure-npm + resources: + limits: + cpu: 250m + memory: 300Mi + requests: + cpu: 250m + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/log + name: log + - mountPath: /run/xtables.lock + name: xtables-lock + - mountPath: /etc/protocols + name: protocols + - mountPath: /etc/azure-npm + name: azure-npm-config + hostNetwork: true priorityClassName: 
system-node-critical + serviceAccountName: azure-npm tolerations: - - operator: "Exists" - effect: NoExecute - - operator: "Exists" - effect: NoSchedule + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists - key: CriticalAddonsOnly operator: Exists - containers: - - name: azure-npm - image: mcr.microsoft.com/containernetworking/azure-npm:v1.4.1 - command: ["azure-npm"] - args: ["start", "daemon"] - resources: - limits: - cpu: 250m - memory: 300Mi - requests: - cpu: 250m - securityContext: - privileged: true - env: - - name: HOSTNAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - - name: NPM_CONFIG - value: /etc/azure-npm/azure-npm.json - - name: DEAMON_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: DEAMON_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - volumeMounts: - - name: xtables-lock - mountPath: /run/xtables.lock - - name: protocols - mountPath: /etc/protocols - - name: azure-npm-config - mountPath: /etc/azure-npm - hostNetwork: true volumes: - - name: xtables-lock - hostPath: + - hostPath: + path: /var/log + type: Directory + name: log + - hostPath: path: /run/xtables.lock type: File - - name: protocols - hostPath: + name: xtables-lock + - hostPath: path: /etc/protocols type: File - - name: azure-npm-config - configMap: + name: protocols + - configMap: name: azure-npm-config - serviceAccountName: azure-npm ---- -apiVersion: v1 -kind: Service -metadata: - name: npm-metrics-cluster-service - namespace: kube-system - labels: - app: npm-metrics -spec: - selector: - k8s-app: azure-npm - ports: - - port: 9000 - targetPort: 10091 + name: azure-npm-config diff --git a/npm/deploy/npm/azure-npm.yaml b/npm/deploy/npm/azure-npm.yaml index 73e2bb2548..8d3908ca14 100644 --- a/npm/deploy/npm/azure-npm.yaml +++ b/npm/deploy/npm/azure-npm.yaml @@ -97,6 +97,8 @@ spec: - name: NPM_CONFIG value: /etc/azure-npm/azure-npm.json volumeMounts: + - name: log + mountPath: /var/log - name: 
xtables-lock mountPath: /run/xtables.lock - name: protocols @@ -105,6 +107,10 @@ spec: mountPath: /etc/azure-npm hostNetwork: true volumes: + - name: log + hostPath: + path: /var/log + type: Directory - name: xtables-lock hostPath: path: /run/xtables.lock From 0c1af2846e9db9af26691cdfbfacf9fecba7eb3d Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Tue, 1 Feb 2022 18:54:39 +0000 Subject: [PATCH 05/11] Fix failing tests and other minor changes Signed-off-by: Ubuntu --- .github/workflows/cyclonus-netpol-test.yaml | 4 ++-- .pipelines/npm/npm-conformance-tests.yaml | 2 +- Tiltfile | 10 ++++++++++ docs/npm.md | 2 +- npm/cmd/root.go | 8 +++++++- npm/cmd/start.go | 5 ----- npm/deploy/kustomize/base/configmap.yaml | 3 +-- .../kustomize/overlays/controller/deployment.yaml | 4 ++-- npm/deploy/kustomize/overlays/daemon/deployment.yaml | 2 +- npm/deploy/manifests/controller/azure-npm.yaml | 9 ++++----- npm/deploy/manifests/daemon/azure-npm.yaml | 7 +++---- npm/pkg/transport/events_server.go | 2 +- 12 files changed, 33 insertions(+), 25 deletions(-) create mode 100644 Tiltfile diff --git a/.github/workflows/cyclonus-netpol-test.yaml b/.github/workflows/cyclonus-netpol-test.yaml index b23769781f..53233586c3 100644 --- a/.github/workflows/cyclonus-netpol-test.yaml +++ b/.github/workflows/cyclonus-netpol-test.yaml @@ -48,9 +48,9 @@ jobs: # set the ConfigMap based on the build matrix # currently have to restart the daemonset because changing the ConfigMap doesn't restart NPM run: | - sed -i 's/mcr.microsoft.com\/containernetworking\/azure-npm:.*/acnpublic.azurecr.io\/azure-npm:cyclonus/' ./npm/azure-npm.yaml + sed -i 's/mcr.microsoft.com\/containernetworking\/azure-npm:.*/acnpublic.azurecr.io\/azure-npm:cyclonus/' ./npm/deploy/npm/azure-npm.yaml kind load docker-image acnpublic.azurecr.io/azure-npm:cyclonus --name npm-kind - kubectl apply -f ./npm/azure-npm.yaml + kubectl apply -f ./npm/deploy/npm/azure-npm.yaml echo "Applying profile: ${{ matrix.profile }}" kubectl apply -f 
./npm/profiles/${{ matrix.profile }} kubectl rollout restart ds azure-npm -n kube-system diff --git a/.pipelines/npm/npm-conformance-tests.yaml b/.pipelines/npm/npm-conformance-tests.yaml index 580d7b0ad4..518f45c176 100644 --- a/.pipelines/npm/npm-conformance-tests.yaml +++ b/.pipelines/npm/npm-conformance-tests.yaml @@ -164,7 +164,7 @@ jobs: chmod +x kubectl # deploy azure-npm - ./kubectl --kubeconfig=./kubeconfig apply -f https://raw.githubusercontent.com/Azure/azure-container-networking/master/npm/azure-npm.yaml + ./kubectl --kubeconfig=./kubeconfig apply -f https://raw.githubusercontent.com/Azure/azure-container-networking/master/npm/deploy/npm/azure-npm.yaml # swap azure-npm image with one built during run ./kubectl --kubeconfig=./kubeconfig set image daemonset/azure-npm -n kube-system azure-npm=$IMAGE_REGISTRY/azure-npm:$(TAG) diff --git a/Tiltfile b/Tiltfile new file mode 100644 index 0000000000..d94357f591 --- /dev/null +++ b/Tiltfile @@ -0,0 +1,10 @@ +allow_k8s_contexts('acn-dev-azure-cni') +default_registry('ttl.sh/nitishm-12390') +docker_build('azure-npm', '.', dockerfile='npm/Dockerfile', build_args = { + "VERSION": "v1.4.14-101-gf900e319-dirty", + "NPM_AI_PATH": "github.com/Azure/azure-container-networking/npm.aiMetadata", + "NPM_AI_ID": "014c22bd-4107-459e-8475-67909e96edcb" +}) +# watch_file('npm') +k8s_yaml('npm/deploy/manifests/controller/azure-npm.yaml') + diff --git a/docs/npm.md b/docs/npm.md index 724b3345a9..0326cb42f8 100644 --- a/docs/npm.md +++ b/docs/npm.md @@ -12,7 +12,7 @@ Azure-NPM serves as a distributed firewall for the Kubernetes cluster, and it ca Running the command below will bring up one azure-npm instance on each Kubernetes node. 
``` -kubectl apply -f https://raw.githubusercontent.com/Azure/azure-container-networking/master/npm/azure-npm.yaml +kubectl apply -f https://raw.githubusercontent.com/Azure/azure-container-networking/master/npm/deploy/npm/azure-npm.yaml ``` Now you can secure your Kubernetes cluster with Azure-NPM by applying Kubernetes network policies. diff --git a/npm/cmd/root.go b/npm/cmd/root.go index 88a0faab54..0c9bc38b79 100644 --- a/npm/cmd/root.go +++ b/npm/cmd/root.go @@ -14,7 +14,13 @@ func NewRootCmd() *cobra.Command { }, } - rootCmd.AddCommand(newStartNPMCmd()) + startCmd := newStartNPMCmd() + + startCmd.AddCommand(newStartNPMControlplaneCmd()) + startCmd.AddCommand(newStartNPMDaemonCmd()) + + rootCmd.AddCommand(startCmd) + rootCmd.AddCommand(newDebugCmd()) return rootCmd diff --git a/npm/cmd/start.go b/npm/cmd/start.go index eda30ac738..5adbde7052 100644 --- a/npm/cmd/start.go +++ b/npm/cmd/start.go @@ -86,11 +86,6 @@ func newStartNPMCmd() *cobra.Command { startNPMCmd.Flags().String(flagKubeConfigPath, flagDefaults[flagKubeConfigPath], "path to kubeconfig") - // The controlplane subcommand starts the NPM controller's controlplane component in the decomposed mode - startNPMCmd.AddCommand(newStartNPMControlplaneCmd()) - // The daemon subcommand starts the NPM controller's datapath component in the daemon mode - startNPMCmd.AddCommand(newStartNPMDaemonCmd()) - return startNPMCmd } diff --git a/npm/deploy/kustomize/base/configmap.yaml b/npm/deploy/kustomize/base/configmap.yaml index ae7e0ed936..4d8bd0d389 100644 --- a/npm/deploy/kustomize/base/configmap.yaml +++ b/npm/deploy/kustomize/base/configmap.yaml @@ -19,7 +19,6 @@ data: }, "Transport": { "Address": "azure-npm.kube-system.svc.cluster.local" - # "Port": 10092 - "Port": 9001 + "Port": 10092 } } diff --git a/npm/deploy/kustomize/overlays/controller/deployment.yaml b/npm/deploy/kustomize/overlays/controller/deployment.yaml index fb613182e2..12cc0dcd98 100644 --- 
a/npm/deploy/kustomize/overlays/controller/deployment.yaml +++ b/npm/deploy/kustomize/overlays/controller/deployment.yaml @@ -31,9 +31,9 @@ spec: operator: Exists containers: - name: azure-npm - image: mcr.microsoft.com/containernetworking/azure-npm:v1.4.1 + image: azure-npm:v1.4.1 command: ["azure-npm"] - args: ["start", "controller"] + args: ["start", "controlplane"] resources: limits: cpu: 250m diff --git a/npm/deploy/kustomize/overlays/daemon/deployment.yaml b/npm/deploy/kustomize/overlays/daemon/deployment.yaml index f2a452c0f6..7f7346195e 100644 --- a/npm/deploy/kustomize/overlays/daemon/deployment.yaml +++ b/npm/deploy/kustomize/overlays/daemon/deployment.yaml @@ -31,7 +31,7 @@ spec: operator: Exists containers: - name: azure-npm - image: mcr.microsoft.com/containernetworking/azure-npm:v1.4.1 + image: azure-npm:v1.4.1 command: ["azure-npm"] args: ["start", "daemon"] resources: diff --git a/npm/deploy/manifests/controller/azure-npm.yaml b/npm/deploy/manifests/controller/azure-npm.yaml index fdc052978c..e573849f8d 100644 --- a/npm/deploy/manifests/controller/azure-npm.yaml +++ b/npm/deploy/manifests/controller/azure-npm.yaml @@ -64,9 +64,8 @@ data: "PlaceAzureChainFirst": false }, "Transport": { - "Address": "azure-npm.kube-system.svc.cluster.local" - # "Port": 10092 - "Port": 9001 + "Address": "azure-npm.kube-system.svc.cluster.local", + "Port": 19002 } } kind: ConfigMap @@ -134,7 +133,7 @@ spec: containers: - args: - start - - controller + - controlplane command: - azure-npm env: @@ -145,7 +144,7 @@ spec: fieldPath: spec.nodeName - name: NPM_CONFIG value: /etc/azure-npm/azure-npm.json - image: mcr.microsoft.com/containernetworking/azure-npm:v1.4.1 + image: azure-npm:v1.4.1 name: azure-npm resources: limits: diff --git a/npm/deploy/manifests/daemon/azure-npm.yaml b/npm/deploy/manifests/daemon/azure-npm.yaml index 233c3428c2..a3eac49670 100644 --- a/npm/deploy/manifests/daemon/azure-npm.yaml +++ b/npm/deploy/manifests/daemon/azure-npm.yaml @@ -64,9 +64,8 @@ 
data: "PlaceAzureChainFirst": false }, "Transport": { - "Address": "azure-npm.kube-system.svc.cluster.local" - # "Port": 10092 - "Port": 9001 + "Address": "azure-npm.kube-system.svc.cluster.local", + "Port": 10092 } } kind: ConfigMap @@ -135,7 +134,7 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName - image: mcr.microsoft.com/containernetworking/azure-npm:v1.4.1 + image: azure-npm:v1.4.1 name: azure-npm resources: limits: diff --git a/npm/pkg/transport/events_server.go b/npm/pkg/transport/events_server.go index 714560bb48..34038bb61d 100644 --- a/npm/pkg/transport/events_server.go +++ b/npm/pkg/transport/events_server.go @@ -134,7 +134,7 @@ func (m *EventsServer) start(stopCh <-chan struct{}) error { } func (m *EventsServer) handle() error { - klog.Info("Starting transport manager listener") + klog.Infof("Starting transport manager listener on port %v", m.port) lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", m.port)) if err != nil { return fmt.Errorf("failed to handle server connections: %w", err) From 04eae565df0212e2f8cab1c771f9897b92b8c70d Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Tue, 1 Feb 2022 21:58:15 +0000 Subject: [PATCH 06/11] Add RemotePort field to grpc config Use the remote port for the client to connect to the k8s service port Signed-off-by: Nitish Malhotra --- npm/cmd/start_daemon.go | 2 +- npm/config/config.go | 9 ++++++--- npm/deploy/kustomize/base/configmap.yaml | 3 ++- npm/deploy/manifests/controller/azure-npm.yaml | 3 ++- npm/deploy/manifests/daemon/azure-npm.yaml | 7 ++++--- 5 files changed, 15 insertions(+), 9 deletions(-) diff --git a/npm/cmd/start_daemon.go b/npm/cmd/start_daemon.go index cb40f6df62..278019bd11 100644 --- a/npm/cmd/start_daemon.go +++ b/npm/cmd/start_daemon.go @@ -49,7 +49,7 @@ func startDaemon(config npmconfig.Config) error { pod := os.Getenv(podNameEnv) node := os.Getenv(nodeNameEnv) - addr := config.Transport.Address + ":" + strconv.Itoa(config.Transport.Port) + addr := config.Transport.Address + ":" + 
strconv.Itoa(config.Transport.RemotePort) ctx := context.Background() err := initLogging() if err != nil { diff --git a/npm/config/config.go b/npm/config/config.go index 4ceb1cdf33..e6f8ca5a56 100644 --- a/npm/config/config.go +++ b/npm/config/config.go @@ -1,9 +1,10 @@ package npmconfig const ( - defaultResyncPeriod = 15 - defaultListeningPort = 10091 - defaultGrpcPort = 10092 + defaultResyncPeriod = 15 + defaultListeningPort = 10091 + defaultGrpcPort = 10092 + defaultGrpcRemotePort = 9002 // ConfigEnvPath is what's used by viper to load config path ConfigEnvPath = "NPM_CONFIG" ) @@ -35,6 +36,8 @@ type GrpcServerConfig struct { Address string `json:"Address,omitempty"` // Port is the port on which the gRPC server will listen Port int `json:"Port,omitempty"` + // RemotePort is the service port for the client to connect + RemotePort int `json="RemotePort,omitempty"` } type Config struct { diff --git a/npm/deploy/kustomize/base/configmap.yaml b/npm/deploy/kustomize/base/configmap.yaml index 4d8bd0d389..4348c96a05 100644 --- a/npm/deploy/kustomize/base/configmap.yaml +++ b/npm/deploy/kustomize/base/configmap.yaml @@ -19,6 +19,7 @@ data: }, "Transport": { "Address": "azure-npm.kube-system.svc.cluster.local" - "Port": 10092 + "Port": 10092, + "RemotePort": 9002 } } diff --git a/npm/deploy/manifests/controller/azure-npm.yaml b/npm/deploy/manifests/controller/azure-npm.yaml index e573849f8d..dd6df562cd 100644 --- a/npm/deploy/manifests/controller/azure-npm.yaml +++ b/npm/deploy/manifests/controller/azure-npm.yaml @@ -65,7 +65,8 @@ data: }, "Transport": { "Address": "azure-npm.kube-system.svc.cluster.local", - "Port": 19002 + "Port": 19002, + "RemotePort": 9002 } } kind: ConfigMap diff --git a/npm/deploy/manifests/daemon/azure-npm.yaml b/npm/deploy/manifests/daemon/azure-npm.yaml index a3eac49670..f2147f8b65 100644 --- a/npm/deploy/manifests/daemon/azure-npm.yaml +++ b/npm/deploy/manifests/daemon/azure-npm.yaml @@ -65,7 +65,8 @@ data: }, "Transport": { "Address": 
"azure-npm.kube-system.svc.cluster.local", - "Port": 10092 + "Port": 10092, + "RemotePort": 9002 } } kind: ConfigMap @@ -126,11 +127,11 @@ spec: fieldPath: spec.nodeName - name: NPM_CONFIG value: /etc/azure-npm/azure-npm.json - - name: DEAMON_POD_NAME + - name: DAEMON_POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - - name: DEAMON_NODE_NAME + - name: DAEMON_NODE_NAME valueFrom: fieldRef: fieldPath: spec.nodeName From 674b63b11d7496c5a7257b87df7858f362eb6cc3 Mon Sep 17 00:00:00 2001 From: Nitish Malhotra Date: Wed, 2 Feb 2022 10:59:00 -0800 Subject: [PATCH 07/11] Fix lint issue Signed-off-by: Nitish Malhotra --- npm/config/config.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/npm/config/config.go b/npm/config/config.go index e6f8ca5a56..716cbb3662 100644 --- a/npm/config/config.go +++ b/npm/config/config.go @@ -17,8 +17,9 @@ var DefaultConfig = Config{ ListeningAddress: "0.0.0.0", Transport: GrpcServerConfig{ - Address: "0.0.0.0", - Port: defaultGrpcPort, + Address: "0.0.0.0", + Port: defaultGrpcPort, + RemotePort: defaultGrpcRemotePort, }, Toggles: Toggles{ @@ -36,8 +37,8 @@ type GrpcServerConfig struct { Address string `json:"Address,omitempty"` // Port is the port on which the gRPC server will listen Port int `json:"Port,omitempty"` - // RemotePort is the service port for the client to connect - RemotePort int `json="RemotePort,omitempty"` + // RemotePort is the service port for the client to connect to the gRPC server + RemotePort int `json:"RemotePort,omitempty"` } type Config struct { From 74db27b95d3a80cb9d8eea3edd8d3427288ffa66 Mon Sep 17 00:00:00 2001 From: Nitish Malhotra Date: Wed, 2 Feb 2022 11:03:31 -0800 Subject: [PATCH 08/11] Readd the azure-npm.yaml file to npm root for cyclonus Signed-off-by: Nitish Malhotra --- npm/azure-npm.yaml | 159 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 159 insertions(+) create mode 100644 npm/azure-npm.yaml diff --git a/npm/azure-npm.yaml b/npm/azure-npm.yaml new 
file mode 100644 index 0000000000..8d3908ca14 --- /dev/null +++ b/npm/azure-npm.yaml @@ -0,0 +1,159 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: azure-npm + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: azure-npm + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: azure-npm-binding + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +subjects: + - kind: ServiceAccount + name: azure-npm + namespace: kube-system +roleRef: + kind: ClusterRole + name: azure-npm + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: azure-npm + namespace: kube-system + labels: + app: azure-npm + addonmanager.kubernetes.io/mode: EnsureExists +spec: + selector: + matchLabels: + k8s-app: azure-npm + template: + metadata: + labels: + k8s-app: azure-npm + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + azure.npm/scrapeable: '' + spec: + priorityClassName: system-node-critical + tolerations: + - operator: "Exists" + effect: NoExecute + - operator: "Exists" + effect: NoSchedule + - key: CriticalAddonsOnly + operator: Exists + containers: + - name: azure-npm + image: mcr.microsoft.com/containernetworking/azure-npm:v1.4.1 + resources: + limits: + cpu: 250m + memory: 300Mi + requests: + cpu: 250m + securityContext: + privileged: true + env: + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: NPM_CONFIG + value: /etc/azure-npm/azure-npm.json + volumeMounts: + - name: log + mountPath: /var/log + 
- name: xtables-lock + mountPath: /run/xtables.lock + - name: protocols + mountPath: /etc/protocols + - name: azure-npm-config + mountPath: /etc/azure-npm + hostNetwork: true + volumes: + - name: log + hostPath: + path: /var/log + type: Directory + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: File + - name: protocols + hostPath: + path: /etc/protocols + type: File + - name: azure-npm-config + configMap: + name: azure-npm-config + serviceAccountName: azure-npm +--- +apiVersion: v1 +kind: Service +metadata: + name: npm-metrics-cluster-service + namespace: kube-system + labels: + app: npm-metrics +spec: + selector: + k8s-app: azure-npm + ports: + - port: 9000 + targetPort: 10091 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: azure-npm-config + namespace: kube-system +data: + azure-npm.json: | + { + "ResyncPeriodInMinutes": 15, + "ListeningPort": 10091, + "ListeningAddress": "0.0.0.0", + "Toggles": { + "EnablePrometheusMetrics": true, + "EnablePprof": true, + "EnableHTTPDebugAPI": true, + "EnableV2NPM": false, + "PlaceAzureChainFirst": false + } + } From ee958c89af39997d8c885856d0c2471a481b6ae0 Mon Sep 17 00:00:00 2001 From: Nitish Malhotra Date: Wed, 2 Feb 2022 11:03:31 -0800 Subject: [PATCH 09/11] Readd the azure-npm.yaml file to npm root for cyclonus Signed-off-by: Nitish Malhotra --- Tiltfile | 3 +- npm/azure-npm.yaml | 159 ++++++++++++++++++ npm/cmd/start_daemon.go | 2 +- npm/config/config.go | 18 +- npm/deploy/kustomize/base/configmap.yaml | 2 +- .../overlays/controller/deployment.yaml | 5 + .../overlays/controller/service.yaml | 6 +- .../kustomize/overlays/daemon/deployment.yaml | 3 + .../kustomize/overlays/daemon/service.yaml | 5 +- .../manifests/controller/azure-npm.yaml | 9 +- npm/deploy/manifests/daemon/azure-npm.yaml | 8 +- npm/deploy/npm/azure-npm.yaml | 5 + npm/pkg/transport/events_client.go | 2 + npm/pkg/transport/events_server.go | 2 +- 14 files changed, 207 insertions(+), 22 deletions(-) create mode 100644 
npm/azure-npm.yaml diff --git a/Tiltfile b/Tiltfile index d94357f591..cf1b36f29d 100644 --- a/Tiltfile +++ b/Tiltfile @@ -1,4 +1,4 @@ -allow_k8s_contexts('acn-dev-azure-cni') +allow_k8s_contexts(k8s_context()) default_registry('ttl.sh/nitishm-12390') docker_build('azure-npm', '.', dockerfile='npm/Dockerfile', build_args = { "VERSION": "v1.4.14-101-gf900e319-dirty", @@ -7,4 +7,5 @@ docker_build('azure-npm', '.', dockerfile='npm/Dockerfile', build_args = { }) # watch_file('npm') k8s_yaml('npm/deploy/manifests/controller/azure-npm.yaml') +k8s_yaml('npm/deploy/manifests/daemon/azure-npm.yaml', allow_duplicates=True) diff --git a/npm/azure-npm.yaml b/npm/azure-npm.yaml new file mode 100644 index 0000000000..8d3908ca14 --- /dev/null +++ b/npm/azure-npm.yaml @@ -0,0 +1,159 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: azure-npm + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: azure-npm + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: azure-npm-binding + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +subjects: + - kind: ServiceAccount + name: azure-npm + namespace: kube-system +roleRef: + kind: ClusterRole + name: azure-npm + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: azure-npm + namespace: kube-system + labels: + app: azure-npm + addonmanager.kubernetes.io/mode: EnsureExists +spec: + selector: + matchLabels: + k8s-app: azure-npm + template: + metadata: + labels: + k8s-app: azure-npm + annotations: + 
scheduler.alpha.kubernetes.io/critical-pod: '' + azure.npm/scrapeable: '' + spec: + priorityClassName: system-node-critical + tolerations: + - operator: "Exists" + effect: NoExecute + - operator: "Exists" + effect: NoSchedule + - key: CriticalAddonsOnly + operator: Exists + containers: + - name: azure-npm + image: mcr.microsoft.com/containernetworking/azure-npm:v1.4.1 + resources: + limits: + cpu: 250m + memory: 300Mi + requests: + cpu: 250m + securityContext: + privileged: true + env: + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: NPM_CONFIG + value: /etc/azure-npm/azure-npm.json + volumeMounts: + - name: log + mountPath: /var/log + - name: xtables-lock + mountPath: /run/xtables.lock + - name: protocols + mountPath: /etc/protocols + - name: azure-npm-config + mountPath: /etc/azure-npm + hostNetwork: true + volumes: + - name: log + hostPath: + path: /var/log + type: Directory + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: File + - name: protocols + hostPath: + path: /etc/protocols + type: File + - name: azure-npm-config + configMap: + name: azure-npm-config + serviceAccountName: azure-npm +--- +apiVersion: v1 +kind: Service +metadata: + name: npm-metrics-cluster-service + namespace: kube-system + labels: + app: npm-metrics +spec: + selector: + k8s-app: azure-npm + ports: + - port: 9000 + targetPort: 10091 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: azure-npm-config + namespace: kube-system +data: + azure-npm.json: | + { + "ResyncPeriodInMinutes": 15, + "ListeningPort": 10091, + "ListeningAddress": "0.0.0.0", + "Toggles": { + "EnablePrometheusMetrics": true, + "EnablePprof": true, + "EnableHTTPDebugAPI": true, + "EnableV2NPM": false, + "PlaceAzureChainFirst": false + } + } diff --git a/npm/cmd/start_daemon.go b/npm/cmd/start_daemon.go index 278019bd11..c6928c574c 100644 --- a/npm/cmd/start_daemon.go +++ b/npm/cmd/start_daemon.go @@ -49,7 +49,7 @@ func startDaemon(config 
npmconfig.Config) error { pod := os.Getenv(podNameEnv) node := os.Getenv(nodeNameEnv) - addr := config.Transport.Address + ":" + strconv.Itoa(config.Transport.RemotePort) + addr := config.Transport.Address + ":" + strconv.Itoa(config.Transport.ServicePort) ctx := context.Background() err := initLogging() if err != nil { diff --git a/npm/config/config.go b/npm/config/config.go index 716cbb3662..3bb07114ef 100644 --- a/npm/config/config.go +++ b/npm/config/config.go @@ -1,10 +1,10 @@ package npmconfig const ( - defaultResyncPeriod = 15 - defaultListeningPort = 10091 - defaultGrpcPort = 10092 - defaultGrpcRemotePort = 9002 + defaultResyncPeriod = 15 + defaultListeningPort = 10091 + defaultGrpcPort = 10092 + defaultGrpcServicePort = 9002 // ConfigEnvPath is what's used by viper to load config path ConfigEnvPath = "NPM_CONFIG" ) @@ -17,9 +17,9 @@ var DefaultConfig = Config{ ListeningAddress: "0.0.0.0", Transport: GrpcServerConfig{ - Address: "0.0.0.0", - Port: defaultGrpcPort, - RemotePort: defaultGrpcRemotePort, + Address: "0.0.0.0", + Port: defaultGrpcPort, + ServicePort: defaultGrpcServicePort, }, Toggles: Toggles{ @@ -37,8 +37,8 @@ type GrpcServerConfig struct { Address string `json:"Address,omitempty"` // Port is the port on which the gRPC server will listen Port int `json:"Port,omitempty"` - // RemotePort is the service port for the client to connect to the gRPC server - RemotePort int `json:"RemotePort,omitempty"` + // ServicePort is the service port for the client to connect to the gRPC server + ServicePort int `json:"ServicePort,omitempty"` } type Config struct { diff --git a/npm/deploy/kustomize/base/configmap.yaml b/npm/deploy/kustomize/base/configmap.yaml index 4348c96a05..d9f549f2a3 100644 --- a/npm/deploy/kustomize/base/configmap.yaml +++ b/npm/deploy/kustomize/base/configmap.yaml @@ -20,6 +20,6 @@ data: "Transport": { "Address": "azure-npm.kube-system.svc.cluster.local" "Port": 10092, - "RemotePort": 9002 + "ServicePort": 9001 } } diff --git 
a/npm/deploy/kustomize/overlays/controller/deployment.yaml b/npm/deploy/kustomize/overlays/controller/deployment.yaml index 12cc0dcd98..f6294e3a56 100644 --- a/npm/deploy/kustomize/overlays/controller/deployment.yaml +++ b/npm/deploy/kustomize/overlays/controller/deployment.yaml @@ -31,6 +31,11 @@ spec: operator: Exists containers: - name: azure-npm + ports: + - name: metrics + containerPort: 10091 + - name: http + containerPort: 10092 image: azure-npm:v1.4.1 command: ["azure-npm"] args: ["start", "controlplane"] diff --git a/npm/deploy/kustomize/overlays/controller/service.yaml b/npm/deploy/kustomize/overlays/controller/service.yaml index d603091f3e..3db16d9aca 100644 --- a/npm/deploy/kustomize/overlays/controller/service.yaml +++ b/npm/deploy/kustomize/overlays/controller/service.yaml @@ -29,6 +29,6 @@ spec: k8s-app: azure-npm component: controller ports: - - port: 9001 - name: metrics - targetPort: 10092 + - name: http + port: 9001 + targetPort: 10092 diff --git a/npm/deploy/kustomize/overlays/daemon/deployment.yaml b/npm/deploy/kustomize/overlays/daemon/deployment.yaml index 7f7346195e..93728fc46e 100644 --- a/npm/deploy/kustomize/overlays/daemon/deployment.yaml +++ b/npm/deploy/kustomize/overlays/daemon/deployment.yaml @@ -31,6 +31,9 @@ spec: operator: Exists containers: - name: azure-npm + ports: + - name: metrics + containerPort: 10091 image: azure-npm:v1.4.1 command: ["azure-npm"] args: ["start", "daemon"] diff --git a/npm/deploy/kustomize/overlays/daemon/service.yaml b/npm/deploy/kustomize/overlays/daemon/service.yaml index 8d5f796035..2cdbe35e2e 100644 --- a/npm/deploy/kustomize/overlays/daemon/service.yaml +++ b/npm/deploy/kustomize/overlays/daemon/service.yaml @@ -11,5 +11,6 @@ spec: k8s-app: azure-npm component: deamon ports: - - port: 9000 - targetPort: 10091 + - name: metrics + port: 9000 + targetPort: 10091 diff --git a/npm/deploy/manifests/controller/azure-npm.yaml b/npm/deploy/manifests/controller/azure-npm.yaml index dd6df562cd..5804e79db9 100644 
--- a/npm/deploy/manifests/controller/azure-npm.yaml +++ b/npm/deploy/manifests/controller/azure-npm.yaml @@ -66,7 +66,7 @@ data: "Transport": { "Address": "azure-npm.kube-system.svc.cluster.local", "Port": 19002, - "RemotePort": 9002 + "ServicePort": 9001 } } kind: ConfigMap @@ -84,7 +84,7 @@ metadata: namespace: kube-system spec: ports: - - name: metrics + - name: http port: 9001 targetPort: 10092 selector: @@ -135,6 +135,11 @@ spec: - args: - start - controlplane + ports: + - name: metrics + containerPort: 10091 + - name: http + containerPort: 10092 command: - azure-npm env: diff --git a/npm/deploy/manifests/daemon/azure-npm.yaml b/npm/deploy/manifests/daemon/azure-npm.yaml index f2147f8b65..ca4508cb35 100644 --- a/npm/deploy/manifests/daemon/azure-npm.yaml +++ b/npm/deploy/manifests/daemon/azure-npm.yaml @@ -66,7 +66,7 @@ data: "Transport": { "Address": "azure-npm.kube-system.svc.cluster.local", "Port": 10092, - "RemotePort": 9002 + "ServicePort": 9001 } } kind: ConfigMap @@ -84,7 +84,8 @@ metadata: namespace: kube-system spec: ports: - - port: 9000 + - name: metrics + port: 9000 targetPort: 10091 selector: component: deamon @@ -137,6 +138,9 @@ spec: fieldPath: spec.nodeName image: azure-npm:v1.4.1 name: azure-npm + ports: + - name: metrics + containerPort: 10091 resources: limits: cpu: 250m diff --git a/npm/deploy/npm/azure-npm.yaml b/npm/deploy/npm/azure-npm.yaml index 8d3908ca14..bf4be6d675 100644 --- a/npm/deploy/npm/azure-npm.yaml +++ b/npm/deploy/npm/azure-npm.yaml @@ -155,5 +155,10 @@ data: "EnableHTTPDebugAPI": true, "EnableV2NPM": false, "PlaceAzureChainFirst": false + }, + "Transport": { + "Address": "azure-npm.kube-system.svc.cluster.local", + "Port": 19002, + "ServicePort": 9001 } } diff --git a/npm/pkg/transport/events_client.go b/npm/pkg/transport/events_client.go index 481b1ed4f8..5e91eea9e4 100644 --- a/npm/pkg/transport/events_client.go +++ b/npm/pkg/transport/events_client.go @@ -35,7 +35,9 @@ func NewEventsClient(ctx context.Context, pod, 
node, addr string) (*EventsClient return nil, ErrAddressNil } + klog.Infof("Connecting to NPM controller gRPC server at address %s\n", addr) // TODO Make this secure + // TODO Remove WithBlock option post testing cc, err := grpc.DialContext(ctx, addr, grpc.WithInsecure(), grpc.WithBlock()) if err != nil { return nil, fmt.Errorf("failed to dial %s: %w", addr, err) diff --git a/npm/pkg/transport/events_server.go b/npm/pkg/transport/events_server.go index 34038bb61d..3361531bb5 100644 --- a/npm/pkg/transport/events_server.go +++ b/npm/pkg/transport/events_server.go @@ -135,7 +135,7 @@ func (m *EventsServer) start(stopCh <-chan struct{}) error { func (m *EventsServer) handle() error { klog.Infof("Starting transport manager listener on port %v", m.port) - lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", m.port)) + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", m.port)) if err != nil { return fmt.Errorf("failed to handle server connections: %w", err) } From 017a32a08e4bc6953df1612ee06758322f1618ec Mon Sep 17 00:00:00 2001 From: Nitish Malhotra Date: Wed, 2 Feb 2022 11:03:31 -0800 Subject: [PATCH 10/11] Merge branch 'master' of git://github.com/Azure/azure-container-networking --- .github/workflows/cyclonus-netpol-test.yaml | 4 +- .pipelines/npm/npm-conformance-tests.yaml | 2 +- Tiltfile | 3 +- npm/azure-npm.yaml | 159 ++++++++++++++++++ npm/cmd/start_daemon.go | 2 +- npm/config/config.go | 18 +- npm/deploy/kustomize/base/configmap.yaml | 2 +- .../overlays/controller/deployment.yaml | 5 + .../overlays/controller/service.yaml | 6 +- .../kustomize/overlays/daemon/deployment.yaml | 3 + .../kustomize/overlays/daemon/service.yaml | 5 +- .../manifests/controller/azure-npm.yaml | 9 +- npm/deploy/manifests/daemon/azure-npm.yaml | 8 +- npm/deploy/npm/azure-npm.yaml | 5 + npm/pkg/transport/events_client.go | 2 + npm/pkg/transport/events_server.go | 2 +- 16 files changed, 210 insertions(+), 25 deletions(-) create mode 100644 npm/azure-npm.yaml diff --git 
a/.github/workflows/cyclonus-netpol-test.yaml b/.github/workflows/cyclonus-netpol-test.yaml index 53233586c3..b23769781f 100644 --- a/.github/workflows/cyclonus-netpol-test.yaml +++ b/.github/workflows/cyclonus-netpol-test.yaml @@ -48,9 +48,9 @@ jobs: # set the ConfigMap based on the build matrix # currently have to restart the daemonset because changing the ConfigMap doesn't restart NPM run: | - sed -i 's/mcr.microsoft.com\/containernetworking\/azure-npm:.*/acnpublic.azurecr.io\/azure-npm:cyclonus/' ./npm/deploy/npm/azure-npm.yaml + sed -i 's/mcr.microsoft.com\/containernetworking\/azure-npm:.*/acnpublic.azurecr.io\/azure-npm:cyclonus/' ./npm/azure-npm.yaml kind load docker-image acnpublic.azurecr.io/azure-npm:cyclonus --name npm-kind - kubectl apply -f ./npm/deploy/npm/azure-npm.yaml + kubectl apply -f ./npm/azure-npm.yaml echo "Applying profile: ${{ matrix.profile }}" kubectl apply -f ./npm/profiles/${{ matrix.profile }} kubectl rollout restart ds azure-npm -n kube-system diff --git a/.pipelines/npm/npm-conformance-tests.yaml b/.pipelines/npm/npm-conformance-tests.yaml index 518f45c176..580d7b0ad4 100644 --- a/.pipelines/npm/npm-conformance-tests.yaml +++ b/.pipelines/npm/npm-conformance-tests.yaml @@ -164,7 +164,7 @@ jobs: chmod +x kubectl # deploy azure-npm - ./kubectl --kubeconfig=./kubeconfig apply -f https://raw.githubusercontent.com/Azure/azure-container-networking/master/npm/deploy/npm/azure-npm.yaml + ./kubectl --kubeconfig=./kubeconfig apply -f https://raw.githubusercontent.com/Azure/azure-container-networking/master/npm/azure-npm.yaml # swap azure-npm image with one built during run ./kubectl --kubeconfig=./kubeconfig set image daemonset/azure-npm -n kube-system azure-npm=$IMAGE_REGISTRY/azure-npm:$(TAG) diff --git a/Tiltfile b/Tiltfile index d94357f591..cf1b36f29d 100644 --- a/Tiltfile +++ b/Tiltfile @@ -1,4 +1,4 @@ -allow_k8s_contexts('acn-dev-azure-cni') +allow_k8s_contexts(k8s_context()) default_registry('ttl.sh/nitishm-12390') 
docker_build('azure-npm', '.', dockerfile='npm/Dockerfile', build_args = { "VERSION": "v1.4.14-101-gf900e319-dirty", @@ -7,4 +7,5 @@ docker_build('azure-npm', '.', dockerfile='npm/Dockerfile', build_args = { }) # watch_file('npm') k8s_yaml('npm/deploy/manifests/controller/azure-npm.yaml') +k8s_yaml('npm/deploy/manifests/daemon/azure-npm.yaml', allow_duplicates=True) diff --git a/npm/azure-npm.yaml b/npm/azure-npm.yaml new file mode 100644 index 0000000000..8d3908ca14 --- /dev/null +++ b/npm/azure-npm.yaml @@ -0,0 +1,159 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: azure-npm + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: azure-npm + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +rules: + - apiGroups: + - "" + resources: + - pods + - nodes + - namespaces + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: azure-npm-binding + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: EnsureExists +subjects: + - kind: ServiceAccount + name: azure-npm + namespace: kube-system +roleRef: + kind: ClusterRole + name: azure-npm + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: azure-npm + namespace: kube-system + labels: + app: azure-npm + addonmanager.kubernetes.io/mode: EnsureExists +spec: + selector: + matchLabels: + k8s-app: azure-npm + template: + metadata: + labels: + k8s-app: azure-npm + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + azure.npm/scrapeable: '' + spec: + priorityClassName: system-node-critical + tolerations: + - operator: "Exists" + effect: NoExecute + - operator: "Exists" + effect: NoSchedule + - key: CriticalAddonsOnly + operator: 
Exists + containers: + - name: azure-npm + image: mcr.microsoft.com/containernetworking/azure-npm:v1.4.1 + resources: + limits: + cpu: 250m + memory: 300Mi + requests: + cpu: 250m + securityContext: + privileged: true + env: + - name: HOSTNAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: NPM_CONFIG + value: /etc/azure-npm/azure-npm.json + volumeMounts: + - name: log + mountPath: /var/log + - name: xtables-lock + mountPath: /run/xtables.lock + - name: protocols + mountPath: /etc/protocols + - name: azure-npm-config + mountPath: /etc/azure-npm + hostNetwork: true + volumes: + - name: log + hostPath: + path: /var/log + type: Directory + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: File + - name: protocols + hostPath: + path: /etc/protocols + type: File + - name: azure-npm-config + configMap: + name: azure-npm-config + serviceAccountName: azure-npm +--- +apiVersion: v1 +kind: Service +metadata: + name: npm-metrics-cluster-service + namespace: kube-system + labels: + app: npm-metrics +spec: + selector: + k8s-app: azure-npm + ports: + - port: 9000 + targetPort: 10091 +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: azure-npm-config + namespace: kube-system +data: + azure-npm.json: | + { + "ResyncPeriodInMinutes": 15, + "ListeningPort": 10091, + "ListeningAddress": "0.0.0.0", + "Toggles": { + "EnablePrometheusMetrics": true, + "EnablePprof": true, + "EnableHTTPDebugAPI": true, + "EnableV2NPM": false, + "PlaceAzureChainFirst": false + } + } diff --git a/npm/cmd/start_daemon.go b/npm/cmd/start_daemon.go index 278019bd11..c6928c574c 100644 --- a/npm/cmd/start_daemon.go +++ b/npm/cmd/start_daemon.go @@ -49,7 +49,7 @@ func startDaemon(config npmconfig.Config) error { pod := os.Getenv(podNameEnv) node := os.Getenv(nodeNameEnv) - addr := config.Transport.Address + ":" + strconv.Itoa(config.Transport.RemotePort) + addr := config.Transport.Address + ":" + strconv.Itoa(config.Transport.ServicePort) ctx := 
context.Background() err := initLogging() if err != nil { diff --git a/npm/config/config.go b/npm/config/config.go index 716cbb3662..3bb07114ef 100644 --- a/npm/config/config.go +++ b/npm/config/config.go @@ -1,10 +1,10 @@ package npmconfig const ( - defaultResyncPeriod = 15 - defaultListeningPort = 10091 - defaultGrpcPort = 10092 - defaultGrpcRemotePort = 9002 + defaultResyncPeriod = 15 + defaultListeningPort = 10091 + defaultGrpcPort = 10092 + defaultGrpcServicePort = 9002 // ConfigEnvPath is what's used by viper to load config path ConfigEnvPath = "NPM_CONFIG" ) @@ -17,9 +17,9 @@ var DefaultConfig = Config{ ListeningAddress: "0.0.0.0", Transport: GrpcServerConfig{ - Address: "0.0.0.0", - Port: defaultGrpcPort, - RemotePort: defaultGrpcRemotePort, + Address: "0.0.0.0", + Port: defaultGrpcPort, + ServicePort: defaultGrpcServicePort, }, Toggles: Toggles{ @@ -37,8 +37,8 @@ type GrpcServerConfig struct { Address string `json:"Address,omitempty"` // Port is the port on which the gRPC server will listen Port int `json:"Port,omitempty"` - // RemotePort is the service port for the client to connect to the gRPC server - RemotePort int `json:"RemotePort,omitempty"` + // ServicePort is the service port for the client to connect to the gRPC server + ServicePort int `json:"ServicePort,omitempty"` } type Config struct { diff --git a/npm/deploy/kustomize/base/configmap.yaml b/npm/deploy/kustomize/base/configmap.yaml index 4348c96a05..d9f549f2a3 100644 --- a/npm/deploy/kustomize/base/configmap.yaml +++ b/npm/deploy/kustomize/base/configmap.yaml @@ -20,6 +20,6 @@ data: "Transport": { "Address": "azure-npm.kube-system.svc.cluster.local" "Port": 10092, - "RemotePort": 9002 + "ServicePort": 9001 } } diff --git a/npm/deploy/kustomize/overlays/controller/deployment.yaml b/npm/deploy/kustomize/overlays/controller/deployment.yaml index 12cc0dcd98..f6294e3a56 100644 --- a/npm/deploy/kustomize/overlays/controller/deployment.yaml +++ 
b/npm/deploy/kustomize/overlays/controller/deployment.yaml @@ -31,6 +31,11 @@ spec: operator: Exists containers: - name: azure-npm + ports: + - name: metrics + containerPort: 10091 + - name: http + containerPort: 10092 image: azure-npm:v1.4.1 command: ["azure-npm"] args: ["start", "controlplane"] diff --git a/npm/deploy/kustomize/overlays/controller/service.yaml b/npm/deploy/kustomize/overlays/controller/service.yaml index d603091f3e..3db16d9aca 100644 --- a/npm/deploy/kustomize/overlays/controller/service.yaml +++ b/npm/deploy/kustomize/overlays/controller/service.yaml @@ -29,6 +29,6 @@ spec: k8s-app: azure-npm component: controller ports: - - port: 9001 - name: metrics - targetPort: 10092 + - name: http + port: 9001 + targetPort: 10092 diff --git a/npm/deploy/kustomize/overlays/daemon/deployment.yaml b/npm/deploy/kustomize/overlays/daemon/deployment.yaml index 7f7346195e..93728fc46e 100644 --- a/npm/deploy/kustomize/overlays/daemon/deployment.yaml +++ b/npm/deploy/kustomize/overlays/daemon/deployment.yaml @@ -31,6 +31,9 @@ spec: operator: Exists containers: - name: azure-npm + ports: + - name: metrics + containerPort: 10091 image: azure-npm:v1.4.1 command: ["azure-npm"] args: ["start", "daemon"] diff --git a/npm/deploy/kustomize/overlays/daemon/service.yaml b/npm/deploy/kustomize/overlays/daemon/service.yaml index 8d5f796035..2cdbe35e2e 100644 --- a/npm/deploy/kustomize/overlays/daemon/service.yaml +++ b/npm/deploy/kustomize/overlays/daemon/service.yaml @@ -11,5 +11,6 @@ spec: k8s-app: azure-npm component: deamon ports: - - port: 9000 - targetPort: 10091 + - name: metrics + port: 9000 + targetPort: 10091 diff --git a/npm/deploy/manifests/controller/azure-npm.yaml b/npm/deploy/manifests/controller/azure-npm.yaml index dd6df562cd..5804e79db9 100644 --- a/npm/deploy/manifests/controller/azure-npm.yaml +++ b/npm/deploy/manifests/controller/azure-npm.yaml @@ -66,7 +66,7 @@ data: "Transport": { "Address": "azure-npm.kube-system.svc.cluster.local", "Port": 19002, - 
"RemotePort": 9002 + "ServicePort": 9001 } } kind: ConfigMap @@ -84,7 +84,7 @@ metadata: namespace: kube-system spec: ports: - - name: metrics + - name: http port: 9001 targetPort: 10092 selector: @@ -135,6 +135,11 @@ spec: - args: - start - controlplane + ports: + - name: metrics + containerPort: 10091 + - name: http + containerPort: 10092 command: - azure-npm env: diff --git a/npm/deploy/manifests/daemon/azure-npm.yaml b/npm/deploy/manifests/daemon/azure-npm.yaml index f2147f8b65..ca4508cb35 100644 --- a/npm/deploy/manifests/daemon/azure-npm.yaml +++ b/npm/deploy/manifests/daemon/azure-npm.yaml @@ -66,7 +66,7 @@ data: "Transport": { "Address": "azure-npm.kube-system.svc.cluster.local", "Port": 10092, - "RemotePort": 9002 + "ServicePort": 9001 } } kind: ConfigMap @@ -84,7 +84,8 @@ metadata: namespace: kube-system spec: ports: - - port: 9000 + - name: metrics + port: 9000 targetPort: 10091 selector: component: deamon @@ -137,6 +138,9 @@ spec: fieldPath: spec.nodeName image: azure-npm:v1.4.1 name: azure-npm + ports: + - name: metrics + containerPort: 10091 resources: limits: cpu: 250m diff --git a/npm/deploy/npm/azure-npm.yaml b/npm/deploy/npm/azure-npm.yaml index 8d3908ca14..bf4be6d675 100644 --- a/npm/deploy/npm/azure-npm.yaml +++ b/npm/deploy/npm/azure-npm.yaml @@ -155,5 +155,10 @@ data: "EnableHTTPDebugAPI": true, "EnableV2NPM": false, "PlaceAzureChainFirst": false + }, + "Transport": { + "Address": "azure-npm.kube-system.svc.cluster.local", + "Port": 19002, + "ServicePort": 9001 } } diff --git a/npm/pkg/transport/events_client.go b/npm/pkg/transport/events_client.go index 481b1ed4f8..5e91eea9e4 100644 --- a/npm/pkg/transport/events_client.go +++ b/npm/pkg/transport/events_client.go @@ -35,7 +35,9 @@ func NewEventsClient(ctx context.Context, pod, node, addr string) (*EventsClient return nil, ErrAddressNil } + klog.Infof("Connecting to NPM controller gRPC server at address %s\n", addr) // TODO Make this secure + // TODO Remove WithBlock option post testing cc, 
err := grpc.DialContext(ctx, addr, grpc.WithInsecure(), grpc.WithBlock()) if err != nil { return nil, fmt.Errorf("failed to dial %s: %w", addr, err) diff --git a/npm/pkg/transport/events_server.go b/npm/pkg/transport/events_server.go index 34038bb61d..3361531bb5 100644 --- a/npm/pkg/transport/events_server.go +++ b/npm/pkg/transport/events_server.go @@ -135,7 +135,7 @@ func (m *EventsServer) start(stopCh <-chan struct{}) error { func (m *EventsServer) handle() error { klog.Infof("Starting transport manager listener on port %v", m.port) - lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", m.port)) + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", m.port)) if err != nil { return fmt.Errorf("failed to handle server connections: %w", err) } From db39877a11056a49108baa11866b6b6004a29865 Mon Sep 17 00:00:00 2001 From: Nitish Malhotra Date: Thu, 3 Feb 2022 18:33:33 -0800 Subject: [PATCH 11/11] Add tm config to windows example manifest Signed-off-by: Nitish Malhotra --- npm/examples/windows/azure-npm.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/npm/examples/windows/azure-npm.yaml b/npm/examples/windows/azure-npm.yaml index 3b7af924ac..d892e95bb8 100644 --- a/npm/examples/windows/azure-npm.yaml +++ b/npm/examples/windows/azure-npm.yaml @@ -143,6 +143,11 @@ data: "EnableV2NPM": true, "PlaceAzureChainFirst": false, "ApplyIPSetsOnNeed": false + }, + "Transport": { + "Address": "azure-npm.kube-system.svc.cluster.local", + "Port": 10092, + "ServicePort": 9001 } }