Add static NetworkPolicy for marketplace-operator #644
base: master

Conversation
/retest

1 similar comment

/retest

/test-required

/retest-required

/test e2e-gcp-operator

/hold
@rashmigottipati please help resolve them before the PR is merged. Frankly, they block functionality, so they need to be fixed as a priority. Thanks
/test e2e-gcp-operator

@rashmigottipati
@rashmigottipati

```yaml
ingress:
- ports:
  - protocol: TCP
    port: 8081
```

This is the same as the other NetworkPolicies on the metrics port. Please talk to the SME about it.
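For context, one hedged way to confirm which metrics port the operator actually exposes (this assumes the standard `marketplace-operator` Deployment name):

```console
# hypothetical spot-check: list the container ports declared on the deployment
oc -n openshift-marketplace get deploy marketplace-operator \
  -o jsonpath='{.spec.template.spec.containers[*].ports}'
```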
```yaml
    port: 8081
egress:
- ports:
  - protocol: TCP
```
@rashmigottipati I guess it is not needed.
@rashmigottipati Hi, a reminder! Thanks.
@rashmigottipati with the new rule, it still does not work for https://issues.redhat.com/browse/OCPBUGS-58388.
/retest
I see that there are changes in the NP to attempt to interact with the HCP API server. I'm not sure if these will be effective since it doesn't appear that HCP currently implements OLM-specific NPs, but at least my original comment isn't correct.
@grokspawn

```console
[root@preserve-olm-env2 OPRUN-3896]# oc -n default get svc
NAME                        TYPE           CLUSTER-IP       EXTERNAL-IP                            PORT(S)   AGE
kubernetes                  ClusterIP      172.31.0.1       <none>                                 443/TCP   24m
openshift                   ExternalName   <none>           kubernetes.default.svc.cluster.local   <none>    20m
openshift-apiserver         ClusterIP      172.31.205.130   <none>                                 443/TCP   23m
openshift-oauth-apiserver   ClusterIP      172.31.98.193    <none>                                 443/TCP   23m
packageserver               ClusterIP      172.31.131.55    <none>                                 443/TCP   23m
[root@preserve-olm-env2 OPRUN-3896]# oc -n default get svc kubernetes -o yaml
apiVersion: v1
kind: Service
apiVersion: v1
kind: Service
metadata:
creationTimestamp: "2025-07-07T01:07:49Z"
labels:
component: apiserver
provider: kubernetes
name: kubernetes
namespace: default
resourceVersion: "275"
uid: 9c4ae8ee-1533-490c-8ff2-12fce8ca9a36
spec:
clusterIP: 172.31.0.1
clusterIPs:
- 172.31.0.1
internalTrafficPolicy: Cluster
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: https
port: 443
protocol: TCP
targetPort: 6443
sessionAffinity: None
type: ClusterIP
status:
  loadBalancer: {}
```

So, I am not sure that https://issues.redhat.com/browse/OCPBUGS-58388 is HyperShift's to fix; personally, I think it is OLM's to fix. If there is no good solution for it, maybe we could remove the deny-all rule for now as a workaround.
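Worth noting for anyone following along: egress NetworkPolicy rules are generally enforced after service DNAT, so traffic sent to the `kubernetes` Service above (172.31.0.1:443) is matched against the endpoint's targetPort 6443, not the Service port 443. Below is a minimal sketch of an egress rule matching that translated traffic, assuming this DNAT-before-policy behavior holds for the CNI in use; the policy name is illustrative, not from this PR:

```yaml
# hedged sketch: allow egress to the apiserver endpoints on the
# post-DNAT port 6443; the Service-facing port 443 never appears
# on the wire once the destination has been translated
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-apiserver-egress   # hypothetical name
  namespace: openshift-marketplace
spec:
  podSelector: {}
  policyTypes:
  - Egress
  egress:
  - ports:
    - protocol: TCP
      port: 6443
```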
@rashmigottipati @grokspawn @oceanc80

```console
[root@preserve-olm-env2 OPRUN-3896]# oc -n kube-system get pod
NAME                                                              READY   STATUS    RESTARTS   AGE
konnectivity-agent-kq67s                                          1/1     Running   0          130m
konnectivity-agent-lvnll                                          1/1     Running   0          130m
konnectivity-agent-pxl65                                          1/1     Running   0          130m
kube-apiserver-proxy-ip-10-0-143-235.us-east-2.compute.internal   1/1     Running   0          130m
kube-apiserver-proxy-ip-10-0-144-78.us-east-2.compute.internal    1/1     Running   0          130m
kube-apiserver-proxy-ip-10-0-171-79.us-east-2.compute.internal    1/1     Running   0          130m
[root@preserve-olm-env2 OPRUN-3896]# oc -n kube-system get pod kube-apiserver-proxy-ip-10-0-143-235.us-east-2.compute.internal -o yaml
apiVersion: v1
kind: Pod
metadata:
annotations:
kubernetes.io/config.hash: a62cd84106bc7e639dbebf92ca2bcac9
kubernetes.io/config.mirror: a62cd84106bc7e639dbebf92ca2bcac9
kubernetes.io/config.seen: "2025-07-08T02:50:32.831271083Z"
kubernetes.io/config.source: file
creationTimestamp: "2025-07-08T02:50:34Z"
labels:
k8s-app: kube-apiserver-proxy
name: kube-apiserver-proxy-ip-10-0-143-235.us-east-2.compute.internal
namespace: kube-system
ownerReferences:
- apiVersion: v1
controller: true
kind: Node
name: ip-10-0-143-235.us-east-2.compute.internal
uid: 1302e9f2-809d-4c42-ac79-e5f91f9ac4ce
resourceVersion: "5611"
uid: c9cabec3-a9d3-4948-ba08-86190a71ee89
spec:
containers:
- command:
- haproxy
- -f
- /usr/local/etc/haproxy
image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:517e6ecc165325a5f772ff3df06471bb7763e097c6658f68ea04253262341e02
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
host: 172.20.0.1
path: /version
port: 6443
scheme: HTTPS
initialDelaySeconds: 120
periodSeconds: 120
successThreshold: 1
timeoutSeconds: 1
name: haproxy
ports:
- containerPort: 6443
hostPort: 6443
name: apiserver
protocol: TCP
resources:
requests:
cpu: 13m
memory: 16Mi
securityContext:
runAsUser: 1001
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /usr/local/etc/haproxy
name: config
dnsPolicy: ClusterFirst
enableServiceLinks: true
hostNetwork: true
nodeName: ip-10-0-143-235.us-east-2.compute.internal
preemptionPolicy: PreemptLowerPriority
priority: 2000001000
priorityClassName: system-node-critical
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
key: node.kubernetes.io/memory-pressure
operator: Exists
volumes:
- hostPath:
path: /etc/kubernetes/apiserver-proxy-config
type: ""
name: config
status:
conditions:
- lastProbeTime: null
lastTransitionTime: "2025-07-08T02:51:16Z"
status: "True"
type: PodReadyToStartContainers
- lastProbeTime: null
lastTransitionTime: "2025-07-08T02:50:33Z"
status: "True"
type: Initialized
- lastProbeTime: null
lastTransitionTime: "2025-07-08T02:51:16Z"
status: "True"
type: Ready
- lastProbeTime: null
lastTransitionTime: "2025-07-08T02:51:16Z"
status: "True"
type: ContainersReady
- lastProbeTime: null
lastTransitionTime: "2025-07-08T02:50:33Z"
status: "True"
type: PodScheduled
containerStatuses:
- containerID: cri-o://e7e41d2fa8d3b125dbcd12e8601751345f9ca5e6568f0bb2f656277eb2d41fad
image: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:517e6ecc165325a5f772ff3df06471bb7763e097c6658f68ea04253262341e02
imageID: quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:517e6ecc165325a5f772ff3df06471bb7763e097c6658f68ea04253262341e02
lastState: {}
name: haproxy
ready: true
restartCount: 0
started: true
state:
running:
startedAt: "2025-07-08T02:51:15Z"
volumeMounts:
- mountPath: /usr/local/etc/haproxy
name: config
hostIP: 10.0.143.235
hostIPs:
- ip: 10.0.143.235
phase: Running
podIP: 10.0.143.235
podIPs:
- ip: 10.0.143.235
qosClass: Burstable
startTime: "2025-07-08T02:50:33Z" So, I tried the policy for 6443, and it works. [root@preserve-olm-env2 OPRUN-3896]# oc -n openshift-marketplace get networkpolicy -o yaml
apiVersion: v1
items:
- apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"networking.k8s.io/v1","kind":"NetworkPolicy","metadata":{"annotations":{},"name":"default-deny-all","namespace":"openshift-marketplace"},"spec":{"egress":[{"ports":[{"port":443,"protocol":"TCP"}],"to":[{"ipBlock":{"cidr":"172.31.0.1/32"}}]}],"podSelector":{},"policyTypes":["Ingress","Egress"]}}
creationTimestamp: "2025-07-08T03:08:44Z"
generation: 3
name: default-deny-all
namespace: openshift-marketplace
resourceVersion: "49438"
uid: 03e44016-4bcc-41f8-8bcf-0b0360eee0ed
spec:
egress:
- ports:
- port: 6443
protocol: TCP
podSelector: {}
policyTypes:
- Ingress
- Egress
- apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"networking.k8s.io/v1","kind":"NetworkPolicy","metadata":{"annotations":{},"name":"marketplace-operator","namespace":"openshift-marketplace"},"spec":{"egress":[{"ports":[{"port":6443,"protocol":"TCP"},{"port":53,"protocol":"TCP"},{"port":53,"protocol":"UDP"}]}],"ingress":[{"ports":[{"port":8081,"protocol":"TCP"}]}],"podSelector":{"matchLabels":{"name":"marketplace-operator"}},"policyTypes":["Ingress","Egress"]}}
creationTimestamp: "2025-07-08T03:08:48Z"
generation: 1
name: marketplace-operator
namespace: openshift-marketplace
resourceVersion: "14314"
uid: 50bbd9cb-abe7-47ed-a9a1-1dfd893099d9
spec:
egress:
- ports:
- port: 6443
protocol: TCP
- port: 53
protocol: TCP
- port: 53
protocol: UDP
ingress:
- ports:
- port: 8081
protocol: TCP
podSelector:
matchLabels:
name: marketplace-operator
policyTypes:
- Ingress
- Egress
kind: List
metadata:
resourceVersion: "" So, please make the rule as the following: apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
name: default-deny-all
namespace: openshift-marketplace
spec:
egress:
- ports:
- port: 6443
protocol: TCP
podSelector: {}
policyTypes:
- Ingress
- Egress
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: marketplace-operator
namespace: openshift-marketplace
spec:
egress:
- ports:
- port: 6443
protocol: TCP
- port: 53
protocol: TCP
- port: 53
protocol: UDP
ingress:
- ports:
- port: 8081
protocol: TCP
podSelector:
matchLabels:
name: marketplace-operator
policyTypes:
- Ingress
- Egress and then it will make both https://issues.redhat.com/browse/OCPBUGS-58388 and https://issues.redhat.com/browse/OCPBUGS-58390 works. |
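A hedged way to verify that the 6443 egress actually lets the operator reach the apiserver through the `kubernetes` Service (this assumes `curl` is available in the operator image; the ClusterIP is the one from the Service output above):

```console
# hypothetical connectivity check from the operator pod; the request goes to
# the Service on 443 and is DNATed to 6443, which the egress rule must allow
oc -n openshift-marketplace exec deploy/marketplace-operator -- \
  curl -sk https://172.31.0.1/version
```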
@rashmigottipati @grokspawn @oceanc80

1. Make `default-deny-all` a plain deny-all rule:

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all
  namespace: openshift-marketplace
spec:
  podSelector: {}
  policyTypes:
  - Ingress
  - Egress
```

2. Remove 443 from the `marketplace-operator` rule:

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: marketplace-operator
  namespace: openshift-marketplace
spec:
  egress:
  - ports:
    - port: 6443
      protocol: TCP
    - port: 53
      protocol: TCP
    - port: 53
      protocol: UDP
  ingress:
  - ports:
    - port: 8081
      protocol: TCP
  podSelector:
    matchLabels:
      name: marketplace-operator
  policyTypes:
  - Ingress
  - Egress
```

3. Add an `unpack-bundles` rule to enable the hosted cluster's unpack pods to access the management cluster's apiserver:

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: unpack-bundles
  namespace: openshift-marketplace
spec:
  egress:
  - ports:
    - port: 6443
      protocol: TCP
  podSelector:
    matchExpressions:
    - key: operatorframework.io/bundle-unpack-ref
      operator: Exists
    - key: olm.managed
      operator: In
      values:
      - "true"
  policyTypes:
  - Ingress
  - Egress
```

With the above three rules, it works:

```console
[root@preserve-olm-env2 OPRUN-3896]# oc get nodes
NAME                                         STATUS   ROLES    AGE     VERSION
ip-10-0-130-5.us-east-2.compute.internal     Ready    worker   6m4s    v1.32.5
ip-10-0-154-14.us-east-2.compute.internal    Ready    worker   6m55s   v1.32.5
ip-10-0-169-157.us-east-2.compute.internal   Ready    worker   7m7s    v1.32.5
[root@preserve-olm-env2 OPRUN-3896]# oc get clusterversion
NAME      VERSION                              AVAILABLE   PROGRESSING   SINCE   STATUS
version   4.20.0-0.nightly-2025-07-01-051543   True        False         84s     Cluster version is 4.20.0-0.nightly-2025-07-01-051543
[root@preserve-olm-env2 OPRUN-3896]# oc -n openshift-marketplace get networkpolicy -o yaml
apiVersion: v1
items:
- apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"networking.k8s.io/v1","kind":"NetworkPolicy","metadata":{"annotations":{},"name":"default-deny-all","namespace":"openshift-marketplace"},"spec":{"podSelector":{},"policyTypes":["Ingress","Egress"]}}
creationTimestamp: "2025-07-08T05:30:10Z"
generation: 1
name: default-deny-all
namespace: openshift-marketplace
resourceVersion: "11995"
uid: d6a84e5a-5786-482c-8e82-efb6047d8cc7
spec:
podSelector: {}
policyTypes:
- Ingress
- Egress
- apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"networking.k8s.io/v1","kind":"NetworkPolicy","metadata":{"annotations":{},"name":"marketplace-operator","namespace":"openshift-marketplace"},"spec":{"egress":[{"ports":[{"port":6443,"protocol":"TCP"},{"port":53,"protocol":"TCP"},{"port":53,"protocol":"UDP"}]}],"ingress":[{"ports":[{"port":8081,"protocol":"TCP"}]}],"podSelector":{"matchLabels":{"name":"marketplace-operator"}},"policyTypes":["Ingress","Egress"]}}
creationTimestamp: "2025-07-08T05:30:22Z"
generation: 1
name: marketplace-operator
namespace: openshift-marketplace
resourceVersion: "12045"
uid: 65ccdeef-086e-4337-b48d-27297aa67a96
spec:
egress:
- ports:
- port: 6443
protocol: TCP
- port: 53
protocol: TCP
- port: 53
protocol: UDP
ingress:
- ports:
- port: 8081
protocol: TCP
podSelector:
matchLabels:
name: marketplace-operator
policyTypes:
- Ingress
- Egress
- apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
annotations:
kubectl.kubernetes.io/last-applied-configuration: |
{"apiVersion":"networking.k8s.io/v1","kind":"NetworkPolicy","metadata":{"annotations":{},"name":"unpack-bundles","namespace":"openshift-marketplace"},"spec":{"egress":[{"ports":[{"port":6443,"protocol":"TCP"}]}],"podSelector":{"matchExpressions":[{"key":"operatorframework.io/bundle-unpack-ref","operator":"Exists"},{"key":"olm.managed","operator":"In","values":["true"]}]},"policyTypes":["Ingress","Egress"]}}
creationTimestamp: "2025-07-08T05:30:38Z"
generation: 1
name: unpack-bundles
namespace: openshift-marketplace
resourceVersion: "12120"
uid: ccce057e-ac9e-4873-891e-f9601fc50379
spec:
egress:
- ports:
- port: 6443
protocol: TCP
podSelector:
matchExpressions:
- key: operatorframework.io/bundle-unpack-ref
operator: Exists
- key: olm.managed
operator: In
values:
- "true"
policyTypes:
- Ingress
- Egress
kind: List
metadata:
resourceVersion: ""
[root@preserve-olm-env2 OPRUN-3896]# oc create ns test3896
namespace/test3896 created
[root@preserve-olm-env2 OPRUN-3896]# oc apply -f og.yaml
operatorgroup.operators.coreos.com/og-81389 created
[root@preserve-olm-env2 OPRUN-3896]# oc apply -f sub.yaml
subscription.operators.coreos.com/sub-81389 created
[root@preserve-olm-env2 OPRUN-3896]# oc apply -f catsrc.yaml
catalogsource.operators.coreos.com/catsrc-operator created
[root@preserve-olm-env2 OPRUN-3896]# oc create ns testnod
namespace/testnod created
[root@preserve-olm-env2 OPRUN-3896]# oc apply -f noog.yaml
operatorgroup.operators.coreos.com/og-singlenamespace created
[root@preserve-olm-env2 OPRUN-3896]# oc apply -f nosub.yaml
subscription.operators.coreos.com/nginx-ok-v23170 created
[root@preserve-olm-env2 OPRUN-3896]# oc -n openshift-marketplace get pod
NAME                                                              READY   STATUS      RESTARTS   AGE
b72447df9c7b416ef9dbeb099605a6f744ea2a6a181256d12f4023e46dxjqb8   0/1     Completed   0          9m43s
catsrc-operator-jqdtj                                             1/1     Running     0          8m11s
e6ed4023a8bdc26b42a6a3b525b25fe2fdf1ef34ab1f427129671a6471jmmw4   0/1     Completed   0          7m49s
[root@preserve-olm-env2 OPRUN-3896]# oc get csv -A
NAMESPACE   NAME                      DISPLAY                           VERSION   REPLACES                  PHASE
test3896    postgresoperator.v5.8.2   Crunchy Postgres for Kubernetes   5.8.2     postgresoperator.v5.8.1   Succeeded
testnod     nginx-ok-v23170.v0.0.1    vokv23170                         0.0.1                               Succeeded
```
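And a hedged way to confirm that the `unpack-bundles` selector matches the unpack jobs seen above (the label keys are taken from the policy's matchExpressions):

```console
# hypothetical check: list pods carrying the labels the unpack-bundles
# policy selects on; the Completed unpack pods above should show up here
oc -n openshift-marketplace get pods \
  -l operatorframework.io/bundle-unpack-ref,olm.managed=true
```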
[APPROVALNOTIFIER] This PR is APPROVED

This pull-request has been approved by: grokspawn, perdasilva, rashmigottipati

The full list of commands accepted by this bot can be found here. The pull request process is described here.

Needs approval from an approver in each of these files:

Approvers can indicate their approval by writing `/approve` in a comment.
/lgtm