Fix bug where pod-watcher would fail to reconcile pods due to missing schema registration (#185)
orishoshan committed May 22, 2023
1 parent: 2c779f1 · commit: 8f770a4
Showing 2 changed files with 102 additions and 1 deletion.
.github/workflows/netpol-e2e-test.yaml (100 additions, 1 deletion)
@@ -23,7 +23,7 @@ env:


jobs:
-  e2e-test:
+  e2e-test-intents-after-pods:
    timeout-minutes: 10
    runs-on: ubuntu-latest
    steps:
@@ -116,3 +116,102 @@ jobs:


  e2e-test-intents-before-pods:
    timeout-minutes: 10
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          submodules: recursive
          token: ${{ secrets.OTTERIZEBOT_GITHUB_TOKEN }} # required for checking out submodules

      - name: Login to GCR
        uses: docker/login-action@v2
        with:
          registry: ${{ env.REGISTRY }}
          username: _json_key_base64
          password: ${{ secrets.B64_GCLOUD_SERVICE_ACCOUNT_JSON}}

      - name: Set up Helm
        uses: azure/setup-helm@v3

      - name: Start minikube
        uses: medyagh/setup-minikube@master
        with:
          start-args: "--network-plugin=cni --cni=calico"

      - name: Wait for Calico startup
        run: |-
          kubectl wait pods -n kube-system -l k8s-app=calico-kube-controllers --for condition=Ready --timeout=90s
          kubectl wait pods -n kube-system -l k8s-app=calico-node --for condition=Ready --timeout=90s
          kubectl wait pods -n kube-system -l k8s-app=calico-kube-controllers --for condition=Ready --timeout=90s
      - name: Install Otterize
        run: |-
          docker pull ${{ env.REGISTRY }}/intents-operator:${{ inputs.operator-tag }}
          minikube image load ${{ env.REGISTRY }}/intents-operator:${{ inputs.operator-tag }}
          docker pull ${{ env.REGISTRY }}/watcher:${{ inputs.watcher-tag }}
          minikube image load ${{ env.REGISTRY }}/watcher:${{ inputs.watcher-tag }}
          OPERATOR_FLAGS="--set-string intentsOperator.operator.repository=${{ env.REGISTRY }} --set-string intentsOperator.operator.image=intents-operator --set-string intentsOperator.operator.tag=${{ inputs.operator-tag }} --set-string intentsOperator.operator.pullPolicy=Never"
          WATCHER_FLAGS="--set-string intentsOperator.watcher.repository=${{ env.REGISTRY }} --set-string intentsOperator.watcher.image=watcher --set-string intentsOperator.watcher.tag=${{ inputs.watcher-tag }} --set-string intentsOperator.watcher.pullPolicy=Never"
          helm dep up ./helm-charts/otterize-kubernetes
          helm install otterize ./helm-charts/otterize-kubernetes -n otterize-system --create-namespace $OPERATOR_FLAGS $WATCHER_FLAGS
      - name: Wait for Otterize
        run: |-
          kubectl wait pods -n otterize-system -l app=intents-operator --for condition=Ready --timeout=360s
          kubectl wait pods -n otterize-system -l app=otterize-watcher --for condition=Ready --timeout=360s
      - name: Apply intents
        run: |-
          kubectl create namespace otterize-tutorial-npol
          kubectl apply -f https://docs.otterize.com/code-examples/automate-network-policies/intents.yaml
      - name: Deploy Tutorial services
        run: |-
          kubectl apply -f https://docs.otterize.com/code-examples/automate-network-policies/all.yaml
      - name: Wait for pods
        run: |-
          kubectl wait pods -n otterize-tutorial-npol -l app=client --for condition=Ready --timeout=180s
          kubectl wait pods -n otterize-tutorial-npol -l app=client-other --for condition=Ready --timeout=180s
          kubectl wait pods -n otterize-tutorial-npol -l app=server --for condition=Ready --timeout=180s
      - name: Test connectivity
        run: |-
          CLI1_POD=`kubectl get pod --selector app=client -n otterize-tutorial-npol -o json | jq -r ".items[0].metadata.name"`
          CLI2_POD=`kubectl get pod --selector app=client-other -n otterize-tutorial-npol -o json | jq -r ".items[0].metadata.name"`
          echo Client: $CLI1_POD client_other: $CLI2_POD
          for i in {1..10}; do if ! kubectl get pod --selector app=client -n otterize-tutorial-npol -o json | jq -r ".items[0].metadata.labels" | grep 'access-server'; then echo Waiting for label; sleep 1; else echo Label found; break; fi; done;
          if ! kubectl get pod --selector app=client -n otterize-tutorial-npol -o json | jq -r ".items[0].metadata.labels" | grep 'access-server'; then echo Label not found; exit 1; fi
          # once the label is applied, we should sleep a few seconds because the tutorial service sleeps between attempts
          sleep 5
          # should work because there is an applied intent
          echo check client log
          CLIENT_LOG=`kubectl logs -n otterize-tutorial-npol $CLI1_POD`
          echo $CLIENT_LOG
          echo $CLIENT_LOG | grep "Hi, I am the server, you called, may I help you?"
          # should be blocked (using 3 because the log should repeat itself every 3 lines)
          echo check client other log
          OTHER_CLIENT_LOG=`kubectl logs -n otterize-tutorial-npol $CLI2_POD`
          echo $OTHER_CLIENT_LOG
          echo $OTHER_CLIENT_LOG | grep "curl timed out"


  e2e-test:
    needs:
      - e2e-test-intents-after-pods
      - e2e-test-intents-before-pods
    runs-on: ubuntu-latest
    steps:
      - run: |-
          echo Success! This step is only here to depend on the tests.
src/watcher/cmd/main.go (2 additions, 0 deletions)
@@ -8,6 +8,7 @@ import (
    "github.com/sirupsen/logrus"
    "github.com/spf13/viper"
    istiosecurityscheme "istio.io/client-go/pkg/apis/security/v1beta1"
+   apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
    "k8s.io/apimachinery/pkg/runtime"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    clientgoscheme "k8s.io/client-go/kubernetes/scheme"
@@ -20,6 +21,7 @@ var (
)

func init() {
+   utilruntime.Must(apiextensionsv1.AddToScheme(scheme))
    utilruntime.Must(clientgoscheme.AddToScheme(scheme))
    utilruntime.Must(istiosecurityscheme.AddToScheme(scheme))
    utilruntime.Must(otterizev1alpha2.AddToScheme(scheme))
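
The added line registers the apiextensions.k8s.io/v1 types with the watcher's runtime scheme. As a rough, standalone sketch of why that matters (this is not code from this repository, and the CRD name below is a hypothetical placeholder): a controller-runtime client can only serialize and deserialize kinds that are registered in its scheme, so reading an unregistered kind such as CustomResourceDefinition fails with a "no kind is registered" error, and any reconcile that depends on that read fails with it.

package main

import (
    "context"
    "fmt"

    apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
    "k8s.io/apimachinery/pkg/runtime"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/client/config"
)

func main() {
    scheme := runtime.NewScheme()
    // Without this registration, the Get below fails with a
    // "no kind is registered for the type v1.CustomResourceDefinition" error,
    // which is the class of failure the commit message describes.
    utilruntime.Must(apiextensionsv1.AddToScheme(scheme))

    // Build a client that uses the scheme above.
    c, err := client.New(config.GetConfigOrDie(), client.Options{Scheme: scheme})
    if err != nil {
        panic(err)
    }

    // "example-crd-name" is a hypothetical name; CRDs are cluster-scoped,
    // so only a name is needed in the object key.
    crd := &apiextensionsv1.CustomResourceDefinition{}
    err = c.Get(context.Background(), client.ObjectKey{Name: "example-crd-name"}, crd)
    fmt.Println(err)
}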
