Enable the use of S3-compliant or Azure object storage as storage backend

* Update container with both storage options
* Update CRD with object storage configuration via secret
* Add roles to configure settings.py based on object storage secret
* Do not create and bind PVC if object storage is used

closes #8361
https://pulp.plan.io/issues/8361
chambridge committed Mar 12, 2021
1 parent 2383510 commit be353ce
Showing 21 changed files with 481 additions and 138 deletions.
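The new object storage support is driven by a Kubernetes secret that the Pulp CR references by name (see the object_storage stanza in the default CR diff below). A minimal sketch of such an S3 secret — the key names here are illustrative assumptions, not taken from this commit — could look like:

    # Hypothetical S3 credentials secret; key names are assumptions for
    # illustration, so check the operator's roles for the exact keys expected.
    apiVersion: v1
    kind: Secret
    metadata:
      name: s3secret                           # the name the CR's s3_secret field points at
    stringData:
      s3-access-key-id: "AKIA..."
      s3-secret-access-key: "changeme"
      s3-bucket-name: "pulp3"
      s3-endpoint: "https://s3.example.com"    # only needed for non-AWS, S3-compliant stores

Per the commit message, when object storage is configured this way the operator does not create or bind the /var/lib/pulp PVC.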
22 changes: 11 additions & 11 deletions .ci/scripts/pulp-operator-check-and-wait.sh
@@ -18,12 +18,12 @@ fi

storage_debug() {
echo "VOLUMES:"
sudo $KUBECTL get pvc
sudo $KUBECTL get pv
sudo -E $KUBECTL get pvc
sudo -E $KUBECTL get pv
df -h
if [ "$KUBE" = "k3s" ]; then
sudo $KUBECTL -n local-path-storage get pod
sudo $KUBECTL -n local-path-storage logs $STORAGE_POD
sudo -E $KUBECTL -n local-path-storage get pod
sudo -E $KUBECTL -n local-path-storage logs $STORAGE_POD
fi
}

@@ -51,7 +51,7 @@ echo "Waiting for services to come up ..."
# Before the services are both up, the pods may not exist at all.
# So check for the services being up 1st.
for tries in {0..90}; do
services=$(sudo $KUBECTL get services)
services=$(sudo -E $KUBECTL get services)
if [[ $(echo "$services" | grep -c NodePort) > 1 ]]; then
# parse string like this. 30805 is the external port
# pulp-api-svc NodePort 10.43.170.79 <none> 24817:30805/TCP 0s
@@ -64,15 +64,15 @@ for tries in {0..90}; do
if [[ $tries -eq 90 ]]; then
echo "ERROR 2: 1 or more external services never came up"
echo "NAMESPACES:"
sudo $KUBECTL get namespaces
sudo -E $KUBECTL get namespaces
echo "SERVICES:"
echo "$services"
if [ -x "$(command -v docker)" ]; then
echo "DOCKER IMAGE CACHE:"
sudo docker images
sudo -E docker images
fi
echo "PODS:"
sudo $KUBECTL get pods -o wide
sudo -E $KUBECTL get pods -o wide
storage_debug
exit 2
fi
@@ -83,15 +83,15 @@ done
if [[ "$KUBE" == "k3s" ]]; then
# This needs to be down here. Otherwise, the storage pod may not be
# up in time.
STORAGE_POD=$(sudo $KUBECTL -n local-path-storage get pod | awk '/local-path-provisioner/{print $1}')
STORAGE_POD=$(sudo -E $KUBECTL -n local-path-storage get pod | awk '/local-path-provisioner/{print $1}')
fi

echo "Waiting for pods to transition to Running ..."
# NOTE: Before the pods can be started, they must be downloaded/cached from
# quay.io .
# Therefore, this wait is highly dependent on network speed.
for tries in {0..180}; do
pods=$(sudo $KUBECTL get pods -o wide)
pods=$(sudo -E $KUBECTL get pods -o wide)
if [[ $(echo "$pods" | grep -c -v -E "STATUS|Running") -eq 0 ]]; then
echo "PODS:"
echo "$pods"
@@ -106,7 +106,7 @@ for tries in {0..180}; do
echo "$pods"
if [ -x "$(command -v docker)" ]; then
echo "DOCKER IMAGE CACHE:"
sudo docker images
sudo -E docker images
fi
fi
if [[ $tries -eq 180 ]]; then
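The recurring change in this script — sudo $KUBECTL becoming sudo -E $KUBECTL — works because -E makes sudo preserve the invoking user's environment, so a variable like KUBECONFIG (exported via $GITHUB_ENV in the pr.yml changes below) is still visible to kubectl when it runs as root. A minimal workflow sketch of the pattern (step names are illustrative):

    - name: Record kubeconfig location for later steps
      run: |
        kubectl config view > $PWD/kubeconfig
        echo "KUBECONFIG=$PWD/kubeconfig" >> $GITHUB_ENV
    - name: Query the cluster as root
      # Without -E, sudo would scrub KUBECONFIG and kubectl would fall
      # back to root's default kubeconfig instead of the one written above.
      run: sudo -E kubectl get pods -o wide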
48 changes: 24 additions & 24 deletions .github/workflows/ci.yml
@@ -63,12 +63,12 @@ jobs:
if: failure()
run: |
sudo docker images
sudo kubectl logs -l name=pulp-operator -c pulp-operator --tail=10000
sudo kubectl logs -l app=pulp-api --tail=10000
sudo kubectl logs -l app=pulp-content --tail=10000
sudo kubectl logs -l app=pulp-worker --tail=10000
sudo kubectl logs -l app=pulp-resource-manager --tail=10000
sudo kubectl logs -l app=pulp-web --tail=10000
sudo -E kubectl logs -l name=pulp-operator -c pulp-operator --tail=10000
sudo -E kubectl logs -l app=pulp-api --tail=10000
sudo -E kubectl logs -l app=pulp-content --tail=10000
sudo -E kubectl logs -l app=pulp-worker --tail=10000
sudo -E kubectl logs -l app=pulp-resource-manager --tail=10000
sudo -E kubectl logs -l app=pulp-web --tail=10000
http --timeout 30 --check-status --pretty format --print hb http://localhost:24817/pulp/api/v3/status/
components:
@@ -135,12 +135,12 @@ jobs:
if: failure()
run: |
sudo docker images
sudo kubectl logs -l name=pulp-operator -c pulp-operator --tail=10000
sudo kubectl logs -l app=pulp-api --tail=10000
sudo kubectl logs -l app=pulp-content --tail=10000
sudo kubectl logs -l app=pulp-worker --tail=10000
sudo kubectl logs -l app=pulp-resource-manager --tail=10000
sudo kubectl logs -l app=pulp-web --tail=10000
sudo -E kubectl logs -l name=pulp-operator -c pulp-operator --tail=10000
sudo -E kubectl logs -l app=pulp-api --tail=10000
sudo -E kubectl logs -l app=pulp-content --tail=10000
sudo -E kubectl logs -l app=pulp-worker --tail=10000
sudo -E kubectl logs -l app=pulp-resource-manager --tail=10000
sudo -E kubectl logs -l app=pulp-web --tail=10000
http --timeout 30 --check-status --pretty format --print hb http://localhost:24817/pulp/api/v3/status/
galaxy:
@@ -211,12 +211,12 @@ jobs:
if: failure()
run: |
sudo docker images
sudo kubectl logs -l name=pulp-operator -c pulp-operator --tail=10000
sudo kubectl logs -l app=pulp-api --tail=10000
sudo kubectl logs -l app=pulp-content --tail=10000
sudo kubectl logs -l app=pulp-worker --tail=10000
sudo kubectl logs -l app=pulp-resource-manager --tail=10000
sudo kubectl logs -l app=pulp-web --tail=10000
sudo -E kubectl logs -l name=pulp-operator -c pulp-operator --tail=10000
sudo -E kubectl logs -l app=pulp-api --tail=10000
sudo -E kubectl logs -l app=pulp-content --tail=10000
sudo -E kubectl logs -l app=pulp-worker --tail=10000
sudo -E kubectl logs -l app=pulp-resource-manager --tail=10000
sudo -E kubectl logs -l app=pulp-web --tail=10000
http --timeout 30 --check-status --pretty format --print hb http://localhost:24817/pulp/api/v3/status/
molecule:
@@ -318,10 +318,10 @@ jobs:
if: failure()
run: |
sudo docker images
sudo kubectl logs -l name=pulp-operator -c pulp-operator --tail=10000
sudo kubectl logs -l app=pulp-api --tail=10000
sudo kubectl logs -l app=pulp-content --tail=10000
sudo kubectl logs -l app=pulp-worker --tail=10000
sudo kubectl logs -l app=pulp-resource-manager --tail=10000
sudo kubectl logs -l app=pulp-web --tail=10000
sudo -E kubectl logs -l name=pulp-operator -c pulp-operator --tail=10000
sudo -E kubectl logs -l app=pulp-api --tail=10000
sudo -E kubectl logs -l app=pulp-content --tail=10000
sudo -E kubectl logs -l app=pulp-worker --tail=10000
sudo -E kubectl logs -l app=pulp-resource-manager --tail=10000
sudo -E kubectl logs -l app=pulp-web --tail=10000
http --timeout 30 --check-status --pretty format --print hb http://localhost:24817/pulp/api/v3/status/
46 changes: 26 additions & 20 deletions .github/workflows/pr.yml
@@ -107,12 +107,12 @@ jobs:
if: failure()
run: |
sudo docker images
sudo kubectl logs -l name=pulp-operator -c pulp-operator --tail=10000
sudo kubectl logs -l app=pulp-api --tail=10000
sudo kubectl logs -l app=pulp-content --tail=10000
sudo kubectl logs -l app=pulp-worker --tail=10000
sudo kubectl logs -l app=pulp-resource-manager --tail=10000
sudo kubectl logs -l app=pulp-web --tail=10000
sudo -E kubectl logs -l name=pulp-operator -c pulp-operator --tail=10000
sudo -E kubectl logs -l app=pulp-api --tail=10000
sudo -E kubectl logs -l app=pulp-content --tail=10000
sudo -E kubectl logs -l app=pulp-worker --tail=10000
sudo -E kubectl logs -l app=pulp-resource-manager --tail=10000
sudo -E kubectl logs -l app=pulp-web --tail=10000
http --timeout 30 --check-status --pretty format --print hb http://localhost:24817/pulp/api/v3/status/
# - name: Debugging example (uncomment when needed)
# if: failure()
@@ -152,7 +152,10 @@ jobs:
minikube start --vm-driver=docker --extra-config=apiserver.service-node-port-range=80-32000
# now you can run kubectl to see the pods in the cluster
- name: Try the cluster !
run: kubectl get pods -A
run: |
kubectl config view > $PWD/kubeconfig
kubectl get pods -A
echo "KUBECONFIG=$PWD/kubeconfig" >> $GITHUB_ENV
- name: Setup a minikube docker env
run: minikube -p minikube docker-env | grep "export" | awk '{$1= ""; print $0}' >> $GITHUB_ENV
- name: Build Operator
@@ -167,7 +170,10 @@
sudo -E docker images
shell: bash
- name: Deploy pulp-operator to K8s
run: sudo -E ./up.sh
run: |
echo $KUBECONFIG
cat $KUBECONFIG
sudo -E ./up.sh
shell: bash
- name: Check and wait pulp-operator deploy
run: .ci/scripts/pulp-operator-check-and-wait.sh -m
@@ -183,12 +189,12 @@
if: failure()
run: |
sudo docker images
sudo kubectl logs -l name=pulp-operator -c pulp-operator --tail=10000
sudo kubectl logs -l app=pulp-api --tail=10000
sudo kubectl logs -l app=pulp-content --tail=10000
sudo kubectl logs -l app=pulp-worker --tail=10000
sudo kubectl logs -l app=pulp-resource-manager --tail=10000
sudo kubectl logs -l app=pulp-web --tail=10000
sudo -E kubectl logs -l name=pulp-operator -c pulp-operator --tail=10000
sudo -E kubectl logs -l app=pulp-api --tail=10000
sudo -E kubectl logs -l app=pulp-content --tail=10000
sudo -E kubectl logs -l app=pulp-worker --tail=10000
sudo -E kubectl logs -l app=pulp-resource-manager --tail=10000
sudo -E kubectl logs -l app=pulp-web --tail=10000
http --timeout 30 --check-status --pretty format --print hb http://localhost:24817/pulp/api/v3/status/
# - name: Debugging example (uncomment when needed)
# if: failure()
@@ -263,12 +269,12 @@ jobs:
if: failure()
run: |
sudo docker images
sudo kubectl logs -l name=pulp-operator -c pulp-operator --tail=10000
sudo kubectl logs -l app=pulp-api --tail=10000
sudo kubectl logs -l app=pulp-content --tail=10000
sudo kubectl logs -l app=pulp-worker --tail=10000
sudo kubectl logs -l app=pulp-resource-manager --tail=10000
sudo kubectl logs -l app=pulp-web --tail=10000
sudo -E kubectl logs -l name=pulp-operator -c pulp-operator --tail=10000
sudo -E kubectl logs -l app=pulp-api --tail=10000
sudo -E kubectl logs -l app=pulp-content --tail=10000
sudo -E kubectl logs -l app=pulp-worker --tail=10000
sudo -E kubectl logs -l app=pulp-resource-manager --tail=10000
sudo -E kubectl logs -l app=pulp-web --tail=10000
http --timeout 30 --check-status --pretty format --print hb http://localhost:24817/pulp/api/v3/status/
# - name: Debugging example (uncomment when needed)
# if: failure()
1 change: 1 addition & 0 deletions CHANGES/8361.feature
@@ -0,0 +1 @@
Enable the use of S3-compliant or Azure object storage as storage backend
8 changes: 5 additions & 3 deletions containers/images/pulp/Containerfile.core.j2
@@ -47,10 +47,12 @@ RUN dnf -y update && \
# Need to install optional dep, rhsm, for pulp-certguard
RUN pip install rhsm

RUN mkdir -p /etc/pulp
RUN mkdir -p /var/lib/pulp/media
RUN mkdir -p /etc/pulp \
/var/lib/pulp/media \
/var/lib/pulp/assets \
/var/lib/pulp/tmp

RUN pip install gunicorn django-storages[boto3]
RUN pip install gunicorn django-storages[boto3,azure]
{% if s3_test is defined %}
# Hacking botocore (https://github.com/boto/botocore/pull/1990):
RUN sed -i "s/hasattr(body, 'read')/getattr(body, '_size', None)/g" $(pip show botocore | grep -i location | awk '{ print $2 }')/botocore/handlers.py
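With django-storages installed with both the boto3 and azure extras, the operator's roles can point Pulp's settings.py at either backend. A hedged sketch of the settings involved — these are django-storages' standard setting names, not values read from this diff — expressed as pulp_settings-style YAML:

    # S3-compliant storage (storages.backends.s3boto3.S3Boto3Storage);
    # all values are placeholders.
    default_file_storage: "storages.backends.s3boto3.S3Boto3Storage"
    aws_access_key_id: "AKIA..."
    aws_secret_access_key: "changeme"
    aws_storage_bucket_name: "pulp3"
    aws_s3_endpoint_url: "https://s3.example.com"   # only for non-AWS stores

    # Azure alternative (storages.backends.azure_storage.AzureStorage):
    # default_file_storage: "storages.backends.azure_storage.AzureStorage"
    # azure_account_name: "pulpaccount"
    # azure_account_key: "changeme"
    # azure_container: "pulp3"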
4 changes: 3 additions & 1 deletion containers/images/pulp/container-assets/pulp-api
@@ -24,7 +24,9 @@ if [ -n "${PULP_ADMIN_PASSWORD}" ]; then
django-admin reset-admin-password --password "${PULP_ADMIN_PASSWORD}"
fi

mkdir -p /var/lib/pulp/media
mkdir -p /var/lib/pulp/media \
/var/lib/pulp/assets \
/var/lib/pulp/tmp

# NOTE: Due to the Linux dual-stack functionality, this will listen on both
# IPv4 and IPv6, even though netstat may seem to indicate it is IPv6 only.
11 changes: 6 additions & 5 deletions deploy/crds/pulpproject_v1beta1_pulp_cr.ci.yaml
@@ -6,8 +6,9 @@ spec:
# image: pulp
tag: "latest"
pulp_admin_password_secret: "example-pulp-admin-password"
pulp_file_storage:
# k3s local-path requires this
access_mode: "ReadWriteOnce"
# We have a little over 10GB free on GHA VMs/instances
size: "10Gi"
pulp_storage:
file:
# k3s local-path requires this
access_mode: "ReadWriteOnce"
# We have a little over 10GB free on GHA VMs/instances
size: "10Gi"
56 changes: 21 additions & 35 deletions deploy/crds/pulpproject_v1beta1_pulp_cr.default.yaml
@@ -13,49 +13,35 @@ metadata:
# tag: "stable"
# Pulp settings.
# pulp_settings:
# databases:
# default:
# HOST: postgres
# ENGINE: django.db.backends.postgresql_psycopg2
# NAME: pulp
# USER: pulp
# PASSWORD: pulp
# PORT: 5432,
# CONN_MAX_AGE: 0
# debug: "True"
# redis_host: redis
# redis_port: 6379
# redis_password: ''
# content_origin: # pulp-operator will query the 1st address of the 1st k8s
# node. This suffices for most single node deployments.
# If on a cluster, you should set this manually until
# ingress(es) are implemented. Example:
# http://myserver.fqdn:24816
# The pulp administrator password secret.
# pulp_admin_password_secret:
# PostgreSQL container settings secret.
# postgres_configuration_secret: pg_secret_name
# Configuration for the persistentVolumeClaim for /var/lib/pulp
# pulp_file_storage:
# If your K8s cluster is only 1 node, and its StorageClass /
# provisioner does not support ReadWriteMany, then you must change
# this to "ReadWriteOnce".
#
# If your K8s cluster is multiple nodes, and does not support
# ReadWriteMany, then pulp-operator is currently incompatible.
#
# Reference on which support ReadWriteMany:
# https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes
# access_mode: "ReadWriteMany"
# pulp_storage:
# file:
# If your K8s cluster is only 1 node, and its StorageClass /
# provisioner does not support ReadWriteMany, then you must change
# this to "ReadWriteOnce".
#
# If your K8s cluster is multiple nodes, and does not support
# ReadWriteMany, then pulp-operator is currently incompatible.
#
# Reference on which support ReadWriteMany:
# https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes
# access_mode: "ReadWriteMany"

# How much space do you want to give Pulp for storing content under
# /var/lib/pulp ?
# https://docs.pulpproject.org/en/3.0/nightly/installation/configuration.html#media-root
# How much space do you want to give Pulp for storing content under
# /var/lib/pulp ?
# https://docs.pulpproject.org/en/3.0/nightly/installation/configuration.html#media-root

# For reference, epel7 x86_64 is currently (2019-07) 30G. So 100G
# should be sufficient for a test deployment with only the RPM
# content plugin.
# size: "100Gi"
# For reference, epel7 x86_64 is currently (2019-07) 30G. So 100G
# should be sufficient for a test deployment with only the RPM
# content plugin.
# size: "100Gi"
# object_storage:
# s3_secret: "s3secret"
# Values below are set in roles rather than in playbook.yaml
# pulp_api:
# replicas: 1
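Putting the pieces together, a CR opting into object storage only needs to name the secret. A sketch — the apiVersion is inferred from the CRD filename, and the nesting of object_storage under pulp_storage is an assumption based on the commented example above, so verify both against the installed CRD:

    # Sketch of a Pulp CR using S3 object storage instead of a PVC.
    apiVersion: pulpproject.org/v1beta1    # inferred from the CRD filename; verify
    kind: Pulp
    metadata:
      name: example-pulp
    spec:
      pulp_admin_password_secret: "example-pulp-admin-password"
      pulp_storage:
        object_storage:
          s3_secret: "s3secret"    # no file: block, so no PVC is created or bound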
15 changes: 8 additions & 7 deletions deploy/crds/pulpproject_v1beta1_pulp_cr.pulp-demo.yaml
@@ -3,10 +3,11 @@ kind: Pulp
metadata:
name: example-pulp
spec:
pulp_file_storage:
# This doesn't really matter for minikube. Single node by design,
# but the storage provisioner allows for ReadWriteMany. So let's
# stick to our default.
access_mode: "ReadWriteMany"
# The minikube VM won't go any larger.
size: "375Gi"
pulp_storage:
file:
# This doesn't really matter for minikube. Single node by design,
# but the storage provisioner allows for ReadWriteMany. So let's
# stick to our default.
access_mode: "ReadWriteMany"
# The minikube VM won't go any larger.
size: "375Gi"
