statefulset:
  enabled: false
replicaCount: 1
# By default, deploymentStrategy is set to RollingUpdate with a maxSurge of 25% and a maxUnavailable of 25%.
# You can change the type to `Recreate`, or uncomment the `rollingUpdate` specification below and adjust it to your needs.
deploymentStrategy: {}
  # rollingUpdate:
  #   maxSurge: 25%
  #   maxUnavailable: 25%
  # type: RollingUpdate
nexus:
  imageName: quay.io/travelaudience/docker-nexus
  imageTag: 3.17.0
  imagePullPolicy: IfNotPresent
  env:
    - name: install4jAddVmParams
      value: "-Xms1200M -Xmx1200M -XX:MaxDirectMemorySize=2G -XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap"
  # nodeSelector:
  #   cloud.google.com/gke-nodepool: default-pool
  resources: {}
    # requests:
      ## Based on https://support.sonatype.com/hc/en-us/articles/115006448847#mem
      ## and https://twitter.com/analytically/status/894592422382063616:
      ## Xms == Xmx
      ## Xmx <= 4G
      ## MaxDirectMemory >= 2G
      ## Xmx + MaxDirectMemory <= RAM * 2/3 (hence the request for 4800Mi)
      ## MaxRAMFraction=1 is not being set as it would allow the heap
      ## to use all the available memory.
      # cpu: 250m
      # memory: 4800Mi
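  # A minimal sketch of an uncommented resources block that follows the sizing guidance
  # above; it would replace `resources: {}`, and the limits shown are illustrative
  # assumptions, not chart defaults.
  # resources:
  #   requests:
  #     cpu: 250m
  #     memory: 4800Mi
  #   limits:
  #     cpu: "2"
  #     memory: 4800Mi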
  # The ports should only be changed if the nexus image uses a different port.
  dockerPort: 5003
  nexusPort: 8081
  service:
    type: NodePort
    # clusterIP: None
    # annotations: {}
    # labels: {}
  # securityContext:
  #   fsGroup: 2000
  podAnnotations: {}
  livenessProbe:
    initialDelaySeconds: 30
    periodSeconds: 30
    failureThreshold: 6
    # timeoutSeconds: 10
    path: /
  readinessProbe:
    initialDelaySeconds: 30
    periodSeconds: 30
    failureThreshold: 6
    # timeoutSeconds: 10
    path: /
  # hostAliases allows the modification of the hosts file inside a container.
  hostAliases: []
  # - ip: "192.168.1.10"
  #   hostnames:
  #   - "example.com"
  #   - "www.example.com"
route:
  enabled: false
  name: docker
  portName: docker
  labels:
  annotations:
  # path: /docker
nexusProxy:
  enabled: true
  # svcName: proxy-svc
  imageName: quay.io/travelaudience/docker-nexus-proxy
  imageTag: 2.5.0
  imagePullPolicy: IfNotPresent
  port: 8080
  targetPort: 8080
  # labels: {}
  env:
    nexusDockerHost:
    nexusHttpHost:
    enforceHttps: false
    cloudIamAuthEnabled: false
    ## If cloudIamAuthEnabled is set to true, uncomment the variables below and remove this line.
    # clientId: ""
    # clientSecret: ""
    # organizationId: ""
    # redirectUrl: ""
    # requiredMembershipVerification: "true"
  # secrets:
  #   keystore: ""
  #   password: ""
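  # Example values for the proxy host variables above; the hostnames are placeholders only
  # (any DNS names you control that resolve to the proxy service would work):
  # env:
  #   nexusDockerHost: containers.example.com
  #   nexusHttpHost: nexus.example.com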
  resources: {}
    # requests:
    #   cpu: 100m
    #   memory: 256Mi
    # limits:
    #   cpu: 200m
    #   memory: 512Mi
nexusProxyRoute:
  enabled: false
  labels:
  annotations:
  # path: /nexus
persistence:
  enabled: true
  accessMode: ReadWriteOnce
  ## If defined, storageClass: <storageClass>
  ## If set to "-", storageClass: "", which disables dynamic provisioning.
  ## If undefined (the default) or set to null, no storageClass spec is
  ## set, choosing the default provisioner (gp2 on AWS, standard on
  ## GKE, AWS & OpenStack).
  ##
  # existingClaim:
  # annotations:
  #   "helm.sh/resource-policy": keep
  # storageClass: "-"
  storageSize: 8Gi
  # If a PersistentDisk already exists, you can create a PV for it by including the two following key pairs.
  # pdName: nexus-data-disk
  # fsType: ext4
nexusBackup:
  enabled: false
  imageName: quay.io/travelaudience/docker-nexus-backup
  imageTag: 1.5.0
  imagePullPolicy: IfNotPresent
  env:
    targetBucket:
  nexusAdminPassword: "admin123"
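  # targetBucket names the cloud storage bucket that receives the backups; the value below
  # is a placeholder, assuming a GCS bucket as used by the docker-nexus-backup image.
  # env:
  #   targetBucket: "gs://my-nexus-backups"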
  persistence:
    enabled: true
    # existingClaim:
    # annotations:
    #   "helm.sh/resource-policy": keep
    accessMode: ReadWriteOnce
    # See the comment above for information on setting the backup storageClass.
    # storageClass: "-"
    storageSize: 8Gi
    # If a PersistentDisk already exists, you can create a PV for it by including the two following key pairs.
    # pdName: nexus-backup-disk
    # fsType: ext4
ingress:
  enabled: false
  path: /
  annotations: {}
  # # NOTE: Can't use 'false' due to https://github.com/jetstack/kube-lego/issues/173.
  # kubernetes.io/ingress.allow-http: true
  # kubernetes.io/ingress.class: gce
  # kubernetes.io/ingress.global-static-ip-name: ""
  # kubernetes.io/tls-acme: true
  tls:
    enabled: true
    secretName: nexus-tls
  # Specify custom rules in addition to, or instead of, the nexus-proxy rules.
  rules:
  # - host: http://nexus.127.0.0.1.nip.io
  #   http:
  #     paths:
  #     - backend:
  #         serviceName: additional-svc
  #         servicePort: 80
tolerations: []
# # Enable the ConfigMap and add data to it.
config:
  enabled: false
  mountPath: /sonatype-nexus-conf
  data:
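# # A minimal sketch of a populated ConfigMap, assuming it is mounted as a volume at
# # `mountPath` so that each key under `data` appears as a file; the file name and
# # contents below are hypothetical.
# config:
#   enabled: true
#   mountPath: /sonatype-nexus-conf
#   data:
#     example.properties: |
#       some.property=value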
deployment:
  # # Add annotations to the deployment to enhance its configuration.
  annotations: {}
  # # Add init containers, e.g. to set specific permissions on nexus-data.
  # # Add your own init container or uncomment and modify the given example.
  initContainers:
  # - name: fmp-volume-permission
  #   image: busybox
  #   imagePullPolicy: IfNotPresent
  #   command: ['chown', '-R', '200', '/nexus-data']
  #   volumeMounts:
  #     - name: nexus-data
  #       mountPath: /nexus-data
  # # Uncomment and modify this to run a command after starting the nexus container.
  postStart:
    command:    # '["/bin/sh", "-c", "ls"]'
  additionalContainers:
  additionalVolumes:
  additionalVolumeMounts:
# # To use an additional secret, set enabled to true and add data.
secret:
  enabled: false
  mountPath: /etc/secret-volume
  readOnly: true
  data:
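# # A minimal sketch of an additional secret, assuming each key under `data` is exposed as a
# # file under `mountPath`. The key and value below are placeholders; depending on how the
# # chart template renders the Secret, the value may need to be base64-encoded.
# secret:
#   enabled: true
#   mountPath: /etc/secret-volume
#   readOnly: true
#   data:
#     example-credentials.txt: "changeme"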
# # To use an additional service, set enabled to true.
service:
  # name: additional-svc
  enabled: false
  labels: {}
  annotations: {}
  ports:
  - name: nexus-service
    targetPort: 80
    port: 80