Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

adding values-parity.yaml to the project again #382

Merged
merged 3 commits into from
Aug 27, 2021
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion .gitlab-ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,6 @@ stages:
echo "Using Helm `helm version --short`"
helm repo add parity https://paritytech.github.io/helm-charts/
helm repo update
curl -sS $HELM_VALUES_FILE > values-parity.yaml
- helm upgrade
--install
--atomic
Expand Down
194 changes: 194 additions & 0 deletions values-parity.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,194 @@
---
# Default values for substrate-telemetry.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount:
  # NOTE: The core service is not scalable at the moment.
  shard: 3
  core: 1
  frontend: 1

image:
  backend:
    repository: docker.io/parity/substrate-telemetry-backend
    pullPolicy: IfNotPresent
    # Overrides the image tag whose default is the chart appVersion.
    # Quoted: git short SHAs can be all-decimal, which YAML would
    # otherwise parse as an integer.
    tag: "f089ad17"
  frontend:
    repository: docker.io/parity/substrate-telemetry-frontend
    pullPolicy: IfNotPresent
    # Overrides the image tag whose default is the chart appVersion.
    tag: "f089ad17"

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

envVars:
  shard: {}
  core: {}
  frontend:
    # The frontend docker container makes this available to the UI,
    # so that it knows where to look for feed information:
    SUBSTRATE_TELEMETRY_URL: wss://feed.telemetry.parity-stg.parity.io/feed

serviceMonitor:
  # Only core service has Prometheus metrics exposed at the moment.
  core:
    enabled: true
    interval: ""
    additionalLabels: {}
    annotations: {}
    # scrapeTimeout: 10s

serviceAccount:
  # Specifies whether a service account should be created
  create: false
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

podAnnotations: {}

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

service:
  shard:
    type: LoadBalancer
    port: 80
    targetPort: 8000
    # nodePort: 31000
    annotations:
      external-dns.alpha.kubernetes.io/hostname: submit.telemetry.parity-stg.parity.io.
  core:
    type: ClusterIP
    port: 80
    targetPort: 8000
    # nodePort: 31000
    annotations: {}
      # cloud.google.com/load-balancer-type: Internal
      # networking.gke.io/internal-load-balancer-allow-global-access: "true"
      # external-dns.alpha.kubernetes.io/hostname: feed.telemetry.parity-stg.parity.io.
  frontend:
    type: ClusterIP
    port: 80
    targetPort: 8000
    # nodePort: 31000
    annotations: {}

ingress:
  shard:
    enabled: false
    className: ""
    annotations: {}
      # kubernetes.io/ingress.class: nginx
      # kubernetes.io/tls-acme: "true"
    hosts:
      - host: chart-example.local
        paths:
          - path: /
            pathType: ImplementationSpecific
    tls: []
    #  - secretName: chart-example-tls
    #    hosts:
    #      - chart-example.local
  core:
    enabled: true
    className: ""
    annotations:
      kubernetes.io/ingress.class: traefik-internal
      traefik.ingress.kubernetes.io/router.entrypoints: web,websecure
      cert-manager.io/cluster-issuer: letsencrypt-dns01
      external-dns.alpha.kubernetes.io/target: traefik-internal.parity-stg.parity.io.
      traefik.ingress.kubernetes.io/router.tls: "true"
    hosts:
      - host: feed.telemetry.parity-stg.parity.io
        paths:
          - path: /
            pathType: ImplementationSpecific
    tls:
      - secretName: feed.telemetry.parity-stg.parity.io
        hosts:
          - feed.telemetry.parity-stg.parity.io
  frontend:
    enabled: true
    className: ""
    annotations:
      kubernetes.io/ingress.class: traefik-internal
      traefik.ingress.kubernetes.io/router.entrypoints: web,websecure
      cert-manager.io/cluster-issuer: letsencrypt-dns01
      external-dns.alpha.kubernetes.io/target: traefik-internal.parity-stg.parity.io.
      traefik.ingress.kubernetes.io/router.tls: "true"
    hosts:
      - host: telemetry.parity-stg.parity.io
        paths:
          - path: /
            pathType: ImplementationSpecific
    tls:
      - secretName: telemetry.parity-stg.parity.io
        hosts:
          - telemetry.parity-stg.parity.io

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

autoscaling:
  # NOTE: The core service is not scalable at the moment.
  shard:
    enabled: true
    minReplicas: 2
    maxReplicas: 6
    targetCPUUtilizationPercentage: 80
    # targetMemoryUtilizationPercentage: 80
  frontend:
    enabled: false
    minReplicas: 1
    maxReplicas: 6
    targetCPUUtilizationPercentage: 80
    # targetMemoryUtilizationPercentage: 80
  core:
    enabled: false

nodeSelector: {}

# Pin pods to the dedicated substrate-telemetry node pool.
affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
        - matchExpressions:
            - key: cloud.google.com/gke-nodepool
              operator: In
              values:
                - substrate-telemetry-pool

# Matching toleration for the tainted substrate-telemetry node pool.
tolerations:
  - key: "app"
    operator: "Equal"
    value: "substrate-telemetry"
    effect: "NoExecute"