diff --git a/api/gen/proto/go/google/v1/profile.pb.go b/api/gen/proto/go/google/v1/profile.pb.go index 3ce664352..551431869 100644 --- a/api/gen/proto/go/google/v1/profile.pb.go +++ b/api/gen/proto/go/google/v1/profile.pb.go @@ -65,9 +65,13 @@ type Profile struct { // A description of the samples associated with each Sample.value. // For a cpu profile this might be: - // [["cpu","nanoseconds"]] or [["wall","seconds"]] or [["syscall","count"]] + // + // [["cpu","nanoseconds"]] or [["wall","seconds"]] or [["syscall","count"]] + // // For a heap profile, this might be: - // [["allocations","count"], ["space","bytes"]], + // + // [["allocations","count"], ["space","bytes"]], + // // If one of the values represents the number of events represented // by the sample, by convention it should be at index 0 and use // sample_type.unit == "count". @@ -602,8 +606,9 @@ type Location struct { // preceding entries were inlined. // // E.g., if memcpy() is inlined into printf: - // line[0].function_name == "memcpy" - // line[1].function_name == "printf" + // + // line[0].function_name == "memcpy" + // line[1].function_name == "printf" Line []*Line `protobuf:"bytes,4,rep,name=line,proto3" json:"line,omitempty"` // Provides an indication that multiple symbols map to this location's // address, for example due to identical code folding by the linker. 
diff --git a/docs/sources/operators-guide/configure/reference-configuration-parameters/index.md b/docs/sources/operators-guide/configure/reference-configuration-parameters/index.md index a9d9224b4..332bfe028 100644 --- a/docs/sources/operators-guide/configure/reference-configuration-parameters/index.md +++ b/docs/sources/operators-guide/configure/reference-configuration-parameters/index.md @@ -185,9 +185,9 @@ phlaredb: # CLI flag: -phlaredb.max-block-duration [max_block_duration: | default = 3h] - # How big should a single row group be + # How big should a single row group be uncompressed # CLI flag: -phlaredb.row-group-target-size - [row_group_target_size: | default = 104857600] + [row_group_target_size: | default = 1342177280] tracing: # Set to false to disable tracing. diff --git a/operations/phlare/helm/phlare/rendered/micro-services.yaml b/operations/phlare/helm/phlare/rendered/micro-services.yaml index fd0886ff0..387fb0080 100644 --- a/operations/phlare/helm/phlare/rendered/micro-services.yaml +++ b/operations/phlare/helm/phlare/rendered/micro-services.yaml @@ -1,4 +1,11 @@ --- +# Source: phlare/charts/minio/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: "minio-sa" + namespace: "default" +--- # Source: phlare/templates/serviceaccount.yaml apiVersion: v1 kind: ServiceAccount @@ -11,6 +18,340 @@ metadata: app.kubernetes.io/version: "0.1.2" app.kubernetes.io/managed-by: Helm --- +# Source: phlare/charts/minio/templates/secrets.yaml +apiVersion: v1 +kind: Secret +metadata: + name: phlare-dev-minio + namespace: "default" + labels: + app: minio + chart: minio-4.0.12 + release: phlare-dev + heritage: Helm +type: Opaque +data: + rootUser: "Z3JhZmFuYS1waGxhcmU=" + rootPassword: "c3VwZXJzZWNyZXQ=" +--- +# Source: phlare/charts/minio/templates/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: phlare-dev-minio + namespace: "default" + labels: + app: minio + chart: minio-4.0.12 + release: phlare-dev + 
heritage: Helm +data: + initialize: |- + #!/bin/sh + set -e ; # Have script exit in the event of a failed command. + MC_CONFIG_DIR="/etc/minio/mc/" + MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" + + # connectToMinio + # Use a check-sleep-check loop to wait for MinIO service to be available + connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 + } + + # checkBucketExists ($bucket) + # Check if the bucket exists, by using the exit code of `mc ls` + checkBucketExists() { + BUCKET=$1 + CMD=$(${MC} ls myminio/$BUCKET > /dev/null 2>&1) + return $? + } + + # createBucket ($bucket, $policy, $purge) + # Ensure bucket exists, purging if asked to + createBucket() { + BUCKET=$1 + POLICY=$2 + PURGE=$3 + VERSIONING=$4 + OBJECTLOCKING=$5 + + # Purge the bucket, if set & exists + # Since PURGE is user input, check explicitly for `true` + if [ $PURGE = true ]; then + if checkBucketExists $BUCKET ; then + echo "Purging bucket '$BUCKET'." + set +e ; # don't exit if this fails + ${MC} rm -r --force myminio/$BUCKET + set -e ; # reset `e` as active + else + echo "Bucket '$BUCKET' does not exist, skipping purge." 
+ fi + fi + + # Create the bucket if it does not exist and set objectlocking if enabled (NOTE: versioning will be not changed if OBJECTLOCKING is set because it enables versioning to the Buckets created) + if ! checkBucketExists $BUCKET ; then + if [ ! -z $OBJECTLOCKING ] ; then + if [ $OBJECTLOCKING = true ] ; then + echo "Creating bucket with OBJECTLOCKING '$BUCKET'" + ${MC} mb --with-lock myminio/$BUCKET + elif [ $OBJECTLOCKING = false ] ; then + echo "Creating bucket '$BUCKET'" + ${MC} mb myminio/$BUCKET + fi + elif [ -z $OBJECTLOCKING ] ; then + echo "Creating bucket '$BUCKET'" + ${MC} mb myminio/$BUCKET + else + echo "Bucket '$BUCKET' already exists." + fi + fi + + + # set versioning for bucket if objectlocking is disabled or not set + if [ -z $OBJECTLOCKING ] ; then + if [ ! -z $VERSIONING ] ; then + if [ $VERSIONING = true ] ; then + echo "Enabling versioning for '$BUCKET'" + ${MC} version enable myminio/$BUCKET + elif [ $VERSIONING = false ] ; then + echo "Suspending versioning for '$BUCKET'" + ${MC} version suspend myminio/$BUCKET + fi + fi + else + echo "Bucket '$BUCKET' versioning unchanged." + fi + + + # At this point, the bucket should exist, skip checking for existence + # Set policy on the bucket + echo "Setting policy of bucket '$BUCKET' to '$POLICY'." + ${MC} policy set $POLICY myminio/$BUCKET + } + + # Try connecting to MinIO instance + scheme=http + connectToMinio $scheme + + + + # Create the buckets + createBucket grafana-phlare-data none false + add-user: |- + #!/bin/sh + set -e ; # Have script exit in the event of a failed command. + MC_CONFIG_DIR="/etc/minio/mc/" + MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" + + # AccessKey and secretkey credentials file are added to prevent shell execution errors caused by special characters. 
+ # Special characters for example : ',",<,>,{,} + MINIO_ACCESSKEY_SECRETKEY_TMP="/tmp/accessKey_and_secretKey_tmp" + + # connectToMinio + # Use a check-sleep-check loop to wait for MinIO service to be available + connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 + } + + # checkUserExists () + # Check if the user exists, by using the exit code of `mc admin user info` + checkUserExists() { + CMD=$(${MC} admin user info myminio $(head -1 $MINIO_ACCESSKEY_SECRETKEY_TMP) > /dev/null 2>&1) + return $? + } + + # createUser ($policy) + createUser() { + POLICY=$1 + #check accessKey_and_secretKey_tmp file + if [[ ! -f $MINIO_ACCESSKEY_SECRETKEY_TMP ]];then + echo "credentials file does not exist" + return 1 + fi + if [[ $(cat $MINIO_ACCESSKEY_SECRETKEY_TMP|wc -l) -ne 2 ]];then + echo "credentials file is invalid" + rm -f $MINIO_ACCESSKEY_SECRETKEY_TMP + return 1 + fi + USER=$(head -1 $MINIO_ACCESSKEY_SECRETKEY_TMP) + # Create the user if it does not exist + if ! checkUserExists ; then + echo "Creating user '$USER'" + cat $MINIO_ACCESSKEY_SECRETKEY_TMP | ${MC} admin user add myminio + else + echo "User '$USER' already exists." + fi + #clean up credentials files. + rm -f $MINIO_ACCESSKEY_SECRETKEY_TMP + + # set policy for user + if [ ! 
-z $POLICY -a $POLICY != " " ] ; then + echo "Adding policy '$POLICY' for '$USER'" + ${MC} admin policy set myminio $POLICY user=$USER + else + echo "User '$USER' has no policy attached." + fi + } + + # Try connecting to MinIO instance + scheme=http + connectToMinio $scheme + + + + # Create the users + echo console > $MINIO_ACCESSKEY_SECRETKEY_TMP + echo console123 >> $MINIO_ACCESSKEY_SECRETKEY_TMP + createUser consoleAdmin + + add-policy: |- + #!/bin/sh + set -e ; # Have script exit in the event of a failed command. + MC_CONFIG_DIR="/etc/minio/mc/" + MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" + + # connectToMinio + # Use a check-sleep-check loop to wait for MinIO service to be available + connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 + } + + # checkPolicyExists ($policy) + # Check if the policy exists, by using the exit code of `mc admin policy info` + checkPolicyExists() { + POLICY=$1 + CMD=$(${MC} admin policy info myminio $POLICY > /dev/null 2>&1) + return $? + } + + # createPolicy($name, $filename) + createPolicy () { + NAME=$1 + FILENAME=$2 + + # Create the name if it does not exist + echo "Checking policy: $NAME (in /config/$FILENAME.json)" + if ! checkPolicyExists $NAME ; then + echo "Creating policy '$NAME'" + else + echo "Policy '$NAME' already exists." 
+ fi + ${MC} admin policy add myminio $NAME /config/$FILENAME.json + + } + + # Try connecting to MinIO instance + scheme=http + connectToMinio $scheme + + + custom-command: |- + #!/bin/sh + set -e ; # Have script exit in the event of a failed command. + MC_CONFIG_DIR="/etc/minio/mc/" + MC="/usr/bin/mc --insecure --config-dir ${MC_CONFIG_DIR}" + + # connectToMinio + # Use a check-sleep-check loop to wait for MinIO service to be available + connectToMinio() { + SCHEME=$1 + ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts + set -e ; # fail if we can't read the keys. + ACCESS=$(cat /config/rootUser) ; SECRET=$(cat /config/rootPassword) ; + set +e ; # The connections to minio are allowed to fail. + echo "Connecting to MinIO server: $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT" ; + MC_COMMAND="${MC} alias set myminio $SCHEME://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ; + $MC_COMMAND ; + STATUS=$? ; + until [ $STATUS = 0 ] + do + ATTEMPTS=`expr $ATTEMPTS + 1` ; + echo \"Failed attempts: $ATTEMPTS\" ; + if [ $ATTEMPTS -gt $LIMIT ]; then + exit 1 ; + fi ; + sleep 2 ; # 1 second intervals between attempts + $MC_COMMAND ; + STATUS=$? ; + done ; + set -e ; # reset `e` as active + return 0 + } + + # runCommand ($@) + # Run custom mc command + runCommand() { + ${MC} "$@" + return $? 
+ } + + # Try connecting to MinIO instance + scheme=http + connectToMinio $scheme +--- # Source: phlare/templates/configmap.yaml apiVersion: v1 kind: ConfigMap @@ -55,6 +396,14 @@ data: source_labels: - __meta_kubernetes_pod_phase scrape_interval: 15s + storage: + backend: s3 + s3: + access_key_id: grafana-phlare + bucket_name: grafana-phlare-data + endpoint: phlare-dev-minio:9000 + insecure: true + secret_access_key: supersecret --- # Source: phlare/templates/clusterrole.yaml apiVersion: rbac.authorization.k8s.io/v1 @@ -102,6 +451,74 @@ subjects: name: phlare-dev namespace: default --- +# Source: phlare/charts/minio/templates/console-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: phlare-dev-minio-console + namespace: "default" + labels: + app: minio + chart: minio-4.0.12 + release: phlare-dev + heritage: Helm +spec: + type: ClusterIP + ports: + - name: http + port: 9001 + protocol: TCP + targetPort: 9001 + selector: + app: minio + release: phlare-dev +--- +# Source: phlare/charts/minio/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: phlare-dev-minio + namespace: "default" + labels: + app: minio + chart: minio-4.0.12 + release: phlare-dev + heritage: Helm + monitoring: "true" +spec: + type: ClusterIP + ports: + - name: http + port: 9000 + protocol: TCP + targetPort: 9000 + selector: + app: minio + release: phlare-dev +--- +# Source: phlare/charts/minio/templates/statefulset.yaml +apiVersion: v1 +kind: Service +metadata: + name: phlare-dev-minio-svc + namespace: "default" + labels: + app: minio + chart: minio-4.0.12 + release: "phlare-dev" + heritage: "Helm" +spec: + publishNotReadyAddresses: true + clusterIP: None + ports: + - name: http + port: 9000 + protocol: TCP + targetPort: 9000 + selector: + app: minio + release: phlare-dev +--- # Source: phlare/templates/memberlist-service.yaml apiVersion: v1 kind: Service @@ -132,14 +549,134 @@ spec: apiVersion: v1 kind: Service metadata: - name: phlare-dev + name: phlare-dev-agent 
+ labels: + helm.sh/chart: phlare-0.1.3 + app.kubernetes.io/name: phlare + app.kubernetes.io/instance: phlare-dev + app.kubernetes.io/version: "0.1.2" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: "agent" +spec: + type: ClusterIP + ports: + - port: 4100 + targetPort: http2 + protocol: TCP + name: http2 + selector: + app.kubernetes.io/name: phlare + app.kubernetes.io/instance: phlare-dev + app.kubernetes.io/component: "agent" +--- +# Source: phlare/templates/services.yaml +apiVersion: v1 +kind: Service +metadata: + name: phlare-dev-distributor + labels: + helm.sh/chart: phlare-0.1.3 + app.kubernetes.io/name: phlare + app.kubernetes.io/instance: phlare-dev + app.kubernetes.io/version: "0.1.2" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: "distributor" +spec: + type: ClusterIP + ports: + - port: 4100 + targetPort: http2 + protocol: TCP + name: http2 + selector: + app.kubernetes.io/name: phlare + app.kubernetes.io/instance: phlare-dev + app.kubernetes.io/component: "distributor" +--- +# Source: phlare/templates/services.yaml +apiVersion: v1 +kind: Service +metadata: + name: phlare-dev-ingester + labels: + helm.sh/chart: phlare-0.1.3 + app.kubernetes.io/name: phlare + app.kubernetes.io/instance: phlare-dev + app.kubernetes.io/version: "0.1.2" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: "ingester" +spec: + type: ClusterIP + ports: + - port: 4100 + targetPort: http2 + protocol: TCP + name: http2 + selector: + app.kubernetes.io/name: phlare + app.kubernetes.io/instance: phlare-dev + app.kubernetes.io/component: "ingester" +--- +# Source: phlare/templates/services.yaml +apiVersion: v1 +kind: Service +metadata: + name: phlare-dev-ingester-headless + labels: + helm.sh/chart: phlare-0.1.3 + app.kubernetes.io/name: phlare + app.kubernetes.io/instance: phlare-dev + app.kubernetes.io/version: "0.1.2" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: "ingester" +spec: + type: ClusterIP + ports: + 
- port: 4100 + targetPort: http2 + protocol: TCP + name: http2 + selector: + app.kubernetes.io/name: phlare + app.kubernetes.io/instance: phlare-dev + app.kubernetes.io/component: "ingester" +--- +# Source: phlare/templates/services.yaml +apiVersion: v1 +kind: Service +metadata: + name: phlare-dev-querier + labels: + helm.sh/chart: phlare-0.1.3 + app.kubernetes.io/name: phlare + app.kubernetes.io/instance: phlare-dev + app.kubernetes.io/version: "0.1.2" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: "querier" +spec: + type: ClusterIP + ports: + - port: 4100 + targetPort: http2 + protocol: TCP + name: http2 + selector: + app.kubernetes.io/name: phlare + app.kubernetes.io/instance: phlare-dev + app.kubernetes.io/component: "querier" +--- +# Source: phlare/templates/services.yaml +apiVersion: v1 +kind: Service +metadata: + name: phlare-dev-query-frontend labels: helm.sh/chart: phlare-0.1.3 app.kubernetes.io/name: phlare app.kubernetes.io/instance: phlare-dev app.kubernetes.io/version: "0.1.2" app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "all" + app.kubernetes.io/component: "query-frontend" spec: type: ClusterIP ports: @@ -150,20 +687,20 @@ spec: selector: app.kubernetes.io/name: phlare app.kubernetes.io/instance: phlare-dev - app.kubernetes.io/component: "all" + app.kubernetes.io/component: "query-frontend" --- # Source: phlare/templates/services.yaml apiVersion: v1 kind: Service metadata: - name: phlare-dev-headless + name: phlare-dev-query-scheduler labels: helm.sh/chart: phlare-0.1.3 app.kubernetes.io/name: phlare app.kubernetes.io/instance: phlare-dev app.kubernetes.io/version: "0.1.2" app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "all" + app.kubernetes.io/component: "query-scheduler" spec: type: ClusterIP ports: @@ -174,40 +711,38 @@ spec: selector: app.kubernetes.io/name: phlare app.kubernetes.io/instance: phlare-dev - app.kubernetes.io/component: "all" + app.kubernetes.io/component: "query-scheduler" 
--- # Source: phlare/templates/deployments-statefulsets.yaml apiVersion: apps/v1 -kind: StatefulSet +kind: Deployment metadata: - name: phlare-dev + name: phlare-dev-agent labels: helm.sh/chart: phlare-0.1.3 app.kubernetes.io/name: phlare app.kubernetes.io/instance: phlare-dev app.kubernetes.io/version: "0.1.2" app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: "all" + app.kubernetes.io/component: "agent" spec: - serviceName: phlare-dev-headless - podManagementPolicy: Parallel replicas: 1 selector: matchLabels: app.kubernetes.io/name: phlare app.kubernetes.io/instance: phlare-dev - app.kubernetes.io/component: "all" + app.kubernetes.io/component: "agent" template: metadata: annotations: - checksum/config: 822e78696be7f5e8d81bd59961864293b666236f41a6db4261a554504de0da20 + checksum/config: dac4b4ee80ee3e81bd96c0df7a55044281aff3104e08a4f5c30ac7c4d49f5a49 phlare.grafana.com/port: "4100" phlare.grafana.com/scrape: "true" labels: app.kubernetes.io/name: phlare app.kubernetes.io/instance: phlare-dev - app.kubernetes.io/component: "all" - name: "phlare" + app.kubernetes.io/component: "agent" + name: "agent" spec: serviceAccountName: phlare-dev securityContext: @@ -215,15 +750,16 @@ spec: runAsNonRoot: true runAsUser: 10001 containers: - - name: "phlare" + - name: "agent" securityContext: {} image: "grafana/phlare:0.1.1" imagePullPolicy: IfNotPresent args: - - "-target=all" + - "-target=agent" - "-memberlist.cluster-label=default-phlare-dev" - "-memberlist.join=phlare-dev-memberlist.default.svc.cluster.local." 
+ - "-client.url=http://phlare-dev-distributor.default.svc.cluster.local.:4100" - "-config.file=/etc/phlare/config.yaml" - "-log.level=debug" ports: @@ -244,10 +780,595 @@ spec: - name: data mountPath: /data resources: + limits: + memory: 512Mi + requests: + cpu: 50m + memory: 128Mi + volumes: + - name: config + configMap: + name: phlare-dev-config + - name: data + emptyDir: {} +--- +# Source: phlare/templates/deployments-statefulsets.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: phlare-dev-distributor + labels: + helm.sh/chart: phlare-0.1.3 + app.kubernetes.io/name: phlare + app.kubernetes.io/instance: phlare-dev + app.kubernetes.io/version: "0.1.2" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: "distributor" +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: phlare + app.kubernetes.io/instance: phlare-dev + app.kubernetes.io/component: "distributor" + template: + metadata: + annotations: + checksum/config: dac4b4ee80ee3e81bd96c0df7a55044281aff3104e08a4f5c30ac7c4d49f5a49 + phlare.grafana.com/port: "4100" + phlare.grafana.com/scrape: "true" + labels: + app.kubernetes.io/name: phlare + app.kubernetes.io/instance: phlare-dev + app.kubernetes.io/component: "distributor" + name: "distributor" + spec: + serviceAccountName: phlare-dev + securityContext: + fsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + containers: + - name: "distributor" + securityContext: {} + image: "grafana/phlare:0.1.1" + imagePullPolicy: IfNotPresent + args: + - "-target=distributor" + - "-memberlist.cluster-label=default-phlare-dev" + - "-memberlist.join=phlare-dev-memberlist.default.svc.cluster.local." 
+ - "-config.file=/etc/phlare/config.yaml" + - "-log.level=debug" + ports: + - name: http2 + containerPort: 4100 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + readinessProbe: + httpGet: + path: /ready + port: http2 + volumeMounts: + - name: config + mountPath: /etc/phlare/config.yaml + subPath: config.yaml + - name: data + mountPath: /data + resources: + limits: + memory: 1Gi + requests: + cpu: 500m + memory: 256Mi volumes: - name: config configMap: name: phlare-dev-config - name: data emptyDir: {} +--- +# Source: phlare/templates/deployments-statefulsets.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: phlare-dev-querier + labels: + helm.sh/chart: phlare-0.1.3 + app.kubernetes.io/name: phlare + app.kubernetes.io/instance: phlare-dev + app.kubernetes.io/version: "0.1.2" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: "querier" +spec: + replicas: 3 + selector: + matchLabels: + app.kubernetes.io/name: phlare + app.kubernetes.io/instance: phlare-dev + app.kubernetes.io/component: "querier" + template: + metadata: + annotations: + checksum/config: dac4b4ee80ee3e81bd96c0df7a55044281aff3104e08a4f5c30ac7c4d49f5a49 + phlare.grafana.com/port: "4100" + phlare.grafana.com/scrape: "true" + labels: + app.kubernetes.io/name: phlare + app.kubernetes.io/instance: phlare-dev + app.kubernetes.io/component: "querier" + name: "querier" + spec: + serviceAccountName: phlare-dev + securityContext: + fsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + containers: + - name: "querier" + securityContext: + {} + image: "grafana/phlare:0.1.1" + imagePullPolicy: IfNotPresent + args: + - "-target=querier" + - "-memberlist.cluster-label=default-phlare-dev" + - "-memberlist.join=phlare-dev-memberlist.default.svc.cluster.local." 
+ - "-config.file=/etc/phlare/config.yaml" + - "-log.level=debug" + ports: + - name: http2 + containerPort: 4100 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + readinessProbe: + httpGet: + path: /ready + port: http2 + volumeMounts: + - name: config + mountPath: /etc/phlare/config.yaml + subPath: config.yaml + - name: data + mountPath: /data + resources: + limits: + memory: 1Gi + requests: + cpu: 100m + memory: 256Mi + volumes: + - name: config + configMap: + name: phlare-dev-config + - name: data + emptyDir: {} +--- +# Source: phlare/templates/deployments-statefulsets.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: phlare-dev-query-frontend + labels: + helm.sh/chart: phlare-0.1.3 + app.kubernetes.io/name: phlare + app.kubernetes.io/instance: phlare-dev + app.kubernetes.io/version: "0.1.2" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: "query-frontend" +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: phlare + app.kubernetes.io/instance: phlare-dev + app.kubernetes.io/component: "query-frontend" + template: + metadata: + annotations: + checksum/config: dac4b4ee80ee3e81bd96c0df7a55044281aff3104e08a4f5c30ac7c4d49f5a49 + phlare.grafana.com/port: "4100" + phlare.grafana.com/scrape: "true" + labels: + app.kubernetes.io/name: phlare + app.kubernetes.io/instance: phlare-dev + app.kubernetes.io/component: "query-frontend" + name: "query-frontend" + spec: + serviceAccountName: phlare-dev + securityContext: + fsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + containers: + - name: "query-frontend" + securityContext: + {} + image: "grafana/phlare:0.1.1" + imagePullPolicy: IfNotPresent + args: + - "-target=query-frontend" + - "-memberlist.cluster-label=default-phlare-dev" + - "-memberlist.join=phlare-dev-memberlist.default.svc.cluster.local." 
+ - "-config.file=/etc/phlare/config.yaml" + - "-log.level=debug" + ports: + - name: http2 + containerPort: 4100 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + readinessProbe: + httpGet: + path: /ready + port: http2 + volumeMounts: + - name: config + mountPath: /etc/phlare/config.yaml + subPath: config.yaml + - name: data + mountPath: /data + resources: + limits: + memory: 1Gi + requests: + cpu: 100m + memory: 256Mi + volumes: + - name: config + configMap: + name: phlare-dev-config + - name: data + emptyDir: {} +--- +# Source: phlare/templates/deployments-statefulsets.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: phlare-dev-query-scheduler + labels: + helm.sh/chart: phlare-0.1.3 + app.kubernetes.io/name: phlare + app.kubernetes.io/instance: phlare-dev + app.kubernetes.io/version: "0.1.2" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: "query-scheduler" +spec: + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: phlare + app.kubernetes.io/instance: phlare-dev + app.kubernetes.io/component: "query-scheduler" + template: + metadata: + annotations: + checksum/config: dac4b4ee80ee3e81bd96c0df7a55044281aff3104e08a4f5c30ac7c4d49f5a49 + phlare.grafana.com/port: "4100" + phlare.grafana.com/scrape: "true" + labels: + app.kubernetes.io/name: phlare + app.kubernetes.io/instance: phlare-dev + app.kubernetes.io/component: "query-scheduler" + name: "query-scheduler" + spec: + serviceAccountName: phlare-dev + securityContext: + fsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + containers: + - name: "query-scheduler" + securityContext: + {} + image: "grafana/phlare:0.1.1" + imagePullPolicy: IfNotPresent + args: + - "-target=query-scheduler" + - "-memberlist.cluster-label=default-phlare-dev" + - "-memberlist.join=phlare-dev-memberlist.default.svc.cluster.local." 
+ - "-config.file=/etc/phlare/config.yaml" + - "-log.level=debug" + ports: + - name: http2 + containerPort: 4100 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + readinessProbe: + httpGet: + path: /ready + port: http2 + volumeMounts: + - name: config + mountPath: /etc/phlare/config.yaml + subPath: config.yaml + - name: data + mountPath: /data + resources: + limits: + memory: 1Gi + requests: + cpu: 100m + memory: 256Mi + volumes: + - name: config + configMap: + name: phlare-dev-config + - name: data + emptyDir: {} +--- +# Source: phlare/charts/minio/templates/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: phlare-dev-minio + namespace: "default" + labels: + app: minio + chart: minio-4.0.12 + release: phlare-dev + heritage: Helm +spec: + updateStrategy: + type: RollingUpdate + podManagementPolicy: "Parallel" + serviceName: phlare-dev-minio-svc + replicas: 1 + selector: + matchLabels: + app: minio + release: phlare-dev + template: + metadata: + name: phlare-dev-minio + labels: + app: minio + release: phlare-dev + annotations: + checksum/secrets: 724d4b2b4c21778e1a7ba2b50758a575320075a4ad70fb3d37039151f7a3320d + checksum/config: a33d0359184ba5e8ad28956d2aa6e225869c19657918cd81d3c90620a36609ba + phlare.grafana.com/port: "9000" + phlare.grafana.com/scrape: "true" + spec: + securityContext: + runAsUser: 1000 + runAsGroup: 1000 + fsGroup: 1000 + fsGroupChangePolicy: OnRootMismatch + + serviceAccountName: minio-sa + containers: + - name: minio + image: quay.io/minio/minio:RELEASE.2022-08-13T21-54-44Z + imagePullPolicy: IfNotPresent + + command: [ "/bin/sh", + "-ce", + "/usr/bin/docker-entrypoint.sh minio server http://phlare-dev-minio-{0...0}.phlare-dev-minio-svc.default.svc.cluster.local/export-{0...1} -S /etc/minio/certs/ --address :9000 --console-address :9001" ] + volumeMounts: + - name: export-0 + mountPath: /export-0 + - name: export-1 + mountPath: /export-1 + ports: + - name: http + containerPort: 9000 + - name: 
http-console + containerPort: 9001 + env: + - name: MINIO_ROOT_USER + valueFrom: + secretKeyRef: + name: phlare-dev-minio + key: rootUser + - name: MINIO_ROOT_PASSWORD + valueFrom: + secretKeyRef: + name: phlare-dev-minio + key: rootPassword + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: "public" + resources: + requests: + cpu: 100m + memory: 128Mi + volumes: + - name: minio-user + secret: + secretName: phlare-dev-minio + volumeClaimTemplates: + - metadata: + name: export-0 + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 5Gi + - metadata: + name: export-1 + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 5Gi +--- +# Source: phlare/templates/deployments-statefulsets.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: phlare-dev-ingester + labels: + helm.sh/chart: phlare-0.1.3 + app.kubernetes.io/name: phlare + app.kubernetes.io/instance: phlare-dev + app.kubernetes.io/version: "0.1.2" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: "ingester" +spec: + serviceName: phlare-dev-ingester-headless + podManagementPolicy: Parallel + replicas: 3 + selector: + matchLabels: + app.kubernetes.io/name: phlare + app.kubernetes.io/instance: phlare-dev + app.kubernetes.io/component: "ingester" + template: + metadata: + annotations: + checksum/config: dac4b4ee80ee3e81bd96c0df7a55044281aff3104e08a4f5c30ac7c4d49f5a49 + phlare.grafana.com/port: "4100" + phlare.grafana.com/scrape: "true" + labels: + app.kubernetes.io/name: phlare + app.kubernetes.io/instance: phlare-dev + app.kubernetes.io/component: "ingester" + name: "ingester" + spec: + serviceAccountName: phlare-dev + securityContext: + fsGroup: 10001 + runAsNonRoot: true + runAsUser: 10001 + containers: + - name: "ingester" + securityContext: + {} + image: "grafana/phlare:0.1.1" + imagePullPolicy: IfNotPresent + args: + - "-target=ingester" + - "-memberlist.cluster-label=default-phlare-dev" + - 
"-memberlist.join=phlare-dev-memberlist.default.svc.cluster.local." + - "-config.file=/etc/phlare/config.yaml" + - "-log.level=debug" + ports: + - name: http2 + containerPort: 4100 + protocol: TCP + - name: memberlist + containerPort: 7946 + protocol: TCP + readinessProbe: + httpGet: + path: /ready + port: http2 + volumeMounts: + - name: config + mountPath: /etc/phlare/config.yaml + subPath: config.yaml + - name: data + mountPath: /data + resources: + limits: + memory: 16Gi + requests: + cpu: 1 + memory: 8Gi + volumes: + - name: config + configMap: + name: phlare-dev-config + - name: data + emptyDir: {} +--- +# Source: phlare/charts/minio/templates/post-install-create-bucket-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: phlare-dev-minio-make-bucket-job + namespace: "default" + labels: + app: minio-make-bucket-job + chart: minio-4.0.12 + release: phlare-dev + heritage: Helm + annotations: + "helm.sh/hook": post-install,post-upgrade + "helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +spec: + template: + metadata: + labels: + app: minio-job + release: phlare-dev + spec: + restartPolicy: OnFailure + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: phlare-dev-minio + - secret: + name: phlare-dev-minio + containers: + - name: minio-mc + image: "quay.io/minio/mc:RELEASE.2022-08-11T00-30-48Z" + imagePullPolicy: IfNotPresent + command: ["/bin/sh", "/config/initialize"] + env: + - name: MINIO_ENDPOINT + value: phlare-dev-minio + - name: MINIO_PORT + value: "9000" + volumeMounts: + - name: minio-configuration + mountPath: /config + resources: + requests: + memory: 128Mi +--- +# Source: phlare/charts/minio/templates/post-install-create-user-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: phlare-dev-minio-make-user-job + namespace: "default" + labels: + app: minio-make-user-job + chart: minio-4.0.12 + release: phlare-dev + heritage: Helm + annotations: + "helm.sh/hook": post-install,post-upgrade + 
"helm.sh/hook-delete-policy": hook-succeeded,before-hook-creation +spec: + template: + metadata: + labels: + app: minio-job + release: phlare-dev + spec: + restartPolicy: OnFailure + volumes: + - name: minio-configuration + projected: + sources: + - configMap: + name: phlare-dev-minio + - secret: + name: phlare-dev-minio + containers: + - name: minio-mc + image: "quay.io/minio/mc:RELEASE.2022-08-11T00-30-48Z" + imagePullPolicy: IfNotPresent + command: ["/bin/sh", "/config/add-user"] + env: + - name: MINIO_ENDPOINT + value: phlare-dev-minio + - name: MINIO_PORT + value: "9000" + volumeMounts: + - name: minio-configuration + mountPath: /config + resources: + requests: + memory: 128Mi diff --git a/operations/phlare/helm/phlare/values-micro-services.yaml b/operations/phlare/helm/phlare/values-micro-services.yaml index e69de29bb..7c369a5f6 100644 --- a/operations/phlare/helm/phlare/values-micro-services.yaml +++ b/operations/phlare/helm/phlare/values-micro-services.yaml @@ -0,0 +1,63 @@ +# Default values for phlare. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. 
+ +phlare: + components: + querier: + kind: Deployment + replicaCount: 3 + resources: + limits: + memory: 1Gi + requests: + memory: 256Mi + cpu: 100m + query-frontend: + kind: Deployment + replicaCount: 2 + resources: + limits: + memory: 1Gi + requests: + memory: 256Mi + cpu: 100m + query-scheduler: + kind: Deployment + replicaCount: 2 + resources: + limits: + memory: 1Gi + requests: + memory: 256Mi + cpu: 100m + distributor: + kind: Deployment + replicaCount: 2 + resources: + limits: + memory: 1Gi + requests: + memory: 256Mi + cpu: 500m + agent: + kind: Deployment + replicaCount: 1 + resources: + limits: + memory: 512Mi + requests: + memory: 128Mi + cpu: 50m + ingester: + kind: StatefulSet + replicaCount: 3 + resources: + limits: + memory: 16Gi + requests: + memory: 8Gi + cpu: 1 + +minio: + enabled: true diff --git a/operations/phlare/jsonnet/values-micro-services.json b/operations/phlare/jsonnet/values-micro-services.json index ec747fa47..cf248a37b 100644 --- a/operations/phlare/jsonnet/values-micro-services.json +++ b/operations/phlare/jsonnet/values-micro-services.json @@ -1 +1,87 @@ -null \ No newline at end of file +{ + "minio": { + "enabled": true + }, + "phlare": { + "components": { + "agent": { + "kind": "Deployment", + "replicaCount": 1, + "resources": { + "limits": { + "memory": "512Mi" + }, + "requests": { + "cpu": "50m", + "memory": "128Mi" + } + } + }, + "distributor": { + "kind": "Deployment", + "replicaCount": 2, + "resources": { + "limits": { + "memory": "1Gi" + }, + "requests": { + "cpu": "500m", + "memory": "256Mi" + } + } + }, + "ingester": { + "kind": "StatefulSet", + "replicaCount": 3, + "resources": { + "limits": { + "memory": "16Gi" + }, + "requests": { + "cpu": 1, + "memory": "8Gi" + } + } + }, + "querier": { + "kind": "Deployment", + "replicaCount": 3, + "resources": { + "limits": { + "memory": "1Gi" + }, + "requests": { + "cpu": "100m", + "memory": "256Mi" + } + } + }, + "query-frontend": { + "kind": "Deployment", + "replicaCount": 2, + 
"resources": { + "limits": { + "memory": "1Gi" + }, + "requests": { + "cpu": "100m", + "memory": "256Mi" + } + } + }, + "query-scheduler": { + "kind": "Deployment", + "replicaCount": 2, + "resources": { + "limits": { + "memory": "1Gi" + }, + "requests": { + "cpu": "100m", + "memory": "256Mi" + } + } + } + } + } +} \ No newline at end of file