From da3899ca06cfe85d7ac2b67e2be8148f0bef9d7c Mon Sep 17 00:00:00 2001 From: Laxman Ch Date: Mon, 21 Dec 2020 00:25:18 +0530 Subject: [PATCH 1/8] Snyk fixes + Dependency upgrades to latests --- hypertrace-ingester/build.gradle.kts | 12 +++++----- .../enriched-span-constants/build.gradle.kts | 6 ----- .../build.gradle.kts | 5 ++--- .../build.gradle.kts | 22 +++++++------------ .../build.gradle.kts | 5 ----- .../trace-reader/build.gradle.kts | 5 +++++ .../hypertrace-view-creator/build.gradle.kts | 3 --- .../raw-spans-grouper/build.gradle.kts | 4 ++-- .../span-normalizer/build.gradle.kts | 6 ++--- 9 files changed, 27 insertions(+), 41 deletions(-) diff --git a/hypertrace-ingester/build.gradle.kts b/hypertrace-ingester/build.gradle.kts index 9bc6a4872..11a22c81c 100644 --- a/hypertrace-ingester/build.gradle.kts +++ b/hypertrace-ingester/build.gradle.kts @@ -25,12 +25,14 @@ hypertraceDocker { } dependencies { - implementation("org.hypertrace.core.kafkastreams.framework:kafka-streams-framework:0.1.9") - implementation("org.hypertrace.core.serviceframework:platform-service-framework:0.1.9") - implementation("org.hypertrace.core.serviceframework:platform-metrics:0.1.8") + implementation("org.hypertrace.core.kafkastreams.framework:kafka-streams-framework:0.1.15-SNAPSHOT") + implementation("org.hypertrace.core.serviceframework:platform-service-framework:0.1.18") + implementation("org.hypertrace.core.serviceframework:platform-metrics:0.1.18") implementation("org.hypertrace.core.datamodel:data-model:0.1.12") - implementation("org.hypertrace.core.viewgenerator:view-generator-framework:0.1.14") + implementation("org.hypertrace.core.viewgenerator:view-generator-framework:0.1.19") implementation("com.typesafe:config:1.4.0") + implementation("com.google.guava:guava:30.1-jre") + implementation("org.apache.commons:commons-lang3:3.11") implementation(project(":span-normalizer:span-normalizer")) implementation(project(":raw-spans-grouper:raw-spans-grouper")) @@ -40,7 +42,7 @@ dependencies { testImplementation("org.junit.jupiter:junit-jupiter:5.7.0") testImplementation("org.mockito:mockito-core:3.6.0") testImplementation("org.junit-pioneer:junit-pioneer:1.0.0") - testImplementation("org.apache.kafka:kafka-streams-test-utils:5.5.1-ccs") + testImplementation("org.apache.kafka:kafka-streams-test-utils:6.0.1-ccs") testImplementation(project(":hypertrace-view-generator:hypertrace-view-generator-api")) testImplementation(project(":span-normalizer:span-normalizer-api")) } diff --git a/hypertrace-trace-enricher/enriched-span-constants/build.gradle.kts b/hypertrace-trace-enricher/enriched-span-constants/build.gradle.kts index 53c570baa..eef4a1409 100644 --- a/hypertrace-trace-enricher/enriched-span-constants/build.gradle.kts +++ b/hypertrace-trace-enricher/enriched-span-constants/build.gradle.kts @@ -65,12 +65,6 @@ dependencies { implementation(project(":span-normalizer:span-normalizer-constants")) implementation("org.hypertrace.entity.service:entity-service-api:0.1.23") - constraints { - implementation("com.google.guava:guava:30.0-jre") { - because("https://snyk.io/vuln/SNYK-JAVA-COMGOOGLEGUAVA-1015415") - } - } - testImplementation("org.junit.jupiter:junit-jupiter:5.7.0") testImplementation("org.mockito:mockito-core:3.6.28") } diff --git a/hypertrace-trace-enricher/hypertrace-trace-enricher-api/build.gradle.kts b/hypertrace-trace-enricher/hypertrace-trace-enricher-api/build.gradle.kts index 7456281d3..f40927161 100644 --- a/hypertrace-trace-enricher/hypertrace-trace-enricher-api/build.gradle.kts +++ 
b/hypertrace-trace-enricher/hypertrace-trace-enricher-api/build.gradle.kts @@ -14,10 +14,9 @@ dependencies { implementation("org.slf4j:slf4j-api:1.7.30") implementation("org.apache.commons:commons-lang3:3.11") constraints { - implementation("com.google.guava:guava:30.0-jre") { - because("https://snyk.io/vuln/SNYK-JAVA-COMGOOGLEGUAVA-1015415") + implementation("com.google.guava:guava:30.1-jre") { + because("Information Disclosure [Medium Severity][https://snyk.io/vuln/SNYK-JAVA-COMGOOGLEGUAVA-1015415] in com.google.guava:guava@29.0-android") } } - testImplementation("org.junit.jupiter:junit-jupiter:5.7.0") } diff --git a/hypertrace-trace-enricher/hypertrace-trace-enricher/build.gradle.kts b/hypertrace-trace-enricher/hypertrace-trace-enricher/build.gradle.kts index 6fa814cd7..85de6ac10 100644 --- a/hypertrace-trace-enricher/hypertrace-trace-enricher/build.gradle.kts +++ b/hypertrace-trace-enricher/hypertrace-trace-enricher/build.gradle.kts @@ -34,28 +34,22 @@ tasks.test { } dependencies { - constraints { - implementation("org.hibernate.validator:hibernate-validator:6.1.5.Final") { - because("Cross-site Scripting (XSS) [Medium Severity][https://snyk.io/vuln/SNYK-JAVA-ORGHIBERNATEVALIDATOR-541187] in org.hibernate.validator:hibernate-validator@6.0.17.Final\n" + - " introduced by io.confluent:kafka-avro-serializer@5.5.0 > io.confluent:kafka-schema-registry-client@5.5.0 > org.glassfish.jersey.ext:jersey-bean-validation@2.30 > org.hibernate.validator:hibernate-validator@6.0.17.Final") - } - implementation("org.yaml:snakeyaml:1.26") { - because("Denial of Service (DoS) [Medium Severity][https://snyk.io/vuln/SNYK-JAVA-ORGYAML-537645] in org.yaml:snakeyaml@1.23\n" + - " introduced by io.confluent:kafka-avro-serializer@5.5.0 > io.confluent:kafka-schema-registry-client@5.5.0 > io.swagger:swagger-core@1.5.3 > com.fasterxml.jackson.dataformat:jackson-dataformat-yaml@2.4.5 > org.yaml:snakeyaml@1.12") - } - } - implementation(project(":hypertrace-trace-enricher:hypertrace-trace-enricher-impl")) implementation("org.hypertrace.core.datamodel:data-model:0.1.12") - implementation("org.hypertrace.core.flinkutils:flink-utils:0.1.6") implementation("org.hypertrace.core.serviceframework:platform-service-framework:0.1.18") implementation("org.hypertrace.entity.service:entity-service-client:0.1.23") implementation("com.typesafe:config:1.4.1") - implementation("org.hypertrace.core.kafkastreams.framework:kafka-streams-framework:0.1.13") + implementation("org.hypertrace.core.kafkastreams.framework:kafka-streams-framework:0.1.15-SNAPSHOT") + constraints { + implementation("com.google.guava:guava:30.1-jre") { + because("Information Disclosure [Medium Severity][https://snyk.io/vuln/SNYK-JAVA-COMGOOGLEGUAVA-1015415] in com.google.guava:guava@29.0-android") + } + } // Required for the GRPC clients. 
runtimeOnly("io.grpc:grpc-netty-shaded:1.33.1") + // Logging implementation("org.slf4j:slf4j-api:1.7.30") runtimeOnly("org.apache.logging.log4j:log4j-slf4j-impl:2.14.0") @@ -64,5 +58,5 @@ dependencies { testImplementation("org.junit.jupiter:junit-jupiter:5.7.0") testImplementation("org.mockito:mockito-core:3.6.28") testImplementation("org.junit-pioneer:junit-pioneer:1.1.0") - testImplementation("org.apache.kafka:kafka-streams-test-utils:5.5.1-ccs") + testImplementation("org.apache.kafka:kafka-streams-test-utils:6.0.1-ccs") } diff --git a/hypertrace-trace-enricher/hypertrace-trace-visualizer/build.gradle.kts b/hypertrace-trace-enricher/hypertrace-trace-visualizer/build.gradle.kts index 9dc2d51c4..6d254c864 100644 --- a/hypertrace-trace-enricher/hypertrace-trace-visualizer/build.gradle.kts +++ b/hypertrace-trace-enricher/hypertrace-trace-visualizer/build.gradle.kts @@ -7,11 +7,6 @@ dependencies { implementation("org.json:json:20201115") implementation("org.apache.commons:commons-lang3:3.11") - constraints { - implementation("com.google.guava:guava:30.0-jre") { - because("https://snyk.io/vuln/SNYK-JAVA-COMGOOGLEGUAVA-1015415") - } - } } description = "Trace Visualizer to help visualize a structured trace." diff --git a/hypertrace-trace-enricher/trace-reader/build.gradle.kts b/hypertrace-trace-enricher/trace-reader/build.gradle.kts index 24c5530df..c6e5822ea 100644 --- a/hypertrace-trace-enricher/trace-reader/build.gradle.kts +++ b/hypertrace-trace-enricher/trace-reader/build.gradle.kts @@ -15,6 +15,11 @@ dependencies { implementation("org.hypertrace.core.grpcutils:grpc-client-rx-utils:0.3.2") implementation("org.hypertrace.core.grpcutils:grpc-context-utils:0.3.2") implementation("io.reactivex.rxjava3:rxjava:3.0.7") + constraints { + implementation("com.google.guava:guava:30.1-jre") { + because("Information Disclosure [Medium Severity][https://snyk.io/vuln/SNYK-JAVA-COMGOOGLEGUAVA-1015415] in com.google.guava:guava@29.0-android") + } + } testImplementation("org.junit.jupiter:junit-jupiter:5.7.0") testImplementation("org.mockito:mockito-core:3.6.28") diff --git a/hypertrace-view-generator/hypertrace-view-creator/build.gradle.kts b/hypertrace-view-generator/hypertrace-view-creator/build.gradle.kts index 231b7a1d9..b93de73e6 100644 --- a/hypertrace-view-generator/hypertrace-view-creator/build.gradle.kts +++ b/hypertrace-view-generator/hypertrace-view-creator/build.gradle.kts @@ -19,9 +19,6 @@ dependencies { implementation(project(":hypertrace-view-generator:hypertrace-view-generator-api")) implementation("org.hypertrace.core.viewcreator:view-creator-framework:0.1.19") constraints { - implementation("com.google.guava:guava:30.0-jre") { - because("https://snyk.io/vuln/SNYK-JAVA-COMGOOGLEGUAVA-1015415") - } // to have calcite libs on the same version implementation("org.apache.calcite:calcite-babel:1.26.0") { because("https://snyk.io/vuln/SNYK-JAVA-ORGAPACHECALCITE-1038296") diff --git a/raw-spans-grouper/raw-spans-grouper/build.gradle.kts b/raw-spans-grouper/raw-spans-grouper/build.gradle.kts index 8cbac67c8..4fdbbb70b 100644 --- a/raw-spans-grouper/raw-spans-grouper/build.gradle.kts +++ b/raw-spans-grouper/raw-spans-grouper/build.gradle.kts @@ -39,7 +39,7 @@ dependencies { implementation("org.hypertrace.core.serviceframework:platform-service-framework:0.1.18") implementation("org.hypertrace.core.serviceframework:platform-metrics:0.1.18") - implementation("org.hypertrace.core.kafkastreams.framework:kafka-streams-framework:0.1.13") + 
implementation("org.hypertrace.core.kafkastreams.framework:kafka-streams-framework:0.1.15-SNAPSHOT") implementation("com.typesafe:config:1.4.1") implementation("de.javakaffee:kryo-serializers:0.45") implementation("io.confluent:kafka-avro-serializer:5.5.0") @@ -51,5 +51,5 @@ dependencies { testImplementation("org.junit.jupiter:junit-jupiter:5.7.0") testImplementation("org.mockito:mockito-core:3.6.28") testImplementation("org.junit-pioneer:junit-pioneer:1.1.0") - testImplementation("org.apache.kafka:kafka-streams-test-utils:5.5.1-ccs") + testImplementation("org.apache.kafka:kafka-streams-test-utils:6.0.1-ccs") } diff --git a/span-normalizer/span-normalizer/build.gradle.kts b/span-normalizer/span-normalizer/build.gradle.kts index 60ccf0197..69d7bb0f3 100644 --- a/span-normalizer/span-normalizer/build.gradle.kts +++ b/span-normalizer/span-normalizer/build.gradle.kts @@ -43,7 +43,7 @@ dependencies { implementation("org.hypertrace.core.datamodel:data-model:0.1.12") implementation("org.hypertrace.core.serviceframework:platform-service-framework:0.1.18") implementation("org.hypertrace.core.serviceframework:platform-metrics:0.1.18") - implementation("org.hypertrace.core.kafkastreams.framework:kafka-streams-framework:0.1.13") + implementation("org.hypertrace.core.kafkastreams.framework:kafka-streams-framework:0.1.15-SNAPSHOT") // Required for the GRPC clients. @@ -56,7 +56,7 @@ dependencies { } implementation("com.typesafe:config:1.4.1") implementation("de.javakaffee:kryo-serializers:0.45") - implementation("io.confluent:kafka-avro-serializer:5.5.1") + implementation("io.confluent:kafka-avro-serializer:6.0.1") implementation("org.apache.commons:commons-lang3:3.11") implementation("org.apache.httpcomponents:httpclient:4.5.13") @@ -67,5 +67,5 @@ dependencies { testImplementation("org.junit.jupiter:junit-jupiter:5.7.0") testImplementation("org.hypertrace.core.serviceframework:platform-metrics:0.1.18") testImplementation("org.junit-pioneer:junit-pioneer:1.1.0") - testImplementation("org.apache.kafka:kafka-streams-test-utils:5.5.1-ccs") + testImplementation("org.apache.kafka:kafka-streams-test-utils:6.0.1-ccs") } From f86d71bb7c7324f8b0d7abeb7d028e0e15071b81 Mon Sep 17 00:00:00 2001 From: Laxman Ch Date: Thu, 24 Dec 2020 15:42:33 +0530 Subject: [PATCH 2/8] Kafka streams config fix/tuning: hypertrace-ingestion --- .gitignore | 4 +- hypertrace-ingester/build.gradle.kts | 2 +- .../helm/templates/trace-enricher-config.yaml | 30 ++++++-- hypertrace-trace-enricher/helm/values.yaml | 44 ++++++++---- .../build.gradle.kts | 2 +- .../templates/raw-spans-grouper-config.yaml | 70 ++++++++++--------- raw-spans-grouper/helm/values.yaml | 65 ++++++++++------- .../raw-spans-grouper/build.gradle.kts | 2 +- .../templates/span-normalizer-config.yaml | 34 +++++++-- span-normalizer/helm/values.yaml | 56 ++++++++++----- .../span-normalizer/build.gradle.kts | 2 +- 11 files changed, 208 insertions(+), 103 deletions(-) diff --git a/.gitignore b/.gitignore index f2c64aa68..de54ef38d 100644 --- a/.gitignore +++ b/.gitignore @@ -20,4 +20,6 @@ test-output # Local config to handle using Java 8 vs java 11. 
.java-version *.tgz - +# helm +charts/ +Chart.lock diff --git a/hypertrace-ingester/build.gradle.kts b/hypertrace-ingester/build.gradle.kts index 11a22c81c..c2d2c489a 100644 --- a/hypertrace-ingester/build.gradle.kts +++ b/hypertrace-ingester/build.gradle.kts @@ -25,7 +25,7 @@ hypertraceDocker { } dependencies { - implementation("org.hypertrace.core.kafkastreams.framework:kafka-streams-framework:0.1.15-SNAPSHOT") + implementation("org.hypertrace.core.kafkastreams.framework:kafka-streams-framework:0.1.15") implementation("org.hypertrace.core.serviceframework:platform-service-framework:0.1.18") implementation("org.hypertrace.core.serviceframework:platform-metrics:0.1.18") implementation("org.hypertrace.core.datamodel:data-model:0.1.12") diff --git a/hypertrace-trace-enricher/helm/templates/trace-enricher-config.yaml b/hypertrace-trace-enricher/helm/templates/trace-enricher-config.yaml index 6ab46551b..b473c815f 100644 --- a/hypertrace-trace-enricher/helm/templates/trace-enricher-config.yaml +++ b/hypertrace-trace-enricher/helm/templates/trace-enricher-config.yaml @@ -8,10 +8,32 @@ data: application.conf: |- kafka.streams.config { application.id = structured-traces-enrichment-job - metrics.recording.level = "{{ .Values.traceEnricherConfig.kafka.streams.config.metricsRecordingLevel }}" - num.stream.threads = "{{ .Values.traceEnricherConfig.kafka.streams.config.numStreamThreads }}" - bootstrap.servers = "{{ .Values.traceEnricherConfig.kafka.streams.config.bootstrapServers }}" - schema.registry.url = "{{ .Values.traceEnricherConfig.kafka.streams.config.schemaRegistryUrl }}" + bootstrap.servers = "{{ .Values.traceEnricherConfig.kafkaStreamsConfig.bootstrapServers }}" + schema.registry.url = "{{ .Values.traceEnricherConfig.kafkaStreamsConfig.schemaRegistryUrl }}" + # kafka streams config + num.stream.threads = "{{ int .Values.traceEnricherConfig.kafkaStreamsConfig.numStreamThreads }}" + commit.interval.ms = "{{ int .Values.traceEnricherConfig.kafkaStreamsConfig.commitIntervalMs }}" + # Common client (prodcuer, consumer, admin) configs + receive.buffer.bytes = "{{ int .Values.traceEnricherConfig.kafkaStreamsConfig.receiveBufferBytes }}" + send.buffer.bytes = "{{ int .Values.traceEnricherConfig.kafkaStreamsConfig.sendBufferBytes }}" + # Producer configs + producer.acks = "{{ .Values.traceEnricherConfig.kafkaStreamsConfig.producerAcks }}" + producer.batch.size = "{{ int .Values.traceEnricherConfig.kafkaStreamsConfig.producerBatchSize }}" + producer.linger.ms = "{{ int .Values.traceEnricherConfig.kafkaStreamsConfig.producerLingerMs }}" + producer.compression.type = "{{ .Values.traceEnricherConfig.kafkaStreamsConfig.producerCompressionType }}" + producer.max.request.size = "{{ int .Values.traceEnricherConfig.kafkaStreamsConfig.producerMaxRequestSize }}" + producer.buffer.memory = "{{ int .Values.traceEnricherConfig.kafkaStreamsConfig.producerBufferMemory }}" + # Consumer configs + consumer.max.partition.fetch.bytes = "{{ int .Values.traceEnricherConfig.kafkaStreamsConfig.consumerMaxPartitionFetchBytes }}" + consumer.max.poll.records = "{{ int .Values.traceEnricherConfig.kafkaStreamsConfig.consumerMaxPollRecords }}" + consumer.session.timeout.ms = "{{ int .Values.traceEnricherConfig.kafkaStreamsConfig.consumerSessionTimeoutMs }}" + # Others + metrics.recording.level = "{{ .Values.traceEnricherConfig.kafkaStreamsConfig.metricsRecordingLevel }}" + {{- if .Values.traceEnricherConfig.extraKafkaStreamsConfig }} + {{- range $key,$value := .Values.traceEnricherConfig.extraKafkaStreamsConfig }} + {{ $key }} = 
{{ $value }} + {{- end }} + {{- end }} } enricher { diff --git a/hypertrace-trace-enricher/helm/values.yaml b/hypertrace-trace-enricher/helm/values.yaml index aadf46133..53caa9d0f 100644 --- a/hypertrace-trace-enricher/helm/values.yaml +++ b/hypertrace-trace-enricher/helm/values.yaml @@ -25,7 +25,7 @@ nodeLabels: {} # This is defined in resources/configs/trace-enricher/application.conf as service.admin.port containerAdminPort: 8099 -javaOpts: "-Xms512M -Xmx1024M" +javaOpts: "-XX:InitialRAMPercentage=50.0 -XX:MaxRAMPercentage=75.0 -XX:MaxDirectMemorySize=128M" livenessProbe: initialDelaySeconds: 10 @@ -45,7 +45,7 @@ resources: memory: 1024Mi limits: cpu: 1.0 - memory: 1536Mi + memory: 1024Mi podLabels: app: hypertrace-trace-enricher @@ -65,13 +65,32 @@ deploymentSelectorMatchLabels: ########### traceEnricherConfig: name: hypertrace-trace-enricher-config - kafka: - streams: - config: - metricsRecordingLevel: INFO - numStreamThreads: 2 - bootstrapServers: "bootstrap:9092" - schemaRegistryUrl: "http://schema-registry-service:8081" + # Important kafka streams configurations which are used in config template goes here. + kafkaStreamsConfig: + bootstrapServers: "bootstrap:9092" + schemaRegistryUrl: "http://schema-registry-service:8081" + # Core config + numStreamThreads: 2 # default = 1 + commitIntervalMs: 30000 # default = 30000 + # Common client (prodcuer, consumer, admin) configs + receiveBufferBytes: 4194304 # default = 32768 (kafka streams default) + sendBufferBytes: 4194304 # default = 131072 (kafka streams default) + # Producer configs + producerAcks: all # default: 1 + producerBatchSize: 524288 # default = 16384 + producerLingerMs: 1000 # default = 100 (kafka streams default) + producerCompressionType: "gzip" # default = none + producerMaxRequestSize: 10485760 # default = 1048576 + producerBufferMemory: 134217728 # default = 33554432 + # Consumer configs + consumerMaxPartitionFetchBytes: 4194304 # default = 1048576 + consumerMaxPollRecords: 1000 # default = 1000 (kafka streams default) + consumerSessionTimeoutMs: 10000 # default = 10000 + # Others + metricsRecordingLevel: INFO # default = INFO + # All other streams config goes here. + # Remove the flower braces and add key: value pair here. 
+ extraKafkaStreamsConfig: {} logConfig: name: hypertrace-trace-enricher-log-config @@ -88,11 +107,12 @@ kafka-topic-creator: kafka: topics: - name: enriched-structured-traces - replicationFactor: 1 + replicationFactor: 3 partitions: 8 configs: - - retention.bytes=4294967296 - - retention.ms=259200000 + - retention.bytes=8589934592 # default = -1 + - retention.ms=86400000 # default = 604800000 (7 days) + - max.message.bytes=10485760 # Allow larger messages for traces zookeeper: address: zookeeper:2181 imagePullSecrets: [] diff --git a/hypertrace-trace-enricher/hypertrace-trace-enricher/build.gradle.kts b/hypertrace-trace-enricher/hypertrace-trace-enricher/build.gradle.kts index 85de6ac10..6e01b4f81 100644 --- a/hypertrace-trace-enricher/hypertrace-trace-enricher/build.gradle.kts +++ b/hypertrace-trace-enricher/hypertrace-trace-enricher/build.gradle.kts @@ -40,7 +40,7 @@ dependencies { implementation("org.hypertrace.entity.service:entity-service-client:0.1.23") implementation("com.typesafe:config:1.4.1") - implementation("org.hypertrace.core.kafkastreams.framework:kafka-streams-framework:0.1.15-SNAPSHOT") + implementation("org.hypertrace.core.kafkastreams.framework:kafka-streams-framework:0.1.15") constraints { implementation("com.google.guava:guava:30.1-jre") { because("Information Disclosure [Medium Severity][https://snyk.io/vuln/SNYK-JAVA-COMGOOGLEGUAVA-1015415] in com.google.guava:guava@29.0-android") diff --git a/raw-spans-grouper/helm/templates/raw-spans-grouper-config.yaml b/raw-spans-grouper/helm/templates/raw-spans-grouper-config.yaml index ef35843ab..23b67083b 100644 --- a/raw-spans-grouper/helm/templates/raw-spans-grouper-config.yaml +++ b/raw-spans-grouper/helm/templates/raw-spans-grouper-config.yaml @@ -6,44 +6,50 @@ metadata: release: {{ .Release.Name }} data: application.conf: |- - kafka.streams.config = { + # Core configs application.id = raw-spans-to-structured-traces-grouping-job + bootstrap.servers = "{{ .Values.spanNormalizerConfig.kafkaStreamsConfig.bootstrapServers }}" + schema.registry.url = "{{ .Values.spanNormalizerConfig.kafkaStreamsConfig.schemaRegistryUrl }}" + value.subject.name.strategy = "io.confluent.kafka.serializers.subject.TopicRecordNameStrategy" + # Core configs - For applications with state + num.stream.threads = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.numStreamThreads }}" + commit.interval.ms = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.commitIntervalMs }}" group.instance.id = ${?POD_NAME} + cache.max.bytes.buffering = "{{ int .Values.rawSpansGrouperConfig.kafkaStreamsConfig.cacheMaxBytesBuffering }}" + # Common client (prodcuer, consumer, admin) configs + receive.buffer.bytes = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.receiveBufferBytes }}" + send.buffer.bytes = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.sendBufferBytes }}" + # Producer configs + producer.acks = "{{ .Values.spanNormalizerConfig.kafkaStreamsConfig.producerAcks }}" + producer.batch.size = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.producerBatchSize }}" + producer.linger.ms = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.producerLingerMs }}" + producer.compression.type = "{{ .Values.spanNormalizerConfig.kafkaStreamsConfig.producerCompressionType }}" + producer.max.request.size = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.producerMaxRequestSize }}" + producer.buffer.memory = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.producerBufferMemory }}" + # Consumer configs + 
consumer.max.partition.fetch.bytes = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.consumerMaxPartitionFetchBytes }}" + consumer.max.poll.records = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.consumerMaxPollRecords }}" + consumer.session.timeout.ms = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.consumerSessionTimeoutMs }}" + # Changelog topic configs + replication.factor = "{{ int .Values.rawSpansGrouperConfig.kafkaStreamsConfig.replicationFactor }}" + topic.cleanup.policy = "delete,compact" + # RocksDB state store configs state.dir = "/var/data/" - metrics.recording.level = "{{ .Values.rawSpansGrouperConfig.kafka.streams.config.metricsRecordingLevel }}" - num.stream.threads = "{{ .Values.rawSpansGrouperConfig.kafka.streams.config.numStreamThreads }}" - num.standby.replicas = {{ int .Values.rawSpansGrouperConfig.kafka.streams.config.numStandbyReplicas }} - replication.factor = {{ int .Values.rawSpansGrouperConfig.kafka.streams.config.replicationFactor }} - producer.max.request.size = {{ int .Values.rawSpansGrouperConfig.kafka.streams.config.producerMaxRequestSize }} - cache.max.bytes.buffering = {{ int .Values.rawSpansGrouperConfig.kafka.streams.config.cacheMaxBytesBuffering }} - {{- if .Values.rawSpansGrouperConfig.kafka.streams.config.sessionTimeoutMs }} - session.timeout.ms = {{ int .Values.rawSpansGrouperConfig.kafka.streams.config.sessionTimeoutMs }} + rocksdb.block.cache.size = {{ int .Values.rawSpansGrouperConfig.kafkaStreamsConfig.rocksdbBlockCacheSize }} + rocksdb.write.buffer.size = {{ int .Values.rawSpansGrouperConfig.kafkaStreamsConfig.rocksdbWriteBufferSize }} + rocksdb.max.write.buffers = {{ int .Values.rawSpansGrouperConfig.kafkaStreamsConfig.rocksdbMaxWriteBuffers }} + rocksdb.cache.index.and.filter.blocks = {{ .Values.rawSpansGrouperConfig.kafkaStreamsConfig.rocksdbCacheIndexAndFilterBlocks }} + # Exception handler configs + default.production.exception.handler = {{ .Values.rawSpansGrouperConfig.kafkaStreamsConfig.defaultProductionExceptionHandler }} + ignore.production.exception.classes = {{ .Values.rawSpansGrouperConfig.kafkaStreamsConfig.ignoreProductionExceptionClasses }} + # Others + metrics.recording.level = "{{ .Values.spanNormalizerConfig.kafkaStreamsConfig.metricsRecordingLevel }}" + {{- if .Values.spanNormalizerConfig.extraKafkaStreamsConfig }} + {{- range $key,$value := .Values.spanNormalizerConfig.extraKafkaStreamsConfig }} + {{ $key }} = {{ $value }} {{- end }} - {{- if .Values.rawSpansGrouperConfig.kafka.streams.config.commitIntervalMs }} - commit.interval.ms = {{ int .Values.rawSpansGrouperConfig.kafka.streams.config.commitIntervalMs }} {{- end }} - {{- if .Values.rawSpansGrouperConfig.kafka.streams.config.producerBatchSize }} - producer.batch.size = {{ int .Values.rawSpansGrouperConfig.kafka.streams.config.producerBatchSize }} - {{- end }} - {{- if .Values.rawSpansGrouperConfig.kafka.streams.config.producerLingerMs }} - producer.linger.ms = {{ int .Values.rawSpansGrouperConfig.kafka.streams.config.producerLingerMs }} - {{- end }} - default.production.exception.handler = {{ .Values.rawSpansGrouperConfig.kafka.streams.config.defaultProductionExceptionHandler }} - ignore.production.exception.classes = {{ .Values.rawSpansGrouperConfig.kafka.streams.config.ignoreProductionExceptionClasses }} - - topic.cleanup.policy = "delete,compact" - bootstrap.servers = "{{ .Values.rawSpansGrouperConfig.kafka.streams.config.bootstrapServers }}" - - schema.registry.url = "{{ .Values.rawSpansGrouperConfig.kafka.streams.config.schemaRegistryUrl 
}}" - specific.avro.reader = true - - rocksdb.block.cache.size = {{ int .Values.rawSpansGrouperConfig.kafka.streams.config.rocksdbBlockCacheSize }} - rocksdb.write.buffer.size = {{ int .Values.rawSpansGrouperConfig.kafka.streams.config.rocksdbWriteBufferSize }} - rocksdb.max.write.buffers = {{ int .Values.rawSpansGrouperConfig.kafka.streams.config.rocksdbMaxWriteBuffers }} - rocksdb.cache.index.and.filter.blocks = {{ .Values.rawSpansGrouperConfig.kafka.streams.config.rocksdbCacheIndexAndFilterBlocks }} - - value.subject.name.strategy = "io.confluent.kafka.serializers.subject.TopicRecordNameStrategy" } span.groupby.session.window.interval = {{ .Values.rawSpansGrouperConfig.span.groupby.internal }} diff --git a/raw-spans-grouper/helm/values.yaml b/raw-spans-grouper/helm/values.yaml index c50b22dd6..bc6fd250f 100644 --- a/raw-spans-grouper/helm/values.yaml +++ b/raw-spans-grouper/helm/values.yaml @@ -30,7 +30,7 @@ securityContext: {} # This is defined in resources/configs/common/application.conf as service.admin.port containerAdminPort: 8099 -javaOpts: "-XX:InitialRAMPercentage=50.0 -XX:MaxRAMPercentage=75.0" +javaOpts: "-XX:InitialRAMPercentage=50.0 -XX:MaxRAMPercentage=50.0 -XX:MaxDirectMemorySize=128M" livenessProbe: initialDelaySeconds: 10 @@ -47,10 +47,10 @@ resources: # lines, adjust them as necessary, and remove the curly braces after 'resources:'. requests: cpu: 0.2 - memory: 1536Mi + memory: 2Gi limits: cpu: 1.0 - memory: 1536Mi + memory: 2Gi deploymentLabels: app: raw-spans-grouper @@ -105,25 +105,40 @@ prometheus: ########### rawSpansGrouperConfig: name: raw-spans-grouper-config - kafka: - streams: - config: - metricsRecordingLevel: INFO - numStreamThreads: 4 - bootstrapServers: "bootstrap:9092" - schemaRegistryUrl: "http://schema-registry-service:8081" - cacheMaxBytesBuffering: 16777216 # 16MB - rocksdbBlockCacheSize: 33554432 - rocksdbWriteBufferSize: 8388608 - rocksdbMaxWriteBuffers: 2 - rocksdbCacheIndexAndFilterBlocks: true - sessionTimeoutMs: 300000 - producerMaxRequestSize: 10485760 - defaultProductionExceptionHandler: "org.hypertrace.core.kafkastreams.framework.exceptionhandlers.IgnoreProductionExceptionHandler" - ignoreProductionExceptionClasses: "org.apache.kafka.common.errors.RecordTooLargeException" - numStandbyReplicas: 0 # Standby replicas are costly and needed only for applications which are high latency sensitive. - # They will help incase task failovers. Required only on need basis. 
- replicationFactor: 3 + kafkaStreamsConfig: + bootstrapServers: "bootstrap:9092" + schemaRegistryUrl: "http://schema-registry-service:8081" + # Core configs + numStreamThreads: 4 # default = 1 + commitIntervalMs: 30000 # default = 30000 + cacheMaxBytesBuffering: 134217728 # default = 10485760 (10MB) + # Common client (prodcuer, consumer, admin) configs + receiveBufferBytes: 4194304 # default = 32768 (kafka streams default) + sendBufferBytes: 4194304 # default = 131072 (kafka streams default) + # Producer configs + producerAcks: all # default: 1 + producerBatchSize: 524288 # default = 16384 + producerLingerMs: 1000 # default = 100 (kafka streams default) + producerCompressionType: "gzip" # default = none + producerMaxRequestSize: 2097152 # default = 1048576 + producerBufferMemory: 134217728 # default = 33554432 + # Consumer configs + consumerMaxPartitionFetchBytes: 8388608 # default = 1048576 + consumerMaxPollRecords: 1000 # default = 1000 (kafka streams default) + consumerSessionTimeoutMs: 300000 # default = 10000 + # Changelog topic configs + replicationFactor: 3 + # RocksDB state store configs + rocksdbBlockCacheSize: 33554432 + rocksdbWriteBufferSize: 8388608 + rocksdbMaxWriteBuffers: 2 + rocksdbCacheIndexAndFilterBlocks: true + # Exception handler configs + defaultProductionExceptionHandler: "org.hypertrace.core.kafkastreams.framework.exceptionhandlers.IgnoreProductionExceptionHandler" + ignoreProductionExceptionClasses: "org.apache.kafka.common.errors.RecordTooLargeException" + # Others + metricsRecordingLevel: INFO # default = INFO + span: groupby: internal: 30 @@ -147,8 +162,10 @@ kafka-topic-creator: replicationFactor: 3 partitions: 8 configs: - - retention.bytes=4294967296 - - retention.ms=259200000 + - retention.bytes=8589934592 # default = -1 + - retention.ms=86400000 # default = 604800000 (7 days) + - max.message.bytes=10485760 # default = 1048588 + zookeeper: address: zookeeper:2181 imagePullSecrets: [] diff --git a/raw-spans-grouper/raw-spans-grouper/build.gradle.kts b/raw-spans-grouper/raw-spans-grouper/build.gradle.kts index 4fdbbb70b..84868a025 100644 --- a/raw-spans-grouper/raw-spans-grouper/build.gradle.kts +++ b/raw-spans-grouper/raw-spans-grouper/build.gradle.kts @@ -39,7 +39,7 @@ dependencies { implementation("org.hypertrace.core.serviceframework:platform-service-framework:0.1.18") implementation("org.hypertrace.core.serviceframework:platform-metrics:0.1.18") - implementation("org.hypertrace.core.kafkastreams.framework:kafka-streams-framework:0.1.15-SNAPSHOT") + implementation("org.hypertrace.core.kafkastreams.framework:kafka-streams-framework:0.1.15") implementation("com.typesafe:config:1.4.1") implementation("de.javakaffee:kryo-serializers:0.45") implementation("io.confluent:kafka-avro-serializer:5.5.0") diff --git a/span-normalizer/helm/templates/span-normalizer-config.yaml b/span-normalizer/helm/templates/span-normalizer-config.yaml index d8b448e19..dd5df1b02 100644 --- a/span-normalizer/helm/templates/span-normalizer-config.yaml +++ b/span-normalizer/helm/templates/span-normalizer-config.yaml @@ -8,14 +8,34 @@ data: application.conf: |- kafka.streams.config { application.id = jaeger-spans-to-raw-spans-job - metrics.recording.level = "{{ .Values.spanNormalizerConfig.kafka.streams.config.metricsRecordingLevel }}" - num.stream.threads = "{{ .Values.spanNormalizerConfig.kafka.streams.config.numStreamThreads }}" - producer.max.request.size = 10485760 - bootstrap.servers = "{{ .Values.spanNormalizerConfig.kafka.streams.config.bootstrapServers }}" - auto.offset.reset = 
"latest" - auto.commit.interval.ms = 5000 - schema.registry.url = "{{ .Values.spanNormalizerConfig.kafka.streams.config.schemaRegistryUrl }}" + bootstrap.servers = "{{ .Values.spanNormalizerConfig.kafkaStreamsConfig.bootstrapServers }}" + schema.registry.url = "{{ .Values.spanNormalizerConfig.kafkaStreamsConfig.schemaRegistryUrl }}" + # kafka streams config + num.stream.threads = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.numStreamThreads }}" + commit.interval.ms = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.commitIntervalMs }}" + # Common client (prodcuer, consumer, admin) configs + receive.buffer.bytes = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.receiveBufferBytes }}" + send.buffer.bytes = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.sendBufferBytes }}" + # Producer configs + producer.acks = "{{ .Values.spanNormalizerConfig.kafkaStreamsConfig.producerAcks }}" + producer.batch.size = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.producerBatchSize }}" + producer.linger.ms = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.producerLingerMs }}" + producer.compression.type = "{{ .Values.spanNormalizerConfig.kafkaStreamsConfig.producerCompressionType }}" + producer.max.request.size = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.producerMaxRequestSize }}" + producer.buffer.memory = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.producerBufferMemory }}" + # Consumer configs + consumer.max.partition.fetch.bytes = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.consumerMaxPartitionFetchBytes }}" + consumer.max.poll.records = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.consumerMaxPollRecords }}" + consumer.session.timeout.ms = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.consumerSessionTimeoutMs }}" + # Others + metrics.recording.level = "{{ .Values.spanNormalizerConfig.kafkaStreamsConfig.metricsRecordingLevel }}" + {{- if .Values.spanNormalizerConfig.extraKafkaStreamsConfig }} + {{- range $key,$value := .Values.spanNormalizerConfig.extraKafkaStreamsConfig }} + {{ $key }} = {{ $value }} + {{- end }} + {{- end }} } + {{- if hasKey .Values.spanNormalizerConfig "processor" }} processor { {{- if hasKey .Values.spanNormalizerConfig.processor "tenantIdTagKey" }} diff --git a/span-normalizer/helm/values.yaml b/span-normalizer/helm/values.yaml index 7b6e92899..717079715 100644 --- a/span-normalizer/helm/values.yaml +++ b/span-normalizer/helm/values.yaml @@ -28,7 +28,7 @@ nodeLabels: {} # This is defined in resources/configs/span-normalizer/application.conf as service.admin.port containerAdminPort: 8099 -javaOpts: "-XX:InitialRAMPercentage=50.0 -XX:MaxRAMPercentage=75.0" +javaOpts: "-XX:InitialRAMPercentage=50.0 -XX:MaxRAMPercentage=75.0 -XX:MaxDirectMemorySize=128M" livenessProbe: initialDelaySeconds: 10 @@ -39,16 +39,13 @@ readinessProbe: periodSeconds: 5 resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
requests: - cpu: 0.2 - memory: 1536Mi + cpu: 0.1 + memory: 1024Mi limits: - cpu: 1.0 - memory: 1536Mi + cpu: 0.2 + memory: 1024Mi + deploymentLabels: app: span-normalizer @@ -74,13 +71,32 @@ serviceSelectorLabels: ########### spanNormalizerConfig: name: span-normalizer-config - kafka: - streams: - config: - metricsRecordingLevel: INFO - numStreamThreads: 2 - bootstrapServers: "bootstrap:9092" - schemaRegistryUrl: "http://schema-registry-service:8081" + # Important kafka streams configurations which are used in config template goes here. + kafkaStreamsConfig: + bootstrapServers: "bootstrap:9092" + schemaRegistryUrl: "http://schema-registry-service:8081" + # Core config + numStreamThreads: 2 # default = 1 + commitIntervalMs: 30000 # default = 30000 + # Common client (prodcuer, consumer, admin) configs + receiveBufferBytes: 4194304 # default = 32768 (kafka streams default) + sendBufferBytes: 4194304 # default = 131072 (kafka streams default) + # Producer configs + producerAcks: all # default: 1 + producerBatchSize: 524288 # default = 16384 + producerLingerMs: 1000 # default = 100 (kafka streams default) + producerCompressionType: "gzip" # default = none + producerMaxRequestSize: 1048576 # default = 1048576 + producerBufferMemory: 134217728 # default = 33554432 + # Consumer configs + consumerMaxPartitionFetchBytes: 8388608 # default = 1048576 + consumerMaxPollRecords: 1000 # default = 1000 (kafka streams default) + consumerSessionTimeoutMs: 10000 # default = 10000 + # Others + metricsRecordingLevel: INFO # default = INFO + # All other streams config goes here. + # Remove the flower braces and add key: value pair here. + extraKafkaStreamsConfig: {} logConfig: name: span-normalizer-log-appender-config @@ -95,6 +111,7 @@ jmx: enabled: true port: 7022 opts: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.local.only=false -Dcom.sun.management.jmxremote.port=7022 -Dcom.sun.management.jmxremote.rmi.port=7022 -Djava.rmi.server.hostname=127.0.0.1" + prometheus: jmx: enabled: true @@ -115,11 +132,12 @@ kafka-topic-creator: kafka: topics: - name: raw-spans-from-jaeger-spans - replicationFactor: 2 + replicationFactor: 3 partitions: 8 configs: - - retention.bytes=4294967296 - - retention.ms=259200000 + - retention.bytes=8589934592 # default = -1 + - retention.ms=86400000 # default = 604800000 (7 days) + - max.message.bytes=1048588 # default = 1048588 zookeeper: address: zookeeper:2181 imagePullSecrets: [] diff --git a/span-normalizer/span-normalizer/build.gradle.kts b/span-normalizer/span-normalizer/build.gradle.kts index 69d7bb0f3..955a1d397 100644 --- a/span-normalizer/span-normalizer/build.gradle.kts +++ b/span-normalizer/span-normalizer/build.gradle.kts @@ -43,7 +43,7 @@ dependencies { implementation("org.hypertrace.core.datamodel:data-model:0.1.12") implementation("org.hypertrace.core.serviceframework:platform-service-framework:0.1.18") implementation("org.hypertrace.core.serviceframework:platform-metrics:0.1.18") - implementation("org.hypertrace.core.kafkastreams.framework:kafka-streams-framework:0.1.15-SNAPSHOT") + implementation("org.hypertrace.core.kafkastreams.framework:kafka-streams-framework:0.1.15") // Required for the GRPC clients. From e17bd1883e0b88dd4139e1a928ff880f4a8a3a7d Mon Sep 17 00:00:00 2001 From: Laxman Ch Date: Thu, 24 Dec 2020 15:52:23 +0530 Subject: [PATCH 3/8] Fix helm template. 
Failing validation --- .../templates/raw-spans-grouper-config.yaml | 36 +++++++++---------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/raw-spans-grouper/helm/templates/raw-spans-grouper-config.yaml b/raw-spans-grouper/helm/templates/raw-spans-grouper-config.yaml index 23b67083b..8fde7b562 100644 --- a/raw-spans-grouper/helm/templates/raw-spans-grouper-config.yaml +++ b/raw-spans-grouper/helm/templates/raw-spans-grouper-config.yaml @@ -9,28 +9,28 @@ data: kafka.streams.config = { # Core configs application.id = raw-spans-to-structured-traces-grouping-job - bootstrap.servers = "{{ .Values.spanNormalizerConfig.kafkaStreamsConfig.bootstrapServers }}" - schema.registry.url = "{{ .Values.spanNormalizerConfig.kafkaStreamsConfig.schemaRegistryUrl }}" + bootstrap.servers = "{{ .Values.rawSpansGrouperConfig.kafkaStreamsConfig.bootstrapServers }}" + schema.registry.url = "{{ .Values.rawSpansGrouperConfig.kafkaStreamsConfig.schemaRegistryUrl }}" value.subject.name.strategy = "io.confluent.kafka.serializers.subject.TopicRecordNameStrategy" # Core configs - For applications with state - num.stream.threads = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.numStreamThreads }}" - commit.interval.ms = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.commitIntervalMs }}" + num.stream.threads = "{{ int .Values.rawSpansGrouperConfig.kafkaStreamsConfig.numStreamThreads }}" + commit.interval.ms = "{{ int .Values.rawSpansGrouperConfig.kafkaStreamsConfig.commitIntervalMs }}" group.instance.id = ${?POD_NAME} cache.max.bytes.buffering = "{{ int .Values.rawSpansGrouperConfig.kafkaStreamsConfig.cacheMaxBytesBuffering }}" # Common client (prodcuer, consumer, admin) configs - receive.buffer.bytes = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.receiveBufferBytes }}" - send.buffer.bytes = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.sendBufferBytes }}" + receive.buffer.bytes = "{{ int .Values.rawSpansGrouperConfig.kafkaStreamsConfig.receiveBufferBytes }}" + send.buffer.bytes = "{{ int .Values.rawSpansGrouperConfig.kafkaStreamsConfig.sendBufferBytes }}" # Producer configs - producer.acks = "{{ .Values.spanNormalizerConfig.kafkaStreamsConfig.producerAcks }}" - producer.batch.size = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.producerBatchSize }}" - producer.linger.ms = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.producerLingerMs }}" - producer.compression.type = "{{ .Values.spanNormalizerConfig.kafkaStreamsConfig.producerCompressionType }}" - producer.max.request.size = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.producerMaxRequestSize }}" - producer.buffer.memory = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.producerBufferMemory }}" + producer.acks = "{{ .Values.rawSpansGrouperConfig.kafkaStreamsConfig.producerAcks }}" + producer.batch.size = "{{ int .Values.rawSpansGrouperConfig.kafkaStreamsConfig.producerBatchSize }}" + producer.linger.ms = "{{ int .Values.rawSpansGrouperConfig.kafkaStreamsConfig.producerLingerMs }}" + producer.compression.type = "{{ .Values.rawSpansGrouperConfig.kafkaStreamsConfig.producerCompressionType }}" + producer.max.request.size = "{{ int .Values.rawSpansGrouperConfig.kafkaStreamsConfig.producerMaxRequestSize }}" + producer.buffer.memory = "{{ int .Values.rawSpansGrouperConfig.kafkaStreamsConfig.producerBufferMemory }}" # Consumer configs - consumer.max.partition.fetch.bytes = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.consumerMaxPartitionFetchBytes }}" - consumer.max.poll.records 
= "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.consumerMaxPollRecords }}" - consumer.session.timeout.ms = "{{ int .Values.spanNormalizerConfig.kafkaStreamsConfig.consumerSessionTimeoutMs }}" + consumer.max.partition.fetch.bytes = "{{ int .Values.rawSpansGrouperConfig.kafkaStreamsConfig.consumerMaxPartitionFetchBytes }}" + consumer.max.poll.records = "{{ int .Values.rawSpansGrouperConfig.kafkaStreamsConfig.consumerMaxPollRecords }}" + consumer.session.timeout.ms = "{{ int .Values.rawSpansGrouperConfig.kafkaStreamsConfig.consumerSessionTimeoutMs }}" # Changelog topic configs replication.factor = "{{ int .Values.rawSpansGrouperConfig.kafkaStreamsConfig.replicationFactor }}" topic.cleanup.policy = "delete,compact" @@ -44,9 +44,9 @@ data: default.production.exception.handler = {{ .Values.rawSpansGrouperConfig.kafkaStreamsConfig.defaultProductionExceptionHandler }} ignore.production.exception.classes = {{ .Values.rawSpansGrouperConfig.kafkaStreamsConfig.ignoreProductionExceptionClasses }} # Others - metrics.recording.level = "{{ .Values.spanNormalizerConfig.kafkaStreamsConfig.metricsRecordingLevel }}" - {{- if .Values.spanNormalizerConfig.extraKafkaStreamsConfig }} - {{- range $key,$value := .Values.spanNormalizerConfig.extraKafkaStreamsConfig }} + metrics.recording.level = "{{ .Values.rawSpansGrouperConfig.kafkaStreamsConfig.metricsRecordingLevel }}" + {{- if .Values.rawSpansGrouperConfig.extraKafkaStreamsConfig }} + {{- range $key,$value := .Values.rawSpansGrouperConfig.extraKafkaStreamsConfig }} {{ $key }} = {{ $value }} {{- end }} {{- end }} From effa62ae98dfa5c1442e22c73372a7b39fa64818 Mon Sep 17 00:00:00 2001 From: Ronak Date: Thu, 24 Dec 2020 18:18:51 +0530 Subject: [PATCH 4/8] fixing e2e test due to aliasing --- .github/workflows/hypertrace-ingester/docker-compose.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/hypertrace-ingester/docker-compose.yml b/.github/workflows/hypertrace-ingester/docker-compose.yml index 6ce3e55f4..09833da8b 100644 --- a/.github/workflows/hypertrace-ingester/docker-compose.yml +++ b/.github/workflows/hypertrace-ingester/docker-compose.yml @@ -82,6 +82,8 @@ services: default: aliases: - pinot-controller + - pinot-server + - pinot-broker cpu_shares: 2048 depends_on: kafka-zookeeper: From b0e731e0690fc8b435e441f1baaa4d156d6b0d93 Mon Sep 17 00:00:00 2001 From: Laxman Ch Date: Mon, 28 Dec 2020 10:50:46 +0530 Subject: [PATCH 5/8] Review comments fix --- raw-spans-grouper/helm/values.yaml | 2 +- span-normalizer/helm/values.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/raw-spans-grouper/helm/values.yaml b/raw-spans-grouper/helm/values.yaml index bc6fd250f..3f0fe4af5 100644 --- a/raw-spans-grouper/helm/values.yaml +++ b/raw-spans-grouper/helm/values.yaml @@ -163,7 +163,7 @@ kafka-topic-creator: partitions: 8 configs: - retention.bytes=8589934592 # default = -1 - - retention.ms=86400000 # default = 604800000 (7 days) + - retention.ms=43200000 # default = 604800000 (7 days) - max.message.bytes=10485760 # default = 1048588 zookeeper: diff --git a/span-normalizer/helm/values.yaml b/span-normalizer/helm/values.yaml index 717079715..6e2e36b1e 100644 --- a/span-normalizer/helm/values.yaml +++ b/span-normalizer/helm/values.yaml @@ -136,7 +136,7 @@ kafka-topic-creator: partitions: 8 configs: - retention.bytes=8589934592 # default = -1 - - retention.ms=86400000 # default = 604800000 (7 days) + - retention.ms=43200000 # default = 604800000 (7 days) - max.message.bytes=1048588 # default = 1048588 zookeeper: address: 
zookeeper:2181 From 50b421ea5fa0165ddb6effb054e3ea31b7a3cb7a Mon Sep 17 00:00:00 2001 From: Laxman Ch <60599147+laxmanchekka@users.noreply.github.com> Date: Tue, 29 Dec 2020 22:32:36 +0530 Subject: [PATCH 6/8] Update span-normalizer/helm/values.yaml MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: José Carlos Chávez --- span-normalizer/helm/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/span-normalizer/helm/values.yaml b/span-normalizer/helm/values.yaml index 6e2e36b1e..0f80ea447 100644 --- a/span-normalizer/helm/values.yaml +++ b/span-normalizer/helm/values.yaml @@ -78,7 +78,7 @@ spanNormalizerConfig: # Core config numStreamThreads: 2 # default = 1 commitIntervalMs: 30000 # default = 30000 - # Common client (prodcuer, consumer, admin) configs + # Common client (producer, consumer, admin) configs receiveBufferBytes: 4194304 # default = 32768 (kafka streams default) sendBufferBytes: 4194304 # default = 131072 (kafka streams default) # Producer configs From 16fc0b9a0a730a64359e9b2d66652baa62ca0dc0 Mon Sep 17 00:00:00 2001 From: Laxman Ch <60599147+laxmanchekka@users.noreply.github.com> Date: Tue, 29 Dec 2020 22:38:13 +0530 Subject: [PATCH 7/8] Apply suggestions from code review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: José Carlos Chávez --- hypertrace-trace-enricher/helm/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hypertrace-trace-enricher/helm/values.yaml b/hypertrace-trace-enricher/helm/values.yaml index 53caa9d0f..207103288 100644 --- a/hypertrace-trace-enricher/helm/values.yaml +++ b/hypertrace-trace-enricher/helm/values.yaml @@ -72,7 +72,7 @@ traceEnricherConfig: # Core config numStreamThreads: 2 # default = 1 commitIntervalMs: 30000 # default = 30000 - # Common client (prodcuer, consumer, admin) configs + # Common client (producer, consumer, admin) configs receiveBufferBytes: 4194304 # default = 32768 (kafka streams default) sendBufferBytes: 4194304 # default = 131072 (kafka streams default) # Producer configs From 22133400b3d30979ef9653908127438323bfbbc5 Mon Sep 17 00:00:00 2001 From: Laxman Ch Date: Wed, 30 Dec 2020 11:46:30 +0530 Subject: [PATCH 8/8] Build fix --- hypertrace-ingester/build.gradle.kts | 1 + 1 file changed, 1 insertion(+) diff --git a/hypertrace-ingester/build.gradle.kts b/hypertrace-ingester/build.gradle.kts index 16055a343..43348f9eb 100644 --- a/hypertrace-ingester/build.gradle.kts +++ b/hypertrace-ingester/build.gradle.kts @@ -31,6 +31,7 @@ dependencies { implementation("org.hypertrace.core.datamodel:data-model:0.1.12") implementation("org.hypertrace.core.viewgenerator:view-generator-framework:0.1.21") implementation("com.typesafe:config:1.4.0") + implementation("org.apache.commons:commons-lang3:3.11") implementation(project(":span-normalizer:span-normalizer")) implementation(project(":raw-spans-grouper:raw-spans-grouper"))
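A note on the constraints pattern that recurs through the build files above: a Gradle dependency constraint forces the version of a library only when something else pulls it in transitively; on its own it adds nothing to the classpath. A minimal sketch, using the guava coordinates and Snyk text from the hunks above (the surrounding module is hypothetical):

    // build.gradle.kts - minimal sketch of the Snyk-fix pattern.
    // The constraint upgrades guava only if another dependency drags it in
    // (here it arrives transitively as guava@29.0-android); it does not
    // declare guava as a direct dependency.
    dependencies {
        constraints {
            implementation("com.google.guava:guava:30.1-jre") {
                because(
                    "Information Disclosure [Medium Severity]" +
                        "[https://snyk.io/vuln/SNYK-JAVA-COMGOOGLEGUAVA-1015415]" +
                        " in com.google.guava:guava@29.0-android"
                )
            }
        }
    }

Running ./gradlew dependencyInsight --dependency guava should then show 30.1-jre selected "by constraint", with the because(...) text recorded as the reason.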
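The extraKafkaStreamsConfig hook added to each chart feeds arbitrary extra properties through the range loop in the config templates. A small sketch of an override file, assuming the span-normalizer chart layout above (the two keys below are illustrative, not part of the charts):

    # values override - hypothetical keys, shown only to exercise the range loop
    spanNormalizerConfig:
      extraKafkaStreamsConfig:
        replication.factor: 3
        max.task.idle.ms: 500

With the template above, each pair renders as a plain line inside kafka.streams.config, e.g. replication.factor = 3, and helm template <release-name> <chart-dir> -f <override-file> is a quick way to inspect the resulting ConfigMap before deploying.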
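On the e2e fix in patch 4: the added network aliases make the single all-in-one Pinot container answer DNS lookups for all three component names, so anything on the compose network that dials pinot-server or pinot-broker reaches the same container as pinot-controller. A sketch of the shape of the resulting service entry (the service and image names here are assumptions; the hunk does not show them):

    services:
      pinot:                      # one container running controller, broker and server
        image: apachepinot/pinot  # hypothetical, not taken from the compose file
        networks:
          default:
            aliases:
              - pinot-controller
              - pinot-server
              - pinot-broker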
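For reference, the tuned topic settings in the values files above read as follows in concrete units (retention.bytes is a per-partition cap; the "default" comments in the hunks refer to Kafka broker defaults):

    - retention.bytes=8589934592   # 8 GiB per partition
    - retention.ms=43200000        # 12 hours (patch 5 tightens this from the 24 hours set in patch 2)
    - max.message.bytes=10485760   # 10 MiB, matching the 10 MiB producer.max.request.size used upstream

The raw-spans-from-jaeger-spans topic alone keeps max.message.bytes=1048588, i.e. the broker default of roughly 1 MiB, while the trace topics allow 10 MiB records.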