From 920705b4c384a3ae58a41d675340f6e9ba3b06c2 Mon Sep 17 00:00:00 2001 From: Paul Parkinson Date: Wed, 23 Mar 2022 11:36:57 -0400 Subject: [PATCH 1/7] add list of default metrics in readme --- README.md | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/README.md b/README.md index 17431fa6..7dd241f8 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,34 @@ This distribution contains scripts and code for exporting metrics and logs from v1 (preview) - contains export of key database metrics to Prometheus and suggested Grafana dashboard +The following metrics are currently exposed by default. +
+- oracledb_exporter_last_scrape_duration_seconds
+- oracledb_exporter_last_scrape_error
+- oracledb_exporter_scrapes_total
+- oracledb_up
+- oracledb_activity_execute_count
+- oracledb_activity_parse_count_total
+- oracledb_activity_user_commits
+- oracledb_activity_user_rollbacks
+- oracledb_sessions_activity
+- oracledb_wait_time_application
+- oracledb_wait_time_commit
+- oracledb_wait_time_concurrency
+- oracledb_wait_time_configuration
+- oracledb_wait_time_network
+- oracledb_wait_time_other
+- oracledb_wait_time_scheduler
+- oracledb_wait_time_system_io
+- oracledb_wait_time_user_io
+- oracledb_tablespace_bytes
+- oracledb_tablespace_max_bytes
+- oracledb_tablespace_free
+- oracledb_tablespace_used_percent
+- oracledb_process_count
+- oracledb_resource_current_utilization
+- oracledb_resource_limit_value
+ ## Table of Contents - [Unified App Dev Monitoring with Oracle Database](#unified-app-dev-monitoring-with-oracle-database) From 56903feb6cd2a111fba7b4049a011f2e12e94639 Mon Sep 17 00:00:00 2001 From: Paul Parkinson Date: Sat, 18 Jun 2022 20:13:35 -0400 Subject: [PATCH 2/7] readme updates --- README.md | 35 +++++++++++++++++++---------------- examples/README.md | 2 +- 2 files changed, 20 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index 53309577..02f31182 100644 --- a/README.md +++ b/README.md @@ -33,32 +33,35 @@ Docker image can be pushed to $DOCKER_REGISTRY using the following. `./push.sh` -### Running +### Run Ensure the environment variable DATA_SOURCE_NAME (and TNS_ADMIN if appropriate) is set correctly before starting. -DATA_SOURCE_NAME should be in Oracle EZCONNECT format: - -19c Oracle Client supports enhanced EZCONNECT, you are able to failover to standby DB or gather some heavy metrics from active standby DB and specify some additional parameters. Within 19c client you are able to connect 12c primary/standby DB too :) For Example: ```bash -# export Oracle location: -export DATA_SOURCE_NAME=system/password@oracle-sid -# or using a complete url: -export DATA_SOURCE_NAME=user/password@//myhost:1521/service -# 19c client for primary/standby configuration -export DATA_SOURCE_NAME=user/password@//primaryhost:1521,standbyhost:1521/service -# 19c client for primary/standby configuration with options -export DATA_SOURCE_NAME=user/password@//primaryhost:1521,standbyhost:1521/service?connect_timeout=5&transport_connect_timeout=3&retry_count=3 -# 19c client for ASM instance connection (requires SYSDBA) -export DATA_SOURCE_NAME=user/password@//primaryhost:1521,standbyhost:1521/+ASM?as=sysdba -# Then run the exporter -/path/to/binary/oracle-db-monitoring-exporter --log.level error --web.listen-address 0.0.0.0:9161 +export DATA_SOURCE_NAME="%USER%/$(dbpassword)@%PDB_NAME%_tp" +``` + +Kubernetes Secrets, etc. can of course be used to store passwords.
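As one concrete sketch of the Kubernetes Secrets approach (the secret name `dbuser` and key `dbpassword` below are borrowed from the example deployment added later in this patch series; the user and service names are placeholders):

```bash
# store the database password in a Kubernetes secret
kubectl create secret generic dbuser --from-literal=dbpassword='mySecretPassword'

# a deployment can then inject the key as the dbpassword env var and compose the connect string, e.g.
#   DATA_SOURCE_NAME="myuser/$(dbpassword)@mypdb_tp"
```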
+
+OCI Vault support for storing/accessing password values is built into the exporters and is enabled simply by setting the OCI_REGION and VAULT_SECRET_OCID variables.
+
+For Example:
+
+```bash
+export OCI_REGION="us-ashburn-1"
+export VAULT_SECRET_OCID="ocid..."
``` The only other required environment variable is DEFAULT_METRICS value which is set to the location of the config file. +For Example:
+
+```bash
+export DEFAULT_METRICS="/msdataworkshop/observability/db-metrics-%EXPORTER_NAME%-exporter-metrics.toml"
+```
+ Run using Java: `java -jar target/observability-exporter-0.1.0.jar` diff --git a/examples/README.md b/examples/README.md index 49440d70..3e262358 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,7 +1,7 @@ # Observability Exporter Example -Please refer to the Unified Observability in Grafana with converged Oracle Database Workshop at http://bit.ly/unifiedobservability and its corresponding repos https://github.com/oracle/microservices-datadriven/tree/main/grabdish/observability for complete examples. +Please refer to the Unified Observability in Grafana with converged Oracle Database Workshop at http://bit.ly/unifiedobservability and its corresponding repos https://github.com/oracle/microservices-datadriven/tree/main/grabdish/observability/db-metrics-exporter for complete examples. More examples will be provided here in the near future. From 24d22c6517c3e311e767365764fdc4faceb69a9f Mon Sep 17 00:00:00 2001 From: Paul Parkinson Date: Thu, 30 Jun 2022 17:40:29 -0400 Subject: [PATCH 3/7] various updates --- README.md | 2 + examples/README.md | 19 ----- examples/deploy.sh | 62 -------------- ...lity-exporter-example-service-monitor.yaml | 14 ---- ...bservability-exporter-example-service.yaml | 14 ---- .../observability/ObservabilityExporter.java | 2 + .../observability/logs/LogsExporter.java | 34 +++++--- .../metrics/MetricsExporter.java | 55 ++++++------ .../tracing/TracingExporter.java | 83 +++++++++++-------- 9 files changed, 104 insertions(+), 181 deletions(-) delete mode 100755 examples/deploy.sh delete mode 100644 examples/observability-exporter-example-service-monitor.yaml delete mode 100644 examples/observability-exporter-example-service.yaml diff --git a/README.md b/README.md index 02f31182..9957d61c 100644 --- a/README.md +++ b/README.md @@ -15,6 +15,8 @@ All three exporters (metrics, log, and trace) can be configured in the same file The old version of the metrics exporter can be found in the [old implementation branch][old implementation branch] and the new metrics exporter implementation is backward compatible such that the same configuration for both database connection and metrics definition can be used. +Users are encouraged to open issues and enhancement requests against this GitHub repo and to ask any questions. We will actively work on them as we continue development of the exporters. + ### Build Build without running tests using the following. diff --git a/examples/README.md b/examples/README.md index 3e262358..ae4198a4 100644 --- a/examples/README.md +++ b/examples/README.md @@ -4,22 +4,3 @@ Please refer to the Unified Observability in Grafana with converged Oracle Database Workshop at http://bit.ly/unifiedobservability and its corresponding repos https://github.com/oracle/microservices-datadriven/tree/main/grabdish/observability/db-metrics-exporter for complete examples. More examples will be provided here in the near future. - -# Metrics exporter - -1. Pre-req.
Run setup for the GrabDish workshop including observability lab steps to install and configure Grafana and Prometheus -2. Run `./deploy.sh` in this directory -3. `curl http://observability-exporter-example:8080/metrics` from within cluster to see Prometheus stats -4. View same stats from within Grafana by loading AQ dashboard - -The same can be done above for TEW by simply replace `aq` with `teq` in the deployment and configmap yamls - -Troubleshooting... - -kubectl port-forward prometheus-stable-kube-prometheus-sta-prometheus-0 -n msdataworkshop 9090:9090 - -# Logs exporter - -# Trace exporter - -# Combined Metrics, Logs, and Trace exporter \ No newline at end of file diff --git a/examples/deploy.sh b/examples/deploy.sh deleted file mode 100755 index 182328b2..00000000 --- a/examples/deploy.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/bin/bash -## Copyright (c) 2021 Oracle and/or its affiliates. -## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ - -SCRIPT_DIR=$(dirname $0) - -if [ -z "$DOCKER_REGISTRY" ]; then - echo "DOCKER_REGISTRY not set. Will get it with state_get" - export DOCKER_REGISTRY=$(state_get DOCKER_REGISTRY) -fi - -if [ -z "$DOCKER_REGISTRY" ]; then - echo "Error: DOCKER_REGISTRY env variable needs to be set!" - exit 1 -fi - -if [ -z "$ORDER_DB_NAME" ]; then - echo "ORDER_DB_NAME not set. Will get it with state_get" - export ORDER_DB_NAME=$(state_get ORDER_DB_NAME) -fi - -if [ -z "$ORDER_DB_NAME" ]; then - echo "Error: ORDER_DB_NAME env variable needs to be set!" - exit 1 -fi - -echo create configmap for db-metrics-banka-exporter... -kubectl delete configmap db-metrics-banka-exporter-config -n msdataworkshop -kubectl create configmap db-metrics-banka-exporter-config --from-file=db-metrics-banka-exporter-metrics.toml -n msdataworkshop -echo -echo create db-metrics-exporter deployment and service... 
-export CURRENTTIME=generated -#export CURRENTTIME=$( date '+%F_%H:%M:%S' ) -echo CURRENTTIME is $CURRENTTIME ...this will be appended to generated deployment yaml - -cp db-metrics-exporter-deployment.yaml db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml - -#sed -e "s|%DOCKER_REGISTRY%|${DOCKER_REGISTRY}|g" db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml > /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -#mv -- /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -sed -e "s|%EXPORTER_NAME%|example|g" db-metrics-exporter-banka-deployment-${CURRENTTIME}.yaml > /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -mv -- /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -sed -e "s|%PDB_NAME%|${ORDER_DB_NAME}|g" db-metrics-exporter-banka-deployment-${CURRENTTIME}.yaml > /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -mv -- /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -sed -e "s|%USER%|aquser|g" db-metrics-exporter-banka-deployment-${CURRENTTIME}.yaml > /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -mv -- /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -sed -e "s|%db-wallet-secret%|order-db-tns-admin-secret|g" db-metrics-exporter-banka-deployment-${CURRENTTIME}.yaml > /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -mv -- /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -#sed -e "s|${OCI_REGION-}|${OCI_REGION}|g" db-metrics-exporter-banka-deployment-${CURRENTTIME}.yaml > /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -#mv -- /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -#sed -e "s|${VAULT_SECRET_OCID-}|${VAULT_SECRET_OCID}|g" db-metrics-exporter-banka-deployment-${CURRENTTIME}.yaml > /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml -#mv -- /tmp/db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml db-metrics-exporter-banka-deployment-$CURRENTTIME.yaml - - -kubectl delete configmap observability-exporter-example-config -n msdataworkshop - -kubectl create configmap observability-exporter-example-config --from-file=aq-metrics.toml -n msdataworkshop - -kubectl apply -f observability-exporter-example-deployment-test.yaml -n msdataworkshop - -kubectl apply -f observability-exporter-example-service.yaml -n msdataworkshop - -kubectl apply -f observability-exporter-example-service-monitor.yaml -n msdataworkshop diff --git a/examples/observability-exporter-example-service-monitor.yaml b/examples/observability-exporter-example-service-monitor.yaml deleted file mode 100644 index 4fda4635..00000000 --- a/examples/observability-exporter-example-service-monitor.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: prometheus-observability-exporter-example - labels: - app: observability-exporter-example - release: stable -spec: - endpoints: - - interval: 5s - port: metrics - selector: - matchLabels: - app: observability-exporter-example \ No newline at end of file diff --git a/examples/observability-exporter-example-service.yaml b/examples/observability-exporter-example-service.yaml deleted file mode 100644 index c4479d89..00000000 --- 
a/examples/observability-exporter-example-service.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: observability-exporter-example - labels: - app: observability-exporter-example -spec: - type: NodePort - ports: - - port: 8080 - name: metrics - targetPort: 8080 - selector: - app: observability-exporter-example diff --git a/src/main/java/oracle/observability/ObservabilityExporter.java b/src/main/java/oracle/observability/ObservabilityExporter.java index 741edf8a..f0209c25 100644 --- a/src/main/java/oracle/observability/ObservabilityExporter.java +++ b/src/main/java/oracle/observability/ObservabilityExporter.java @@ -26,6 +26,8 @@ public class ObservabilityExporter { public String VAULT_SECRET_OCID = System.getenv("VAULT_SECRET_OCID"); //eg ocid.... public String OCI_CONFIG_FILE = System.getenv("OCI_CONFIG_FILE"); //eg "~/.oci/config" public String OCI_PROFILE = System.getenv("OCI_PROFILE"); //eg "DEFAULT" + public static final String CONTEXT = "context"; + public static final String REQUEST = "request"; PoolDataSource observabilityDB; public PoolDataSource getPoolDataSource() throws SQLException { diff --git a/src/main/java/oracle/observability/logs/LogsExporter.java b/src/main/java/oracle/observability/logs/LogsExporter.java index 7eb20120..2b8890c0 100644 --- a/src/main/java/oracle/observability/logs/LogsExporter.java +++ b/src/main/java/oracle/observability/logs/LogsExporter.java @@ -18,12 +18,16 @@ @RestController public class LogsExporter extends ObservabilityExporter implements Runnable { - private static final org.slf4j.Logger LOG = LoggerFactory.getLogger(LogsExporter.class); + private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(LogsExporter.class); + public static final String TIMESTAMPFIELD = "timestampfield"; + public static final String LOG = "log"; public String LOG_INTERVAL = System.getenv("LOG_INTERVAL"); // "30s" private int logInterval = 30; List lastLogged = new ArrayList<>(); private java.sql.Timestamp alertLogQueryLastLocalDateTime; + private int consecutiveExceptionCount = 0; //used to backoff todo should be a finer/log entry level rather than global + @PostConstruct public void init() throws Exception { @@ -34,15 +38,17 @@ public void init() throws Exception { public void run() { while (true) { try { - LOG.debug("LogExporter default metrics from:" + DEFAULT_METRICS); + Thread.sleep(consecutiveExceptionCount * 1000); + Thread.sleep(logInterval * 1000); + LOGGER.debug("LogsExporter default metrics from:" + DEFAULT_METRICS); if(LOG_INTERVAL!=null && !LOG_INTERVAL.trim().equals("")) logInterval = Integer.getInteger(LOG_INTERVAL); - LOG.debug("LogExporter logInterval:" + logInterval); + LOGGER.debug("LogsExporter logInterval:" + logInterval); File tomlfile = new File(DEFAULT_METRICS); TomlMapper mapper = new TomlMapper(); JsonNode jsonNode = mapper.readerFor(LogsExporterConfigEntry.class).readTree(new FileInputStream(tomlfile)); - JsonNode log = jsonNode.get("log"); + JsonNode log = jsonNode.get(LOG); if(log == null || log.isEmpty()) { - LOG.info("No logs records configured"); + LOGGER.info("No logs records configured"); return; } Iterator logs = log.iterator(); @@ -50,10 +56,15 @@ public void run() { try (Connection connection = getPoolDataSource().getConnection()) { while (logs.hasNext()) { //for each "log" entry in toml/config... JsonNode next = logs.next(); - String request = next.get("request").asText(); // the sql query - LOG.debug("DBLogsExporter. 
request:" + request); - String timestampfield = next.get("timestampfield").asText(); // eg ORIGINATING_TIMESTAMP - LOG.debug("DBLogsExporter. timestampfield:" + timestampfield); + String request = next.get(REQUEST).asText(); // the sql query + LOGGER.debug("LogsExporter request:" + request); + JsonNode timestampfieldNode = next.get(TIMESTAMPFIELD); + if (timestampfieldNode==null) { + LOGGER.warn("LogsExporter entry does not contain `timestampfield' value request:" + request); + continue; + } + String timestampfield = timestampfieldNode.asText(); // eg ORIGINATING_TIMESTAMP + LOGGER.debug("LogsExporter timestampfield:" + timestampfield); PreparedStatement statement = connection.prepareStatement( alertLogQueryLastLocalDateTime == null ? request : request + " WHERE " + timestampfield + " > ?"); if(alertLogQueryLastLocalDateTime!=null) statement.setTimestamp(1, alertLogQueryLastLocalDateTime); @@ -82,10 +93,11 @@ public void run() { } } lastLogged = currentLogged; + consecutiveExceptionCount = 0; } - Thread.sleep(logInterval * 1000); } catch (Exception e) { - throw new RuntimeException(e); + consecutiveExceptionCount++; + LOGGER.warn("LogsExporter.processMetric exception:" + e); } } } diff --git a/src/main/java/oracle/observability/metrics/MetricsExporter.java b/src/main/java/oracle/observability/metrics/MetricsExporter.java index 5ba606e6..5eb87e16 100644 --- a/src/main/java/oracle/observability/metrics/MetricsExporter.java +++ b/src/main/java/oracle/observability/metrics/MetricsExporter.java @@ -24,13 +24,19 @@ @RestController public class MetricsExporter extends ObservabilityExporter { + private static final Logger LOGGER = LoggerFactory.getLogger(MetricsExporter.class); + public static final String UP = "up"; + public static final String METRICSTYPE = "metricstype"; + public static final String METRICSDESC = "metricsdesc"; + public static final String LABELS = "labels"; + public static final String IGNOREZERORESULT = "ignorezeroresult"; + public static final String FALSE = "false"; public String LISTEN_ADDRESS = System.getenv("LISTEN_ADDRESS"); // ":9161" public String TELEMETRY_PATH = System.getenv("TELEMETRY_PATH"); // "/metrics" //Interval between each scrape. Default is to scrape on collect requests. 
scrape.interval public String SCRAPE_INTERVAL = System.getenv("scrape.interval"); // "0s" public static final String ORACLEDB_METRIC_PREFIX = "oracledb_"; Map gaugeMap = new HashMap<>(); - private static final Logger LOG = LoggerFactory.getLogger(MetricsExporter.class); /** * The endpoint that prometheus will scrape @@ -54,7 +60,7 @@ private void processMetrics() throws IOException, SQLException { JsonNode jsonNode = mapper.readerFor(MetricsExporterConfigEntry.class).readTree(new FileInputStream(tomlfile)); JsonNode metric = jsonNode.get("metric"); if(metric == null || metric.isEmpty()) { - LOG.info("No logs records configured"); + LOGGER.info("No logs records configured"); return; } Iterator metrics = metric.iterator(); @@ -65,11 +71,11 @@ private void processMetrics() throws IOException, SQLException { processMetric(connection, metrics); } } finally { - Gauge gauge = gaugeMap.get(ORACLEDB_METRIC_PREFIX + "up"); + Gauge gauge = gaugeMap.get(ORACLEDB_METRIC_PREFIX + UP); if (gauge == null) { - Gauge upgauge = Gauge.build().name(ORACLEDB_METRIC_PREFIX + "up").help("Whether the Oracle database server is up.").register(); + Gauge upgauge = Gauge.build().name(ORACLEDB_METRIC_PREFIX + UP).help("Whether the Oracle database server is up.").register(); upgauge.set(isConnectionSuccessful); - gaugeMap.put(ORACLEDB_METRIC_PREFIX + "up", upgauge); + gaugeMap.put(ORACLEDB_METRIC_PREFIX + UP, upgauge); } else gauge.set(isConnectionSuccessful); } } @@ -87,10 +93,9 @@ private void processMetrics() throws IOException, SQLException { */ private void processMetric(Connection connection, Iterator metric) { JsonNode next = metric.next(); - //todo ignore case - String context = next.get("context").asText(); // eg context = "teq" - String metricsType = next.get("metricstype") == null ? "" :next.get("metricstype").asText(); - JsonNode metricsdescNode = next.get("metricsdesc"); + String context = next.get(CONTEXT).asText(); // eg context = "teq" + String metricsType = next.get(METRICSTYPE) == null ? "" :next.get(METRICSTYPE).asText(); + JsonNode metricsdescNode = next.get(METRICSDESC); // eg metricsdesc = { enqueued_msgs = "Total enqueued messages.", dequeued_msgs = "Total dequeued messages.", remained_msgs = "Total remained messages."} Iterator> metricsdescIterator = metricsdescNode.fields(); Map metricsDescMap = new HashMap<>(); @@ -98,19 +103,19 @@ private void processMetric(Connection connection, Iterator metric) { Map.Entry metricsdesc = metricsdescIterator.next(); metricsDescMap.put(metricsdesc.getKey(), metricsdesc.getValue().asText()); } - LOG.debug("context:" + context); + LOGGER.debug("context:" + context); String[] labelNames = new String[0]; - if (next.get("labels") != null) { - int size = next.get("labels").size(); - Iterator labelIterator = next.get("labels").iterator(); + if (next.get(LABELS) != null) { + int size = next.get(LABELS).size(); + Iterator labelIterator = next.get(LABELS).iterator(); labelNames = new String[size]; for (int i = 0; i < size; i++) { labelNames[i] = labelIterator.next().asText(); } - LOG.debug("\n"); + LOGGER.debug("\n"); } - String request = next.get("request").asText(); // the sql query - String ignorezeroresult = next.get("ignorezeroresult") == null ? "false" : next.get("ignorezeroresult").asText(); //todo, currently defaults to true + String request = next.get(REQUEST).asText(); // the sql query + String ignorezeroresult = next.get(IGNOREZERORESULT) == null ? 
FALSE : next.get(IGNOREZERORESULT).asText(); //todo, currently defaults to true ResultSet resultSet; try { resultSet = connection.prepareStatement(request).executeQuery(); @@ -118,7 +123,7 @@ private void processMetric(Connection connection, Iterator metric) { translateQueryToPrometheusMetric(context, metricsDescMap, labelNames, resultSet); } } catch(SQLException e) { //this can be due to table not existing etc. - LOG.warn("DBMetricsExporter.processMetric during:" + request + " exception:" + e); + LOGGER.warn("MetricsExporter.processMetric during:" + request + " exception:" + e); return; } } @@ -127,7 +132,7 @@ private void translateQueryToPrometheusMetric(String context, Map sqlQueryResults = + Map sqlQueryResults = extractGaugesAndLabelValues(context, metricsDescMap, labelNames, resultSet, labelValues, resultSet.getMetaData().getColumnCount()); setLabelValues(context, labelNames, labelValues, sqlQueryResults.entrySet().iterator()); } @@ -142,10 +147,10 @@ private void translateQueryToPrometheusMetric(String context, Map extractGaugesAndLabelValues( + private Map extractGaugesAndLabelValues( String context, Map metricsDescMap, String[] labelNames, ResultSet resultSet, String[] labelValues, int columnCount) throws SQLException { - Map sqlQueryResults = new HashMap<>(); + Map sqlQueryResults = new HashMap<>(); String columnName; String columnTypeName; for (int i = 0; i < columnCount; i++) { //for each column... @@ -154,9 +159,9 @@ private Map extractGaugesAndLabelValues( if (columnTypeName.equals("VARCHAR2")) //. typename is 2/NUMBER or 12/VARCHAR2 ; else - sqlQueryResults.put(resultSet.getMetaData().getColumnName(i + 1), resultSet.getInt(i + 1)); + sqlQueryResults.put(resultSet.getMetaData().getColumnName(i + 1), resultSet.getLong(i + 1)); String gaugeName = ORACLEDB_METRIC_PREFIX + context + "_" + columnName; - LOG.debug("---gaugeName:" + gaugeName); + LOGGER.debug("---gaugeName:" + gaugeName); Gauge gauge = gaugeMap.get(gaugeName); if (gauge == null) { if(metricsDescMap.containsKey(columnName)) { @@ -173,9 +178,9 @@ private Map extractGaugesAndLabelValues( return sqlQueryResults; } - private void setLabelValues(String context, String[] labelNames, String[] labelValues, Iterator> sqlQueryRestulsEntryIterator) { + private void setLabelValues(String context, String[] labelNames, String[] labelValues, Iterator> sqlQueryRestulsEntryIterator) { while(sqlQueryRestulsEntryIterator.hasNext()) { //for each column - Map.Entry sqlQueryResultsEntry = sqlQueryRestulsEntryIterator.next(); + Map.Entry sqlQueryResultsEntry = sqlQueryRestulsEntryIterator.next(); boolean isLabel = false; for (int ii = 0; ii< labelNames.length; ii++) { if(labelNames[ii].equals(sqlQueryResultsEntry.getKey())) isLabel =true; // continue @@ -186,7 +191,7 @@ private void setLabelValues(String context, String[] labelNames, String[] labelV try { gaugeMap.get(ORACLEDB_METRIC_PREFIX + context + "_" + sqlQueryResultsEntry.getKey().toLowerCase()).labels(labelValues).set(valueToSet); } catch (Exception ex) { //todo filter to avoid unnecessary exception handling - LOG.debug("OracleDBMetricsExporter.translateQueryToPrometheusMetric Exc:" + ex); + LOGGER.debug("OracleDBMetricsExporter.translateQueryToPrometheusMetric Exc:" + ex); } else gaugeMap.get(ORACLEDB_METRIC_PREFIX + context + "_" + sqlQueryResultsEntry.getKey().toLowerCase()).set(valueToSet); } diff --git a/src/main/java/oracle/observability/tracing/TracingExporter.java b/src/main/java/oracle/observability/tracing/TracingExporter.java index 9133aff5..c943ef29 100644 --- 
a/src/main/java/oracle/observability/tracing/TracingExporter.java +++ b/src/main/java/oracle/observability/tracing/TracingExporter.java @@ -45,10 +45,11 @@ @RestController public final class TracingExporter extends ObservabilityExporter implements Runnable { - private static final org.slf4j.Logger LOG = LoggerFactory.getLogger(TracingExporter.class); + private static final org.slf4j.Logger LOGGER = LoggerFactory.getLogger(TracingExporter.class); + public static final String ECID = "ECID"; public static final String ECID_BIND_VALUES = "ECID_BIND_VALUES"; private static final String ECID_BIND_VALUES_GETSQLID_SQL = - "select ECID, SQL_ID from GV$SESSION where ECID IS NOT NULL"; + "select ECID, SQL_ID from GV$ACTIVE_SESSION_HISTORY where ECID IS NOT NULL"; private static final String ECID_BIND_VALUES_GETBINDING_SQL = "SELECT sql_id, t.sql_text sql_text, b.name bind_name, b.value_string bind_value " + "FROM gv$sql t " + @@ -56,9 +57,15 @@ public final class TracingExporter extends ObservabilityExporter implements Runn "WHERE b.value_string is not null AND sql_id = ? "; public static final String OTEL_JAEGER_ORACLEDBTRACER = "otel-jaeger-oracledbtracer"; public static final String HTTP_JAEGER_COLLECTOR_MSDATAWORKSHOP_14268 = "http://jaeger-collector.msdataworkshop:14268"; //default + public static final String TEMPLATE = "template"; + public static final String SQL_ID = "SQL_ID"; + public static final String SQL_TEXT = "sql_text"; + public static final String BIND_VALUE = "bind_value"; + public static final String ORACLEDB_TracingExporter = "oracledb_TracingExporter_"; public String TRACE_COLLECTOR_ADDRESS = System.getenv("TRACE_COLLECTOR_ADDRESS"); // "http://jaeger-collector.msdataworkshop:14268" "http://localhost:14250" public String TRACE_INTERVAL = System.getenv("TRACE_INTERVAL"); // "30s" - private int traceInterval = 30; + private int traceInterval; + private int traceIntervalDefault = 30; private static OpenTelemetry openTelemetry; private static Tracer tracer; public static TextMapPropagator TEXT_MAP_PROPAGATOR; @@ -74,10 +81,11 @@ public void init() { @Override public void run() { - LOG.debug("TraceExporter DEFAULT_METRICS:" + DEFAULT_METRICS); + LOGGER.debug("TracingExporter DEFAULT_METRICS:" + DEFAULT_METRICS); if (TRACE_INTERVAL != null && !TRACE_INTERVAL.trim().equals("")) traceInterval = Integer.getInteger(TRACE_INTERVAL); - LOG.debug("TraceExporter traceInterval:" + traceInterval); + else traceInterval = traceIntervalDefault; + LOGGER.debug("TracingExporter traceInterval:" + traceInterval); File tomlfile = new File(DEFAULT_METRICS); TomlMapper mapper = new TomlMapper(); JsonNode jsonNode; @@ -88,27 +96,26 @@ public void run() { } JsonNode trace = jsonNode.get("trace"); if (trace == null || trace.isEmpty()) { - LOG.info("No trace records configured"); + LOGGER.info("No trace records configured"); return; } Iterator traces = trace.iterator(); if(!traces.hasNext()) return; while (true) { try (Connection connection = getPoolDataSource().getConnection()) { - while (traces.hasNext()) { //for each "log" entry in toml/config... 
+ while (traces.hasNext()) { JsonNode next = traces.next(); - String context = next.get("context").asText(); // the sql query - String request = next.get("request").asText(); // the sql query - String template = next.get("template").asText(); // the sql query - LOG.debug("DBTracingExporter.request:" + request); - LOG.debug("DBTracingExporter.template:" + template); + String context = next.get(CONTEXT).asText(); + String request = next.get(REQUEST).asText(); + String template = next.get(TEMPLATE).asText(); + LOGGER.debug("TracingExporter request:" + request); if (template != null && template.equals(ECID_BIND_VALUES)) { ecidTraces(connection, context); } } Thread.sleep(traceInterval * 1000); } catch (Exception e) { - throw new RuntimeException(e); + LOGGER.warn("TracingExporter.processMetric exception:" + e); } } } @@ -116,42 +123,45 @@ public void run() { void ecidTraces(Connection connection, String configContextName) throws SQLException { PreparedStatement preparedStatement = connection.prepareStatement(ECID_BIND_VALUES_GETSQLID_SQL); ResultSet rs = preparedStatement.executeQuery(); - while (rs.next()) { - String traceparent = rs.getString("ECID"); - String SQL_ID = rs.getString("SQL_ID"); +// while (rs.next()) { + rs.next(); + String traceparent = rs.getString(ECID); + LOGGER.debug("TracingExporter traceparent:" + traceparent); + String sqlID = rs.getString(SQL_ID); String getbindingSQL = ECID_BIND_VALUES_GETBINDING_SQL; PreparedStatement sqlTextPS = connection.prepareStatement(getbindingSQL); - sqlTextPS.setString(1, SQL_ID); + sqlTextPS.setString(1, sqlID); ResultSet sqlTextPSrs = sqlTextPS.executeQuery(); - String SQL_TEXT = ""; - String SQL_BIND = ""; + String sqlText = ""; + String sqlBind = ""; while (sqlTextPSrs.next()) { - SQL_TEXT = sqlTextPSrs.getString("sql_text"); - SQL_BIND = sqlTextPSrs.getString("bind_value"); + sqlText = sqlTextPSrs.getString(SQL_TEXT); + sqlBind = sqlTextPSrs.getString(BIND_VALUE); } - if (!processedTraces.contains(traceparent)) { - LOG.debug("processing ecid/traceparent:" + traceparent); - LOG.debug("processing SQL_ID:" + SQL_ID); - LOG.debug("processing SQL_TEXT:" + SQL_TEXT); - LOG.debug("processing SQL_BIND:" + SQL_BIND); + if (!processedTraces.contains(traceparent)) { //todo check contents as well + LOGGER.debug("processing ecid/traceparent:" + traceparent); + LOGGER.debug("processing SQL_ID:" + sqlID); + LOGGER.debug("processing SQL_TEXT:" + sqlText); + LOGGER.debug("processing SQL_BIND:" + sqlBind); Context context = TEXT_MAP_PROPAGATOR.extract(Context.current(), null, getTextMapGetter(traceparent)); - LOG.debug("context:" + context); + LOGGER.debug("context:" + context); Span childSpan = - tracer.spanBuilder("oracledb_traceexporter_" + configContextName).setParent(context).setSpanKind(SpanKind.SERVER).startSpan(); - LOG.debug("childSpan:" + childSpan); + tracer.spanBuilder(ORACLEDB_TracingExporter + configContextName) + .setParent(context).setSpanKind(SpanKind.SERVER).startSpan(); + LOGGER.debug("childSpan:" + childSpan); try (Scope scope = childSpan.makeCurrent()) { - childSpan.setAttribute("SQL_ID", SQL_ID); - childSpan.setAttribute("SQL_TEXT", SQL_TEXT); - childSpan.setAttribute("SQL_BIND", SQL_BIND); - childSpan.addEvent("SQL_ID:" + SQL_ID); - childSpan.addEvent("SQL_TEXT:" + SQL_TEXT); - childSpan.addEvent("SQL_BIND:" + SQL_BIND); + childSpan.setAttribute(SQL_ID, sqlID); + childSpan.setAttribute("SQL_TEXT", sqlText); + childSpan.setAttribute("SQL_BIND", sqlBind); + childSpan.addEvent("SQL_ID:" + sqlID); + childSpan.addEvent("SQL_TEXT:" + 
sqlText); + childSpan.addEvent("SQL_BIND:" + sqlBind); processedTraces.add(traceparent); } finally { childSpan.end(); } } } - } +// } } private TextMapGetter getTextMapGetter(String traceparent) { @@ -171,6 +181,7 @@ public String get(HttpExchange carrier, String key) { private OpenTelemetry initOpenTelemetry() { String traceCollectorAddress = TRACE_COLLECTOR_ADDRESS == null || TRACE_COLLECTOR_ADDRESS.trim().equals("") ? HTTP_JAEGER_COLLECTOR_MSDATAWORKSHOP_14268 :TRACE_COLLECTOR_ADDRESS; + LOGGER.warn("TracingExporter traceCollectorAddress:" + traceCollectorAddress); JaegerGrpcSpanExporter jaegerExporter = JaegerGrpcSpanExporter.builder() .setEndpoint(traceCollectorAddress) From 574e61a79b329a9a9c5a302bcb12159847a525b6 Mon Sep 17 00:00:00 2001 From: Paul Parkinson Date: Sun, 3 Jul 2022 17:51:32 -0400 Subject: [PATCH 4/7] README.md updates --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 9957d61c..c48ec6fb 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,10 @@ # Unified Observability with Oracle Database -This v1 (preview) distribution contains scripts and code for exporting metrics, logs, and traces from any Oracle Database to provide converged observability for data-centric applications. +This distribution contains scripts and code for exporting metrics, logs, and traces from any Oracle Database to provide converged observability for data-centric applications. Metrics from the application layer, Kubernetes, and Oracle Database can be combined to provide unified observability to developers within a single Grafana console. -All three exporters (metrics, log, and trace) can be configured in the same file and each is explanined in the corresponding doc pages: +All three exporters (metrics, log, and trace) can be configured in the same file and each is explained in the corresponding doc pages: [Metrics Exporter][Metrics Exporter] @@ -15,7 +15,7 @@ All three exporters (metrics, log, and trace) can be configured in the same file The old version of the metrics exporter can be found in the [old implementation branch][old implementation branch] and the new metrics exporter implementation is backward compatible such that the same configuration for both database connection and metrics definition can be used. -Users are encouraged to open issues and enhancement requests against this GitHub repo and to ask any questions. We will actively work on them as we continue development of the exporters. +Users are encouraged to open issues and enhancement requests against this repo and to ask any questions. We will actively work on them as we continue development of the exporters. From dafa3552f351c3b44da84f39e921a28ec6f97e8e Mon Sep 17 00:00:00 2001 From: Paul Parkinson Date: Sun, 3 Jul 2022 17:56:48 -0400 Subject: [PATCH 5/7] README.md updates --- README.md | 6 +++++- build-and-push.sh | 38 -------------------------------------- 2 files changed, 5 insertions(+), 39 deletions(-) delete mode 100755 build-and-push.sh diff --git a/README.md b/README.md index c48ec6fb..c51d98bb 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,11 @@ All three exporters (metrics, log, and trace) can be configured in the same file The old version of the metrics exporter can be found in the [old implementation branch][old implementation branch] and the new metrics exporter implementation is backward compatible such that the same configuration for both database connection and metrics definition can be used.
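Since all three exporters read the same config file, a minimal combined definition might look like the following sketch (the entry names `metric`, `log`, and `trace` and their fields match what the exporter code in this series actually reads; the queries themselves are illustrative placeholders):

```toml
[[metric]]
context = "sessions"
metricsdesc = { value = "Number of sessions." }
request = "select count(*) as value from v$session"

[[log]]
# the exporter appends "WHERE <timestampfield> > ?" on subsequent polls
timestampfield = "ORIGINATING_TIMESTAMP"
request = "select ORIGINATING_TIMESTAMP, MESSAGE_TEXT from V$DIAG_ALERT_EXT"

[[trace]]
context = "ecid"
template = "ECID_BIND_VALUES"
request = "select 1 from dual"
```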
-Users are encouraged to open issues and enhancement requests against this repo and to ask any questions. We will actively work on them as we continue development of the exporters. +Users are encouraged to open issues and enhancement requests against this repo (https://github.com/oracle/oracle-db-appdev-monitoring/issues ) and to ask any questions. We will actively work on them as we continue development of the exporters. + +### Pull + +The image can be pulled via `docker pull container-registry.oracle.com/database/observability-exporter:0.1.0`. ### Build diff --git a/build-and-push.sh b/build-and-push.sh deleted file mode 100755 index 90437b39..00000000 --- a/build-and-push.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash -## Copyright (c) 2022 Oracle and/or its affiliates. -## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ - - -export IMAGE_NAME=observability-exporter -export IMAGE_VERSION=0.1.0 - -if [ -z "$DOCKER_REGISTRY" ]; then - echo "DOCKER_REGISTRY not set." -fi - -export IMAGE=${DOCKER_REGISTRY}/${IMAGE_NAME}:${IMAGE_VERSION} - -mvn clean package -DskipTests -docker build -t=$IMAGE . - -export IS_CREATE_REPOS=$1 -if [ -z "IS_CREATE_REPOS" ]; then - echo "not creating OCIR repos" -else - echo "creating OCIR repos and setting to public" - if [ -z "COMPARTMENT_OCID" ]; then - echo "COMPARTMENT_OCID not set. Will get it with state_get" - export COMPARTMENT_OCID=$(state_get COMPARTMENT_OCID) - fi - if [ -z "RUN_NAME" ]; then - echo "RUN_NAME not set. Will get it with state_get" - export RUN_NAME=$(state_get RUN_NAME) - fi -# RUN_NAME is randomly generated name from workshop, eg gd4930131 - oci artifacts container repository create --compartment-id "$COMPARTMENT_OCID" --display-name "$RUN_NAME/$IMAGE_NAME" --is-public true -fi - -docker push "$IMAGE" -if [ $? -eq 0 ]; then - docker rmi "$IMAGE" -fi From 72a2c4a0d729205d611e3ea40522c004ec6c8220 Mon Sep 17 00:00:00 2001 From: Paul Parkinson Date: Wed, 3 Aug 2022 19:06:55 -0400 Subject: [PATCH 6/7] basic example --- examples/README.md | 17 +++++ ...ied-observability-exporter-deployment.yaml | 49 +++++++++++++++ ...nified-observability-exporter-service.yaml | 14 ++++ ...observability-exporter-servicemonitor.yaml | 14 ++++ ...-redeploy-unified-observabiity-exporter.sh | 17 +++++ 5 files changed, 111 insertions(+) create mode 100644 examples/unified-observability-exporter-deployment.yaml create mode 100644 examples/unified-observability-exporter-service.yaml create mode 100644 examples/unified-observability-exporter-servicemonitor.yaml create mode 100755 examples/update-and-redeploy-unified-observabiity-exporter.sh diff --git a/examples/README.md b/examples/README.md index ae4198a4..319745de 100644 --- a/examples/README.md +++ b/examples/README.md @@ -3,4 +3,21 @@ Please refer to the Unified Observability in Grafana with converged Oracle Database Workshop at http://bit.ly/unifiedobservability and its corresponding repos https://github.com/oracle/microservices-datadriven/tree/main/grabdish/observability/db-metrics-exporter for complete examples. +A simple setup in Kubernetes involves the following steps (with the assumption that Prometheus is already installed): + +1. Change the %EXPORTER_NAME% value in all yaml files in this directory. This can be any value such as "helloworld". + +2. Change the database connection information in the unified-observability-exporter-deployment.yaml file.
+ - The only value required is the DATA_SOURCE_NAME which takes the format `USER/PASSWORD@DB_SERVICE_URL` + - In the example the connection information is obtained from a mount created from the wallet obtained from a Kubernetes secret named `%db-wallet-secret%` + - In the example the password is obtained from a Kubernetes secret named `dbuser` + +3. Copy a config file to unified-observability-%EXPORTER_NAME%-exporter-metrics.toml in the current directory + - E.g., `cp ../metrics/aq-metrics.toml unified-observability-helloworld-exporter-metrics.toml` + - This will be used to create a configmap that is referenced in the deployment. + +4. Run `./update-and-redeploy-unified-observabiity-exporter.sh` + +5. You should see metrics being exported from within the container at http://localhost:9161/metrics and likewise from the Kubernetes service at http://unified-observability-exporter-service-%EXPORTER_NAME%:9161/metrics + More examples will be provided here in the near future. diff --git a/examples/unified-observability-exporter-deployment.yaml b/examples/unified-observability-exporter-deployment.yaml new file mode 100644 index 00000000..02ff0c5c --- /dev/null +++ b/examples/unified-observability-exporter-deployment.yaml @@ -0,0 +1,49 @@ +## Copyright (c) 2021 Oracle and/or its affiliates. +## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: unified-observability-exporter-%EXPORTER_NAME% +spec: + replicas: 1 + selector: + matchLabels: + app: unified-observability-exporter-%EXPORTER_NAME% + template: + metadata: + labels: + app: unified-observability-exporter-%EXPORTER_NAME% + spec: + containers: + - name: unified-observability-exporter-%EXPORTER_NAME% + image: container-registry.oracle.com/database/observability-exporter:0.1.0 + imagePullPolicy: Always + env: + - name: DEFAULT_METRICS + value: /observability/unified-observability-%EXPORTER_NAME%-exporter-metrics.toml + - name: TNS_ADMIN + value: "/creds" + - name: dbpassword + valueFrom: + secretKeyRef: + name: dbuser + key: dbpassword + optional: true + - name: DATA_SOURCE_NAME + value: "%USER%/$(dbpassword)@%PDB_NAME%_tp" + volumeMounts: + - name: creds + mountPath: /creds + - name: config-volume + mountPath: /observability/unified-observability-%EXPORTER_NAME%-exporter-metrics.toml + subPath: unified-observability-%EXPORTER_NAME%-exporter-metrics.toml + ports: + - containerPort: 8080 + restartPolicy: Always + volumes: + - name: creds + secret: + secretName: %db-wallet-secret% + - name: config-volume + configMap: + name: unified-observability-%EXPORTER_NAME%-exporter-config \ No newline at end of file diff --git a/examples/unified-observability-exporter-service.yaml b/examples/unified-observability-exporter-service.yaml new file mode 100644 index 00000000..cc03655b --- /dev/null +++ b/examples/unified-observability-exporter-service.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Service +metadata: + name: unified-observability-exporter-service-%EXPORTER_NAME% + labels: + app: unified-observability-exporter-%EXPORTER_NAME% + release: stable +spec: + type: NodePort + ports: + - port: 9161 + name: metrics + selector: + app: unified-observability-exporter-%EXPORTER_NAME% diff --git a/examples/unified-observability-exporter-servicemonitor.yaml b/examples/unified-observability-exporter-servicemonitor.yaml new file mode 100644 index 00000000..2d44b96f --- /dev/null +++ b/examples/unified-observability-exporter-servicemonitor.yaml @@ -0,0 +1,14 @@
+apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: prometheus-unified-observability-exporter-%EXPORTER_NAME% + labels: + app: unified-observability-exporter-%EXPORTER_NAME% + release: stable +spec: + endpoints: + - interval: 20s + port: metrics + selector: + matchLabels: + app: unified-observability-exporter-%EXPORTER_NAME% \ No newline at end of file diff --git a/examples/update-and-redeploy-unified-observabiity-exporter.sh b/examples/update-and-redeploy-unified-observabiity-exporter.sh new file mode 100755 index 00000000..5c6795e1 --- /dev/null +++ b/examples/update-and-redeploy-unified-observabiity-exporter.sh @@ -0,0 +1,17 @@ +#!/bin/bash +## Copyright (c) 2021 Oracle and/or its affiliates. +## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ + +# add namespace if/as appropriate, eg `kubectl apply -f unified-observability-exporter-deployment.yaml -n mynamespace` + +echo delete previous deployment so that deployment is reapplied/deployed after configmap changes for exporter are made... +kubectl delete deployment db-metrics-exporter-orderpdb + +echo create configmap for unified-observability-exporter... +kubectl delete configmap unified-observability-exporter-config +kubectl create configmap unified-observability-exporter-config --from-file=unified-observability-%EXPORTER_NAME%-exporter-metrics.toml + +kubectl apply -f unified-observability-exporter-deployment.yaml +# the following are unnecessary after initial deploy but in order to keep to a single bash script... +kubectl apply -f unified-observability-exporter-service.yaml +kubectl apply -f unified-observability-exporter-servicemonitor.yaml From 9e1649a7cae15ea347c147b6f5ecc6654ab900de Mon Sep 17 00:00:00 2001 From: Paul Parkinson Date: Thu, 27 Apr 2023 19:09:00 -0400 Subject: [PATCH 7/7] adds... 1. ability to specify DATA_SOURCE_USER, DATA_SOURCE_PASSWORD, and DATA_SOURCE_SERVICENAME individually rather than as one DATA_SOURCE_NAME entry (thus more secure) 2. multi-database support via "/scrape" endpoint with "target" param taking the dsn for a specific database (this will soon be enhanced so that it is instead configurable, again to be more secure). Also, updated POM entries for 21.7 version. --- build.sh | 1 + examples/metrics/default-metrics.toml | 6 ++ pom.xml | 26 +++++++- .../observability/ObservabilityExporter.java | 62 +++++++++++++---- .../metrics/MetricsExporter.java | 57 ++++++++--------- 5 files changed, 108 insertions(+), 44 deletions(-) create mode 100644 examples/metrics/default-metrics.toml diff --git a/build.sh b/build.sh index e7ccba24..41c5f0e7 100755 --- a/build.sh +++ b/build.sh @@ -15,3 +15,4 @@ export IMAGE=${DOCKER_REGISTRY}/${IMAGE_NAME}:${IMAGE_VERSION} mvn clean package -DskipTests docker build -t=$IMAGE . +docker push $IMAGE diff --git a/examples/metrics/default-metrics.toml b/examples/metrics/default-metrics.toml new file mode 100644 index 00000000..b7d8f7ff --- /dev/null +++ b/examples/metrics/default-metrics.toml @@ -0,0 +1,6 @@ +[[metric]] +context = "sessions" +labels = ["inst_id", "status", "type"] +metricsdesc = { value = "Gauge metric with count of sessions by status and type."
} +request = "select inst_id, status, type, count(*) as value from gv$session group by status, type, inst_id" +ignorezeroresult = true \ No newline at end of file diff --git a/pom.xml b/pom.xml index 49fa8290..e6c97436 100644 --- a/pom.xml +++ b/pom.xml @@ -15,6 +15,7 @@ Exporter for metrics, logs, and tracing from Oracle database 11 + 21.7.0.0 @@ -28,9 +29,28 @@ com.oracle.database.jdbc - ojdbc11-production - 21.3.0.0 - pom + ojdbc11 + ${oracle.jdbc.version} + + + com.oracle.database.jdbc + ucp + ${oracle.jdbc.version} + + + com.oracle.database.security + oraclepki + ${oracle.jdbc.version} + + + com.oracle.database.security + osdt_core + ${oracle.jdbc.version} + + + com.oracle.database.security + osdt_cert + ${oracle.jdbc.version} io.opentelemetry diff --git a/src/main/java/oracle/observability/ObservabilityExporter.java b/src/main/java/oracle/observability/ObservabilityExporter.java index f0209c25..4bf200f9 100644 --- a/src/main/java/oracle/observability/ObservabilityExporter.java +++ b/src/main/java/oracle/observability/ObservabilityExporter.java @@ -6,21 +6,32 @@ import com.oracle.bmc.secrets.model.Base64SecretBundleContentDetails; import com.oracle.bmc.secrets.requests.GetSecretBundleRequest; import com.oracle.bmc.secrets.responses.GetSecretBundleResponse; +import oracle.observability.metrics.MetricsExporter; import oracle.ucp.jdbc.PoolDataSource; import oracle.ucp.jdbc.PoolDataSourceFactory; import org.apache.commons.codec.binary.Base64; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import java.io.File; import java.io.IOException; import java.sql.SQLException; +import java.util.HashMap; +import java.util.Map; public class ObservabilityExporter { + private static final Logger LOGGER = LoggerFactory.getLogger(ObservabilityExporter.class); public String DEFAULT_METRICS = System.getenv("DEFAULT_METRICS"); // "default-metrics.toml" + public File DEFAULT_METRICS_FILE; public String CUSTOM_METRICS = System.getenv("CUSTOM_METRICS"); // public String QUERY_TIMEOUT = System.getenv("QUERY_TIMEOUT"); // "5" public String DATABASE_MAXIDLECONNS = System.getenv("DATABASE_MAXIDLECONNS"); // "0" public String DATABASE_MAXOPENCONNS = System.getenv("DATABASE_MAXOPENCONNS"); // "10" - public String DATA_SOURCE_NAME = System.getenv("DATA_SOURCE_NAME"); //eg %USER%/$(dbpassword)@%PDB_NAME%_tp + public static String DATA_SOURCE_NAME = System.getenv("DATA_SOURCE_NAME"); //eg %USER%/$(dbpassword)@%PDB_NAME%_tp + public static String DATA_SOURCE_USER = System.getenv("DATA_SOURCE_USER"); //eg %USER%/$(dbpassword)@%PDB_NAME%_tp + public static String DATA_SOURCE_PASSWORD = System.getenv("DATA_SOURCE_PASSWORD"); //eg %USER%/$(dbpassword)@%PDB_NAME%_tp + public static String DATA_SOURCE_SERVICENAME = System.getenv("DATA_SOURCE_SERVICENAME"); //eg %USER%/$(dbpassword)@%PDB_NAME%_tp public String TNS_ADMIN = System.getenv("TNS_ADMIN"); //eg /msdataworkshop/creds public String OCI_REGION = System.getenv("OCI_REGION"); //eg us-ashburn-1 public String VAULT_SECRET_OCID = System.getenv("VAULT_SECRET_OCID"); //eg ocid.... 
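For the first item in the commit message, a quick sketch of the two equivalent ways to supply credentials (all values here are placeholders; per the static initializer in the next hunk, the three individual variables are simply concatenated into DATA_SOURCE_NAME as USER/PASSWORD@SERVICENAME):

```bash
# existing style: one composed entry
export DATA_SOURCE_NAME="myuser/mypassword@mypdb_tp"

# new in this patch: the same connection expressed as individual entries
export DATA_SOURCE_USER="myuser"
export DATA_SOURCE_PASSWORD="mypassword"
export DATA_SOURCE_SERVICENAME="mypdb_tp"
```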
@@ -29,27 +40,52 @@ public class ObservabilityExporter { public static final String CONTEXT = "context"; public static final String REQUEST = "request"; + static { + + if (DATA_SOURCE_USER != null && DATA_SOURCE_PASSWORD != null && DATA_SOURCE_SERVICENAME != null) { + DATA_SOURCE_NAME = DATA_SOURCE_USER + "/" + DATA_SOURCE_PASSWORD + "@" + DATA_SOURCE_SERVICENAME; + LOGGER.info("DATA_SOURCE_NAME = DATA_SOURCE_USER + \"/\" + DATA_SOURCE_PASSWORD + \"@\" + DATA_SOURCE_SERVICENAME"); + //eg %USER%/$(dbpassword)@%PDB_NAME%_tp + } + } PoolDataSource observabilityDB; + Map dataSourceNameToDataSourceMap = new HashMap<>(); + public PoolDataSource getPoolDataSource() throws SQLException { - if (observabilityDB != null) return observabilityDB; - observabilityDB = PoolDataSourceFactory.getPoolDataSource(); - observabilityDB.setConnectionFactoryClassName("oracle.jdbc.pool.OracleDataSource"); - String user = DATA_SOURCE_NAME.substring(0, DATA_SOURCE_NAME.indexOf("/")); - String pw = DATA_SOURCE_NAME.substring(DATA_SOURCE_NAME.indexOf("/") + 1, DATA_SOURCE_NAME.indexOf("@")); - String serviceName = DATA_SOURCE_NAME.substring(DATA_SOURCE_NAME.indexOf("@") + 1); + return getPoolDataSource(DATA_SOURCE_NAME); + } + public PoolDataSource getPoolDataSource(String dataSourceName) throws SQLException { + if (dataSourceName.equals(DATA_SOURCE_NAME)) { + if (observabilityDB != null) return observabilityDB; + return observabilityDB = getDataSource(DATA_SOURCE_NAME); + } else { + if(dataSourceNameToDataSourceMap.containsKey(dataSourceName) && dataSourceNameToDataSourceMap.get(dataSourceName) != null) + return dataSourceNameToDataSourceMap.get(dataSourceName); + PoolDataSource poolDataSource = getDataSource(dataSourceName); + dataSourceNameToDataSourceMap.put(dataSourceName, poolDataSource); + return poolDataSource; + } + } + + private PoolDataSource getDataSource(String dataSourceName) throws SQLException { + PoolDataSource poolDataSource = PoolDataSourceFactory.getPoolDataSource(); + poolDataSource.setConnectionFactoryClassName("oracle.jdbc.pool.OracleDataSource"); + String user = dataSourceName.substring(0, dataSourceName.indexOf("/")); + String pw = dataSourceName.substring(dataSourceName.indexOf("/") + 1, dataSourceName.indexOf("@")); + String serviceName = dataSourceName.substring(dataSourceName.indexOf("@") + 1); String url = "jdbc:oracle:thin:@" + serviceName + "?TNS_ADMIN=" + TNS_ADMIN; - observabilityDB.setURL(url); - observabilityDB.setUser(user); - if (VAULT_SECRET_OCID == null || VAULT_SECRET_OCID.trim().equals("")) { - observabilityDB.setPassword(pw); + poolDataSource.setURL(url); + poolDataSource.setUser(user); + if (VAULT_SECRET_OCID == null || VAULT_SECRET_OCID.trim().equals("") || !dataSourceName.equals(DATA_SOURCE_NAME)) { + poolDataSource.setPassword(pw); } else { try { - observabilityDB.setPassword(getPasswordFromVault()); + poolDataSource.setPassword(getPasswordFromVault()); } catch (IOException e) { throw new SQLException(e); } } - return observabilityDB; + return poolDataSource; } diff --git a/src/main/java/oracle/observability/metrics/MetricsExporter.java b/src/main/java/oracle/observability/metrics/MetricsExporter.java index 5eb87e16..967273b2 100644 --- a/src/main/java/oracle/observability/metrics/MetricsExporter.java +++ b/src/main/java/oracle/observability/metrics/MetricsExporter.java @@ -7,6 +7,7 @@ import io.prometheus.client.Gauge; import oracle.observability.ObservabilityExporter; import org.springframework.web.bind.annotation.GetMapping; +import 
org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.RestController; import org.slf4j.Logger; @@ -31,30 +32,38 @@ public class MetricsExporter extends ObservabilityExporter { public static final String LABELS = "labels"; public static final String IGNOREZERORESULT = "ignorezeroresult"; public static final String FALSE = "false"; - public String LISTEN_ADDRESS = System.getenv("LISTEN_ADDRESS"); // ":9161" - public String TELEMETRY_PATH = System.getenv("TELEMETRY_PATH"); // "/metrics" - //Interval between each scrape. Default is to scrape on collect requests. scrape.interval - public String SCRAPE_INTERVAL = System.getenv("scrape.interval"); // "0s" public static final String ORACLEDB_METRIC_PREFIX = "oracledb_"; Map gaugeMap = new HashMap<>(); + Map dnsToCollectorRegistryMap = new HashMap<>(); + + /** * The endpoint that prometheus will scrape * @return Prometheus metric - * @throws Exception */ @GetMapping(value = "/metrics", produces = "text/plain") public String metrics() throws Exception { - processMetrics(); - return getMetricsString(); + processMetrics(DATA_SOURCE_NAME, CollectorRegistry.defaultRegistry); + return getMetricsString(CollectorRegistry.defaultRegistry); + } + @GetMapping(value = "/scrape", produces = "text/plain") + public String scrape(@RequestParam("target") String target) throws Exception { + CollectorRegistry collectorRegistry = dnsToCollectorRegistryMap.get(target); + if (collectorRegistry == null) { + collectorRegistry = new CollectorRegistry(); + dnsToCollectorRegistryMap.put(target, collectorRegistry); + } + processMetrics(target, dnsToCollectorRegistryMap.get(target)); + return getMetricsString(collectorRegistry); } @PostConstruct public void init() throws Exception { - processMetrics(); + processMetrics(DATA_SOURCE_NAME, CollectorRegistry.defaultRegistry); } - private void processMetrics() throws IOException, SQLException { + private void processMetrics(String datasourceName, CollectorRegistry registry) throws IOException, SQLException { File tomlfile = new File(DEFAULT_METRICS); TomlMapper mapper = new TomlMapper(); JsonNode jsonNode = mapper.readerFor(MetricsExporterConfigEntry.class).readTree(new FileInputStream(tomlfile)); @@ -65,15 +74,15 @@ private void processMetrics() throws IOException, SQLException { } Iterator metrics = metric.iterator(); int isConnectionSuccessful = 0; - try(Connection connection = getPoolDataSource().getConnection()) { + try(Connection connection = getPoolDataSource(datasourceName).getConnection()) { isConnectionSuccessful = 1; while (metrics.hasNext()) { - processMetric(connection, metrics); + processMetric(registry, connection, metrics); } } finally { Gauge gauge = gaugeMap.get(ORACLEDB_METRIC_PREFIX + UP); if (gauge == null) { - Gauge upgauge = Gauge.build().name(ORACLEDB_METRIC_PREFIX + UP).help("Whether the Oracle database server is up.").register(); + Gauge upgauge = Gauge.build().name(ORACLEDB_METRIC_PREFIX + UP).help("Whether the Oracle database server is up.").register(registry); upgauge.set(isConnectionSuccessful); gaugeMap.put(ORACLEDB_METRIC_PREFIX + UP, upgauge); } else gauge.set(isConnectionSuccessful); @@ -91,7 +100,7 @@ private void processMetrics() throws IOException, SQLException { * Request string * IgnoreZeroResult bool */ - private void processMetric(Connection connection, Iterator metric) { + private void processMetric(CollectorRegistry registry, Connection connection, Iterator metric) { JsonNode next = metric.next(); String context = next.get(CONTEXT).asText(); 
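// the context value becomes the middle segment of each emitted gauge name:
// gauges are registered (lower-cased) as oracledb_<context>_<column name> via ORACLEDB_METRIC_PREFIX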
// eg context = "teq" String metricsType = next.get(METRICSTYPE) == null ? "" :next.get(METRICSTYPE).asText(); @@ -120,7 +129,7 @@ private void processMetric(Connection connection, Iterator metric) { try { resultSet = connection.prepareStatement(request).executeQuery(); while (resultSet.next()) { - translateQueryToPrometheusMetric(context, metricsDescMap, labelNames, resultSet); + translateQueryToPrometheusMetric(registry, context, metricsDescMap, labelNames, resultSet); } } catch(SQLException e) { //this can be due to table not existing etc. LOGGER.warn("MetricsExporter.processMetric during:" + request + " exception:" + e); @@ -128,26 +137,19 @@ private void processMetric(Connection connection, Iterator metric) { } } - private void translateQueryToPrometheusMetric(String context, Map metricsDescMap, + private void translateQueryToPrometheusMetric(CollectorRegistry registry, String context, Map metricsDescMap, String[] labelNames, ResultSet resultSet) throws SQLException { String[] labelValues = new String[labelNames.length]; Map sqlQueryResults = - extractGaugesAndLabelValues(context, metricsDescMap, labelNames, resultSet, labelValues, resultSet.getMetaData().getColumnCount()); + extractGaugesAndLabelValues(registry, context, metricsDescMap, labelNames, resultSet, labelValues, resultSet.getMetaData().getColumnCount()); setLabelValues(context, labelNames, labelValues, sqlQueryResults.entrySet().iterator()); } /** * Creates Gauges and gets label values - * @param context - * @param metricsDescMap - * @param labelNames - * @param resultSet - * @param labelValues - * @param columnCount - * @throws SQLException */ - private Map extractGaugesAndLabelValues( + private Map extractGaugesAndLabelValues(CollectorRegistry registry, String context, Map metricsDescMap, String[] labelNames, ResultSet resultSet, String[] labelValues, int columnCount) throws SQLException { Map sqlQueryResults = new HashMap<>(); @@ -166,8 +168,8 @@ private Map extractGaugesAndLabelValues( if (gauge == null) { if(metricsDescMap.containsKey(columnName)) { if (labelNames.length > 0) { - gauge = Gauge.build().name(gaugeName.toLowerCase()).help(metricsDescMap.get(columnName)).labelNames(labelNames).register(); - } else gauge = Gauge.build().name(gaugeName.toLowerCase()).help(metricsDescMap.get(columnName)).register(); + gauge = Gauge.build().name(gaugeName.toLowerCase()).help(metricsDescMap.get(columnName)).labelNames(labelNames).register(registry); + } else gauge = Gauge.build().name(gaugeName.toLowerCase()).help(metricsDescMap.get(columnName)).register(registry); gaugeMap.put(gaugeName, gauge); } } @@ -198,8 +200,7 @@ private void setLabelValues(String context, String[] labelNames, String[] labelV } } - public static String getMetricsString() { - CollectorRegistry collectorRegistry = CollectorRegistry.defaultRegistry; + public static String getMetricsString(CollectorRegistry collectorRegistry) { Enumeration mfs = collectorRegistry.filteredMetricFamilySamples(new HashSet<>()); return compose(mfs); }
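As a usage sketch of the two endpoints this final patch leaves in place (the port and connect strings are illustrative; per the code above, the `target` parameter is parsed in the same `USER/PASSWORD@SERVICENAME` format as DATA_SOURCE_NAME, and each target gets its own CollectorRegistry):

```bash
# scrape the default data source, as before
curl http://localhost:9161/metrics

# scrape an additional database via the new multi-target endpoint
curl "http://localhost:9161/scrape?target=myuser/mypassword@otherpdb_tp"
```

Note that, as the commit message itself says, passing the whole connect string as a request parameter is an interim design that is slated to be replaced by configured targets for security reasons.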