diff --git a/docs/generated/http/full.md b/docs/generated/http/full.md
index 215dbcc4f884..60ef6ca5260e 100644
--- a/docs/generated/http/full.md
+++ b/docs/generated/http/full.md
@@ -3932,6 +3932,8 @@ tenant pods.
| last_reset | [google.protobuf.Timestamp](#cockroach.server.serverpb.StatementsResponse-google.protobuf.Timestamp) | | Timestamp of the last stats reset. | [reserved](#support-status) |
| internal_app_name_prefix | [string](#cockroach.server.serverpb.StatementsResponse-string) | | If set and non-empty, indicates the prefix to application_name used for statements/queries issued internally by CockroachDB. | [reserved](#support-status) |
| transactions | [StatementsResponse.ExtendedCollectedTransactionStatistics](#cockroach.server.serverpb.StatementsResponse-cockroach.server.serverpb.StatementsResponse.ExtendedCollectedTransactionStatistics) | repeated | Transactions is transaction-level statistics for the collection of statements in this response. | [reserved](#support-status) |
+| stmts_total_runtime_secs | [float](#cockroach.server.serverpb.StatementsResponse-float) | | | [reserved](#support-status) |
+| txns_total_runtime_secs | [float](#cockroach.server.serverpb.StatementsResponse-float) | | | [reserved](#support-status) |
@@ -4004,12 +4006,28 @@ Support status: [reserved](#support-status)
| ----- | ---- | ----- | ----------- | -------------- |
| start | [int64](#cockroach.server.serverpb.CombinedStatementsStatsRequest-int64) | | Unix time range for aggregated statements. | [reserved](#support-status) |
| end | [int64](#cockroach.server.serverpb.CombinedStatementsStatsRequest-int64) | | | [reserved](#support-status) |
+| fetch_mode | [CombinedStatementsStatsRequest.FetchMode](#cockroach.server.serverpb.CombinedStatementsStatsRequest-cockroach.server.serverpb.CombinedStatementsStatsRequest.FetchMode) | | Note that if fetch_mode is set to transactions only, we will also include the statement statistics for the stmts in the transactions response. This is more of a hack-y method to get the complete stats for txns, because in the client we need to fill in some txn stats info from its stmt stats, such as the query string.
We prefer this hackier method right now to reduce surface area for backporting these changes, but in the future we will introduce more endpoints to properly organize these differing requests. TODO (xinhaoz) - Split this API into stmts and txns properly instead of using this param. | [reserved](#support-status) |
+| limit | [int64](#cockroach.server.serverpb.CombinedStatementsStatsRequest-int64) | | | [reserved](#support-status) |
+
+#### CombinedStatementsStatsRequest.FetchMode
+
+
+
+| Field | Type | Label | Description | Support status |
+| ----- | ---- | ----- | ----------- | -------------- |
+| stats_type | [CombinedStatementsStatsRequest.StatsType](#cockroach.server.serverpb.CombinedStatementsStatsRequest-cockroach.server.serverpb.CombinedStatementsStatsRequest.StatsType) | | | [reserved](#support-status) |
+| sort | [StatsSortOptions](#cockroach.server.serverpb.CombinedStatementsStatsRequest-cockroach.server.serverpb.StatsSortOptions) | | | [reserved](#support-status) |
+
+
+
+
+
#### Response Parameters
@@ -4025,6 +4043,8 @@ Support status: [reserved](#support-status)
| last_reset | [google.protobuf.Timestamp](#cockroach.server.serverpb.StatementsResponse-google.protobuf.Timestamp) | | Timestamp of the last stats reset. | [reserved](#support-status) |
| internal_app_name_prefix | [string](#cockroach.server.serverpb.StatementsResponse-string) | | If set and non-empty, indicates the prefix to application_name used for statements/queries issued internally by CockroachDB. | [reserved](#support-status) |
| transactions | [StatementsResponse.ExtendedCollectedTransactionStatistics](#cockroach.server.serverpb.StatementsResponse-cockroach.server.serverpb.StatementsResponse.ExtendedCollectedTransactionStatistics) | repeated | Transactions is transaction-level statistics for the collection of statements in this response. | [reserved](#support-status) |
+| stmts_total_runtime_secs | [float](#cockroach.server.serverpb.StatementsResponse-float) | | | [reserved](#support-status) |
+| txns_total_runtime_secs | [float](#cockroach.server.serverpb.StatementsResponse-float) | | | [reserved](#support-status) |
diff --git a/pkg/ccl/serverccl/statusccl/tenant_status_test.go b/pkg/ccl/serverccl/statusccl/tenant_status_test.go
index 67e2264c8739..0eef15cd0bfb 100644
--- a/pkg/ccl/serverccl/statusccl/tenant_status_test.go
+++ b/pkg/ccl/serverccl/statusccl/tenant_status_test.go
@@ -187,11 +187,9 @@ func TestTenantCannotSeeNonTenantStats(t *testing.T) {
require.NoError(t, err)
request := &serverpb.StatementsRequest{}
- combinedStatsRequest := &serverpb.CombinedStatementsStatsRequest{}
var tenantStats *serverpb.StatementsResponse
- var tenantCombinedStats *serverpb.StatementsResponse
- // Populate `tenantStats` and `tenantCombinedStats`. The tenant server
+ // Populate `tenantStats`. The tenant server
// `Statements` and `CombinedStatements` methods are backed by the
// sqlinstance system which uses a cache populated through rangefeed
// for keeping track of SQL pod data. We use `SucceedsSoon` to eliminate
@@ -206,10 +204,6 @@ func TestTenantCannotSeeNonTenantStats(t *testing.T) {
return errors.New("tenant statements are unexpectedly empty")
}
- tenantCombinedStats, err = tenantStatusServer.CombinedStatementStats(ctx, combinedStatsRequest)
- if tenantCombinedStats == nil || len(tenantCombinedStats.Statements) == 0 {
- return errors.New("tenant combined statements are unexpectedly empty")
- }
return nil
})
@@ -218,11 +212,6 @@ func TestTenantCannotSeeNonTenantStats(t *testing.T) {
err = serverutils.GetJSONProto(nonTenant, path, &nonTenantStats)
require.NoError(t, err)
- path = "/_status/combinedstmts"
- var nonTenantCombinedStats serverpb.StatementsResponse
- err = serverutils.GetJSONProto(nonTenant, path, &nonTenantCombinedStats)
- require.NoError(t, err)
-
checkStatements := func(t *testing.T, tc []testCase, actual *serverpb.StatementsResponse) {
t.Helper()
var expectedStatements []string
@@ -258,13 +247,11 @@ func TestTenantCannotSeeNonTenantStats(t *testing.T) {
// First we verify that we have expected stats from tenants.
t.Run("tenant-stats", func(t *testing.T) {
checkStatements(t, testCaseTenant, tenantStats)
- checkStatements(t, testCaseTenant, tenantCombinedStats)
})
// Now we verify the non tenant stats are what we expected.
t.Run("non-tenant-stats", func(t *testing.T) {
checkStatements(t, testCaseNonTenant, &nonTenantStats)
- checkStatements(t, testCaseNonTenant, &nonTenantCombinedStats)
})
// Now we verify that tenant and non-tenant have no visibility into each other's stats.
@@ -281,17 +268,6 @@ func TestTenantCannotSeeNonTenantStats(t *testing.T) {
}
}
- for _, tenantStmt := range tenantCombinedStats.Statements {
- for _, nonTenantStmt := range nonTenantCombinedStats.Statements {
- require.NotEqual(t, tenantStmt, nonTenantStmt, "expected tenant to have no visibility to non-tenant's statement stats, but found:", nonTenantStmt)
- }
- }
-
- for _, tenantTxn := range tenantCombinedStats.Transactions {
- for _, nonTenantTxn := range nonTenantCombinedStats.Transactions {
- require.NotEqual(t, tenantTxn, nonTenantTxn, "expected tenant to have no visibility to non-tenant's transaction stats, but found:", nonTenantTxn)
- }
- }
})
}
@@ -307,43 +283,46 @@ func testResetSQLStatsRPCForTenant(
testCluster := testHelper.TestCluster()
controlCluster := testHelper.ControlCluster()
- // Disable automatic flush to ensure tests are deterministic.
+ // Set automatic flush to some long duration we'll never hit to
+ // ensure tests are deterministic.
testCluster.TenantConn(0 /* idx */).
- Exec(t, "SET CLUSTER SETTING sql.stats.flush.enabled = false")
+ Exec(t, "SET CLUSTER SETTING sql.stats.flush.interval = '24h'")
controlCluster.TenantConn(0 /* idx */).
- Exec(t, "SET CLUSTER SETTING sql.stats.flush.enabled = false")
+ Exec(t, "SET CLUSTER SETTING sql.stats.flush.interval = '24h'")
defer func() {
// Cleanup
testCluster.TenantConn(0 /* idx */).
- Exec(t, "SET CLUSTER SETTING sql.stats.flush.enabled = true")
+ Exec(t, "SET CLUSTER SETTING sql.stats.flush.interval = '10m'")
controlCluster.TenantConn(0 /* idx */).
- Exec(t, "SET CLUSTER SETTING sql.stats.flush.enabled = true")
+ Exec(t, "SET CLUSTER SETTING sql.stats.flush.interval = '10m'")
}()
for _, flushed := range []bool{false, true} {
+ testTenant := testCluster.Tenant(serverccl.RandomServer)
+ testTenantConn := testTenant.GetTenantConn()
t.Run(fmt.Sprintf("flushed=%t", flushed), func(t *testing.T) {
// Clears the SQL Stats at the end of each test via builtin.
defer func() {
- testCluster.TenantConn(serverccl.RandomServer).Exec(t, "SELECT crdb_internal.reset_sql_stats()")
+ testTenantConn.Exec(t, "SELECT crdb_internal.reset_sql_stats()")
controlCluster.TenantConn(serverccl.RandomServer).Exec(t, "SELECT crdb_internal.reset_sql_stats()")
}()
for _, stmt := range stmts {
- testCluster.TenantConn(serverccl.RandomServer).Exec(t, stmt)
+ testTenantConn.Exec(t, stmt)
controlCluster.TenantConn(serverccl.RandomServer).Exec(t, stmt)
}
if flushed {
- testCluster.TenantSQLStats(serverccl.RandomServer).Flush(ctx)
+ testTenant.TenantSQLStats().Flush(ctx)
controlCluster.TenantSQLStats(serverccl.RandomServer).Flush(ctx)
}
- status := testCluster.TenantStatusSrv(serverccl.RandomServer)
+ status := testTenant.TenantStatusSrv()
statsPreReset, err := status.Statements(ctx, &serverpb.StatementsRequest{
- Combined: true,
+ Combined: flushed,
})
require.NoError(t, err)
@@ -357,7 +336,7 @@ func testResetSQLStatsRPCForTenant(
require.NoError(t, err)
statsPostReset, err := status.Statements(ctx, &serverpb.StatementsRequest{
- Combined: true,
+ Combined: flushed,
})
require.NoError(t, err)
@@ -382,7 +361,7 @@ func testResetSQLStatsRPCForTenant(
// Ensures that sql stats reset is isolated by tenant boundary.
statsFromControlCluster, err :=
controlCluster.TenantStatusSrv(serverccl.RandomServer).Statements(ctx, &serverpb.StatementsRequest{
- Combined: true,
+ Combined: flushed,
})
require.NoError(t, err)
diff --git a/pkg/server/combined_statement_stats.go b/pkg/server/combined_statement_stats.go
index 3d4c4b1284ac..4e4663285a41 100644
--- a/pkg/server/combined_statement_stats.go
+++ b/pkg/server/combined_statement_stats.go
@@ -67,18 +67,36 @@ func getCombinedStatementStats(
settings *cluster.Settings,
testingKnobs *sqlstats.TestingKnobs,
) (*serverpb.StatementsResponse, error) {
- startTime := getTimeFromSeconds(req.Start)
- endTime := getTimeFromSeconds(req.End)
- limit := SQLStatsResponseMax.Get(&settings.SV)
showInternal := SQLStatsShowInternal.Get(&settings.SV)
whereClause, orderAndLimit, args := getCombinedStatementsQueryClausesAndArgs(
- startTime, endTime, limit, testingKnobs, showInternal)
- statements, err := collectCombinedStatements(ctx, ie, whereClause, args, orderAndLimit)
+ req, testingKnobs, showInternal, settings)
+
+ var statements []serverpb.StatementsResponse_CollectedStatementStatistics
+ var transactions []serverpb.StatementsResponse_ExtendedCollectedTransactionStatistics
+ var err error
+
+ if req.FetchMode == nil || req.FetchMode.StatsType == serverpb.CombinedStatementsStatsRequest_TxnStatsOnly {
+ transactions, err = collectCombinedTransactions(ctx, ie, whereClause, args, orderAndLimit, testingKnobs)
+ if err != nil {
+ return nil, serverError(ctx, err)
+ }
+ }
+
+ if req.FetchMode != nil && req.FetchMode.StatsType == serverpb.CombinedStatementsStatsRequest_TxnStatsOnly {
+ // Change the whereClause for the statements to those matching the txn_fingerprint_ids in the
+ // transactions response that are within the desired interval. We also don't need the order and
+ // limit anymore.
+ orderAndLimit = ""
+ whereClause, args = buildWhereClauseForStmtsByTxn(req, transactions, testingKnobs)
+ }
+
+ statements, err = collectCombinedStatements(ctx, ie, whereClause, args, orderAndLimit, testingKnobs)
if err != nil {
return nil, serverError(ctx, err)
}
- transactions, err := collectCombinedTransactions(ctx, ie, whereClause, args, orderAndLimit)
+ stmtsRunTime, txnsRunTime, err := getTotalRuntimeSecs(ctx, req, ie, testingKnobs)
+
if err != nil {
return nil, serverError(ctx, err)
}
@@ -88,11 +106,181 @@ func getCombinedStatementStats(
Transactions: transactions,
LastReset: statsProvider.GetLastReset(),
InternalAppNamePrefix: catconstants.InternalAppNamePrefix,
+ StmtsTotalRuntimeSecs: stmtsRunTime,
+ TxnsTotalRuntimeSecs: txnsRunTime,
}
return response, nil
}
+func getTotalRuntimeSecs(
+ ctx context.Context,
+ req *serverpb.CombinedStatementsStatsRequest,
+ ie *sql.InternalExecutor,
+ testingKnobs *sqlstats.TestingKnobs,
+) (stmtsRuntime float32, txnsRuntime float32, err error) {
+ var buffer strings.Builder
+ buffer.WriteString(testingKnobs.GetAOSTClause())
+ var args []interface{}
+ startTime := getTimeFromSeconds(req.Start)
+ endTime := getTimeFromSeconds(req.End)
+
+ buffer.WriteString(" WHERE true")
+
+ if startTime != nil {
+ args = append(args, *startTime)
+ buffer.WriteString(fmt.Sprintf(" AND aggregated_ts >= $%d", len(args)))
+ }
+
+ if endTime != nil {
+ args = append(args, *endTime)
+ buffer.WriteString(fmt.Sprintf(" AND aggregated_ts <= $%d", len(args)))
+ }
+
+ whereClause := buffer.String()
+
+ queryWithPlaceholders := `
+SELECT
+COALESCE(
+ sum(
+ (statistics -> 'statistics' -> 'svcLat' ->> 'mean')::FLOAT *
+ (statistics-> 'statistics' ->> 'cnt')::FLOAT
+ )
+, 0)
+FROM crdb_internal.%s_statistics_persisted
+%s
+`
+
+	getRuntime := func(table string) (secs float32, err error) { // named results so the deferred it.Close() error is propagated
+ it, err := ie.QueryIteratorEx(
+ ctx,
+ fmt.Sprintf(`%s-total-runtime`, table),
+ nil,
+ sessiondata.NodeUserSessionDataOverride,
+ fmt.Sprintf(queryWithPlaceholders, table, whereClause),
+ args...)
+
+ if err != nil {
+ return 0, err
+ }
+
+ defer func() {
+ closeErr := it.Close()
+ if closeErr != nil {
+ err = errors.CombineErrors(err, closeErr)
+ }
+ }()
+
+ ok, err := it.Next(ctx)
+ if err != nil {
+ return 0, err
+ }
+
+ if !ok {
+ return 0, errors.New("expected one row but got none")
+ }
+
+ var row tree.Datums
+ if row = it.Cur(); row == nil {
+ return 0, errors.New("unexpected null row")
+ }
+
+ return float32(tree.MustBeDFloat(row[0])), nil
+
+ }
+
+ if req.FetchMode == nil || req.FetchMode.StatsType != serverpb.CombinedStatementsStatsRequest_TxnStatsOnly {
+ stmtsRuntime, err = getRuntime("statement")
+ if err != nil {
+ return 0, 0, err
+ }
+ }
+
+ if req.FetchMode == nil || req.FetchMode.StatsType != serverpb.CombinedStatementsStatsRequest_StmtStatsOnly {
+ txnsRuntime, err = getRuntime("transaction")
+ if err != nil {
+ return 0, 0, err
+ }
+ }
+
+ return stmtsRuntime, txnsRuntime, err
+}
+
+// Common stmt and txn columns to sort on.
+const (
+ sortSvcLatDesc = `(statistics -> 'statistics' -> 'svcLat' ->> 'mean')::FLOAT DESC`
+ sortExecCountDesc = `(statistics -> 'statistics' ->> 'cnt')::INT DESC`
+ sortContentionTimeDesc = `(statistics -> 'execution_statistics' -> 'contentionTime' ->> 'mean')::FLOAT DESC`
+ sortPCTRuntimeDesc = `((statistics -> 'statistics' -> 'svcLat' ->> 'mean')::FLOAT *
+ (statistics -> 'statistics' ->> 'cnt')::FLOAT) DESC`
+)
+
+func getStmtColumnFromSortOption(sort serverpb.StatsSortOptions) string {
+ switch sort {
+ case serverpb.StatsSortOptions_SERVICE_LAT:
+ return sortSvcLatDesc
+ case serverpb.StatsSortOptions_EXECUTION_COUNT:
+ return sortExecCountDesc
+ case serverpb.StatsSortOptions_CONTENTION_TIME:
+ return sortContentionTimeDesc
+ default:
+ return sortSvcLatDesc
+ }
+}
+
+func getTxnColumnFromSortOption(sort serverpb.StatsSortOptions) string {
+ switch sort {
+ case serverpb.StatsSortOptions_SERVICE_LAT:
+ return sortSvcLatDesc
+ case serverpb.StatsSortOptions_EXECUTION_COUNT:
+ return sortExecCountDesc
+ case serverpb.StatsSortOptions_CONTENTION_TIME:
+ return sortContentionTimeDesc
+ case serverpb.StatsSortOptions_PCT_RUNTIME:
+ return sortPCTRuntimeDesc
+ default:
+ return sortSvcLatDesc
+ }
+}
+
+// buildWhereClauseForStmtsByTxn builds the where clause to get the statement
+// stats based on a list of transactions. The list of transactions provided must
+// contain no duplicate transaction fingerprint ids.
+func buildWhereClauseForStmtsByTxn(
+ req *serverpb.CombinedStatementsStatsRequest,
+ transactions []serverpb.StatementsResponse_ExtendedCollectedTransactionStatistics,
+ testingKnobs *sqlstats.TestingKnobs,
+) (whereClause string, args []interface{}) {
+ var buffer strings.Builder
+ buffer.WriteString(testingKnobs.GetAOSTClause())
+
+ buffer.WriteString(" WHERE true")
+
+ // Add start and end filters from request.
+ startTime := getTimeFromSeconds(req.Start)
+ endTime := getTimeFromSeconds(req.End)
+ if startTime != nil {
+ args = append(args, *startTime)
+ buffer.WriteString(fmt.Sprintf(" AND aggregated_ts >= $%d", len(args)))
+ }
+
+ if endTime != nil {
+ args = append(args, *endTime)
+ buffer.WriteString(fmt.Sprintf(" AND aggregated_ts <= $%d", len(args)))
+ }
+
+ txnFingerprints := make([]string, 0, len(transactions))
+ for i := range transactions {
+ fingerprint := uint64(transactions[i].StatsData.TransactionFingerprintID)
+ txnFingerprints = append(txnFingerprints, fmt.Sprintf("\\x%016x", fingerprint))
+ }
+
+ args = append(args, txnFingerprints)
+ buffer.WriteString(fmt.Sprintf(" AND transaction_fingerprint_id = any $%d", len(args)))
+
+ return buffer.String(), args
+}
+
// getCombinedStatementsQueryClausesAndArgs returns:
// - where clause (filtering by name and aggregates_ts when defined)
// - order and limit clause
@@ -100,7 +288,10 @@ func getCombinedStatementStats(
// The whereClause will be in the format `WHERE A = $1 AND B = $2` and
// args will return the list of arguments in order that will replace the actual values.
func getCombinedStatementsQueryClausesAndArgs(
- start, end *time.Time, limit int64, testingKnobs *sqlstats.TestingKnobs, showInternal bool,
+ req *serverpb.CombinedStatementsStatsRequest,
+ testingKnobs *sqlstats.TestingKnobs,
+ showInternal bool,
+ settings *cluster.Settings,
) (whereClause string, orderAndLimitClause string, args []interface{}) {
var buffer strings.Builder
buffer.WriteString(testingKnobs.GetAOSTClause())
@@ -112,17 +303,37 @@ func getCombinedStatementsQueryClausesAndArgs(
buffer.WriteString(fmt.Sprintf(" WHERE app_name NOT LIKE '%s%%'", catconstants.InternalAppNamePrefix))
}
- if start != nil {
+ // Add start and end filters from request.
+ startTime := getTimeFromSeconds(req.Start)
+ endTime := getTimeFromSeconds(req.End)
+ if startTime != nil {
buffer.WriteString(" AND aggregated_ts >= $1")
- args = append(args, *start)
+ args = append(args, *startTime)
}
- if end != nil {
- args = append(args, *end)
+ if endTime != nil {
+ args = append(args, *endTime)
buffer.WriteString(fmt.Sprintf(" AND aggregated_ts <= $%d", len(args)))
}
+
+ // Add LIMIT from request.
+ limit := req.Limit
+ if limit == 0 {
+ limit = SQLStatsResponseMax.Get(&settings.SV)
+ }
args = append(args, limit)
- orderAndLimitClause = fmt.Sprintf(` ORDER BY aggregated_ts DESC LIMIT $%d`, len(args))
+
+ // Determine sort column.
+ var col string
+ if req.FetchMode == nil {
+ col = "fingerprint_id"
+ } else if req.FetchMode.StatsType == serverpb.CombinedStatementsStatsRequest_StmtStatsOnly {
+ col = getStmtColumnFromSortOption(req.FetchMode.Sort)
+ } else if req.FetchMode.StatsType == serverpb.CombinedStatementsStatsRequest_TxnStatsOnly {
+ col = getTxnColumnFromSortOption(req.FetchMode.Sort)
+ }
+
+ orderAndLimitClause = fmt.Sprintf(` ORDER BY %s LIMIT $%d`, col, len(args))
return buffer.String(), orderAndLimitClause, args
}
@@ -133,28 +344,28 @@ func collectCombinedStatements(
whereClause string,
args []interface{},
orderAndLimit string,
+ testingKnobs *sqlstats.TestingKnobs,
) ([]serverpb.StatementsResponse_CollectedStatementStatistics, error) {
+ aostClause := testingKnobs.GetAOSTClause()
+ query := fmt.Sprintf(`
+SELECT * FROM (
+SELECT
+ fingerprint_id,
+ transaction_fingerprint_id,
+ app_name,
+ max(aggregated_ts) as aggregated_ts,
+ metadata,
+ crdb_internal.merge_statement_stats(array_agg(statistics)) AS statistics
+FROM crdb_internal.statement_statistics_persisted %s
+GROUP BY
+ fingerprint_id,
+ transaction_fingerprint_id,
+ app_name,
+ metadata
+) %s
+%s`, whereClause, aostClause, orderAndLimit)
- query := fmt.Sprintf(
- `SELECT
- fingerprint_id,
- transaction_fingerprint_id,
- app_name,
- max(aggregated_ts) as aggregated_ts,
- metadata,
- crdb_internal.merge_statement_stats(array_agg(statistics)) AS statistics,
- max(sampled_plan) AS sampled_plan,
- aggregation_interval
- FROM crdb_internal.statement_statistics %s
- GROUP BY
- fingerprint_id,
- transaction_fingerprint_id,
- app_name,
- metadata,
- aggregation_interval
- %s`, whereClause, orderAndLimit)
-
- const expectedNumDatums = 8
+ const expectedNumDatums = 6
it, err := ie.QueryIteratorEx(ctx, "combined-stmts-by-interval", nil,
sessiondata.InternalExecutorOverride{
@@ -212,20 +423,10 @@ func collectCombinedStatements(
return nil, serverError(ctx, err)
}
- planJSON := tree.MustBeDJSON(row[6]).JSON
- plan, err := sqlstatsutil.JSONToExplainTreePlanNode(planJSON)
- if err != nil {
- return nil, serverError(ctx, err)
- }
- metadata.Stats.SensitiveInfo.MostRecentPlanDescription = *plan
-
- aggInterval := tree.MustBeDInterval(row[7]).Duration
-
stmt := serverpb.StatementsResponse_CollectedStatementStatistics{
Key: serverpb.StatementsResponse_ExtendedStatementStatisticsKey{
- KeyData: metadata.Key,
- AggregatedTs: aggregatedTs,
- AggregationInterval: time.Duration(aggInterval.Nanos()),
+ KeyData: metadata.Key,
+ AggregatedTs: aggregatedTs,
},
ID: roachpb.StmtFingerprintID(statementFingerprintID),
Stats: metadata.Stats,
@@ -248,25 +449,27 @@ func collectCombinedTransactions(
whereClause string,
args []interface{},
orderAndLimit string,
+ testingKnobs *sqlstats.TestingKnobs,
) ([]serverpb.StatementsResponse_ExtendedCollectedTransactionStatistics, error) {
+ aostClause := testingKnobs.GetAOSTClause()
+
+ query := fmt.Sprintf(`
+SELECT * FROM (
+SELECT
+ app_name,
+ max(aggregated_ts) as aggregated_ts,
+ fingerprint_id,
+ metadata,
+ crdb_internal.merge_transaction_stats(array_agg(statistics)) AS statistics
+FROM crdb_internal.transaction_statistics_persisted %s
+GROUP BY
+ app_name,
+ fingerprint_id,
+ metadata
+) %s
+%s`, whereClause, aostClause, orderAndLimit)
- query := fmt.Sprintf(
- `SELECT
- app_name,
- max(aggregated_ts) as aggregated_ts,
- fingerprint_id,
- metadata,
- crdb_internal.merge_transaction_stats(array_agg(statistics)) AS statistics,
- aggregation_interval
- FROM crdb_internal.transaction_statistics %s
- GROUP BY
- app_name,
- fingerprint_id,
- metadata,
- aggregation_interval
- %s`, whereClause, orderAndLimit)
-
- const expectedNumDatums = 6
+ const expectedNumDatums = 5
it, err := ie.QueryIteratorEx(ctx, "combined-txns-by-interval", nil,
sessiondata.InternalExecutorOverride{
@@ -314,15 +517,12 @@ func collectCombinedTransactions(
return nil, serverError(ctx, err)
}
- aggInterval := tree.MustBeDInterval(row[5]).Duration
-
txnStats := serverpb.StatementsResponse_ExtendedCollectedTransactionStatistics{
StatsData: roachpb.CollectedTransactionStatistics{
StatementFingerprintIDs: metadata.StatementFingerprintIDs,
App: app,
Stats: metadata.Stats,
AggregatedTs: aggregatedTs,
- AggregationInterval: time.Duration(aggInterval.Nanos()),
TransactionFingerprintID: roachpb.TransactionFingerprintID(fingerprintID),
},
}
@@ -482,18 +682,14 @@ func getTotalStatementDetails(
query := fmt.Sprintf(
`SELECT
crdb_internal.merge_stats_metadata(array_agg(metadata)) AS metadata,
- aggregation_interval,
array_agg(app_name) as app_names,
- crdb_internal.merge_statement_stats(array_agg(statistics)) AS statistics,
- max(sampled_plan) as sampled_plan,
- encode(fingerprint_id, 'hex') as fingerprint_id
- FROM crdb_internal.statement_statistics %s
+ crdb_internal.merge_statement_stats(array_agg(statistics)) AS statistics
+ FROM crdb_internal.statement_statistics_persisted %s
GROUP BY
- aggregation_interval,
fingerprint_id
LIMIT 1`, whereClause)
- const expectedNumDatums = 6
+ const expectedNumDatums = 3
var statement serverpb.StatementDetailsResponse_CollectedStatementSummary
row, err := ie.QueryRowEx(ctx, "combined-stmts-details-total", nil,
@@ -519,34 +715,23 @@ func getTotalStatementDetails(
return statement, serverError(ctx, err)
}
- aggInterval := tree.MustBeDInterval(row[1]).Duration
-
- apps := tree.MustBeDArray(row[2])
+ apps := tree.MustBeDArray(row[1])
var appNames []string
for _, s := range apps.Array {
appNames = util.CombineUniqueString(appNames, []string{string(tree.MustBeDString(s))})
}
aggregatedMetadata.AppNames = appNames
- statsJSON := tree.MustBeDJSON(row[3]).JSON
+ statsJSON := tree.MustBeDJSON(row[2]).JSON
if err = sqlstatsutil.DecodeStmtStatsStatisticsJSON(statsJSON, &statistics.Stats); err != nil {
return statement, serverError(ctx, err)
}
- planJSON := tree.MustBeDJSON(row[4]).JSON
- plan, err := sqlstatsutil.JSONToExplainTreePlanNode(planJSON)
- if err != nil {
- return statement, serverError(ctx, err)
- }
- statistics.Stats.SensitiveInfo.MostRecentPlanDescription = *plan
-
aggregatedMetadata.FormattedQuery = aggregatedMetadata.Query
- aggregatedMetadata.FingerprintID = string(tree.MustBeDString(row[5]))
statement = serverpb.StatementDetailsResponse_CollectedStatementSummary{
- Metadata: aggregatedMetadata,
- AggregationInterval: time.Duration(aggInterval.Nanos()),
- Stats: statistics.Stats,
+ Metadata: aggregatedMetadata,
+ Stats: statistics.Stats,
}
return statement, nil
@@ -566,18 +751,15 @@ func getStatementDetailsPerAggregatedTs(
`SELECT
aggregated_ts,
crdb_internal.merge_stats_metadata(array_agg(metadata)) AS metadata,
- crdb_internal.merge_statement_stats(array_agg(statistics)) AS statistics,
- max(sampled_plan) as sampled_plan,
- aggregation_interval
- FROM crdb_internal.statement_statistics %s
+ crdb_internal.merge_statement_stats(array_agg(statistics)) AS statistics
+ FROM crdb_internal.statement_statistics_persisted %s
GROUP BY
- aggregated_ts,
- aggregation_interval
+ aggregated_ts
ORDER BY aggregated_ts ASC
LIMIT $%d`, whereClause, len(args)+1)
args = append(args, limit)
- const expectedNumDatums = 5
+ const expectedNumDatums = 3
it, err := ie.QueryIteratorEx(ctx, "combined-stmts-details-by-aggregated-timestamp", nil,
sessiondata.InternalExecutorOverride{
@@ -621,20 +803,10 @@ func getStatementDetailsPerAggregatedTs(
return nil, serverError(ctx, err)
}
- planJSON := tree.MustBeDJSON(row[3]).JSON
- plan, err := sqlstatsutil.JSONToExplainTreePlanNode(planJSON)
- if err != nil {
- return nil, serverError(ctx, err)
- }
- metadata.Stats.SensitiveInfo.MostRecentPlanDescription = *plan
-
- aggInterval := tree.MustBeDInterval(row[4]).Duration
-
stmt := serverpb.StatementDetailsResponse_CollectedStatementGroupedByAggregatedTs{
- AggregatedTs: aggregatedTs,
- AggregationInterval: time.Duration(aggInterval.Nanos()),
- Stats: metadata.Stats,
- Metadata: aggregatedMetadata,
+ AggregatedTs: aggregatedTs,
+ Stats: metadata.Stats,
+ Metadata: aggregatedMetadata,
}
statements = append(statements, stmt)
@@ -695,18 +867,15 @@ func getStatementDetailsPerPlanHash(
plan_hash,
(statistics -> 'statistics' -> 'planGists'->>0) as plan_gist,
crdb_internal.merge_stats_metadata(array_agg(metadata)) AS metadata,
- crdb_internal.merge_statement_stats(array_agg(statistics)) AS statistics,
- max(sampled_plan) as sampled_plan,
- aggregation_interval
- FROM crdb_internal.statement_statistics %s
+ crdb_internal.merge_statement_stats(array_agg(statistics)) AS statistics
+ FROM crdb_internal.statement_statistics_persisted %s
GROUP BY
plan_hash,
- plan_gist,
- aggregation_interval
+ plan_gist
LIMIT $%d`, whereClause, len(args)+1)
args = append(args, limit)
- const expectedNumDatums = 6
+ const expectedNumDatums = 4
it, err := ie.QueryIteratorEx(ctx, "combined-stmts-details-by-plan-hash", nil,
sessiondata.InternalExecutorOverride{
@@ -758,14 +927,6 @@ func getStatementDetailsPerPlanHash(
return nil, serverError(ctx, err)
}
- planJSON := tree.MustBeDJSON(row[4]).JSON
- plan, err := sqlstatsutil.JSONToExplainTreePlanNode(planJSON)
- if err != nil {
- return nil, serverError(ctx, err)
- }
- metadata.Stats.SensitiveInfo.MostRecentPlanDescription = *plan
- aggInterval := tree.MustBeDInterval(row[5]).Duration
-
// A metadata is unique for each plan, meaning if any of the counts are greater than zero,
// we can update the value of each count with the execution count of this plan hash to
// have the correct count of each metric.
@@ -784,11 +945,10 @@ func getStatementDetailsPerPlanHash(
aggregatedMetadata.TotalCount = metadata.Stats.Count
stmt := serverpb.StatementDetailsResponse_CollectedStatementGroupedByPlanHash{
- AggregationInterval: time.Duration(aggInterval.Nanos()),
- ExplainPlan: explainPlan,
- PlanHash: planHash,
- Stats: metadata.Stats,
- Metadata: aggregatedMetadata,
+ ExplainPlan: explainPlan,
+ PlanHash: planHash,
+ Stats: metadata.Stats,
+ Metadata: aggregatedMetadata,
}
statements = append(statements, stmt)
diff --git a/pkg/server/serverpb/status.proto b/pkg/server/serverpb/status.proto
index 43d341477bfe..685463de2254 100644
--- a/pkg/server/serverpb/status.proto
+++ b/pkg/server/serverpb/status.proto
@@ -1452,12 +1452,50 @@ message StatementsResponse {
// Transactions is transaction-level statistics for the collection of
// statements in this response.
repeated ExtendedCollectedTransactionStatistics transactions = 5 [(gogoproto.nullable) = false];
+
+ float stmts_total_runtime_secs = 6;
+
+ float txns_total_runtime_secs = 7;
+}
+
+enum StatsSortOptions {
+ SERVICE_LAT = 0;
+ reserved 1; // This is for CPU Time in 23.1
+ EXECUTION_COUNT = 2;
+ reserved 3; // This is for P99 in 23.1
+ CONTENTION_TIME = 4;
+ PCT_RUNTIME = 5;
}
message CombinedStatementsStatsRequest {
+ enum StatsType {
+ StmtStatsOnly = 0;
+ TxnStatsOnly = 1;
+ }
+
+ message FetchMode {
+ StatsType stats_type = 1;
+ StatsSortOptions sort = 2;
+ }
+
// Unix time range for aggregated statements.
int64 start = 1 [(gogoproto.nullable) = true];
int64 end = 2 [(gogoproto.nullable) = true];
+
+ // Note that if fetch_mode is set to transactions only, we will also
+ // include the statement statistics for the stmts in the transactions
+ // response. This is more of a hack-y method to get the complete stats
+ // for txns, because in the client we need to fill in some txn stats info
+ // from its stmt stats, such as the query string.
+ //
+ // We prefer this hackier method right now to reduce surface area for backporting
+ // these changes, but in the future we will introduce more endpoints to properly
+ // organize these differing requests.
+ // TODO (xinhaoz) - Split this API into stmts and txns properly instead of using
+ // this param.
+ FetchMode fetch_mode = 5 [(gogoproto.nullable) = true];
+
+ int64 limit = 6;
}
// StatementDetailsRequest requests the details of a Statement, based on its keys.
@@ -2051,6 +2089,7 @@ service Status {
get: "/_status/combinedstmts"
};
}
+
rpc StatementDetails(StatementDetailsRequest) returns (StatementDetailsResponse) {
option (google.api.http) = {
get: "/_status/stmtdetails/{fingerprint_id}"
diff --git a/pkg/server/stats_test.go b/pkg/server/stats_test.go
index bedca43947fb..03858e8ba579 100644
--- a/pkg/server/stats_test.go
+++ b/pkg/server/stats_test.go
@@ -325,7 +325,7 @@ func TestClusterResetSQLStats(t *testing.T) {
}
statsPreReset, err := status.Statements(ctx, &serverpb.StatementsRequest{
- Combined: true,
+ Combined: flushed,
})
require.NoError(t, err)
@@ -339,7 +339,7 @@ func TestClusterResetSQLStats(t *testing.T) {
require.NoError(t, err)
statsPostReset, err := status.Statements(ctx, &serverpb.StatementsRequest{
- Combined: true,
+ Combined: flushed,
})
require.NoError(t, err)
diff --git a/pkg/server/status_test.go b/pkg/server/status_test.go
index d5a6e820e844..068b122b7cbe 100644
--- a/pkg/server/status_test.go
+++ b/pkg/server/status_test.go
@@ -49,6 +49,7 @@ import (
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/sql/sqlstats"
+ "github.com/cockroachdb/cockroach/pkg/sql/sqlstats/persistedsqlstats"
"github.com/cockroachdb/cockroach/pkg/sql/tests"
"github.com/cockroachdb/cockroach/pkg/storage/enginepb"
"github.com/cockroachdb/cockroach/pkg/testutils"
@@ -1617,6 +1618,9 @@ func TestStatusAPICombinedTransactions(t *testing.T) {
}
}
+ // Flush stats, as combinedstmts reads only from system tables.
+ thirdServer.SQLServer().(*sql.Server).GetSQLStatsProvider().(*persistedsqlstats.PersistedSQLStats).Flush(ctx)
+
// Hit query endpoint.
var resp serverpb.StatementsResponse
if err := getStatusJSONProto(firstServerProto, "combinedstmts", &resp); err != nil {
@@ -1989,6 +1993,8 @@ func TestStatusAPICombinedStatements(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
+ ctx := context.Background()
+
// Aug 30 2021 19:50:00 GMT+0000
aggregatedTs := int64(1630353000)
testCluster := serverutils.StartNewTestCluster(t, 3, base.TestClusterArgs{
@@ -2027,6 +2033,8 @@ func TestStatusAPICombinedStatements(t *testing.T) {
thirdServerSQL.Exec(t, stmt.stmt)
}
+ testCluster.Server(2).SQLServer().(*sql.Server).GetSQLStatsProvider().(*persistedsqlstats.PersistedSQLStats).Flush(ctx)
+
var resp serverpb.StatementsResponse
// Test that non-admin without VIEWACTIVITY privileges cannot access.
err := getStatusJSONProtoWithAdminOption(firstServerProto, "combinedstmts", &resp, false)
@@ -2034,7 +2042,7 @@ func TestStatusAPICombinedStatements(t *testing.T) {
t.Fatalf("expected privilege error, got %v", err)
}
- testPath := func(path string, expectedStmts []string) {
+ verifyStmts := func(path string, expectedStmts []string, hasTxns bool, t *testing.T) {
// Hit query endpoint.
if err := getStatusJSONProtoWithAdminOption(firstServerProto, path, &resp, false); err != nil {
t.Fatal(err)
@@ -2042,6 +2050,7 @@ func TestStatusAPICombinedStatements(t *testing.T) {
// See if the statements returned are what we executed.
var statementsInResponse []string
+ expectedTxnFingerprints := map[roachpb.TransactionFingerprintID]struct{}{}
for _, respStatement := range resp.Statements {
if respStatement.Key.KeyData.Failed {
// We ignore failed statements here as the INSERT statement can fail and
@@ -2058,14 +2067,24 @@ func TestStatusAPICombinedStatements(t *testing.T) {
}
statementsInResponse = append(statementsInResponse, respStatement.Key.KeyData.Query)
+ expectedTxnFingerprints[respStatement.Key.KeyData.TransactionFingerprintID] = struct{}{}
+ }
+
+ for _, respTxn := range resp.Transactions {
+ delete(expectedTxnFingerprints, respTxn.StatsData.TransactionFingerprintID)
}
sort.Strings(expectedStmts)
sort.Strings(statementsInResponse)
if !reflect.DeepEqual(expectedStmts, statementsInResponse) {
- t.Fatalf("expected queries\n\n%v\n\ngot queries\n\n%v\n%s",
- expectedStmts, statementsInResponse, pretty.Sprint(resp))
+ t.Fatalf("expected queries\n\n%v\n\ngot queries\n\n%v\n%s\n path: %s",
+ expectedStmts, statementsInResponse, pretty.Sprint(resp), path)
+ }
+ if hasTxns {
+ assert.Empty(t, expectedTxnFingerprints)
+ } else {
+ assert.Empty(t, resp.Transactions)
}
}
@@ -2078,33 +2097,65 @@ func TestStatusAPICombinedStatements(t *testing.T) {
expectedStatements = append(expectedStatements, expectedStmt)
}
- // Grant VIEWACTIVITY.
- thirdServerSQL.Exec(t, fmt.Sprintf("ALTER USER %s VIEWACTIVITY", authenticatedUserNameNoAdmin().Normalized()))
+ oneMinAfterAggregatedTs := aggregatedTs + 60
- // Test with no query params.
- testPath("combinedstmts", expectedStatements)
+ t.Run("fetch_mode=combined, VIEWACTIVITY", func(t *testing.T) {
+ // Grant VIEWACTIVITY.
+ thirdServerSQL.Exec(t, fmt.Sprintf("ALTER USER %s VIEWACTIVITY", authenticatedUserNameNoAdmin().Normalized()))
+
+ // Test with no query params.
+ verifyStmts("combinedstmts", expectedStatements, true, t)
+ // Test with end = 1 min after aggregatedTs; should give the same results as get all.
+ verifyStmts(fmt.Sprintf("combinedstmts?end=%d", oneMinAfterAggregatedTs), expectedStatements, true, t)
+ // Test with start = 1 hour before aggregatedTs end = 1 min after aggregatedTs; should give same results as get all.
+ verifyStmts(fmt.Sprintf("combinedstmts?start=%d&end=%d", aggregatedTs-3600, oneMinAfterAggregatedTs),
+ expectedStatements, true, t)
+ // Test with start = 1 min after aggregatedTs; should give no results
+ verifyStmts(fmt.Sprintf("combinedstmts?start=%d", oneMinAfterAggregatedTs), nil, true, t)
+ })
- oneMinAfterAggregatedTs := aggregatedTs + 60
- // Test with end = 1 min after aggregatedTs; should give the same results as get all.
- testPath(fmt.Sprintf("combinedstmts?end=%d", oneMinAfterAggregatedTs), expectedStatements)
- // Test with start = 1 hour before aggregatedTs end = 1 min after aggregatedTs; should give same results as get all.
- testPath(fmt.Sprintf("combinedstmts?start=%d&end=%d", aggregatedTs-3600, oneMinAfterAggregatedTs), expectedStatements)
- // Test with start = 1 min after aggregatedTs; should give no results
- testPath(fmt.Sprintf("combinedstmts?start=%d", oneMinAfterAggregatedTs), nil)
+ t.Run("fetch_mode=combined, VIEWACTIVITYREDACTED", func(t *testing.T) {
+ // Remove VIEWACTIVITY so we can test with just the VIEWACTIVITYREDACTED role.
+ thirdServerSQL.Exec(t, fmt.Sprintf("ALTER USER %s NOVIEWACTIVITY", authenticatedUserNameNoAdmin().Normalized()))
+ // Grant VIEWACTIVITYREDACTED.
+ thirdServerSQL.Exec(t, fmt.Sprintf("ALTER USER %s VIEWACTIVITYREDACTED", authenticatedUserNameNoAdmin().Normalized()))
+
+ // Test with no query params.
+ verifyStmts("combinedstmts", expectedStatements, true, t)
+ // Test with end = 1 min after aggregatedTs; should give the same results as get all.
+ verifyStmts(fmt.Sprintf("combinedstmts?end=%d", oneMinAfterAggregatedTs), expectedStatements, true, t)
+ // Test with start = 1 hour before aggregatedTs end = 1 min after aggregatedTs; should give same results as get all.
+ verifyStmts(fmt.Sprintf("combinedstmts?start=%d&end=%d", aggregatedTs-3600, oneMinAfterAggregatedTs), expectedStatements, true, t)
+ // Test with start = 1 min after aggregatedTs; should give no results
+ verifyStmts(fmt.Sprintf("combinedstmts?start=%d", oneMinAfterAggregatedTs), nil, true, t)
+ })
- // Remove VIEWACTIVITY so we can test with just the VIEWACTIVITYREDACTED role.
- thirdServerSQL.Exec(t, fmt.Sprintf("ALTER USER %s NOVIEWACTIVITY", authenticatedUserNameNoAdmin().Normalized()))
- // Grant VIEWACTIVITYREDACTED.
- thirdServerSQL.Exec(t, fmt.Sprintf("ALTER USER %s VIEWACTIVITYREDACTED", authenticatedUserNameNoAdmin().Normalized()))
+ t.Run("fetch_mode=StmtsOnly", func(t *testing.T) {
+ verifyStmts("combinedstmts?fetch_mode.stats_type=0", expectedStatements, false, t)
+ })
- // Test with no query params.
- testPath("combinedstmts", expectedStatements)
- // Test with end = 1 min after aggregatedTs; should give the same results as get all.
- testPath(fmt.Sprintf("combinedstmts?end=%d", oneMinAfterAggregatedTs), expectedStatements)
- // Test with start = 1 hour before aggregatedTs end = 1 min after aggregatedTs; should give same results as get all.
- testPath(fmt.Sprintf("combinedstmts?start=%d&end=%d", aggregatedTs-3600, oneMinAfterAggregatedTs), expectedStatements)
- // Test with start = 1 min after aggregatedTs; should give no results
- testPath(fmt.Sprintf("combinedstmts?start=%d", oneMinAfterAggregatedTs), nil)
+ t.Run("fetch_mode=TxnsOnly with limit", func(t *testing.T) {
+ // Verify that we only return stmts for the txns in the response.
+ // We set a limit in the request to help verify this behaviour.
+ if err := getStatusJSONProtoWithAdminOption(firstServerProto, "combinedstmts?fetch_mode.stats_type=1&limit=2",
+ &resp, false); err != nil {
+ t.Fatal(err)
+ }
+
+ assert.Equal(t, 2, len(resp.Transactions))
+ stmtFingerprintIDs := map[roachpb.StmtFingerprintID]struct{}{}
+ for _, txn := range resp.Transactions {
+ for _, stmtFingerprint := range txn.StatsData.StatementFingerprintIDs {
+ stmtFingerprintIDs[stmtFingerprint] = struct{}{}
+ }
+ }
+
+ for _, stmt := range resp.Statements {
+ if _, ok := stmtFingerprintIDs[stmt.ID]; !ok {
+ t.Fatalf("unexpected stmt; stmt unrelated to a txn in the response: %s", stmt.Key.KeyData.Query)
+ }
+ }
+ })
}
func TestStatusAPIStatementDetails(t *testing.T) {
@@ -2113,6 +2164,8 @@ func TestStatusAPIStatementDetails(t *testing.T) {
// The liveness session might expire before the stress race can finish.
skip.UnderStressRace(t, "expensive tests")
+ ctx := context.Background()
+
// Aug 30 2021 19:50:00 GMT+0000
aggregatedTs := int64(1630353000)
testCluster := serverutils.StartNewTestCluster(t, 3, base.TestClusterArgs{
@@ -2147,6 +2200,7 @@ func TestStatusAPIStatementDetails(t *testing.T) {
for _, stmt := range statements {
thirdServerSQL.Exec(t, stmt)
}
+
query := `INSERT INTO posts VALUES (_, '_')`
fingerprintID := roachpb.ConstructStatementFingerprintID(query,
false, true, `roachblog`)
@@ -2170,6 +2224,9 @@ func TestStatusAPIStatementDetails(t *testing.T) {
}
testPath := func(path string, expected resultValues) {
+ // Need to flush since this endpoint reads only flushed data.
+ testCluster.Server(2).SQLServer().(*sql.Server).GetSQLStatsProvider().(*persistedsqlstats.PersistedSQLStats).Flush(ctx)
+
err := getStatusJSONProtoWithAdminOption(firstServerProto, path, &resp, false)
require.NoError(t, err)
require.Equal(t, int64(expected.totalCount), resp.Statement.Stats.Count)
diff --git a/pkg/ui/workspaces/cluster-ui/src/api/statementsApi.spec.ts b/pkg/ui/workspaces/cluster-ui/src/api/statementsApi.spec.ts
new file mode 100644
index 000000000000..8814e6cc2bde
--- /dev/null
+++ b/pkg/ui/workspaces/cluster-ui/src/api/statementsApi.spec.ts
@@ -0,0 +1,433 @@
+// Copyright 2023 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+import Long from "long";
+import {
+ createCombinedStmtsRequest,
+ getCombinedStatements,
+ getFlushedTxnStatsApi,
+ SqlStatsSortOptions,
+ SqlStatsSortType,
+} from "./statementsApi";
+import { mockStmtStats, mockTxnStats } from "./testUtils";
+import * as fetchData from "./fetchData";
+import { cockroach } from "@cockroachlabs/crdb-protobuf-client";
+import { shuffle } from "lodash";
+
+type Stmt = cockroach.server.serverpb.StatementsResponse.ICollectedStatementStatistics;
+type Txn = cockroach.server.serverpb.StatementsResponse.IExtendedCollectedTransactionStatistics;
+
+const mockReturnVal = (stmts: Stmt[], txns: Txn[]) => {
+ jest.spyOn(fetchData, "fetchData").mockReturnValue(
+ Promise.resolve(
+ new cockroach.server.serverpb.StatementsResponse({
+ statements: stmts,
+ transactions: txns,
+ }),
+ ),
+ );
+};
+
+type GeneratorFn<T> = (length: number, i: number) => Partial<T>;
+
+function createStmtsOverLoop(
+ length: number,
+ stmtGenerator: GeneratorFn<Stmt>,
+): Stmt[] {
+ return Array.from(new Array(length)).map((_, i) =>
+ mockStmtStats(stmtGenerator(length, i)),
+ );
+}
+
+function createTxnsOverLoop(
+ length: number,
+ generator: GeneratorFn<Txn>,
+): Txn[] {
+ return Array.from(new Array(length)).map((_, i) =>
+ mockTxnStats(generator(length, i)),
+ );
+}
+
+describe("getCombinedStatements", () => {
+ afterAll(() => {
+ jest.resetModules();
+ });
+
+ it("truncate response when the payload does not adhere to the limit from the request", async () => {
+ const tests = [
+ { limit: 50, respSize: 100 },
+ { limit: 10, respSize: 10 }, // No truncation occurs.
+ { limit: 10, respSize: 11 },
+ { limit: 100, respSize: 11 }, // No truncation occurs.
+ ];
+
+ for (const tc of tests) {
+ const req = createCombinedStmtsRequest({
+ limit: tc.limit,
+ sort: SqlStatsSortOptions.EXECUTION_COUNT,
+ start: null,
+ end: null,
+ });
+
+ const stmts: Stmt[] = [];
+ const txns: Txn[] = [];
+
+ for (let i = 1; i <= tc.respSize; ++i) {
+ stmts.push(mockStmtStats({ id: Long.fromInt(i) }));
+ txns.push(
+ mockTxnStats({
+ stats_data: { transaction_fingerprint_id: Long.fromInt(i) },
+ }),
+ );
+ }
+
+ mockReturnVal(stmts, txns);
+
+ const res = await getCombinedStatements(req);
+
+ const expectedLen = tc.limit > tc.respSize ? tc.respSize : tc.limit;
+ expect(res?.statements?.length).toBe(expectedLen);
+
+ // Transactions half should have been discarded, regardless of whether we have truncated.
+ expect(res?.transactions?.length).toBe(0);
+ }
+ });
+
+ // Each test case will take a list of statements ordered by the provided
+ // sort value. The test will shuffle the given array to use as the mocked
+ // return value, and verify that the truncated list is ordered by the sort.
+ it.each([
+ [
+ "EXECUTION_COUNT",
+ createStmtsOverLoop(100, (length, i) => ({
+ id: Long.fromInt(i),
+ stats: { count: Long.fromInt(length - i) },
+ })),
+ SqlStatsSortOptions.EXECUTION_COUNT,
+ ],
+ [
+ "CONTENTION_TIME",
+ createStmtsOverLoop(100, (length, i) => ({
+ id: Long.fromInt(i),
+ stats: {
+ count: Long.fromInt(i),
+ exec_stats: {
+ contention_time: {
+ mean: length - i,
+ squared_diffs: 0,
+ },
+ },
+ },
+ })),
+ SqlStatsSortOptions.CONTENTION_TIME,
+ ],
+ [
+ "SVC_LAT",
+ createStmtsOverLoop(100, (length, i) => ({
+ id: Long.fromInt(i),
+ stats: {
+ count: Long.fromInt(i),
+ service_lat: {
+ mean: length - i,
+ squared_diffs: 0,
+ },
+ },
+ })),
+ SqlStatsSortOptions.SERVICE_LAT,
+ ],
+ [
+ "PCT_RUNTIME",
+ createStmtsOverLoop(100, (length, i) => ({
+ id: Long.fromInt(i),
+ stats: {
+ count: Long.fromInt(5),
+ service_lat: { mean: length - i, squared_diffs: 0 },
+ },
+ })),
+ SqlStatsSortOptions.PCT_RUNTIME,
+ ],
+ ])(
+ "sorts data by requested option before truncating > %s",
+ async (_name: string, stmtsOrdered: Stmt[], sortBy: SqlStatsSortType) => {
+ const shuffledStmts = shuffle(stmtsOrdered);
+
+ mockReturnVal(shuffledStmts, null);
+
+ const limit = Math.floor(stmtsOrdered.length / 2);
+ const req = createCombinedStmtsRequest({
+ limit,
+ sort: sortBy,
+ start: null,
+ end: null,
+ });
+
+ const res = await getCombinedStatements(req);
+
+ expect(res.statements.length).toEqual(limit);
+
+ res.statements.forEach((stmt, i) =>
+ expect(stmt.id.toInt()).toEqual(stmtsOrdered[i].id.toInt()),
+ );
+ },
+ );
+});
+
+describe("getFlushedTxnStatsApi", () => {
+ afterAll(() => {
+ jest.resetModules();
+ });
+
+ it("truncates response when the payload does not adhere to the limit from the request", async () => {
+ const tests = [
+ { limit: 50, respSize: 100, txnIDForStmts: 20 },
+ { limit: 20, respSize: 56, txnIDForStmts: 57 },
+ { limit: 10, respSize: 10, txnIDForStmts: 1 }, // No truncation occurs.
+ { limit: 10, respSize: 11, txnIDForStmts: 11 },
+ { limit: 100, respSize: 11, txnIDForStmts: 23 }, // No truncation occurs.
+ ];
+
+ for (const tc of tests) {
+ const stmts: Stmt[] = [];
+ const txns: Txn[] = [];
+
+ for (let i = 1; i <= tc.respSize; ++i) {
+ stmts.push(
+ mockStmtStats({
+ id: Long.fromInt(i),
+ key: {
+ key_data: {
+ transaction_fingerprint_id: Long.fromInt(tc.txnIDForStmts),
+ },
+ },
+ }),
+ );
+ txns.push(
+ mockTxnStats({
+ stats_data: {
+ transaction_fingerprint_id: Long.fromInt(i),
+ stats: {
+ count: Long.fromInt(tc.respSize - i),
+ },
+ },
+ }),
+ );
+ }
+
+ mockReturnVal(stmts, txns);
+
+ const req = createCombinedStmtsRequest({
+ limit: tc.limit,
+ sort: SqlStatsSortOptions.EXECUTION_COUNT,
+ start: null,
+ end: null,
+ });
+ const res = await getFlushedTxnStatsApi(req);
+
+ const expectedLen = tc.limit > tc.respSize ? tc.respSize : tc.limit;
+ expect(res?.transactions?.length).toBe(expectedLen);
+
+ if (tc.txnIDForStmts > tc.limit && tc.respSize > tc.limit) {
+ // No txn in the resp will have this ID and so we'll expect nothing in the
+ // transformed stmts response.
+ expect(res?.statements?.length).toBe(0);
+ } else {
+ // For this test we assign the txn id for all stmts to be either in the
+ // response or not, so we either get the entire stmts resp or none.
+ expect(res?.statements?.length).toBe(tc.respSize);
+ }
+ }
+ });
+
+ it("should filter out stmts that don't match any txn id if truncation occurs", async () => {
+ const tests = [
+ {
+ stmts: [
+ { stmtID: 1, txnID: 1 },
+ { stmtID: 2, txnID: 3 },
+ { stmtID: 3, txnID: 5 },
+ { stmtID: 4, txnID: 7 },
+ { stmtID: 5, txnID: 9 },
+ { stmtID: 8, txnID: 11 },
+ ],
+ txnIDs: [3, 9, 7, 8],
+ expectedStmts: [2, 4, 5],
+ },
+ {
+ stmts: [
+ { stmtID: 1, txnID: 8 },
+ { stmtID: 2, txnID: 8 },
+ { stmtID: 3, txnID: 8 },
+ { stmtID: 4, txnID: 8 },
+ { stmtID: 5, txnID: 8 },
+ { stmtID: 8, txnID: 8 },
+ ],
+ txnIDs: [3, 9, 7, 5],
+ expectedStmts: [],
+ },
+ {
+ stmts: [
+ { stmtID: 1, txnID: 1 },
+ { stmtID: 2, txnID: 1 },
+ { stmtID: 3, txnID: 1 },
+ { stmtID: 4, txnID: 2 },
+ { stmtID: 5, txnID: 3 },
+ { stmtID: 6, txnID: 4 },
+ ],
+ txnIDs: [1, 2, 3, 4],
+ expectedStmts: [1, 2, 3, 4, 5, 6],
+ },
+ ];
+
+ for (const tc of tests) {
+ const stmts: Stmt[] = [];
+ const txns: Txn[] = [];
+
+ for (let i = 0; i < tc.stmts.length; ++i) {
+ stmts.push(
+ mockStmtStats({
+ id: Long.fromInt(tc.stmts[i].stmtID),
+ key: {
+ key_data: {
+ transaction_fingerprint_id: Long.fromInt(tc.stmts[i].txnID),
+ },
+ },
+ }),
+ );
+ }
+ for (let i = 0; i <= tc.txnIDs.length; ++i) {
+ let txnID: number;
+ let count: number;
+ if (i === tc.txnIDs.length) {
+ // This additional txn will trigger truncation.
+ txnID = 10000000000;
+ count = -1;
+ } else {
+ txnID = tc.txnIDs[i];
+ count = tc.txnIDs.length - i;
+ }
+ txns.push(
+ mockTxnStats({
+ stats_data: {
+ transaction_fingerprint_id: Long.fromInt(txnID),
+ stats: {
+ count: Long.fromInt(count),
+ },
+ },
+ }),
+ );
+ }
+
+ mockReturnVal(stmts, txns);
+
+ const req = createCombinedStmtsRequest({
+ limit: tc.txnIDs.length,
+ sort: SqlStatsSortOptions.EXECUTION_COUNT,
+ start: null,
+ end: null,
+ });
+ const res = await getFlushedTxnStatsApi(req);
+
+ res.statements.sort((stmtA, stmtB) => stmtA.id.comp(stmtB.id));
+ expect(res.statements.map(stmt => stmt.id.toInt())).toEqual(
+ tc.expectedStmts,
+ );
+ }
+ });
+
+ // Each test case will take a list of transactions ordered by the provided
+ // sort value. The test will shuffle the given array to use as the mocked
+ // return value, and verify that the truncated list is ordered by the sort.
+ it.each([
+ [
+ "EXECUTION_COUNT",
+ createTxnsOverLoop(100, (length, i) => ({
+ stats_data: {
+ transaction_fingerprint_id: Long.fromInt(i),
+ stats: { count: Long.fromInt(length - i) },
+ },
+ })),
+ SqlStatsSortOptions.EXECUTION_COUNT,
+ ],
+ [
+ "CONTENTION_TIME",
+ createTxnsOverLoop(100, (length, i) => ({
+ stats_data: {
+ transaction_fingerprint_id: Long.fromInt(i),
+ stats: {
+ count: Long.fromInt(i),
+ exec_stats: {
+ contention_time: {
+ mean: length - i,
+ squared_diffs: 0,
+ },
+ },
+ },
+ },
+ })),
+ SqlStatsSortOptions.CONTENTION_TIME,
+ ],
+ [
+ "SVC_LAT",
+ createTxnsOverLoop(100, (length, i) => ({
+ stats_data: {
+ transaction_fingerprint_id: Long.fromInt(i),
+ stats: {
+ count: Long.fromInt(i),
+ service_lat: {
+ mean: length - i,
+ squared_diffs: 0,
+ },
+ },
+ },
+ })),
+ SqlStatsSortOptions.SERVICE_LAT,
+ ],
+ [
+ "PCT_RUNTIME",
+ createTxnsOverLoop(100, (length, i) => ({
+ stats_data: {
+ transaction_fingerprint_id: Long.fromInt(i),
+ stats: {
+ count: Long.fromInt(2),
+ service_lat: {
+ mean: length - i,
+ squared_diffs: 0,
+ },
+ },
+ },
+ })),
+ SqlStatsSortOptions.PCT_RUNTIME,
+ ],
+ ])(
+ "sorts data by requested option before truncating > %s",
+ async (_name: string, txnsOrdered: Txn[], sortBy: SqlStatsSortType) => {
+ const shuffledTxns = shuffle(txnsOrdered);
+
+ mockReturnVal(null, shuffledTxns);
+
+ const limit = Math.floor(txnsOrdered.length / 2);
+ const req = createCombinedStmtsRequest({
+ limit,
+ sort: sortBy,
+ start: null,
+ end: null,
+ });
+
+ const res = await getFlushedTxnStatsApi(req);
+
+ expect(res.transactions.length).toEqual(limit);
+
+ res.transactions.forEach((txn, i) =>
+ expect(txn.stats_data.transaction_fingerprint_id.toInt()).toEqual(
+ txnsOrdered[i].stats_data.transaction_fingerprint_id.toInt(),
+ ),
+ );
+ },
+ );
+});
diff --git a/pkg/ui/workspaces/cluster-ui/src/api/statementsApi.ts b/pkg/ui/workspaces/cluster-ui/src/api/statementsApi.ts
index f21983553927..23a901d1a873 100644
--- a/pkg/ui/workspaces/cluster-ui/src/api/statementsApi.ts
+++ b/pkg/ui/workspaces/cluster-ui/src/api/statementsApi.ts
@@ -9,8 +9,10 @@
// licenses/APL.txt.
import { cockroach } from "@cockroachlabs/crdb-protobuf-client";
-import { fetchData } from "src/api";
+import { fetchData } from "src/api/fetchData";
import { propsToQueryString } from "src/util";
+import Long from "long";
+import moment from "moment";
const STATEMENTS_PATH = "/_status/combinedstmts";
const STATEMENT_DETAILS_PATH = "/_status/stmtdetails";
@@ -24,25 +26,224 @@ export type StatementDetailsResponseWithKey = {
key: string;
};
+export type SqlStatsResponse = cockroach.server.serverpb.StatementsResponse;
+export const SqlStatsSortOptions = cockroach.server.serverpb.StatsSortOptions;
+export type SqlStatsSortType = cockroach.server.serverpb.StatsSortOptions;
+
+type Stmt = cockroach.server.serverpb.StatementsResponse.ICollectedStatementStatistics;
+type Txn = cockroach.server.serverpb.StatementsResponse.IExtendedCollectedTransactionStatistics;
+
+const FetchStatsMode =
+ cockroach.server.serverpb.CombinedStatementsStatsRequest.StatsType;
+
export type ErrorWithKey = {
err: Error;
key: string;
};
+export const DEFAULT_STATS_REQ_OPTIONS = {
+ limit: 100,
+ sort: SqlStatsSortOptions.SERVICE_LAT,
+};
+
+// The required fields to create a stmts request.
+type StmtReqFields = {
+ limit: number;
+ sort: SqlStatsSortType;
+ start: moment.Moment | null;
+ end: moment.Moment | null;
+};
+
+export function createCombinedStmtsRequest({
+ limit,
+ sort,
+ start,
+ end,
+}: StmtReqFields): StatementsRequest {
+ return new cockroach.server.serverpb.CombinedStatementsStatsRequest({
+ start: start != null ? Long.fromNumber(start.unix()) : null,
+ end: end != null ? Long.fromNumber(end.unix()) : null,
+ limit: Long.fromNumber(limit ?? DEFAULT_STATS_REQ_OPTIONS.limit),
+ fetch_mode: new cockroach.server.serverpb.CombinedStatementsStatsRequest.FetchMode(
+ {
+ sort: sort,
+ },
+ ),
+ });
+}
+
+// Mutates the sqlstats response to conform to the provided sort and limit params.
+export function sortAndTruncateStmtsResponse(
+ res: SqlStatsResponse,
+ sort: SqlStatsSortType,
+ limit: number,
+): void {
+ // Discard txn half of the response. This is a little wasteful but the
+ // cleanest and least complex way of handling this scenario.
+ res.transactions = [];
+
+ switch (sort) {
+ case SqlStatsSortOptions.SERVICE_LAT:
+ res.statements?.sort((stmtA: Stmt, stmtB: Stmt): number => {
+ return stmtB.stats.service_lat.mean - stmtA.stats.service_lat.mean;
+ });
+ break;
+ case SqlStatsSortOptions.CONTENTION_TIME:
+ res.statements?.sort((stmtA: Stmt, stmtB: Stmt): number => {
+ return (
+ stmtB.stats.exec_stats.contention_time.mean -
+ stmtA.stats.exec_stats.contention_time.mean
+ );
+ });
+ break;
+ case SqlStatsSortOptions.EXECUTION_COUNT:
+ res.statements?.sort((stmtA: Stmt, stmtB: Stmt): number => {
+ return stmtB.stats.count.toInt() - stmtA.stats.count.toInt();
+ });
+ break;
+ case SqlStatsSortOptions.PCT_RUNTIME:
+ default:
+ res.statements?.sort((stmtA: Stmt, stmtB: Stmt): number => {
+ return (
+ stmtB.stats.service_lat.mean * stmtB.stats.count.toInt() -
+ stmtA.stats.service_lat.mean * stmtA.stats.count.toInt()
+ );
+ });
+ }
+
+ // Finally, truncate the response not fitting into limit.
+ res.statements.splice(limit);
+}
+
export const getCombinedStatements = (
req: StatementsRequest,
-): Promise<cockroach.server.serverpb.StatementsResponse> => {
+): Promise<SqlStatsResponse> => {
+ const limit = req.limit?.toInt() ?? DEFAULT_STATS_REQ_OPTIONS.limit;
+
const queryStr = propsToQueryString({
start: req.start.toInt(),
end: req.end.toInt(),
+ "fetch_mode.stats_type": FetchStatsMode.StmtStatsOnly,
+ "fetch_mode.sort": req.fetch_mode?.sort,
+ limit,
});
+
return fetchData(
cockroach.server.serverpb.StatementsResponse,
`${STATEMENTS_PATH}?${queryStr}`,
null,
null,
- "30M",
+ "10M",
+ ).then(res => {
+ // We may fall into the scenario of a newer UI version talking to an older server
+ // version that does not support the fetch_mode and limit request params. In that
+ // case we will have to manually sort and truncate the data to align the UI with
+ // the data returned.
+
+ const isOldServer =
+ res?.transactions?.length || res?.statements?.length > limit;
+
+ if (isOldServer) {
+ sortAndTruncateStmtsResponse(res, req?.fetch_mode?.sort, limit);
+ }
+
+ return res;
+ });
+};
+
+// Mutates the sqlstats txns response to conform to the provided sort and limit params.
+function sortAndTruncateTxnsResponse(
+ res: SqlStatsResponse,
+ sort: SqlStatsSortType,
+ limit: number,
+): void {
+ switch (sort) {
+ case SqlStatsSortOptions.SERVICE_LAT:
+ res.transactions?.sort((txnA: Txn, txnB: Txn): number => {
+ return (
+ txnB.stats_data.stats.service_lat.mean -
+ txnA.stats_data.stats.service_lat.mean
+ );
+ });
+ break;
+ case SqlStatsSortOptions.CONTENTION_TIME:
+ res.transactions?.sort((txnA: Txn, txnB: Txn): number => {
+ return (
+ txnB.stats_data.stats.exec_stats.contention_time.mean -
+ txnA.stats_data.stats.exec_stats.contention_time.mean
+ );
+ });
+ break;
+ case SqlStatsSortOptions.EXECUTION_COUNT:
+ res.transactions?.sort((txnA: Txn, txnB: Txn): number => {
+ return (
+ txnB.stats_data.stats.count.toInt() -
+ txnA.stats_data.stats.count.toInt()
+ );
+ });
+ break;
+ case SqlStatsSortOptions.PCT_RUNTIME:
+ default:
+ res.transactions?.sort((txnA: Txn, txnB: Txn): number => {
+ return (
+ txnB.stats_data.stats.service_lat.mean *
+ txnB.stats_data.stats.count.toInt() -
+ txnA.stats_data.stats.service_lat.mean *
+ txnA.stats_data.stats.count.toInt()
+ );
+ });
+ }
+
+ // Finally, truncate the response not fitting into limit.
+ res.transactions.splice(limit);
+
+ const txnFingerprintsIDs = new Set(
+ res.transactions.map(txn =>
+ txn.stats_data.transaction_fingerprint_id?.toInt(),
+ ),
+ );
+
+ // Filter out stmts not belonging to txns response.
+ res.statements = res.statements.filter(stmt =>
+ txnFingerprintsIDs.has(
+ stmt.key.key_data.transaction_fingerprint_id?.toInt(),
+ ),
);
+}
+
+export const getFlushedTxnStatsApi = (
+ req: StatementsRequest,
+): Promise<SqlStatsResponse> => {
+ const limit = req.limit?.toInt() ?? DEFAULT_STATS_REQ_OPTIONS.limit;
+
+ const queryStr = propsToQueryString({
+ start: req.start?.toInt(),
+ end: req.end?.toInt(),
+ "fetch_mode.stats_type": FetchStatsMode.TxnStatsOnly,
+ "fetch_mode.sort": req.fetch_mode?.sort,
+ limit,
+ });
+
+ return fetchData(
+ cockroach.server.serverpb.StatementsResponse,
+ `${STATEMENTS_PATH}?${queryStr}`,
+ null,
+ null,
+ "10M",
+ ).then(res => {
+ // We may fall into the scenario of a newer UI version talking to an older server
+ // version that does not support the fetch_mode and limit request params. In that
+ // case we will have to manually sort and truncate the data to align the UI with
+ // the data returned.
+
+ const isOldServer = res?.transactions?.length > limit;
+
+ if (isOldServer) {
+ sortAndTruncateTxnsResponse(res, req.fetch_mode?.sort, limit);
+ }
+
+ return res;
+ });
};
export const getStatementDetails = (
diff --git a/pkg/ui/workspaces/cluster-ui/src/api/testUtils.ts b/pkg/ui/workspaces/cluster-ui/src/api/testUtils.ts
new file mode 100644
index 000000000000..282d8f77734a
--- /dev/null
+++ b/pkg/ui/workspaces/cluster-ui/src/api/testUtils.ts
@@ -0,0 +1,207 @@
+// Copyright 2023 The Cockroach Authors.
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+import { cockroach, google } from "@cockroachlabs/crdb-protobuf-client";
+import Long from "long";
+
+type Stmt = cockroach.server.serverpb.StatementsResponse.ICollectedStatementStatistics;
+type Txn = cockroach.server.serverpb.StatementsResponse.IExtendedCollectedTransactionStatistics;
+
+const baseStmt: Partial<Stmt> = {
+ id: Long.fromInt(11871906682067483964),
+ key: {
+ key_data: {
+ query: "SELECT node_id FROM system.statement_statistics",
+ app: "$ cockroach sql",
+ distSQL: true,
+ failed: false,
+ implicit_txn: true,
+ vec: true,
+ full_scan: true,
+ database: "defaultdb",
+ query_summary: "SELECT node_id FROM system.statement_statistics",
+ transaction_fingerprint_id: Long.fromInt(1),
+ },
+ node_id: 0,
+ },
+ stats: {
+ count: Long.fromInt(1),
+ first_attempt_count: Long.fromInt(1),
+ max_retries: Long.fromInt(0),
+ num_rows: {
+ mean: 1576,
+ squared_diffs: 0,
+ },
+ parse_lat: {
+ mean: 0.000044584,
+ squared_diffs: 0,
+ },
+ plan_lat: {
+ mean: 0.037206708,
+ squared_diffs: 0,
+ },
+ run_lat: {
+ mean: 0.003240459,
+ squared_diffs: 0,
+ },
+ service_lat: {
+ mean: 0.040506917,
+ squared_diffs: 0,
+ },
+ overhead_lat: {
+ mean: 0.000015166000000003954,
+ squared_diffs: 0,
+ },
+ sensitive_info: {
+ last_err: "",
+ most_recent_plan_description: {
+ name: "",
+ attrs: [],
+ children: [],
+ },
+ most_recent_plan_timestamp: new google.protobuf.Timestamp(),
+ },
+ bytes_read: {
+ mean: 162109,
+ squared_diffs: 0,
+ },
+ rows_read: {
+ mean: 1576,
+ squared_diffs: 0,
+ },
+ rows_written: {
+ mean: 0,
+ squared_diffs: 0,
+ },
+ exec_stats: {
+ count: Long.fromInt(1),
+ network_bytes: {
+ mean: 0,
+ squared_diffs: 0,
+ },
+ max_mem_usage: {
+ mean: 184320,
+ squared_diffs: 0,
+ },
+ contention_time: {
+ mean: 0,
+ squared_diffs: 0,
+ },
+ network_messages: {
+ mean: 0,
+ squared_diffs: 0,
+ },
+ max_disk_usage: {
+ mean: 0,
+ squared_diffs: 0,
+ },
+ },
+ sql_type: "TypeDML",
+ last_exec_timestamp: new google.protobuf.Timestamp(),
+ plan_gists: ["AgFUBAAgAAAABgI="],
+ },
+};
+
+const baseTxn: Partial<Txn> = {
+ stats_data: {
+ statement_fingerprint_ids: [Long.fromInt(18262870370352730905)],
+ app: "$ cockroach sql",
+ stats: {
+ count: Long.fromInt(8),
+ max_retries: Long.fromInt(0),
+ num_rows: {
+ mean: 0,
+ squared_diffs: 0,
+ },
+ service_lat: {
+ mean: 0.00013457312500000002,
+ squared_diffs: 5.992246806875002e-9,
+ },
+ retry_lat: {
+ mean: 0,
+ squared_diffs: 0,
+ },
+ commit_lat: {
+ mean: 0.0000031143749999999997,
+ squared_diffs: 1.1728737874999997e-11,
+ },
+ bytes_read: {
+ mean: 0,
+ squared_diffs: 0,
+ },
+ rows_read: {
+ mean: 0,
+ squared_diffs: 0,
+ },
+ exec_stats: {
+ count: Long.fromInt(8),
+ network_bytes: {
+ mean: 0,
+ squared_diffs: 0,
+ },
+ max_mem_usage: {
+ mean: 10240,
+ squared_diffs: 0,
+ },
+ contention_time: {
+ mean: 0,
+ squared_diffs: 0,
+ },
+ network_messages: {
+ mean: 0,
+ squared_diffs: 0,
+ },
+ max_disk_usage: {
+ mean: 0,
+ squared_diffs: 0,
+ },
+ },
+ rows_written: {
+ mean: 0,
+ squared_diffs: 0,
+ },
+ },
+ aggregated_ts: new google.protobuf.Timestamp(),
+ transaction_fingerprint_id: Long.fromInt(5913510653911377094),
+ },
+ node_id: 0,
+};
+
+const assignObjectPropsIfExists = <T extends Record<string, any>>(
+ baseObj: T,
+ overrides: Partial<T>,
+): T => {
+ const copiedObj: T = { ...baseObj };
+ for (const prop in baseObj) {
+ if (overrides[prop] === undefined) {
+ continue;
+ }
+
+ const val = copiedObj[prop];
+ if (typeof val === "object") {
+ copiedObj[prop] = assignObjectPropsIfExists(
+ val as Record<string, any>,
+ overrides[prop] as Record<string, any>,
+ ) as typeof val;
+ } else {
+ copiedObj[prop] = overrides[prop];
+ }
+ }
+
+ return copiedObj;
+};
+
+export const mockStmtStats = (partialStmt: Partial<Stmt> = {}): Stmt => {
+ return assignObjectPropsIfExists(baseStmt, partialStmt);
+};
+
+export const mockTxnStats = (partialTxn: Partial<Txn> = {}): Txn => {
+ return assignObjectPropsIfExists(baseTxn, partialTxn);
+};
diff --git a/pkg/ui/workspaces/cluster-ui/src/barCharts/barCharts.tsx b/pkg/ui/workspaces/cluster-ui/src/barCharts/barCharts.tsx
index e3494301336e..6dd593baf721 100644
--- a/pkg/ui/workspaces/cluster-ui/src/barCharts/barCharts.tsx
+++ b/pkg/ui/workspaces/cluster-ui/src/barCharts/barCharts.tsx
@@ -132,10 +132,11 @@ export function workloadPctBarChart(
return barChartFactory(
"grey",
[
- bar(
- "pct-workload",
- (d: StatementStatistics) =>
- (d.stats.service_lat.mean * longToInt(d.stats.count)) / totalWorkload,
+ bar("pct-workload", (d: StatementStatistics) =>
+ totalWorkload !== 0
+ ? (d.stats.service_lat.mean * longToInt(d.stats.count)) /
+ totalWorkload
+ : 0,
),
],
v => Percentage(v, 1, 1),
diff --git a/pkg/ui/workspaces/cluster-ui/src/columnsSelector/columnsSelector.module.scss b/pkg/ui/workspaces/cluster-ui/src/columnsSelector/columnsSelector.module.scss
index 02279ebb21b1..45cb2f667790 100644
--- a/pkg/ui/workspaces/cluster-ui/src/columnsSelector/columnsSelector.module.scss
+++ b/pkg/ui/workspaces/cluster-ui/src/columnsSelector/columnsSelector.module.scss
@@ -6,7 +6,7 @@
}
&__btn {
- height: $line-height--large;
+ height: $line-height--larger;
width: 67px;
font-size: $font-size--small;
}
@@ -36,9 +36,10 @@
}
}
-.float {
- float: left;
- margin-right: 7px;
+.btn-area {
+ float: left;
+ margin-right: 7px;
+ font-size: $font-size--medium;
}
.label {
diff --git a/pkg/ui/workspaces/cluster-ui/src/columnsSelector/columnsSelector.tsx b/pkg/ui/workspaces/cluster-ui/src/columnsSelector/columnsSelector.tsx
index c9a53c43de46..3821f137daff 100644
--- a/pkg/ui/workspaces/cluster-ui/src/columnsSelector/columnsSelector.tsx
+++ b/pkg/ui/workspaces/cluster-ui/src/columnsSelector/columnsSelector.tsx
@@ -36,6 +36,7 @@ export interface ColumnsSelectorProps {
// options provides the list of available columns and their initial selection state
options: SelectOption[];
onSubmitColumns: (selectedColumns: string[]) => void;
+ size?: "default" | "small";
}
export interface ColumnsSelectorState {
@@ -222,6 +223,7 @@ export default class ColumnsSelector extends React.Component<
render() {
const { hide } = this.state;
+ const { size = "default" } = this.props;
const dropdownArea = hide ? hidden : dropdown;
const options = this.getOptions();
const columnsSelected = options.filter(o => o.isSelected);
@@ -230,10 +232,11 @@ export default class ColumnsSelector extends React.Component<