// Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

package sql

import (
"bytes"
"context"
"fmt"
"net"
"net/url"
"reflect"
"regexp"
"sort"
"strings"
"sync"
"time"
"github.com/cockroachdb/apd"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/cockroach/pkg/config"
"github.com/cockroachdb/cockroach/pkg/gossip"
"github.com/cockroachdb/cockroach/pkg/internal/client"
"github.com/cockroachdb/cockroach/pkg/jobs"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/rpc"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/server/serverpb"
"github.com/cockroachdb/cockroach/pkg/server/status/statuspb"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/colexec"
"github.com/cockroachdb/cockroach/pkg/sql/distsql"
"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
"github.com/cockroachdb/cockroach/pkg/sql/opt"
"github.com/cockroachdb/cockroach/pkg/sql/parser"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/querycache"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
"github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
"github.com/cockroachdb/cockroach/pkg/sql/stats"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util/bitarray"
"github.com/cockroachdb/cockroach/pkg/util/duration"
"github.com/cockroachdb/cockroach/pkg/util/errorutil/unimplemented"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/metric"
"github.com/cockroachdb/cockroach/pkg/util/retry"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
"github.com/opentracing/opentracing-go"
)
// ClusterOrganization is the organization name.
var ClusterOrganization = settings.RegisterStringSetting(
"cluster.organization",
"organization name",
"",
)
// ClusterSecret is a cluster specific secret. This setting is hidden.
var ClusterSecret = func() *settings.StringSetting {
s := settings.RegisterStringSetting(
"cluster.secret",
"cluster specific secret",
"",
)
s.SetConfidential()
return s
}()
// defaultIntSize controls how a "naked" INT type will be parsed.
// TODO(bob): Change this to 4 in v2.3; https://github.com/cockroachdb/cockroach/issues/32534
// TODO(bob): Remove or no-op this in v2.4: https://github.com/cockroachdb/cockroach/issues/32844
var defaultIntSize = settings.RegisterValidatedIntSetting(
"sql.defaults.default_int_size",
"the size, in bytes, of an INT type", 8, func(i int64) error {
if i != 4 && i != 8 {
return errors.New("only 4 or 8 are valid values")
}
return nil
})
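
// A read-side sketch (hypothetical variable name st): the validator above
// guarantees the stored value is either 4 or 8. The Get(&...SV) pattern
// mirrors the one used by Organization() below.
//
//	size := defaultIntSize.Get(&st.SV) // 4 => naked INT parses as INT4; 8 => INT8
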
// traceTxnThreshold can be used to log SQL transactions that take
// longer than a configured duration to complete. For example, traceTxnThreshold=1s
// will log the trace for any transaction that takes 1s or longer. To
// log traces for all transactions use traceTxnThreshold=1ns. Note
// that any positive duration will enable tracing and will slow down
// all execution because traces are gathered for all transactions even
// if they are not output.
var traceTxnThreshold = settings.RegisterDurationSetting(
"sql.trace.txn.enable_threshold",
"duration beyond which all transactions are traced (set to 0 to disable)", 0,
)
// traceSessionEventLogEnabled can be used to enable the event log
// that is normally kept for every SQL connection. The event log has a
// non-trivial performance impact and also reveals SQL statements
// which may be a privacy concern.
var traceSessionEventLogEnabled = settings.RegisterBoolSetting(
"sql.trace.session_eventlog.enabled",
"set to true to enable session tracing", false,
)
// ReorderJoinsLimitClusterSettingName is the name of the cluster setting for
// the maximum number of joins to reorder.
const ReorderJoinsLimitClusterSettingName = "sql.defaults.reorder_joins_limit"
// ReorderJoinsLimitClusterValue controls the cluster default for the maximum
// number of joins reordered.
var ReorderJoinsLimitClusterValue = settings.RegisterValidatedIntSetting(
ReorderJoinsLimitClusterSettingName,
"default number of joins to reorder",
opt.DefaultJoinOrderLimit,
func(v int64) error {
if v < 0 {
return pgerror.Newf(pgcode.InvalidParameterValue,
"cannot set sql.defaults.reorder_joins_limit to a negative value: %d", v)
}
return nil
},
)
var zigzagJoinClusterMode = settings.RegisterBoolSetting(
"sql.defaults.zigzag_join.enabled",
"default value for enable_zigzag_join session setting; allows use of zig-zag join by default",
true,
)
var optDrivenFKClusterMode = settings.RegisterBoolSetting(
"sql.defaults.experimental_optimizer_foreign_keys.enabled",
"enables optimizer-driven foreign key checks by default",
false,
)
// VectorizeClusterMode controls the cluster default for when automatic
// vectorization is enabled.
var VectorizeClusterMode = settings.RegisterEnumSetting(
"sql.defaults.vectorize",
"default vectorize mode",
"auto",
map[int64]string{
int64(sessiondata.VectorizeOff): "off",
int64(sessiondata.VectorizeAuto): "auto",
int64(sessiondata.VectorizeExperimentalOn): "experimental_on",
},
)
// VectorizeRowCountThresholdClusterValue controls the cluster default for the
// vectorize row count threshold. When it is met, the vectorized execution
// engine will be used if possible.
var VectorizeRowCountThresholdClusterValue = settings.RegisterValidatedIntSetting(
"sql.defaults.vectorize_row_count_threshold",
"default vectorize row count threshold",
colexec.DefaultVectorizeRowCountThreshold,
func(v int64) error {
if v < 0 {
return pgerror.Newf(pgcode.InvalidParameterValue,
"cannot set sql.defaults.vectorize_row_count_threshold to a negative value: %d", v)
}
return nil
},
)
// DistSQLClusterExecMode controls the cluster default for when DistSQL is used.
var DistSQLClusterExecMode = settings.RegisterEnumSetting(
"sql.defaults.distsql",
"default distributed SQL execution mode",
"auto",
map[int64]string{
int64(sessiondata.DistSQLOff): "off",
int64(sessiondata.DistSQLAuto): "auto",
int64(sessiondata.DistSQLOn): "on",
},
)
// SerialNormalizationMode controls how the SERIAL type is interpreted in table
// definitions.
var SerialNormalizationMode = settings.RegisterEnumSetting(
"sql.defaults.serial_normalization",
"default handling of SERIAL in table definitions",
"rowid",
map[int64]string{
int64(sessiondata.SerialUsesRowID): "rowid",
int64(sessiondata.SerialUsesVirtualSequences): "virtual_sequence",
int64(sessiondata.SerialUsesSQLSequences): "sql_sequence",
},
)
var errNoTransactionInProgress = errors.New("there is no transaction in progress")
var errTransactionInProgress = errors.New("there is already a transaction in progress")
const sqlTxnName string = "sql txn"
const metricsSampleInterval = 10 * time.Second
// Fully-qualified names for metrics.
var (
MetaSQLExecLatency = metric.Metadata{
Name: "sql.exec.latency",
Help: "Latency of SQL statement execution",
Measurement: "Latency",
Unit: metric.Unit_NANOSECONDS,
}
MetaSQLServiceLatency = metric.Metadata{
Name: "sql.service.latency",
Help: "Latency of SQL request execution",
Measurement: "Latency",
Unit: metric.Unit_NANOSECONDS,
}
MetaSQLOpt = metric.Metadata{
Name: "sql.optimizer.count",
Help: "Number of statements which ran with the cost-based optimizer",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaSQLOptFallback = metric.Metadata{
Name: "sql.optimizer.fallback.count",
Help: "Number of statements which the cost-based optimizer was unable to plan",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaSQLOptPlanCacheHits = metric.Metadata{
Name: "sql.optimizer.plan_cache.hits",
Help: "Number of non-prepared statements for which a cached plan was used",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaSQLOptPlanCacheMisses = metric.Metadata{
Name: "sql.optimizer.plan_cache.misses",
Help: "Number of non-prepared statements for which a cached plan was not used",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaDistSQLSelect = metric.Metadata{
Name: "sql.distsql.select.count",
Help: "Number of DistSQL SELECT statements",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaDistSQLExecLatency = metric.Metadata{
Name: "sql.distsql.exec.latency",
Help: "Latency of DistSQL statement execution",
Measurement: "Latency",
Unit: metric.Unit_NANOSECONDS,
}
MetaDistSQLServiceLatency = metric.Metadata{
Name: "sql.distsql.service.latency",
Help: "Latency of DistSQL request execution",
Measurement: "Latency",
Unit: metric.Unit_NANOSECONDS,
}
MetaTxnAbort = metric.Metadata{
Name: "sql.txn.abort.count",
Help: "Number of SQL transaction abort errors",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaFailure = metric.Metadata{
Name: "sql.failure.count",
Help: "Number of statements resulting in a planning or runtime error",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaSQLTxnLatency = metric.Metadata{
Name: "sql.txn.latency",
Help: "Latency of SQL transactions",
Measurement: "Latency",
Unit: metric.Unit_NANOSECONDS,
}
// Below are the metadata for the statement started counters.
MetaQueryStarted = metric.Metadata{
Name: "sql.query.started.count",
Help: "Number of SQL queries started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaTxnBeginStarted = metric.Metadata{
Name: "sql.txn.begin.started.count",
Help: "Number of SQL transaction BEGIN statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaTxnCommitStarted = metric.Metadata{
Name: "sql.txn.commit.started.count",
Help: "Number of SQL transaction COMMIT statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaTxnRollbackStarted = metric.Metadata{
Name: "sql.txn.rollback.started.count",
Help: "Number of SQL transaction ROLLBACK statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaSelectStarted = metric.Metadata{
Name: "sql.select.started.count",
Help: "Number of SQL SELECT statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaUpdateStarted = metric.Metadata{
Name: "sql.update.started.count",
Help: "Number of SQL UPDATE statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaInsertStarted = metric.Metadata{
Name: "sql.insert.started.count",
Help: "Number of SQL INSERT statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaDeleteStarted = metric.Metadata{
Name: "sql.delete.started.count",
Help: "Number of SQL DELETE statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaSavepointStarted = metric.Metadata{
Name: "sql.savepoint.started.count",
Help: "Number of SQL SAVEPOINT statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaRestartSavepointStarted = metric.Metadata{
Name: "sql.restart_savepoint.started.count",
Help: "Number of `SAVEPOINT cockroach_restart` statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaReleaseRestartSavepointStarted = metric.Metadata{
Name: "sql.restart_savepoint.release.started.count",
Help: "Number of `RELEASE SAVEPOINT cockroach_restart` statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaRollbackToRestartSavepointStarted = metric.Metadata{
Name: "sql.restart_savepoint.rollback.started.count",
Help: "Number of `ROLLBACK TO SAVEPOINT cockroach_restart` statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaDdlStarted = metric.Metadata{
Name: "sql.ddl.started.count",
Help: "Number of SQL DDL statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaMiscStarted = metric.Metadata{
Name: "sql.misc.started.count",
Help: "Number of other SQL statements started",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
// Below are the metadata for the statement executed counters.
MetaQueryExecuted = metric.Metadata{
Name: "sql.query.count",
Help: "Number of SQL queries executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaTxnBeginExecuted = metric.Metadata{
Name: "sql.txn.begin.count",
Help: "Number of SQL transaction BEGIN statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaTxnCommitExecuted = metric.Metadata{
Name: "sql.txn.commit.count",
Help: "Number of SQL transaction COMMIT statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaTxnRollbackExecuted = metric.Metadata{
Name: "sql.txn.rollback.count",
Help: "Number of SQL transaction ROLLBACK statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaSelectExecuted = metric.Metadata{
Name: "sql.select.count",
Help: "Number of SQL SELECT statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaUpdateExecuted = metric.Metadata{
Name: "sql.update.count",
Help: "Number of SQL UPDATE statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaInsertExecuted = metric.Metadata{
Name: "sql.insert.count",
Help: "Number of SQL INSERT statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaDeleteExecuted = metric.Metadata{
Name: "sql.delete.count",
Help: "Number of SQL DELETE statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaSavepointExecuted = metric.Metadata{
Name: "sql.savepoint.count",
Help: "Number of SQL SAVEPOINT statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaRestartSavepointExecuted = metric.Metadata{
Name: "sql.restart_savepoint.count",
Help: "Number of `SAVEPOINT cockroach_restart` statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaReleaseRestartSavepointExecuted = metric.Metadata{
Name: "sql.restart_savepoint.release.count",
Help: "Number of `RELEASE SAVEPOINT cockroach_restart` statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaRollbackToRestartSavepointExecuted = metric.Metadata{
Name: "sql.restart_savepoint.rollback.count",
Help: "Number of `ROLLBACK TO SAVEPOINT cockroach_restart` statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaDdlExecuted = metric.Metadata{
Name: "sql.ddl.count",
Help: "Number of SQL DDL statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
MetaMiscExecuted = metric.Metadata{
Name: "sql.misc.count",
Help: "Number of other SQL statements successfully executed",
Measurement: "SQL Statements",
Unit: metric.Unit_COUNT,
}
)
func getMetricMeta(meta metric.Metadata, internal bool) metric.Metadata {
if internal {
meta.Name += ".internal"
meta.Help += " (internal queries)"
meta.Measurement = "SQL Internal Statements"
}
return meta
}
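
// For example, using the metadata defined above:
//
//	m := getMetricMeta(MetaSQLExecLatency, true /* internal */)
//	// m.Name        == "sql.exec.latency.internal"
//	// m.Help        == "Latency of SQL statement execution (internal queries)"
//	// m.Measurement == "SQL Internal Statements"
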
// NodeInfo contains metadata about the executing node and cluster.
type NodeInfo struct {
ClusterID func() uuid.UUID
NodeID *base.NodeIDContainer
AdminURL func() *url.URL
PGURL func(*url.Userinfo) (*url.URL, error)
}
// nodeStatusGenerator is a limited portion of the status.MetricsRecorder
// struct, to avoid having to import all of status in sql.
type nodeStatusGenerator interface {
GenerateNodeStatus(ctx context.Context) *statuspb.NodeStatus
}
// An ExecutorConfig encompasses the auxiliary objects and configuration
// required to create an executor.
// All fields holding a pointer or an interface are required to create
// an Executor; the rest will have sane defaults set if omitted.
type ExecutorConfig struct {
Settings *cluster.Settings
NodeInfo
DefaultZoneConfig *config.ZoneConfig
Locality roachpb.Locality
AmbientCtx log.AmbientContext
DB *client.DB
Gossip *gossip.Gossip
DistSender *kv.DistSender
RPCContext *rpc.Context
LeaseManager *LeaseManager
Clock *hlc.Clock
DistSQLSrv *distsql.ServerImpl
StatusServer serverpb.StatusServer
MetricsRecorder nodeStatusGenerator
SessionRegistry *SessionRegistry
JobRegistry *jobs.Registry
VirtualSchemas *VirtualSchemaHolder
DistSQLPlanner *DistSQLPlanner
TableStatsCache *stats.TableStatisticsCache
StatsRefresher *stats.Refresher
ExecLogger *log.SecondaryLogger
AuditLogger *log.SecondaryLogger
InternalExecutor *InternalExecutor
QueryCache *querycache.C
TestingKnobs ExecutorTestingKnobs
PGWireTestingKnobs *PGWireTestingKnobs
SchemaChangerTestingKnobs *SchemaChangerTestingKnobs
DistSQLRunTestingKnobs *execinfra.TestingKnobs
EvalContextTestingKnobs tree.EvalContextTestingKnobs
// HistogramWindowInterval is (server.Config).HistogramWindowInterval.
HistogramWindowInterval time.Duration
// Caches updated by DistSQL.
RangeDescriptorCache *kv.RangeDescriptorCache
LeaseHolderCache *kv.LeaseHolderCache
}
// Organization returns the value of cluster.organization.
func (ec *ExecutorConfig) Organization() string {
return ClusterOrganization.Get(&ec.Settings.SV)
}
var _ base.ModuleTestingKnobs = &ExecutorTestingKnobs{}
// ModuleTestingKnobs is part of the base.ModuleTestingKnobs interface.
func (*ExecutorTestingKnobs) ModuleTestingKnobs() {}
// StatementFilter is the type of callback that
// ExecutorTestingKnobs.StatementFilter takes.
type StatementFilter func(context.Context, string, error)
// ExecutorTestingKnobs is part of the context used to control parts of the
// system during testing.
type ExecutorTestingKnobs struct {
// StatementFilter can be used to trap execution of SQL statements and
// optionally change their results. The filter function is invoked after each
// statement has been executed.
StatementFilter StatementFilter
// BeforeExecute is called by the Executor before plan execution. It is useful
// for synchronizing statement execution.
BeforeExecute func(ctx context.Context, stmt string)
// AfterExecute is like StatementFilter, but it runs in the same goroutine as
// the statement.
AfterExecute func(ctx context.Context, stmt string, err error)
// DisableAutoCommit, if set, disables the auto-commit functionality of some
// SQL statements. That functionality allows some statements to commit
// directly when they're executed in an implicit SQL txn, without waiting for
// the Executor to commit the implicit txn.
// This has to be set in tests that need to abort such statements using a
// StatementFilter; otherwise, the statement commits immediately after
// execution so there'll be nothing left to abort by the time the filter runs.
DisableAutoCommit bool
// BeforeAutoCommit is called when the Executor is about to commit the KV
// transaction after running a statement in an implicit transaction, allowing
// tests to inject errors into that commit.
// If an error is returned, that error will be considered the result of
// txn.Commit(), and the txn.Commit() call will not actually be
// made. If no error is returned, txn.Commit() is called normally.
//
// Note that this is not called if the SQL statement representing the implicit
// transaction has committed the KV txn itself (e.g. if it used the 1-PC
// optimization). This is only called when the Executor is the one doing the
// committing.
BeforeAutoCommit func(ctx context.Context, stmt string) error
}
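
// A test-side sketch of wiring these knobs (hypothetical test body); the
// filter's signature matches StatementFilter above:
//
//	knobs := ExecutorTestingKnobs{
//		DisableAutoCommit: true, // keep implicit txns open so the filter can abort them
//		StatementFilter: func(ctx context.Context, stmt string, err error) {
//			// inspect or record (stmt, err) after each statement executes
//		},
//	}
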
// PGWireTestingKnobs contains knobs for the pgwire module.
type PGWireTestingKnobs struct {
// CatchPanics causes the pgwire.conn to recover from panics in its execution
// thread and return them as errors to the client, closing the connection
// afterward.
CatchPanics bool
// AuthHook is used to override the normal authentication handling on new
// connections.
AuthHook func(context.Context) error
}
var _ base.ModuleTestingKnobs = &PGWireTestingKnobs{}
// ModuleTestingKnobs implements the base.ModuleTestingKnobs interface.
func (*PGWireTestingKnobs) ModuleTestingKnobs() {}
// databaseCacheHolder is a thread-safe container for a *databaseCache.
// It also allows clients to block until the cache is updated to a desired
// state.
//
// NOTE(andrei): The way in which we handle the database cache is funky: there's
// this top-level holder, which gets updated on gossip updates. Then, each
// session gets its *databaseCache, which is updated from the holder after every
// transaction - the SystemConfig is updated and the lazily computed map of db
// names to ids is wiped. So many sessions are sharing and contending on a
// mutable cache, but nobody's sharing this holder. We should make up our mind
// about whether we like the sharing or not and, if we do, share the holder too.
// Also, we could use the SystemConfigDeltaFilter to limit the updates to
// databases that changed. One of the problems with the existing architecture
// is that if a transaction is completed on a session and the session remains
// dormant for a long time, the next transaction will see a rather old database
// cache.
type databaseCacheHolder struct {
mu struct {
syncutil.Mutex
c *databaseCache
cv *sync.Cond
}
}
func newDatabaseCacheHolder(c *databaseCache) *databaseCacheHolder {
dc := &databaseCacheHolder{}
dc.mu.c = c
dc.mu.cv = sync.NewCond(&dc.mu.Mutex)
return dc
}
func (dc *databaseCacheHolder) getDatabaseCache() *databaseCache {
dc.mu.Lock()
defer dc.mu.Unlock()
return dc.mu.c
}
// waitForCacheState implements the dbCacheSubscriber interface.
func (dc *databaseCacheHolder) waitForCacheState(cond func(*databaseCache) bool) {
dc.mu.Lock()
defer dc.mu.Unlock()
for done := cond(dc.mu.c); !done; done = cond(dc.mu.c) {
dc.mu.cv.Wait()
}
}
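
// A caller-side sketch (someConditionOn is a hypothetical predicate): block
// until a gossiped system-config update produces a cache state the caller
// wants to observe.
//
//	dc.waitForCacheState(func(c *databaseCache) bool {
//		// Any predicate over the cache; return true to stop waiting, e.g.
//		// "the cache now reflects a database created by another session".
//		return someConditionOn(c)
//	})
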
// databaseCacheHolder implements the dbCacheSubscriber interface.
var _ dbCacheSubscriber = &databaseCacheHolder{}
// updateSystemConfig is called whenever a new system config gossip entry is
// received.
func (dc *databaseCacheHolder) updateSystemConfig(cfg *config.SystemConfig) {
dc.mu.Lock()
dc.mu.c = newDatabaseCache(cfg)
dc.mu.cv.Broadcast()
dc.mu.Unlock()
}
func shouldDistributeGivenRecAndMode(
rec distRecommendation, mode sessiondata.DistSQLExecMode,
) bool {
switch mode {
case sessiondata.DistSQLOff:
return false
case sessiondata.DistSQLAuto:
return rec == shouldDistribute
case sessiondata.DistSQLOn, sessiondata.DistSQLAlways:
return rec != cannotDistribute
}
panic(fmt.Sprintf("unhandled distsql mode %v", mode))
}
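
// The decision above, in tabular form (canDistribute is the remaining
// distRecommendation value, assumed from elsewhere in this package):
//
//	mode \ rec | cannotDistribute | canDistribute | shouldDistribute
//	off        | false            | false         | false
//	auto       | false            | false         | true
//	on, always | false            | true          | true
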
// shouldDistributePlan determines whether we should distribute the
// given logical plan, based on the session settings.
func shouldDistributePlan(
ctx context.Context, distSQLMode sessiondata.DistSQLExecMode, dp *DistSQLPlanner, plan planNode,
) bool {
if distSQLMode == sessiondata.DistSQLOff {
return false
}
// Don't try to run empty nodes (e.g. SET commands) with distSQL.
if _, ok := plan.(*zeroNode); ok {
return false
}
rec, err := dp.checkSupportForNode(plan)
if err != nil {
// Don't use distSQL for this request.
log.VEventf(ctx, 1, "query not supported for distSQL: %s", err)
return false
}
return shouldDistributeGivenRecAndMode(rec, distSQLMode)
}
// golangFillQueryArguments transforms Go values into datums.
// Some of the args can be datums (in which case the transformation is a no-op).
//
// TODO: This does not support arguments of the SQL 'Date' type, as there is not
// an equivalent type in Go's standard library. It's not currently needed by any
// of our internal tables.
func golangFillQueryArguments(args ...interface{}) tree.Datums {
res := make(tree.Datums, len(args))
for i, arg := range args {
if arg == nil {
res[i] = tree.DNull
continue
}
// A type switch to handle a few explicit types with special semantics:
// - Datums are passed along as is.
// - Time datatypes get special representation in the database.
var d tree.Datum
switch t := arg.(type) {
case tree.Datum:
d = t
case time.Time:
d = tree.MakeDTimestamp(t, time.Microsecond)
case time.Duration:
d = &tree.DInterval{Duration: duration.MakeDuration(t.Nanoseconds(), 0, 0)}
case bitarray.BitArray:
d = &tree.DBitArray{BitArray: t}
case *apd.Decimal:
dd := &tree.DDecimal{}
dd.Set(t)
d = dd
}
if d == nil {
// Handle all types which have an underlying type that can be stored in the
// database.
// Note: if this reflection becomes a performance concern in the future,
// commonly used types could be added explicitly into the type switch above
// for a performance gain.
val := reflect.ValueOf(arg)
switch val.Kind() {
case reflect.Bool:
d = tree.MakeDBool(tree.DBool(val.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
d = tree.NewDInt(tree.DInt(val.Int()))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
d = tree.NewDInt(tree.DInt(val.Uint()))
case reflect.Float32, reflect.Float64:
d = tree.NewDFloat(tree.DFloat(val.Float()))
case reflect.String:
d = tree.NewDString(val.String())
case reflect.Slice:
// Handle byte slices.
if val.Type().Elem().Kind() == reflect.Uint8 {
d = tree.NewDBytes(tree.DBytes(val.Bytes()))
}
}
if d == nil {
panic(fmt.Sprintf("unexpected type %T", arg))
}
}
res[i] = d
}
return res
}
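
// A minimal usage sketch:
//
//	datums := golangFillQueryArguments(nil, true, 42, "hi", []byte{0x1})
//	// -> DNull, DBool(true), DInt(42), DString("hi"), DBytes("\x01")
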
// checkResultType verifies that a table result can be returned to the
// client.
func checkResultType(typ *types.T) error {
// Compare all types that can rely on == equality.
switch typ.Family() {
case types.UnknownFamily:
case types.BitFamily:
case types.BoolFamily:
case types.IntFamily:
case types.FloatFamily:
case types.DecimalFamily:
case types.BytesFamily:
case types.StringFamily:
case types.CollatedStringFamily:
case types.DateFamily:
case types.TimestampFamily:
case types.TimeFamily:
case types.TimestampTZFamily:
case types.IntervalFamily:
case types.JsonFamily:
case types.UuidFamily:
case types.INetFamily:
case types.OidFamily:
case types.TupleFamily:
case types.ArrayFamily:
if typ.ArrayContents().Family() == types.ArrayFamily {
// Technically we could probably return arrays of arrays to a
// client (the encoding exists) but we don't want to give
// mixed signals -- that nested arrays appear to be supported
// in this case, and not in other cases (e.g. CREATE). So we
// reject them in every case instead.
return unimplemented.NewWithIssueDetail(32552,
"result", "arrays cannot have arrays as element type")
}
case types.AnyFamily:
// Placeholder case.
return errors.Errorf("could not determine data type of %s", typ)
default:
return errors.Errorf("unsupported result type: %s", typ)
}
return nil
}
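
// For example, a plain array type is accepted while a nested array type is
// rejected (a sketch using the types package imported above):
//
//	_ = checkResultType(types.MakeArray(types.Int))                  // nil
//	_ = checkResultType(types.MakeArray(types.MakeArray(types.Int))) // issue 32552 error
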
// EvalAsOfTimestamp evaluates and returns the timestamp from an AS OF SYSTEM
// TIME clause.
func (p *planner) EvalAsOfTimestamp(asOf tree.AsOfClause) (_ hlc.Timestamp, err error) {
ts, err := tree.EvalAsOfTimestamp(asOf, &p.semaCtx, p.EvalContext())
if err != nil {
return hlc.Timestamp{}, err
}
if now := p.execCfg.Clock.Now(); now.Less(ts) {
return hlc.Timestamp{}, errors.Errorf(
"AS OF SYSTEM TIME: cannot specify timestamp in the future (%s > %s)", ts, now)
}
return ts, nil
}
// ParseHLC parses a string representation of an `hlc.Timestamp`.
func ParseHLC(s string) (hlc.Timestamp, error) {
dec, _, err := apd.NewFromString(s)
if err != nil {
return hlc.Timestamp{}, err
}
return tree.DecimalToHLC(dec)
}
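
// For example, assuming the usual decimal encoding of an hlc.Timestamp (wall
// time in nanoseconds, with the logical counter in the last ten fractional
// digits):
//
//	ts, err := ParseHLC("1580000000000000000.0000000001")
//	// ts.WallTime == 1580000000000000000, ts.Logical == 1 (err == nil)
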
// isAsOf analyzes a statement to bypass the logic in newPlan(), since
// that requires the transaction to be started already. If the returned
// timestamp is not nil, it is the timestamp to which a transaction
// should be set. The statements that will be checked are Select,
// ShowTrace (of a Select statement), Scrub, Export, and CreateStats.
func (p *planner) isAsOf(stmt tree.Statement) (*hlc.Timestamp, error) {
var asOf tree.AsOfClause
switch s := stmt.(type) {
case *tree.Select:
selStmt := s.Select
var parenSel *tree.ParenSelect
var ok bool
for parenSel, ok = selStmt.(*tree.ParenSelect); ok; parenSel, ok = selStmt.(*tree.ParenSelect) {
selStmt = parenSel.Select.Select
}
sc, ok := selStmt.(*tree.SelectClause)
if !ok {
return nil, nil
}
if sc.From.AsOf.Expr == nil {
return nil, nil
}
asOf = sc.From.AsOf
case *tree.Scrub:
if s.AsOf.Expr == nil {
return nil, nil
}
asOf = s.AsOf
case *tree.Export:
return p.isAsOf(s.Query)
case *tree.CreateStats:
if s.Options.AsOf.Expr == nil {
return nil, nil
}
asOf = s.Options.AsOf
default:
return nil, nil
}
ts, err := p.EvalAsOfTimestamp(asOf)
return &ts, err
}
// isSavepoint returns true if stmt is a SAVEPOINT statement.
func isSavepoint(stmt Statement) bool {
_, isSavepoint := stmt.AST.(*tree.Savepoint)
return isSavepoint
}
// isSetTransaction returns true if stmt is a "SET TRANSACTION ..." statement.
func isSetTransaction(stmt Statement) bool {
_, isSet := stmt.AST.(*tree.SetTransaction)
return isSet
}
// queryPhase represents a phase during a query's execution.
type queryPhase int
const (
// The phase before start of execution (includes parsing, building a plan).
preparing queryPhase = 0
// Execution phase.
executing queryPhase = 1
)
// queryMeta stores metadata about a query. Stored as reference in
// session.mu.ActiveQueries.
type queryMeta struct {
// The timestamp when this query began execution.
start time.Time
// AST of the SQL statement - converted to query string only when necessary.
stmt tree.Statement
// States whether this query is distributed. Note that all queries,
// including those that are distributed, have this field set to false until
// start of execution; only at that point can we actually determine whether
// this query will be distributed. Use the phase variable below
// to determine whether this query has entered execution yet.
isDistributed bool
// Current phase of execution of query.
phase queryPhase
// Cancellation function for the context associated with this query's transaction.
ctxCancel context.CancelFunc
// If set, this query will not be reported as part of SHOW QUERIES. This is
// set based on the statement implementing tree.HiddenFromShowQueries.
hidden bool
}
// cancel cancels the query associated with this queryMeta by canceling the
// associated txn context.
func (q *queryMeta) cancel() {
q.ctxCancel()
}
// SessionDefaults mirrors fields in Session, for restoring default
// configuration values in SET ... TO DEFAULT (or RESET ...) statements.
type SessionDefaults map[string]string
// SessionArgs contains arguments for serving a client connection.
type SessionArgs struct {
User string
SessionDefaults SessionDefaults
// RemoteAddr is the client's address. This is nil iff this is an internal
// client.
RemoteAddr net.Addr
ConnResultsBufferSize int64
}
// isDefined returns true iff the SessionArgs is well-defined.
// This method exists because SessionArgs is passed by value but it
// matters to the functions using it whether the value was explicitly
// specified or left empty.
func (s SessionArgs) isDefined() bool { return len(s.User) != 0 }
// SessionRegistry stores a set of all sessions on this node.
// Use register() and deregister() to modify this registry.
type SessionRegistry struct {
syncutil.Mutex
sessions map[ClusterWideID]registrySession
}
// NewSessionRegistry creates a new SessionRegistry with an empty set
// of sessions.
func NewSessionRegistry() *SessionRegistry {
return &SessionRegistry{sessions: make(map[ClusterWideID]registrySession)}
}
func (r *SessionRegistry) register(id ClusterWideID, s registrySession) {
r.Lock()
r.sessions[id] = s
r.Unlock()
}
func (r *SessionRegistry) deregister(id ClusterWideID) {
r.Lock()
delete(r.sessions, id)
r.Unlock()
}
type registrySession interface {
user() string
cancelQuery(queryID ClusterWideID) bool
cancelSession()
// serialize serializes a Session into a serverpb.Session
// that can be served over RPC.
serialize() serverpb.Session
}
// CancelQuery looks up the associated query in the session registry and cancels it.
func (r *SessionRegistry) CancelQuery(queryIDStr string, username string) (bool, error) {
queryID, err := StringToClusterWideID(queryIDStr)
if err != nil {
return false, fmt.Errorf("query ID %s malformed: %s", queryIDStr, err)
}
r.Lock()
defer r.Unlock()
for _, session := range r.sessions {
if !(username == security.RootUser || username == session.user()) {
// Skip this session.
continue
}
if session.cancelQuery(queryID) {
return true, nil
}
}
return false, fmt.Errorf("query ID %s not found", queryID)
}
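
// A caller-side sketch (queryIDStr is a hypothetical ID string): per the
// check above, non-root users may only cancel queries running in their own
// sessions.
//
//	ok, err := registry.CancelQuery(queryIDStr, security.RootUser)
//	// ok == true iff some session accepted the cancellation
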
// CancelSession looks up the specified session in the session registry and cancels it.