-
Notifications
You must be signed in to change notification settings - Fork 17
/
session.go
1214 lines (1070 loc) · 43.6 KB
/
session.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// Copyright 2020 Zhizhesihai (Beijing) Technology Limited.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package variable
import (
"bytes"
"crypto/tls"
"fmt"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/klauspost/cpuid"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/auth"
"github.com/pingcap/parser/mysql"
"github.com/pingcap/parser/terror"
pumpcli "github.com/pingcap/tidb-tools/tidb-binlog/pump_client"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/meta/autoid"
"github.com/pingcap/tidb/metrics"
"github.com/pingcap/tidb/sessionctx/stmtctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/execdetails"
"github.com/pingcap/tidb/util/timeutil"
)
// Error codes for the terror instances declared below.
const (
	codeCantGetValidID terror.ErrCode = 1
	codeCantSetToNull  terror.ErrCode = 2
	codeSnapshotTooOld terror.ErrCode = 3
)

// preparedStmtCount is the process-wide number of prepared statements across
// all sessions. It is updated atomically and mirrored into
// metrics.PreparedStmtGauge (see RemovePreparedStmt / WithdrawAllPreparedStmt).
var preparedStmtCount int64

// Error instances.
var (
	// errCantGetValidID is returned by RetryInfo.GetCurrAutoIncrementID when
	// the retry cursor runs past the recorded auto-increment IDs.
	errCantGetValidID = terror.ClassVariable.New(codeCantGetValidID, "cannot get valid auto-increment id in retry")
	// ErrCantSetToNull is returned when a variable that may not be NULL is
	// set to NULL (see deleteSystemVar).
	ErrCantSetToNull = terror.ClassVariable.New(codeCantSetToNull, "cannot set variable to null")
	// ErrSnapshotTooOld is returned when a requested snapshot timestamp is
	// older than the GC safe point.
	ErrSnapshotTooOld = terror.ClassVariable.New(codeSnapshotTooOld, "snapshot is older than GC safe point %s")
)
// RetryInfo saves retry information.
type RetryInfo struct {
	// Retrying is true while the session is replaying a transaction in retry.
	Retrying bool
	// DroppedPreparedStmtIDs collects prepared-statement IDs to drop; it is
	// reset by Clean.
	DroppedPreparedStmtIDs []uint32
	// currRetryOff is the read cursor into autoIncrementIDs used by
	// GetCurrAutoIncrementID.
	currRetryOff int
	// autoIncrementIDs are the auto-increment IDs recorded on the first
	// execution (via AddAutoIncrementID) and replayed in order on retry.
	autoIncrementIDs []int64
}
// Clean resets the retry cursor and empties the recorded ID lists while
// keeping their backing arrays for reuse.
func (r *RetryInfo) Clean() {
	r.currRetryOff = 0
	if len(r.DroppedPreparedStmtIDs) > 0 {
		r.DroppedPreparedStmtIDs = r.DroppedPreparedStmtIDs[:0]
	}
	if len(r.autoIncrementIDs) > 0 {
		r.autoIncrementIDs = r.autoIncrementIDs[:0]
	}
}
// AddAutoIncrementID records one allocated auto-increment id so it can be
// replayed by GetCurrAutoIncrementID on transaction retry.
func (r *RetryInfo) AddAutoIncrementID(id int64) {
	ids := r.autoIncrementIDs
	r.autoIncrementIDs = append(ids, id)
}
// ResetOffset rewinds the replay cursor back to the first recorded
// auto-increment id.
func (r *RetryInfo) ResetOffset() {
	r.currRetryOff = 0
}
// GetCurrAutoIncrementID returns the next recorded auto-increment id and
// advances the cursor. Once the recorded ids are exhausted it returns
// errCantGetValidID.
func (r *RetryInfo) GetCurrAutoIncrementID() (int64, error) {
	off := r.currRetryOff
	if off >= len(r.autoIncrementIDs) {
		return 0, errCantGetValidID
	}
	r.currRetryOff = off + 1
	return r.autoIncrementIDs[off], nil
}
// TransactionContext is used to store variables that has transaction scope.
type TransactionContext struct {
	// ForUpdate is set when the transaction has taken a for-update timestamp;
	// forUpdateTS holds it (see GetForUpdateTS/SetForUpdateTS).
	ForUpdate   bool
	forUpdateTS uint64
	// DirtyDB, Binlog, InfoSchema and History are held as interface{} —
	// presumably to avoid import cycles; confirm against the packages that
	// populate them. All but InfoSchema are cleared by Cleanup.
	DirtyDB       interface{}
	Binlog        interface{}
	InfoSchema    interface{}
	History       interface{}
	SchemaVersion int64
	StartTS       uint64
	Shard         *int64
	// TableDeltaMap accumulates per-table row count/size deltas via
	// UpdateDeltaForTable; it is allocated lazily.
	TableDeltaMap map[int64]TableDelta
	// IsPessimistic marks the transaction as running in pessimistic mode.
	IsPessimistic bool
	// CreateTime For metrics.
	CreateTime     time.Time
	StatementCount int
	IsBatched      bool
}
// UpdateDeltaForTable folds a statement's row-count delta, row count and
// per-column size changes into the transaction's entry for tableID, creating
// the delta map (and the per-entry column map) on first use.
func (tc *TransactionContext) UpdateDeltaForTable(tableID int64, delta int64, count int64, colSize map[int64]int64) {
	if tc.TableDeltaMap == nil {
		tc.TableDeltaMap = make(map[int64]TableDelta)
	}
	entry := tc.TableDeltaMap[tableID]
	entry.Delta += delta
	entry.Count += count
	if colSize != nil {
		if entry.ColSize == nil {
			entry.ColSize = make(map[int64]int64)
		}
		for colID, sz := range colSize {
			entry.ColSize[colID] += sz
		}
	}
	tc.TableDeltaMap[tableID] = entry
}
// Cleanup releases transaction-scoped state that is no longer needed.
// tc.InfoSchema is deliberately kept: operations such as handleFieldList
// still depend on it.
func (tc *TransactionContext) Cleanup() {
	tc.TableDeltaMap = nil
	tc.History = nil
	tc.Binlog = nil
	tc.DirtyDB = nil
}
// ClearDelta drops all per-table delta info accumulated so far.
func (tc *TransactionContext) ClearDelta() {
	tc.TableDeltaMap = nil
}
// GetForUpdateTS returns the timestamp to use for updates: the for-update ts
// when it is ahead of the transaction start ts, otherwise the start ts.
func (tc *TransactionContext) GetForUpdateTS() uint64 {
	if ts := tc.forUpdateTS; ts > tc.StartTS {
		return ts
	}
	return tc.StartTS
}
// SetForUpdateTS advances the for-update timestamp; it never moves backwards.
func (tc *TransactionContext) SetForUpdateTS(forUpdateTS uint64) {
	if tc.forUpdateTS < forUpdateTS {
		tc.forUpdateTS = forUpdateTS
	}
}
// WriteStmtBufs can be used by insert/replace/delete/update statement.
// All fields are scratch buffers reused across statements; clean resets them.
// TODO: use a common memory pool to replace this.
type WriteStmtBufs struct {
	// RowValBuf is used by tablecodec.EncodeRow, to reduce runtime.growslice.
	RowValBuf []byte
	// BufStore stores temp KVs for a row when executing insert statement.
	// We could reuse a BufStore for multiple rows of a session to reduce memory allocations.
	BufStore *kv.BufferStore
	// AddRowValues use to store temp insert rows value, to reduce memory allocations when importing data.
	AddRowValues []types.Datum
	// IndexValsBuf is used by index.FetchValues
	IndexValsBuf []types.Datum
	// IndexKeyBuf is used by index.GenIndexKey
	IndexKeyBuf []byte
}
// clean drops every buffer so the backing memory can be reclaimed.
// Assigning the zero value resets all five fields at once.
func (ib *WriteStmtBufs) clean() {
	*ib = WriteStmtBufs{}
}
// SessionVars is to handle user-defined or global variables in the current session.
type SessionVars struct {
	// Concurrency, MemQuota and BatchSize group the executor tuning knobs;
	// they are populated with defaults in NewSessionVars.
	Concurrency
	MemQuota
	BatchSize
	// RetryLimit is the maximum number of automatic transaction retries.
	RetryLimit int64
	// DisableTxnAutoRetry disables automatic transaction retry when true.
	DisableTxnAutoRetry bool
	// UsersLock is a lock for user defined variables.
	UsersLock sync.RWMutex
	// Users are user defined variables.
	Users map[string]string
	// systems variables, don't modify it directly, use GetSystemVar/SetSystemVar method.
	systems map[string]string
	// PreparedStmts stores prepared statement.
	PreparedStmts        map[uint32]*ast.Prepared
	PreparedStmtNameToID map[string]uint32
	// preparedStmtID is id of prepared statement.
	preparedStmtID uint32
	// PreparedParams params for prepared statements
	PreparedParams PreparedParams
	// ActiveRoles stores active roles for current user
	ActiveRoles []*auth.RoleIdentity
	RetryInfo   *RetryInfo
	// TxnCtx Should be reset on transaction finished.
	TxnCtx *TransactionContext
	// KVVars is the variables for KV storage.
	KVVars *kv.Variables
	// TxnIsolationLevelOneShot is used to implements "set transaction isolation level ..."
	TxnIsolationLevelOneShot struct {
		// State 0 means default
		// State 1 means it's set in current transaction.
		// State 2 means it should be used in current transaction.
		State int
		Value string
	}
	// Status stands for the session status. e.g. in transaction or not, auto commit is on or off, and so on.
	Status uint16
	// ClientCapability is client's capability.
	ClientCapability uint32
	// TLSConnectionState is the TLS connection state (nil if not using TLS).
	TLSConnectionState *tls.ConnectionState
	// ConnectionID is the connection id of the current session.
	ConnectionID uint64
	// PlanID is the unique id of logical and physical plan.
	PlanID int
	// PlanColumnID is the unique id for column when building plan.
	PlanColumnID int64
	// User is the user identity with which the session login.
	User *auth.UserIdentity
	// CurrentDB is the default database of this session.
	CurrentDB string
	// StrictSQLMode indicates if the session is in strict mode.
	StrictSQLMode bool
	// CommonGlobalLoaded indicates if common global variable has been loaded for this session.
	CommonGlobalLoaded bool
	// InRestrictedSQL indicates if the session is handling restricted SQL execution.
	InRestrictedSQL bool
	// SnapshotTS is used for reading history data. For simplicity, SnapshotTS only supports distsql request.
	SnapshotTS uint64
	// SnapshotInfoschema is used with SnapshotTS, when the schema version at snapshotTS less than current schema
	// version, we load an old version schema for query.
	SnapshotInfoschema interface{}
	// BinlogClient is used to write binlog.
	BinlogClient *pumpcli.PumpsClient
	// GlobalVarsAccessor is used to set and get global variables.
	GlobalVarsAccessor GlobalVarAccessor
	// LastFoundRows is the number of found rows of last query statement
	LastFoundRows uint64
	// StmtCtx holds variables for current executing statement.
	StmtCtx *stmtctx.StatementContext
	// AllowAggPushDown can be set to false to forbid aggregation push down.
	AllowAggPushDown bool
	// AllowWriteRowID can be set to false to forbid write data to _tidb_rowid.
	// This variable is currently not recommended to be turned on.
	AllowWriteRowID bool
	// AllowInSubqToJoinAndAgg can be set to false to forbid rewriting the semi join to inner join with agg.
	AllowInSubqToJoinAndAgg bool
	// CorrelationThreshold is the guard to enable row count estimation using column order correlation.
	CorrelationThreshold float64
	// CorrelationExpFactor is used to control the heuristic approach of row count estimation when CorrelationThreshold is not met.
	CorrelationExpFactor int
	// CurrInsertValues is used to record current ValuesExpr's values.
	// See http://dev.mysql.com/doc/refman/5.7/en/miscellaneous-functions.html#function_values
	CurrInsertValues chunk.Row
	// Per-connection time zones. Each client that connects has its own time zone setting, given by the session time_zone variable.
	// See https://dev.mysql.com/doc/refman/5.7/en/time-zone-support.html
	TimeZone *time.Location
	// SQLMode is the session sql_mode; StrictSQLMode is derived from it.
	SQLMode mysql.SQLMode
	/* TiDB system variables */
	// LightningMode is true when the lightning use the kvencoder to transfer sql to raw kv.
	LightningMode bool
	// SkipUTF8Check check on input value.
	SkipUTF8Check bool
	// BatchInsert indicates if we should split insert data into multiple batches.
	BatchInsert bool
	// BatchDelete indicates if we should split delete data into multiple batches.
	BatchDelete bool
	// BatchCommit indicates if we should split the transaction into multiple batches.
	BatchCommit bool
	// IDAllocator is provided by kvEncoder, if it is provided, we will use it to alloc auto id instead of using
	// Table.alloc.
	IDAllocator autoid.Allocator
	// OptimizerSelectivityLevel defines the level of the selectivity estimation in plan.
	OptimizerSelectivityLevel int
	// EnableTablePartition enables table partition feature.
	EnableTablePartition string
	// EnableCascadesPlanner enables the cascades planner.
	EnableCascadesPlanner bool
	// EnableWindowFunction enables the window function.
	EnableWindowFunction bool
	// DDLReorgPriority is the operation priority of adding indices.
	DDLReorgPriority int
	// WaitSplitRegionFinish defines the split region behaviour is sync or async.
	WaitSplitRegionFinish bool
	// WaitSplitRegionTimeout defines the split region timeout.
	WaitSplitRegionTimeout uint64
	// EnableStreaming indicates whether the coprocessor request can use streaming API.
	// TODO: remove this after tidb-server configuration "enable-streaming' removed.
	EnableStreaming bool
	// writeStmtBufs holds per-session scratch buffers for write statements;
	// access via GetWriteStmtBufs.
	writeStmtBufs WriteStmtBufs
	// L2CacheSize indicates the size of CPU L2 cache, using byte as unit.
	L2CacheSize int
	// EnableRadixJoin indicates whether to use radix hash join to execute
	// HashJoin.
	EnableRadixJoin bool
	// ConstraintCheckInPlace indicates whether to check the constraint when the SQL executing.
	ConstraintCheckInPlace bool
	// CommandValue indicates which command current session is doing.
	CommandValue uint32
	// TiDBOptJoinReorderThreshold defines the minimal number of join nodes
	// to use the greedy join reorder algorithm.
	TiDBOptJoinReorderThreshold int
	// SlowQueryFile indicates which slow query log file for SLOW_QUERY table to parse.
	SlowQueryFile string
	// EnableFastAnalyze indicates whether to take fast analyze.
	EnableFastAnalyze bool
	// TxnMode indicates should be pessimistic or optimistic.
	TxnMode string
	// LowResolutionTSO is used for reading data with low resolution TSO which is updated once every two seconds.
	LowResolutionTSO bool
	// MaxExecutionTime is the timeout for select statement, in milliseconds.
	// If the value is 0, timeouts are not enabled.
	// See https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_max_execution_time
	MaxExecutionTime uint64
	// Killed is a flag to indicate that this query is killed.
	Killed uint32
	// ConnectionInfo indicates current connection info used by current session, only be lazy assigned by plugin.
	ConnectionInfo *ConnectionInfo
	// StartTime is the start time of the last query.
	StartTime time.Time
	// DurationParse is the duration of parsing SQL string to AST of the last query.
	DurationParse time.Duration
	// DurationCompile is the duration of compiling AST to execution plan of the last query.
	DurationCompile time.Duration
	// PrevStmt is used to store the previous executed statement in the current session.
	PrevStmt fmt.Stringer
	// AllowRemoveAutoInc indicates whether a user can drop the auto_increment column attribute or not.
	AllowRemoveAutoInc bool
	// LockWaitTimeout is the duration waiting for pessimistic lock in milliseconds
	// negative value means nowait, 0 means default behavior, others means actual wait time
	LockWaitTimeout int64
}
// PreparedParams contains the parameters of the current prepared statement when executing it.
type PreparedParams []types.Datum

// String renders the parameters for logging. It returns the empty string
// when there are no parameters.
func (pps PreparedParams) String() string {
	if len(pps) > 0 {
		return " [arguments: " + types.DatumsToStrNoErr(pps) + "]"
	}
	return ""
}
// ConnectionInfo present connection used by audit.
// It is lazily filled by the audit plugin (see SessionVars.ConnectionInfo).
type ConnectionInfo struct {
	// Client-side identity of the connection.
	ConnectionID   uint32
	ConnectionType string
	Host           string
	ClientIP       string
	ClientPort     string
	// Server-side endpoint information.
	ServerID   int
	ServerPort int
	// Duration of the connection; the unit is not evident here — confirm
	// against the plugin that fills this struct.
	Duration          float64
	User              string
	ServerOSLoginUser string
	OSVersion         string
	ClientVersion     string
	ServerVersion     string
	SSLVersion        string
	PID               int
	DB                string
}
// NewSessionVars creates a session vars object.
// All tunables start from their Def* package defaults or from the global
// config; Concurrency, MemQuota and BatchSize are filled as grouped blocks
// below.
func NewSessionVars() *SessionVars {
	vars := &SessionVars{
		Users:                       make(map[string]string),
		systems:                     make(map[string]string),
		PreparedStmts:               make(map[uint32]*ast.Prepared),
		PreparedStmtNameToID:        make(map[string]uint32),
		PreparedParams:              make([]types.Datum, 0, 10),
		TxnCtx:                      &TransactionContext{},
		RetryInfo:                   &RetryInfo{},
		ActiveRoles:                 make([]*auth.RoleIdentity, 0, 10),
		StrictSQLMode:               true,
		Status:                      mysql.ServerStatusAutocommit,
		StmtCtx:                     new(stmtctx.StatementContext),
		AllowAggPushDown:            false,
		OptimizerSelectivityLevel:   DefTiDBOptimizerSelectivityLevel,
		RetryLimit:                  DefTiDBRetryLimit,
		DisableTxnAutoRetry:         DefTiDBDisableTxnAutoRetry,
		DDLReorgPriority:            kv.PriorityLow,
		AllowInSubqToJoinAndAgg:     DefOptInSubqToJoinAndAgg,
		CorrelationThreshold:        DefOptCorrelationThreshold,
		CorrelationExpFactor:        DefOptCorrelationExpFactor,
		EnableRadixJoin:             false,
		// Probe the actual CPU L2 cache size for the radix join.
		L2CacheSize:                 cpuid.CPU.Cache.L2,
		CommandValue:                uint32(mysql.ComSleep),
		TiDBOptJoinReorderThreshold: DefTiDBOptJoinReorderThreshold,
		SlowQueryFile:               config.GetGlobalConfig().Log.SlowQueryFile,
		WaitSplitRegionFinish:       DefTiDBWaitSplitRegionFinish,
		WaitSplitRegionTimeout:      DefWaitSplitRegionTimeout,
		AllowRemoveAutoInc:          DefTiDBAllowRemoveAutoInc,
		// The default is stored in seconds; LockWaitTimeout is milliseconds.
		LockWaitTimeout:             DefInnodbLockWaitTimeout * 1000,
	}
	// Wire the kill flag into the KV layer so a kill takes effect mid-request.
	vars.KVVars = kv.NewVariables(&vars.Killed)
	vars.Concurrency = Concurrency{
		IndexLookupConcurrency:     DefIndexLookupConcurrency,
		IndexSerialScanConcurrency: DefIndexSerialScanConcurrency,
		IndexLookupJoinConcurrency: DefIndexLookupJoinConcurrency,
		HashJoinConcurrency:        DefTiDBHashJoinConcurrency,
		ProjectionConcurrency:      DefTiDBProjectionConcurrency,
		DistSQLScanConcurrency:     DefDistSQLScanConcurrency,
		HashAggPartialConcurrency:  DefTiDBHashAggPartialConcurrency,
		HashAggFinalConcurrency:    DefTiDBHashAggFinalConcurrency,
	}
	vars.MemQuota = MemQuota{
		MemQuotaQuery:             config.GetGlobalConfig().MemQuotaQuery,
		MemQuotaHashJoin:          DefTiDBMemQuotaHashJoin,
		MemQuotaMergeJoin:         DefTiDBMemQuotaMergeJoin,
		MemQuotaSort:              DefTiDBMemQuotaSort,
		MemQuotaTopn:              DefTiDBMemQuotaTopn,
		MemQuotaIndexLookupReader: DefTiDBMemQuotaIndexLookupReader,
		MemQuotaIndexLookupJoin:   DefTiDBMemQuotaIndexLookupJoin,
		MemQuotaNestedLoopApply:   DefTiDBMemQuotaNestedLoopApply,
		MemQuotaDistSQL:           DefTiDBMemQuotaDistSQL,
	}
	vars.BatchSize = BatchSize{
		IndexJoinBatchSize: DefIndexJoinBatchSize,
		IndexLookupSize:    DefIndexLookupSize,
		InitChunkSize:      DefInitChunkSize,
		MaxChunkSize:       DefMaxChunkSize,
		DMLBatchSize:       DefDMLBatchSize,
	}
	// Seed the streaming switch from the server configuration.
	var enableStreaming string
	if config.GetGlobalConfig().EnableStreaming {
		enableStreaming = "1"
	} else {
		enableStreaming = "0"
	}
	terror.Log(vars.SetSystemVar(TiDBEnableStreaming, enableStreaming))
	return vars
}
// GetWriteStmtBufs returns a pointer to the session's reusable write-statement
// buffers.
func (s *SessionVars) GetWriteStmtBufs() *WriteStmtBufs {
	bufs := &s.writeStmtBufs
	return bufs
}
// GetSplitRegionTimeout returns WaitSplitRegionTimeout (stored in seconds)
// as a time.Duration.
func (s *SessionVars) GetSplitRegionTimeout() time.Duration {
	seconds := time.Duration(s.WaitSplitRegionTimeout)
	return seconds * time.Second
}
// CleanBuffers resets the temporary write buffers. In lightning mode the
// buffers are left untouched.
func (s *SessionVars) CleanBuffers() {
	if s.LightningMode {
		return
	}
	s.GetWriteStmtBufs().clean()
}
// AllocPlanColumnID hands out the next unique column id for plan building.
func (s *SessionVars) AllocPlanColumnID() int64 {
	next := s.PlanColumnID + 1
	s.PlanColumnID = next
	return next
}
// GetCharsetInfo gets charset and collation for current context.
// What character set should the server translate a statement to after receiving it?
// For this, the server uses the character_set_connection and collation_connection system variables.
// It converts statements sent by the client from character_set_client to character_set_connection
// (except for string literals that have an introducer such as _latin1 or _utf8).
// collation_connection is important for comparisons of literal strings.
// For comparisons of strings with column values, collation_connection does not matter because columns
// have their own collation, which has a higher collation precedence.
// See https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html
func (s *SessionVars) GetCharsetInfo() (charset, collation string) {
	// NOTE(review): the system-variable lookups are disabled in this fork, so
	// this always returns empty strings via the named results — confirm that
	// callers handle the empty values.
	// charset = s.systems[CharacterSetConnection]
	// collation = s.systems[CollationConnection]
	return
}
// SetLastInsertID records insertID on the current statement context.
// TODO: we may store the result for last_insert_id sys var later.
func (s *SessionVars) SetLastInsertID(insertID uint64) {
	s.StmtCtx.LastInsertID = insertID
}
// SetStatusFlag sets the session server status variable.
// If on is true the flag is set in the session status, otherwise it is
// cleared.
func (s *SessionVars) SetStatusFlag(flag uint16, on bool) {
	if !on {
		s.Status &^= flag
		return
	}
	s.Status |= flag
}
// GetStatusFlag reports whether the given server status flag is set.
func (s *SessionVars) GetStatusFlag(flag uint16) bool {
	return s.Status&flag != 0
}
// InTxn reports whether the session currently has an open transaction.
func (s *SessionVars) InTxn() bool {
	return s.GetStatusFlag(mysql.ServerStatusInTrans)
}
// IsAutocommit reports whether autocommit is enabled for this session.
func (s *SessionVars) IsAutocommit() bool {
	return s.GetStatusFlag(mysql.ServerStatusAutocommit)
}
// GetNextPreparedStmtID generates and returns the next session scope prepared
// statement id.
func (s *SessionVars) GetNextPreparedStmtID() uint32 {
	next := s.preparedStmtID + 1
	s.preparedStmtID = next
	return next
}
// Location returns the session time_zone location; when unset it falls back
// to the system location reported by timeutil.SystemLocation.
func (s *SessionVars) Location() *time.Location {
	if s.TimeZone != nil {
		return s.TimeZone
	}
	return timeutil.SystemLocation()
}
// GetSystemVar gets the string value of a system variable; the second result
// reports whether the variable is present in the session map.
func (s *SessionVars) GetSystemVar(name string) (string, bool) {
	if val, ok := s.systems[name]; ok {
		return val, true
	}
	return "", false
}
// deleteSystemVar deletes a system variable. Only CharacterSetResults may be
// deleted (set to NULL); any other name yields ErrCantSetToNull.
func (s *SessionVars) deleteSystemVar(name string) error {
	if name == CharacterSetResults {
		delete(s.systems, name)
		return nil
	}
	return ErrCantSetToNull
}
// setDDLReorgPriority maps the textual priority value onto a kv priority.
// "priority_low" and any unrecognized value both fall back to low priority.
func (s *SessionVars) setDDLReorgPriority(val string) {
	switch strings.ToLower(val) {
	case "priority_normal":
		s.DDLReorgPriority = kv.PriorityNormal
	case "priority_high":
		s.DDLReorgPriority = kv.PriorityHigh
	default:
		s.DDLReorgPriority = kv.PriorityLow
	}
}
// AddPreparedStmt adds prepareStmt to current session and count in global.
//
// Only statements new to this session are counted, so the global counter
// stays balanced with RemovePreparedStmt and WithdrawAllPreparedStmt, which
// decrement preparedStmtCount unconditionally on removal. Without the
// increment here the counter (and PreparedStmtGauge) drifts negative.
//
// NOTE(review): upstream additionally enforces max_prepared_stmt_count at
// this point; that limit check is disabled in this fork and only the
// bookkeeping is kept.
func (s *SessionVars) AddPreparedStmt(stmtID uint32, stmt *ast.Prepared) error {
	if _, exists := s.PreparedStmts[stmtID]; !exists {
		newPreparedStmtCount := atomic.AddInt64(&preparedStmtCount, 1)
		metrics.PreparedStmtGauge.Set(float64(newPreparedStmtCount))
	}
	s.PreparedStmts[stmtID] = stmt
	return nil
}
// RemovePreparedStmt drops the statement with stmtID from this session,
// decrements the global prepared-statement counter and refreshes the gauge.
// Unknown ids are ignored.
func (s *SessionVars) RemovePreparedStmt(stmtID uint32) {
	if _, ok := s.PreparedStmts[stmtID]; !ok {
		return
	}
	delete(s.PreparedStmts, stmtID)
	remaining := atomic.AddInt64(&preparedStmtCount, -1)
	metrics.PreparedStmtGauge.Set(float64(remaining))
}
// WithdrawAllPreparedStmt removes this session's prepared statements from the
// global count and refreshes the gauge.
// NOTE(review): only the global counter is adjusted; the session-local map is
// left untouched — presumably this is only called when the whole session is
// being discarded. Confirm against callers.
func (s *SessionVars) WithdrawAllPreparedStmt() {
	n := int64(len(s.PreparedStmts))
	if n == 0 {
		return
	}
	remaining := atomic.AddInt64(&preparedStmtCount, -n)
	metrics.PreparedStmtGauge.Set(float64(remaining))
}
// SetSystemVar sets the value of a system variable.
//
// NOTE(review): upstream TiDB also dispatches on name here to keep the typed
// session fields (time zone, SQL mode, concurrency and memory quotas, ...)
// in sync; that switch is disabled in this fork and kept below as reference.
// The final store into s.systems had been swept into the same comment block,
// which left GetSystemVar and deleteSystemVar reading a map that was never
// written (and made the SetSystemVar call in NewSessionVars a no-op); the
// store is restored here.
func (s *SessionVars) SetSystemVar(name string, val string) error {
	// switch name {
	// case TxnIsolationOneShot:
	// switch val {
	// case "SERIALIZABLE", "READ-UNCOMMITTED":
	// skipIsolationLevelCheck, err := GetSessionSystemVar(s, TiDBSkipIsolationLevelCheck)
	// returnErr := ErrUnsupportedIsolationLevel.GenWithStackByArgs(val)
	// if err != nil {
	// returnErr = err
	// }
	// if !TiDBOptOn(skipIsolationLevelCheck) || err != nil {
	// return returnErr
	// }
	// //SET TRANSACTION ISOLATION LEVEL will affect two internal variables:
	// // 1. tx_isolation
	// // 2. transaction_isolation
	// // The following if condition is used to deduplicate two same warnings.
	// if name == "transaction_isolation" {
	// s.StmtCtx.AppendWarning(returnErr)
	// }
	// }
	// s.TxnIsolationLevelOneShot.State = 1
	// s.TxnIsolationLevelOneShot.Value = val
	// case TimeZone:
	// tz, err := parseTimeZone(val)
	// if err != nil {
	// return err
	// }
	// s.TimeZone = tz
	// case SQLModeVar:
	// val = mysql.FormatSQLModeStr(val)
	// // Modes is a list of different modes separated by commas.
	// sqlMode, err2 := mysql.GetSQLMode(val)
	// if err2 != nil {
	// return errors.Trace(err2)
	// }
	// s.StrictSQLMode = sqlMode.HasStrictMode()
	// s.SQLMode = sqlMode
	// s.SetStatusFlag(mysql.ServerStatusNoBackslashEscaped, sqlMode.HasNoBackslashEscapesMode())
	// case TiDBSnapshot:
	// err := setSnapshotTS(s, val)
	// if err != nil {
	// return err
	// }
	// case AutoCommit:
	// isAutocommit := TiDBOptOn(val)
	// s.SetStatusFlag(mysql.ServerStatusAutocommit, isAutocommit)
	// if isAutocommit {
	// s.SetStatusFlag(mysql.ServerStatusInTrans, false)
	// }
	// case MaxExecutionTime:
	// timeoutMS := tidbOptPositiveInt32(val, 0)
	// s.MaxExecutionTime = uint64(timeoutMS)
	// case InnodbLockWaitTimeout:
	// lockWaitSec := tidbOptInt64(val, DefInnodbLockWaitTimeout)
	// s.LockWaitTimeout = int64(lockWaitSec * 1000)
	// case TiDBSkipUTF8Check:
	// s.SkipUTF8Check = TiDBOptOn(val)
	// case TiDBOptAggPushDown:
	// s.AllowAggPushDown = TiDBOptOn(val)
	// case TiDBOptWriteRowID:
	// s.AllowWriteRowID = TiDBOptOn(val)
	// case TiDBOptInSubqToJoinAndAgg:
	// s.AllowInSubqToJoinAndAgg = TiDBOptOn(val)
	// case TiDBOptCorrelationThreshold:
	// s.CorrelationThreshold = tidbOptFloat64(val, DefOptCorrelationThreshold)
	// case TiDBOptCorrelationExpFactor:
	// s.CorrelationExpFactor = int(tidbOptInt64(val, DefOptCorrelationExpFactor))
	// case TiDBIndexLookupConcurrency:
	// s.IndexLookupConcurrency = tidbOptPositiveInt32(val, DefIndexLookupConcurrency)
	// case TiDBIndexLookupJoinConcurrency:
	// s.IndexLookupJoinConcurrency = tidbOptPositiveInt32(val, DefIndexLookupJoinConcurrency)
	// case TiDBIndexJoinBatchSize:
	// s.IndexJoinBatchSize = tidbOptPositiveInt32(val, DefIndexJoinBatchSize)
	// case TiDBIndexLookupSize:
	// s.IndexLookupSize = tidbOptPositiveInt32(val, DefIndexLookupSize)
	// case TiDBHashJoinConcurrency:
	// s.HashJoinConcurrency = tidbOptPositiveInt32(val, DefTiDBHashJoinConcurrency)
	// case TiDBProjectionConcurrency:
	// s.ProjectionConcurrency = tidbOptInt64(val, DefTiDBProjectionConcurrency)
	// case TiDBHashAggPartialConcurrency:
	// s.HashAggPartialConcurrency = tidbOptPositiveInt32(val, DefTiDBHashAggPartialConcurrency)
	// case TiDBHashAggFinalConcurrency:
	// s.HashAggFinalConcurrency = tidbOptPositiveInt32(val, DefTiDBHashAggFinalConcurrency)
	// case TiDBDistSQLScanConcurrency:
	// s.DistSQLScanConcurrency = tidbOptPositiveInt32(val, DefDistSQLScanConcurrency)
	// case TiDBIndexSerialScanConcurrency:
	// s.IndexSerialScanConcurrency = tidbOptPositiveInt32(val, DefIndexSerialScanConcurrency)
	// case TiDBBackoffLockFast:
	// s.KVVars.BackoffLockFast = tidbOptPositiveInt32(val, kv.DefBackoffLockFast)
	// case TiDBBackOffWeight:
	// s.KVVars.BackOffWeight = tidbOptPositiveInt32(val, kv.DefBackOffWeight)
	// case TiDBConstraintCheckInPlace:
	// s.ConstraintCheckInPlace = TiDBOptOn(val)
	// case TiDBBatchInsert:
	// s.BatchInsert = TiDBOptOn(val)
	// case TiDBBatchDelete:
	// s.BatchDelete = TiDBOptOn(val)
	// case TiDBBatchCommit:
	// s.BatchCommit = TiDBOptOn(val)
	// case TiDBDMLBatchSize:
	// s.DMLBatchSize = tidbOptPositiveInt32(val, DefDMLBatchSize)
	// case TiDBCurrentTS, TiDBConfig:
	// return ErrReadOnly
	// case TiDBMaxChunkSize:
	// s.MaxChunkSize = tidbOptPositiveInt32(val, DefMaxChunkSize)
	// case TiDBInitChunkSize:
	// s.InitChunkSize = tidbOptPositiveInt32(val, DefInitChunkSize)
	// case TIDBMemQuotaQuery:
	// s.MemQuotaQuery = tidbOptInt64(val, config.GetGlobalConfig().MemQuotaQuery)
	// case TIDBMemQuotaHashJoin:
	// s.MemQuotaHashJoin = tidbOptInt64(val, DefTiDBMemQuotaHashJoin)
	// case TIDBMemQuotaMergeJoin:
	// s.MemQuotaMergeJoin = tidbOptInt64(val, DefTiDBMemQuotaMergeJoin)
	// case TIDBMemQuotaSort:
	// s.MemQuotaSort = tidbOptInt64(val, DefTiDBMemQuotaSort)
	// case TIDBMemQuotaTopn:
	// s.MemQuotaTopn = tidbOptInt64(val, DefTiDBMemQuotaTopn)
	// case TIDBMemQuotaIndexLookupReader:
	// s.MemQuotaIndexLookupReader = tidbOptInt64(val, DefTiDBMemQuotaIndexLookupReader)
	// case TIDBMemQuotaIndexLookupJoin:
	// s.MemQuotaIndexLookupJoin = tidbOptInt64(val, DefTiDBMemQuotaIndexLookupJoin)
	// case TIDBMemQuotaNestedLoopApply:
	// s.MemQuotaNestedLoopApply = tidbOptInt64(val, DefTiDBMemQuotaNestedLoopApply)
	// case TiDBGeneralLog:
	// atomic.StoreUint32(&ProcessGeneralLog, uint32(tidbOptPositiveInt32(val, DefTiDBGeneralLog)))
	// case TiDBSlowLogThreshold:
	// atomic.StoreUint64(&config.GetGlobalConfig().Log.SlowThreshold, uint64(tidbOptInt64(val, logutil.DefaultSlowThreshold)))
	// case TiDBRecordPlanInSlowLog:
	// atomic.StoreUint32(&config.GetGlobalConfig().Log.RecordPlanInSlowLog, uint32(tidbOptInt64(val, logutil.DefaultRecordPlanInSlowLog)))
	// case TiDBDDLSlowOprThreshold:
	// atomic.StoreUint32(&DDLSlowOprThreshold, uint32(tidbOptPositiveInt32(val, DefTiDBDDLSlowOprThreshold)))
	// case TiDBQueryLogMaxLen:
	// atomic.StoreUint64(&config.GetGlobalConfig().Log.QueryLogMaxLen, uint64(tidbOptInt64(val, logutil.DefaultQueryLogMaxLen)))
	// case TiDBRetryLimit:
	// s.RetryLimit = tidbOptInt64(val, DefTiDBRetryLimit)
	// case TiDBDisableTxnAutoRetry:
	// s.DisableTxnAutoRetry = TiDBOptOn(val)
	// case TiDBEnableStreaming:
	// s.EnableStreaming = TiDBOptOn(val)
	// case TiDBEnableCascadesPlanner:
	// s.EnableCascadesPlanner = TiDBOptOn(val)
	// case TiDBOptimizerSelectivityLevel:
	// s.OptimizerSelectivityLevel = tidbOptPositiveInt32(val, DefTiDBOptimizerSelectivityLevel)
	// case TiDBEnableTablePartition:
	// s.EnableTablePartition = val
	// case TiDBDDLReorgPriority:
	// s.setDDLReorgPriority(val)
	// case TiDBForcePriority:
	// atomic.StoreInt32(&ForcePriority, int32(mysql.Str2Priority(val)))
	// case TiDBEnableRadixJoin:
	// s.EnableRadixJoin = TiDBOptOn(val)
	// case TiDBEnableWindowFunction:
	// s.EnableWindowFunction = TiDBOptOn(val)
	// case TiDBOptJoinReorderThreshold:
	// s.TiDBOptJoinReorderThreshold = tidbOptPositiveInt32(val, DefTiDBOptJoinReorderThreshold)
	// case TiDBCheckMb4ValueInUTF8:
	// config.GetGlobalConfig().CheckMb4ValueInUTF8 = TiDBOptOn(val)
	// case TiDBSlowQueryFile:
	// s.SlowQueryFile = val
	// case TiDBEnableFastAnalyze:
	// s.EnableFastAnalyze = TiDBOptOn(val)
	// case TiDBWaitSplitRegionFinish:
	// s.WaitSplitRegionFinish = TiDBOptOn(val)
	// case TiDBWaitSplitRegionTimeout:
	// s.WaitSplitRegionTimeout = uint64(tidbOptPositiveInt32(val, DefWaitSplitRegionTimeout))
	// case TiDBExpensiveQueryTimeThreshold:
	// atomic.StoreUint64(&ExpensiveQueryTimeThreshold, uint64(tidbOptPositiveInt32(val, DefTiDBExpensiveQueryTimeThreshold)))
	// case TiDBTxnMode:
	// s.TxnMode = strings.ToUpper(val)
	// case TiDBLowResolutionTSO:
	// s.LowResolutionTSO = TiDBOptOn(val)
	// case TiDBAllowRemoveAutoInc:
	// s.AllowRemoveAutoInc = TiDBOptOn(val)
	// // It's a global variable, but it also wants to be cached in server.
	// case TiDBMaxDeltaSchemaCount:
	// SetMaxDeltaSchemaCount(tidbOptInt64(val, DefTiDBMaxDeltaSchemaCount))
	// case TiDBStoreLimit:
	// storeutil.StoreLimit.Store(tidbOptInt64(val, DefTiDBStoreLimit))
	// }
	s.systems[name] = val
	return nil
}
// setTxnMode updates the session's transaction mode from the given string.
// Recognized values (case-insensitive) are the pessimistic/optimistic modes
// and the empty string, which resets the mode to the unset state. Any other
// value is currently accepted silently and leaves TxnMode unchanged.
func (s *SessionVars) setTxnMode(val string) error {
	// Normalize to upper case before matching the known transaction modes.
	upper := strings.ToUpper(val)
	if upper == ast.Pessimistic || upper == ast.Optimistic || upper == "" {
		s.TxnMode = upper
		return nil
	}
	// Validation of unknown values is intentionally disabled; the original
	// rejection is kept for reference:
	// return ErrWrongValueForVar.FastGenByArgs(TiDBTxnMode, val)
	return nil
}
// SetLocalSystemVar sets values of the local variables which in "server" scope.
// NOTE(review): the whole implementation below is commented out, so this
// function is currently a no-op — the DDL reorg worker count, batch size, and
// error count limit are no longer applied from here. Confirm whether this is
// intentional before relying on these server-scope variables taking effect.
func SetLocalSystemVar(name string, val string) {
	// switch name {
	// case TiDBDDLReorgWorkerCount:
	// 	SetDDLReorgWorkerCounter(int32(tidbOptPositiveInt32(val, DefTiDBDDLReorgWorkerCount)))
	// case TiDBDDLReorgBatchSize:
	// 	SetDDLReorgBatchSize(int32(tidbOptPositiveInt32(val, DefTiDBDDLReorgBatchSize)))
	// case TiDBDDLErrorCountLimit:
	// 	SetDDLErrorCountLimit(tidbOptInt64(val, DefTiDBDDLErrorCountLimit))
	// }
}
// special session variables.
// These constants name MySQL system variables that need dedicated handling
// when set, rather than going through the generic system-variable path.
const (
	// SQLModeVar is the name of the "sql_mode" system variable.
	SQLModeVar = "sql_mode"
	// CharacterSetResults is the name of the "character_set_results" system variable.
	CharacterSetResults = "character_set_results"
	// MaxAllowedPacket is the name of the "max_allowed_packet" system variable.
	MaxAllowedPacket = "max_allowed_packet"
	// TimeZone is the name of the "time_zone" system variable.
	TimeZone = "time_zone"
	// TxnIsolation is the name of the deprecated "tx_isolation" system variable.
	TxnIsolation = "tx_isolation"
	// TransactionIsolation is the name of the "transaction_isolation" system variable.
	TransactionIsolation = "transaction_isolation"
	// TxnIsolationOneShot is the internal name used for a one-shot
	// "SET TRANSACTION ISOLATION LEVEL" that applies to the next transaction only.
	TxnIsolationOneShot = "tx_isolation_one_shot"
	// MaxExecutionTime is the name of the "max_execution_time" system variable.
	MaxExecutionTime = "max_execution_time"
)
// these variables are useless for TiDB, but still need to validate their values for some compatible issues.
// TODO: some more variables need to be added here.
const (
	// serverReadOnly is the name of the "read_only" system variable, accepted
	// for MySQL compatibility only.
	serverReadOnly = "read_only"
)
var (
	// TxIsolationNames are the valid values of the variable "tx_isolation" or "transaction_isolation".
	// The map is used as a set (empty struct values); keys use MySQL's
	// canonical upper-case, dash-separated spelling, so lookups must
	// normalize input the same way.
	TxIsolationNames = map[string]struct{}{
		"READ-UNCOMMITTED": {},
		"READ-COMMITTED":   {},
		"REPEATABLE-READ":  {},
		"SERIALIZABLE":     {},
	}
)
// TableDelta stands for the changed count for one table.
type TableDelta struct {
	// Delta is the accumulated change of the table's row count.
	// NOTE(review): semantics inferred from the name — confirm against the
	// code that populates this struct.
	Delta int64
	// Count is the accumulated number of modify operations on the table.
	// NOTE(review): inferred from the name — confirm against callers.
	Count int64
	// ColSize maps a column identifier to its accumulated size change.
	// NOTE(review): key/value meaning inferred — confirm against callers.
	ColSize  map[int64]int64
	InitTime time.Time // InitTime is the time that this delta is generated.
}
// Concurrency defines concurrency values.
// Each field bounds the number of worker goroutines a particular executor
// may spawn for one query.
type Concurrency struct {
	// IndexLookupConcurrency is the number of concurrent index lookup worker.
	IndexLookupConcurrency int
	// IndexLookupJoinConcurrency is the number of concurrent index lookup join inner worker.
	IndexLookupJoinConcurrency int
	// DistSQLScanConcurrency is the number of concurrent dist SQL scan worker.
	DistSQLScanConcurrency int
	// HashJoinConcurrency is the number of concurrent hash join outer worker.
	HashJoinConcurrency int
	// ProjectionConcurrency is the number of concurrent projection worker.
	// NOTE(review): this is int64 while every sibling field is int — likely
	// an inconsistency, but changing the type would break existing callers.
	ProjectionConcurrency int64
	// HashAggPartialConcurrency is the number of concurrent hash aggregation partial worker.
	HashAggPartialConcurrency int
	// HashAggFinalConcurrency is the number of concurrent hash aggregation final worker.
	HashAggFinalConcurrency int
	// IndexSerialScanConcurrency is the number of concurrent index serial scan worker.
	IndexSerialScanConcurrency int
}
// MemQuota defines memory quota values.
// Each field is a per-session byte limit for the corresponding executor.
type MemQuota struct {
	// MemQuotaQuery defines the memory quota for a query.
	MemQuotaQuery int64
	// MemQuotaHashJoin defines the memory quota for a hash join executor.
	MemQuotaHashJoin int64
	// MemQuotaMergeJoin defines the memory quota for a merge join executor.
	MemQuotaMergeJoin int64
	// MemQuotaSort defines the memory quota for a sort executor.
	MemQuotaSort int64
	// MemQuotaTopn defines the memory quota for a top-n executor.
	MemQuotaTopn int64
	// MemQuotaIndexLookupReader defines the memory quota for an index lookup reader executor.
	MemQuotaIndexLookupReader int64
	// MemQuotaIndexLookupJoin defines the memory quota for an index lookup join executor.
	MemQuotaIndexLookupJoin int64
	// MemQuotaNestedLoopApply defines the memory quota for a nested loop apply executor.
	MemQuotaNestedLoopApply int64
	// MemQuotaDistSQL defines the memory quota for all operators in DistSQL layer like co-processor and selectResult.
	MemQuotaDistSQL int64
}
// BatchSize defines batch size values.
type BatchSize struct {
	// DMLBatchSize indicates the size of batches for DML.
	// It will be used when BatchInsert or BatchDelete is on.
	DMLBatchSize int
	// IndexJoinBatchSize is the batch size of an index lookup join.
	IndexJoinBatchSize int
	// IndexLookupSize is the number of handles for an index lookup task in index double read executor.
	IndexLookupSize int
	// InitChunkSize defines init row count of a Chunk during query execution.
	InitChunkSize int
	// MaxChunkSize defines max row count of a Chunk during query execution.
	MaxChunkSize int
}
const (
// SlowLogRowPrefixStr is slow log row prefix.