/
analyzer.go
661 lines (582 loc) · 20.9 KB
/
analyzer.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
/*
Copyright 2020 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package semantics
import (
"vitess.io/vitess/go/mysql/collations"
vschemapb "vitess.io/vitess/go/vt/proto/vschema"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vtgate/evalengine"
"vitess.io/vitess/go/vt/vtgate/vindexes"
)
// analyzer controls the flow of the analysis.
// It starts the tree walking and controls which part of the analysis sees which parts of the tree
type analyzer struct {
	scoper      *scoper              // tracks scopes as the AST walk descends/ascends
	earlyTables *earlyTableCollector // collects tables in the cheap early pass (drives the short-cut check)
	tables      *tableCollector      // full table collector; built in lateInit
	binder      *binder              // binds columns to the tables they come from; built in lateInit
	typer       *typer               // infers expression types
	rewriter    *earlyRewriter       // performs early rewrites (e.g. star expansion); built in lateInit
	sig         QuerySignature       // accumulated facts about the query (union, distinct, aggregation, ...)
	si          SchemaInformation    // schema access used to resolve tables, vindexes and fk modes
	currentDb   string               // default database for unqualified table names
	recheck     bool                 // true while reAnalyze runs; the rewriter is skipped then
	err         error                // first fatal error encountered; aborts the walk
	inProjection int                 // >0 while visiting SELECT expressions (see enterProjection/leaveProjection)
	projErr      error               // deferred error: only relevant if we can't merge to a single route
	unshardedErr error               // deferred error: only relevant if the query is not unsharded
	warning      string              // non-fatal warning produced by the rewriter
	singleUnshardedKeyspace bool     // set when canShortCut decided the cheap path applies
	fullAnalysis            bool     // when true, the short-cut is disabled and full analysis always runs
}
// newAnalyzer create the semantic analyzer
func newAnalyzer(dbName string, si SchemaInformation, fullAnalysis bool) *analyzer {
	// TODO dependencies between these components are a little tangled. We should try to clean up
	sc := newScoper()
	an := &analyzer{
		scoper:       sc,
		earlyTables:  newEarlyTableCollector(si, dbName),
		typer:        newTyper(si.Environment().CollationEnv()),
		si:           si,
		currentDb:    dbName,
		fullAnalysis: fullAnalysis,
	}
	// the scoper needs a way back into the analyzer to resolve dependencies
	sc.org = an
	return an
}
// lateInit wires up the components that depend on the early table collection:
// the table collector, the binder and the early rewriter. It must run after
// the earlyUp walk and before lateAnalyze (see analyze).
func (a *analyzer) lateInit() {
	a.tables = a.earlyTables.newTableCollector(a.scoper, a)
	a.binder = newBinder(a.scoper, a, a.tables, a.typer)
	// the scoper needs the binder to resolve column references
	a.scoper.binder = a.binder
	a.rewriter = &earlyRewriter{
		binder:          a.binder,
		scoper:          a.scoper,
		expandedColumns: map[sqlparser.TableName][]*sqlparser.ColName{},
		env:             a.si.Environment(),
		aliasMapCache:   map[*sqlparser.Select]map[string]exprContainer{},
		// the rewriter re-runs analysis on nodes it rewrites
		reAnalyze: a.reAnalyze,
		tables:    a.tables,
	}
}
// Analyze analyzes the parsed query.
func Analyze(statement sqlparser.Statement, currentDb string, si SchemaInformation) (*SemTable, error) {
	const fullAnalysis = false
	return analyseAndGetSemTable(statement, currentDb, si, fullAnalysis)
}
// analyseAndGetSemTable runs the semantic analysis on the statement and, on
// success, builds the resulting SemTable from the analyzer's state.
func analyseAndGetSemTable(statement sqlparser.Statement, currentDb string, si SchemaInformation, fullAnalysis bool) (*SemTable, error) {
	analyzer := newAnalyzer(currentDb, newSchemaInfo(si), fullAnalysis)

	// Analysis for initial scope
	err := analyzer.analyze(statement)
	if err != nil {
		return nil, err
	}

	// Creation of the semantic table
	return analyzer.newSemTable(statement, si.ConnCollation(), si.GetForeignKeyChecksState(), si.Environment().CollationEnv())
}
// AnalyzeStrict analyzes the parsed query, and fails the analysis for any possible errors
func AnalyzeStrict(statement sqlparser.Statement, currentDb string, si SchemaInformation) (*SemTable, error) {
	semTable, err := analyseAndGetSemTable(statement, currentDb, si, true)
	if err != nil {
		return nil, err
	}

	// in strict mode the deferred errors are fatal as well
	for _, deferred := range []error{semTable.NotUnshardedErr, semTable.NotSingleRouteErr} {
		if deferred != nil {
			return nil, deferred
		}
	}

	return semTable, nil
}
// newSemTable constructs the SemTable from the state gathered during analysis.
// In the single-unsharded-keyspace short-cut case most collections are left
// empty, since the planner will not need them.
func (a *analyzer) newSemTable(
	statement sqlparser.Statement,
	coll collations.ID,
	fkChecksState *bool,
	env *collations.Environment,
) (*SemTable, error) {
	var comments *sqlparser.ParsedComments
	commentedStmt, isCommented := statement.(sqlparser.Commented)
	if isCommented {
		comments = commentedStmt.GetParsedComments()
	}

	if a.singleUnshardedKeyspace {
		// short-cut path: only the early table info and scalar fields matter
		return &SemTable{
			Tables:                    a.earlyTables.Tables,
			Comments:                  comments,
			Warning:                   a.warning,
			Collation:                 coll,
			ExprTypes:                 map[sqlparser.Expr]evalengine.Type{},
			NotSingleRouteErr:         a.projErr,
			NotUnshardedErr:           a.unshardedErr,
			Recursive:                 ExprDependencies{},
			Direct:                    ExprDependencies{},
			Targets:                   map[sqlparser.IdentifierCS]TableSet{},
			ColumnEqualities:          map[columnName][]sqlparser.Expr{},
			ExpandedColumns:           map[sqlparser.TableName][]*sqlparser.ColName{},
			columns:                   map[*sqlparser.Union]sqlparser.SelectExprs{},
			comparator:                nil,
			StatementIDs:              a.scoper.statementIDs,
			QuerySignature:            QuerySignature{},
			childForeignKeysInvolved:  map[TableSet][]vindexes.ChildFKInfo{},
			parentForeignKeysInvolved: map[TableSet][]vindexes.ParentFKInfo{},
			childFkToUpdExprs:         map[string]sqlparser.UpdateExprs{},
			collEnv:                   env,
		}, nil
	}

	// remember the projection list of every UNION seen in the query
	columns := map[*sqlparser.Union]sqlparser.SelectExprs{}
	for union, info := range a.tables.unionInfo {
		columns[union] = info.exprs
	}

	childFks, parentFks, childFkToUpdExprs, err := a.getInvolvedForeignKeys(statement, fkChecksState)
	if err != nil {
		return nil, err
	}

	return &SemTable{
		Recursive:                 a.binder.recursive,
		Direct:                    a.binder.direct,
		ExprTypes:                 a.typer.m,
		Tables:                    a.tables.Tables,
		Targets:                   a.binder.targets,
		NotSingleRouteErr:         a.projErr,
		NotUnshardedErr:           a.unshardedErr,
		Warning:                   a.warning,
		Comments:                  comments,
		ColumnEqualities:          map[columnName][]sqlparser.Expr{},
		Collation:                 coll,
		ExpandedColumns:           a.rewriter.expandedColumns,
		columns:                   columns,
		StatementIDs:              a.scoper.statementIDs,
		QuerySignature:            a.sig,
		childForeignKeysInvolved:  childFks,
		parentForeignKeysInvolved: parentFks,
		childFkToUpdExprs:         childFkToUpdExprs,
		collEnv:                   env,
	}, nil
}
// setError routes an error to the right bucket: ProjError and ShardedError
// are deferred errors that only matter if single-route merging / unsharded
// planning later fails, while anything else is fatal for the analysis.
func (a *analyzer) setError(err error) {
	switch err := err.(type) {
	case ProjError:
		a.projErr = err.Inner
	case ShardedError:
		a.unshardedErr = err.Inner
	default:
		if a.inProjection > 0 && vterrors.ErrState(err) == vterrors.NonUniqError {
			// a non-unique column error inside a SELECT projection is deferred:
			// it may become irrelevant if everything merges into a single route
			a.projErr = err
		} else {
			a.err = err
		}
	}
}
// analyzeDown is the pre-visit function of the AST walk. It pushes scopes,
// validates constructs, runs the early rewriter and records query-signature
// facts on the way down. It always returns true; aborting happens in analyzeUp.
func (a *analyzer) analyzeDown(cursor *sqlparser.Cursor) bool {
	// If we have an error we keep on going down the tree without checking for anything else
	// this way we can abort when we come back up.
	if !a.shouldContinue() {
		return true
	}
	if err := a.scoper.down(cursor); err != nil {
		a.setError(err)
		return true
	}
	if err := a.checkForInvalidConstructs(cursor); err != nil {
		a.setError(err)
		return true
	}
	if err := a.rewriter.down(cursor); err != nil {
		a.setError(err)
		return true
	}
	// log any warn in rewriting.
	a.warning = a.rewriter.warning

	a.noteQuerySignature(cursor.Node())

	a.enterProjection(cursor)
	// this is the visitor going down the tree. Returning false here would just not visit the children
	// to the current node, but that is not what we want if we have encountered an error.
	// In order to abort the whole visitation, we have to return true here and then return false in the `analyzeUp` method
	return true
}
// analyzeUp is the post-visit function of the AST walk. Each component gets to
// process the node on the way up; returning false aborts the whole walk.
func (a *analyzer) analyzeUp(cursor *sqlparser.Cursor) bool {
	if !a.shouldContinue() {
		return false
	}

	if err := a.tables.up(cursor); err != nil {
		a.setError(err)
		return false
	}

	if err := a.binder.up(cursor); err != nil {
		a.setError(err)
		// NOTE(review): binder and rewriter errors return true (siblings keep
		// being visited) while the other components return false — presumably
		// intentional since shouldContinue stops the walk anyway; confirm
		// before unifying.
		return true
	}

	if err := a.typer.up(cursor); err != nil {
		a.setError(err)
		return false
	}

	if !a.recheck {
		// no need to run the rewriter on rechecking
		if err := a.rewriter.up(cursor); err != nil {
			a.setError(err)
			return true
		}
	}

	if err := a.scoper.up(cursor); err != nil {
		a.setError(err)
		return false
	}

	a.leaveProjection(cursor)
	return a.shouldContinue()
}
// containsStar reports whether any of the given select expressions is a
// star expression (`*` or `tbl.*`).
func containsStar(exprs sqlparser.SelectExprs) bool {
	for _, se := range exprs {
		if _, ok := se.(*sqlparser.StarExpr); ok {
			return true
		}
	}
	return false
}
// checkUnionColumns verifies that both sides of a UNION project the same
// number of columns. If either side still contains a `*`, the real column
// count is unknown here, and the check is left to run time.
func checkUnionColumns(union *sqlparser.Union) error {
	lhs := sqlparser.GetFirstSelect(union).SelectExprs
	if containsStar(lhs) {
		// can't decide validity with a `*` present — fail at run time instead
		return nil
	}
	rhs := sqlparser.GetFirstSelect(union.Right).SelectExprs
	if containsStar(rhs) || len(rhs) == len(lhs) {
		return nil
	}
	return &UnionColumnsDoNotMatchError{FirstProj: len(lhs), SecondProj: len(rhs)}
}
/*
errors raised while analyzing SELECT expressions are kept aside until we know
whether everything can be merged into a single route or not
*/
func (a *analyzer) enterProjection(cursor *sqlparser.Cursor) {
	if _, isSelExprs := cursor.Node().(sqlparser.SelectExprs); isSelExprs && isParentSelect(cursor) {
		a.inProjection++
	}
}
// leaveProjection is the counterpart of enterProjection, decrementing the
// projection depth when leaving a SELECT's expression list.
func (a *analyzer) leaveProjection(cursor *sqlparser.Cursor) {
	if _, isSelExprs := cursor.Node().(sqlparser.SelectExprs); isSelExprs && isParentSelect(cursor) {
		a.inProjection--
	}
}
// isParentSelect reports whether the cursor's parent node is a SELECT.
func isParentSelect(cursor *sqlparser.Cursor) bool {
	switch cursor.Parent().(type) {
	case *sqlparser.Select:
		return true
	default:
		return false
	}
}
// isParentDeleteOrUpdate reports whether the cursor's parent node is a DELETE
// or an UPDATE statement.
func isParentDeleteOrUpdate(cursor *sqlparser.Cursor) bool {
	switch cursor.Parent().(type) {
	case *sqlparser.Delete, *sqlparser.Update:
		return true
	default:
		return false
	}
}
// isParentSelectStatement reports whether the cursor's parent node is any
// kind of SELECT statement (plain SELECT or UNION).
func isParentSelectStatement(cursor *sqlparser.Cursor) bool {
	switch cursor.Parent().(type) {
	case sqlparser.SelectStatement:
		return true
	default:
		return false
	}
}
// originable is the small interface the analyzer exposes to its components
// (it is handed to the scoper in newAnalyzer via sc.org): resolving the
// table set of an aliased table and the dependencies/type of an expression.
type originable interface {
	tableSetFor(t *sqlparser.AliasedTableExpr) TableSet
	depsForExpr(expr sqlparser.Expr) (direct, recursive TableSet, typ evalengine.Type)
}
// depsForExpr returns the direct and recursive table dependencies of the
// given expression, together with its inferred type.
func (a *analyzer) depsForExpr(expr sqlparser.Expr) (direct, recursive TableSet, typ evalengine.Type) {
	rec := a.binder.recursive.dependencies(expr)
	dir := a.binder.direct.dependencies(expr)
	return dir, rec, a.typer.exprType(expr)
}
// analyze is the analyzer's entry point. It first does a cheap early walk to
// collect tables, then either short-cuts (single unsharded keyspace with no
// managed fks) or wires up the remaining components and runs full analysis.
func (a *analyzer) analyze(statement sqlparser.Statement) error {
	// early pass: only collects tables (errors land in a.err)
	_ = sqlparser.Rewrite(statement, nil, a.earlyUp)
	if a.err != nil {
		return a.err
	}

	if a.canShortCut(statement) {
		return nil
	}

	a.lateInit()
	return a.lateAnalyze(statement)
}
// lateAnalyze walks the given (sub)tree with the full set of analysis
// visitors and returns whatever error was recorded on the analyzer.
func (a *analyzer) lateAnalyze(statement sqlparser.SQLNode) error {
	sqlparser.Rewrite(statement, a.analyzeDown, a.analyzeUp)
	return a.err
}
// reAnalyze runs the late analysis again over (part of) the tree with the
// recheck flag set, which disables the early rewriter for the duration.
func (a *analyzer) reAnalyze(statement sqlparser.SQLNode) error {
	a.recheck = true
	defer func() { a.recheck = false }()
	return a.lateAnalyze(statement)
}
// canShortCut checks if we are dealing with a single unsharded keyspace and no tables that have managed foreign keys
// if so, we can stop the analyzer early
func (a *analyzer) canShortCut(statement sqlparser.Statement) (canShortCut bool) {
	if a.fullAnalysis {
		// strict mode never takes the short-cut
		return false
	}
	ks, _ := singleUnshardedKeyspace(a.earlyTables.Tables)
	if ks == nil {
		return false
	}

	// record the decision on the analyzer once it is final
	// (the deferred write also covers every return below)
	defer func() {
		a.singleUnshardedKeyspace = canShortCut
	}()

	if !sqlparser.IsDMLStatement(statement) {
		// reads never need foreign-key handling
		return true
	}

	fkMode, err := a.si.ForeignKeyMode(ks.Name)
	if err != nil {
		a.err = err
		return false
	}
	if fkMode != vschemapb.Keyspace_managed {
		// Vitess isn't managing fks here, nothing extra to analyze
		return true
	}

	// managed fk mode: only short-cut if no involved table has fks
	for _, table := range a.earlyTables.Tables {
		vtbl := table.GetVindexTable()
		if len(vtbl.ChildForeignKeys) > 0 || len(vtbl.ParentForeignKeys) > 0 {
			return false
		}
	}

	return true
}
// earlyUp collects tables in the query, so we can check
// if this a single unsharded query we are dealing with
func (a *analyzer) earlyUp(cursor *sqlparser.Cursor) bool {
	a.earlyTables.up(cursor)
	// always continue walking; problems are stored on the collector/analyzer
	return true
}
// shouldContinue reports whether no fatal error has been recorded yet.
func (a *analyzer) shouldContinue() bool {
	return a.err == nil
}
// tableSetFor implements originable by delegating to the table collector.
func (a *analyzer) tableSetFor(t *sqlparser.AliasedTableExpr) TableSet {
	return a.tables.tableSetFor(t)
}
// noteQuerySignature records high-level facts about the query (union,
// distinct, sub-queries, aggregation, delete) as the walker passes each node.
func (a *analyzer) noteQuerySignature(node sqlparser.SQLNode) {
	switch n := node.(type) {
	case *sqlparser.Union:
		a.sig.Union = true
		a.sig.Distinct = a.sig.Distinct || n.Distinct
	case *sqlparser.Subquery:
		a.sig.SubQueries = true
	case *sqlparser.Select:
		a.sig.Distinct = a.sig.Distinct || n.Distinct
		a.sig.Aggregation = a.sig.Aggregation || n.GroupBy != nil
	case sqlparser.AggrFunc:
		a.sig.Aggregation = true
	case *sqlparser.Delete:
		a.sig.Delete = true
	}
}
// getInvolvedForeignKeys gets the foreign keys that might require taking care off when executing the given statement.
// It returns the child fks, the parent fks, and a mapping from child fk (string form) to the
// update expressions that touch it. All maps are keyed by the owning table's TableSet.
func (a *analyzer) getInvolvedForeignKeys(statement sqlparser.Statement, fkChecksState *bool) (map[TableSet][]vindexes.ChildFKInfo, map[TableSet][]vindexes.ParentFKInfo, map[string]sqlparser.UpdateExprs, error) {
	if fkChecksState != nil && !*fkChecksState {
		// foreign key checks are explicitly disabled for this query
		return nil, nil, nil, nil
	}
	// There are only the DML statements that require any foreign keys handling.
	switch stmt := statement.(type) {
	case *sqlparser.Delete:
		// For DELETE statements, none of the parent foreign keys require handling.
		// So we collect all the child foreign keys.
		allChildFks, _, err := a.getAllManagedForeignKeys()
		return allChildFks, nil, nil, err
	case *sqlparser.Insert:
		// For INSERT statements, we have 3 different cases:
		// 1. REPLACE statement: REPLACE statements are essentially DELETEs and INSERTs rolled into one.
		//    So we need to the parent foreign keys to ensure we are inserting the correct values, and the child foreign keys
		//    to ensure we don't change a row that breaks the constraint or cascade any operations on the child tables.
		// 2. Normal INSERT statement: We don't need to check anything on the child foreign keys, so we just get all the parent foreign keys.
		// 3. INSERT with ON DUPLICATE KEY UPDATE: This might trigger an update on the columns specified in the ON DUPLICATE KEY UPDATE clause.
		allChildFks, allParentFKs, err := a.getAllManagedForeignKeys()
		if err != nil {
			return nil, nil, nil, err
		}
		if stmt.Action == sqlparser.ReplaceAct {
			return allChildFks, allParentFKs, nil, nil
		}
		if len(stmt.OnDup) == 0 {
			return nil, allParentFKs, nil, nil
		}
		// If only a certain set of columns are being updated, then there might be some child foreign keys that don't need any consideration since their columns aren't being updated.
		// So, we filter these child foreign keys out. We can't filter any parent foreign keys because the statement will INSERT a row too, which requires validating all the parent foreign keys.
		updatedChildFks, _, childFkToUpdExprs, err := a.filterForeignKeysUsingUpdateExpressions(allChildFks, nil, sqlparser.UpdateExprs(stmt.OnDup))
		return updatedChildFks, allParentFKs, childFkToUpdExprs, err
	case *sqlparser.Update:
		// For UPDATE queries we get all the parent and child foreign keys, but we can filter some of them out if the columns that they consist off aren't being updated or are set to NULLs.
		allChildFks, allParentFks, err := a.getAllManagedForeignKeys()
		if err != nil {
			return nil, nil, nil, err
		}
		return a.filterForeignKeysUsingUpdateExpressions(allChildFks, allParentFks, stmt.Exprs)
	default:
		// non-DML statements never need foreign key handling
		return nil, nil, nil, nil
	}
}
// filterForeignKeysUsingUpdateExpressions filters the child and parent foreign key constraints that don't require any validations/cascades given the updated expressions.
// A child fk is kept if any of its parent columns is updated; a parent fk is
// kept if any of its child columns is updated to a non-NULL value. It also
// returns, per kept child fk (string form), the update expressions touching it.
func (a *analyzer) filterForeignKeysUsingUpdateExpressions(allChildFks map[TableSet][]vindexes.ChildFKInfo, allParentFks map[TableSet][]vindexes.ParentFKInfo, updExprs sqlparser.UpdateExprs) (map[TableSet][]vindexes.ChildFKInfo, map[TableSet][]vindexes.ParentFKInfo, map[string]sqlparser.UpdateExprs, error) {
	if len(allChildFks) == 0 && len(allParentFks) == 0 {
		return nil, nil, nil, nil
	}

	// per-table bitmaps marking which fks (by index) still need handling
	pFksRequired := make(map[TableSet][]bool, len(allParentFks))
	cFksRequired := make(map[TableSet][]bool, len(allChildFks))
	for ts, fks := range allParentFks {
		pFksRequired[ts] = make([]bool, len(fks))
	}
	for ts, fks := range allChildFks {
		cFksRequired[ts] = make([]bool, len(fks))
	}

	// updExprToTableSet stores the tables that the updated expressions are from.
	updExprToTableSet := make(map[*sqlparser.ColName]TableSet)

	// childFKToUpdExprs stores child foreign key to update expressions mapping.
	childFKToUpdExprs := map[string]sqlparser.UpdateExprs{}

	// Go over all the update expressions
	for _, updateExpr := range updExprs {
		deps := a.binder.direct.dependencies(updateExpr.Name)
		if deps.NumberOfTables() != 1 {
			// If we don't get exactly one table for the given update expression, we would have definitely run into an error
			// during the binder phase that we would have stored. We should return that error, since we can't safely proceed with
			// foreign key related changes without having all the information.
			return nil, nil, nil, a.getError()
		}
		updExprToTableSet[updateExpr.Name] = deps
		// Get all the child and parent foreign keys for the given table that the update expression belongs to.
		childFks := allChildFks[deps]
		parentFKs := allParentFks[deps]

		// Any foreign key to a child table for a column that has been updated
		// will require the cascade operations or restrict verification to happen, so we include all such foreign keys.
		for idx, childFk := range childFks {
			if childFk.ParentColumns.FindColumn(updateExpr.Name.Name) >= 0 {
				cFksRequired[deps][idx] = true
				// error ignored here: deps was validated above to be exactly one table
				tbl, _ := a.tables.tableInfoFor(deps)
				ue := childFKToUpdExprs[childFk.String(tbl.GetVindexTable())]
				ue = append(ue, updateExpr)
				childFKToUpdExprs[childFk.String(tbl.GetVindexTable())] = ue
			}
		}
		// If we are setting a column to NULL, then we don't need to verify the existence of an
		// equivalent row in the parent table, even if this column was part of a foreign key to a parent table.
		if sqlparser.IsNull(updateExpr.Expr) {
			continue
		}
		// We add all the possible parent foreign key constraints that need verification that an equivalent row
		// exists, given that this column has changed.
		for idx, parentFk := range parentFKs {
			if parentFk.ChildColumns.FindColumn(updateExpr.Name.Name) >= 0 {
				pFksRequired[deps][idx] = true
			}
		}
	}
	// For the parent foreign keys, if any of the columns part of the fk is set to NULL,
	// then, we don't care for the existence of an equivalent row in the parent table.
	// (second pass so a NULL assignment wins over a non-NULL one on another column)
	for _, updateExpr := range updExprs {
		if !sqlparser.IsNull(updateExpr.Expr) {
			continue
		}
		ts := updExprToTableSet[updateExpr.Name]
		parentFKs := allParentFks[ts]
		for idx, parentFk := range parentFKs {
			if parentFk.ChildColumns.FindColumn(updateExpr.Name.Name) >= 0 {
				pFksRequired[ts][idx] = false
			}
		}
	}

	// Create new maps with only the required foreign keys.
	pFksNeedsHandling := map[TableSet][]vindexes.ParentFKInfo{}
	cFksNeedsHandling := map[TableSet][]vindexes.ChildFKInfo{}
	for ts, parentFks := range allParentFks {
		var pFKNeeded []vindexes.ParentFKInfo
		for idx, fk := range parentFks {
			if pFksRequired[ts][idx] {
				pFKNeeded = append(pFKNeeded, fk)
			}
		}
		pFksNeedsHandling[ts] = pFKNeeded
	}
	for ts, childFks := range allChildFks {
		var cFKNeeded []vindexes.ChildFKInfo
		for idx, fk := range childFks {
			if cFksRequired[ts][idx] {
				cFKNeeded = append(cFKNeeded, fk)
			}
		}
		cFksNeedsHandling[ts] = cFKNeeded
	}

	return cFksNeedsHandling, pFksNeedsHandling, childFKToUpdExprs, nil
}
// getError gets the error stored in the analyzer during previous phases,
// preferring the projection error, then the unsharded error, then the
// general one.
func (a *analyzer) getError() error {
	for _, stored := range []error{a.projErr, a.unshardedErr, a.err} {
		if stored != nil {
			return stored
		}
	}
	return nil
}
// getAllManagedForeignKeys gets all the foreign keys for the query we are analyzing that Vitess is responsible for managing.
// The result maps each table's TableSet to its child and parent fk lists.
func (a *analyzer) getAllManagedForeignKeys() (map[TableSet][]vindexes.ChildFKInfo, map[TableSet][]vindexes.ParentFKInfo, error) {
	allChildFKs := make(map[TableSet][]vindexes.ChildFKInfo)
	allParentFKs := make(map[TableSet][]vindexes.ParentFKInfo)

	// Go over all the tables and collect the foreign keys.
	for idx, table := range a.tables.Tables {
		vi := table.GetVindexTable()
		if vi == nil || vi.Keyspace == nil {
			// not a real table (e.g. a derived table), so it is skipped
			continue
		}
		// Check whether Vitess needs to manage the foreign keys in this keyspace or not.
		fkMode, err := a.si.ForeignKeyMode(vi.Keyspace.Name)
		if err != nil {
			return nil, nil, err
		}
		if fkMode != vschemapb.Keyspace_managed {
			continue
		}
		// Cyclic foreign key constraints error is stored in the keyspace.
		ksErr := a.si.KeyspaceError(vi.Keyspace.Name)
		if ksErr != nil {
			return nil, nil, ksErr
		}

		// Add all the child and parent foreign keys to our map.
		ts := SingleTableSet(idx)
		allChildFKs[ts] = vi.ChildForeignKeys
		allParentFKs[ts] = vi.ParentForeignKeys
	}
	return allChildFKs, allParentFKs, nil
}
// ProjError is used to mark an error as something that should only be returned
// if the planner fails to merge everything down to a single route
type ProjError struct {
Inner error
}
func (p ProjError) Error() string {
return p.Inner.Error()
}
// ShardedError is used to mark an error as something that should only be returned
// if the query is not unsharded
type ShardedError struct {
Inner error
}
func (p ShardedError) Unwrap() error {
return p.Inner
}
func (p ShardedError) Error() string {
return p.Inner.Error()
}