-
Notifications
You must be signed in to change notification settings - Fork 260
/
sqlcheck.go
299 lines (254 loc) · 8.86 KB
/
sqlcheck.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
// Copyright 2021-present The Atlas Authors. All rights reserved.
// This source code is licensed under the Apache 2.0 license found
// in the LICENSE file in the root directory of this source tree.
// Package sqlcheck provides interfaces for analyzing the contents of SQL files
// to generate insights on the safety of many kinds of changes to database
// schemas. With this package developers may define an Analyzer that can be used
// to diagnose the impact of SQL statements on the target database. For instance,
// The `destructive` package exposes an Analyzer that detects destructive changes
// to the database schema, such as the dropping of tables or columns.
package sqlcheck
import (
"context"
"sync"
"ariga.io/atlas/schemahcl"
"ariga.io/atlas/sql/migrate"
"ariga.io/atlas/sql/schema"
"ariga.io/atlas/sql/sqlclient"
)
type (
	// An Analyzer describes a migration file analyzer.
	Analyzer interface {
		// Analyze executes the analysis function.
		Analyze(context.Context, *Pass) error
	}

	// A NamedAnalyzer describes an Analyzer that has a name.
	NamedAnalyzer interface {
		Analyzer
		// Name of the analyzer. Identifies the analyzer
		// in configuration and linting passes.
		Name() string
	}

	// A Pass provides information to the Run function that
	// applies a specific analyzer to an SQL file.
	Pass struct {
		// A migration file and the changes it describes.
		File *File

		// Dev is a driver-specific environment used to execute analysis work.
		Dev *sqlclient.Client

		// Reporter is the writer that analyzers use to emit their reports.
		Reporter ReportWriter
	}

	// File represents a parsed version of a migration file.
	File struct {
		migrate.File

		// Changes represents the list of changes this file represents.
		Changes []*Change

		// Sum represents a summary of changes this file represents. For example,
		// in case of a file that contains exactly two statements, and the first
		// statement is reverted by the one after it, the Sum is nil.
		Sum schema.Changes

		// A Parser that may be used for parsing this file. It is typed as any, as the
		// contract between checks and their parsers can vary. For example, in case of
		// running checks from CLI, the injected parser can be found in
		// cmd/atlas/internal/sqlparse.Parser.
		Parser any

		// spans caches per-schema resource lifespan information.
		// Lazily initialized on first span lookup.
		spans map[string]*schemaSpan
	}

	// A Change in a migration file.
	Change struct {
		schema.Changes               // The actual changes.
		Stmt           *migrate.Stmt // The SQL statement generated this change.
	}

	// A Report describes an analysis report with an optional specific diagnostic.
	Report struct {
		Text           string         `json:"Text"`                     // Report text.
		Diagnostics    []Diagnostic   `json:"Diagnostics,omitempty"`    // Report diagnostics.
		SuggestedFixes []SuggestedFix `json:"SuggestedFixes,omitempty"` // Report-level suggested fixes.
	}

	// A Diagnostic is a text associated with a specific position of a statement in a file.
	Diagnostic struct {
		Pos            int            `json:"Pos"`                      // Diagnostic position.
		Text           string         `json:"Text"`                     // Diagnostic text.
		Code           string         `json:"Code"`                     // Code describes the check. For example, DS101
		SuggestedFixes []SuggestedFix `json:"SuggestedFixes,omitempty"` // Fixes to this specific diagnostics (statement-level).
	}

	// A SuggestedFix is a change associated with a diagnostic that can
	// be applied to fix the issue. Both the message and the text edit
	// are optional.
	SuggestedFix struct {
		Message  string    `json:"Message"`
		TextEdit *TextEdit `json:"TextEdit,omitempty"`
	}

	// A TextEdit represents a code change in a file.
	// The suggested edits are line-based starting from 1.
	TextEdit struct {
		Line    int    `json:"Line"`    // Start line to edit.
		End     int    `json:"End"`     // End line to edit.
		NewText string `json:"NewText"` // New text to replace.
	}

	// ReportWriter represents a writer for analysis reports.
	ReportWriter interface {
		WriteReport(Report)
	}

	// Options defines a generic configuration options for analyzers.
	Options struct {
		// Error indicates if an analyzer should
		// error in case a Diagnostic was found.
		Error *bool `spec:"error"`

		// Allow drivers to extend the configuration.
		schemahcl.DefaultExtension
	}
)
// Analyzers is a list of analyzers that implements Analyzer itself.
// Its Analyze runs each analyzer in order and stops at the first error.
type Analyzers []Analyzer

// Analyze implements Analyzer. It applies all analyzers in the list
// to the given pass and returns the first error encountered, if any.
func (a Analyzers) Analyze(ctx context.Context, p *Pass) error {
	// Use a distinct loop variable name; the original shadowed
	// the receiver "a" inside the range.
	for _, az := range a {
		if err := az.Analyze(ctx, p); err != nil {
			return err
		}
	}
	return nil
}
// AnalyzerFunc allows using ordinary functions as analyzers.
// It mirrors the http.HandlerFunc adapter pattern.
type AnalyzerFunc func(ctx context.Context, p *Pass) error

// Analyze implements Analyzer by calling f itself.
func (f AnalyzerFunc) Analyze(ctx context.Context, p *Pass) error {
	return f(ctx, p)
}
// ReportWriterFunc is a function that implements ReportWriter.
type ReportWriterFunc func(Report)

// WriteReport implements ReportWriter by calling f(r).
func (f ReportWriterFunc) WriteReport(r Report) {
	f(r)
}
// ResourceSpan describes the lifespan of a resource
// relative to the migration file. It is a bit set:
// SpanAdded and SpanDropped may be combined (SpanTemporary).
type ResourceSpan uint

const (
	// SpanUnknown describes unknown lifespan.
	// e.g. resource may exist before this file.
	SpanUnknown ResourceSpan = iota

	// SpanAdded describes that a span of
	// a resource was started in this file.
	SpanAdded

	// SpanDropped describes that a span of
	// a resource was ended in this file.
	SpanDropped

	// SpanTemporary indicates that a resource lifetime
	// was started and ended in this file (CREATE and DROP).
	SpanTemporary = SpanAdded | SpanDropped
)
// SchemaSpan returns the span information for the schema.
func (f *File) SchemaSpan(s *schema.Schema) ResourceSpan {
	span := f.schemaSpan(s)
	return span.state
}
// TableSpan returns the span information for the table.
func (f *File) TableSpan(t *schema.Table) ResourceSpan {
	span := f.tableSpan(t)
	return span.state
}
// ColumnSpan returns the span information for the column.
// A column unseen by this file maps to SpanUnknown (the map zero value).
func (f *File) ColumnSpan(t *schema.Table, c *schema.Column) ResourceSpan {
	span := f.tableSpan(t)
	return span.columns[c.Name]
}
// IndexSpan returns the span information for the index.
// An index unseen by this file maps to SpanUnknown (the map zero value).
func (f *File) IndexSpan(t *schema.Table, i *schema.Index) ResourceSpan {
	span := f.tableSpan(t)
	return span.indexes[i.Name]
}
type (
	// schemaSpan holds the span structure of a schema:
	// its own lifespan state plus the spans of its tables.
	schemaSpan struct {
		state  ResourceSpan
		tables map[string]*tableSpan
	}
	// tableSpan holds the span structure of a table:
	// its own lifespan state plus the spans of its columns and indexes.
	tableSpan struct {
		state   ResourceSpan
		columns map[string]ResourceSpan
		indexes map[string]ResourceSpan
	}
)
// loadSpans builds the file's span index by walking every change in the
// file and recording when each schema, table, column and index was added
// and/or dropped. Add-changes set the span state; drop-changes OR-in
// SpanDropped, so a create+drop pair yields SpanTemporary.
func (f *File) loadSpans() {
	f.spans = make(map[string]*schemaSpan)
	for _, change := range f.Changes {
		for _, chg := range change.Changes {
			switch chg := chg.(type) {
			case *schema.AddSchema:
				f.schemaSpan(chg.S).state = SpanAdded
			case *schema.DropSchema:
				f.schemaSpan(chg.S).state |= SpanDropped
			case *schema.AddTable:
				ts := f.tableSpan(chg.T)
				ts.state = SpanAdded
				// A new table starts the span of all its columns and indexes.
				for _, c := range chg.T.Columns {
					ts.columns[c.Name] = SpanAdded
				}
				for _, idx := range chg.T.Indexes {
					ts.indexes[idx.Name] = SpanAdded
				}
			case *schema.DropTable:
				f.tableSpan(chg.T).state |= SpanDropped
			case *schema.ModifyTable:
				ts := f.tableSpan(chg.T)
				for _, mc := range chg.Changes {
					switch mc := mc.(type) {
					case *schema.AddColumn:
						ts.columns[mc.C.Name] = SpanAdded
					case *schema.DropColumn:
						ts.columns[mc.C.Name] |= SpanDropped
					case *schema.AddIndex:
						ts.indexes[mc.I.Name] = SpanAdded
					case *schema.DropIndex:
						ts.indexes[mc.I.Name] |= SpanDropped
					}
				}
			}
		}
	}
}
// schemaSpan returns the span entry for the given schema, creating it
// on first access. The whole span index is lazily built from f.Changes
// the first time any span is requested.
func (f *File) schemaSpan(s *schema.Schema) *schemaSpan {
	if f.spans == nil {
		f.loadSpans()
	}
	// Single comma-ok lookup instead of indexing the map three times.
	span, ok := f.spans[s.Name]
	if !ok {
		span = &schemaSpan{tables: make(map[string]*tableSpan)}
		f.spans[s.Name] = span
	}
	return span
}
// tableSpan returns the span entry for the given table within its schema,
// creating it on first access.
func (f *File) tableSpan(t *schema.Table) *tableSpan {
	sc := f.schemaSpan(t.Schema)
	// Reuse the schema span already in hand; the original re-traversed
	// f.spans[t.Schema.Name].tables[t.Name] on return, performing
	// redundant map lookups.
	span, ok := sc.tables[t.Name]
	if !ok {
		span = &tableSpan{
			columns: make(map[string]ResourceSpan),
			indexes: make(map[string]ResourceSpan),
		}
		sc.tables[t.Name] = span
	}
	return span
}
// codes is the global registry of check codes seen so far.
var codes sync.Map

// Code stores the given code in the registry and returns it.
// It guards against two analyzers registering the same code:
// a duplicate registration panics.
func Code(code string) string {
	_, dup := codes.LoadOrStore(code, struct{}{})
	if dup {
		panic("sqlcheck: Code called twice for " + code)
	}
	return code
}
// drivers holds driver-specific analyzer constructors, keyed by driver name.
var drivers sync.Map

// Register allows drivers to register a constructor function for creating
// analyzers from the given HCL resource. A later Register with the same
// name overwrites the previous constructor.
func Register(name string, f func(*schemahcl.Resource) ([]Analyzer, error)) {
	drivers.Store(name, f)
}
// AnalyzerFor instantiates a new Analyzer from the given HCL resource
// based on the registered constructor function. It returns (nil, nil)
// when no constructor is registered for the named driver.
func AnalyzerFor(name string, r *schemahcl.Resource) ([]Analyzer, error) {
	v, ok := drivers.Load(name)
	if !ok {
		return nil, nil
	}
	return v.(func(*schemahcl.Resource) ([]Analyzer, error))(r)
}