forked from argoproj/argo-workflows
/
migrate.go
227 lines (214 loc) · 8.89 KB
/
migrate.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
package sqldb
import (
"context"
"database/sql"
"github.com/go-sql-driver/mysql"
log "github.com/sirupsen/logrus"
"upper.io/db.v3/lib/sqlbuilder"
)
// Migrate applies pending database schema changes.
type Migrate interface {
	// Exec runs all schema migrations that have not yet been applied,
	// returning the first error encountered (already-applied changes are
	// skipped via the schema_history version table).
	Exec(ctx context.Context) error
}
// NewMigrate returns a Migrate that upgrades the schema of the given database
// session, scoped to the named cluster and the named workflow table.
func NewMigrate(session sqlbuilder.Database, clusterName string, tableName string) Migrate {
	return migrate{
		session:     session,
		clusterName: clusterName,
		tableName:   tableName,
	}
}
// migrate is the default Migrate implementation.
type migrate struct {
	session     sqlbuilder.Database // open database session the migrations run against
	clusterName string              // cluster name backfilled into the clustername columns
	tableName   string              // name of the live workflows table being migrated
}
// change is a single idempotent schema migration step.
type change interface {
	// apply executes the step against the given database session.
	apply(session sqlbuilder.Database) error
}
// ternary returns left when condition is true and right otherwise. It is used
// to pick between the MySQL and the ANSI/PostgreSQL variant of a schema change.
func ternary(condition bool, left, right change) change {
	if condition {
		return left
	}
	// early-return style: no else after a terminating if (go idiom)
	return right
}
// Exec brings the database schema up to date. Progress is tracked in the
// schema_history table, so it is safe to run on every start-up: each change
// in the ordered list below is applied at most once (see applyChange).
func (m migrate) Exec(ctx context.Context) error {
	{
		// poor mans SQL migration
		_, err := m.session.Exec("create table if not exists schema_history(schema_version int not null)")
		if err != nil {
			return err
		}
		rs, err := m.session.Query("select schema_version from schema_history")
		if err != nil {
			return err
		}
		// Read the cursor and close it before doing any further work, so the
		// rows handle is never leaked on an early error return (the original
		// code returned between Next() and Close() if the insert failed).
		seeded := rs.Next()
		if err := rs.Err(); err != nil {
			_ = rs.Close()
			return err
		}
		if err := rs.Close(); err != nil {
			return err
		}
		if !seeded {
			// first ever run: seed version -1 so that change 0 gets applied
			_, err := m.session.Exec("insert into schema_history values(-1)")
			if err != nil {
				return err
			}
		}
	}
	dbType := "postgres"
	switch m.session.Driver().(*sql.DB).Driver().(type) {
	case *mysql.MySQLDriver:
		dbType = "mysql"
	}
	log.WithFields(log.Fields{"clusterName": m.clusterName, "dbType": dbType}).Info("Migrating database schema")
	// try and make changes idempotent, as it is possible for the change to apply, but the archive update to fail
	// and therefore try and apply again next try
	for changeSchemaVersion, change := range []change{
		ansiSQLChange(`create table if not exists ` + m.tableName + ` (
    id varchar(128) ,
    name varchar(256),
    phase varchar(25),
    namespace varchar(256),
    workflow text,
    startedat timestamp,
    finishedat timestamp,
    primary key (id, namespace)
)`),
		ansiSQLChange(`create unique index idx_name on ` + m.tableName + ` (name)`),
		ansiSQLChange(`create table if not exists argo_workflow_history (
    id varchar(128) ,
    name varchar(256),
    phase varchar(25),
    namespace varchar(256),
    workflow text,
    startedat timestamp,
    finishedat timestamp,
    primary key (id, namespace)
)`),
		ansiSQLChange(`alter table argo_workflow_history rename to argo_archived_workflows`),
		ternary(dbType == "mysql",
			ansiSQLChange(`drop index idx_name on `+m.tableName),
			ansiSQLChange(`drop index idx_name`),
		),
		ansiSQLChange(`create unique index idx_name on ` + m.tableName + `(name, namespace)`),
		ternary(dbType == "mysql",
			ansiSQLChange(`alter table `+m.tableName+` drop primary key`),
			ansiSQLChange(`alter table `+m.tableName+` drop constraint `+m.tableName+`_pkey`),
		),
		ansiSQLChange(`alter table ` + m.tableName + ` add primary key(name,namespace)`),
		// huh - why does the pkey not have the same name as the table - history
		ternary(dbType == "mysql",
			ansiSQLChange(`alter table argo_archived_workflows drop primary key`),
			ansiSQLChange(`alter table argo_archived_workflows drop constraint argo_workflow_history_pkey`),
		),
		ansiSQLChange(`alter table argo_archived_workflows add primary key(id)`),
		// ***
		// THE CHANGES ABOVE THIS LINE MAY BE IN PER-PRODUCTION SYSTEMS - DO NOT CHANGE THEM
		// ***
		ansiSQLChange(`alter table argo_archived_workflows rename column id to uid`),
		ternary(dbType == "mysql",
			ansiSQLChange(`alter table argo_archived_workflows modify column uid varchar(128) not null`),
			ansiSQLChange(`alter table argo_archived_workflows alter column uid set not null`),
		),
		ternary(dbType == "mysql",
			ansiSQLChange(`alter table argo_archived_workflows modify column phase varchar(25) not null`),
			ansiSQLChange(`alter table argo_archived_workflows alter column phase set not null`),
		),
		ternary(dbType == "mysql",
			ansiSQLChange(`alter table argo_archived_workflows modify column namespace varchar(256) not null`),
			ansiSQLChange(`alter table argo_archived_workflows alter column namespace set not null`),
		),
		ternary(dbType == "mysql",
			ansiSQLChange(`alter table argo_archived_workflows modify column workflow text not null`),
			ansiSQLChange(`alter table argo_archived_workflows alter column workflow set not null`),
		),
		ternary(dbType == "mysql",
			ansiSQLChange(`alter table argo_archived_workflows modify column startedat timestamp not null`),
			ansiSQLChange(`alter table argo_archived_workflows alter column startedat set not null`),
		),
		ternary(dbType == "mysql",
			ansiSQLChange(`alter table argo_archived_workflows modify column finishedat timestamp not null`),
			ansiSQLChange(`alter table argo_archived_workflows alter column finishedat set not null`),
		),
		ansiSQLChange(`alter table argo_archived_workflows add clustername varchar(64)`), // DNS entry can only be max 63 bytes
		backfillClusterName{clusterName: m.clusterName, tableName: "argo_archived_workflows"},
		ternary(dbType == "mysql",
			ansiSQLChange(`alter table argo_archived_workflows modify column clustername varchar(64) not null`),
			ansiSQLChange(`alter table argo_archived_workflows alter column clustername set not null`),
		),
		ternary(dbType == "mysql",
			ansiSQLChange(`alter table argo_archived_workflows drop primary key`),
			ansiSQLChange(`alter table argo_archived_workflows drop constraint argo_archived_workflows_pkey`),
		),
		ansiSQLChange(`alter table argo_archived_workflows add primary key(clustername,uid)`),
		ansiSQLChange(`create index argo_archived_workflows_i1 on argo_archived_workflows (clustername,namespace)`),
		// argo_archived_workflows now looks like:
		// clustername(not null) uid(not null) | phase(not null) | namespace(not null) | workflow(not null) | startedat(not null) | finishedat(not null)
		// remove unused columns
		ansiSQLChange(`alter table ` + m.tableName + ` drop column phase`),
		ansiSQLChange(`alter table ` + m.tableName + ` drop column startedat`),
		ansiSQLChange(`alter table ` + m.tableName + ` drop column finishedat`),
		ansiSQLChange(`alter table ` + m.tableName + ` rename column id to uid`),
		ternary(dbType == "mysql",
			ansiSQLChange(`alter table `+m.tableName+` modify column uid varchar(128) not null`),
			ansiSQLChange(`alter table `+m.tableName+` alter column uid set not null`),
		),
		ternary(dbType == "mysql",
			ansiSQLChange(`alter table `+m.tableName+` modify column namespace varchar(256) not null`),
			ansiSQLChange(`alter table `+m.tableName+` alter column namespace set not null`),
		),
		ansiSQLChange(`alter table ` + m.tableName + ` add column clustername varchar(64)`), // DNS cannot be longer than 64 bytes
		backfillClusterName{m.clusterName, m.tableName},
		ternary(dbType == "mysql",
			ansiSQLChange(`alter table `+m.tableName+` modify column clustername varchar(64) not null`),
			ansiSQLChange(`alter table `+m.tableName+` alter column clustername set not null`),
		),
		ansiSQLChange(`alter table ` + m.tableName + ` add column version varchar(64)`),
		ansiSQLChange(`alter table ` + m.tableName + ` add column nodes text`),
		backfillNodes{tableName: m.tableName},
		ternary(dbType == "mysql",
			ansiSQLChange(`alter table `+m.tableName+` modify column nodes text not null`),
			ansiSQLChange(`alter table `+m.tableName+` alter column nodes set not null`),
		),
		ansiSQLChange(`alter table ` + m.tableName + ` drop column workflow`),
		// add a timestamp column to indicate updated time
		ansiSQLChange(`alter table ` + m.tableName + ` add column updatedat timestamp not null default current_timestamp`),
		// remove the old primary key and add a new one
		ternary(dbType == "mysql",
			ansiSQLChange(`alter table `+m.tableName+` drop primary key`),
			ansiSQLChange(`alter table `+m.tableName+` drop constraint `+m.tableName+`_pkey`),
		),
		ternary(dbType == "mysql",
			ansiSQLChange(`drop index idx_name on `+m.tableName),
			ansiSQLChange(`drop index idx_name`),
		),
		ansiSQLChange(`alter table ` + m.tableName + ` drop column name`),
		ansiSQLChange(`alter table ` + m.tableName + ` add primary key(clustername,uid,version)`),
		ansiSQLChange(`create index ` + m.tableName + `_i1 on ` + m.tableName + ` (clustername,namespace)`),
		// argo_workflows now looks like:
		// clustername(not null) | uid(not null) | namespace(not null) | version(not null) | nodes(not null) | updatedat(not null)
	} {
		// the slice index doubles as the target schema version for the change
		err := m.applyChange(ctx, changeSchemaVersion, change)
		if err != nil {
			return err
		}
	}
	return nil
}
// applyChange bumps the schema version from changeSchemaVersion-1 to
// changeSchemaVersion inside a transaction and, only when this process wins
// the bump (i.e. the change has not been applied before), runs the change.
func (m migrate) applyChange(ctx context.Context, changeSchemaVersion int, c change) error {
	tx, err := m.session.NewTx(ctx)
	if err != nil {
		return err
	}
	// roll back on any early return; this is a no-op after a successful commit
	defer func() { _ = tx.Rollback() }()
	result, err := tx.Exec("update schema_history set schema_version = ? where schema_version = ?", changeSchemaVersion, changeSchemaVersion-1)
	if err != nil {
		return err
	}
	updated, err := result.RowsAffected()
	if err != nil {
		return err
	}
	// exactly one row updated means the version advanced, so the change is new
	if updated == 1 {
		log.WithFields(log.Fields{"changeSchemaVersion": changeSchemaVersion, "change": c}).Info("applying database change")
		if err := c.apply(m.session); err != nil {
			return err
		}
	}
	return tx.Commit()
}