-- Making a database change:
--
-- 1. Update this schema document to match what the end result should be.
--
-- 2. If you're adding a new database table, edit `update-dbix.pl` and
--    add a mapping from the lowercase name of your table to its
--    CamelCase version.
--
-- 3. Run `make -C src/sql update-dbix` from the project root, and
--    git add / git commit the changed, generated files.
--
-- 4. Create a migration in this same directory, named `upgrade-N.sql`
--    (a hypothetical sketch follows).
--
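-- As an illustration only (the version number and column name below are
-- hypothetical), such a migration might look like:
--
--   -- upgrade-999.sql
--   alter table Users add column exampleColumn text;
--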
-- Singleton table to keep track of the schema version.
create table SchemaVersion (
version integer not null
);
create table Users (
userName text primary key not null,
fullName text,
emailAddress text not null,
password text not null, -- sha256 hash
emailOnError integer not null default 0,
type text not null default 'hydra', -- either "hydra", "google" or "github"
publicDashboard boolean not null default false
);
create table UserRoles (
userName text not null,
role text not null,
primary key (userName, role),
foreign key (userName) references Users(userName) on delete cascade on update cascade
);
create table Projects (
name text primary key not null, -- project id, lowercase (e.g. "patchelf")
displayName text not null, -- display name (e.g. "PatchELF")
description text,
enabled integer not null default 1,
hidden integer not null default 0,
owner text not null,
homepage text, -- URL for the project
declfile text, -- File containing declarative jobset specification
decltype text, -- Type of the input containing declarative jobset specification
declvalue text, -- Value of the input containing declarative jobset specification
enable_dynamic_run_command boolean not null default false,
foreign key (owner) references Users(userName) on update cascade
);
create table ProjectMembers (
project text not null,
userName text not null,
primary key (project, userName),
foreign key (project) references Projects(name) on delete cascade on update cascade,
foreign key (userName) references Users(userName) on delete cascade on update cascade
);
-- A jobset consists of a set of inputs (e.g. SVN repositories), one
-- of which contains a Nix expression containing an attribute set
-- describing build jobs.
create table Jobsets (
name text not null,
id serial not null,
project text not null,
description text,
nixExprInput text, -- name of the jobsetInput containing the Nix or Guix expression
nixExprPath text, -- relative path of the Nix or Guix expression
errorMsg text, -- used to signal the last evaluation error etc. for this jobset
errorTime integer, -- timestamp associated with errorMsg
lastCheckedTime integer, -- last time the evaluator looked at this jobset
triggerTime integer, -- set if we were triggered by a push event
enabled integer not null default 1, -- 0 = disabled, 1 = enabled, 2 = one-shot, 3 = one-at-a-time
enableEmail integer not null default 1,
hidden integer not null default 0,
emailOverride text not null,
keepnr integer not null default 3,
checkInterval integer not null default 300, -- minimum time in seconds between polls (0 = disable polling)
schedulingShares integer not null default 100,
fetchErrorMsg text,
forceEval boolean,
startTime integer, -- if jobset is currently running
type integer not null default 0, -- 0 == legacy, 1 == flake
flake text,
enable_dynamic_run_command boolean not null default false,
constraint jobsets_schedulingshares_nonzero_check check (schedulingShares > 0),
constraint jobsets_type_known_check check (type = 0 or type = 1),
-- Type 0 (legacy) iff nixExprInput and nixExprPath are non-null and flake is null;
-- for any other type, that combination must not hold.
constraint jobsets_legacy_paths_check check ((type = 0) = (nixExprInput is not null and nixExprPath is not null and flake is null)),
-- Type 1 (flake) iff flake is non-null and nixExprInput and nixExprPath are null;
-- for any other type, that combination must not hold.
constraint jobsets_flake_paths_check check ((type = 1) = (nixExprInput is null and nixExprPath is null and flake is not null)),
primary key (project, name),
foreign key (project) references Projects(name) on delete cascade on update cascade,
constraint Jobsets_id_unique UNIQUE(id)
);
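-- A hypothetical example of a flake jobset satisfying the checks above
-- (the project 'examples' and the flake URI are made up; type = 1
-- requires flake to be set and the nixExpr* columns to be null):
--
--   insert into Jobsets (name, project, emailOverride, type, flake)
--   values ('nixpkgs', 'examples', '', 1, 'github:NixOS/nixpkgs/nixos-unstable');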
create function notifyJobsetSharesChanged() returns trigger as 'begin notify jobset_shares_changed; return null; end;' language plpgsql;
create trigger JobsetSharesChanged after update on Jobsets for each row
when (old.schedulingShares != new.schedulingShares) execute procedure notifyJobsetSharesChanged();
create function notifyJobsetsAdded() returns trigger as 'begin notify jobsets_added; return null; end;' language plpgsql;
create trigger JobsetsAdded after insert on Jobsets execute procedure notifyJobsetsAdded();
create function notifyJobsetsDeleted() returns trigger as 'begin notify jobsets_deleted; return null; end;' language plpgsql;
create trigger JobsetsDeleted after delete on Jobsets execute procedure notifyJobsetsDeleted();
create function notifyJobsetSchedulingChanged() returns trigger as 'begin notify jobset_scheduling_changed; return null; end;' language plpgsql;
create trigger JobsetSchedulingChanged after update on Jobsets for each row
when (((old.triggerTime is distinct from new.triggerTime) and (new.triggerTime is not null))
or (old.checkInterval != new.checkInterval)
or (old.enabled != new.enabled))
execute procedure notifyJobsetSchedulingChanged();
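-- These triggers publish on Postgres notification channels; a consumer
-- such as the scheduler would presumably subscribe with LISTEN, e.g.:
--
--   listen jobset_shares_changed;
--   listen jobsets_added;
--   listen jobsets_deleted;
--   listen jobset_scheduling_changed;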
create table JobsetRenames (
project text not null,
from_ text not null,
to_ text not null,
primary key (project, from_),
foreign key (project) references Projects(name) on delete cascade on update cascade,
foreign key (project, to_) references Jobsets(project, name) on delete cascade on update cascade
);
create table JobsetInputs (
project text not null,
jobset text not null,
name text not null,
type text not null, -- "svn", "path", "uri", "string", "boolean", "nix"
emailResponsible integer not null default 0, -- whether to email committers to this input who change a build
primary key (project, jobset, name),
foreign key (project, jobset) references Jobsets(project, name) on delete cascade on update cascade
);
create table JobsetInputAlts (
project text not null,
jobset text not null,
input text not null,
altnr integer not null,
-- urgh
value text, -- for most types, a URI; for 'path', an absolute path; for 'string', an arbitrary value
revision text, -- for repositories
primary key (project, jobset, input, altnr),
foreign key (project, jobset, input) references JobsetInputs(project, jobset, name) on delete cascade on update cascade
);
create table Builds (
id serial primary key not null,
finished integer not null, -- 0 = scheduled, 1 = finished
timestamp integer not null, -- time this build was added
-- Info about the inputs.
jobset_id integer not null,
job text not null,
-- Info about the build result.
nixName text, -- name attribute of the derivation
description text, -- meta.description
drvPath text not null,
system text not null,
license text, -- meta.license
homepage text, -- meta.homepage
maintainers text, -- meta.maintainers (concatenated, comma-separated)
maxsilent integer default 3600, -- meta.maxsilent
timeout integer default 36000, -- meta.timeout
isChannel integer not null default 0, -- meta.isHydraChannel
isCurrent integer default 0,
-- Priority within a jobset, set via meta.schedulingPriority.
priority integer not null default 0,
-- Priority among all builds, used by the admin to bump builds to
-- the front of the queue via the web interface.
globalPriority integer not null default 0,
-- FIXME: remove startTime?
startTime integer, -- if busy/finished, time we started
stopTime integer, -- if finished, time we finished
-- Information about finished builds.
isCachedBuild integer, -- boolean
-- Status codes used for builds and steps:
-- 0 = succeeded
-- 1 = regular Nix failure (derivation returned non-zero exit code)
-- 2 = build of a dependency failed [builds only]
-- 3 = build or step aborted due to misc failure
-- 4 = build or step cancelled
-- 5 = [obsolete]
-- 6 = failure with output (i.e. $out/nix-support/failed exists) [builds only]
-- 7 = build timed out
-- 8 = cached failure [steps only; builds use isCachedBuild]
-- 9 = unsupported system type
-- 10 = log limit exceeded
-- 11 = NAR size limit exceeded
-- 12 = build or step was not deterministic
buildStatus integer,
size bigint,
closureSize bigint,
releaseName text, -- e.g. "patchelf-0.5pre1234"
keep integer not null default 0, -- true means never garbage-collect the build output
notificationPendingSince integer,
check (finished = 0 or (stoptime is not null and stoptime != 0)),
check (finished = 0 or (starttime is not null and starttime != 0)),
foreign key (jobset_id) references Jobsets(id) on delete cascade
);
create function notifyBuildsDeleted() returns trigger as 'begin notify builds_deleted; return null; end;' language plpgsql;
create trigger BuildsDeleted after delete on Builds execute procedure notifyBuildsDeleted();
create function notifyBuildRestarted() returns trigger as 'begin notify builds_restarted; return null; end;' language plpgsql;
create trigger BuildRestarted after update on Builds for each row
when (old.finished = 1 and new.finished = 0) execute procedure notifyBuildRestarted();
create function notifyBuildCancelled() returns trigger as 'begin notify builds_cancelled; return null; end;' language plpgsql;
create trigger BuildCancelled after update on Builds for each row
when (old.finished = 0 and new.finished = 1 and new.buildStatus = 4) execute procedure notifyBuildCancelled();
create function notifyBuildBumped() returns trigger as 'begin notify builds_bumped; return null; end;' language plpgsql;
create trigger BuildBumped after update on Builds for each row
when (old.globalPriority != new.globalPriority) execute procedure notifyBuildBumped();
create table BuildOutputs (
build integer not null,
name text not null,
path text,
primary key (build, name),
foreign key (build) references Builds(id) on delete cascade
);
-- TODO: normalize this. Currently there can be multiple BuildSteps
-- for a single step.
create table BuildSteps (
build integer not null,
stepnr integer not null,
type integer not null, -- 0 = build, 1 = substitution
drvPath text,
-- 0 = not busy
-- 1 = building
-- 2 = preparing to build
-- 3 = connecting
-- 4 = sending inputs
-- 5 = receiving outputs
-- 6 = analysing build result
busy integer not null,
status integer, -- see Builds.buildStatus
errorMsg text,
startTime integer,
stopTime integer,
machine text not null default '',
system text,
propagatedFrom integer,
-- Time in milliseconds spent copying stuff from/to build machines.
overhead integer,
-- How many times this build step was done (for checking determinism).
timesBuilt integer,
-- Whether this build step produced different results when repeated.
isNonDeterministic boolean,
primary key (build, stepnr),
foreign key (build) references Builds(id) on delete cascade,
foreign key (propagatedFrom) references Builds(id) on delete cascade
);
create table BuildStepOutputs (
build integer not null,
stepnr integer not null,
name text not null,
path text,
primary key (build, stepnr, name),
foreign key (build) references Builds(id) on delete cascade,
foreign key (build, stepnr) references BuildSteps(build, stepnr) on delete cascade
);
-- Inputs of builds.
create table BuildInputs (
id serial primary key not null,
-- Which build this input belongs to.
build integer,
-- Copied from the jobsetinputs from which the build was created.
name text not null,
type text not null,
uri text,
revision text,
value text,
emailResponsible integer not null default 0,
dependency integer, -- build ID of the input, for type == 'build'
path text,
sha256hash text,
foreign key (build) references Builds(id) on delete cascade,
foreign key (dependency) references Builds(id)
);
create table BuildProducts (
build integer not null,
productnr integer not null,
type text not null, -- "nix-build", "file", "doc", "report", ...
subtype text not null, -- "source-dist", "rpm", ...
fileSize bigint,
sha256hash text,
path text,
name text not null, -- generally just the filename part of `path'
defaultPath text, -- if `path' is a directory, the default file relative to `path' to be served
primary key (build, productnr),
foreign key (build) references Builds(id) on delete cascade
);
create table BuildMetrics (
build integer not null,
name text not null,
unit text,
value double precision not null,
-- Denormalisation for performance: copy some columns from the
-- corresponding build.
project text not null,
jobset text not null,
job text not null,
timestamp integer not null,
primary key (build, name),
foreign key (build) references Builds(id) on delete cascade,
foreign key (project) references Projects(name) on update cascade,
foreign key (project, jobset) references Jobsets(project, name) on update cascade
);
-- Cache for inputs of type "path" (used for testing Hydra), storing
-- the SHA-256 hash and store path for each source path. Also stores
-- the timestamp when we first saw the path have these contents.
create table CachedPathInputs (
srcPath text not null,
timestamp integer not null, -- when we first saw this hash
lastSeen integer not null, -- when we last saw this hash
sha256hash text not null,
storePath text not null,
primary key (srcPath, sha256hash)
);
create table CachedSubversionInputs (
uri text not null,
revision integer not null,
sha256hash text not null,
storePath text not null,
primary key (uri, revision)
);
create table CachedBazaarInputs (
uri text not null,
revision integer not null,
sha256hash text not null,
storePath text not null,
primary key (uri, revision)
);
create table CachedGitInputs (
uri text not null,
branch text not null,
revision text not null,
isDeepClone boolean not null,
sha256hash text not null,
storePath text not null,
primary key (uri, branch, revision, isDeepClone)
);
create table CachedDarcsInputs (
uri text not null,
revision text not null,
sha256hash text not null,
storePath text not null,
revCount integer not null,
primary key (uri, revision)
);
create table CachedHgInputs (
uri text not null,
branch text not null,
revision text not null,
sha256hash text not null,
storePath text not null,
primary key (uri, branch, revision)
);
create table CachedCVSInputs (
uri text not null,
module text not null,
timestamp integer not null, -- when we first saw this hash
lastSeen integer not null, -- when we last saw this hash
sha256hash text not null,
storePath text not null,
primary key (uri, module, sha256hash)
);
create table EvaluationErrors (
id serial primary key not null,
errorMsg text, -- error output from the evaluator
errorTime integer -- timestamp associated with errorMsg
);
create table JobsetEvals (
id serial primary key not null,
jobset_id integer not null,
evaluationerror_id integer,
timestamp integer not null, -- when this entry was added
checkoutTime integer not null, -- how long obtaining the inputs took (in seconds)
evalTime integer not null, -- how long evaluation took (in seconds)
-- If 0, then the evaluation of this jobset did not cause any new
-- builds to be added to the database. Otherwise, *all* the
-- builds resulting from the evaluation of the jobset (including
-- existing ones) can be found in the JobsetEvalMembers table.
hasNewBuilds integer not null,
-- Used to prevent repeated Nix expression evaluation for the same
-- set of inputs for a jobset. In the evaluator, after obtaining
-- the current inputs for a jobset, we hash the inputs together,
-- and if the resulting hash already appears in this table, we can
-- skip the jobset. Otherwise we proceed. The hash is computed
-- over the command-line arguments to hydra-eval-jobs. (A lookup
-- sketch follows this table definition.)
hash text not null,
-- Cached stats about the builds.
nrBuilds integer,
nrSucceeded integer, -- set lazily when all builds are finished
flake text, -- immutable flake reference
nixExprInput text, -- name of the jobsetInput containing the Nix or Guix expression
nixExprPath text, -- relative path of the Nix or Guix expression
foreign key (jobset_id) references Jobsets(id) on delete cascade,
foreign key (evaluationerror_id) references EvaluationErrors(id) on delete set null
);
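-- A sketch of the dedup lookup described above (the jobset id and hash
-- are hypothetical placeholders):
--
--   select 1 from JobsetEvals where jobset_id = 1 and hash = '<input hash>';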
create table JobsetEvalInputs (
eval integer not null references JobsetEvals(id) on delete cascade,
name text not null,
altNr integer not null,
-- Copied from the jobsetinputs from which the build was created.
type text not null,
uri text,
revision text,
value text,
dependency integer, -- build ID of the input, for type == 'build'
path text,
sha256hash text,
primary key (eval, name, altNr),
foreign key (dependency) references Builds(id)
);
create table JobsetEvalMembers (
eval integer not null references JobsetEvals(id) on delete cascade,
build integer not null references Builds(id) on delete cascade,
isNew integer not null,
primary key (eval, build)
);
create table UriRevMapper (
baseuri text not null,
uri text not null,
primary key (baseuri)
);
create table NewsItems (
id serial primary key not null,
contents text not null,
createTime integer not null,
author text not null,
foreign key (author) references Users(userName) on delete cascade on update cascade
);
create table AggregateConstituents (
aggregate integer not null references Builds(id) on delete cascade,
constituent integer not null references Builds(id) on delete cascade,
primary key (aggregate, constituent)
);
create table StarredJobs (
userName text not null,
project text not null,
jobset text not null,
job text not null,
primary key (userName, project, jobset, job),
foreign key (userName) references Users(userName) on update cascade on delete cascade,
foreign key (project) references Projects(name) on update cascade on delete cascade,
foreign key (project, jobset) references Jobsets(project, name) on update cascade on delete cascade
);
-- Events processed by hydra-notify that have failed at least once.
--
-- The payload field contains the original, unparsed payload.
--
-- One row is created for each plugin that fails to process the event,
-- with increasing retry_at and attempts fields.
create table TaskRetries (
id serial primary key not null,
channel text not null,
pluginname text not null,
payload text not null,
attempts integer not null,
retry_at integer not null
);
create index IndexTaskRetriesOrdered on TaskRetries(retry_at asc);
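-- A sketch of how due retries might be claimed (assuming retry_at holds
-- an epoch timestamp, as the integer type suggests):
--
--   select * from TaskRetries
--   where retry_at <= extract(epoch from now())
--   order by retry_at asc limit 1;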
-- Records of RunCommand executions
--
-- The intended flow is:
--
-- 1. Create a RunCommandLogs entry when the task is "queued" to run
-- 2. Update the start_time when it begins
-- 3. Update the end_time and exit_code when it completes
--
-- (A sketch of these steps follows the table definition.)
create table RunCommandLogs (
id serial primary key not null,
uuid uuid not null,
job_matcher text not null,
build_id integer not null,
-- TODO: evaluation_id integer not null,
-- can we do this in a principled way? a build can be part of many evaluations
-- but a "bug" of RunCommand, imho, is that it should probably run per evaluation?
command text not null,
start_time integer,
end_time integer,
error_number integer,
exit_code integer,
signal integer,
core_dumped boolean,
foreign key (build_id) references Builds(id) on delete cascade,
-- foreign key (evaluation_id) references Builds(id) on delete cascade,
constraint RunCommandLogs_not_started_no_exit_time_no_code check (
-- If start_time is null, then end_time, error_number, exit_code,
-- signal, and core_dumped must all be null.
-- A logical implication operator would be nice :).
(start_time is not null) or (
end_time is null
and error_number is null
and exit_code is null
and signal is null
and core_dumped is null
)
),
constraint RunCommandLogs_end_time_has_start_time check (
-- If end_time is not null, then start_time must also be not null.
(end_time is null) or (start_time is not null)
),
-- The uuid should actually be unique.
constraint RunCommandLogs_uuid_unique unique(uuid)
-- Note: if exit_code is not null then signal and core_dumped must be null.
-- Similarly, if signal is not null then exit_code must be null and
-- core_dumped must not be null. However, these semantics are tricky
-- to encode as constraints and probably provide limited actual value.
);
create index IndexRunCommandLogsOnBuildID on RunCommandLogs(build_id);
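-- A sketch of the queued/started/finished flow described above (the id,
-- uuid, matcher, build_id, and command are all hypothetical values):
--
--   -- 1. queued
--   insert into RunCommandLogs (uuid, job_matcher, build_id, command)
--   values ('00000000-0000-0000-0000-000000000000', '*:*:*', 1, 'true');
--   -- 2. started
--   update RunCommandLogs set start_time = extract(epoch from now()) where id = 1;
--   -- 3. finished
--   update RunCommandLogs set end_time = extract(epoch from now()), exit_code = 0 where id = 1;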
-- The output paths that have permanently failed.
create table FailedPaths (
path text primary key not null
);
-- Historically needed because Postgres didn't have "ignore duplicate"
-- or upsert (PostgreSQL 9.5 added "insert ... on conflict do nothing").
create rule IdempotentInsert as on insert to FailedPaths
where exists (select 1 from FailedPaths where path = new.path)
do instead nothing;
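-- On PostgreSQL 9.5 or later, an equivalent idempotent insert would be
-- (the store path is a hypothetical example):
--
--   insert into FailedPaths (path) values ('/nix/store/example')
--   on conflict (path) do nothing;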
create table SystemStatus (
what text primary key not null,
status json not null
);
-- Cache of the number of finished builds.
create table NrBuilds (
what text primary key not null,
count integer not null
);
insert into NrBuilds(what, count) values('finished', 0);
create function modifyNrBuildsFinished() returns trigger as $$
begin
if ((tg_op = 'INSERT' and new.finished = 1) or
(tg_op = 'UPDATE' and old.finished = 0 and new.finished = 1)) then
update NrBuilds set count = count + 1 where what = 'finished';
elsif ((tg_op = 'DELETE' and old.finished = 1) or
(tg_op = 'UPDATE' and old.finished = 1 and new.finished = 0)) then
update NrBuilds set count = count - 1 where what = 'finished';
end if;
return null;
end;
$$ language plpgsql;
create trigger NrBuildsFinished after insert or update or delete on Builds
for each row
execute procedure modifyNrBuildsFinished();
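-- The cache makes the finished-build count an O(1) lookup rather than a
-- count(*) scan over Builds:
--
--   select count from NrBuilds where what = 'finished';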
-- Some indices.
create index IndexBuildInputsOnBuild on BuildInputs(build);
create index IndexBuildInputsOnDependency on BuildInputs(dependency);
create index IndexBuildMetricsOnJobTimestamp on BuildMetrics(project, jobset, job, timestamp desc);
create index IndexBuildProducstOnBuildAndType on BuildProducts(build, type);
create index IndexBuildProductsOnBuild on BuildProducts(build);
create index IndexBuildStepsOnBusy on BuildSteps(busy) where busy != 0;
create index IndexBuildStepsOnDrvPath on BuildSteps(drvpath);
create index IndexBuildStepsOnPropagatedFrom on BuildSteps(propagatedFrom) where propagatedFrom is not null;
create index IndexBuildStepsOnStopTime on BuildSteps(stopTime desc) where startTime is not null and stopTime is not null;
create index IndexBuildStepOutputsOnPath on BuildStepOutputs(path);
create index IndexBuildsOnFinished on Builds(finished) where finished = 0;
create index IndexBuildsOnIsCurrent on Builds(isCurrent) where isCurrent = 1;
create index IndexBuildsJobsetIdCurrentUnfinished on Builds(jobset_id) where isCurrent = 1 and finished = 0;
create index IndexBuildsJobsetIdCurrentFinishedStatus on Builds(jobset_id, buildstatus) where isCurrent = 1 and finished = 1;
create index IndexBuildsJobsetIdCurrent on Builds(jobset_id) where isCurrent = 1;
create index IndexBuildsOnTimestamp on Builds(timestamp);
create index IndexBuildsOnFinishedStopTime on Builds(finished, stoptime DESC);
create index IndexBuildsOnJobsetIdFinishedId on Builds(jobset_id, job, finished, id DESC);
create index IndexFinishedSuccessfulBuilds on Builds(jobset_id, job, finished, buildstatus, id DESC) where buildstatus = 0 and finished = 1;
create index IndexBuildsOnDrvPath on Builds(drvPath);
create index IndexCachedHgInputsOnHash on CachedHgInputs(uri, branch, sha256hash);
create index IndexCachedGitInputsOnHash on CachedGitInputs(uri, branch, sha256hash);
create index IndexCachedSubversionInputsOnUriRevision on CachedSubversionInputs(uri, revision);
create index IndexCachedBazaarInputsOnUriRevision on CachedBazaarInputs(uri, revision);
create index IndexJobsetEvalMembersOnBuild on JobsetEvalMembers(build);
create index IndexJobsetEvalMembersOnEval on JobsetEvalMembers(eval);
create index IndexJobsetInputAltsOnInput on JobsetInputAlts(project, jobset, input);
create index IndexJobsetInputAltsOnJobset on JobsetInputAlts(project, jobset);
create index IndexProjectsOnEnabled on Projects(enabled);
create index IndexBuildOutputsPath on BuildOutputs using hash(path);
-- For hydra-update-gc-roots.
create index IndexBuildsOnKeep on Builds(keep) where keep = 1;
-- To get the most recent eval for a jobset.
create index IndexJobsetEvalsOnJobsetId on JobsetEvals(jobset_id, id desc) where hasNewBuilds = 1;
create index IndexJobsetIdEvals on JobsetEvals(jobset_id) where hasNewBuilds = 1;
create index IndexBuildsOnNotificationPendingSince on Builds(notificationPendingSince) where notificationPendingSince is not null;
-- The pg_trgm extension has to be created by a superuser. The NixOS
-- module creates this extension in the systemd prestart script. We
-- then ensure the extension exists before creating the index. If the
-- extension cannot be created, a warning is emitted informing the user
-- that index creation was skipped (making complex queries on
-- builds.drvpath slower).
do $$
begin
create extension if not exists pg_trgm;
-- Provide an index used by the LIKE operator on builds.drvpath (search queries).
create index IndexTrgmBuildsOnDrvpath on builds using gin (drvpath gin_trgm_ops);
exception when others then
raise warning 'Cannot create extension pg_trgm: %', SQLERRM;
raise warning 'HINT: Temporarily grant the superuser role to your Hydra PostgreSQL user and run the script src/sql/upgrade-57.sql';
raise warning 'The pg_trgm index on builds.drvpath has been skipped (complex queries on builds.drvpath will be slower)';
end$$;
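-- The kind of query the trigram index is meant to accelerate (the
-- search substring is a hypothetical example):
--
--   select id from builds where drvpath like '%hello%';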