/
preparerequest.go
250 lines (205 loc) · 10.8 KB
/
preparerequest.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
package schedulerutils
import (
"sync"
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/logger"
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkggraph"
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/pkgjson"
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/sliceutils"
"github.com/microsoft/CBL-Mariner/toolkit/tools/internal/timestamp"
)
// ConvertNodesToRequests converts a slice of nodes into a slice of build requests.
// - It will determine if the cache can be used for prebuilt nodes.
// - It will group similar build nodes together into AncillaryNodes.
//
// Explanation of handling of the test nodes:
//  1. The virtual B -> T edge guarantees the build node are unblocked and analyzed first.
//  2. Once the build node is unblocked, analyze its partner test node in partnerTestNodesToRequest().
//     We remove the virtual edge and the test node either gets immediately queued or is blocked on some extra dependencies.
//     Blocking is decided by canUseCacheForNode().
//  3. If the test node ends up being blocked, it gets re-analyzed later once its dependencies are done.
//     The test nodes unblocked this way end up inside the 'testNodes' list in ConvertNodesToRequests()
//     and are queued for building in the testNodesToRequests() function.
//     At this point the partner build nodes for these test nodes have either already finished building or are being built,
//     thus the check for active and cached SRPMs inside testNodesToRequests().
func ConvertNodesToRequests(pkgGraph *pkggraph.PkgGraph, graphMutex *sync.RWMutex, nodesToBuild []*pkggraph.PkgNode, packagesToRebuild, testsToRerun []*pkgjson.PackageVer, buildState *GraphBuildState, isCacheAllowed bool) (requests []*BuildRequest) {
	timestamp.StartEvent("generate requests", nil)
	defer timestamp.StopEvent(nil)

	graphMutex.RLock()
	defer graphMutex.RUnlock()

	// Bucket the build and test nodes by SRPM path: all nodes sharing an SRPM are
	// unblocked at the same time, and building one build node makes all of them available.
	buildNodesBySRPM := make(map[string][]*pkggraph.PkgNode)
	testNodesBySRPM := make(map[string][]*pkggraph.PkgNode)
	for _, node := range nodesToBuild {
		switch node.Type {
		case pkggraph.TypeLocalBuild:
			buildNodesBySRPM[node.SrpmPath] = append(buildNodesBySRPM[node.SrpmPath], node)
		case pkggraph.TypeTest:
			testNodesBySRPM[node.SrpmPath] = append(testNodesBySRPM[node.SrpmPath], node)
		default:
			// Every other node type becomes its own single-node request.
			isDelta := node.State == pkggraph.StateDelta
			singletonNodes := []*pkggraph.PkgNode{node}
			requests = append(requests, buildRequest(pkgGraph, buildState, packagesToRebuild, node, singletonNodes, isCacheAllowed, isDelta))
		}
	}

	requests = append(requests, buildNodesToRequests(pkgGraph, buildState, packagesToRebuild, testsToRerun, buildNodesBySRPM, isCacheAllowed)...)
	requests = append(requests, testNodesToRequests(pkgGraph, buildState, testsToRerun, testNodesBySRPM)...)
	return
}
// buildNodesToRequests converts per-SRPM groups of build nodes into build requests,
// deciding per group whether the cached build artifacts may be reused.
func buildNodesToRequests(pkgGraph *pkggraph.PkgGraph, buildState *GraphBuildState, packagesToRebuild, testsToRerun []*pkgjson.PackageVer, buildNodesLists map[string][]*pkggraph.PkgNode, isCacheAllowed bool) (requests []*BuildRequest) {
	for _, buildNodes := range buildNodesLists {
		// A group counts as a delta build if any of its nodes is a delta node;
		// delta builds might have pre-built .rpm files available.
		isDeltaBuild := false
		for _, node := range buildNodes {
			if node.State == pkggraph.StateDelta {
				isDeltaBuild = true
				break
			}
		}

		defaultNode := buildNodes[0]
		req := buildRequest(pkgGraph, buildState, packagesToRebuild, defaultNode, buildNodes, isCacheAllowed, isDeltaBuild)
		if req.UseCache {
			// The cache is only truly usable when no expected .rpm files are missing.
			expectedFiles, missingFiles := pkggraph.FindRPMFiles(defaultNode.SrpmPath, pkgGraph, nil)
			req.ExpectedFiles = expectedFiles
			if len(missingFiles) > 0 {
				if len(missingFiles) < len(expectedFiles) {
					logger.Log.Infof("SRPM '%s' will be rebuilt due to partially missing components: %v", defaultNode.SRPMFileName(), missingFiles)
				}
				req.UseCache = false
				req.Freshness = buildState.GetMaxFreshness()
				logger.Log.Debugf("Resetting freshness to %d due to missing files.", req.Freshness)
			}
		}
		requests = append(requests, req)

		// Queue the partner test nodes (if any) alongside the build request.
		if testReq := partnerTestNodesToRequest(pkgGraph, buildState, testsToRerun, buildNodes, req.UseCache); testReq != nil {
			requests = append(requests, testReq)
		}
	}
	return
}
// buildNodeToTestNode returns the first test node depending on the given build
// node, or nil when the build node has no partner test node.
func buildNodeToTestNode(pkgGraph *pkggraph.PkgGraph, buildNode *pkggraph.PkgNode) (testNode *pkggraph.PkgNode) {
	for iter := pkgGraph.To(buildNode.ID()); iter.Next(); {
		if candidate := iter.Node().(*pkggraph.PkgNode); candidate.Type == pkggraph.TypeTest {
			return candidate
		}
	}
	return nil
}
// buildRequest assembles a build request for the given node and its ancillary
// nodes. The request starts at maximum freshness; if the node is not explicitly
// marked for rebuild and caching is allowed, the cache flag and freshness are
// recalculated from the node's dependencies.
func buildRequest(pkgGraph *pkggraph.PkgGraph, buildState *GraphBuildState, packagesToRebuild []*pkgjson.PackageVer, builtNode *pkggraph.PkgNode, ancillaryNodes []*pkggraph.PkgNode, isCacheAllowed, isDelta bool) (request *BuildRequest) {
	request = &BuildRequest{
		Node:           builtNode,
		PkgGraph:       pkgGraph,
		AncillaryNodes: ancillaryNodes,
		IsDelta:        isDelta,
		Freshness:      buildState.GetMaxFreshness(),
	}

	if isRequiredRebuild(builtNode, packagesToRebuild) || !isCacheAllowed {
		return
	}

	// The cache might be usable; derive the freshness from the node's dependencies.
	request.UseCache, request.Freshness = canUseCacheForNode(pkgGraph, builtNode, buildState)
	return
}
// partnerTestNodesToRequest collects the partner test nodes of the given build
// nodes, removes the virtual test->build edges that kept them blocked, and
// returns a test build request if the tests are now unblocked (nil otherwise).
//
// buildUsesCache tells the request whether the partner build is coming from the
// cache, which feeds into the cache decision for the tests.
func partnerTestNodesToRequest(pkgGraph *pkggraph.PkgGraph, buildState *GraphBuildState, testsToRerun []*pkgjson.PackageVer, buildNodes []*pkggraph.PkgNode, buildUsesCache bool) (request *BuildRequest) {
	const isDelta = false

	defaultBuildNode := buildNodes[0]
	testNode := buildNodeToTestNode(pkgGraph, defaultBuildNode)
	if testNode == nil {
		// No tests are associated with this SRPM.
		return
	}

	ancillaryTestNodes := []*pkggraph.PkgNode{}
	for _, buildNode := range buildNodes {
		partnerTestNode := buildNodeToTestNode(pkgGraph, buildNode)
		// Bug fix: only the first build node's partner was nil-checked above.
		// A build node with no partner test node would previously cause a nil
		// pointer dereference on the RemoveEdge call below; skip it instead.
		if partnerTestNode == nil {
			continue
		}
		testNode = partnerTestNode

		// Removing edges even if tests are blocked by other dependencies,
		// so that they can get unblocked once these other dependencies are available.
		pkgGraph.RemoveEdge(testNode.ID(), buildNode.ID())
		ancillaryTestNodes = append(ancillaryTestNodes, testNode)
	}

	if !isNodeUnblocked(pkgGraph, buildState, testNode) {
		return
	}

	request = buildRequest(pkgGraph, buildState, testsToRerun, testNode, ancillaryTestNodes, buildUsesCache, isDelta)
	return
}
// testNodesToRequests converts lists of test nodes into test build requests.
// The function is expected to be only called for test nodes corresponding to build nodes,
// which have already been queued to build or finished building.
//
// NOTE: the caller must guarantee the build state does not change while this function is running.
func testNodesToRequests(pkgGraph *pkggraph.PkgGraph, buildState *GraphBuildState, testsToRerun []*pkgjson.PackageVer, testNodesLists map[string][]*pkggraph.PkgNode) (requests []*BuildRequest) {
	const isDelta = false

	for _, testNodes := range testNodesLists {
		defaultTestNode := testNodes[0]
		srpmFileName := defaultTestNode.SRPMFileName()

		// Prefer the cache flag of an in-flight build for this SRPM; otherwise
		// fall back to the recorded cached state. (The local is named
		// 'activeBuild' so it does not shadow the buildRequest() function.)
		buildUsedCache := buildState.IsSRPMCached(srpmFileName)
		if activeBuild := buildState.ActiveBuildFromSRPM(srpmFileName); activeBuild != nil {
			buildUsedCache = activeBuild.UseCache
		}

		requests = append(requests, buildRequest(pkgGraph, buildState, testsToRerun, defaultTestNode, testNodes, buildUsedCache, isDelta))
	}
	return
}
// isRequiredRebuild checks if a node is required to be rebuilt based on the packagesToRebuild list.
func isRequiredRebuild(node *pkggraph.PkgNode, packagesToRebuild []*pkgjson.PackageVer) (requiredRebuild bool) {
	requiredRebuild = sliceutils.Contains(packagesToRebuild, node.VersionedPkg, sliceutils.PackageVerMatch)
	if requiredRebuild {
		logger.Log.Debugf("Marking (%s) for rebuild per user request", node.VersionedPkg)
	}
	return
}
// canUseCacheForNode checks if the cache can be used for a given node by:
//   - Assume the node is stale to begin (freshness == 0).
//   - Check if all dependencies of the node were cached, and calculate the expected freshness of the node based on the freshest dependency.
//   - If all dependencies are cached (freshness == 0, aka stale) then the node will keep freshness 0 and may use the cache.
//   - If any dependency is fresh (aka freshness > 0) then the node can't use the cache and will inherit the freshness of
//     the freshest dependency (possibly adjusted by -1 for certain edges).
func canUseCacheForNode(pkgGraph *pkggraph.PkgGraph, node *pkggraph.PkgNode, buildState *GraphBuildState) (canUseCache bool, freshness uint) {
	canUseCache = true

	// Any dependency with freshness > 0 is treated as having been built rather
	// than cached, which forces a rebuild of this node. Each completed build
	// layer decrements freshness by 1 as it propagates through the graph.
	for deps := pkgGraph.From(node.ID()); deps.Next(); {
		dep := deps.Node().(*pkggraph.PkgNode)
		depFreshness, forcesRebuild := calculateExpectedFreshness(dep, buildState)
		if depFreshness > freshness {
			freshness = depFreshness
		}
		if forcesRebuild {
			logger.Log.Debugf("Can't use cached version of %v because %v has been rebuilt with a freshness of %d", node.FriendlyName(), dep.FriendlyName(), depFreshness)
			canUseCache = false
		}
	}
	return
}
// calculateExpectedFreshness calculates how "fresh" a node will be based on one of its dependencies, and if that
// dependency should cause a rebuild. This function will determine if the freshness should be attenuated based on
// the dependency type.
func calculateExpectedFreshness(dependencyNode *pkggraph.PkgNode, buildState *GraphBuildState) (expectedFreshness uint, shouldRebuild bool) {
	// Remote nodes are always 'stale' and should never generate a rebuild.
	if dependencyNode.Type == pkggraph.TypeRemoteRun {
		return
	}

	expectedFreshness = buildState.GetFreshnessOfNode(dependencyNode)
	shouldRebuild = expectedFreshness > 0

	// The transition from (* -> run) nodes is sufficient to attenuate the freshness throughout the graph. For BuildRequires,
	// each build node will always be accompanied by a run node (i.e., no other nodes depend directly on the build
	// node, and we would like the associated run node to inherit its build node's freshness). We also want to
	// attenuate for runtime requires which again will generally be a (run -> run) transition. Meta nodes may be interposed
	// between any nodes so we pass the freshness through unchanged everywhere else.
	if expectedFreshness > 0 && dependencyNode.Type == pkggraph.TypeLocalRun {
		expectedFreshness--
	}
	return
}