/
depgraph.py
9496 lines (8335 loc) · 324 KB
/
depgraph.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# Copyright 1999-2016 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import division, print_function, unicode_literals
import collections
import errno
import io
import logging
import stat
import sys
import textwrap
import warnings
from collections import deque
from itertools import chain
import portage
from portage import os, OrderedDict
from portage import _unicode_decode, _unicode_encode, _encodings
from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH, VCS_DIRS
from portage.dbapi import dbapi
from portage.dbapi.dep_expand import dep_expand
from portage.dbapi.DummyTree import DummyTree
from portage.dbapi.IndexedPortdb import IndexedPortdb
from portage.dbapi._similar_name_search import similar_name_search
from portage.dep import Atom, best_match_to_list, extract_affecting_use, \
check_required_use, human_readable_required_use, match_from_list, \
_repo_separator
from portage.dep._slot_operator import (ignore_built_slot_operator_deps,
strip_slots)
from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use, \
_get_eapi_attrs
from portage.exception import (InvalidAtom, InvalidData, InvalidDependString,
PackageNotFound, PortageException)
from portage.localization import _
from portage.output import colorize, create_color_func, \
darkgreen, green
bad = create_color_func("BAD")
from portage.package.ebuild.config import _get_feature_flags
from portage.package.ebuild.getmaskingstatus import \
_getmaskingstatus, _MaskReason
from portage._sets import SETPREFIX
from portage._sets.base import InternalPackageSet
from portage.util import ConfigProtect, shlex_split, new_protect_filename
from portage.util import cmp_sort_key, writemsg, writemsg_stdout
from portage.util import ensure_dirs
from portage.util import writemsg_level, write_atomic
from portage.util.digraph import digraph
from portage.util._async.TaskScheduler import TaskScheduler
from portage.util._eventloop.EventLoop import EventLoop
from portage.util._eventloop.global_event_loop import global_event_loop
from portage.versions import catpkgsplit
from _emerge.AtomArg import AtomArg
from _emerge.Blocker import Blocker
from _emerge.BlockerCache import BlockerCache
from _emerge.BlockerDepPriority import BlockerDepPriority
from .chk_updated_cfg_files import chk_updated_cfg_files
from _emerge.countdown import countdown
from _emerge.create_world_atom import create_world_atom
from _emerge.Dependency import Dependency
from _emerge.DependencyArg import DependencyArg
from _emerge.DepPriority import DepPriority
from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
from _emerge.FakeVartree import FakeVartree
from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
from _emerge.is_valid_package_atom import insert_category_into_atom, \
is_valid_package_atom
from _emerge.Package import Package
from _emerge.PackageArg import PackageArg
from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
from _emerge.RootConfig import RootConfig
from _emerge.search import search
from _emerge.SetArg import SetArg
from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
from _emerge.UnmergeDepPriority import UnmergeDepPriority
from _emerge.UseFlagDisplay import pkg_use_display
from _emerge.UserQuery import UserQuery
from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
from _emerge.resolver.DbapiProvidesIndex import DbapiProvidesIndex
from _emerge.resolver.package_tracker import PackageTracker, PackageTrackerDbapiWrapper
from _emerge.resolver.slot_collision import slot_conflict_handler
from _emerge.resolver.circular_dependency import circular_dependency_handler
from _emerge.resolver.output import Display, format_unmatched_atom
# Python 2 / Python 3 compatibility aliases: on Python 3 the Python 2
# builtin names basestring/long/unicode no longer exist, so bind them
# (and the internal _unicode alias) to their Python 3 equivalents.
if sys.hexversion >= 0x3000000:
	basestring = str
	long = int
	_unicode = str
else:
	_unicode = unicode
class _scheduler_graph_config(object):
	"""
	Plain value object handed to the Scheduler: the per-root tree
	mapping, the shared Package cache, the dependency digraph and the
	serialized merge list.
	"""

	def __init__(self, trees, pkg_cache, graph, mergelist):
		self.trees, self.pkg_cache, self.graph, self.mergelist = (
			trees, pkg_cache, graph, mergelist)
def _wildcard_set(atoms):
	"""
	Build an InternalPackageSet from a list of atom strings, allowing
	extended (wildcard) atom syntax.  A token that does not parse as an
	atom on its own is retried with a "*/" category prefix, so bare
	package names are accepted too.
	"""
	pkgs = InternalPackageSet(allow_wildcard=True)
	for token in atoms:
		try:
			atom = Atom(token, allow_wildcard=True, allow_repo=False)
		except portage.exception.InvalidAtom:
			# Assume an unqualified package name; match any category.
			atom = Atom("*/" + token, allow_wildcard=True, allow_repo=False)
		pkgs.add(atom)
	return pkgs
class _frozen_depgraph_config(object):
	"""
	Depgraph configuration that stays constant across backtracking runs:
	settings, trees (with FakeVartree substituted for the real vartree),
	per-root pkgsettings/RootConfig instances, and the atom sets derived
	from command-line options.
	"""

	def __init__(self, settings, trees, myopts, params, spinner):
		self.settings = settings
		self.target_root = settings["EROOT"]
		self.myopts = myopts
		# edebug mirrors PORTAGE_DEBUG=1 as an int flag.
		self.edebug = 0
		if settings.get("PORTAGE_DEBUG", "") == "1":
			self.edebug = 1
		self.spinner = spinner
		self.requested_depth = params.get("deep", 0)
		self._running_root = trees[trees._running_eroot]["root_config"]
		self.pkgsettings = {}
		self.trees = {}
		self._trees_orig = trees
		self.roots = {}
		# All Package instances
		self._pkg_cache = {}
		self._highest_license_masked = {}
		# We can't know that an soname dep is unsatisfied if there are
		# any unbuilt ebuilds in the graph, since unbuilt ebuilds have
		# no soname data. Therefore, only enable soname dependency
		# resolution if --usepkgonly is enabled, or for removal actions.
		self.soname_deps_enabled = (
			("--usepkgonly" in myopts or "remove" in params) and
			params.get("ignore_soname_deps") != "y")
		dynamic_deps = myopts.get("--dynamic-deps", "y") != "n"
		ignore_built_slot_operator_deps = myopts.get(
			"--ignore-built-slot-operator-deps", "n") == "y"
		for myroot in trees:
			self.trees[myroot] = {}
			# Create a RootConfig instance that references
			# the FakeVartree instead of the real one.
			self.roots[myroot] = RootConfig(
				trees[myroot]["vartree"].settings,
				self.trees[myroot],
				trees[myroot]["root_config"].setconfig)
			for tree in ("porttree", "bintree"):
				self.trees[myroot][tree] = trees[myroot][tree]
			self.trees[myroot]["vartree"] = \
				FakeVartree(trees[myroot]["root_config"],
					pkg_cache=self._pkg_cache,
					pkg_root_config=self.roots[myroot],
					dynamic_deps=dynamic_deps,
					ignore_built_slot_operator_deps=ignore_built_slot_operator_deps,
					soname_deps=self.soname_deps_enabled)
			self.pkgsettings[myroot] = portage.config(
				clone=self.trees[myroot]["vartree"].settings)
			if self.soname_deps_enabled and "remove" not in params:
				# Wrap the bintree dbapi so soname "provides" lookups
				# are indexed for this root.
				self.trees[myroot]["bintree"] = DummyTree(
					DbapiProvidesIndex(trees[myroot]["bintree"].dbapi))

		self._required_set_names = set(["world"])

		# Each of the following options accumulates whitespace-separated
		# atom lists which are parsed into wildcard-capable package sets.
		atoms = ' '.join(myopts.get("--exclude", [])).split()
		self.excluded_pkgs = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--reinstall-atoms", [])).split()
		self.reinstall_atoms = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--usepkg-exclude", [])).split()
		self.usepkg_exclude = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--useoldpkg-atoms", [])).split()
		self.useoldpkg_atoms = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--rebuild-exclude", [])).split()
		self.rebuild_exclude = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--rebuild-ignore", [])).split()
		self.rebuild_ignore = _wildcard_set(atoms)

		self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts
		self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts
		self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts
class _depgraph_sets(object):
	"""
	Tracks the package sets involved in graph construction for a single
	root: the sets themselves, the union of their atoms, and the mapping
	from atoms back to the arguments that pulled them in.
	"""

	def __init__(self):
		# All sets added to the graph, keyed by set name; the special
		# "__non_set_args__" set collects atoms given directly as
		# arguments rather than via a named set.
		self.sets = {"__non_set_args__": InternalPackageSet(allow_repo=True)}
		# Union of atoms from all sets above, plus argument atoms.
		self.atoms = InternalPackageSet(allow_repo=True)
		self.atom_arg_map = {}
class _rebuild_config(object):
	"""
	Implements the --rebuild-if-new-rev / --rebuild-if-new-ver /
	--rebuild-if-unbuilt logic: accumulates build-time dependency edges
	in a digraph, then walks it bottom-up to decide which parents must
	be rebuilt or reinstalled.
	"""

	def __init__(self, frozen_config, backtrack_parameters):
		self._graph = digraph()
		self._frozen_config = frozen_config
		# Copies, so this run's additions don't leak into the
		# backtracker's state.
		self.rebuild_list = backtrack_parameters.rebuild_list.copy()
		self.orig_rebuild_list = self.rebuild_list.copy()
		self.reinstall_list = backtrack_parameters.reinstall_list.copy()
		self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev
		self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver
		self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt
		# True when any rebuild-trigger option is active at all.
		self.rebuild = (self.rebuild_if_new_rev or self.rebuild_if_new_ver or
			self.rebuild_if_unbuilt)

	def add(self, dep_pkg, dep):
		"""
		Record a build-time dependency edge (dep_pkg -> parent) in the
		rebuild graph, unless rebuilds are disabled or the packages are
		matched by --rebuild-exclude / --rebuild-ignore.
		"""
		parent = dep.collapsed_parent
		priority = dep.collapsed_priority
		rebuild_exclude = self._frozen_config.rebuild_exclude
		rebuild_ignore = self._frozen_config.rebuild_ignore
		if (self.rebuild and isinstance(parent, Package) and
			parent.built and priority.buildtime and
			isinstance(dep_pkg, Package) and
			not rebuild_exclude.findAtomForPackage(parent) and
			not rebuild_ignore.findAtomForPackage(dep_pkg)):
			self._graph.add(dep_pkg, parent, priority)

	def _needs_rebuild(self, dep_pkg):
		"""Check whether packages that depend on dep_pkg need to be rebuilt."""
		dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom)
		if dep_pkg.built or dep_root_slot in self.orig_rebuild_list:
			return False

		if self.rebuild_if_unbuilt:
			# dep_pkg is being installed from source, so binary
			# packages for parents are invalid. Force rebuild
			return True

		trees = self._frozen_config.trees
		vardb = trees[dep_pkg.root]["vartree"].dbapi
		if self.rebuild_if_new_rev:
			# Parent packages are valid if a package with the same
			# cpv is already installed.
			return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom)

		# Otherwise, parent packages are valid if a package with the same
		# version (excluding revision) is already installed.
		assert self.rebuild_if_new_ver
		cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
		for inst_cpv in vardb.match(dep_pkg.slot_atom):
			inst_cpv_norev = catpkgsplit(inst_cpv)[:-1]
			if inst_cpv_norev == cpv_norev:
				return False

		return True

	def _trigger_rebuild(self, parent, build_deps):
		"""
		Decide whether `parent` must be rebuilt (returns True and adds it
		to rebuild_list) or reinstalled (adds to reinstall_list), based on
		the state of its build-time dependencies in `build_deps`
		(a slot_atom -> dep_pkg mapping filled in by trigger_rebuilds).
		"""
		root_slot = (parent.root, parent.slot_atom)
		if root_slot in self.rebuild_list:
			return False
		trees = self._frozen_config.trees
		reinstall = False
		for slot_atom, dep_pkg in build_deps.items():
			dep_root_slot = (dep_pkg.root, slot_atom)
			if self._needs_rebuild(dep_pkg):
				self.rebuild_list.add(root_slot)
				return True
			elif ("--usepkg" in self._frozen_config.myopts and
				(dep_root_slot in self.reinstall_list or
				dep_root_slot in self.rebuild_list or
				not dep_pkg.installed)):

				# A direct rebuild dependency is being installed. We
				# should update the parent as well to the latest binary,
				# if that binary is valid.
				#
				# To validate the binary, we check whether all of the
				# rebuild dependencies are present on the same binhost.
				#
				# 1) If parent is present on the binhost, but one of its
				#    rebuild dependencies is not, then the parent should
				#    be rebuilt from source.
				# 2) Otherwise, the parent binary is assumed to be valid,
				#    because all of its rebuild dependencies are
				#    consistent.
				bintree = trees[parent.root]["bintree"]
				uri = bintree.get_pkgindex_uri(parent.cpv)
				dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
				bindb = bintree.dbapi
				if self.rebuild_if_new_ver and uri and uri != dep_uri:
					# Same version (ignoring revision) on the parent's
					# binhost also counts as consistent.
					cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
					for cpv in bindb.match(dep_pkg.slot_atom):
						if cpv_norev == catpkgsplit(cpv)[:-1]:
							dep_uri = bintree.get_pkgindex_uri(cpv)
							if uri == dep_uri:
								break

				if uri and uri != dep_uri:
					# 1) Remote binary package is invalid because it was
					#    built without dep_pkg. Force rebuild.
					self.rebuild_list.add(root_slot)
					return True
				elif (parent.installed and
					root_slot not in self.reinstall_list):
					try:
						bin_build_time, = bindb.aux_get(parent.cpv,
							["BUILD_TIME"])
					except KeyError:
						continue
					if bin_build_time != _unicode(parent.build_time):
						# 2) Remote binary package is valid, and local package
						#    is not up to date. Force reinstall.
						reinstall = True
		if reinstall:
			self.reinstall_list.add(root_slot)
		return reinstall

	def trigger_rebuilds(self):
		"""
		Trigger rebuilds where necessary. If pkgA has been updated, and pkgB
		depends on pkgA at both build-time and run-time, pkgB needs to be
		rebuilt.

		Returns True when any rebuild was added, signalling that the
		resolver needs to restart.
		"""
		need_restart = False
		graph = self._graph
		build_deps = {}

		leaf_nodes = deque(graph.leaf_nodes())

		# Trigger rebuilds bottom-up (starting with the leaves) so that parents
		# will always know which children are being rebuilt.
		while graph:
			if not leaf_nodes:
				# We'll have to drop an edge. This should be quite rare.
				leaf_nodes.append(graph.order[-1])

			node = leaf_nodes.popleft()
			if node not in graph:
				# This can be triggered by circular dependencies.
				continue
			slot_atom = node.slot_atom

			# Remove our leaf node from the graph, keeping track of deps.
			parents = graph.parent_nodes(node)
			graph.remove(node)
			node_build_deps = build_deps.get(node, {})
			for parent in parents:
				if parent == node:
					# Ignore a direct cycle.
					continue
				parent_bdeps = build_deps.setdefault(parent, {})
				parent_bdeps[slot_atom] = node
				if not graph.child_nodes(parent):
					leaf_nodes.append(parent)

			# Trigger rebuilds for our leaf node. Because all of our children
			# have been processed, the build_deps will be completely filled in,
			# and self.rebuild_list / self.reinstall_list will tell us whether
			# any of our children need to be rebuilt or reinstalled.
			if self._trigger_rebuild(node, node_build_deps):
				need_restart = True

		return need_restart
class _dynamic_depgraph_config(object):
	"""
	Mutable depgraph state that is recreated for every resolution run
	(including each backtracking retry), in contrast with the shared
	_frozen_depgraph_config.  Holds the digraph itself, blocker
	bookkeeping, per-root tree views used by dep_check(), and the
	autounmask/backtracking state carried in backtrack_parameters.
	"""

	def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
		self.myparams = myparams.copy()
		self._vdb_loaded = False
		self._allow_backtracking = allow_backtracking
		# Maps nodes to the reasons they were selected for reinstallation.
		self._reinstall_nodes = {}
		# Contains a filtered view of preferred packages that are selected
		# from available repositories.
		self._filtered_trees = {}
		# Contains installed packages and new packages that have been added
		# to the graph.
		self._graph_trees = {}
		# Caches visible packages returned from _select_package, for use in
		# depgraph._iter_atoms_for_pkg() SLOT logic.
		self._visible_pkgs = {}
		#contains the args created by select_files
		self._initial_arg_list = []
		self.digraph = portage.digraph()
		# manages sets added to the graph
		self.sets = {}
		# contains all nodes pulled in by self.sets
		self._set_nodes = set()
		# Contains only Blocker -> Uninstall edges
		self._blocker_uninstalls = digraph()
		# Contains only Package -> Blocker edges
		self._blocker_parents = digraph()
		# Contains only irrelevant Package -> Blocker edges
		self._irrelevant_blockers = digraph()
		# Contains only unsolvable Package -> Blocker edges
		self._unsolvable_blockers = digraph()
		# Contains all Blocker -> Blocked Package edges
		self._blocked_pkgs = digraph()
		# Contains world packages that have been protected from
		# uninstallation but may not have been added to the graph
		# if the graph is not complete yet.
		self._blocked_world_pkgs = {}
		# Contains packages whose dependencies have been traversed.
		# This use used to check if we have accounted for blockers
		# relevant to a package.
		self._traversed_pkg_deps = set()
		self._parent_atoms = {}
		self._slot_conflict_handler = None
		self._circular_dependency_handler = None
		self._serialized_tasks_cache = None
		self._scheduler_graph = None
		self._displayed_list = None
		self._pprovided_args = []
		self._missing_args = []
		self._masked_installed = set()
		self._masked_license_updates = set()
		self._unsatisfied_deps_for_display = []
		self._unsatisfied_blockers_for_display = None
		self._circular_deps_for_display = None
		self._dep_stack = []
		self._dep_disjunctive_stack = []
		self._unsatisfied_deps = []
		self._initially_unsatisfied_deps = []
		self._ignored_deps = []
		self._highest_pkg_cache = {}
		self._highest_pkg_cache_cp_map = {}
		self._flatten_atoms_cache = {}

		# Binary packages that have been rejected because their USE
		# didn't match the user's config. It maps packages to a set
		# of flags causing the rejection.
		self.ignored_binaries = {}

		# State accumulated by the Backtracker across runs.
		self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
		self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
		self._needed_license_changes = backtrack_parameters.needed_license_changes
		self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
		self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
		self._slot_operator_replace_installed = backtrack_parameters.slot_operator_replace_installed
		self._prune_rebuilds = backtrack_parameters.prune_rebuilds
		self._need_restart = False
		# For conditions that always require user intervention, such as
		# unsatisfied REQUIRED_USE (currently has no autounmask support).
		self._skip_restart = False
		self._backtrack_infos = {}

		self._buildpkgonly_deps_unsatisfied = False
		self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
		self._success_without_autounmask = False
		self._required_use_unsatisfied = False
		self._traverse_ignored_deps = False
		self._complete_mode = False
		self._slot_operator_deps = {}
		self._installed_sonames = collections.defaultdict(list)
		self._package_tracker = PackageTracker(
			soname_deps=depgraph._frozen_config.soname_deps_enabled)
		# Track missed updates caused by solved conflicts.
		self._conflict_missed_update = collections.defaultdict(dict)

		for myroot in depgraph._frozen_config.trees:
			self.sets[myroot] = _depgraph_sets()
			vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
			# This dbapi instance will model the state that the vdb will
			# have after new packages have been installed.
			fakedb = PackageTrackerDbapiWrapper(myroot, self._package_tracker)

			# Bare function objects used as lightweight tree stand-ins;
			# dep_check() only reads their .dbapi attribute.
			def graph_tree():
				pass
			graph_tree.dbapi = fakedb
			self._graph_trees[myroot] = {}
			self._filtered_trees[myroot] = {}
			# Substitute the graph tree for the vartree in dep_check() since we
			# want atom selections to be consistent with package selections
			# have already been made.
			self._graph_trees[myroot]["porttree"] = graph_tree
			self._graph_trees[myroot]["vartree"] = graph_tree
			self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi
			self._graph_trees[myroot]["graph"] = self.digraph
			self._graph_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg
			self._graph_trees[myroot]["downgrade_probe"] = depgraph._downgrade_probe
			def filtered_tree():
				pass
			filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
			self._filtered_trees[myroot]["porttree"] = filtered_tree
			self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)

			# Passing in graph_tree as the vartree here could lead to better
			# atom selections in some cases by causing atoms for packages that
			# have been added to the graph to be preferred over other choices.
			# However, it can trigger atom selections that result in
			# unresolvable direct circular dependencies. For example, this
			# happens with gwydion-dylan which depends on either itself or
			# gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
			# gwydion-dylan-bin needs to be selected in order to avoid a
			# an unresolvable direct circular dependency.
			#
			# To solve the problem described above, pass in "graph_db" so that
			# packages that have been added to the graph are distinguishable
			# from other available packages and installed packages. Also, pass
			# the parent package into self._select_atoms() calls so that
			# unresolvable direct circular dependencies can be detected and
			# avoided when possible.
			self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
			self._filtered_trees[myroot]["graph"] = self.digraph
			self._filtered_trees[myroot]["vartree"] = \
				depgraph._frozen_config.trees[myroot]["vartree"]
			self._filtered_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg
			self._filtered_trees[myroot]["downgrade_probe"] = depgraph._downgrade_probe

			dbs = []
			# (db, pkg_type, built, installed, db_keys)
			if "remove" in self.myparams:
				# For removal operations, use _dep_check_composite_db
				# for availability and visibility checks. This provides
				# consistency with install operations, so we don't
				# get install/uninstall cycles like in bug #332719.
				self._graph_trees[myroot]["porttree"] = filtered_tree
			else:
				if "--usepkgonly" not in depgraph._frozen_config.myopts:
					portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
					db_keys = list(portdb._aux_cache_keys)
					dbs.append((portdb, "ebuild", False, False, db_keys))

				if "--usepkg" in depgraph._frozen_config.myopts:
					bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
					db_keys = list(bindb._aux_cache_keys)
					dbs.append((bindb, "binary", True, False, db_keys))

			# The installed-package db is always consulted last.
			vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
			db_keys = list(depgraph._frozen_config._trees_orig[myroot
				]["vartree"].dbapi._aux_cache_keys)
			dbs.append((vardb, "installed", True, True, db_keys))
			self._filtered_trees[myroot]["dbs"] = dbs
class depgraph(object):
	"""
	Core dependency resolver.  Combines an immutable
	_frozen_depgraph_config (shared across backtracking runs) with a
	mutable _dynamic_depgraph_config (recreated for each run).
	"""

	# Represents the depth of a node that is unreachable from explicit
	# user arguments (or their deep dependencies). Such nodes are pulled
	# in by the _complete_graph method.
	_UNREACHABLE_DEPTH = object()

	pkg_tree_map = RootConfig.pkg_tree_map

	def __init__(self, settings, trees, myopts, myparams, spinner,
		frozen_config=None, backtrack_parameters=None, allow_backtracking=False):
		"""
		@param frozen_config: reusable immutable configuration; built on
			demand when None (backtracking runs pass an existing one)
		@param backtrack_parameters: adjustments accumulated by the
			Backtracker; a fresh default instance is used when None
		"""
		# NOTE(fix): the default used to be a single class-level
		# BacktrackParameter() instance (mutable default argument).
		# Its mutable members (e.g. runtime_pkg_mask) are stored by
		# reference in the dynamic config, so a shared default could
		# leak state between depgraph instances.  Create a fresh
		# instance per call instead.
		if backtrack_parameters is None:
			backtrack_parameters = BacktrackParameter()
		if frozen_config is None:
			frozen_config = _frozen_depgraph_config(settings, trees,
				myopts, myparams, spinner)
		self._frozen_config = frozen_config
		self._dynamic_config = _dynamic_depgraph_config(self, myparams,
			allow_backtracking, backtrack_parameters)
		self._rebuild = _rebuild_config(frozen_config, backtrack_parameters)

		# Selection strategy hooks; highest-available is the default.
		self._select_atoms = self._select_atoms_highest_available
		self._select_package = self._select_pkg_highest_available

		# Use the global event loop only when portage itself is the main
		# caller; otherwise create a private, non-main loop.
		self._event_loop = (portage._internal_caller and
			global_event_loop() or EventLoop(main=False))

		self._select_atoms_parent = None

		self.query = UserQuery(myopts).query
def _index_binpkgs(self):
for root in self._frozen_config.trees:
bindb = self._frozen_config.trees[root]["bintree"].dbapi
if bindb._provides_index:
# don't repeat this when backtracking
continue
root_config = self._frozen_config.roots[root]
for cpv in self._frozen_config._trees_orig[
root]["bintree"].dbapi.cpv_all():
bindb._provides_inject(
self._pkg(cpv, "binary", root_config))
	def _load_vdb(self):
		"""
		Load installed package metadata if appropriate. This used to be called
		from the constructor, but that wasn't very nice since this procedure
		is slow and it generates spinner output. So, now it's called on-demand
		by various methods when necessary.
		"""

		if self._dynamic_config._vdb_loaded:
			return

		for myroot in self._frozen_config.trees:

			dynamic_deps = self._dynamic_config.myparams.get(
				"dynamic_deps", "y") != "n"
			preload_installed_pkgs = \
				"--nodeps" not in self._frozen_config.myopts

			fake_vartree = self._frozen_config.trees[myroot]["vartree"]
			if not fake_vartree.dbapi:
				# This needs to be called for the first depgraph, but not for
				# backtracking depgraphs that share the same frozen_config.
				fake_vartree.sync()

				# FakeVartree.sync() populates virtuals, and we want
				# self.pkgsettings to have them populated too.
				self._frozen_config.pkgsettings[myroot] = \
					portage.config(clone=fake_vartree.settings)

			if preload_installed_pkgs:
				vardb = fake_vartree.dbapi

				if not dynamic_deps:
					# Static deps: just register every installed package.
					for pkg in vardb:
						self._dynamic_config._package_tracker.add_installed_pkg(pkg)
						self._add_installed_sonames(pkg)
				else:
					# Dynamic deps: schedule metadata regeneration tasks
					# (possibly in parallel, honoring --jobs/--load-average).
					max_jobs = self._frozen_config.myopts.get("--jobs")
					max_load = self._frozen_config.myopts.get("--load-average")
					scheduler = TaskScheduler(
						self._dynamic_deps_preload(fake_vartree),
						max_jobs=max_jobs,
						max_load=max_load,
						event_loop=fake_vartree._portdb._event_loop)
					scheduler.start()
					scheduler.wait()

		self._dynamic_config._vdb_loaded = True
	def _dynamic_deps_preload(self, fake_vartree):
		"""
		Generator consumed by the TaskScheduler in _load_vdb: registers
		each installed package with the package tracker and preloads its
		dynamic dependency metadata.  Packages with a valid ebuild cache
		entry are preloaded immediately; packages whose cache is missing
		or stale yield an EbuildMetadataPhase task whose exit listener
		(_dynamic_deps_proc_exit) performs the preload.
		"""
		portdb = fake_vartree._portdb
		for pkg in fake_vartree.dbapi:
			self._spinner_update()
			self._dynamic_config._package_tracker.add_installed_pkg(pkg)
			self._add_installed_sonames(pkg)
			ebuild_path, repo_path = \
				portdb.findname2(pkg.cpv, myrepo=pkg.repo)
			if ebuild_path is None:
				# No matching ebuild exists; fall back to the installed
				# package's own metadata.
				fake_vartree.dynamic_deps_preload(pkg, None)
				continue
			metadata, ebuild_hash = portdb._pull_valid_cache(
				pkg.cpv, ebuild_path, repo_path)
			if metadata is not None:
				fake_vartree.dynamic_deps_preload(pkg, metadata)
			else:
				# Cache miss: regenerate metadata asynchronously.
				proc = EbuildMetadataPhase(cpv=pkg.cpv,
					ebuild_hash=ebuild_hash,
					portdb=portdb, repo_path=repo_path,
					settings=portdb.doebuild_settings)
				proc.addExitListener(
					self._dynamic_deps_proc_exit(pkg, fake_vartree))
				yield proc
class _dynamic_deps_proc_exit(object):
__slots__ = ('_pkg', '_fake_vartree')
def __init__(self, pkg, fake_vartree):
self._pkg = pkg
self._fake_vartree = fake_vartree
def __call__(self, proc):
metadata = None
if proc.returncode == os.EX_OK:
metadata = proc.metadata
self._fake_vartree.dynamic_deps_preload(self._pkg, metadata)
def _spinner_update(self):
if self._frozen_config.spinner:
self._frozen_config.spinner.update()
	def _compute_abi_rebuild_info(self):
		"""
		Fill self._forced_rebuilds with packages that cause rebuilds.

		The result maps root -> child package -> set of parent packages
		whose rebuild was forced by that child (via soname requirements
		or built slot-operator dependencies).
		"""

		debug = "--debug" in self._frozen_config.myopts
		installed_sonames = self._dynamic_config._installed_sonames
		package_tracker = self._dynamic_config._package_tracker

		# Get all atoms that might have caused a forced rebuild.
		atoms = {}
		for s in self._dynamic_config._initial_arg_list:
			if s.force_reinstall:
				root = s.root_config.root
				atoms.setdefault(root, set()).update(s.pset)

		if debug:
			writemsg_level("forced reinstall atoms:\n",
				level=logging.DEBUG, noiselevel=-1)

			for root in atoms:
				writemsg_level("  root: %s\n" % root,
					level=logging.DEBUG, noiselevel=-1)
				for atom in atoms[root]:
					writemsg_level("    atom: %s\n" % atom,
						level=logging.DEBUG, noiselevel=-1)
			writemsg_level("\n\n",
				level=logging.DEBUG, noiselevel=-1)

		# Go through all slot operator deps and check if one of these deps
		# has a parent that is matched by one of the atoms from above.
		forced_rebuilds = {}

		for root, rebuild_atoms in atoms.items():
			for slot_atom in rebuild_atoms:

				inst_pkg, reinst_pkg = \
					self._select_pkg_from_installed(root, slot_atom)

				if inst_pkg is reinst_pkg or reinst_pkg is None:
					continue

				if (inst_pkg is not None and
					inst_pkg.requires is not None):
					for atom in inst_pkg.requires:
						initial_providers = installed_sonames.get(
							(root, atom))
						if initial_providers is None:
							continue
						final_provider = next(
							package_tracker.match(root, atom),
							None)
						if final_provider:
							# The soname is still provided after the
							# update, so no rebuild was forced by it.
							continue
						for provider in initial_providers:
							# Find the replacement child.
							child = next((pkg for pkg in
								package_tracker.match(
								root, provider.slot_atom)
								if not pkg.installed), None)

							if child is None:
								continue

							forced_rebuilds.setdefault(
								root, {}).setdefault(
								child, set()).add(inst_pkg)

				# Generate pseudo-deps for any slot-operator deps of
				# inst_pkg. Its deps aren't in _slot_operator_deps
				# because it hasn't been added to the graph, but we
				# are interested in any rebuilds that it triggered.
				built_slot_op_atoms = []
				if inst_pkg is not None:
					selected_atoms = self._select_atoms_probe(
						inst_pkg.root, inst_pkg)
					for atom in selected_atoms:
						if atom.slot_operator_built:
							built_slot_op_atoms.append(atom)

					if not built_slot_op_atoms:
						continue

				# Use a cloned list, since we may append to it below.
				deps = self._dynamic_config._slot_operator_deps.get(
					(root, slot_atom), [])[:]

				if built_slot_op_atoms and reinst_pkg is not None:
					for child in self._dynamic_config.digraph.child_nodes(
						reinst_pkg):

						if child.installed:
							continue

						for atom in built_slot_op_atoms:
							# NOTE: Since atom comes from inst_pkg, and
							# reinst_pkg is the replacement parent, there's
							# no guarantee that atom will completely match
							# child. So, simply use atom.cp and atom.slot
							# for matching.
							if atom.cp != child.cp:
								continue
							if atom.slot and atom.slot != child.slot:
								continue
							deps.append(Dependency(atom=atom, child=child,
								root=child.root, parent=reinst_pkg))

				for dep in deps:
					if dep.child.installed:
						# Find the replacement child.
						child = next((pkg for pkg in
							self._dynamic_config._package_tracker.match(
							dep.root, dep.child.slot_atom)
							if not pkg.installed), None)

						if child is None:
							continue

						inst_child = dep.child

					else:
						child = dep.child
						inst_child = self._select_pkg_from_installed(
							child.root, child.slot_atom)[0]

					# Make sure the child's slot/subslot has changed. If it
					# hasn't, then another child has forced this rebuild.
					if inst_child and inst_child.slot == child.slot and \
						inst_child.sub_slot == child.sub_slot:
						continue

					if dep.parent.installed:
						# Find the replacement parent.
						parent = next((pkg for pkg in
							self._dynamic_config._package_tracker.match(
							dep.parent.root, dep.parent.slot_atom)
							if not pkg.installed), None)

						if parent is None:
							continue

					else:
						parent = dep.parent

					# The child has forced a rebuild of the parent
					forced_rebuilds.setdefault(root, {}
						).setdefault(child, set()).add(parent)

		if debug:
			writemsg_level("slot operator dependencies:\n",
				level=logging.DEBUG, noiselevel=-1)

			for (root, slot_atom), deps in self._dynamic_config._slot_operator_deps.items():
				writemsg_level("   (%s, %s)\n" % \
					(root, slot_atom), level=logging.DEBUG, noiselevel=-1)
				for dep in deps:
					writemsg_level("      parent: %s\n" % dep.parent, level=logging.DEBUG, noiselevel=-1)
					writemsg_level("        child: %s (%s)\n" % (dep.child, dep.priority), level=logging.DEBUG, noiselevel=-1)

			writemsg_level("\n\n",
				level=logging.DEBUG, noiselevel=-1)

			writemsg_level("forced rebuilds:\n",
				level=logging.DEBUG, noiselevel=-1)

			for root in forced_rebuilds:
				writemsg_level("  root: %s\n" % root,
					level=logging.DEBUG, noiselevel=-1)
				for child in forced_rebuilds[root]:
					writemsg_level("    child: %s\n" % child,
						level=logging.DEBUG, noiselevel=-1)
					for parent in forced_rebuilds[root][child]:
						writemsg_level("      parent: %s\n" % parent,
							level=logging.DEBUG, noiselevel=-1)
			writemsg_level("\n\n",
				level=logging.DEBUG, noiselevel=-1)

		self._forced_rebuilds = forced_rebuilds
def _show_abi_rebuild_info(self):
    """Report which packages are forcing slot-operator rebuilds of
    their reverse dependencies, as recorded in self._forced_rebuilds.
    """
    if not self._forced_rebuilds:
        return
    writemsg_stdout("\nThe following packages are causing rebuilds:\n\n", noiselevel=-1)
    # _forced_rebuilds maps root -> child -> set of rebuilt parents.
    for child_map in self._forced_rebuilds.values():
        for child, parents in child_map.items():
            writemsg_stdout(" %s causes rebuilds for:\n" % (child,), noiselevel=-1)
            for parent in parents:
                writemsg_stdout(" %s\n" % (parent,), noiselevel=-1)
def _show_ignored_binaries(self):
    """
    Show binaries that have been ignored because their USE didn't
    match the user's config.
    """
    if (not self._dynamic_config.ignored_binaries
        or '--quiet' in self._frozen_config.myopts):
        return
    # Regroup the still-relevant ignored binaries by ignore reason,
    # discarding entries that were obsoleted by the actual selections.
    by_reason = {}
    for pkg in list(self._dynamic_config.ignored_binaries):
        superseded = False
        for selected_pkg in self._dynamic_config._package_tracker.match(
            pkg.root, pkg.slot_atom):
            # A higher selection, or an identical installed instance
            # filling the slot, makes this ignored binary irrelevant.
            same_installed = (selected_pkg.installed
                and selected_pkg.cpv == pkg.cpv
                and selected_pkg.build_time == pkg.build_time)
            if selected_pkg > pkg or same_installed:
                self._dynamic_config.ignored_binaries.pop(pkg)
                superseded = True
                break
        if superseded:
            continue
        for reason, info in self._dynamic_config.ignored_binaries[pkg].items():
            by_reason.setdefault(reason, {})[pkg] = info
    # An explicit y/n setting means the user already made a choice,
    # so the corresponding warning would only be noise.
    if self._dynamic_config.myparams.get("binpkg_respect_use") in ("y", "n"):
        by_reason.pop("respect_use", None)
    if self._dynamic_config.myparams.get("binpkg_changed_deps") in ("y", "n"):
        by_reason.pop("changed_deps", None)
    if not by_reason:
        return
    self._show_merge_list()
    respect_use = by_reason.get("respect_use")
    if respect_use:
        self._show_ignored_binaries_respect_use(respect_use)
    changed_deps = by_reason.get("changed_deps")
    if changed_deps:
        self._show_ignored_binaries_changed_deps(changed_deps)
def _show_ignored_binaries_respect_use(self, respect_use):
    """Warn about binary packages that were skipped because their USE
    flags do not match the current configuration.

    @param respect_use: mapping of ignored binary package -> iterable
        of the USE flags that mismatched
    """
    writemsg("\n!!! The following binary packages have been ignored "
        "due to non matching USE:\n\n", noiselevel=-1)
    for pkg, flags in respect_use.items():
        # Show disabled flags with a "-" prefix, reflecting the binary
        # package's own USE state.
        flag_display = " ".join(
            flag if flag in pkg.use.enabled else "-" + flag
            for flag in sorted(flags))
        # The user can paste this line into package.use
        writemsg(" =%s %s" % (pkg.cpv, flag_display), noiselevel=-1)
        if pkg.root_config.settings["ROOT"] != "/":
            writemsg(" # for %s" % (pkg.root,), noiselevel=-1)
        writemsg("\n", noiselevel=-1)
    note_lines = (
        "",
        "NOTE: The --binpkg-respect-use=n option will prevent emerge",
        " from ignoring these binary packages if possible.",
        " Using --binpkg-respect-use=y will silence this warning."
    )
    for line in note_lines:
        if line:
            line = colorize("INFORM", line)
        writemsg(line + "\n", noiselevel=-1)
def _show_ignored_binaries_changed_deps(self, changed_deps):
    """Warn about binary packages that were skipped because their
    recorded dependencies no longer match the corresponding ebuild's.

    @param changed_deps: iterable of the ignored binary packages
    """
    writemsg("\n!!! The following binary packages have been "
        "ignored due to changed dependencies:\n\n",
        noiselevel=-1)
    for pkg in changed_deps:
        line = " %s%s%s" % (pkg.cpv, _repo_separator, pkg.repo)
        # Mention the target root when it is not the default "/".
        if pkg.root_config.settings["ROOT"] != "/":
            line = "%s for %s" % (line, pkg.root)
        writemsg("%s\n" % line, noiselevel=-1)
    note_lines = (
        "",
        "NOTE: The --binpkg-changed-deps=n option will prevent emerge",
        " from ignoring these binary packages if possible.",
        " Using --binpkg-changed-deps=y will silence this warning."
    )
    for line in note_lines:
        if line:
            line = colorize("INFORM", line)
        writemsg(line + "\n", noiselevel=-1)
def _get_missed_updates(self):
# In order to minimize noise, show only the highest
# missed update from each SLOT.
missed_updates = {}
for pkg, mask_reasons in \
chain(self._dynamic_config._runtime_pkg_mask.items(),
self._dynamic_config._conflict_missed_update.items()):
if pkg.installed:
# Exclude installed here since we only
# want to show available updates.
continue
missed_update = True
any_selected = False
for chosen_pkg in self._dynamic_config._package_tracker.match(
pkg.root, pkg.slot_atom):
any_selected = True
if chosen_pkg > pkg or (not chosen_pkg.installed and \
chosen_pkg.version == pkg.version):
missed_update = False
break
if any_selected and missed_update:
k = (pkg.root, pkg.slot_atom)
if k in missed_updates:
other_pkg, mask_type, parent_atoms = missed_updates[k]
if other_pkg > pkg:
continue
for mask_type, parent_atoms in mask_reasons.items():
if not parent_atoms:
continue
missed_updates[k] = (pkg, mask_type, parent_atoms)
break
return missed_updates
def _show_missed_update(self):
missed_updates = self._get_missed_updates()