blob: aa6876bb3fc5bd6c9262e7955e45b4262eed9948 [file] [log] [blame]
David Jamesfcb70ef2011-02-02 16:02:30 -08001#!/usr/bin/python2.6
2# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
3# Use of this source code is governed by a BSD-style license that can be
4# found in the LICENSE file.
5
6"""Program to run emerge in parallel, for significant speedup.
7
8Usage:
David James386ccd12011-05-04 20:17:42 -07009 ./parallel_emerge [--board=BOARD] [--workon=PKGS]
David Jamesfcb70ef2011-02-02 16:02:30 -080010 [--force-remote-binary=PKGS] [emerge args] package
11
David James78b6cd92012-04-02 21:36:12 -070012This script runs multiple emerge processes in parallel, using appropriate
13Portage APIs. It is faster than standard emerge because it has a
14multiprocess model instead of an asynchronous model.
David Jamesfcb70ef2011-02-02 16:02:30 -080015"""
16
17import codecs
18import copy
19import errno
David James8c7e5e32011-06-28 11:26:03 -070020import heapq
David Jamesfcb70ef2011-02-02 16:02:30 -080021import multiprocessing
22import os
23import Queue
David Jamesfcb70ef2011-02-02 16:02:30 -080024import signal
25import sys
26import tempfile
27import time
28import traceback
David Jamesfcb70ef2011-02-02 16:02:30 -080029
30# If PORTAGE_USERNAME isn't specified, scrape it from the $HOME variable. On
31# Chromium OS, the default "portage" user doesn't have the necessary
32# permissions. It'd be easier if we could default to $USERNAME, but $USERNAME
33# is "root" here because we get called through sudo.
34#
35# We need to set this before importing any portage modules, because portage
36# looks up "PORTAGE_USERNAME" at import time.
37#
38# NOTE: .bashrc sets PORTAGE_USERNAME = $USERNAME, so most people won't
39# encounter this case unless they have an old chroot or blow away the
40# environment by running sudo without the -E specifier.
# Derive the Portage user from the basename of $HOME (e.g. /home/foo -> foo).
# Silently skipped when $HOME is unset or empty, leaving portage's default.
if "PORTAGE_USERNAME" not in os.environ:
  homedir = os.environ.get("HOME")
  if homedir:
    os.environ["PORTAGE_USERNAME"] = os.path.basename(homedir)
45
46# Portage doesn't expose dependency trees in its public API, so we have to
47# make use of some private APIs here. These modules are found under
48# /usr/lib/portage/pym/.
49#
50# TODO(davidjames): Update Portage to expose public APIs for these features.
51from _emerge.actions import adjust_configs
52from _emerge.actions import load_emerge_config
53from _emerge.create_depgraph_params import create_depgraph_params
David James386ccd12011-05-04 20:17:42 -070054from _emerge.depgraph import backtrack_depgraph
David Jamesfcb70ef2011-02-02 16:02:30 -080055from _emerge.main import emerge_main
56from _emerge.main import parse_opts
57from _emerge.Package import Package
58from _emerge.Scheduler import Scheduler
David Jamesfcb70ef2011-02-02 16:02:30 -080059from _emerge.stdout_spinner import stdout_spinner
David James386ccd12011-05-04 20:17:42 -070060from portage._global_updates import _global_updates
David Jamesfcb70ef2011-02-02 16:02:30 -080061import portage
62import portage.debug
David Jamesfcb70ef2011-02-02 16:02:30 -080063
David Jamesfcb70ef2011-02-02 16:02:30 -080064def Usage():
65 """Print usage."""
66 print "Usage:"
David James386ccd12011-05-04 20:17:42 -070067 print " ./parallel_emerge [--board=BOARD] [--workon=PKGS]"
David Jamesfcb70ef2011-02-02 16:02:30 -080068 print " [--rebuild] [emerge args] package"
69 print
70 print "Packages specified as workon packages are always built from source."
David Jamesfcb70ef2011-02-02 16:02:30 -080071 print
72 print "The --workon argument is mainly useful when you want to build and"
73 print "install packages that you are working on unconditionally, but do not"
74 print "to have to rev the package to indicate you want to build it from"
75 print "source. The build_packages script will automatically supply the"
76 print "workon argument to emerge, ensuring that packages selected using"
77 print "cros-workon are rebuilt."
78 print
79 print "The --rebuild option rebuilds packages whenever their dependencies"
80 print "are changed. This ensures that your build is correct."
81 sys.exit(1)
82
83
# Global start time, recorded once at module import.
GLOBAL_START = time.time()

# Whether process has been killed by a signal. A multiprocessing.Event so the
# flag is visible across worker processes.
# NOTE(review): the signal handler that sets this is not visible in this
# chunk -- confirm against the rest of the file.
KILLED = multiprocessing.Event()
89
David Jamesfcb70ef2011-02-02 16:02:30 -080090
class EmergeData(object):
  """Plain data holder for the emerge state we pass around.

  Bundles the variables used for dependency calculation and package
  installation so they can travel as a single unit.

  Field semantics (all start as None and are filled in later):

  - action: the requested emerge action. None when installing packages;
    otherwise the long-form name of the associated emerge option (e.g.
    parallel_emerge --unmerge package -> "unmerge").
  - cmdline_packages: the list of packages passed on the command line.
  - depgraph: the emerge dependency graph -- every package involved in this
    merge, along with versions.
  - favorites: the list of candidates to add to the world file.
  - opts: dict of emerge options, already cleaned up by parse_opts. Note the
    cleanup shortcuts: e.g. "--usepkg=n" is simply dropped from the dict, so
    presence of "--usepkg" implies it is enabled; some flags such as
    "--with-bdeps" are passed through as-is. See _emerge.main.parse_opts.
  - mtimedb: portage's persistent global-state database (loaded from disk at
    startup, saved via mtimedb.commit()). It records global updates and the
    current operation for --resume; parallel_emerge never records its own
    state there, so --resume is unsupported.
  - root_config: portage configuration for the current root -- the settings
    plus the three portage trees for that root.
  - scheduler_graph: install-order graph handed to the Scheduler object. We
    don't actually install any deps, so it is mostly unused, but we pass it
    in anyway.
  - settings: portage settings for this session, mostly from make.conf in the
    current install root.
  - spinner: portage's progress spinner; we keep our own, so this one is put
    in silent mode.
  - trees: per-root portage trees, indexed as self.trees[root] where
    root = self.settings["ROOT"]. Each root holds three trees:
      vartree  -- database of currently-installed packages,
      porttree -- database of ebuilds (buildable packages),
      bintree  -- database of binary packages.
  """

  __slots__ = ["action", "cmdline_packages", "depgraph", "favorites",
               "mtimedb", "opts", "root_config", "scheduler_graph",
               "settings", "spinner", "trees"]

  def __init__(self):
    # Every slot begins unset; Initialize()/CreateDepgraph populate them.
    for attr in self.__slots__:
      setattr(self, attr, None)
181
182
183class DepGraphGenerator(object):
184 """Grab dependency information about packages from portage.
185
186 Typical usage:
187 deps = DepGraphGenerator()
188 deps.Initialize(sys.argv[1:])
189 deps_tree, deps_info = deps.GenDependencyTree()
190 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
191 deps.PrintTree(deps_tree)
192 PrintDepsMap(deps_graph)
193 """
194
David James386ccd12011-05-04 20:17:42 -0700195 __slots__ = ["board", "emerge", "package_db", "show_output"]
David Jamesfcb70ef2011-02-02 16:02:30 -0800196
197 def __init__(self):
198 self.board = None
199 self.emerge = EmergeData()
David Jamesfcb70ef2011-02-02 16:02:30 -0800200 self.package_db = {}
David Jamesfcb70ef2011-02-02 16:02:30 -0800201 self.show_output = False
David Jamesfcb70ef2011-02-02 16:02:30 -0800202
203 def ParseParallelEmergeArgs(self, argv):
204 """Read the parallel emerge arguments from the command-line.
205
206 We need to be compatible with emerge arg format. We scrape arguments that
207 are specific to parallel_emerge, and pass through the rest directly to
208 emerge.
209 Args:
210 argv: arguments list
211 Returns:
212 Arguments that don't belong to parallel_emerge
213 """
214 emerge_args = []
215 for arg in argv:
216 # Specifically match arguments that are specific to parallel_emerge, and
217 # pass through the rest.
218 if arg.startswith("--board="):
219 self.board = arg.replace("--board=", "")
220 elif arg.startswith("--workon="):
221 workon_str = arg.replace("--workon=", "")
David James7a1ea4b2011-10-13 15:06:41 -0700222 emerge_args.append("--reinstall-atoms=%s" % workon_str)
223 emerge_args.append("--usepkg-exclude=%s" % workon_str)
David Jamesfcb70ef2011-02-02 16:02:30 -0800224 elif arg.startswith("--force-remote-binary="):
225 force_remote_binary = arg.replace("--force-remote-binary=", "")
David James7a1ea4b2011-10-13 15:06:41 -0700226 emerge_args.append("--useoldpkg-atoms=%s" % force_remote_binary)
David Jamesfcb70ef2011-02-02 16:02:30 -0800227 elif arg == "--show-output":
228 self.show_output = True
David James386ccd12011-05-04 20:17:42 -0700229 elif arg == "--rebuild":
David James7a1ea4b2011-10-13 15:06:41 -0700230 emerge_args.append("--rebuild-if-unbuilt")
David Jamesfcb70ef2011-02-02 16:02:30 -0800231 else:
232 # Not one of our options, so pass through to emerge.
233 emerge_args.append(arg)
234
David James386ccd12011-05-04 20:17:42 -0700235 # These packages take a really long time to build, so, for expediency, we
236 # are blacklisting them from automatic rebuilds because one of their
237 # dependencies needs to be recompiled.
238 for pkg in ("chromeos-base/chromeos-chrome", "media-plugins/o3d",
239 "dev-java/icedtea"):
David James7a1ea4b2011-10-13 15:06:41 -0700240 emerge_args.append("--rebuild-exclude=%s" % pkg)
David Jamesfcb70ef2011-02-02 16:02:30 -0800241
242 return emerge_args
243
  def Initialize(self, args):
    """Initializer. Parses arguments and sets up portage state.

    Args:
      args: Full command line, including parallel_emerge-specific flags.

    Side effects: mutates os.environ, loads the portage configuration from
    disk, and populates self.emerge. Exits the process on unsupported
    options. Statement order matters here: environment variables must be in
    place before load_emerge_config() reads them.
    """

    # Parse and strip out args that are just intended for parallel_emerge.
    emerge_args = self.ParseParallelEmergeArgs(args)

    # Setup various environment variables based on our current board. These
    # variables are normally setup inside emerge-${BOARD}, but since we don't
    # call that script, we have to set it up here. These variables serve to
    # point our tools at /build/BOARD and to setup cross compiles to the
    # appropriate board as configured in toolchain.conf.
    if self.board:
      os.environ["PORTAGE_CONFIGROOT"] = "/build/" + self.board
      os.environ["PORTAGE_SYSROOT"] = "/build/" + self.board
      os.environ["SYSROOT"] = "/build/" + self.board

      # Although CHROMEOS_ROOT isn't specific to boards, it's normally setup
      # inside emerge-${BOARD}, so we set it up here for compatibility. It
      # will be going away soon as we migrate to CROS_WORKON_SRCROOT.
      os.environ.setdefault("CHROMEOS_ROOT", os.environ["HOME"] + "/trunk")

    # Turn off interactive delays
    os.environ["EBEEP_IGNORE"] = "1"
    os.environ["EPAUSE_IGNORE"] = "1"
    os.environ["UNMERGE_DELAY"] = "0"

    # Parse the emerge options (silently for now -- we re-parse below once
    # EMERGE_DEFAULT_OPTS is known, after the config is loaded).
    action, opts, cmdline_packages = parse_opts(emerge_args, silent=True)

    # Set environment variables based on options. Portage normally sets these
    # environment variables in emerge_main, but we can't use that function,
    # because it also does a bunch of other stuff that we don't want.
    # TODO(davidjames): Patch portage to move this logic into a function we can
    # reuse here.
    if "--debug" in opts:
      os.environ["PORTAGE_DEBUG"] = "1"
    if "--config-root" in opts:
      os.environ["PORTAGE_CONFIGROOT"] = opts["--config-root"]
    if "--root" in opts:
      os.environ["ROOT"] = opts["--root"]
    if "--accept-properties" in opts:
      os.environ["ACCEPT_PROPERTIES"] = opts["--accept-properties"]

    # If we're installing packages to the board, and we're not using the
    # official flag, we can disable vardb locks. This is safe because we
    # only run up to one instance of parallel_emerge in parallel.
    if self.board and os.environ.get("CHROMEOS_OFFICIAL") != "1":
      os.environ.setdefault("PORTAGE_LOCKS", "false")

    # Now that we've setup the necessary environment variables, we can load the
    # emerge config from disk.
    settings, trees, mtimedb = load_emerge_config()

    # Add in EMERGE_DEFAULT_OPTS, if specified (re-parse so the defaults are
    # merged ahead of the explicit command-line args).
    tmpcmdline = []
    if "--ignore-default-opts" not in opts:
      tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
    tmpcmdline.extend(emerge_args)
    action, opts, cmdline_packages = parse_opts(tmpcmdline)

    # If we're installing to the board, we want the --root-deps option so that
    # portage will install the build dependencies to that location as well.
    if self.board:
      opts.setdefault("--root-deps", True)

    # Check whether our portage tree is out of date. Typically, this happens
    # when you're setting up a new portage tree, such as in setup_board and
    # make_chroot. In that case, portage applies a bunch of global updates
    # here. Once the updates are finished, we need to commit any changes
    # that the global update made to our mtimedb, and reload the config.
    #
    # Portage normally handles this logic in emerge_main, but again, we can't
    # use that function here.
    if _global_updates(trees, mtimedb["updates"]):
      mtimedb.commit()
      settings, trees, mtimedb = load_emerge_config(trees=trees)

    # Setup implied options. Portage normally handles this logic in
    # emerge_main.
    if "--buildpkgonly" in opts or "buildpkg" in settings.features:
      opts.setdefault("--buildpkg", True)
    if "--getbinpkgonly" in opts:
      opts.setdefault("--usepkgonly", True)
      opts.setdefault("--getbinpkg", True)
    if "getbinpkg" in settings.features:
      # Per emerge_main, FEATURES=getbinpkg overrides --getbinpkg=n
      opts["--getbinpkg"] = True
    if "--getbinpkg" in opts or "--usepkgonly" in opts:
      opts.setdefault("--usepkg", True)
    if "--fetch-all-uri" in opts:
      opts.setdefault("--fetchonly", True)
    if "--skipfirst" in opts:
      opts.setdefault("--resume", True)
    if "--buildpkgonly" in opts:
      # --buildpkgonly will not merge anything, so it overrides all binary
      # package options.
      for opt in ("--getbinpkg", "--getbinpkgonly",
                  "--usepkg", "--usepkgonly"):
        opts.pop(opt, None)
    if (settings.get("PORTAGE_DEBUG", "") == "1" and
        "python-trace" in settings.features):
      portage.debug.set_trace(True)

    # Complain about unsupported options
    for opt in ("--ask", "--ask-enter-invalid", "--resume", "--skipfirst"):
      if opt in opts:
        print "%s is not supported by parallel_emerge" % opt
        sys.exit(1)

    # Make emerge specific adjustments to the config (e.g. colors!)
    adjust_configs(opts, trees)

    # Save our configuration so far in the emerge object
    emerge = self.emerge
    emerge.action, emerge.opts = action, opts
    emerge.settings, emerge.trees, emerge.mtimedb = settings, trees, mtimedb
    emerge.cmdline_packages = cmdline_packages
    root = settings["ROOT"]
    emerge.root_config = trees[root]["root_config"]

    # Pre-populate the binary package tree so later lookups see it; the flag
    # tells populate() whether to also fetch the remote package index.
    if "--usepkg" in opts:
      emerge.trees[root]["bintree"].populate("--getbinpkg" in opts)
366
David Jamesfcb70ef2011-02-02 16:02:30 -0800367 def CreateDepgraph(self, emerge, packages):
368 """Create an emerge depgraph object."""
369 # Setup emerge options.
370 emerge_opts = emerge.opts.copy()
371
David James386ccd12011-05-04 20:17:42 -0700372 # Ask portage to build a dependency graph. with the options we specified
373 # above.
David Jamesfcb70ef2011-02-02 16:02:30 -0800374 params = create_depgraph_params(emerge_opts, emerge.action)
David Jamesbf1e3442011-05-28 07:44:20 -0700375 success, depgraph, favorites = backtrack_depgraph(
David James386ccd12011-05-04 20:17:42 -0700376 emerge.settings, emerge.trees, emerge_opts, params, emerge.action,
377 packages, emerge.spinner)
378 emerge.depgraph = depgraph
David Jamesfcb70ef2011-02-02 16:02:30 -0800379
David James386ccd12011-05-04 20:17:42 -0700380 # Is it impossible to honor the user's request? Bail!
381 if not success:
382 depgraph.display_problems()
383 sys.exit(1)
David Jamesfcb70ef2011-02-02 16:02:30 -0800384
385 emerge.depgraph = depgraph
David Jamesbf1e3442011-05-28 07:44:20 -0700386 emerge.favorites = favorites
David Jamesfcb70ef2011-02-02 16:02:30 -0800387
David Jamesdeebd692011-05-09 17:02:52 -0700388 # Prime and flush emerge caches.
389 root = emerge.settings["ROOT"]
390 vardb = emerge.trees[root]["vartree"].dbapi
David James0bdc5de2011-05-12 16:22:26 -0700391 if "--pretend" not in emerge.opts:
392 vardb.counter_tick()
David Jamesdeebd692011-05-09 17:02:52 -0700393 vardb.flush_cache()
394
  def GenDependencyTree(self):
    """Get dependency tree info from emerge.

    Returns:
      A (deps_tree, deps_info) pair. deps_tree maps each package CPV to a
      dict with its "action" and a nested "deps" dict (each dep records its
      action, deptypes, and an empty "deps" placeholder). deps_info maps
      each CPV in portage's install plan to {"idx": <install order>}.
    """
    start = time.time()

    emerge = self.emerge

    # Create a list of packages to merge
    packages = set(emerge.cmdline_packages[:])

    # Tell emerge to be quiet. We print plenty of info ourselves so we don't
    # need any extra output from portage.
    portage.util.noiselimit = -1

    # My favorite feature: The silent spinner. It doesn't spin. Ever.
    # I'd disable the colors by default too, but they look kind of cool.
    emerge.spinner = stdout_spinner()
    emerge.spinner.update = emerge.spinner.update_quiet

    if "--quiet" not in emerge.opts:
      print "Calculating deps..."

    self.CreateDepgraph(emerge, packages)
    depgraph = emerge.depgraph

    # Build our own tree from the emerge digraph.
    # NOTE: _dynamic_config is a private portage API -- see the module-level
    # TODO about exposing public equivalents.
    deps_tree = {}
    digraph = depgraph._dynamic_config.digraph
    root = emerge.settings["ROOT"]
    final_db = depgraph._dynamic_config.mydbapi[root]
    for node, node_deps in digraph.nodes.items():
      # Calculate dependency packages that need to be installed first. Each
      # child on the digraph is a dependency. The "operation" field specifies
      # what we're doing (e.g. merge, uninstall, etc.). The "priorities" array
      # contains the type of dependency (e.g. build, runtime, runtime_post,
      # etc.)
      #
      # Portage refers to the identifiers for packages as a CPV. This acronym
      # stands for Component/Path/Version.
      #
      # Here's an example CPV: chromeos-base/power_manager-0.0.1-r1
      # Split up, this CPV would be:
      # C -- Component: chromeos-base
      # P -- Path: power_manager
      # V -- Version: 0.0.1-r1
      #
      # We just refer to CPVs as packages here because it's easier.
      deps = {}
      for child, priorities in node_deps[0].items():
        # Only track real Package nodes for our root; other node types
        # (e.g. sets, blockers for other roots) are skipped.
        if isinstance(child, Package) and child.root == root:
          cpv = str(child.cpv)
          action = str(child.operation)

          # If we're uninstalling a package, check whether Portage is
          # installing a replacement. If so, just depend on the installation
          # of the new package, because the old package will automatically
          # be uninstalled at that time.
          if action == "uninstall":
            for pkg in final_db.match_pkgs(child.slot_atom):
              cpv = str(pkg.cpv)
              action = "merge"
              break

          deps[cpv] = dict(action=action,
                           deptypes=[str(x) for x in priorities],
                           deps={})

      # We've built our list of deps, so we can add our package to the tree.
      if isinstance(node, Package) and node.root == root:
        deps_tree[str(node.cpv)] = dict(action=str(node.operation),
                                        deps=deps)

    # Ask portage for its install plan, so that we can only throw out
    # dependencies that portage throws out.
    deps_info = {}
    for pkg in depgraph.altlist():
      if isinstance(pkg, Package):
        assert pkg.root == root
        self.package_db[pkg.cpv] = pkg

        # Save off info about the package: "idx" is the package's position
        # in portage's install order (len grows by one per entry).
        deps_info[str(pkg.cpv)] = {"idx": len(deps_info)}

    seconds = time.time() - start
    if "--quiet" not in emerge.opts:
      print "Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60)

    return deps_tree, deps_info
486
487 def PrintTree(self, deps, depth=""):
488 """Print the deps we have seen in the emerge output.
489
490 Args:
491 deps: Dependency tree structure.
492 depth: Allows printing the tree recursively, with indentation.
493 """
494 for entry in sorted(deps):
495 action = deps[entry]["action"]
496 print "%s %s (%s)" % (depth, entry, action)
497 self.PrintTree(deps[entry]["deps"], depth=depth + " ")
498
  def GenDependencyGraph(self, deps_tree, deps_info):
    """Generate a doubly linked dependency graph.

    Args:
      deps_tree: Dependency tree structure, as built by GenDependencyTree.
      deps_info: More details on the dependencies (install-order "idx").
    Returns:
      Deps graph in the form of a dict of packages, with each package
      specifying a "needs" dict (package -> deptype string) and a
      "provides" set (reverse dependencies), plus "action", "nodeps",
      "binary", "tprovides" (transitive provides), and -- for packages in
      the install plan -- "idx".
    """
    emerge = self.emerge
    root = emerge.settings["ROOT"]

    # deps_map is the actual dependency graph.
    #
    # Each package specifies a "needs" list and a "provides" list. The "needs"
    # list indicates which packages we depend on. The "provides" list
    # indicates the reverse dependencies -- what packages need us.
    #
    # We also provide some other information in the dependency graph:
    # - action: What we're planning on doing with this package. Generally,
    #   "merge", "nomerge", or "uninstall"
    deps_map = {}

    def ReverseTree(packages):
      """Convert tree to digraph.

      Take the tree of package -> requirements and reverse it to a digraph of
      buildable packages -> packages they unblock.
      Args:
        packages: Tree(s) of dependencies.
      Returns:
        Unsanitized digraph (mutates the enclosing deps_map).
      """
      binpkg_phases = set(["setup", "preinst", "postinst"])
      needed_dep_types = set(["blocker", "buildtime", "runtime"])
      for pkg in packages:

        # Create an entry for the package
        action = packages[pkg]["action"]
        default_pkg = {"needs": {}, "provides": set(), "action": action,
                       "nodeps": False, "binary": False}
        this_pkg = deps_map.setdefault(pkg, default_pkg)

        if pkg in deps_info:
          this_pkg["idx"] = deps_info[pkg]["idx"]

        # If a package doesn't have any defined phases that might use the
        # dependent packages (i.e. pkg_setup, pkg_preinst, or pkg_postinst),
        # we can install this package before its deps are ready.
        emerge_pkg = self.package_db.get(pkg)
        if emerge_pkg and emerge_pkg.type_name == "binary":
          this_pkg["binary"] = True
          defined_phases = emerge_pkg.metadata.defined_phases
          defined_binpkg_phases = binpkg_phases.intersection(defined_phases)
          if not defined_binpkg_phases:
            this_pkg["nodeps"] = True

        # Create entries for dependencies of this package first.
        ReverseTree(packages[pkg]["deps"])

        # Add dependencies to this package.
        for dep, dep_item in packages[pkg]["deps"].iteritems():
          # We only need to enforce strict ordering of dependencies if the
          # dependency is a blocker, or is a buildtime or runtime dependency.
          # (I.e., ignored, optional, and runtime_post dependencies don't
          # depend on ordering.)
          dep_types = dep_item["deptypes"]
          if needed_dep_types.intersection(dep_types):
            deps_map[dep]["provides"].add(pkg)
            this_pkg["needs"][dep] = "/".join(dep_types)

          # If there's a blocker, Portage may need to move files from one
          # package to another, which requires editing the CONTENTS files of
          # both packages. To avoid race conditions while editing this file,
          # the two packages must not be installed in parallel, so we can't
          # safely ignore dependencies. See http://crosbug.com/19328
          if "blocker" in dep_types:
            this_pkg["nodeps"] = False

    def FindCycles():
      """Find cycles in the dependency tree.

      Returns:
        A dict mapping cyclic packages to a dict of the deps that cause
        cycles. For each dep that causes cycles, it returns an example
        traversal of the graph that shows the cycle.
      """

      def FindCyclesAtNode(pkg, cycles, unresolved, resolved):
        """Find cycles in cyclic dependencies starting at specified package.

        Args:
          pkg: Package identifier.
          cycles: A dict mapping cyclic packages to a dict of the deps that
            cause cycles. For each dep that causes cycles, it returns an
            example traversal of the graph that shows the cycle.
          unresolved: Nodes that have been visited but are not fully processed.
          resolved: Nodes that have been visited and are fully processed.
        """
        pkg_cycles = cycles.get(pkg)
        if pkg in resolved and not pkg_cycles:
          # If we already looked at this package, and found no cyclic
          # dependencies, we can stop now.
          return
        unresolved.append(pkg)
        for dep in deps_map[pkg]["needs"]:
          if dep in unresolved:
            # dep is already on the current DFS path: everything from its
            # first appearance to here forms a cycle.
            idx = unresolved.index(dep)
            mycycle = unresolved[idx:] + [dep]
            for i in range(len(mycycle) - 1):
              pkg1, pkg2 = mycycle[i], mycycle[i+1]
              cycles.setdefault(pkg1, {}).setdefault(pkg2, mycycle)
          elif not pkg_cycles or dep not in pkg_cycles:
            # Looks like we haven't seen this edge before.
            FindCyclesAtNode(dep, cycles, unresolved, resolved)
        unresolved.pop()
        resolved.add(pkg)

      cycles, unresolved, resolved = {}, [], set()
      for pkg in deps_map:
        FindCyclesAtNode(pkg, cycles, unresolved, resolved)
      return cycles

    def RemoveUnusedPackages():
      """Remove installed packages, propagating dependencies."""
      # Schedule packages that aren't on the install list for removal
      rm_pkgs = set(deps_map.keys()) - set(deps_info.keys())

      # Remove the packages we don't want, simplifying the graph and making
      # it easier for us to crack cycles. Each removed node's needs/provides
      # edges are spliced together so transitive ordering is preserved.
      for pkg in sorted(rm_pkgs):
        this_pkg = deps_map[pkg]
        needs = this_pkg["needs"]
        provides = this_pkg["provides"]
        for dep in needs:
          dep_provides = deps_map[dep]["provides"]
          dep_provides.update(provides)
          dep_provides.discard(pkg)
          dep_provides.discard(dep)
        for target in provides:
          target_needs = deps_map[target]["needs"]
          target_needs.update(needs)
          target_needs.pop(pkg, None)
          target_needs.pop(target, None)
        del deps_map[pkg]

    def PrintCycleBreak(basedep, dep, mycycle):
      """Print details about a cycle that we are planning on breaking.

      We are breaking a cycle where dep needs basedep. mycycle is an
      example cycle which contains dep -> basedep."""

      needs = deps_map[dep]["needs"]
      depinfo = needs.get(basedep, "deleted")

      # It's OK to swap install order for blockers, as long as the two
      # packages aren't installed in parallel. If there is a cycle, then
      # we know the packages depend on each other already, so we can drop the
      # blocker safely without printing a warning.
      if depinfo == "blocker":
        return

      # Notify the user that we're breaking a cycle.
      print "Breaking %s -> %s (%s)" % (dep, basedep, depinfo)

      # Show cycle.
      for i in range(len(mycycle) - 1):
        pkg1, pkg2 = mycycle[i], mycycle[i+1]
        needs = deps_map[pkg1]["needs"]
        depinfo = needs.get(pkg2, "deleted")
        if pkg1 == dep and pkg2 == basedep:
          depinfo = depinfo + ", deleting"
        print "  %s -> %s (%s)" % (pkg1, pkg2, depinfo)

    def SanitizeTree():
      """Remove circular dependencies.

      We prune all dependencies involved in cycles that go against the emerge
      ordering. This has a nice property: we're guaranteed to merge
      dependencies in the same order that portage does.

      Because we don't treat any dependencies as "soft" unless they're killed
      by a cycle, we pay attention to a larger number of dependencies when
      merging. This hurts performance a bit, but helps reliability.
      """
      start = time.time()
      cycles = FindCycles()
      # Re-run cycle detection after each full pruning pass: removing edges
      # can both break existing cycles and expose remaining ones.
      while cycles:
        for dep, mycycles in cycles.iteritems():
          for basedep, mycycle in mycycles.iteritems():
            # Only drop edges that point against portage's install order.
            if deps_info[basedep]["idx"] >= deps_info[dep]["idx"]:
              if "--quiet" not in emerge.opts:
                PrintCycleBreak(basedep, dep, mycycle)
              del deps_map[dep]["needs"][basedep]
              deps_map[basedep]["provides"].remove(dep)
        cycles = FindCycles()
      seconds = time.time() - start
      if "--quiet" not in emerge.opts and seconds >= 0.1:
        print "Tree sanitized in %dm%.1fs" % (seconds / 60, seconds % 60)

    def FindRecursiveProvides(pkg, seen):
      """Find all nodes that require a particular package.

      Assumes that graph is acyclic (SanitizeTree has already run).

      Args:
        pkg: Package identifier.
        seen: Nodes that have been visited so far.
      """
      if pkg in seen:
        return
      seen.add(pkg)
      info = deps_map[pkg]
      # "tprovides" is the transitive closure of "provides".
      info["tprovides"] = info["provides"].copy()
      for dep in info["provides"]:
        FindRecursiveProvides(dep, seen)
        info["tprovides"].update(deps_map[dep]["tprovides"])

    ReverseTree(deps_tree)

    # We need to remove unused packages so that we can use the dependency
    # ordering of the install process to show us what cycles to crack.
    RemoveUnusedPackages()
    SanitizeTree()
    seen = set()
    for pkg in deps_map:
      FindRecursiveProvides(pkg, seen)
    return deps_map
728
729 def PrintInstallPlan(self, deps_map):
730 """Print an emerge-style install plan.
731
732 The install plan lists what packages we're installing, in order.
733 It's useful for understanding what parallel_emerge is doing.
734
735 Args:
736 deps_map: The dependency graph.
737 """
738
739 def InstallPlanAtNode(target, deps_map):
740 nodes = []
741 nodes.append(target)
742 for dep in deps_map[target]["provides"]:
743 del deps_map[dep]["needs"][target]
744 if not deps_map[dep]["needs"]:
745 nodes.extend(InstallPlanAtNode(dep, deps_map))
746 return nodes
747
748 deps_map = copy.deepcopy(deps_map)
749 install_plan = []
750 plan = set()
751 for target, info in deps_map.iteritems():
752 if not info["needs"] and target not in plan:
753 for item in InstallPlanAtNode(target, deps_map):
754 plan.add(item)
755 install_plan.append(self.package_db[item])
756
757 for pkg in plan:
758 del deps_map[pkg]
759
760 if deps_map:
761 print "Cyclic dependencies:", " ".join(deps_map)
762 PrintDepsMap(deps_map)
763 sys.exit(1)
764
765 self.emerge.depgraph.display(install_plan)
766
767
768def PrintDepsMap(deps_map):
769 """Print dependency graph, for each package list it's prerequisites."""
770 for i in sorted(deps_map):
771 print "%s: (%s) needs" % (i, deps_map[i]["action"])
772 needs = deps_map[i]["needs"]
773 for j in sorted(needs):
774 print " %s" % (j)
775 if not needs:
776 print " no dependencies"
777
778
class EmergeJobState(object):
  """Tracks the state and log bookkeeping of a single emerge (or fetch) job."""

  __slots__ = ["done", "filename", "last_notify_timestamp", "last_output_seek",
               "last_output_timestamp", "pkgname", "retcode", "start_timestamp",
               "target", "fetch_only"]

  def __init__(self, target, pkgname, done, filename, start_timestamp,
               retcode=None, fetch_only=False):
    """Initialize the job state.

    Args:
      target: Full name of the target being built
        (e.g. chromeos-base/chromeos-0.0.1-r60).
      pkgname: Short name of the target (e.g. chromeos-0.0.1-r60).
      done: True if the job is finished; False otherwise.
      filename: Log file where the job's output is currently stored.
      start_timestamp: Time at which the job started.
      retcode: Return code of the job, if it has actually finished.
      fetch_only: True if this was just a fetch job.
    """
    self.target = target
    self.pkgname = pkgname
    self.done = done
    self.filename = filename
    self.start_timestamp = start_timestamp
    self.retcode = retcode
    self.fetch_only = fetch_only

    # We print the name of the log file at the beginning of the job, so the
    # last notification time starts out equal to start_timestamp.
    self.last_notify_timestamp = start_timestamp

    # Byte offset of the end of the last complete line we printed from the
    # log. Starts at zero; used to jump to the right place when we print
    # output from the same ebuild multiple times.
    self.last_output_seek = 0

    # We haven't printed any output yet.
    self.last_output_timestamp = 0
822
823
David James7358d032011-05-19 10:40:03 -0700824def KillHandler(signum, frame):
825 # Kill self and all subprocesses.
826 os.killpg(0, signal.SIGKILL)
827
David Jamesfcb70ef2011-02-02 16:02:30 -0800828def SetupWorkerSignals():
829 def ExitHandler(signum, frame):
David James7358d032011-05-19 10:40:03 -0700830 # Set KILLED flag.
831 KILLED.set()
David James13cead42011-05-18 16:22:01 -0700832
David James7358d032011-05-19 10:40:03 -0700833 # Remove our signal handlers so we don't get called recursively.
834 signal.signal(signal.SIGINT, KillHandler)
835 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -0800836
837 # Ensure that we exit quietly and cleanly, if possible, when we receive
838 # SIGTERM or SIGINT signals. By default, when the user hits CTRL-C, all
839 # of the child processes will print details about KeyboardInterrupt
840 # exceptions, which isn't very helpful.
841 signal.signal(signal.SIGINT, ExitHandler)
842 signal.signal(signal.SIGTERM, ExitHandler)
843
David James1ed3e252011-10-05 20:26:15 -0700844def EmergeProcess(scheduler, output):
845 """Merge a package in a subprocess.
846
847 Args:
848 scheduler: Scheduler object.
849 output: Temporary file to write output.
850
851 Returns:
852 The exit code returned by the subprocess.
853 """
854 pid = os.fork()
855 if pid == 0:
856 try:
857 # Sanity checks.
858 if sys.stdout.fileno() != 1: raise Exception("sys.stdout.fileno() != 1")
859 if sys.stderr.fileno() != 2: raise Exception("sys.stderr.fileno() != 2")
860
861 # - Redirect 1 (stdout) and 2 (stderr) at our temporary file.
862 # - Redirect 0 to point at sys.stdin. In this case, sys.stdin
863 # points at a file reading os.devnull, because multiprocessing mucks
864 # with sys.stdin.
865 # - Leave the sys.stdin and output filehandles alone.
866 fd_pipes = {0: sys.stdin.fileno(),
867 1: output.fileno(),
868 2: output.fileno(),
869 sys.stdin.fileno(): sys.stdin.fileno(),
870 output.fileno(): output.fileno()}
871 portage.process._setup_pipes(fd_pipes)
872
873 # Portage doesn't like when sys.stdin.fileno() != 0, so point sys.stdin
874 # at the filehandle we just created in _setup_pipes.
875 if sys.stdin.fileno() != 0:
876 sys.stdin = os.fdopen(0, "r")
877
878 # Actually do the merge.
879 retval = scheduler.merge()
880
881 # We catch all exceptions here (including SystemExit, KeyboardInterrupt,
882 # etc) so as to ensure that we don't confuse the multiprocessing module,
883 # which expects that all forked children exit with os._exit().
884 except:
885 traceback.print_exc(file=output)
886 retval = 1
887 sys.stdout.flush()
888 sys.stderr.flush()
889 output.flush()
890 os._exit(retval)
891 else:
892 # Return the exit code of the subprocess.
893 return os.waitpid(pid, 0)[1]
David Jamesfcb70ef2011-02-02 16:02:30 -0800894
Brian Harring0be85c62012-03-17 19:52:12 -0700895def EmergeWorker(task_queue, job_queue, emerge, package_db, fetch_only=False):
David Jamesfcb70ef2011-02-02 16:02:30 -0800896 """This worker emerges any packages given to it on the task_queue.
897
898 Args:
899 task_queue: The queue of tasks for this worker to do.
900 job_queue: The queue of results from the worker.
901 emerge: An EmergeData() object.
902 package_db: A dict, mapping package ids to portage Package objects.
Brian Harring0be85c62012-03-17 19:52:12 -0700903 fetch_only: A bool, indicating if we should just fetch the target.
David Jamesfcb70ef2011-02-02 16:02:30 -0800904
905 It expects package identifiers to be passed to it via task_queue. When
906 a task is started, it pushes the (target, filename) to the started_queue.
907 The output is stored in filename. When a merge starts or finishes, we push
908 EmergeJobState objects to the job_queue.
909 """
910
911 SetupWorkerSignals()
912 settings, trees, mtimedb = emerge.settings, emerge.trees, emerge.mtimedb
David Jamesdeebd692011-05-09 17:02:52 -0700913
914 # Disable flushing of caches to save on I/O.
David James7a1ea4b2011-10-13 15:06:41 -0700915 root = emerge.settings["ROOT"]
916 vardb = emerge.trees[root]["vartree"].dbapi
917 vardb._flush_cache_enabled = False
Brian Harring0be85c62012-03-17 19:52:12 -0700918 bindb = emerge.trees[root]["bintree"].dbapi
919 # Might be a set, might be a list, might be None; no clue, just use shallow
920 # copy to ensure we can roll it back.
921 original_remotepkgs = copy.copy(bindb.bintree._remotepkgs)
David Jamesdeebd692011-05-09 17:02:52 -0700922
David Jamesfcb70ef2011-02-02 16:02:30 -0800923 opts, spinner = emerge.opts, emerge.spinner
924 opts["--nodeps"] = True
Brian Harring0be85c62012-03-17 19:52:12 -0700925 if fetch_only:
926 opts["--fetchonly"] = True
927
David Jamesfcb70ef2011-02-02 16:02:30 -0800928 while True:
929 # Wait for a new item to show up on the queue. This is a blocking wait,
930 # so if there's nothing to do, we just sit here.
Brian Harring0be85c62012-03-17 19:52:12 -0700931 pkg_state = task_queue.get()
932 if pkg_state is None:
David Jamesfcb70ef2011-02-02 16:02:30 -0800933 # If target is None, this means that the main thread wants us to quit.
934 # The other workers need to exit too, so we'll push the message back on
935 # to the queue so they'll get it too.
Brian Harring0be85c62012-03-17 19:52:12 -0700936 task_queue.put(None)
David Jamesfcb70ef2011-02-02 16:02:30 -0800937 return
David James7358d032011-05-19 10:40:03 -0700938 if KILLED.is_set():
939 return
940
Brian Harring0be85c62012-03-17 19:52:12 -0700941 target = pkg_state.target
942
David Jamesfcb70ef2011-02-02 16:02:30 -0800943 db_pkg = package_db[target]
Brian Harring0be85c62012-03-17 19:52:12 -0700944
945 if db_pkg.type_name == "binary":
946 if not fetch_only and pkg_state.fetched_successfully:
947 # Ensure portage doesn't think our pkg is remote- else it'll force
948 # a redownload of it (even if the on-disk file is fine). In-memory
949 # caching basically, implemented dumbly.
950 bindb.bintree._remotepkgs = None
951 else:
952 bindb.bintree_remotepkgs = original_remotepkgs
953
David Jamesfcb70ef2011-02-02 16:02:30 -0800954 db_pkg.root_config = emerge.root_config
955 install_list = [db_pkg]
956 pkgname = db_pkg.pf
957 output = tempfile.NamedTemporaryFile(prefix=pkgname + "-", delete=False)
958 start_timestamp = time.time()
Brian Harring0be85c62012-03-17 19:52:12 -0700959 job = EmergeJobState(target, pkgname, False, output.name, start_timestamp,
960 fetch_only=fetch_only)
David Jamesfcb70ef2011-02-02 16:02:30 -0800961 job_queue.put(job)
962 if "--pretend" in opts:
963 retcode = 0
964 else:
David Jamesfcb70ef2011-02-02 16:02:30 -0800965 try:
David James386ccd12011-05-04 20:17:42 -0700966 emerge.scheduler_graph.mergelist = install_list
967 scheduler = Scheduler(settings, trees, mtimedb, opts, spinner,
David Jamesbf1e3442011-05-28 07:44:20 -0700968 favorites=emerge.favorites, graph_config=emerge.scheduler_graph)
David Jamesace2e212011-07-13 11:47:39 -0700969
970 # Enable blocker handling even though we're in --nodeps mode. This
971 # allows us to unmerge the blocker after we've merged the replacement.
972 scheduler._opts_ignore_blockers = frozenset()
973
David James1ed3e252011-10-05 20:26:15 -0700974 retcode = EmergeProcess(scheduler, output)
David Jamesfcb70ef2011-02-02 16:02:30 -0800975 except Exception:
976 traceback.print_exc(file=output)
977 retcode = 1
David James1ed3e252011-10-05 20:26:15 -0700978 output.close()
David Jamesfcb70ef2011-02-02 16:02:30 -0800979
David James7358d032011-05-19 10:40:03 -0700980 if KILLED.is_set():
981 return
982
David Jamesfcb70ef2011-02-02 16:02:30 -0800983 job = EmergeJobState(target, pkgname, True, output.name, start_timestamp,
Brian Harring0be85c62012-03-17 19:52:12 -0700984 retcode, fetch_only=fetch_only)
David Jamesfcb70ef2011-02-02 16:02:30 -0800985 job_queue.put(job)
986
987
988class LinePrinter(object):
989 """Helper object to print a single line."""
990
991 def __init__(self, line):
992 self.line = line
993
994 def Print(self, seek_locations):
995 print self.line
996
997
class JobPrinter(object):
  """Helper object to print output of a job.

  Instances are pushed onto the print queue and executed by PrintWorker,
  which supplies the shared seek_locations dict so output from the same
  log file is never printed twice.
  """

  def __init__(self, job, unlink=False):
    """Print output of job.

    If unlink is True, unlink the job output file when done."""
    # Snapshot the time now: Print() runs later, in the print worker, and
    # we want the job runtime as of when this request was queued.
    self.current_time = time.time()
    self.job = job
    self.unlink = unlink

  def Print(self, seek_locations):
    """Print the new output of self.job, resuming where we last left off."""

    job = self.job

    # Calculate how long the job has been running.
    seconds = self.current_time - job.start_timestamp

    # Note that we've printed out the job so far.
    job.last_output_timestamp = self.current_time

    # Note that we're starting the job
    info = "job %s (%dm%.1fs)" % (job.pkgname, seconds / 60, seconds % 60)
    last_output_seek = seek_locations.get(job.filename, 0)
    if last_output_seek:
      print "=== Continue output for %s ===" % info
    else:
      print "=== Start output for %s ===" % info

    # Print actual output from job
    f = codecs.open(job.filename, encoding='utf-8', errors='replace')
    f.seek(last_output_seek)
    prefix = job.pkgname + ":"
    for line in f:

      # Save off our position in the file. Only advance past complete
      # lines, so a partially-written trailing line gets re-printed in
      # full on the next Print request.
      if line and line[-1] == "\n":
        last_output_seek = f.tell()
        line = line[:-1]

      # Print our line
      print prefix, line.encode('utf-8', 'replace')
    f.close()

    # Save our last spot in the file so that we don't print out the same
    # location twice.
    seek_locations[job.filename] = last_output_seek

    # Note end of output section
    if job.done:
      print "=== Complete: %s ===" % info
    else:
      print "=== Still running: %s ===" % info

    if self.unlink:
      os.unlink(job.filename)
1054
1055
def PrintWorker(queue):
  """Worker process that prints queued print requests to the screen.

  Consumes LinePrinter/JobPrinter objects from `queue` until a falsy
  sentinel (None) arrives.
  """

  def ExitHandler(signum, frame):
    # Remember that we were asked to shut down.
    KILLED.set()

    # Switch to default signal handlers so that we'll die after two signals.
    signal.signal(signal.SIGINT, KillHandler)
    signal.signal(signal.SIGTERM, KillHandler)

  # Don't exit on the first SIGINT / SIGTERM, because the parent worker will
  # handle it and tell us when we need to exit.
  signal.signal(signal.SIGINT, ExitHandler)
  signal.signal(signal.SIGTERM, ExitHandler)

  # seek_locations maps log filename -> position we've printed up to.
  # Print requests update it so each file's output resumes where it left off.
  seek_locations = {}
  while True:
    try:
      request = queue.get()
      if not request:
        # None is the shutdown sentinel.
        break
      request.Print(seek_locations)
      sys.stdout.flush()
    except IOError as ex:
      if ex.errno != errno.EINTR:
        raise
      # Interrupted by a signal; keep printing.
1089
Brian Harring867e2362012-03-17 04:05:17 -07001090
Brian Harring0be85c62012-03-17 19:52:12 -07001091class TargetState(object):
Brian Harring867e2362012-03-17 04:05:17 -07001092
Brian Harring0be85c62012-03-17 19:52:12 -07001093 __slots__ = ("target", "info", "score", "prefetched", "fetched_successfully")
Brian Harring867e2362012-03-17 04:05:17 -07001094
Brian Harring0be85c62012-03-17 19:52:12 -07001095 def __init__(self, target, info, fetched=False):
Brian Harring867e2362012-03-17 04:05:17 -07001096 self.target, self.info = target, info
Brian Harring0be85c62012-03-17 19:52:12 -07001097 self.fetched_successfully = False
1098 self.prefetched = False
Brian Harring867e2362012-03-17 04:05:17 -07001099 self.update_score()
1100
1101 def __cmp__(self, other):
1102 return cmp(self.score, other.score)
1103
1104 def update_score(self):
1105 self.score = (
1106 -len(self.info["tprovides"]),
Brian Harring0be85c62012-03-17 19:52:12 -07001107 len(self.info["needs"]),
Brian Harring11c5eeb2012-03-18 11:02:39 -07001108 not self.info["binary"],
Brian Harring867e2362012-03-17 04:05:17 -07001109 -len(self.info["provides"]),
1110 self.info["idx"],
1111 self.target,
1112 )
1113
1114
class ScoredHeap(object):
  """Min-heap of TargetState objects with O(1) membership tests by target."""

  __slots__ = ("heap", "_heap_set")

  def __init__(self, initial=()):
    self.heap = []
    self._heap_set = set()
    if initial:
      self.multi_put(initial)

  def get(self):
    """Pop and return the lowest-scored item."""
    result = heapq.heappop(self.heap)
    self._heap_set.remove(result.target)
    return result

  def put(self, item):
    """Push a single TargetState onto the heap."""
    if not isinstance(item, TargetState):
      raise ValueError("Item %r isn't a TargetState" % (item,))
    heapq.heappush(self.heap, item)
    self._heap_set.add(item.target)

  def multi_put(self, sequence):
    """Add many items at once, then restore the heap invariant."""
    items = list(sequence)
    self.heap.extend(items)
    self._heap_set.update(item.target for item in items)
    self.sort()

  def sort(self):
    # Re-establish the heap invariant (e.g. after bulk insert or after
    # item scores changed).
    heapq.heapify(self.heap)

  def __contains__(self, target):
    return target in self._heap_set

  def __nonzero__(self):
    return bool(self.heap)

  def __len__(self):
    return len(self.heap)
1153
1154
David Jamesfcb70ef2011-02-02 16:02:30 -08001155class EmergeQueue(object):
1156 """Class to schedule emerge jobs according to a dependency graph."""
1157
  def __init__(self, deps_map, emerge, package_db, show_output):
    """Set up worker pools, queues, and scheduling state.

    Creates one pool of fetch workers and one pool of build workers (both
    running EmergeWorker), plus a dedicated print worker process, then seeds
    the fetch heap with every package in the graph.

    Args:
      deps_map: The dependency graph (package id -> dep info dict).
      emerge: An EmergeData() object.
      package_db: A dict, mapping package ids to portage Package objects.
      show_output: Whether to print full job output while jobs run.
    """
    # Store the dependency graph.
    self._deps_map = deps_map
    self._state_map = {}
    # Initialize the running queue to empty
    self._build_jobs = {}
    self._build_ready = ScoredHeap()
    self._fetch_jobs = {}
    self._fetch_ready = ScoredHeap()
    # List of total package installs represented in deps_map.
    install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"]
    self._total_jobs = len(install_jobs)
    self._show_output = show_output

    if "--pretend" in emerge.opts:
      print "Skipping merge because of --pretend mode."
      sys.exit(0)

    # Set a process group so we can easily terminate all children.
    os.setsid()

    # Setup scheduler graph object. This is used by the child processes
    # to help schedule jobs.
    emerge.scheduler_graph = emerge.depgraph.schedulerGraph()

    # Calculate how many jobs we can run in parallel. We don't want to pass
    # the --jobs flag over to emerge itself, because that'll tell emerge to
    # hide its output, and said output is quite useful for debugging hung
    # jobs.
    procs = min(self._total_jobs,
                emerge.opts.pop("--jobs", multiprocessing.cpu_count()))
    self._build_procs = procs
    self._fetch_procs = procs
    self._load_avg = emerge.opts.pop("--load-average", None)
    self._job_queue = multiprocessing.Queue()
    self._print_queue = multiprocessing.Queue()

    # Fetch pool: EmergeWorker with fetch_only=True (the trailing True).
    self._fetch_queue = multiprocessing.Queue()
    args = (self._fetch_queue, self._job_queue, emerge, package_db, True)
    self._fetch_pool = multiprocessing.Pool(self._fetch_procs, EmergeWorker,
                                            args)

    self._build_queue = multiprocessing.Queue()
    args = (self._build_queue, self._job_queue, emerge, package_db)
    self._build_pool = multiprocessing.Pool(self._build_procs, EmergeWorker,
                                            args)

    self._print_worker = multiprocessing.Process(target=PrintWorker,
                                                 args=[self._print_queue])
    self._print_worker.start()

    # Initialize the failed queue to empty.
    self._retry_queue = []
    self._failed = set()

    # Setup an exit handler so that we print nice messages if we are
    # terminated.
    self._SetupExitHandler()

    # Schedule our jobs: every package starts life on the fetch heap.
    self._state_map.update(
        (pkg, TargetState(pkg, data)) for pkg, data in deps_map.iteritems())
    self._fetch_ready.multi_put(self._state_map.itervalues())
David Jamesfcb70ef2011-02-02 16:02:30 -08001221
  def _SetupExitHandler(self):
    """Install SIGINT/SIGTERM handlers that report status before dying.

    On the first signal the handler dumps the logs of any in-flight build
    jobs, announces the signal, shuts down the print worker, and then kills
    the whole process group (which includes all worker pools) before
    exiting. A second signal during cleanup triggers KillHandler directly.
    """

    def ExitHandler(signum, frame):
      # Set KILLED flag.
      KILLED.set()

      # Kill our signal handlers so we don't get called recursively
      signal.signal(signal.SIGINT, KillHandler)
      signal.signal(signal.SIGTERM, KillHandler)

      # Print our current job status
      for job in self._build_jobs.itervalues():
        if job:
          self._print_queue.put(JobPrinter(job, unlink=True))

      # Notify the user that we are exiting
      self._Print("Exiting on signal %s" % signum)
      # Flush the print worker cleanly before the hard kill below.
      self._print_queue.put(None)
      self._print_worker.join()

      # Kill child threads, then exit.
      os.killpg(0, signal.SIGKILL)
      sys.exit(1)

    # Print out job status when we are killed
    signal.signal(signal.SIGINT, ExitHandler)
    signal.signal(signal.SIGTERM, ExitHandler)
1249
Brian Harring0be85c62012-03-17 19:52:12 -07001250 def _Schedule(self, pkg_state):
David Jamesfcb70ef2011-02-02 16:02:30 -08001251 # We maintain a tree of all deps, if this doesn't need
David James8c7e5e32011-06-28 11:26:03 -07001252 # to be installed just free up its children and continue.
David Jamesfcb70ef2011-02-02 16:02:30 -08001253 # It is possible to reinstall deps of deps, without reinstalling
1254 # first level deps, like so:
1255 # chromeos (merge) -> eselect (nomerge) -> python (merge)
Brian Harring0be85c62012-03-17 19:52:12 -07001256 this_pkg = pkg_state.info
1257 target = pkg_state.target
1258 if pkg_state.info is not None:
1259 if this_pkg["action"] == "nomerge":
1260 self._Finish(target)
1261 elif target not in self._build_jobs:
1262 # Kick off the build if it's marked to be built.
1263 self._build_jobs[target] = None
1264 self._build_queue.put(pkg_state)
1265 return True
David Jamesfcb70ef2011-02-02 16:02:30 -08001266
David James8c7e5e32011-06-28 11:26:03 -07001267 def _ScheduleLoop(self):
1268 # If the current load exceeds our desired load average, don't schedule
1269 # more than one job.
1270 if self._load_avg and os.getloadavg()[0] > self._load_avg:
1271 needed_jobs = 1
1272 else:
Brian Harring0be85c62012-03-17 19:52:12 -07001273 needed_jobs = self._build_procs
David James8c7e5e32011-06-28 11:26:03 -07001274
1275 # Schedule more jobs.
Brian Harring0be85c62012-03-17 19:52:12 -07001276 while self._build_ready and len(self._build_jobs) < needed_jobs:
1277 state = self._build_ready.get()
1278 if state.target not in self._failed:
1279 self._Schedule(state)
David Jamesfcb70ef2011-02-02 16:02:30 -08001280
1281 def _Print(self, line):
1282 """Print a single line."""
1283 self._print_queue.put(LinePrinter(line))
1284
  def _Status(self):
    """Print a periodic status update.

    Dumps interim output and "still building" notices for long-running
    jobs; if nothing job-specific was printed, emits a one-line summary of
    pending/fetching/building/retrying counts plus elapsed time and load.
    """
    current_time = time.time()
    no_output = True

    # Print interim output every minute if --show-output is used. Otherwise,
    # print notifications about running packages every 2 minutes, and print
    # full output for jobs that have been running for 60 minutes or more.
    if self._show_output:
      interval = 60
      notify_interval = 0
    else:
      interval = 60 * 60
      notify_interval = 60 * 2
    for target, job in self._build_jobs.iteritems():
      if job:
        last_timestamp = max(job.start_timestamp, job.last_output_timestamp)
        if last_timestamp + interval < current_time:
          self._print_queue.put(JobPrinter(job))
          job.last_output_timestamp = current_time
          no_output = False
        elif (notify_interval and
              job.last_notify_timestamp + notify_interval < current_time):
          job_seconds = current_time - job.start_timestamp
          args = (job.pkgname, job_seconds / 60, job_seconds % 60, job.filename)
          info = "Still building %s (%dm%.1fs). Logs in %s" % args
          job.last_notify_timestamp = current_time
          self._Print(info)
          no_output = False

    # If we haven't printed any messages yet, print a general status message
    # here.
    if no_output:
      seconds = current_time - GLOBAL_START
      fjobs, fready = len(self._fetch_jobs), len(self._fetch_ready)
      bjobs, bready = len(self._build_jobs), len(self._build_ready)
      retries = len(self._retry_queue)
      # Packages currently fetching or building are no longer "pending".
      pending = max(0, len(self._deps_map) - fjobs - bjobs)
      line = "Pending %s/%s, " % (pending, self._total_jobs)
      if fjobs or fready:
        line += "Fetching %s/%s, " % (fjobs, fready + fjobs)
      if bjobs or bready or retries:
        line += "Building %s/%s, " % (bjobs, bready + bjobs)
      if retries:
        line += "Retrying %s, " % (retries,)
      load = " ".join(str(x) for x in os.getloadavg())
      line += ("[Time %dm%.1fs Load %s]" % (seconds/60, seconds %60, load))
      self._Print(line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001333
1334 def _Finish(self, target):
David James8c7e5e32011-06-28 11:26:03 -07001335 """Mark a target as completed and unblock dependencies."""
1336 this_pkg = self._deps_map[target]
1337 if this_pkg["needs"] and this_pkg["nodeps"]:
1338 # We got installed, but our deps have not been installed yet. Dependent
1339 # packages should only be installed when our needs have been fully met.
1340 this_pkg["action"] = "nomerge"
1341 else:
1342 finish = []
1343 for dep in this_pkg["provides"]:
1344 dep_pkg = self._deps_map[dep]
Brian Harring0be85c62012-03-17 19:52:12 -07001345 state = self._state_map[dep]
David James8c7e5e32011-06-28 11:26:03 -07001346 del dep_pkg["needs"][target]
Brian Harring0be85c62012-03-17 19:52:12 -07001347 state.update_score()
1348 if not state.prefetched:
1349 if dep in self._fetch_ready:
1350 # If it's not currently being fetched, update the prioritization
1351 self._fetch_ready.sort()
1352 elif not dep_pkg["needs"]:
David James8c7e5e32011-06-28 11:26:03 -07001353 if dep_pkg["nodeps"] and dep_pkg["action"] == "nomerge":
1354 self._Finish(dep)
1355 else:
Brian Harring0be85c62012-03-17 19:52:12 -07001356 self._build_ready.put(self._state_map[dep])
David James8c7e5e32011-06-28 11:26:03 -07001357 self._deps_map.pop(target)
David Jamesfcb70ef2011-02-02 16:02:30 -08001358
1359 def _Retry(self):
David James8c7e5e32011-06-28 11:26:03 -07001360 while self._retry_queue:
Brian Harring0be85c62012-03-17 19:52:12 -07001361 state = self._retry_queue.pop(0)
1362 if self._Schedule(state):
1363 self._Print("Retrying emerge of %s." % state.target)
David James8c7e5e32011-06-28 11:26:03 -07001364 break
David Jamesfcb70ef2011-02-02 16:02:30 -08001365
Brian Harringa43f5952012-04-12 01:19:34 -07001366 def _Shutdown(self):
David Jamesfcb70ef2011-02-02 16:02:30 -08001367 # Tell emerge workers to exit. They all exit when 'None' is pushed
1368 # to the queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001369
Brian Harringa43f5952012-04-12 01:19:34 -07001370 # Shutdown the workers first; then jobs (which is how they feed things back)
1371 # then finally the print queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001372
Brian Harringa43f5952012-04-12 01:19:34 -07001373 def _stop(queue, pool):
1374 if pool is None:
1375 return
1376 try:
1377 queue.put(None)
1378 pool.close()
1379 pool.join()
1380 finally:
1381 pool.terminate()
Brian Harring0be85c62012-03-17 19:52:12 -07001382
Brian Harringa43f5952012-04-12 01:19:34 -07001383 _stop(self._fetch_queue, self._fetch_pool)
1384 self._fetch_queue = self._fetch_pool = None
Brian Harring0be85c62012-03-17 19:52:12 -07001385
Brian Harringa43f5952012-04-12 01:19:34 -07001386 _stop(self._build_queue, self._build_pool)
1387 self._build_queue = self._build_pool = None
1388
1389 if self._job_queue is not None:
1390 self._job_queue.close()
1391 self._job_queue = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001392
1393 # Now that our workers are finished, we can kill the print queue.
Brian Harringa43f5952012-04-12 01:19:34 -07001394 if self._print_worker is not None:
1395 try:
1396 self._print_queue.put(None)
1397 self._print_queue.close()
1398 self._print_worker.join()
1399 finally:
1400 self._print_worker.terminate()
1401 self._print_queue = self._print_worker = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001402
1403 def Run(self):
1404 """Run through the scheduled ebuilds.
1405
1406 Keep running so long as we have uninstalled packages in the
1407 dependency graph to merge.
1408 """
Brian Harringa43f5952012-04-12 01:19:34 -07001409 if not self._deps_map:
1410 return
1411
Brian Harring0be85c62012-03-17 19:52:12 -07001412 # Start the fetchers.
1413 for _ in xrange(min(self._fetch_procs, len(self._fetch_ready))):
1414 state = self._fetch_ready.get()
1415 self._fetch_jobs[state.target] = None
1416 self._fetch_queue.put(state)
1417
1418 # Print an update, then get going.
1419 self._Status()
1420
David Jamese703d0f2012-01-12 16:27:45 -08001421 retried = set()
David Jamesfcb70ef2011-02-02 16:02:30 -08001422 while self._deps_map:
1423 # Check here that we are actually waiting for something.
Brian Harring0be85c62012-03-17 19:52:12 -07001424 if (self._build_queue.empty() and
David Jamesfcb70ef2011-02-02 16:02:30 -08001425 self._job_queue.empty() and
Brian Harring0be85c62012-03-17 19:52:12 -07001426 not self._fetch_jobs and
1427 not self._fetch_ready and
1428 not self._build_jobs and
1429 not self._build_ready and
David Jamesfcb70ef2011-02-02 16:02:30 -08001430 self._deps_map):
1431 # If we have failed on a package, retry it now.
1432 if self._retry_queue:
1433 self._Retry()
1434 else:
David Jamesfcb70ef2011-02-02 16:02:30 -08001435 # The dependency map is helpful for debugging failures.
1436 PrintDepsMap(self._deps_map)
1437
1438 # Tell the user why we're exiting.
1439 if self._failed:
1440 print "Packages failed: %s" % " ,".join(self._failed)
1441 else:
1442 print "Deadlock! Circular dependencies!"
1443 sys.exit(1)
1444
Brian Harring706747c2012-03-16 03:04:31 -07001445 for i in range(12):
David Jamesa74289a2011-08-12 10:41:24 -07001446 try:
1447 job = self._job_queue.get(timeout=5)
1448 break
1449 except Queue.Empty:
1450 # Check if any more jobs can be scheduled.
1451 self._ScheduleLoop()
1452 else:
Brian Harring706747c2012-03-16 03:04:31 -07001453 # Print an update every 60 seconds.
David Jamesfcb70ef2011-02-02 16:02:30 -08001454 self._Status()
1455 continue
1456
1457 target = job.target
1458
Brian Harring0be85c62012-03-17 19:52:12 -07001459 if job.fetch_only:
1460 if not job.done:
1461 self._fetch_jobs[job.target] = job
1462 else:
1463 state = self._state_map[job.target]
1464 state.prefetched = True
1465 state.fetched_successfully = (job.retcode == 0)
1466 del self._fetch_jobs[job.target]
1467 self._Print("Fetched %s in %2.2fs"
1468 % (target, time.time() - job.start_timestamp))
1469
1470 if self._show_output or job.retcode != 0:
1471 self._print_queue.put(JobPrinter(job, unlink=True))
1472 else:
1473 os.unlink(job.filename)
1474 # Failure or not, let build work with it next.
1475 if not self._deps_map[job.target]["needs"]:
1476 self._build_ready.put(state)
1477 self._ScheduleLoop()
1478
1479 if self._fetch_ready:
1480 state = self._fetch_ready.get()
1481 self._fetch_queue.put(state)
1482 self._fetch_jobs[state.target] = None
1483 else:
1484 # Minor optimization; shut down fetchers early since we know
1485 # the queue is empty.
1486 self._fetch_queue.put(None)
1487 continue
1488
David Jamesfcb70ef2011-02-02 16:02:30 -08001489 if not job.done:
Brian Harring0be85c62012-03-17 19:52:12 -07001490 self._build_jobs[target] = job
David Jamesfcb70ef2011-02-02 16:02:30 -08001491 self._Print("Started %s (logged in %s)" % (target, job.filename))
1492 continue
1493
1494 # Print output of job
1495 if self._show_output or job.retcode != 0:
1496 self._print_queue.put(JobPrinter(job, unlink=True))
1497 else:
1498 os.unlink(job.filename)
Brian Harring0be85c62012-03-17 19:52:12 -07001499 del self._build_jobs[target]
David Jamesfcb70ef2011-02-02 16:02:30 -08001500
1501 seconds = time.time() - job.start_timestamp
1502 details = "%s (in %dm%.1fs)" % (target, seconds / 60, seconds % 60)
David James32420cc2011-08-25 21:32:46 -07001503 previously_failed = target in self._failed
David Jamesfcb70ef2011-02-02 16:02:30 -08001504
1505 # Complain if necessary.
1506 if job.retcode != 0:
1507 # Handle job failure.
David James32420cc2011-08-25 21:32:46 -07001508 if previously_failed:
David Jamesfcb70ef2011-02-02 16:02:30 -08001509 # If this job has failed previously, give up.
1510 self._Print("Failed %s. Your build has failed." % details)
1511 else:
1512 # Queue up this build to try again after a long while.
David Jamese703d0f2012-01-12 16:27:45 -08001513 retried.add(target)
Brian Harring0be85c62012-03-17 19:52:12 -07001514 self._retry_queue.append(self._state_map[target])
David Jamesfcb70ef2011-02-02 16:02:30 -08001515 self._failed.add(target)
1516 self._Print("Failed %s, retrying later." % details)
1517 else:
David James32420cc2011-08-25 21:32:46 -07001518 if previously_failed:
1519 # Remove target from list of failed packages.
1520 self._failed.remove(target)
1521
1522 self._Print("Completed %s" % details)
1523
1524 # Mark as completed and unblock waiting ebuilds.
1525 self._Finish(target)
1526
1527 if previously_failed and self._retry_queue:
David Jamesfcb70ef2011-02-02 16:02:30 -08001528 # If we have successfully retried a failed package, and there
1529 # are more failed packages, try the next one. We will only have
1530 # one retrying package actively running at a time.
1531 self._Retry()
1532
David Jamesfcb70ef2011-02-02 16:02:30 -08001533
David James8c7e5e32011-06-28 11:26:03 -07001534 # Schedule pending jobs and print an update.
1535 self._ScheduleLoop()
1536 self._Status()
David Jamesfcb70ef2011-02-02 16:02:30 -08001537
David Jamese703d0f2012-01-12 16:27:45 -08001538 # If packages were retried, output a warning.
1539 if retried:
1540 self._Print("")
1541 self._Print("WARNING: The following packages failed the first time,")
1542 self._Print("but succeeded upon retry. This might indicate incorrect")
1543 self._Print("dependencies.")
1544 for pkg in retried:
1545 self._Print(" %s" % pkg)
1546 self._Print("@@@STEP_WARNINGS@@@")
1547 self._Print("")
1548
David Jamesfcb70ef2011-02-02 16:02:30 -08001549 # Tell child threads to exit.
1550 self._Print("Merge complete")
David Jamesfcb70ef2011-02-02 16:02:30 -08001551
1552
Brian Harring30675052012-02-29 12:18:22 -08001553def main(argv):
David Jamesfcb70ef2011-02-02 16:02:30 -08001554
Brian Harring30675052012-02-29 12:18:22 -08001555 parallel_emerge_args = argv[:]
David Jamesfcb70ef2011-02-02 16:02:30 -08001556 deps = DepGraphGenerator()
Brian Harring30675052012-02-29 12:18:22 -08001557 deps.Initialize(parallel_emerge_args)
David Jamesfcb70ef2011-02-02 16:02:30 -08001558 emerge = deps.emerge
1559
1560 if emerge.action is not None:
Brian Harring30675052012-02-29 12:18:22 -08001561 argv = deps.ParseParallelEmergeArgs(argv)
1562 sys.exit(emerge_main(argv))
David Jamesfcb70ef2011-02-02 16:02:30 -08001563 elif not emerge.cmdline_packages:
1564 Usage()
1565 sys.exit(1)
1566
1567 # Unless we're in pretend mode, there's not much point running without
1568 # root access. We need to be able to install packages.
1569 #
1570 # NOTE: Even if you're running --pretend, it's a good idea to run
1571 # parallel_emerge with root access so that portage can write to the
1572 # dependency cache. This is important for performance.
1573 if "--pretend" not in emerge.opts and portage.secpass < 2:
1574 print "parallel_emerge: superuser access is required."
1575 sys.exit(1)
1576
1577 if "--quiet" not in emerge.opts:
1578 cmdline_packages = " ".join(emerge.cmdline_packages)
David Jamesfcb70ef2011-02-02 16:02:30 -08001579 print "Starting fast-emerge."
1580 print " Building package %s on %s" % (cmdline_packages,
1581 deps.board or "root")
David Jamesfcb70ef2011-02-02 16:02:30 -08001582
David James386ccd12011-05-04 20:17:42 -07001583 deps_tree, deps_info = deps.GenDependencyTree()
David Jamesfcb70ef2011-02-02 16:02:30 -08001584
1585 # You want me to be verbose? I'll give you two trees! Twice as much value.
1586 if "--tree" in emerge.opts and "--verbose" in emerge.opts:
1587 deps.PrintTree(deps_tree)
1588
David James386ccd12011-05-04 20:17:42 -07001589 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001590
1591 # OK, time to print out our progress so far.
1592 deps.PrintInstallPlan(deps_graph)
1593 if "--tree" in emerge.opts:
1594 PrintDepsMap(deps_graph)
1595
1596 # Are we upgrading portage? If so, and there are more packages to merge,
1597 # schedule a restart of parallel_emerge to merge the rest. This ensures that
1598 # we pick up all updates to portage settings before merging any more
1599 # packages.
1600 portage_upgrade = False
1601 root = emerge.settings["ROOT"]
1602 final_db = emerge.depgraph._dynamic_config.mydbapi[root]
1603 if root == "/":
1604 for db_pkg in final_db.match_pkgs("sys-apps/portage"):
1605 portage_pkg = deps_graph.get(db_pkg.cpv)
1606 if portage_pkg and len(deps_graph) > 1:
1607 portage_pkg["needs"].clear()
1608 portage_pkg["provides"].clear()
1609 deps_graph = { str(db_pkg.cpv): portage_pkg }
1610 portage_upgrade = True
1611 if "--quiet" not in emerge.opts:
1612 print "Upgrading portage first, then restarting..."
1613
1614 # Run the queued emerges.
1615 scheduler = EmergeQueue(deps_graph, emerge, deps.package_db, deps.show_output)
Brian Harringa43f5952012-04-12 01:19:34 -07001616 try:
1617 scheduler.Run()
1618 finally:
1619 scheduler._Shutdown()
David James97ce8902011-08-16 09:51:05 -07001620 scheduler = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001621
David Jamesfcb70ef2011-02-02 16:02:30 -08001622 # If we already upgraded portage, we don't need to do so again. But we do
1623 # need to upgrade the rest of the packages. So we'll go ahead and do that.
David Jamesebc3ae02011-05-21 20:46:10 -07001624 #
1625 # In order to grant the child permission to run setsid, we need to run sudo
1626 # again. We preserve SUDO_USER here in case an ebuild depends on it.
David Jamesfcb70ef2011-02-02 16:02:30 -08001627 if portage_upgrade:
Brian Harring30675052012-02-29 12:18:22 -08001628 args = ["sudo", "-E", "SUDO_USER=%s" % os.environ.get("SUDO_USER", "")]
Brian Harringef3e9832012-03-02 04:43:05 -08001629 args += [os.path.abspath(sys.argv[0])] + parallel_emerge_args
Brian Harring30675052012-02-29 12:18:22 -08001630 args += ["--exclude=sys-apps/portage"]
David Jamesebc3ae02011-05-21 20:46:10 -07001631 os.execvp("sudo", args)
David Jamesfcb70ef2011-02-02 16:02:30 -08001632
1633 print "Done"
1634 sys.exit(0)