blob: 4b80208e3a8dd0fe465f9bb325e167a3865cb390 [file] [log] [blame]
David Jamesfcb70ef2011-02-02 16:02:30 -08001#!/usr/bin/python2.6
2# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
3# Use of this source code is governed by a BSD-style license that can be
4# found in the LICENSE file.
5
6"""Program to run emerge in parallel, for significant speedup.
7
8Usage:
David James386ccd12011-05-04 20:17:42 -07009 ./parallel_emerge [--board=BOARD] [--workon=PKGS]
David Jamesfcb70ef2011-02-02 16:02:30 -080010 [--force-remote-binary=PKGS] [emerge args] package
11
David James78b6cd92012-04-02 21:36:12 -070012This script runs multiple emerge processes in parallel, using appropriate
13Portage APIs. It is faster than standard emerge because it has a
14multiprocess model instead of an asynchronous model.
David Jamesfcb70ef2011-02-02 16:02:30 -080015"""
16
17import codecs
18import copy
19import errno
Brian Harring8294d652012-05-23 02:20:52 -070020import gc
David James8c7e5e32011-06-28 11:26:03 -070021import heapq
David Jamesfcb70ef2011-02-02 16:02:30 -080022import multiprocessing
23import os
24import Queue
David Jamesfcb70ef2011-02-02 16:02:30 -080025import signal
26import sys
27import tempfile
Brian Harring8294d652012-05-23 02:20:52 -070028import threading
David Jamesfcb70ef2011-02-02 16:02:30 -080029import time
30import traceback
David Jamesfcb70ef2011-02-02 16:02:30 -080031
32# If PORTAGE_USERNAME isn't specified, scrape it from the $HOME variable. On
33# Chromium OS, the default "portage" user doesn't have the necessary
34# permissions. It'd be easier if we could default to $USERNAME, but $USERNAME
35# is "root" here because we get called through sudo.
36#
37# We need to set this before importing any portage modules, because portage
38# looks up "PORTAGE_USERNAME" at import time.
39#
40# NOTE: .bashrc sets PORTAGE_USERNAME = $USERNAME, so most people won't
41# encounter this case unless they have an old chroot or blow away the
42# environment by running sudo without the -E specifier.
if "PORTAGE_USERNAME" not in os.environ:
  homedir = os.environ.get("HOME")
  if homedir:
    # Derive the username from the home directory, e.g.
    # HOME=/home/foo -> PORTAGE_USERNAME=foo. If HOME is unset we leave
    # PORTAGE_USERNAME alone and portage falls back to its own default.
    os.environ["PORTAGE_USERNAME"] = os.path.basename(homedir)
47
48# Portage doesn't expose dependency trees in its public API, so we have to
49# make use of some private APIs here. These modules are found under
50# /usr/lib/portage/pym/.
51#
52# TODO(davidjames): Update Portage to expose public APIs for these features.
53from _emerge.actions import adjust_configs
54from _emerge.actions import load_emerge_config
55from _emerge.create_depgraph_params import create_depgraph_params
David James386ccd12011-05-04 20:17:42 -070056from _emerge.depgraph import backtrack_depgraph
David Jamesfcb70ef2011-02-02 16:02:30 -080057from _emerge.main import emerge_main
58from _emerge.main import parse_opts
59from _emerge.Package import Package
60from _emerge.Scheduler import Scheduler
David Jamesfcb70ef2011-02-02 16:02:30 -080061from _emerge.stdout_spinner import stdout_spinner
David James386ccd12011-05-04 20:17:42 -070062from portage._global_updates import _global_updates
David Jamesfcb70ef2011-02-02 16:02:30 -080063import portage
64import portage.debug
David Jamesfcb70ef2011-02-02 16:02:30 -080065
David Jamesfcb70ef2011-02-02 16:02:30 -080066def Usage():
67 """Print usage."""
68 print "Usage:"
David James386ccd12011-05-04 20:17:42 -070069 print " ./parallel_emerge [--board=BOARD] [--workon=PKGS]"
David Jamesfcb70ef2011-02-02 16:02:30 -080070 print " [--rebuild] [emerge args] package"
71 print
72 print "Packages specified as workon packages are always built from source."
David Jamesfcb70ef2011-02-02 16:02:30 -080073 print
74 print "The --workon argument is mainly useful when you want to build and"
75 print "install packages that you are working on unconditionally, but do not"
76 print "to have to rev the package to indicate you want to build it from"
77 print "source. The build_packages script will automatically supply the"
78 print "workon argument to emerge, ensuring that packages selected using"
79 print "cros-workon are rebuilt."
80 print
81 print "The --rebuild option rebuilds packages whenever their dependencies"
82 print "are changed. This ensures that your build is correct."
David Jamesfcb70ef2011-02-02 16:02:30 -080083
84
# Global start time, recorded when this module is imported.
GLOBAL_START = time.time()

# Whether process has been killed by a signal. A multiprocessing.Event
# so that the flag is visible across all worker processes.
KILLED = multiprocessing.Event()
90
David Jamesfcb70ef2011-02-02 16:02:30 -080091
class EmergeData(object):
  """Plain container for the emerge session state we pass around.

  Bundling these values into a single object lets us hand the complete
  emerge state (options, configuration, portage trees, and the computed
  dependency graph) to helpers as one unit. Every field starts out as
  None and is filled in as the session is configured.

  Fields:
    action: Long-form name of the emerge action the user requested
      (e.g. "unmerge"), or None when the user is installing packages.
    cmdline_packages: The packages listed on the command line.
    depgraph: The emerge dependency graph -- all packages involved in
      this merge, along with their versions.
    favorites: The list of candidates to add to the world file.
    opts: Dict of options passed to emerge, after parse_opts cleanup
      (e.g. "--usepkg=n" is simply dropped from the dict, so presence
      of a key generally means the flag is enabled; some options like
      --with-bdeps are passed through as-is).
    mtimedb: Portage's persistent global-state database, loaded from
      disk at startup and written back via mtimedb.commit(). Portage
      records resume state here; parallel_emerge does not, so the
      --resume option is unsupported.
    root_config: Portage configuration for the current root, holding
      the settings and the three portage trees for that root.
    scheduler_graph: Graph handed to the Scheduler object; we install
      no deps through it, so it is passed along mostly unused.
    settings: Portage settings for this session, largely drawn from
      make.conf inside the current install root.
    spinner: Portage's stdout spinner; we keep it in silent mode and
      do our own progress output.
    trees: Per-root portage trees; trees[root] (root taken from
      settings["ROOT"]) holds the vartree (installed packages),
      porttree (ebuilds), and bintree (binary packages).
  """

  __slots__ = ["action", "cmdline_packages", "depgraph", "favorites",
               "mtimedb", "opts", "root_config", "scheduler_graph",
               "settings", "spinner", "trees"]

  def __init__(self):
    # Initialize every slot to None; callers populate them as the
    # emerge configuration is loaded and dependencies are calculated.
    for attr in self.__slots__:
      setattr(self, attr, None)
182
183
class DepGraphGenerator(object):
  """Grab dependency information about packages from portage.

  Typical usage:
    deps = DepGraphGenerator()
    deps.Initialize(sys.argv[1:])
    deps_tree, deps_info = deps.GenDependencyTree()
    deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
    deps.PrintTree(deps_tree)
    PrintDepsMap(deps_graph)
  """

  __slots__ = ["board", "emerge", "package_db", "show_output"]

  def __init__(self):
    # Target board name (from --board=...), or None for the host root.
    self.board = None
    # Shared emerge session state (options, settings, trees, depgraph).
    self.emerge = EmergeData()
    # Maps package CPV -> portage Package object; filled in from
    # portage's install plan by GenDependencyTree.
    self.package_db = {}
    # Set by the parallel_emerge-specific --show-output flag.
    self.show_output = False
David Jamesfcb70ef2011-02-02 16:02:30 -0800203
204 def ParseParallelEmergeArgs(self, argv):
205 """Read the parallel emerge arguments from the command-line.
206
207 We need to be compatible with emerge arg format. We scrape arguments that
208 are specific to parallel_emerge, and pass through the rest directly to
209 emerge.
210 Args:
211 argv: arguments list
212 Returns:
213 Arguments that don't belong to parallel_emerge
214 """
215 emerge_args = []
216 for arg in argv:
217 # Specifically match arguments that are specific to parallel_emerge, and
218 # pass through the rest.
219 if arg.startswith("--board="):
220 self.board = arg.replace("--board=", "")
221 elif arg.startswith("--workon="):
222 workon_str = arg.replace("--workon=", "")
David James7a1ea4b2011-10-13 15:06:41 -0700223 emerge_args.append("--reinstall-atoms=%s" % workon_str)
224 emerge_args.append("--usepkg-exclude=%s" % workon_str)
David Jamesfcb70ef2011-02-02 16:02:30 -0800225 elif arg.startswith("--force-remote-binary="):
226 force_remote_binary = arg.replace("--force-remote-binary=", "")
David James7a1ea4b2011-10-13 15:06:41 -0700227 emerge_args.append("--useoldpkg-atoms=%s" % force_remote_binary)
David Jamesfcb70ef2011-02-02 16:02:30 -0800228 elif arg == "--show-output":
229 self.show_output = True
David James386ccd12011-05-04 20:17:42 -0700230 elif arg == "--rebuild":
David James7a1ea4b2011-10-13 15:06:41 -0700231 emerge_args.append("--rebuild-if-unbuilt")
David Jamesfcb70ef2011-02-02 16:02:30 -0800232 else:
233 # Not one of our options, so pass through to emerge.
234 emerge_args.append(arg)
235
David James386ccd12011-05-04 20:17:42 -0700236 # These packages take a really long time to build, so, for expediency, we
237 # are blacklisting them from automatic rebuilds because one of their
238 # dependencies needs to be recompiled.
239 for pkg in ("chromeos-base/chromeos-chrome", "media-plugins/o3d",
240 "dev-java/icedtea"):
David James7a1ea4b2011-10-13 15:06:41 -0700241 emerge_args.append("--rebuild-exclude=%s" % pkg)
David Jamesfcb70ef2011-02-02 16:02:30 -0800242
243 return emerge_args
244
  def Initialize(self, args):
    """Parse arguments and set up portage state.

    Side effects: mutates os.environ (board sysroot variables,
    interactive-delay overrides, option-derived variables) and fills in
    self.emerge with the loaded portage configuration.

    Args:
      args: Full argument list, including parallel_emerge-specific flags.

    Raises:
      SystemExit: if an emerge option unsupported by parallel_emerge
        (--ask, --ask-enter-invalid, --resume, --skipfirst) was given.
    """

    # Parse and strip out args that are just intended for parallel_emerge.
    emerge_args = self.ParseParallelEmergeArgs(args)

    # Setup various environment variables based on our current board. These
    # variables are normally setup inside emerge-${BOARD}, but since we don't
    # call that script, we have to set it up here. These variables serve to
    # point our tools at /build/BOARD and to setup cross compiles to the
    # appropriate board as configured in toolchain.conf.
    if self.board:
      os.environ["PORTAGE_CONFIGROOT"] = "/build/" + self.board
      os.environ["PORTAGE_SYSROOT"] = "/build/" + self.board
      os.environ["SYSROOT"] = "/build/" + self.board

      # Although CHROMEOS_ROOT isn't specific to boards, it's normally setup
      # inside emerge-${BOARD}, so we set it up here for compatibility. It
      # will be going away soon as we migrate to CROS_WORKON_SRCROOT.
      os.environ.setdefault("CHROMEOS_ROOT", os.environ["HOME"] + "/trunk")

    # Turn off interactive delays
    os.environ["EBEEP_IGNORE"] = "1"
    os.environ["EPAUSE_IGNORE"] = "1"
    os.environ["UNMERGE_DELAY"] = "0"

    # Parse the emerge options (first pass; re-parsed below once we know
    # EMERGE_DEFAULT_OPTS from the loaded config).
    action, opts, cmdline_packages = parse_opts(emerge_args, silent=True)

    # Set environment variables based on options. Portage normally sets these
    # environment variables in emerge_main, but we can't use that function,
    # because it also does a bunch of other stuff that we don't want.
    # TODO(davidjames): Patch portage to move this logic into a function we can
    # reuse here.
    if "--debug" in opts:
      os.environ["PORTAGE_DEBUG"] = "1"
    if "--config-root" in opts:
      os.environ["PORTAGE_CONFIGROOT"] = opts["--config-root"]
    if "--root" in opts:
      os.environ["ROOT"] = opts["--root"]
    if "--accept-properties" in opts:
      os.environ["ACCEPT_PROPERTIES"] = opts["--accept-properties"]

    # If we're installing packages to the board, and we're not using the
    # official flag, we can disable vardb locks. This is safe because we
    # only run up to one instance of parallel_emerge in parallel.
    if self.board and os.environ.get("CHROMEOS_OFFICIAL") != "1":
      os.environ.setdefault("PORTAGE_LOCKS", "false")

    # Now that we've setup the necessary environment variables, we can load the
    # emerge config from disk.
    settings, trees, mtimedb = load_emerge_config()

    # Add in EMERGE_DEFAULT_OPTS, if specified.
    tmpcmdline = []
    if "--ignore-default-opts" not in opts:
      tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
    tmpcmdline.extend(emerge_args)
    action, opts, cmdline_packages = parse_opts(tmpcmdline)

    # If we're installing to the board, we want the --root-deps option so that
    # portage will install the build dependencies to that location as well.
    if self.board:
      opts.setdefault("--root-deps", True)

    # Check whether our portage tree is out of date. Typically, this happens
    # when you're setting up a new portage tree, such as in setup_board and
    # make_chroot. In that case, portage applies a bunch of global updates
    # here. Once the updates are finished, we need to commit any changes
    # that the global update made to our mtimedb, and reload the config.
    #
    # Portage normally handles this logic in emerge_main, but again, we can't
    # use that function here.
    if _global_updates(trees, mtimedb["updates"]):
      mtimedb.commit()
      settings, trees, mtimedb = load_emerge_config(trees=trees)

    # Setup implied options. Portage normally handles this logic in
    # emerge_main.
    if "--buildpkgonly" in opts or "buildpkg" in settings.features:
      opts.setdefault("--buildpkg", True)
    if "--getbinpkgonly" in opts:
      opts.setdefault("--usepkgonly", True)
      opts.setdefault("--getbinpkg", True)
    if "getbinpkg" in settings.features:
      # Per emerge_main, FEATURES=getbinpkg overrides --getbinpkg=n
      opts["--getbinpkg"] = True
    if "--getbinpkg" in opts or "--usepkgonly" in opts:
      opts.setdefault("--usepkg", True)
    if "--fetch-all-uri" in opts:
      opts.setdefault("--fetchonly", True)
    if "--skipfirst" in opts:
      opts.setdefault("--resume", True)
    if "--buildpkgonly" in opts:
      # --buildpkgonly will not merge anything, so it overrides all binary
      # package options.
      for opt in ("--getbinpkg", "--getbinpkgonly",
                  "--usepkg", "--usepkgonly"):
        opts.pop(opt, None)
    if (settings.get("PORTAGE_DEBUG", "") == "1" and
        "python-trace" in settings.features):
      portage.debug.set_trace(True)

    # Complain about unsupported options
    for opt in ("--ask", "--ask-enter-invalid", "--resume", "--skipfirst"):
      if opt in opts:
        print "%s is not supported by parallel_emerge" % opt
        sys.exit(1)

    # Make emerge specific adjustments to the config (e.g. colors!)
    adjust_configs(opts, trees)

    # Save our configuration so far in the emerge object
    emerge = self.emerge
    emerge.action, emerge.opts = action, opts
    emerge.settings, emerge.trees, emerge.mtimedb = settings, trees, mtimedb
    emerge.cmdline_packages = cmdline_packages
    root = settings["ROOT"]
    emerge.root_config = trees[root]["root_config"]

    # Pre-populate the binary package tree if binary packages may be used.
    if "--usepkg" in opts:
      emerge.trees[root]["bintree"].populate("--getbinpkg" in opts)
367
David Jamesfcb70ef2011-02-02 16:02:30 -0800368 def CreateDepgraph(self, emerge, packages):
369 """Create an emerge depgraph object."""
370 # Setup emerge options.
371 emerge_opts = emerge.opts.copy()
372
David James386ccd12011-05-04 20:17:42 -0700373 # Ask portage to build a dependency graph. with the options we specified
374 # above.
David Jamesfcb70ef2011-02-02 16:02:30 -0800375 params = create_depgraph_params(emerge_opts, emerge.action)
David Jamesbf1e3442011-05-28 07:44:20 -0700376 success, depgraph, favorites = backtrack_depgraph(
David James386ccd12011-05-04 20:17:42 -0700377 emerge.settings, emerge.trees, emerge_opts, params, emerge.action,
378 packages, emerge.spinner)
379 emerge.depgraph = depgraph
David Jamesfcb70ef2011-02-02 16:02:30 -0800380
David James386ccd12011-05-04 20:17:42 -0700381 # Is it impossible to honor the user's request? Bail!
382 if not success:
383 depgraph.display_problems()
384 sys.exit(1)
David Jamesfcb70ef2011-02-02 16:02:30 -0800385
386 emerge.depgraph = depgraph
David Jamesbf1e3442011-05-28 07:44:20 -0700387 emerge.favorites = favorites
David Jamesfcb70ef2011-02-02 16:02:30 -0800388
David Jamesdeebd692011-05-09 17:02:52 -0700389 # Prime and flush emerge caches.
390 root = emerge.settings["ROOT"]
391 vardb = emerge.trees[root]["vartree"].dbapi
David James0bdc5de2011-05-12 16:22:26 -0700392 if "--pretend" not in emerge.opts:
393 vardb.counter_tick()
David Jamesdeebd692011-05-09 17:02:52 -0700394 vardb.flush_cache()
395
  def GenDependencyTree(self):
    """Get dependency tree info from emerge.

    Returns:
      A (deps_tree, deps_info) pair. deps_tree maps each package CPV to
      a dict of its "action" and its "deps" (each dep mapping to its own
      action/deptypes/deps). deps_info maps each CPV in portage's
      install plan to {"idx": N}, its position in that plan.
    """
    start = time.time()

    emerge = self.emerge

    # Create a list of packages to merge
    packages = set(emerge.cmdline_packages[:])

    # Tell emerge to be quiet. We print plenty of info ourselves so we don't
    # need any extra output from portage.
    portage.util.noiselimit = -1

    # My favorite feature: The silent spinner. It doesn't spin. Ever.
    # I'd disable the colors by default too, but they look kind of cool.
    emerge.spinner = stdout_spinner()
    emerge.spinner.update = emerge.spinner.update_quiet

    if "--quiet" not in emerge.opts:
      print "Calculating deps..."

    self.CreateDepgraph(emerge, packages)
    depgraph = emerge.depgraph

    # Build our own tree from the emerge digraph.
    # NOTE: _dynamic_config is a private portage API -- see the TODO at
    # the top of this file about exposing public equivalents.
    deps_tree = {}
    digraph = depgraph._dynamic_config.digraph
    root = emerge.settings["ROOT"]
    final_db = depgraph._dynamic_config.mydbapi[root]
    for node, node_deps in digraph.nodes.items():
      # Calculate dependency packages that need to be installed first. Each
      # child on the digraph is a dependency. The "operation" field specifies
      # what we're doing (e.g. merge, uninstall, etc.). The "priorities" array
      # contains the type of dependency (e.g. build, runtime, runtime_post,
      # etc.)
      #
      # Portage refers to the identifiers for packages as a CPV. This acronym
      # stands for Component/Path/Version.
      #
      # Here's an example CPV: chromeos-base/power_manager-0.0.1-r1
      # Split up, this CPV would be:
      # C -- Component: chromeos-base
      # P -- Path: power_manager
      # V -- Version: 0.0.1-r1
      #
      # We just refer to CPVs as packages here because it's easier.
      deps = {}
      for child, priorities in node_deps[0].items():
        if isinstance(child, Package) and child.root == root:
          cpv = str(child.cpv)
          action = str(child.operation)

          # If we're uninstalling a package, check whether Portage is
          # installing a replacement. If so, just depend on the installation
          # of the new package, because the old package will automatically
          # be uninstalled at that time.
          if action == "uninstall":
            for pkg in final_db.match_pkgs(child.slot_atom):
              cpv = str(pkg.cpv)
              action = "merge"
              break

          deps[cpv] = dict(action=action,
                           deptypes=[str(x) for x in priorities],
                           deps={})

      # We've built our list of deps, so we can add our package to the tree.
      if isinstance(node, Package) and node.root == root:
        deps_tree[str(node.cpv)] = dict(action=str(node.operation),
                                        deps=deps)

    # Ask portage for its install plan, so that we can only throw out
    # dependencies that portage throws out. The "idx" records portage's
    # install order, which SanitizeTree later uses to decide which edge
    # of a cycle to break.
    deps_info = {}
    for pkg in depgraph.altlist():
      if isinstance(pkg, Package):
        assert pkg.root == root
        self.package_db[pkg.cpv] = pkg

        # Save off info about the package
        deps_info[str(pkg.cpv)] = {"idx": len(deps_info)}

    seconds = time.time() - start
    if "--quiet" not in emerge.opts:
      print "Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60)

    return deps_tree, deps_info
487
488 def PrintTree(self, deps, depth=""):
489 """Print the deps we have seen in the emerge output.
490
491 Args:
492 deps: Dependency tree structure.
493 depth: Allows printing the tree recursively, with indentation.
494 """
495 for entry in sorted(deps):
496 action = deps[entry]["action"]
497 print "%s %s (%s)" % (depth, entry, action)
498 self.PrintTree(deps[entry]["deps"], depth=depth + " ")
499
David James386ccd12011-05-04 20:17:42 -0700500 def GenDependencyGraph(self, deps_tree, deps_info):
David Jamesfcb70ef2011-02-02 16:02:30 -0800501 """Generate a doubly linked dependency graph.
502
503 Args:
504 deps_tree: Dependency tree structure.
505 deps_info: More details on the dependencies.
506 Returns:
507 Deps graph in the form of a dict of packages, with each package
508 specifying a "needs" list and "provides" list.
509 """
510 emerge = self.emerge
511 root = emerge.settings["ROOT"]
512
David Jamesfcb70ef2011-02-02 16:02:30 -0800513 # deps_map is the actual dependency graph.
514 #
515 # Each package specifies a "needs" list and a "provides" list. The "needs"
516 # list indicates which packages we depend on. The "provides" list
517 # indicates the reverse dependencies -- what packages need us.
518 #
519 # We also provide some other information in the dependency graph:
520 # - action: What we're planning on doing with this package. Generally,
521 # "merge", "nomerge", or "uninstall"
David Jamesfcb70ef2011-02-02 16:02:30 -0800522 deps_map = {}
523
524 def ReverseTree(packages):
525 """Convert tree to digraph.
526
527 Take the tree of package -> requirements and reverse it to a digraph of
528 buildable packages -> packages they unblock.
529 Args:
530 packages: Tree(s) of dependencies.
531 Returns:
532 Unsanitized digraph.
533 """
David James8c7e5e32011-06-28 11:26:03 -0700534 binpkg_phases = set(["setup", "preinst", "postinst"])
David James3f778802011-08-25 19:31:45 -0700535 needed_dep_types = set(["blocker", "buildtime", "runtime"])
David Jamesfcb70ef2011-02-02 16:02:30 -0800536 for pkg in packages:
537
538 # Create an entry for the package
539 action = packages[pkg]["action"]
David James8c7e5e32011-06-28 11:26:03 -0700540 default_pkg = {"needs": {}, "provides": set(), "action": action,
541 "nodeps": False, "binary": False}
David Jamesfcb70ef2011-02-02 16:02:30 -0800542 this_pkg = deps_map.setdefault(pkg, default_pkg)
543
David James8c7e5e32011-06-28 11:26:03 -0700544 if pkg in deps_info:
545 this_pkg["idx"] = deps_info[pkg]["idx"]
546
547 # If a package doesn't have any defined phases that might use the
548 # dependent packages (i.e. pkg_setup, pkg_preinst, or pkg_postinst),
549 # we can install this package before its deps are ready.
550 emerge_pkg = self.package_db.get(pkg)
551 if emerge_pkg and emerge_pkg.type_name == "binary":
552 this_pkg["binary"] = True
553 defined_phases = emerge_pkg.metadata.defined_phases
554 defined_binpkg_phases = binpkg_phases.intersection(defined_phases)
555 if not defined_binpkg_phases:
556 this_pkg["nodeps"] = True
557
David Jamesfcb70ef2011-02-02 16:02:30 -0800558 # Create entries for dependencies of this package first.
559 ReverseTree(packages[pkg]["deps"])
560
561 # Add dependencies to this package.
562 for dep, dep_item in packages[pkg]["deps"].iteritems():
David James8c7e5e32011-06-28 11:26:03 -0700563 # We only need to enforce strict ordering of dependencies if the
David James3f778802011-08-25 19:31:45 -0700564 # dependency is a blocker, or is a buildtime or runtime dependency.
565 # (I.e., ignored, optional, and runtime_post dependencies don't
566 # depend on ordering.)
David James8c7e5e32011-06-28 11:26:03 -0700567 dep_types = dep_item["deptypes"]
568 if needed_dep_types.intersection(dep_types):
569 deps_map[dep]["provides"].add(pkg)
570 this_pkg["needs"][dep] = "/".join(dep_types)
David Jamesfcb70ef2011-02-02 16:02:30 -0800571
David James3f778802011-08-25 19:31:45 -0700572 # If there's a blocker, Portage may need to move files from one
573 # package to another, which requires editing the CONTENTS files of
574 # both packages. To avoid race conditions while editing this file,
575 # the two packages must not be installed in parallel, so we can't
576 # safely ignore dependencies. See http://crosbug.com/19328
577 if "blocker" in dep_types:
578 this_pkg["nodeps"] = False
579
David Jamesfcb70ef2011-02-02 16:02:30 -0800580 def FindCycles():
581 """Find cycles in the dependency tree.
582
583 Returns:
584 A dict mapping cyclic packages to a dict of the deps that cause
585 cycles. For each dep that causes cycles, it returns an example
586 traversal of the graph that shows the cycle.
587 """
588
589 def FindCyclesAtNode(pkg, cycles, unresolved, resolved):
590 """Find cycles in cyclic dependencies starting at specified package.
591
592 Args:
593 pkg: Package identifier.
594 cycles: A dict mapping cyclic packages to a dict of the deps that
595 cause cycles. For each dep that causes cycles, it returns an
596 example traversal of the graph that shows the cycle.
597 unresolved: Nodes that have been visited but are not fully processed.
598 resolved: Nodes that have been visited and are fully processed.
599 """
600 pkg_cycles = cycles.get(pkg)
601 if pkg in resolved and not pkg_cycles:
602 # If we already looked at this package, and found no cyclic
603 # dependencies, we can stop now.
604 return
605 unresolved.append(pkg)
606 for dep in deps_map[pkg]["needs"]:
607 if dep in unresolved:
608 idx = unresolved.index(dep)
609 mycycle = unresolved[idx:] + [dep]
610 for i in range(len(mycycle) - 1):
611 pkg1, pkg2 = mycycle[i], mycycle[i+1]
612 cycles.setdefault(pkg1, {}).setdefault(pkg2, mycycle)
613 elif not pkg_cycles or dep not in pkg_cycles:
614 # Looks like we haven't seen this edge before.
615 FindCyclesAtNode(dep, cycles, unresolved, resolved)
616 unresolved.pop()
617 resolved.add(pkg)
618
619 cycles, unresolved, resolved = {}, [], set()
620 for pkg in deps_map:
621 FindCyclesAtNode(pkg, cycles, unresolved, resolved)
622 return cycles
623
David James386ccd12011-05-04 20:17:42 -0700624 def RemoveUnusedPackages():
David Jamesfcb70ef2011-02-02 16:02:30 -0800625 """Remove installed packages, propagating dependencies."""
David Jamesfcb70ef2011-02-02 16:02:30 -0800626 # Schedule packages that aren't on the install list for removal
627 rm_pkgs = set(deps_map.keys()) - set(deps_info.keys())
628
David Jamesfcb70ef2011-02-02 16:02:30 -0800629 # Remove the packages we don't want, simplifying the graph and making
630 # it easier for us to crack cycles.
631 for pkg in sorted(rm_pkgs):
632 this_pkg = deps_map[pkg]
633 needs = this_pkg["needs"]
634 provides = this_pkg["provides"]
635 for dep in needs:
636 dep_provides = deps_map[dep]["provides"]
637 dep_provides.update(provides)
638 dep_provides.discard(pkg)
639 dep_provides.discard(dep)
640 for target in provides:
641 target_needs = deps_map[target]["needs"]
642 target_needs.update(needs)
643 target_needs.pop(pkg, None)
644 target_needs.pop(target, None)
645 del deps_map[pkg]
646
647 def PrintCycleBreak(basedep, dep, mycycle):
648 """Print details about a cycle that we are planning on breaking.
649
650 We are breaking a cycle where dep needs basedep. mycycle is an
651 example cycle which contains dep -> basedep."""
652
David Jamesfcb70ef2011-02-02 16:02:30 -0800653 needs = deps_map[dep]["needs"]
654 depinfo = needs.get(basedep, "deleted")
David Jamesfcb70ef2011-02-02 16:02:30 -0800655
David James3f778802011-08-25 19:31:45 -0700656 # It's OK to swap install order for blockers, as long as the two
657 # packages aren't installed in parallel. If there is a cycle, then
658 # we know the packages depend on each other already, so we can drop the
659 # blocker safely without printing a warning.
660 if depinfo == "blocker":
661 return
662
David Jamesfcb70ef2011-02-02 16:02:30 -0800663 # Notify the user that we're breaking a cycle.
664 print "Breaking %s -> %s (%s)" % (dep, basedep, depinfo)
665
666 # Show cycle.
667 for i in range(len(mycycle) - 1):
668 pkg1, pkg2 = mycycle[i], mycycle[i+1]
669 needs = deps_map[pkg1]["needs"]
670 depinfo = needs.get(pkg2, "deleted")
671 if pkg1 == dep and pkg2 == basedep:
672 depinfo = depinfo + ", deleting"
673 print " %s -> %s (%s)" % (pkg1, pkg2, depinfo)
674
675 def SanitizeTree():
676 """Remove circular dependencies.
677
678 We prune all dependencies involved in cycles that go against the emerge
679 ordering. This has a nice property: we're guaranteed to merge
680 dependencies in the same order that portage does.
681
682 Because we don't treat any dependencies as "soft" unless they're killed
683 by a cycle, we pay attention to a larger number of dependencies when
684 merging. This hurts performance a bit, but helps reliability.
685 """
686 start = time.time()
687 cycles = FindCycles()
688 while cycles:
689 for dep, mycycles in cycles.iteritems():
690 for basedep, mycycle in mycycles.iteritems():
691 if deps_info[basedep]["idx"] >= deps_info[dep]["idx"]:
Matt Tennant08797302011-10-17 16:18:45 -0700692 if "--quiet" not in emerge.opts:
693 PrintCycleBreak(basedep, dep, mycycle)
David Jamesfcb70ef2011-02-02 16:02:30 -0800694 del deps_map[dep]["needs"][basedep]
695 deps_map[basedep]["provides"].remove(dep)
696 cycles = FindCycles()
697 seconds = time.time() - start
698 if "--quiet" not in emerge.opts and seconds >= 0.1:
699 print "Tree sanitized in %dm%.1fs" % (seconds / 60, seconds % 60)
700
David James8c7e5e32011-06-28 11:26:03 -0700701 def FindRecursiveProvides(pkg, seen):
702 """Find all nodes that require a particular package.
703
704 Assumes that graph is acyclic.
705
706 Args:
707 pkg: Package identifier.
708 seen: Nodes that have been visited so far.
709 """
710 if pkg in seen:
711 return
712 seen.add(pkg)
713 info = deps_map[pkg]
714 info["tprovides"] = info["provides"].copy()
715 for dep in info["provides"]:
716 FindRecursiveProvides(dep, seen)
717 info["tprovides"].update(deps_map[dep]["tprovides"])
718
    ReverseTree(deps_tree)

    # We need to remove unused packages so that we can use the dependency
    # ordering of the install process to show us what cycles to crack.
    RemoveUnusedPackages()
    SanitizeTree()
    # Compute transitive reverse-dependency sets ("tprovides") for every
    # node; sharing one `seen` set means each node is visited only once
    # across the whole loop.
    seen = set()
    for pkg in deps_map:
      FindRecursiveProvides(pkg, seen)
    return deps_map
729
730 def PrintInstallPlan(self, deps_map):
731 """Print an emerge-style install plan.
732
733 The install plan lists what packages we're installing, in order.
734 It's useful for understanding what parallel_emerge is doing.
735
736 Args:
737 deps_map: The dependency graph.
738 """
739
740 def InstallPlanAtNode(target, deps_map):
741 nodes = []
742 nodes.append(target)
743 for dep in deps_map[target]["provides"]:
744 del deps_map[dep]["needs"][target]
745 if not deps_map[dep]["needs"]:
746 nodes.extend(InstallPlanAtNode(dep, deps_map))
747 return nodes
748
749 deps_map = copy.deepcopy(deps_map)
750 install_plan = []
751 plan = set()
752 for target, info in deps_map.iteritems():
753 if not info["needs"] and target not in plan:
754 for item in InstallPlanAtNode(target, deps_map):
755 plan.add(item)
756 install_plan.append(self.package_db[item])
757
758 for pkg in plan:
759 del deps_map[pkg]
760
761 if deps_map:
762 print "Cyclic dependencies:", " ".join(deps_map)
763 PrintDepsMap(deps_map)
764 sys.exit(1)
765
766 self.emerge.depgraph.display(install_plan)
767
768
769def PrintDepsMap(deps_map):
770 """Print dependency graph, for each package list it's prerequisites."""
771 for i in sorted(deps_map):
772 print "%s: (%s) needs" % (i, deps_map[i]["action"])
773 needs = deps_map[i]["needs"]
774 for j in sorted(needs):
775 print " %s" % (j)
776 if not needs:
777 print " no dependencies"
778
779
class EmergeJobState(object):
  """Bookkeeping for one fetch or build job handed to a worker."""

  __slots__ = ["done", "filename", "last_notify_timestamp", "last_output_seek",
               "last_output_timestamp", "pkgname", "retcode", "start_timestamp",
               "target", "fetch_only"]

  def __init__(self, target, pkgname, done, filename, start_timestamp,
               retcode=None, fetch_only=False):
    # Full package identifier (e.g. chromeos-base/chromeos-0.0.1-r60).
    self.target = target
    # Short package name (e.g. chromeos-0.0.1-r60).
    self.pkgname = pkgname
    # True once the job has finished.
    self.done = done
    # Log file currently collecting this job's output.
    self.filename = filename
    # When the job started.
    self.start_timestamp = start_timestamp
    # Last time we announced the log file name; the announcement happens
    # when the job begins, so this starts at start_timestamp.
    self.last_notify_timestamp = start_timestamp
    # Byte offset just past the last complete log line already echoed, so
    # re-printing the same ebuild's log resumes at the right spot.
    self.last_output_seek = 0
    # Last time any output was echoed; zero means "never yet".
    self.last_output_timestamp = 0
    # Exit status, or None while the job is still running.
    self.retcode = retcode
    # True when this job only fetches and does not build.
    self.fetch_only = fetch_only
823
824
def KillHandler(signum, frame):
  # Kill self and all subprocesses by signalling our whole process group.
  # SIGKILL cannot be caught, so this never returns. This handler is
  # installed after a first SIGINT/SIGTERM has been handled, making any
  # second signal immediately fatal.
  os.killpg(0, signal.SIGKILL)
828
def SetupWorkerSignals():
  """Install SIGINT/SIGTERM handlers for worker processes.

  The first signal quietly records shutdown in the shared KILLED event,
  so children don't spew KeyboardInterrupt tracebacks when the user hits
  CTRL-C; a second signal falls through to KillHandler and kills the
  whole process group at once.
  """
  def OnFirstSignal(signum, frame):
    # Record that we were asked to die; the worker loops poll KILLED.
    KILLED.set()

    # Re-arm with the hard handler so a repeated signal is fatal instead
    # of re-entering this handler recursively.
    for sig in (signal.SIGINT, signal.SIGTERM):
      signal.signal(sig, KillHandler)

  for sig in (signal.SIGINT, signal.SIGTERM):
    signal.signal(sig, OnFirstSignal)
844
def EmergeProcess(scheduler, output):
  """Merge a package in a subprocess.

  Args:
    scheduler: Scheduler object.
    output: Temporary file to write output.

  Returns:
    The child's exit status as encoded by os.waitpid (callers only
    compare it against zero).
  """
  pid = os.fork()
  if pid == 0:
    # Child: run the merge with stdout/stderr captured, then leave via
    # os._exit so we never unwind the parent's multiprocessing state.
    try:
      # Sanity checks.
      if sys.stdout.fileno() != 1: raise Exception("sys.stdout.fileno() != 1")
      if sys.stderr.fileno() != 2: raise Exception("sys.stderr.fileno() != 2")

      # - Redirect 1 (stdout) and 2 (stderr) at our temporary file.
      # - Redirect 0 to point at sys.stdin. In this case, sys.stdin
      #   points at a file reading os.devnull, because multiprocessing mucks
      #   with sys.stdin.
      # - Leave the sys.stdin and output filehandles alone.
      fd_pipes = {0: sys.stdin.fileno(),
                  1: output.fileno(),
                  2: output.fileno(),
                  sys.stdin.fileno(): sys.stdin.fileno(),
                  output.fileno(): output.fileno()}
      portage.process._setup_pipes(fd_pipes)

      # Portage doesn't like when sys.stdin.fileno() != 0, so point sys.stdin
      # at the filehandle we just created in _setup_pipes.
      if sys.stdin.fileno() != 0:
        sys.stdin = os.fdopen(0, "r")

      # Actually do the merge.
      retval = scheduler.merge()

    # We catch all exceptions here (including SystemExit, KeyboardInterrupt,
    # etc) so as to ensure that we don't confuse the multiprocessing module,
    # which expects that all forked children exit with os._exit().
    except:
      traceback.print_exc(file=output)
      retval = 1
    # Flush explicitly: os._exit skips normal interpreter cleanup, so
    # buffered output would otherwise be lost.
    sys.stdout.flush()
    sys.stderr.flush()
    output.flush()
    os._exit(retval)
  else:
    # Parent: wait for the child and return its exit status.
    return os.waitpid(pid, 0)[1]
David Jamesfcb70ef2011-02-02 16:02:30 -0800895
def EmergeWorker(task_queue, job_queue, emerge, package_db, fetch_only=False):
  """This worker emerges any packages given to it on the task_queue.

  Args:
    task_queue: The queue of tasks for this worker to do.
    job_queue: The queue of results from the worker.
    emerge: An EmergeData() object.
    package_db: A dict, mapping package ids to portage Package objects.
    fetch_only: A bool, indicating if we should just fetch the target.

  It expects package identifiers to be passed to it via task_queue. When
  a task is started, it pushes the (target, filename) to the started_queue.
  The output is stored in filename. When a merge starts or finishes, we push
  EmergeJobState objects to the job_queue.
  """

  SetupWorkerSignals()
  settings, trees, mtimedb = emerge.settings, emerge.trees, emerge.mtimedb

  # Disable flushing of caches to save on I/O.
  root = emerge.settings["ROOT"]
  vardb = emerge.trees[root]["vartree"].dbapi
  vardb._flush_cache_enabled = False
  bindb = emerge.trees[root]["bintree"].dbapi
  # Might be a set, might be a list, might be None; no clue, just use shallow
  # copy to ensure we can roll it back.
  original_remotepkgs = copy.copy(bindb.bintree._remotepkgs)

  opts, spinner = emerge.opts, emerge.spinner
  opts["--nodeps"] = True
  if fetch_only:
    opts["--fetchonly"] = True

  while True:
    # Wait for a new item to show up on the queue. This is a blocking wait,
    # so if there's nothing to do, we just sit here.
    pkg_state = task_queue.get()
    if pkg_state is None:
      # If target is None, this means that the main thread wants us to quit.
      # The other workers need to exit too, so we'll push the message back on
      # to the queue so they'll get it too.
      task_queue.put(None)
      return
    if KILLED.is_set():
      return

    target = pkg_state.target

    db_pkg = package_db[target]

    if db_pkg.type_name == "binary":
      if not fetch_only and pkg_state.fetched_successfully:
        # Ensure portage doesn't think our pkg is remote- else it'll force
        # a redownload of it (even if the on-disk file is fine). In-memory
        # caching basically, implemented dumbly.
        bindb.bintree._remotepkgs = None
      else:
        # Bug fix: restore the real attribute. The old code assigned to
        # "bintree_remotepkgs" (a typo), so once _remotepkgs had been
        # cleared above it was never actually restored.
        bindb.bintree._remotepkgs = original_remotepkgs

    db_pkg.root_config = emerge.root_config
    install_list = [db_pkg]
    pkgname = db_pkg.pf
    output = tempfile.NamedTemporaryFile(prefix=pkgname + "-", delete=False)
    # Bug fix: make the log rw-r--r--. The old decimal literal 644
    # (== 0o1204) set bogus permission bits; octal 0o644 is the intent
    # (the 0o form is valid from Python 2.6 on).
    os.chmod(output.name, 0o644)
    start_timestamp = time.time()
    job = EmergeJobState(target, pkgname, False, output.name, start_timestamp,
                         fetch_only=fetch_only)
    job_queue.put(job)
    if "--pretend" in opts:
      retcode = 0
    else:
      try:
        emerge.scheduler_graph.mergelist = install_list
        scheduler = Scheduler(settings, trees, mtimedb, opts, spinner,
            favorites=emerge.favorites, graph_config=emerge.scheduler_graph)

        # Enable blocker handling even though we're in --nodeps mode. This
        # allows us to unmerge the blocker after we've merged the replacement.
        scheduler._opts_ignore_blockers = frozenset()

        retcode = EmergeProcess(scheduler, output)
      except Exception:
        traceback.print_exc(file=output)
        retcode = 1
      output.close()

    if KILLED.is_set():
      return

    job = EmergeJobState(target, pkgname, True, output.name, start_timestamp,
                         retcode, fetch_only=fetch_only)
    job_queue.put(job)
988
989
class LinePrinter(object):
  """Print-queue work item that emits a single pre-formatted line."""

  def __init__(self, line):
    # The text to print.
    self.line = line

  def Print(self, seek_locations):
    # seek_locations is the shared log-offset map; it is unused here but
    # part of the Print interface shared with JobPrinter (see PrintWorker).
    print self.line
998
999
class JobPrinter(object):
  """Helper object to print output of a job."""

  def __init__(self, job, unlink=False):
    """Print output of job.

    If unlink is True, unlink the job output file when done."""
    # Snapshot the time at enqueue so the elapsed figure reflects when the
    # print request was made, not when it is finally serviced.
    self.current_time = time.time()
    self.job = job
    self.unlink = unlink

  def Print(self, seek_locations):
    """Echo the job's log from the offset recorded in seek_locations."""

    job = self.job

    # Calculate how long the job has been running.
    seconds = self.current_time - job.start_timestamp

    # Note that we've printed out the job so far.
    job.last_output_timestamp = self.current_time

    # Note that we're starting the job
    info = "job %s (%dm%.1fs)" % (job.pkgname, seconds / 60, seconds % 60)
    last_output_seek = seek_locations.get(job.filename, 0)
    if last_output_seek:
      print "=== Continue output for %s ===" % info
    else:
      print "=== Start output for %s ===" % info

    # Print actual output from job
    f = codecs.open(job.filename, encoding='utf-8', errors='replace')
    f.seek(last_output_seek)
    prefix = job.pkgname + ":"
    for line in f:

      # Save off our position in the file. Only complete lines advance the
      # offset, so a partially-written trailing line is re-printed in full
      # on the next call.
      if line and line[-1] == "\n":
        last_output_seek = f.tell()
        line = line[:-1]

      # Print our line
      print prefix, line.encode('utf-8', 'replace')
    f.close()

    # Save our last spot in the file so that we don't print out the same
    # location twice.
    seek_locations[job.filename] = last_output_seek

    # Note end of output section
    if job.done:
      print "=== Complete: %s ===" % info
    else:
      print "=== Still running: %s ===" % info

    if self.unlink:
      os.unlink(job.filename)
1056
1057
def PrintWorker(queue):
  """Worker loop: run print jobs from `queue` until a None sentinel arrives."""

  def OnSignal(signum, frame):
    # Remember that shutdown was requested; the parent coordinates the
    # actual exit and pushes the None sentinel when it is time.
    KILLED.set()

    # Re-arm with the hard handler so a second signal kills us outright.
    for sig in (signal.SIGINT, signal.SIGTERM):
      signal.signal(sig, KillHandler)

  # Survive the first SIGINT / SIGTERM: the parent worker handles it and
  # tells us when we need to exit.
  for sig in (signal.SIGINT, signal.SIGTERM):
    signal.signal(sig, OnSignal)

  # Byte offset reached so far in each log file, shared across Print jobs
  # so repeated output from the same ebuild resumes where it left off.
  seek_locations = {}
  while True:
    try:
      job = queue.get()
      if not job:
        break
      job.Print(seek_locations)
      sys.stdout.flush()
    except IOError as ex:
      if ex.errno != errno.EINTR:
        raise
      # A signal interrupted the syscall; keep printing.
1091
Brian Harring867e2362012-03-17 04:05:17 -07001092
class TargetState(object):
  """Mutable scheduling state for a single package target.

  The `score` tuple is arranged so that heap ordering treats the most
  urgent target as smallest: more transitive dependents first, fewer
  unmet needs first, binary packages before source builds, and so on.
  """

  __slots__ = ("target", "info", "score", "prefetched", "fetched_successfully")

  def __init__(self, target, info, fetched=False):
    """Initialize the state.

    Args:
      target: Package identifier string.
      info: This target's entry in the deps_map graph.
      fetched: Whether the package was already fetched successfully.
    """
    self.target, self.info = target, info
    # Bug fix: honor the `fetched` argument — it used to be ignored and
    # fetched_successfully was hard-coded to False. The False default
    # keeps existing callers' behavior unchanged.
    self.fetched_successfully = fetched
    self.prefetched = False
    self.update_score()

  def __cmp__(self, other):
    # Python 2 ordering hook; heapq relies on this via ScoredHeap.
    return cmp(self.score, other.score)

  def update_score(self):
    """Recompute `score` after the underlying dependency info changes."""
    self.score = (
        -len(self.info["tprovides"]),
        len(self.info["needs"]),
        not self.info["binary"],
        -len(self.info["provides"]),
        self.info["idx"],
        self.target,
        )
1115
1116
class ScoredHeap(object):
  """Min-heap of TargetState objects with O(1) membership testing.

  `heap` holds the states ordered by their score tuples; `_heap_set`
  mirrors the contained target names so `target in heap` is cheap.
  """

  __slots__ = ("heap", "_heap_set")

  def __init__(self, initial=()):
    self.heap = []
    self._heap_set = set()
    if initial:
      self.multi_put(initial)

  def get(self):
    """Pop and return the best (lowest-scored) state."""
    best = heapq.heappop(self.heap)
    self._heap_set.remove(best.target)
    return best

  def put(self, item):
    """Push a single TargetState onto the heap."""
    if not isinstance(item, TargetState):
      raise ValueError("Item %r isn't a TargetState" % (item,))
    heapq.heappush(self.heap, item)
    self._heap_set.add(item.target)

  def multi_put(self, sequence):
    """Bulk-add states, restoring the heap invariant just once."""
    items = list(sequence)
    self.heap.extend(items)
    self._heap_set.update(entry.target for entry in items)
    self.sort()

  def sort(self):
    """Re-establish the heap invariant (e.g. after scores changed)."""
    heapq.heapify(self.heap)

  def __contains__(self, target):
    return target in self._heap_set

  def __nonzero__(self):
    return bool(self.heap)

  def __len__(self):
    return len(self.heap)
1155
1156
David Jamesfcb70ef2011-02-02 16:02:30 -08001157class EmergeQueue(object):
1158 """Class to schedule emerge jobs according to a dependency graph."""
1159
  def __init__(self, deps_map, emerge, package_db, show_output):
    """Set up worker pools, queues and scheduling state.

    Args:
      deps_map: The dependency graph to merge.
      emerge: An EmergeData() object.
      package_db: A dict, mapping package ids to portage Package objects.
      show_output: Whether to stream job output to the console.

    Side effects: calls os.setsid() to start a new process group, forks
    fetch/build worker pools plus a print worker, and exits the program
    immediately under --pretend.
    """
    # Store the dependency graph.
    self._deps_map = deps_map
    self._state_map = {}
    # Initialize the running queue to empty
    self._build_jobs = {}
    self._build_ready = ScoredHeap()
    self._fetch_jobs = {}
    self._fetch_ready = ScoredHeap()
    # List of total package installs represented in deps_map.
    install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"]
    self._total_jobs = len(install_jobs)
    self._show_output = show_output

    if "--pretend" in emerge.opts:
      print "Skipping merge because of --pretend mode."
      sys.exit(0)

    # Set a process group so we can easily terminate all children.
    os.setsid()

    # Setup scheduler graph object. This is used by the child processes
    # to help schedule jobs.
    emerge.scheduler_graph = emerge.depgraph.schedulerGraph()

    # Calculate how many jobs we can run in parallel. We don't want to pass
    # the --jobs flag over to emerge itself, because that'll tell emerge to
    # hide its output, and said output is quite useful for debugging hung
    # jobs.
    procs = min(self._total_jobs,
                emerge.opts.pop("--jobs", multiprocessing.cpu_count()))
    self._build_procs = procs
    self._fetch_procs = procs
    self._load_avg = emerge.opts.pop("--load-average", None)
    self._job_queue = multiprocessing.Queue()
    self._print_queue = multiprocessing.Queue()

    # NOTE: EmergeWorker is passed as the pools' "initializer"; it loops
    # forever pulling work from its queue, so the Pool task interface
    # itself is never used.
    self._fetch_queue = multiprocessing.Queue()
    args = (self._fetch_queue, self._job_queue, emerge, package_db, True)
    self._fetch_pool = multiprocessing.Pool(self._fetch_procs, EmergeWorker,
                                            args)

    self._build_queue = multiprocessing.Queue()
    args = (self._build_queue, self._job_queue, emerge, package_db)
    self._build_pool = multiprocessing.Pool(self._build_procs, EmergeWorker,
                                            args)

    self._print_worker = multiprocessing.Process(target=PrintWorker,
                                                 args=[self._print_queue])
    self._print_worker.start()

    # Initialize the failed queue to empty.
    self._retry_queue = []
    self._failed = set()

    # Setup an exit handler so that we print nice messages if we are
    # terminated.
    self._SetupExitHandler()

    # Schedule our jobs. Everything starts in the fetch stage; builds are
    # queued as fetches complete (see Run).
    self._state_map.update(
        (pkg, TargetState(pkg, data)) for pkg, data in deps_map.iteritems())
    self._fetch_ready.multi_put(self._state_map.itervalues())
David Jamesfcb70ef2011-02-02 16:02:30 -08001223
1224 def _SetupExitHandler(self):
1225
1226 def ExitHandler(signum, frame):
David James7358d032011-05-19 10:40:03 -07001227 # Set KILLED flag.
1228 KILLED.set()
David Jamesfcb70ef2011-02-02 16:02:30 -08001229
1230 # Kill our signal handlers so we don't get called recursively
David James7358d032011-05-19 10:40:03 -07001231 signal.signal(signal.SIGINT, KillHandler)
1232 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -08001233
1234 # Print our current job status
Brian Harring0be85c62012-03-17 19:52:12 -07001235 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001236 if job:
1237 self._print_queue.put(JobPrinter(job, unlink=True))
1238
1239 # Notify the user that we are exiting
1240 self._Print("Exiting on signal %s" % signum)
David James7358d032011-05-19 10:40:03 -07001241 self._print_queue.put(None)
1242 self._print_worker.join()
David Jamesfcb70ef2011-02-02 16:02:30 -08001243
1244 # Kill child threads, then exit.
David James7358d032011-05-19 10:40:03 -07001245 os.killpg(0, signal.SIGKILL)
David Jamesfcb70ef2011-02-02 16:02:30 -08001246 sys.exit(1)
1247
1248 # Print out job status when we are killed
1249 signal.signal(signal.SIGINT, ExitHandler)
1250 signal.signal(signal.SIGTERM, ExitHandler)
1251
Brian Harring0be85c62012-03-17 19:52:12 -07001252 def _Schedule(self, pkg_state):
David Jamesfcb70ef2011-02-02 16:02:30 -08001253 # We maintain a tree of all deps, if this doesn't need
David James8c7e5e32011-06-28 11:26:03 -07001254 # to be installed just free up its children and continue.
David Jamesfcb70ef2011-02-02 16:02:30 -08001255 # It is possible to reinstall deps of deps, without reinstalling
1256 # first level deps, like so:
1257 # chromeos (merge) -> eselect (nomerge) -> python (merge)
Brian Harring0be85c62012-03-17 19:52:12 -07001258 this_pkg = pkg_state.info
1259 target = pkg_state.target
1260 if pkg_state.info is not None:
1261 if this_pkg["action"] == "nomerge":
1262 self._Finish(target)
1263 elif target not in self._build_jobs:
1264 # Kick off the build if it's marked to be built.
1265 self._build_jobs[target] = None
1266 self._build_queue.put(pkg_state)
1267 return True
David Jamesfcb70ef2011-02-02 16:02:30 -08001268
David James8c7e5e32011-06-28 11:26:03 -07001269 def _ScheduleLoop(self):
1270 # If the current load exceeds our desired load average, don't schedule
1271 # more than one job.
1272 if self._load_avg and os.getloadavg()[0] > self._load_avg:
1273 needed_jobs = 1
1274 else:
Brian Harring0be85c62012-03-17 19:52:12 -07001275 needed_jobs = self._build_procs
David James8c7e5e32011-06-28 11:26:03 -07001276
1277 # Schedule more jobs.
Brian Harring0be85c62012-03-17 19:52:12 -07001278 while self._build_ready and len(self._build_jobs) < needed_jobs:
1279 state = self._build_ready.get()
1280 if state.target not in self._failed:
1281 self._Schedule(state)
David Jamesfcb70ef2011-02-02 16:02:30 -08001282
  def _Print(self, line):
    """Queue a single line for the print worker to emit."""
    self._print_queue.put(LinePrinter(line))
1286
  def _Status(self):
    """Print per-job progress, or a one-line summary if nothing is due."""
    current_time = time.time()
    no_output = True

    # Print interim output every minute if --show-output is used. Otherwise,
    # print notifications about running packages every 2 minutes, and print
    # full output for jobs that have been running for 60 minutes or more.
    if self._show_output:
      interval = 60
      notify_interval = 0
    else:
      interval = 60 * 60
      notify_interval = 60 * 2
    for target, job in self._build_jobs.iteritems():
      if job:
        last_timestamp = max(job.start_timestamp, job.last_output_timestamp)
        if last_timestamp + interval < current_time:
          # Long-silent job: dump its log.
          self._print_queue.put(JobPrinter(job))
          job.last_output_timestamp = current_time
          no_output = False
        elif (notify_interval and
              job.last_notify_timestamp + notify_interval < current_time):
          # Quieter reminder that the job is still going.
          job_seconds = current_time - job.start_timestamp
          args = (job.pkgname, job_seconds / 60, job_seconds % 60, job.filename)
          info = "Still building %s (%dm%.1fs). Logs in %s" % args
          job.last_notify_timestamp = current_time
          self._Print(info)
          no_output = False

    # If we haven't printed any messages yet, print a general status message
    # here.
    if no_output:
      seconds = current_time - GLOBAL_START
      fjobs, fready = len(self._fetch_jobs), len(self._fetch_ready)
      bjobs, bready = len(self._build_jobs), len(self._build_ready)
      retries = len(self._retry_queue)
      pending = max(0, len(self._deps_map) - fjobs - bjobs)
      line = "Pending %s/%s, " % (pending, self._total_jobs)
      if fjobs or fready:
        line += "Fetching %s/%s, " % (fjobs, fready + fjobs)
      if bjobs or bready or retries:
        line += "Building %s/%s, " % (bjobs, bready + bjobs)
      if retries:
        line += "Retrying %s, " % (retries,)
      load = " ".join(str(x) for x in os.getloadavg())
      line += ("[Time %dm%.1fs Load %s]" % (seconds/60, seconds %60, load))
      self._Print(line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001335
1336 def _Finish(self, target):
David James8c7e5e32011-06-28 11:26:03 -07001337 """Mark a target as completed and unblock dependencies."""
1338 this_pkg = self._deps_map[target]
1339 if this_pkg["needs"] and this_pkg["nodeps"]:
1340 # We got installed, but our deps have not been installed yet. Dependent
1341 # packages should only be installed when our needs have been fully met.
1342 this_pkg["action"] = "nomerge"
1343 else:
1344 finish = []
1345 for dep in this_pkg["provides"]:
1346 dep_pkg = self._deps_map[dep]
Brian Harring0be85c62012-03-17 19:52:12 -07001347 state = self._state_map[dep]
David James8c7e5e32011-06-28 11:26:03 -07001348 del dep_pkg["needs"][target]
Brian Harring0be85c62012-03-17 19:52:12 -07001349 state.update_score()
1350 if not state.prefetched:
1351 if dep in self._fetch_ready:
1352 # If it's not currently being fetched, update the prioritization
1353 self._fetch_ready.sort()
1354 elif not dep_pkg["needs"]:
David James8c7e5e32011-06-28 11:26:03 -07001355 if dep_pkg["nodeps"] and dep_pkg["action"] == "nomerge":
1356 self._Finish(dep)
1357 else:
Brian Harring0be85c62012-03-17 19:52:12 -07001358 self._build_ready.put(self._state_map[dep])
David James8c7e5e32011-06-28 11:26:03 -07001359 self._deps_map.pop(target)
David Jamesfcb70ef2011-02-02 16:02:30 -08001360
1361 def _Retry(self):
David James8c7e5e32011-06-28 11:26:03 -07001362 while self._retry_queue:
Brian Harring0be85c62012-03-17 19:52:12 -07001363 state = self._retry_queue.pop(0)
1364 if self._Schedule(state):
1365 self._Print("Retrying emerge of %s." % state.target)
David James8c7e5e32011-06-28 11:26:03 -07001366 break
David Jamesfcb70ef2011-02-02 16:02:30 -08001367
  def _Shutdown(self):
    """Stop worker pools, queues and the print worker; safe to call twice."""
    # Tell emerge workers to exit. They all exit when 'None' is pushed
    # to the queue.

    # Shutdown the workers first; then jobs (which is how they feed things back)
    # then finally the print queue.

    def _stop(queue, pool):
      # Push the None sentinel, then close/join the pool; terminate() in
      # the finally clause guarantees teardown even if join fails.
      if pool is None:
        return
      try:
        queue.put(None)
        pool.close()
        pool.join()
      finally:
        pool.terminate()

    _stop(self._fetch_queue, self._fetch_pool)
    self._fetch_queue = self._fetch_pool = None

    _stop(self._build_queue, self._build_pool)
    self._build_queue = self._build_pool = None

    if self._job_queue is not None:
      self._job_queue.close()
      self._job_queue = None

    # Now that our workers are finished, we can kill the print queue.
    if self._print_worker is not None:
      try:
        self._print_queue.put(None)
        self._print_queue.close()
        self._print_worker.join()
      finally:
        self._print_worker.terminate()
      self._print_queue = self._print_worker = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001404
1405 def Run(self):
1406 """Run through the scheduled ebuilds.
1407
1408 Keep running so long as we have uninstalled packages in the
1409 dependency graph to merge.
1410 """
Brian Harringa43f5952012-04-12 01:19:34 -07001411 if not self._deps_map:
1412 return
1413
Brian Harring0be85c62012-03-17 19:52:12 -07001414 # Start the fetchers.
1415 for _ in xrange(min(self._fetch_procs, len(self._fetch_ready))):
1416 state = self._fetch_ready.get()
1417 self._fetch_jobs[state.target] = None
1418 self._fetch_queue.put(state)
1419
1420 # Print an update, then get going.
1421 self._Status()
1422
David Jamese703d0f2012-01-12 16:27:45 -08001423 retried = set()
David Jamesfcb70ef2011-02-02 16:02:30 -08001424 while self._deps_map:
1425 # Check here that we are actually waiting for something.
Brian Harring0be85c62012-03-17 19:52:12 -07001426 if (self._build_queue.empty() and
David Jamesfcb70ef2011-02-02 16:02:30 -08001427 self._job_queue.empty() and
Brian Harring0be85c62012-03-17 19:52:12 -07001428 not self._fetch_jobs and
1429 not self._fetch_ready and
1430 not self._build_jobs and
1431 not self._build_ready and
David Jamesfcb70ef2011-02-02 16:02:30 -08001432 self._deps_map):
1433 # If we have failed on a package, retry it now.
1434 if self._retry_queue:
1435 self._Retry()
1436 else:
David Jamesfcb70ef2011-02-02 16:02:30 -08001437 # The dependency map is helpful for debugging failures.
1438 PrintDepsMap(self._deps_map)
1439
1440 # Tell the user why we're exiting.
1441 if self._failed:
1442 print "Packages failed: %s" % " ,".join(self._failed)
1443 else:
1444 print "Deadlock! Circular dependencies!"
1445 sys.exit(1)
1446
Brian Harring706747c2012-03-16 03:04:31 -07001447 for i in range(12):
David Jamesa74289a2011-08-12 10:41:24 -07001448 try:
1449 job = self._job_queue.get(timeout=5)
1450 break
1451 except Queue.Empty:
1452 # Check if any more jobs can be scheduled.
1453 self._ScheduleLoop()
1454 else:
Brian Harring706747c2012-03-16 03:04:31 -07001455 # Print an update every 60 seconds.
David Jamesfcb70ef2011-02-02 16:02:30 -08001456 self._Status()
1457 continue
1458
1459 target = job.target
1460
Brian Harring0be85c62012-03-17 19:52:12 -07001461 if job.fetch_only:
1462 if not job.done:
1463 self._fetch_jobs[job.target] = job
1464 else:
1465 state = self._state_map[job.target]
1466 state.prefetched = True
1467 state.fetched_successfully = (job.retcode == 0)
1468 del self._fetch_jobs[job.target]
1469 self._Print("Fetched %s in %2.2fs"
1470 % (target, time.time() - job.start_timestamp))
1471
1472 if self._show_output or job.retcode != 0:
1473 self._print_queue.put(JobPrinter(job, unlink=True))
1474 else:
1475 os.unlink(job.filename)
1476 # Failure or not, let build work with it next.
1477 if not self._deps_map[job.target]["needs"]:
1478 self._build_ready.put(state)
1479 self._ScheduleLoop()
1480
1481 if self._fetch_ready:
1482 state = self._fetch_ready.get()
1483 self._fetch_queue.put(state)
1484 self._fetch_jobs[state.target] = None
1485 else:
1486 # Minor optimization; shut down fetchers early since we know
1487 # the queue is empty.
1488 self._fetch_queue.put(None)
1489 continue
1490
David Jamesfcb70ef2011-02-02 16:02:30 -08001491 if not job.done:
Brian Harring0be85c62012-03-17 19:52:12 -07001492 self._build_jobs[target] = job
David Jamesfcb70ef2011-02-02 16:02:30 -08001493 self._Print("Started %s (logged in %s)" % (target, job.filename))
1494 continue
1495
1496 # Print output of job
1497 if self._show_output or job.retcode != 0:
1498 self._print_queue.put(JobPrinter(job, unlink=True))
1499 else:
1500 os.unlink(job.filename)
Brian Harring0be85c62012-03-17 19:52:12 -07001501 del self._build_jobs[target]
David Jamesfcb70ef2011-02-02 16:02:30 -08001502
1503 seconds = time.time() - job.start_timestamp
1504 details = "%s (in %dm%.1fs)" % (target, seconds / 60, seconds % 60)
David James32420cc2011-08-25 21:32:46 -07001505 previously_failed = target in self._failed
David Jamesfcb70ef2011-02-02 16:02:30 -08001506
1507 # Complain if necessary.
1508 if job.retcode != 0:
1509 # Handle job failure.
David James32420cc2011-08-25 21:32:46 -07001510 if previously_failed:
David Jamesfcb70ef2011-02-02 16:02:30 -08001511 # If this job has failed previously, give up.
1512 self._Print("Failed %s. Your build has failed." % details)
1513 else:
1514 # Queue up this build to try again after a long while.
David Jamese703d0f2012-01-12 16:27:45 -08001515 retried.add(target)
Brian Harring0be85c62012-03-17 19:52:12 -07001516 self._retry_queue.append(self._state_map[target])
David Jamesfcb70ef2011-02-02 16:02:30 -08001517 self._failed.add(target)
1518 self._Print("Failed %s, retrying later." % details)
1519 else:
David James32420cc2011-08-25 21:32:46 -07001520 if previously_failed:
1521 # Remove target from list of failed packages.
1522 self._failed.remove(target)
1523
1524 self._Print("Completed %s" % details)
1525
1526 # Mark as completed and unblock waiting ebuilds.
1527 self._Finish(target)
1528
1529 if previously_failed and self._retry_queue:
David Jamesfcb70ef2011-02-02 16:02:30 -08001530 # If we have successfully retried a failed package, and there
1531 # are more failed packages, try the next one. We will only have
1532 # one retrying package actively running at a time.
1533 self._Retry()
1534
David Jamesfcb70ef2011-02-02 16:02:30 -08001535
David James8c7e5e32011-06-28 11:26:03 -07001536 # Schedule pending jobs and print an update.
1537 self._ScheduleLoop()
1538 self._Status()
David Jamesfcb70ef2011-02-02 16:02:30 -08001539
David Jamese703d0f2012-01-12 16:27:45 -08001540 # If packages were retried, output a warning.
1541 if retried:
1542 self._Print("")
1543 self._Print("WARNING: The following packages failed the first time,")
1544 self._Print("but succeeded upon retry. This might indicate incorrect")
1545 self._Print("dependencies.")
1546 for pkg in retried:
1547 self._Print(" %s" % pkg)
1548 self._Print("@@@STEP_WARNINGS@@@")
1549 self._Print("")
1550
David Jamesfcb70ef2011-02-02 16:02:30 -08001551 # Tell child threads to exit.
1552 self._Print("Merge complete")
David Jamesfcb70ef2011-02-02 16:02:30 -08001553
1554
def main(argv):
  """Run real_main, then clean up multiprocessing's leftover helper threads.

  multiprocessing does not always tear itself down cleanly at interpreter
  shutdown (http://bugs.python.org/issue4106), so after real_main finishes
  we force a garbage-collection pass and manually join any queue feeder
  threads that are still around.
  """
  try:
    return real_main(argv)
  finally:
    # Make sure a full GC pass happens *before* the VM begins shutting down.
    gc.collect()
    # Then hunt down multiprocessing's feeder threads and reap them by hand.
    for thread in threading.enumerate():
      # Match on the thread name; an ident of None means it never started.
      if thread.name == 'QueueFeederThread' and thread.ident is not None:
        thread.join(1)
David Jamesfcb70ef2011-02-02 16:02:30 -08001570
Brian Harring8294d652012-05-23 02:20:52 -07001571
def real_main(argv):
  """Parse arguments, build the dependency graph, and run the merges.

  Args:
    argv: Command-line arguments (not including the program name).

  Returns:
    Exit status to hand back to the shell (0 on success). May also exit
    via os.execvp (portage self-upgrade restart) without returning.
  """
  # Keep a pristine copy of the arguments: the restart path below re-execs
  # this script with them.
  parallel_emerge_args = argv[:]
  deps = DepGraphGenerator()
  deps.Initialize(parallel_emerge_args)
  emerge = deps.emerge

  if emerge.action is not None:
    # The user asked for an emerge action rather than a package merge;
    # strip off our custom flags and hand the request to stock emerge.
    argv = deps.ParseParallelEmergeArgs(argv)
    return emerge_main(argv)
  elif not emerge.cmdline_packages:
    # No packages requested: show usage and fail.
    Usage()
    return 1

  # Unless we're in pretend mode, there's not much point running without
  # root access. We need to be able to install packages.
  #
  # NOTE: Even if you're running --pretend, it's a good idea to run
  # parallel_emerge with root access so that portage can write to the
  # dependency cache. This is important for performance.
  #
  # NOTE(review): portage.secpass == 2 appears to correspond to full root
  # privileges -- confirm against portage documentation.
  if "--pretend" not in emerge.opts and portage.secpass < 2:
    print "parallel_emerge: superuser access is required."
    return 1

  if "--quiet" not in emerge.opts:
    cmdline_packages = " ".join(emerge.cmdline_packages)
    print "Starting fast-emerge."
    print " Building package %s on %s" % (cmdline_packages,
                                          deps.board or "root")

  deps_tree, deps_info = deps.GenDependencyTree()

  # You want me to be verbose? I'll give you two trees! Twice as much value.
  if "--tree" in emerge.opts and "--verbose" in emerge.opts:
    deps.PrintTree(deps_tree)

  deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)

  # OK, time to print out our progress so far.
  deps.PrintInstallPlan(deps_graph)
  if "--tree" in emerge.opts:
    PrintDepsMap(deps_graph)

  # Are we upgrading portage? If so, and there are more packages to merge,
  # schedule a restart of parallel_emerge to merge the rest. This ensures that
  # we pick up all updates to portage settings before merging any more
  # packages.
  portage_upgrade = False
  root = emerge.settings["ROOT"]
  final_db = emerge.depgraph._dynamic_config.mydbapi[root]
  if root == "/":
    for db_pkg in final_db.match_pkgs("sys-apps/portage"):
      portage_pkg = deps_graph.get(db_pkg.cpv)
      if portage_pkg and len(deps_graph) > 1:
        # Shrink the graph to just the portage package so it merges alone;
        # the re-exec below handles everything else afterwards.
        portage_pkg["needs"].clear()
        portage_pkg["provides"].clear()
        deps_graph = { str(db_pkg.cpv): portage_pkg }
        portage_upgrade = True
        if "--quiet" not in emerge.opts:
          print "Upgrading portage first, then restarting..."

  # Run the queued emerges.
  scheduler = EmergeQueue(deps_graph, emerge, deps.package_db, deps.show_output)
  try:
    scheduler.Run()
  finally:
    # Always tear down the worker processes, even if Run() raised.
    scheduler._Shutdown()
  # Drop our reference so the scheduler's memory can be reclaimed.
  scheduler = None

  # If we already upgraded portage, we don't need to do so again. But we do
  # need to upgrade the rest of the packages. So we'll go ahead and do that.
  #
  # In order to grant the child permission to run setsid, we need to run sudo
  # again. We preserve SUDO_USER here in case an ebuild depends on it.
  if portage_upgrade:
    args = ["sudo", "-E", "SUDO_USER=%s" % os.environ.get("SUDO_USER", "")]
    args += [os.path.abspath(sys.argv[0])] + parallel_emerge_args
    args += ["--exclude=sys-apps/portage"]
    # execvp replaces this process; nothing below runs on this branch.
    os.execvp("sudo", args)

  print "Done"
  return 0