#!/usr/bin/python2.6
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Program to run emerge in parallel, for significant speedup.

Usage:
 ./parallel_emerge [--board=BOARD] [--workon=PKGS]
                   [--force-remote-binary=PKGS] [emerge args] package

This script runs multiple emerge processes in parallel, using appropriate
Portage APIs. It is faster than standard emerge because it has a
multiprocess model instead of an asynchronous model.
"""

import codecs
import copy
import errno
import gc
import heapq
import multiprocessing
import os
import Queue
import signal
import sys
import tempfile
import threading
import time
import traceback

# If PORTAGE_USERNAME isn't specified, scrape it from the $HOME variable. On
# Chromium OS, the default "portage" user doesn't have the necessary
# permissions. It'd be easier if we could default to $USERNAME, but $USERNAME
# is "root" here because we get called through sudo.
#
# We need to set this before importing any portage modules, because portage
# looks up "PORTAGE_USERNAME" at import time.
#
# NOTE: .bashrc sets PORTAGE_USERNAME = $USERNAME, so most people won't
# encounter this case unless they have an old chroot or blow away the
# environment by running sudo without the -E specifier.
if "PORTAGE_USERNAME" not in os.environ:
  homedir = os.environ.get("HOME")
  if homedir:
    os.environ["PORTAGE_USERNAME"] = os.path.basename(homedir)

# Portage doesn't expose dependency trees in its public API, so we have to
# make use of some private APIs here. These modules are found under
# /usr/lib/portage/pym/.
#
# TODO(davidjames): Update Portage to expose public APIs for these features.
from _emerge.actions import adjust_configs
from _emerge.actions import load_emerge_config
from _emerge.create_depgraph_params import create_depgraph_params
from _emerge.depgraph import backtrack_depgraph
from _emerge.main import clean_logs
from _emerge.main import emerge_main
from _emerge.main import parse_opts
from _emerge.Package import Package
from _emerge.Scheduler import Scheduler
from _emerge.stdout_spinner import stdout_spinner
from portage._global_updates import _global_updates
import portage
import portage.debug

def Usage():
  """Print usage."""
  print "Usage:"
  print " ./parallel_emerge [--board=BOARD] [--workon=PKGS]"
  print "                   [--rebuild] [emerge args] package"
  print
  print "Packages specified as workon packages are always built from source."
  print
  print "The --workon argument is mainly useful when you want to build and"
  print "install packages that you are working on unconditionally, but do not"
  print "want to have to rev the package to indicate you want to build it from"
  print "source. The build_packages script will automatically supply the"
  print "workon argument to emerge, ensuring that packages selected using"
  print "cros-workon are rebuilt."
  print
  print "The --rebuild option rebuilds packages whenever their dependencies"
  print "are changed. This ensures that your build is correct."


# Global start time
GLOBAL_START = time.time()

# Whether process has been killed by a signal.
KILLED = multiprocessing.Event()


class EmergeData(object):
  """This simple struct holds various emerge variables.

  This struct helps us easily pass emerge variables around as a unit.
  These variables are used for calculating dependencies and installing
  packages.
  """

  __slots__ = ["action", "cmdline_packages", "depgraph", "favorites",
               "mtimedb", "opts", "root_config", "scheduler_graph",
               "settings", "spinner", "trees"]

  def __init__(self):
    # The action the user requested. If the user is installing packages, this
    # is None. If the user is doing anything other than installing packages,
    # this will contain the action name, which will map exactly to the
    # long-form name of the associated emerge option.
    #
    # Example: If you call parallel_emerge --unmerge package, the action name
    # will be "unmerge"
    self.action = None

    # The list of packages the user passed on the command-line.
    self.cmdline_packages = None

    # The emerge dependency graph. It'll contain all the packages involved in
    # this merge, along with their versions.
    self.depgraph = None

    # The list of candidates to add to the world file.
    self.favorites = None

    # A dict of the options passed to emerge. This dict has been cleaned up
    # a bit by parse_opts, so that it's a bit easier for the emerge code to
    # look at the options.
    #
    # Emerge takes a few shortcuts in its cleanup process to make parsing of
    # the options dict easier. For example, if you pass in "--usepkg=n", the
    # "--usepkg" flag is just left out of the dictionary altogether. Because
    # --usepkg=n is the default, this makes parsing easier, because emerge
    # can just assume that if "--usepkg" is in the dictionary, it's enabled.
    #
    # These cleanup processes aren't applied to all options. For example, the
    # --with-bdeps flag is passed in as-is. For a full list of the cleanups
    # applied by emerge, see the parse_opts function in the _emerge.main
    # package.
    self.opts = None

    # A dictionary used by portage to maintain global state. This state is
    # loaded from disk when portage starts up, and saved to disk whenever we
    # call mtimedb.commit().
    #
    # This database contains information about global updates (i.e., what
    # version of portage we have) and what we're currently doing. Portage
    # saves what it is currently doing in this database so that it can be
    # resumed when you call it with the --resume option.
    #
    # parallel_emerge does not save what it is currently doing in the mtimedb,
    # so we do not support the --resume option.
    self.mtimedb = None

    # The portage configuration for our current root. This contains the portage
    # settings (see below) and the three portage trees for our current root.
    # (The three portage trees are explained below, in the documentation for
    # the "trees" member.)
    self.root_config = None

    # The scheduler graph is used by emerge to calculate what packages to
    # install. We don't actually install any deps, so this isn't really used,
    # but we pass it in to the Scheduler object anyway.
    self.scheduler_graph = None

    # Portage settings for our current session. Most of these settings are set
    # in make.conf inside our current install root.
    self.settings = None

    # The spinner, which spews stuff to stdout to indicate that portage is
    # doing something. We maintain our own spinner, so we set the portage
    # spinner to "silent" mode.
    self.spinner = None

    # The portage trees. There are separate portage trees for each root. To get
    # the portage tree for the current root, you can look in self.trees[root],
    # where root = self.settings["ROOT"].
    #
    # In each root, there are three trees: vartree, porttree, and bintree.
    #  - vartree: A database of the currently-installed packages.
    #  - porttree: A database of ebuilds, that can be used to build packages.
    #  - bintree: A database of binary packages.
    self.trees = None


class DepGraphGenerator(object):
  """Grab dependency information about packages from portage.

  Typical usage:
    deps = DepGraphGenerator()
    deps.Initialize(sys.argv[1:])
    deps_tree, deps_info = deps.GenDependencyTree()
    deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
    deps.PrintTree(deps_tree)
    PrintDepsMap(deps_graph)
  """

  __slots__ = ["board", "emerge", "package_db", "show_output"]

  def __init__(self):
    self.board = None
    self.emerge = EmergeData()
    self.package_db = {}
    self.show_output = False

  def ParseParallelEmergeArgs(self, argv):
    """Read the parallel emerge arguments from the command-line.

    We need to be compatible with emerge arg format. We scrape arguments that
    are specific to parallel_emerge, and pass through the rest directly to
    emerge.
    Args:
      argv: arguments list
    Returns:
      Arguments that don't belong to parallel_emerge
    """
    emerge_args = []
    for arg in argv:
      # Specifically match arguments that are specific to parallel_emerge, and
      # pass through the rest.
      if arg.startswith("--board="):
        self.board = arg.replace("--board=", "")
      elif arg.startswith("--workon="):
        workon_str = arg.replace("--workon=", "")
        emerge_args.append("--reinstall-atoms=%s" % workon_str)
        emerge_args.append("--usepkg-exclude=%s" % workon_str)
      elif arg.startswith("--force-remote-binary="):
        force_remote_binary = arg.replace("--force-remote-binary=", "")
        emerge_args.append("--useoldpkg-atoms=%s" % force_remote_binary)
      elif arg == "--show-output":
        self.show_output = True
      elif arg == "--rebuild":
        emerge_args.append("--rebuild-if-unbuilt")
      else:
        # Not one of our options, so pass through to emerge.
        emerge_args.append(arg)

    # These packages take a really long time to build, so, for expediency, we
    # are blacklisting them from the automatic rebuilds that would otherwise
    # be triggered when one of their dependencies is recompiled.
    for pkg in ("chromeos-base/chromeos-chrome", "media-plugins/o3d",
                "dev-java/icedtea"):
      emerge_args.append("--rebuild-exclude=%s" % pkg)

    return emerge_args

  def Initialize(self, args):
    """Initializer. Parses arguments and sets up portage state."""

    # Parse and strip out args that are just intended for parallel_emerge.
    emerge_args = self.ParseParallelEmergeArgs(args)

    # Setup various environment variables based on our current board. These
    # variables are normally setup inside emerge-${BOARD}, but since we don't
    # call that script, we have to set it up here. These variables serve to
    # point our tools at /build/BOARD and to setup cross compiles to the
    # appropriate board as configured in toolchain.conf.
    if self.board:
      os.environ["PORTAGE_CONFIGROOT"] = "/build/" + self.board
      os.environ["PORTAGE_SYSROOT"] = "/build/" + self.board
      os.environ["SYSROOT"] = "/build/" + self.board

    # Although CHROMEOS_ROOT isn't specific to boards, it's normally setup
    # inside emerge-${BOARD}, so we set it up here for compatibility. It
    # will be going away soon as we migrate to CROS_WORKON_SRCROOT.
    os.environ.setdefault("CHROMEOS_ROOT", os.environ["HOME"] + "/trunk")

    # Turn off interactive delays
    os.environ["EBEEP_IGNORE"] = "1"
    os.environ["EPAUSE_IGNORE"] = "1"
    os.environ["CLEAN_DELAY"] = "0"

    # Parse the emerge options.
    action, opts, cmdline_packages = parse_opts(emerge_args, silent=True)

    # Set environment variables based on options. Portage normally sets these
    # environment variables in emerge_main, but we can't use that function,
    # because it also does a bunch of other stuff that we don't want.
    # TODO(davidjames): Patch portage to move this logic into a function we can
    # reuse here.
    if "--debug" in opts:
      os.environ["PORTAGE_DEBUG"] = "1"
    if "--config-root" in opts:
      os.environ["PORTAGE_CONFIGROOT"] = opts["--config-root"]
    if "--root" in opts:
      os.environ["ROOT"] = opts["--root"]
    if "--accept-properties" in opts:
      os.environ["ACCEPT_PROPERTIES"] = opts["--accept-properties"]

    # If we're installing packages to the board, and we're not using the
    # official flag, we can disable vardb locks. This is safe because we
    # only run up to one instance of parallel_emerge in parallel.
    if self.board and os.environ.get("CHROMEOS_OFFICIAL") != "1":
      os.environ.setdefault("PORTAGE_LOCKS", "false")

    # Now that we've setup the necessary environment variables, we can load the
    # emerge config from disk.
    settings, trees, mtimedb = load_emerge_config()

    # Add in EMERGE_DEFAULT_OPTS, if specified.
    tmpcmdline = []
    if "--ignore-default-opts" not in opts:
      tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
    tmpcmdline.extend(emerge_args)
    action, opts, cmdline_packages = parse_opts(tmpcmdline)

    # If we're installing to the board, we want the --root-deps option so that
    # portage will install the build dependencies to that location as well.
    if self.board:
      opts.setdefault("--root-deps", True)

    # Check whether our portage tree is out of date. Typically, this happens
    # when you're setting up a new portage tree, such as in setup_board and
    # make_chroot. In that case, portage applies a bunch of global updates
    # here. Once the updates are finished, we need to commit any changes
    # that the global update made to our mtimedb, and reload the config.
    #
    # Portage normally handles this logic in emerge_main, but again, we can't
    # use that function here.
    if _global_updates(trees, mtimedb["updates"]):
      mtimedb.commit()
      settings, trees, mtimedb = load_emerge_config(trees=trees)

    # Setup implied options. Portage normally handles this logic in
    # emerge_main.
    if "--buildpkgonly" in opts or "buildpkg" in settings.features:
      opts.setdefault("--buildpkg", True)
    if "--getbinpkgonly" in opts:
      opts.setdefault("--usepkgonly", True)
      opts.setdefault("--getbinpkg", True)
    if "getbinpkg" in settings.features:
      # Per emerge_main, FEATURES=getbinpkg overrides --getbinpkg=n
      opts["--getbinpkg"] = True
    if "--getbinpkg" in opts or "--usepkgonly" in opts:
      opts.setdefault("--usepkg", True)
    if "--fetch-all-uri" in opts:
      opts.setdefault("--fetchonly", True)
    if "--skipfirst" in opts:
      opts.setdefault("--resume", True)
    if "--buildpkgonly" in opts:
      # --buildpkgonly will not merge anything, so it overrides all binary
      # package options.
      for opt in ("--getbinpkg", "--getbinpkgonly",
                  "--usepkg", "--usepkgonly"):
        opts.pop(opt, None)
    if (settings.get("PORTAGE_DEBUG", "") == "1" and
        "python-trace" in settings.features):
      portage.debug.set_trace(True)

    # Complain about unsupported options
    for opt in ("--ask", "--ask-enter-invalid", "--resume", "--skipfirst"):
      if opt in opts:
        print "%s is not supported by parallel_emerge" % opt
        sys.exit(1)

    # Make emerge specific adjustments to the config (e.g. colors!)
    adjust_configs(opts, trees)

    # Save our configuration so far in the emerge object
    emerge = self.emerge
    emerge.action, emerge.opts = action, opts
    emerge.settings, emerge.trees, emerge.mtimedb = settings, trees, mtimedb
    emerge.cmdline_packages = cmdline_packages
    root = settings["ROOT"]
    emerge.root_config = trees[root]["root_config"]

    if "--usepkg" in opts:
      emerge.trees[root]["bintree"].populate("--getbinpkg" in opts)

  def CreateDepgraph(self, emerge, packages):
    """Create an emerge depgraph object."""
    # Setup emerge options.
    emerge_opts = emerge.opts.copy()

    # Ask portage to build a dependency graph with the options we specified
    # above.
    params = create_depgraph_params(emerge_opts, emerge.action)
    success, depgraph, favorites = backtrack_depgraph(
        emerge.settings, emerge.trees, emerge_opts, params, emerge.action,
        packages, emerge.spinner)
    emerge.depgraph = depgraph

    # Is it impossible to honor the user's request? Bail!
    if not success:
      depgraph.display_problems()
      sys.exit(1)

    emerge.depgraph = depgraph
    emerge.favorites = favorites

    # Prime and flush emerge caches.
    root = emerge.settings["ROOT"]
    vardb = emerge.trees[root]["vartree"].dbapi
    if "--pretend" not in emerge.opts:
      vardb.counter_tick()
    vardb.flush_cache()

  def GenDependencyTree(self):
    """Get dependency tree info from emerge.

    Returns:
      Dependency tree
    """
    start = time.time()

    emerge = self.emerge

    # Create a list of packages to merge
    packages = set(emerge.cmdline_packages[:])

    # Tell emerge to be quiet. We print plenty of info ourselves so we don't
    # need any extra output from portage.
    portage.util.noiselimit = -1

    # My favorite feature: The silent spinner. It doesn't spin. Ever.
    # I'd disable the colors by default too, but they look kind of cool.
    emerge.spinner = stdout_spinner()
    emerge.spinner.update = emerge.spinner.update_quiet

    if "--quiet" not in emerge.opts:
      print "Calculating deps..."

    self.CreateDepgraph(emerge, packages)
    depgraph = emerge.depgraph

    # Build our own tree from the emerge digraph.
    deps_tree = {}
    digraph = depgraph._dynamic_config.digraph
    root = emerge.settings["ROOT"]
    final_db = depgraph._dynamic_config.mydbapi[root]
    for node, node_deps in digraph.nodes.items():
      # Calculate dependency packages that need to be installed first. Each
      # child on the digraph is a dependency. The "operation" field specifies
      # what we're doing (e.g. merge, uninstall, etc.). The "priorities" array
      # contains the type of dependency (e.g. build, runtime, runtime_post,
      # etc.)
      #
      # Portage refers to the identifiers for packages as a CPV. This acronym
      # stands for Category/Package/Version.
      #
      # Here's an example CPV: chromeos-base/power_manager-0.0.1-r1
      # Split up, this CPV would be:
      #   C -- Category: chromeos-base
      #   P -- Package: power_manager
      #   V -- Version: 0.0.1-r1
      #
      # We just refer to CPVs as packages here because it's easier.
      deps = {}
      for child, priorities in node_deps[0].items():
        if isinstance(child, Package) and child.root == root:
          cpv = str(child.cpv)
          action = str(child.operation)

          # If we're uninstalling a package, check whether Portage is
          # installing a replacement. If so, just depend on the installation
          # of the new package, because the old package will automatically
          # be uninstalled at that time.
          if action == "uninstall":
            for pkg in final_db.match_pkgs(child.slot_atom):
              cpv = str(pkg.cpv)
              action = "merge"
              break

          deps[cpv] = dict(action=action,
                           deptypes=[str(x) for x in priorities],
                           deps={})

      # We've built our list of deps, so we can add our package to the tree.
      if isinstance(node, Package) and node.root == root:
        deps_tree[str(node.cpv)] = dict(action=str(node.operation),
                                        deps=deps)

    # Ask portage for its install plan, so that we can only throw out
    # dependencies that portage throws out.
    deps_info = {}
    for pkg in depgraph.altlist():
      if isinstance(pkg, Package):
        assert pkg.root == root
        self.package_db[pkg.cpv] = pkg

        # Save off info about the package
        deps_info[str(pkg.cpv)] = {"idx": len(deps_info)}

    seconds = time.time() - start
    if "--quiet" not in emerge.opts:
      print "Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60)

    return deps_tree, deps_info

  def PrintTree(self, deps, depth=""):
    """Print the deps we have seen in the emerge output.

    Args:
      deps: Dependency tree structure.
      depth: Allows printing the tree recursively, with indentation.
    """
    for entry in sorted(deps):
      action = deps[entry]["action"]
      print "%s %s (%s)" % (depth, entry, action)
      self.PrintTree(deps[entry]["deps"], depth=depth + " ")

  def GenDependencyGraph(self, deps_tree, deps_info):
    """Generate a doubly linked dependency graph.

    Args:
      deps_tree: Dependency tree structure.
      deps_info: More details on the dependencies.
    Returns:
      Deps graph in the form of a dict of packages, with each package
      specifying a "needs" list and "provides" list.
    """
    emerge = self.emerge
    root = emerge.settings["ROOT"]

    # deps_map is the actual dependency graph.
    #
    # Each package specifies a "needs" list and a "provides" list. The "needs"
    # list indicates which packages we depend on. The "provides" list
    # indicates the reverse dependencies -- what packages need us.
    #
    # We also provide some other information in the dependency graph:
    #  - action: What we're planning on doing with this package. Generally,
    #    "merge", "nomerge", or "uninstall"
    deps_map = {}

    def ReverseTree(packages):
      """Convert tree to digraph.

      Take the tree of package -> requirements and reverse it to a digraph of
      buildable packages -> packages they unblock.
      Args:
        packages: Tree(s) of dependencies.
      Returns:
        Unsanitized digraph.
      """
      binpkg_phases = set(["setup", "preinst", "postinst"])
      needed_dep_types = set(["blocker", "buildtime", "runtime"])
      for pkg in packages:

        # Create an entry for the package
        action = packages[pkg]["action"]
        default_pkg = {"needs": {}, "provides": set(), "action": action,
                       "nodeps": False, "binary": False}
        this_pkg = deps_map.setdefault(pkg, default_pkg)

        if pkg in deps_info:
          this_pkg["idx"] = deps_info[pkg]["idx"]

        # If a package doesn't have any defined phases that might use the
        # dependent packages (i.e. pkg_setup, pkg_preinst, or pkg_postinst),
        # we can install this package before its deps are ready.
        emerge_pkg = self.package_db.get(pkg)
        if emerge_pkg and emerge_pkg.type_name == "binary":
          this_pkg["binary"] = True
          defined_phases = emerge_pkg.metadata.defined_phases
          defined_binpkg_phases = binpkg_phases.intersection(defined_phases)
          if not defined_binpkg_phases:
            this_pkg["nodeps"] = True

        # Create entries for dependencies of this package first.
        ReverseTree(packages[pkg]["deps"])

        # Add dependencies to this package.
        for dep, dep_item in packages[pkg]["deps"].iteritems():
          # We only need to enforce strict ordering of dependencies if the
          # dependency is a blocker, or is a buildtime or runtime dependency.
          # (I.e., ignored, optional, and runtime_post dependencies don't
          # depend on ordering.)
          dep_types = dep_item["deptypes"]
          if needed_dep_types.intersection(dep_types):
            deps_map[dep]["provides"].add(pkg)
            this_pkg["needs"][dep] = "/".join(dep_types)

          # If there's a blocker, Portage may need to move files from one
          # package to another, which requires editing the CONTENTS files of
          # both packages. To avoid race conditions while editing this file,
          # the two packages must not be installed in parallel, so we can't
          # safely ignore dependencies. See http://crosbug.com/19328
          if "blocker" in dep_types:
            this_pkg["nodeps"] = False

    def FindCycles():
      """Find cycles in the dependency tree.

      Returns:
        A dict mapping cyclic packages to a dict of the deps that cause
        cycles. For each dep that causes cycles, it returns an example
        traversal of the graph that shows the cycle.
      """

      def FindCyclesAtNode(pkg, cycles, unresolved, resolved):
        """Find cycles in cyclic dependencies starting at specified package.

        Args:
          pkg: Package identifier.
          cycles: A dict mapping cyclic packages to a dict of the deps that
            cause cycles. For each dep that causes cycles, it returns an
            example traversal of the graph that shows the cycle.
          unresolved: Nodes that have been visited but are not fully processed.
          resolved: Nodes that have been visited and are fully processed.
        """
        pkg_cycles = cycles.get(pkg)
        if pkg in resolved and not pkg_cycles:
          # If we already looked at this package, and found no cyclic
          # dependencies, we can stop now.
          return
        unresolved.append(pkg)
        for dep in deps_map[pkg]["needs"]:
          if dep in unresolved:
            idx = unresolved.index(dep)
            mycycle = unresolved[idx:] + [dep]
            for i in range(len(mycycle) - 1):
              pkg1, pkg2 = mycycle[i], mycycle[i+1]
              cycles.setdefault(pkg1, {}).setdefault(pkg2, mycycle)
          elif not pkg_cycles or dep not in pkg_cycles:
            # Looks like we haven't seen this edge before.
            FindCyclesAtNode(dep, cycles, unresolved, resolved)
        unresolved.pop()
        resolved.add(pkg)

      cycles, unresolved, resolved = {}, [], set()
      for pkg in deps_map:
        FindCyclesAtNode(pkg, cycles, unresolved, resolved)
      return cycles

    def RemoveUnusedPackages():
      """Remove installed packages, propagating dependencies."""
      # Schedule packages that aren't on the install list for removal
      rm_pkgs = set(deps_map.keys()) - set(deps_info.keys())

      # Remove the packages we don't want, simplifying the graph and making
      # it easier for us to crack cycles.
      for pkg in sorted(rm_pkgs):
        this_pkg = deps_map[pkg]
        needs = this_pkg["needs"]
        provides = this_pkg["provides"]
        for dep in needs:
          dep_provides = deps_map[dep]["provides"]
          dep_provides.update(provides)
          dep_provides.discard(pkg)
          dep_provides.discard(dep)
        for target in provides:
          target_needs = deps_map[target]["needs"]
          target_needs.update(needs)
          target_needs.pop(pkg, None)
          target_needs.pop(target, None)
        del deps_map[pkg]

    def PrintCycleBreak(basedep, dep, mycycle):
      """Print details about a cycle that we are planning on breaking.

      We are breaking a cycle where dep needs basedep. mycycle is an
      example cycle which contains dep -> basedep."""

      needs = deps_map[dep]["needs"]
      depinfo = needs.get(basedep, "deleted")

      # It's OK to swap install order for blockers, as long as the two
      # packages aren't installed in parallel. If there is a cycle, then
      # we know the packages depend on each other already, so we can drop the
      # blocker safely without printing a warning.
      if depinfo == "blocker":
        return

      # Notify the user that we're breaking a cycle.
      print "Breaking %s -> %s (%s)" % (dep, basedep, depinfo)

      # Show cycle.
      for i in range(len(mycycle) - 1):
        pkg1, pkg2 = mycycle[i], mycycle[i+1]
        needs = deps_map[pkg1]["needs"]
        depinfo = needs.get(pkg2, "deleted")
        if pkg1 == dep and pkg2 == basedep:
          depinfo = depinfo + ", deleting"
        print " %s -> %s (%s)" % (pkg1, pkg2, depinfo)

    def SanitizeTree():
      """Remove circular dependencies.

      We prune all dependencies involved in cycles that go against the emerge
      ordering. This has a nice property: we're guaranteed to merge
      dependencies in the same order that portage does.

      Because we don't treat any dependencies as "soft" unless they're killed
      by a cycle, we pay attention to a larger number of dependencies when
      merging. This hurts performance a bit, but helps reliability.
      """
      start = time.time()
      cycles = FindCycles()
      while cycles:
        for dep, mycycles in cycles.iteritems():
          for basedep, mycycle in mycycles.iteritems():
            if deps_info[basedep]["idx"] >= deps_info[dep]["idx"]:
              if "--quiet" not in emerge.opts:
                PrintCycleBreak(basedep, dep, mycycle)
              del deps_map[dep]["needs"][basedep]
              deps_map[basedep]["provides"].remove(dep)
        cycles = FindCycles()
      seconds = time.time() - start
      if "--quiet" not in emerge.opts and seconds >= 0.1:
        print "Tree sanitized in %dm%.1fs" % (seconds / 60, seconds % 60)

    def FindRecursiveProvides(pkg, seen):
      """Find all nodes that require a particular package.

      Assumes that graph is acyclic.

      Args:
        pkg: Package identifier.
        seen: Nodes that have been visited so far.
      """
      if pkg in seen:
        return
      seen.add(pkg)
      info = deps_map[pkg]
      info["tprovides"] = info["provides"].copy()
      for dep in info["provides"]:
        FindRecursiveProvides(dep, seen)
        info["tprovides"].update(deps_map[dep]["tprovides"])

    ReverseTree(deps_tree)

    # We need to remove unused packages so that we can use the dependency
    # ordering of the install process to show us what cycles to crack.
    RemoveUnusedPackages()
    SanitizeTree()
    seen = set()
    for pkg in deps_map:
      FindRecursiveProvides(pkg, seen)
    return deps_map

  def PrintInstallPlan(self, deps_map):
    """Print an emerge-style install plan.

    The install plan lists what packages we're installing, in order.
    It's useful for understanding what parallel_emerge is doing.

    Args:
      deps_map: The dependency graph.
    """

    def InstallPlanAtNode(target, deps_map):
      nodes = []
      nodes.append(target)
      for dep in deps_map[target]["provides"]:
        del deps_map[dep]["needs"][target]
        if not deps_map[dep]["needs"]:
          nodes.extend(InstallPlanAtNode(dep, deps_map))
      return nodes

    deps_map = copy.deepcopy(deps_map)
    install_plan = []
    plan = set()
    for target, info in deps_map.iteritems():
      if not info["needs"] and target not in plan:
        for item in InstallPlanAtNode(target, deps_map):
          plan.add(item)
          install_plan.append(self.package_db[item])

    for pkg in plan:
      del deps_map[pkg]

    if deps_map:
      print "Cyclic dependencies:", " ".join(deps_map)
      PrintDepsMap(deps_map)
      sys.exit(1)

    self.emerge.depgraph.display(install_plan)


def PrintDepsMap(deps_map):
  """Print dependency graph; for each package, list its prerequisites."""
  for i in sorted(deps_map):
    print "%s: (%s) needs" % (i, deps_map[i]["action"])
    needs = deps_map[i]["needs"]
    for j in sorted(needs):
      print " %s" % (j)
    if not needs:
      print " no dependencies"


class EmergeJobState(object):
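  """State of a single emerge job: the package it builds or fetches, where its
  log output lives, and whether it has finished."""
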
  __slots__ = ["done", "filename", "last_notify_timestamp", "last_output_seek",
               "last_output_timestamp", "pkgname", "retcode", "start_timestamp",
               "target", "fetch_only"]

  def __init__(self, target, pkgname, done, filename, start_timestamp,
               retcode=None, fetch_only=False):

    # The full name of the target we're building (e.g.
    # chromeos-base/chromeos-0.0.1-r60)
    self.target = target

    # The short name of the target we're building (e.g. chromeos-0.0.1-r60)
    self.pkgname = pkgname

    # Whether the job is done. (True if the job is done; false otherwise.)
    self.done = done

    # The filename where output is currently stored.
    self.filename = filename

    # The timestamp of the last time we printed the name of the log file. We
    # print this at the beginning of the job, so this starts at
    # start_timestamp.
    self.last_notify_timestamp = start_timestamp

    # The location (in bytes) of the end of the last complete line we printed.
    # This starts off at zero. We use this to jump to the right place when we
    # print output from the same ebuild multiple times.
    self.last_output_seek = 0

    # The timestamp of the last time we printed output. Since we haven't
    # printed output yet, this starts at zero.
    self.last_output_timestamp = 0

    # The return code of our job, if the job is actually finished.
    self.retcode = retcode

    # Was this just a fetch job?
    self.fetch_only = fetch_only

    # The timestamp when our job started.
    self.start_timestamp = start_timestamp


def KillHandler(signum, frame):
  # Kill self and all subprocesses.
  os.killpg(0, signal.SIGKILL)

def SetupWorkerSignals():
  def ExitHandler(signum, frame):
    # Set KILLED flag.
    KILLED.set()

    # Remove our signal handlers so we don't get called recursively.
    signal.signal(signal.SIGINT, KillHandler)
    signal.signal(signal.SIGTERM, KillHandler)

  # Ensure that we exit quietly and cleanly, if possible, when we receive
  # SIGTERM or SIGINT signals. By default, when the user hits CTRL-C, all
  # of the child processes will print details about KeyboardInterrupt
  # exceptions, which isn't very helpful.
  signal.signal(signal.SIGINT, ExitHandler)
  signal.signal(signal.SIGTERM, ExitHandler)

def EmergeProcess(scheduler, output):
  """Merge a package in a subprocess.

  Args:
    scheduler: Scheduler object.
    output: Temporary file to write output.

  Returns:
    The exit code returned by the subprocess.
  """
  pid = os.fork()
  if pid == 0:
    try:
      # Sanity checks.
      if sys.stdout.fileno() != 1: raise Exception("sys.stdout.fileno() != 1")
      if sys.stderr.fileno() != 2: raise Exception("sys.stderr.fileno() != 2")

      # - Redirect 1 (stdout) and 2 (stderr) at our temporary file.
      # - Redirect 0 to point at sys.stdin. In this case, sys.stdin
      #   points at a file reading os.devnull, because multiprocessing mucks
      #   with sys.stdin.
      # - Leave the sys.stdin and output filehandles alone.
      fd_pipes = {0: sys.stdin.fileno(),
                  1: output.fileno(),
                  2: output.fileno(),
                  sys.stdin.fileno(): sys.stdin.fileno(),
                  output.fileno(): output.fileno()}
      portage.process._setup_pipes(fd_pipes)

      # Portage doesn't like when sys.stdin.fileno() != 0, so point sys.stdin
      # at the filehandle we just created in _setup_pipes.
      if sys.stdin.fileno() != 0:
        sys.stdin = os.fdopen(0, "r")

      # Actually do the merge.
      retval = scheduler.merge()

    # We catch all exceptions here (including SystemExit, KeyboardInterrupt,
    # etc) so as to ensure that we don't confuse the multiprocessing module,
    # which expects that all forked children exit with os._exit().
    except:
      traceback.print_exc(file=output)
      retval = 1
    sys.stdout.flush()
    sys.stderr.flush()
    output.flush()
    os._exit(retval)
  else:
    # Return the exit code of the subprocess.
    return os.waitpid(pid, 0)[1]

def EmergeWorker(task_queue, job_queue, emerge, package_db, fetch_only=False):
  """This worker emerges any packages given to it on the task_queue.

  Args:
    task_queue: The queue of tasks for this worker to do.
    job_queue: The queue of results from the worker.
    emerge: An EmergeData() object.
    package_db: A dict, mapping package ids to portage Package objects.
    fetch_only: A bool, indicating if we should just fetch the target.

  It expects package identifiers to be passed to it via task_queue. When a
  task starts or finishes, an EmergeJobState object describing the job
  (including the filename where its output is stored) is pushed onto the
  job_queue.
  """

  SetupWorkerSignals()
  settings, trees, mtimedb = emerge.settings, emerge.trees, emerge.mtimedb

  # Disable flushing of caches to save on I/O.
  root = emerge.settings["ROOT"]
  vardb = emerge.trees[root]["vartree"].dbapi
  vardb._flush_cache_enabled = False
  bindb = emerge.trees[root]["bintree"].dbapi
  # Might be a set, might be a list, might be None; no clue, just use shallow
  # copy to ensure we can roll it back.
  original_remotepkgs = copy.copy(bindb.bintree._remotepkgs)

  opts, spinner = emerge.opts, emerge.spinner
  opts["--nodeps"] = True
  if fetch_only:
    opts["--fetchonly"] = True

  while True:
    # Wait for a new item to show up on the queue. This is a blocking wait,
    # so if there's nothing to do, we just sit here.
    pkg_state = task_queue.get()
    if pkg_state is None:
      # If target is None, this means that the main thread wants us to quit.
      # The other workers need to exit too, so we'll push the message back on
      # to the queue so they'll get it too.
      task_queue.put(None)
      return
    if KILLED.is_set():
      return

    target = pkg_state.target

    db_pkg = package_db[target]

    if db_pkg.type_name == "binary":
      if not fetch_only and pkg_state.fetched_successfully:
        # Ensure portage doesn't think our pkg is remote- else it'll force
        # a redownload of it (even if the on-disk file is fine). In-memory
        # caching basically, implemented dumbly.
        bindb.bintree._remotepkgs = None
      else:
        bindb.bintree._remotepkgs = original_remotepkgs

David Jamesfcb70ef2011-02-02 16:02:30 -0800956 db_pkg.root_config = emerge.root_config
957 install_list = [db_pkg]
958 pkgname = db_pkg.pf
959 output = tempfile.NamedTemporaryFile(prefix=pkgname + "-", delete=False)
    os.chmod(output.name, 0644)
    start_timestamp = time.time()
    job = EmergeJobState(target, pkgname, False, output.name, start_timestamp,
                         fetch_only=fetch_only)
    job_queue.put(job)
    if "--pretend" in opts:
      retcode = 0
    else:
      try:
        emerge.scheduler_graph.mergelist = install_list
        scheduler = Scheduler(settings, trees, mtimedb, opts, spinner,
            favorites=emerge.favorites, graph_config=emerge.scheduler_graph)

        # Enable blocker handling even though we're in --nodeps mode. This
        # allows us to unmerge the blocker after we've merged the replacement.
        scheduler._opts_ignore_blockers = frozenset()

        retcode = EmergeProcess(scheduler, output)
      except Exception:
        traceback.print_exc(file=output)
        retcode = 1
      output.close()

    if KILLED.is_set():
      return

    job = EmergeJobState(target, pkgname, True, output.name, start_timestamp,
                         retcode, fetch_only=fetch_only)
    job_queue.put(job)

class LinePrinter(object):
  """Helper object to print a single line."""

  def __init__(self, line):
    self.line = line

  def Print(self, seek_locations):
    print self.line


class JobPrinter(object):
  """Helper object to print output of a job."""

  def __init__(self, job, unlink=False):
    """Print output of job.

    If unlink is True, unlink the job output file when done."""
    self.current_time = time.time()
    self.job = job
    self.unlink = unlink

  def Print(self, seek_locations):

    job = self.job

    # Calculate how long the job has been running.
    seconds = self.current_time - job.start_timestamp

    # Note that we've printed out the job so far.
    job.last_output_timestamp = self.current_time

    # Note that we're starting the job
    info = "job %s (%dm%.1fs)" % (job.pkgname, seconds / 60, seconds % 60)
    last_output_seek = seek_locations.get(job.filename, 0)
    if last_output_seek:
      print "=== Continue output for %s ===" % info
    else:
      print "=== Start output for %s ===" % info

    # Print actual output from job
    f = codecs.open(job.filename, encoding='utf-8', errors='replace')
    f.seek(last_output_seek)
    prefix = job.pkgname + ":"
    for line in f:

      # Save off our position in the file
      if line and line[-1] == "\n":
        last_output_seek = f.tell()
        line = line[:-1]

      # Print our line
      print prefix, line.encode('utf-8', 'replace')
    f.close()

    # Save our last spot in the file so that we don't print out the same
    # location twice.
    seek_locations[job.filename] = last_output_seek

    # Note end of output section
    if job.done:
      print "=== Complete: %s ===" % info
    else:
      print "=== Still running: %s ===" % info

    if self.unlink:
      os.unlink(job.filename)


def PrintWorker(queue):
  """A worker that prints stuff to the screen as requested."""

  def ExitHandler(signum, frame):
    # Set KILLED flag.
    KILLED.set()

    # Switch to default signal handlers so that we'll die after two signals.
    signal.signal(signal.SIGINT, KillHandler)
    signal.signal(signal.SIGTERM, KillHandler)

  # Don't exit on the first SIGINT / SIGTERM, because the parent worker will
  # handle it and tell us when we need to exit.
  signal.signal(signal.SIGINT, ExitHandler)
  signal.signal(signal.SIGTERM, ExitHandler)

  # seek_locations is a map indicating the position we are at in each file.
  # It starts off empty, but is set by the various Print jobs as we go along
  # to indicate where we left off in each file.
  seek_locations = {}
  while True:
    try:
      job = queue.get()
      if job:
        job.Print(seek_locations)
        sys.stdout.flush()
      else:
        break
    except IOError as ex:
      if ex.errno == errno.EINTR:
        # Looks like we received a signal. Keep printing.
        continue
      raise


class TargetState(object):
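  """Per-package scheduling state: the target name, its deps_map entry, its
  prefetch status, and a score used to prioritize fetch and build order."""
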
  __slots__ = ("target", "info", "score", "prefetched", "fetched_successfully")

  def __init__(self, target, info, fetched=False):
    self.target, self.info = target, info
    self.fetched_successfully = False
    self.prefetched = False
    self.update_score()

  def __cmp__(self, other):
    return cmp(self.score, other.score)

  def update_score(self):
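    # heapq pops the smallest tuple first, so more-negative entries win:
    # prefer targets that transitively unblock the most packages (tprovides),
    # have the fewest unmet needs, and are prebuilt binary packages, with
    # idx and the target name as tie-breakers.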
    self.score = (
        -len(self.info["tprovides"]),
        len(self.info["needs"]),
        not self.info["binary"],
        -len(self.info["provides"]),
        self.info["idx"],
        self.target,
        )


class ScoredHeap(object):
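  """Min-heap of TargetState objects, with O(1) membership tests by target."""
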
  __slots__ = ("heap", "_heap_set")

  def __init__(self, initial=()):
    self.heap = list()
    self._heap_set = set()
    if initial:
      self.multi_put(initial)

  def get(self):
    item = heapq.heappop(self.heap)
    self._heap_set.remove(item.target)
    return item

  def put(self, item):
    if not isinstance(item, TargetState):
      raise ValueError("Item %r isn't a TargetState" % (item,))
    heapq.heappush(self.heap, item)
    self._heap_set.add(item.target)

  def multi_put(self, sequence):
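    # Bulk-add the new items, then restore the heap invariant with a single
    # O(n) heapify instead of pushing them one at a time.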
    sequence = list(sequence)
    self.heap.extend(sequence)
    self._heap_set.update(x.target for x in sequence)
    self.sort()

  def sort(self):
    heapq.heapify(self.heap)

  def __contains__(self, target):
    return target in self._heap_set

  def __nonzero__(self):
    return bool(self.heap)

  def __len__(self):
    return len(self.heap)

class EmergeQueue(object):
  """Class to schedule emerge jobs according to a dependency graph."""

  def __init__(self, deps_map, emerge, package_db, show_output):
    # Store the dependency graph.
    self._deps_map = deps_map
    self._state_map = {}
    # Initialize the running queue to empty
    self._build_jobs = {}
    self._build_ready = ScoredHeap()
    self._fetch_jobs = {}
    self._fetch_ready = ScoredHeap()
    # List of total package installs represented in deps_map.
    install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"]
    self._total_jobs = len(install_jobs)
    self._show_output = show_output

    if "--pretend" in emerge.opts:
      print "Skipping merge because of --pretend mode."
      sys.exit(0)

    # Set a process group so we can easily terminate all children.
    os.setsid()

    # Setup scheduler graph object. This is used by the child processes
    # to help schedule jobs.
    emerge.scheduler_graph = emerge.depgraph.schedulerGraph()

    # Calculate how many jobs we can run in parallel. We don't want to pass
    # the --jobs flag over to emerge itself, because that'll tell emerge to
    # hide its output, and said output is quite useful for debugging hung
    # jobs.
    procs = min(self._total_jobs,
                emerge.opts.pop("--jobs", multiprocessing.cpu_count()))
    self._build_procs = procs
    self._fetch_procs = procs
    self._load_avg = emerge.opts.pop("--load-average", None)
    self._job_queue = multiprocessing.Queue()
    self._print_queue = multiprocessing.Queue()

    self._fetch_queue = multiprocessing.Queue()
    args = (self._fetch_queue, self._job_queue, emerge, package_db, True)
    self._fetch_pool = multiprocessing.Pool(self._fetch_procs, EmergeWorker,
                                            args)

    self._build_queue = multiprocessing.Queue()
    args = (self._build_queue, self._job_queue, emerge, package_db)
    self._build_pool = multiprocessing.Pool(self._build_procs, EmergeWorker,
                                            args)

    self._print_worker = multiprocessing.Process(target=PrintWorker,
                                                 args=[self._print_queue])
    self._print_worker.start()

    # Initialize the failed queue to empty.
    self._retry_queue = []
    self._failed = set()

    # Setup an exit handler so that we print nice messages if we are
    # terminated.
    self._SetupExitHandler()

    # Schedule our jobs.
    self._state_map.update(
        (pkg, TargetState(pkg, data)) for pkg, data in deps_map.iteritems())
    self._fetch_ready.multi_put(self._state_map.itervalues())

  def _SetupExitHandler(self):

    def ExitHandler(signum, frame):
      # Set KILLED flag.
      KILLED.set()

      # Kill our signal handlers so we don't get called recursively
      signal.signal(signal.SIGINT, KillHandler)
      signal.signal(signal.SIGTERM, KillHandler)

      # Print our current job status
      for job in self._build_jobs.itervalues():
        if job:
          self._print_queue.put(JobPrinter(job, unlink=True))

      # Notify the user that we are exiting
      self._Print("Exiting on signal %s" % signum)
      self._print_queue.put(None)
      self._print_worker.join()

      # Kill child threads, then exit.
      os.killpg(0, signal.SIGKILL)
      sys.exit(1)

    # Print out job status when we are killed
    signal.signal(signal.SIGINT, ExitHandler)
    signal.signal(signal.SIGTERM, ExitHandler)

  def _Schedule(self, pkg_state):
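    """Schedule a package for building if needed; returns True if a build job
    was queued."""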
David Jamesfcb70ef2011-02-02 16:02:30 -08001254 # We maintain a tree of all deps, if this doesn't need
David James8c7e5e32011-06-28 11:26:03 -07001255 # to be installed just free up its children and continue.
David Jamesfcb70ef2011-02-02 16:02:30 -08001256 # It is possible to reinstall deps of deps, without reinstalling
1257 # first level deps, like so:
1258 # chromeos (merge) -> eselect (nomerge) -> python (merge)
Brian Harring0be85c62012-03-17 19:52:12 -07001259 this_pkg = pkg_state.info
1260 target = pkg_state.target
1261 if pkg_state.info is not None:
1262 if this_pkg["action"] == "nomerge":
1263 self._Finish(target)
1264 elif target not in self._build_jobs:
1265 # Kick off the build if it's marked to be built.
1266 self._build_jobs[target] = None
1267 self._build_queue.put(pkg_state)
1268 return True
David Jamesfcb70ef2011-02-02 16:02:30 -08001269
David James8c7e5e32011-06-28 11:26:03 -07001270 def _ScheduleLoop(self):
    # If the current load exceeds our desired load average, don't schedule
    # more than one job.
    if self._load_avg and os.getloadavg()[0] > self._load_avg:
      needed_jobs = 1
    else:
      needed_jobs = self._build_procs

    # Schedule more jobs.
    while self._build_ready and len(self._build_jobs) < needed_jobs:
      state = self._build_ready.get()
      if state.target not in self._failed:
        self._Schedule(state)

  def _Print(self, line):
    """Print a single line."""
    self._print_queue.put(LinePrinter(line))

  def _Status(self):
    """Print status."""
    current_time = time.time()
    no_output = True

    # Print interim output every minute if --show-output is used. Otherwise,
    # print notifications about running packages every 2 minutes, and print
    # full output for jobs that have been running for 60 minutes or more.
    if self._show_output:
      interval = 60
      notify_interval = 0
    else:
      interval = 60 * 60
      notify_interval = 60 * 2
    for target, job in self._build_jobs.iteritems():
      if job:
        last_timestamp = max(job.start_timestamp, job.last_output_timestamp)
        if last_timestamp + interval < current_time:
          self._print_queue.put(JobPrinter(job))
          job.last_output_timestamp = current_time
          no_output = False
        elif (notify_interval and
              job.last_notify_timestamp + notify_interval < current_time):
          job_seconds = current_time - job.start_timestamp
          args = (job.pkgname, job_seconds / 60, job_seconds % 60, job.filename)
          info = "Still building %s (%dm%.1fs). Logs in %s" % args
          job.last_notify_timestamp = current_time
          self._Print(info)
          no_output = False

    # If we haven't printed any messages yet, print a general status message
    # here.
    if no_output:
      seconds = current_time - GLOBAL_START
      fjobs, fready = len(self._fetch_jobs), len(self._fetch_ready)
      bjobs, bready = len(self._build_jobs), len(self._build_ready)
      retries = len(self._retry_queue)
      pending = max(0, len(self._deps_map) - fjobs - bjobs)
      line = "Pending %s/%s, " % (pending, self._total_jobs)
      if fjobs or fready:
        line += "Fetching %s/%s, " % (fjobs, fready + fjobs)
      if bjobs or bready or retries:
        line += "Building %s/%s, " % (bjobs, bready + bjobs)
      if retries:
        line += "Retrying %s, " % (retries,)
      load = " ".join(str(x) for x in os.getloadavg())
      line += ("[Time %dm%.1fs Load %s]" % (seconds / 60, seconds % 60, load))
      self._Print(line)

  def _Finish(self, target):
    """Mark a target as completed and unblock dependencies."""
    this_pkg = self._deps_map[target]
    if this_pkg["needs"] and this_pkg["nodeps"]:
      # We got installed, but our deps have not been installed yet. Dependent
      # packages should only be installed when our needs have been fully met.
      this_pkg["action"] = "nomerge"
    else:
      finish = []
      for dep in this_pkg["provides"]:
        dep_pkg = self._deps_map[dep]
        state = self._state_map[dep]
        del dep_pkg["needs"][target]
        state.update_score()
        if not state.prefetched:
          if dep in self._fetch_ready:
            # If it's not currently being fetched, update the prioritization.
            self._fetch_ready.sort()
        elif not dep_pkg["needs"]:
          if dep_pkg["nodeps"] and dep_pkg["action"] == "nomerge":
            self._Finish(dep)
          else:
            self._build_ready.put(self._state_map[dep])
      self._deps_map.pop(target)

  def _Retry(self):
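    """Re-queue the oldest failed package that can be scheduled again."""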
    while self._retry_queue:
      state = self._retry_queue.pop(0)
      if self._Schedule(state):
        self._Print("Retrying emerge of %s." % state.target)
        break

  def _Shutdown(self):
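    """Shut down the worker pools, their queues, and the print worker."""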
    # Tell emerge workers to exit. They all exit when 'None' is pushed
    # to the queue.

    # Shut down the workers first, then the job queue (which is how they feed
    # results back), and finally the print queue.

    def _stop(queue, pool):
      if pool is None:
        return
      try:
        queue.put(None)
        pool.close()
        pool.join()
      finally:
        pool.terminate()

    _stop(self._fetch_queue, self._fetch_pool)
    self._fetch_queue = self._fetch_pool = None

    _stop(self._build_queue, self._build_pool)
    self._build_queue = self._build_pool = None

    if self._job_queue is not None:
      self._job_queue.close()
      self._job_queue = None

    # Now that our workers are finished, we can kill the print queue.
    if self._print_worker is not None:
      try:
        self._print_queue.put(None)
        self._print_queue.close()
        self._print_worker.join()
      finally:
        self._print_worker.terminate()
    self._print_queue = self._print_worker = None

  def Run(self):
    """Run through the scheduled ebuilds.

    Keep running so long as we have uninstalled packages in the
    dependency graph to merge.
    """
    if not self._deps_map:
      return

    # Start the fetchers.
    for _ in xrange(min(self._fetch_procs, len(self._fetch_ready))):
      state = self._fetch_ready.get()
      self._fetch_jobs[state.target] = None
      self._fetch_queue.put(state)

    # Print an update, then get going.
    self._Status()

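    # Remember packages that failed once but may succeed on retry, so we can
    # warn about possibly-incorrect dependencies at the end of the run.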
    retried = set()
    while self._deps_map:
      # Check here that we are actually waiting for something.
      if (self._build_queue.empty() and
          self._job_queue.empty() and
          not self._fetch_jobs and
          not self._fetch_ready and
          not self._build_jobs and
          not self._build_ready and
          self._deps_map):
        # If we have failed on a package, retry it now.
        if self._retry_queue:
          self._Retry()
        else:
          # Tell the user why we're exiting.
          if self._failed:
            print "Packages failed: %s" % ", ".join(self._failed)
            status_file = os.environ.get("PARALLEL_EMERGE_STATUS_FILE")
            if status_file:
              failed_pkgs = set(portage.cpv_getkey(x) for x in self._failed)
              with open(status_file, "a") as f:
                f.write("%s\n" % " ".join(failed_pkgs))
          else:
            print "Deadlock! Circular dependencies!"
          sys.exit(1)

      for i in range(12):
        try:
          job = self._job_queue.get(timeout=5)
          break
        except Queue.Empty:
          # Check if any more jobs can be scheduled.
          self._ScheduleLoop()
      else:
        # Print an update every 60 seconds.
        self._Status()
        continue

      target = job.target

      if job.fetch_only:
        if not job.done:
          self._fetch_jobs[job.target] = job
        else:
          state = self._state_map[job.target]
          state.prefetched = True
          state.fetched_successfully = (job.retcode == 0)
          del self._fetch_jobs[job.target]
          self._Print("Fetched %s in %2.2fs"
                      % (target, time.time() - job.start_timestamp))

          if self._show_output or job.retcode != 0:
            self._print_queue.put(JobPrinter(job, unlink=True))
          else:
            os.unlink(job.filename)
          # Whether the fetch succeeded or failed, hand it to the build
          # stage next.
          if not self._deps_map[job.target]["needs"]:
            self._build_ready.put(state)
            self._ScheduleLoop()

          if self._fetch_ready:
            state = self._fetch_ready.get()
            self._fetch_queue.put(state)
            self._fetch_jobs[state.target] = None
          else:
            # Minor optimization; shut down fetchers early since we know
            # the queue is empty.
            self._fetch_queue.put(None)
        continue

      if not job.done:
        self._build_jobs[target] = job
        self._Print("Started %s (logged in %s)" % (target, job.filename))
        continue

      # Print output of job.
      if self._show_output or job.retcode != 0:
        self._print_queue.put(JobPrinter(job, unlink=True))
      else:
        os.unlink(job.filename)
      del self._build_jobs[target]

      seconds = time.time() - job.start_timestamp
      details = "%s (in %dm%.1fs)" % (target, seconds / 60, seconds % 60)
      previously_failed = target in self._failed

      # Complain if necessary.
      if job.retcode != 0:
        # Handle job failure.
        if previously_failed:
          # If this job has failed previously, give up.
          self._Print("Failed %s. Your build has failed." % details)
        else:
          # Queue up this build to try again after a long while.
          retried.add(target)
          self._retry_queue.append(self._state_map[target])
          self._failed.add(target)
          self._Print("Failed %s, retrying later." % details)
      else:
        if previously_failed:
          # Remove target from list of failed packages.
          self._failed.remove(target)

        self._Print("Completed %s" % details)

        # Mark as completed and unblock waiting ebuilds.
        self._Finish(target)

        if previously_failed and self._retry_queue:
          # If we have successfully retried a failed package, and there
          # are more failed packages, try the next one. We will only have
          # one retrying package actively running at a time.
          self._Retry()

      # Schedule pending jobs and print an update.
      self._ScheduleLoop()
      self._Status()

    # If packages were retried, output a warning.
    if retried:
      self._Print("")
      self._Print("WARNING: The following packages failed the first time,")
      self._Print("but succeeded upon retry. This might indicate incorrect")
      self._Print("dependencies.")
      for pkg in retried:
        self._Print(" %s" % pkg)
      self._Print("@@@STEP_WARNINGS@@@")
      self._Print("")

    # Tell child threads to exit.
    self._Print("Merge complete")


def main(argv):
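  """Run real_main, then reap leftover multiprocessing feeder threads."""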
  try:
    return real_main(argv)
  finally:
    # Work around multiprocessing not cleaning up after itself.
    # http://bugs.python.org/issue4106
    # Step one: ensure GC is run *prior* to the VM starting shutdown.
    gc.collect()
    # Step two: go looking for those threads and try to manually reap
    # them if we can.
    for x in threading.enumerate():
      # Filter on the name and ident; if ident is None, the thread
      # wasn't started.
      if x.name == 'QueueFeederThread' and x.ident is not None:
        x.join(1)


def real_main(argv):
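  """Parse arguments, build the dependency graph, and run the parallel merge."""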
  parallel_emerge_args = argv[:]
  deps = DepGraphGenerator()
  deps.Initialize(parallel_emerge_args)
  emerge = deps.emerge

  if emerge.action is not None:
    argv = deps.ParseParallelEmergeArgs(argv)
    return emerge_main(argv)
  elif not emerge.cmdline_packages:
    Usage()
    return 1

  # Unless we're in pretend mode, there's not much point running without
  # root access. We need to be able to install packages.
  #
  # NOTE: Even if you're running --pretend, it's a good idea to run
  # parallel_emerge with root access so that portage can write to the
  # dependency cache. This is important for performance.
  if "--pretend" not in emerge.opts and portage.secpass < 2:
    print "parallel_emerge: superuser access is required."
    return 1

  if "--quiet" not in emerge.opts:
    cmdline_packages = " ".join(emerge.cmdline_packages)
    print "Starting fast-emerge."
    print " Building package %s on %s" % (cmdline_packages,
                                          deps.board or "root")

  deps_tree, deps_info = deps.GenDependencyTree()

  # You want me to be verbose? I'll give you two trees! Twice as much value.
  if "--tree" in emerge.opts and "--verbose" in emerge.opts:
    deps.PrintTree(deps_tree)

  deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)

  # OK, time to print out our progress so far.
  deps.PrintInstallPlan(deps_graph)
  if "--tree" in emerge.opts:
    PrintDepsMap(deps_graph)

  # Are we upgrading portage? If so, and there are more packages to merge,
  # schedule a restart of parallel_emerge to merge the rest. This ensures that
  # we pick up all updates to portage settings before merging any more
  # packages.
  portage_upgrade = False
  root = emerge.settings["ROOT"]
  final_db = emerge.depgraph._dynamic_config.mydbapi[root]
  if root == "/":
    for db_pkg in final_db.match_pkgs("sys-apps/portage"):
      portage_pkg = deps_graph.get(db_pkg.cpv)
      if portage_pkg and len(deps_graph) > 1:
        portage_pkg["needs"].clear()
        portage_pkg["provides"].clear()
        deps_graph = {str(db_pkg.cpv): portage_pkg}
        portage_upgrade = True
        if "--quiet" not in emerge.opts:
          print "Upgrading portage first, then restarting..."

  # Run the queued emerges.
  scheduler = EmergeQueue(deps_graph, emerge, deps.package_db, deps.show_output)
  try:
    scheduler.Run()
  finally:
    scheduler._Shutdown()
  scheduler = None

  # If we already upgraded portage, we don't need to do so again. But we do
  # need to upgrade the rest of the packages. So we'll go ahead and do that.
  #
  # In order to grant the child permission to run setsid, we need to run sudo
  # again. We preserve SUDO_USER here in case an ebuild depends on it.
  if portage_upgrade:
    args = ["sudo", "-E", "SUDO_USER=%s" % os.environ.get("SUDO_USER", "")]
    args += [os.path.abspath(sys.argv[0])] + parallel_emerge_args
    args += ["--exclude=sys-apps/portage"]
    os.execvp("sudo", args)

  clean_logs(emerge.settings)

  print "Done"
  return 0