#!/usr/bin/python
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Program to run emerge in parallel, for significant speedup.

Usage:
 ./parallel_emerge [--board=BOARD] [--workon=PKGS]
                   [--force-remote-binary=PKGS] [emerge args] package

This script runs multiple emerge processes in parallel, using appropriate
Portage APIs. It is faster than standard emerge because it has a
multiprocess model instead of an asynchronous model.
"""
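
# Example invocation (board and package names here are illustrative only):
#   ./parallel_emerge --board=amd64-generic \
#       --workon=chromeos-base/power_manager chromeos-base/power_manager
# Any argument parallel_emerge does not recognize is passed through to emerge.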

import codecs
import copy
import errno
import gc
import heapq
import multiprocessing
import os
try:
  import Queue
except ImportError:
  # Python 3 renamed this module to "queue". We still import it as Queue to
  # avoid collisions with variables named "queue". Maybe we'll transition at
  # some point.
  # pylint: disable=F0401
  import queue as Queue
import signal
import sys
import tempfile
import threading
import time
import traceback

from chromite.lib import cros_build_lib

# If PORTAGE_USERNAME isn't specified, scrape it from the $HOME variable. On
# Chromium OS, the default "portage" user doesn't have the necessary
# permissions. It'd be easier if we could default to $USERNAME, but $USERNAME
# is "root" here because we get called through sudo.
#
# We need to set this before importing any portage modules, because portage
# looks up "PORTAGE_USERNAME" at import time.
#
# NOTE: .bashrc sets PORTAGE_USERNAME = $USERNAME, so most people won't
# encounter this case unless they have an old chroot or blow away the
# environment by running sudo without the -E specifier.
if "PORTAGE_USERNAME" not in os.environ:
  homedir = os.environ.get("HOME")
  if homedir:
    os.environ["PORTAGE_USERNAME"] = os.path.basename(homedir)

# Portage doesn't expose dependency trees in its public API, so we have to
# make use of some private APIs here. These modules are found under
# /usr/lib/portage/pym/.
#
# TODO(davidjames): Update Portage to expose public APIs for these features.
# pylint: disable=F0401
from _emerge.actions import adjust_configs
from _emerge.actions import load_emerge_config
from _emerge.create_depgraph_params import create_depgraph_params
from _emerge.depgraph import backtrack_depgraph
try:
  from _emerge.main import clean_logs
except ImportError:
  # Older portage versions did not provide clean_logs, so stub it.
  # We need this if running in an older chroot that hasn't yet upgraded
  # the portage version.
  clean_logs = lambda x: None
from _emerge.main import emerge_main
from _emerge.main import parse_opts
from _emerge.Package import Package
from _emerge.Scheduler import Scheduler
from _emerge.stdout_spinner import stdout_spinner
from portage._global_updates import _global_updates
import portage
import portage.debug
from portage.versions import vercmp
# pylint: enable=F0401


def Usage():
  """Print usage."""
  print "Usage:"
  print " ./parallel_emerge [--board=BOARD] [--workon=PKGS]"
  print "                   [--rebuild] [emerge args] package"
  print
  print "Packages specified as workon packages are always built from source."
  print
  print "The --workon argument is mainly useful when you want to build and"
  print "install packages that you are working on unconditionally, but do not"
  print "want to have to rev the package to indicate you want to build it from"
  print "source. The build_packages script will automatically supply the"
  print "workon argument to emerge, ensuring that packages selected using"
  print "cros-workon are rebuilt."
  print
  print "The --rebuild option rebuilds packages whenever their dependencies"
  print "are changed. This ensures that your build is correct."


# Global start time
GLOBAL_START = time.time()

# Whether process has been killed by a signal.
KILLED = multiprocessing.Event()


class EmergeData(object):
  """This simple struct holds various emerge variables.

  This struct helps us easily pass emerge variables around as a unit.
  These variables are used for calculating dependencies and installing
  packages.
  """

  __slots__ = ["action", "cmdline_packages", "depgraph", "favorites",
               "mtimedb", "opts", "root_config", "scheduler_graph",
               "settings", "spinner", "trees"]

  def __init__(self):
    # The action the user requested. If the user is installing packages, this
    # is None. If the user is doing anything other than installing packages,
    # this will contain the action name, which will map exactly to the
    # long-form name of the associated emerge option.
    #
    # Example: If you call parallel_emerge --unmerge package, the action name
    # will be "unmerge"
    self.action = None

    # The list of packages the user passed on the command-line.
    self.cmdline_packages = None

    # The emerge dependency graph. It'll contain all the packages involved in
    # this merge, along with their versions.
    self.depgraph = None

    # The list of candidates to add to the world file.
    self.favorites = None

    # A dict of the options passed to emerge. This dict has been cleaned up
    # a bit by parse_opts, so that it's a bit easier for the emerge code to
    # look at the options.
    #
    # Emerge takes a few shortcuts in its cleanup process to make parsing of
    # the options dict easier. For example, if you pass in "--usepkg=n", the
    # "--usepkg" flag is just left out of the dictionary altogether. Because
    # --usepkg=n is the default, this makes parsing easier, because emerge
    # can just assume that if "--usepkg" is in the dictionary, it's enabled.
    #
    # These cleanup processes aren't applied to all options. For example, the
    # --with-bdeps flag is passed in as-is. For a full list of the cleanups
    # applied by emerge, see the parse_opts function in the _emerge.main
    # package.
    self.opts = None

    # A dictionary used by portage to maintain global state. This state is
    # loaded from disk when portage starts up, and saved to disk whenever we
    # call mtimedb.commit().
    #
    # This database contains information about global updates (i.e., what
    # version of portage we have) and what we're currently doing. Portage
    # saves what it is currently doing in this database so that it can be
    # resumed when you call it with the --resume option.
    #
    # parallel_emerge does not save what it is currently doing in the mtimedb,
    # so we do not support the --resume option.
    self.mtimedb = None

    # The portage configuration for our current root. This contains the portage
    # settings (see below) and the three portage trees for our current root.
    # (The three portage trees are explained below, in the documentation for
    # the "trees" member.)
    self.root_config = None

    # The scheduler graph is used by emerge to calculate what packages to
    # install. We don't actually install any deps, so this isn't really used,
    # but we pass it in to the Scheduler object anyway.
    self.scheduler_graph = None

    # Portage settings for our current session. Most of these settings are set
    # in make.conf inside our current install root.
    self.settings = None

    # The spinner, which spews stuff to stdout to indicate that portage is
    # doing something. We maintain our own spinner, so we set the portage
    # spinner to "silent" mode.
    self.spinner = None

    # The portage trees. There are separate portage trees for each root. To get
    # the portage tree for the current root, you can look in self.trees[root],
    # where root = self.settings["ROOT"].
    #
    # In each root, there are three trees: vartree, porttree, and bintree.
    #  - vartree: A database of the currently-installed packages.
    #  - porttree: A database of ebuilds, that can be used to build packages.
    #  - bintree: A database of binary packages.
    self.trees = None


class DepGraphGenerator(object):
  """Grab dependency information about packages from portage.

  Typical usage:
    deps = DepGraphGenerator()
    deps.Initialize(sys.argv[1:])
    deps_tree, deps_info = deps.GenDependencyTree()
    deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
    deps.PrintTree(deps_tree)
    PrintDepsMap(deps_graph)
  """

  __slots__ = ["board", "emerge", "package_db", "show_output", "unpack_only"]

  def __init__(self):
    self.board = None
    self.emerge = EmergeData()
    self.package_db = {}
    self.show_output = False
    self.unpack_only = False

  def ParseParallelEmergeArgs(self, argv):
    """Read the parallel emerge arguments from the command-line.

    We need to be compatible with emerge arg format. We scrape arguments that
    are specific to parallel_emerge, and pass through the rest directly to
    emerge.

    Args:
      argv: arguments list

    Returns:
      Arguments that don't belong to parallel_emerge
    """
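    # For illustration (hypothetical package name): "--workon=foo/bar" is
    # translated into "--reinstall-atoms=foo/bar --usepkg-exclude=foo/bar",
    # "--rebuild" becomes "--rebuild-if-unbuilt", and anything we don't
    # recognize is forwarded to emerge unchanged.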
    emerge_args = []
    for arg in argv:
      # Specifically match arguments that are specific to parallel_emerge, and
      # pass through the rest.
      if arg.startswith("--board="):
        self.board = arg.replace("--board=", "")
      elif arg.startswith("--workon="):
        workon_str = arg.replace("--workon=", "")
        emerge_args.append("--reinstall-atoms=%s" % workon_str)
        emerge_args.append("--usepkg-exclude=%s" % workon_str)
      elif arg.startswith("--force-remote-binary="):
        force_remote_binary = arg.replace("--force-remote-binary=", "")
        emerge_args.append("--useoldpkg-atoms=%s" % force_remote_binary)
      elif arg == "--show-output":
        self.show_output = True
      elif arg == "--rebuild":
        emerge_args.append("--rebuild-if-unbuilt")
      elif arg == "--unpackonly":
        emerge_args.append("--fetchonly")
        self.unpack_only = True
      else:
        # Not one of our options, so pass through to emerge.
        emerge_args.append(arg)

    # These packages take a really long time to build, so, for expediency, we
    # blacklist them from the automatic rebuilds that would otherwise be
    # triggered when one of their dependencies is recompiled.
    for pkg in ("chromeos-base/chromeos-chrome", "media-plugins/o3d",
                "dev-java/icedtea"):
      emerge_args.append("--rebuild-exclude=%s" % pkg)

    return emerge_args

  def Initialize(self, args):
    """Initializer. Parses arguments and sets up portage state."""

    # Parse and strip out args that are just intended for parallel_emerge.
    emerge_args = self.ParseParallelEmergeArgs(args)

    # Setup various environment variables based on our current board. These
    # variables are normally setup inside emerge-${BOARD}, but since we don't
    # call that script, we have to set it up here. These variables serve to
    # point our tools at /build/BOARD and to setup cross compiles to the
    # appropriate board as configured in toolchain.conf.
    if self.board:
      sysroot = cros_build_lib.GetSysroot(board=self.board)
      os.environ["PORTAGE_CONFIGROOT"] = sysroot
      os.environ["PORTAGE_SYSROOT"] = sysroot
      os.environ["SYSROOT"] = sysroot

    # Although CHROMEOS_ROOT isn't specific to boards, it's normally setup
    # inside emerge-${BOARD}, so we set it up here for compatibility. It
    # will be going away soon as we migrate to CROS_WORKON_SRCROOT.
    os.environ.setdefault("CHROMEOS_ROOT", os.environ["HOME"] + "/trunk")

    # Turn off interactive delays
    os.environ["EBEEP_IGNORE"] = "1"
    os.environ["EPAUSE_IGNORE"] = "1"
    os.environ["CLEAN_DELAY"] = "0"

    # Parse the emerge options.
    action, opts, cmdline_packages = parse_opts(emerge_args, silent=True)

    # Set environment variables based on options. Portage normally sets these
    # environment variables in emerge_main, but we can't use that function,
    # because it also does a bunch of other stuff that we don't want.
    # TODO(davidjames): Patch portage to move this logic into a function we can
    # reuse here.
    if "--debug" in opts:
      os.environ["PORTAGE_DEBUG"] = "1"
    if "--config-root" in opts:
      os.environ["PORTAGE_CONFIGROOT"] = opts["--config-root"]
    if "--root" in opts:
      os.environ["ROOT"] = opts["--root"]
    if "--accept-properties" in opts:
      os.environ["ACCEPT_PROPERTIES"] = opts["--accept-properties"]

    # If we're installing packages to the board, we can disable vardb locks.
    # This is safe because we only run up to one instance of parallel_emerge in
    # parallel.
    # TODO(davidjames): Enable this for the host too.
    if self.board:
      os.environ.setdefault("PORTAGE_LOCKS", "false")

    # Now that we've setup the necessary environment variables, we can load the
    # emerge config from disk.
    settings, trees, mtimedb = load_emerge_config()

    # Add in EMERGE_DEFAULT_OPTS, if specified.
    tmpcmdline = []
    if "--ignore-default-opts" not in opts:
      tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
    tmpcmdline.extend(emerge_args)
    action, opts, cmdline_packages = parse_opts(tmpcmdline)

    # If we're installing to the board, we want the --root-deps option so that
    # portage will install the build dependencies to that location as well.
    if self.board:
      opts.setdefault("--root-deps", True)

    # Check whether our portage tree is out of date. Typically, this happens
    # when you're setting up a new portage tree, such as in setup_board and
    # make_chroot. In that case, portage applies a bunch of global updates
    # here. Once the updates are finished, we need to commit any changes
    # that the global update made to our mtimedb, and reload the config.
    #
    # Portage normally handles this logic in emerge_main, but again, we can't
    # use that function here.
    if _global_updates(trees, mtimedb["updates"]):
      mtimedb.commit()
      settings, trees, mtimedb = load_emerge_config(trees=trees)

    # Setup implied options. Portage normally handles this logic in
    # emerge_main.
    if "--buildpkgonly" in opts or "buildpkg" in settings.features:
      opts.setdefault("--buildpkg", True)
    if "--getbinpkgonly" in opts:
      opts.setdefault("--usepkgonly", True)
      opts.setdefault("--getbinpkg", True)
    if "getbinpkg" in settings.features:
      # Per emerge_main, FEATURES=getbinpkg overrides --getbinpkg=n
      opts["--getbinpkg"] = True
    if "--getbinpkg" in opts or "--usepkgonly" in opts:
      opts.setdefault("--usepkg", True)
    if "--fetch-all-uri" in opts:
      opts.setdefault("--fetchonly", True)
    if "--skipfirst" in opts:
      opts.setdefault("--resume", True)
    if "--buildpkgonly" in opts:
      # --buildpkgonly will not merge anything, so it overrides all binary
      # package options.
      for opt in ("--getbinpkg", "--getbinpkgonly",
                  "--usepkg", "--usepkgonly"):
        opts.pop(opt, None)
    if (settings.get("PORTAGE_DEBUG", "") == "1" and
        "python-trace" in settings.features):
      portage.debug.set_trace(True)

    # Complain about unsupported options
    for opt in ("--ask", "--ask-enter-invalid", "--resume", "--skipfirst"):
      if opt in opts:
        print "%s is not supported by parallel_emerge" % opt
        sys.exit(1)

    # Make emerge specific adjustments to the config (e.g. colors!)
    adjust_configs(opts, trees)

    # Save our configuration so far in the emerge object
    emerge = self.emerge
    emerge.action, emerge.opts = action, opts
    emerge.settings, emerge.trees, emerge.mtimedb = settings, trees, mtimedb
    emerge.cmdline_packages = cmdline_packages
    root = settings["ROOT"]
    emerge.root_config = trees[root]["root_config"]

    if "--usepkg" in opts:
      emerge.trees[root]["bintree"].populate("--getbinpkg" in opts)

  def CreateDepgraph(self, emerge, packages):
    """Create an emerge depgraph object."""
    # Setup emerge options.
    emerge_opts = emerge.opts.copy()

    # Ask portage to build a dependency graph with the options we specified
    # above.
    params = create_depgraph_params(emerge_opts, emerge.action)
    success, depgraph, favorites = backtrack_depgraph(
        emerge.settings, emerge.trees, emerge_opts, params, emerge.action,
        packages, emerge.spinner)
    emerge.depgraph = depgraph

    # Is it impossible to honor the user's request? Bail!
    if not success:
      depgraph.display_problems()
      sys.exit(1)

    emerge.depgraph = depgraph
    emerge.favorites = favorites

    # Prime and flush emerge caches.
    root = emerge.settings["ROOT"]
    vardb = emerge.trees[root]["vartree"].dbapi
    if "--pretend" not in emerge.opts:
      vardb.counter_tick()
    vardb.flush_cache()

  def GenDependencyTree(self):
    """Get dependency tree info from emerge.

    Returns:
      Dependency tree
    """
    start = time.time()

    emerge = self.emerge

    # Create a list of packages to merge
    packages = set(emerge.cmdline_packages[:])

    # Tell emerge to be quiet. We print plenty of info ourselves so we don't
    # need any extra output from portage.
    portage.util.noiselimit = -1

    # My favorite feature: The silent spinner. It doesn't spin. Ever.
    # I'd disable the colors by default too, but they look kind of cool.
    emerge.spinner = stdout_spinner()
    emerge.spinner.update = emerge.spinner.update_quiet

    if "--quiet" not in emerge.opts:
      print "Calculating deps..."

    self.CreateDepgraph(emerge, packages)
    depgraph = emerge.depgraph

    # Build our own tree from the emerge digraph.
    deps_tree = {}
    # pylint: disable=W0212
    digraph = depgraph._dynamic_config.digraph
    root = emerge.settings["ROOT"]
    final_db = depgraph._dynamic_config.mydbapi[root]
    for node, node_deps in digraph.nodes.items():
      # Calculate dependency packages that need to be installed first. Each
      # child on the digraph is a dependency. The "operation" field specifies
      # what we're doing (e.g. merge, uninstall, etc.). The "priorities" array
      # contains the type of dependency (e.g. build, runtime, runtime_post,
      # etc.)
      #
      # Portage refers to the identifiers for packages as a CPV. This acronym
      # stands for Category/Package/Version.
      #
      # Here's an example CPV: chromeos-base/power_manager-0.0.1-r1
      # Split up, this CPV would be:
      #   C -- Category: chromeos-base
      #   P -- Package: power_manager
      #   V -- Version: 0.0.1-r1
      #
      # We just refer to CPVs as packages here because it's easier.
      deps = {}
      for child, priorities in node_deps[0].items():
        if isinstance(child, Package) and child.root == root:
          cpv = str(child.cpv)
          action = str(child.operation)

          # If we're uninstalling a package, check whether Portage is
          # installing a replacement. If so, just depend on the installation
          # of the new package, because the old package will automatically
          # be uninstalled at that time.
          if action == "uninstall":
            for pkg in final_db.match_pkgs(child.slot_atom):
              cpv = str(pkg.cpv)
              action = "merge"
              break

          deps[cpv] = dict(action=action,
                           deptypes=[str(x) for x in priorities],
                           deps={})

      # We've built our list of deps, so we can add our package to the tree.
      if isinstance(node, Package) and node.root == root:
        deps_tree[str(node.cpv)] = dict(action=str(node.operation),
                                        deps=deps)

    # Ask portage for its install plan, so that we can only throw out
    # dependencies that portage throws out.
    deps_info = {}
    for pkg in depgraph.altlist():
      if isinstance(pkg, Package):
        assert pkg.root == root
        self.package_db[pkg.cpv] = pkg

        # Save off info about the package
        deps_info[str(pkg.cpv)] = {"idx": len(deps_info)}
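        # Note: "idx" records portage's own install order for this package.
        # SanitizeTree() later compares these indices to decide which edge of
        # a dependency cycle can be dropped without fighting emerge's ordering.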

    seconds = time.time() - start
    if "--quiet" not in emerge.opts:
      print "Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60)

    return deps_tree, deps_info

  def PrintTree(self, deps, depth=""):
    """Print the deps we have seen in the emerge output.

    Args:
      deps: Dependency tree structure.
      depth: Allows printing the tree recursively, with indentation.
    """
    for entry in sorted(deps):
      action = deps[entry]["action"]
      print "%s %s (%s)" % (depth, entry, action)
      self.PrintTree(deps[entry]["deps"], depth=depth + " ")

  def GenDependencyGraph(self, deps_tree, deps_info):
    """Generate a doubly linked dependency graph.

    Args:
      deps_tree: Dependency tree structure.
      deps_info: More details on the dependencies.

    Returns:
      Deps graph in the form of a dict of packages, with each package
      specifying a "needs" list and "provides" list.
    """
    emerge = self.emerge

    # deps_map is the actual dependency graph.
    #
    # Each package specifies a "needs" list and a "provides" list. The "needs"
    # list indicates which packages we depend on. The "provides" list
    # indicates the reverse dependencies -- what packages need us.
    #
    # We also provide some other information in the dependency graph:
    #  - action: What we're planning on doing with this package. Generally,
    #    "merge", "nomerge", or "uninstall"
    deps_map = {}
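    # Shape of one entry, with hypothetical package names for illustration:
    #   deps_map["sys-apps/foo-1.0-r1"] = {
    #       "action": "merge",
    #       "needs": {"sys-libs/bar-2.0": "buildtime/runtime"},
    #       "provides": set(["app-misc/baz-0.1"]),
    #       "nodeps": False, "binary": False}
    # ("idx" and "tprovides" are filled in further below.)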

    def ReverseTree(packages):
      """Convert tree to digraph.

      Take the tree of package -> requirements and reverse it to a digraph of
      buildable packages -> packages they unblock.

      Args:
        packages: Tree(s) of dependencies.

      Returns:
        Unsanitized digraph.
      """
      binpkg_phases = set(["setup", "preinst", "postinst"])
      needed_dep_types = set(["blocker", "buildtime", "runtime"])
      for pkg in packages:

        # Create an entry for the package
        action = packages[pkg]["action"]
        default_pkg = {"needs": {}, "provides": set(), "action": action,
                       "nodeps": False, "binary": False}
        this_pkg = deps_map.setdefault(pkg, default_pkg)

        if pkg in deps_info:
          this_pkg["idx"] = deps_info[pkg]["idx"]

        # If a package doesn't have any defined phases that might use the
        # dependent packages (i.e. pkg_setup, pkg_preinst, or pkg_postinst),
        # we can install this package before its deps are ready.
        emerge_pkg = self.package_db.get(pkg)
        if emerge_pkg and emerge_pkg.type_name == "binary":
          this_pkg["binary"] = True
          if 0 <= vercmp(portage.VERSION, "2.1.11.50"):
            defined_phases = emerge_pkg.defined_phases
          else:
            defined_phases = emerge_pkg.metadata.defined_phases
          defined_binpkg_phases = binpkg_phases.intersection(defined_phases)
          if not defined_binpkg_phases:
            this_pkg["nodeps"] = True

        # Create entries for dependencies of this package first.
        ReverseTree(packages[pkg]["deps"])

        # Add dependencies to this package.
        for dep, dep_item in packages[pkg]["deps"].iteritems():
          # We only need to enforce strict ordering of dependencies if the
          # dependency is a blocker, or is a buildtime or runtime dependency.
          # (I.e., ignored, optional, and runtime_post dependencies don't
          # depend on ordering.)
          dep_types = dep_item["deptypes"]
          if needed_dep_types.intersection(dep_types):
            deps_map[dep]["provides"].add(pkg)
            this_pkg["needs"][dep] = "/".join(dep_types)

          # If there's a blocker, Portage may need to move files from one
          # package to another, which requires editing the CONTENTS files of
          # both packages. To avoid race conditions while editing this file,
          # the two packages must not be installed in parallel, so we can't
          # safely ignore dependencies. See http://crosbug.com/19328
          if "blocker" in dep_types:
            this_pkg["nodeps"] = False

    def FindCycles():
      """Find cycles in the dependency tree.

      Returns:
        A dict mapping cyclic packages to a dict of the deps that cause
        cycles. For each dep that causes cycles, it returns an example
        traversal of the graph that shows the cycle.
      """
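      # For a two-package cycle between hypothetical packages x/a-1 and x/b-1,
      # the result looks roughly like:
      #   {"x/a-1": {"x/b-1": ["x/a-1", "x/b-1", "x/a-1"]},
      #    "x/b-1": {"x/a-1": ["x/a-1", "x/b-1", "x/a-1"]}}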

      def FindCyclesAtNode(pkg, cycles, unresolved, resolved):
        """Find cycles in cyclic dependencies starting at specified package.

        Args:
          pkg: Package identifier.
          cycles: A dict mapping cyclic packages to a dict of the deps that
            cause cycles. For each dep that causes cycles, it returns an
            example traversal of the graph that shows the cycle.
          unresolved: Nodes that have been visited but are not fully processed.
          resolved: Nodes that have been visited and are fully processed.
        """
        pkg_cycles = cycles.get(pkg)
        if pkg in resolved and not pkg_cycles:
          # If we already looked at this package, and found no cyclic
          # dependencies, we can stop now.
          return
        unresolved.append(pkg)
        for dep in deps_map[pkg]["needs"]:
          if dep in unresolved:
            idx = unresolved.index(dep)
            mycycle = unresolved[idx:] + [dep]
            for i in xrange(len(mycycle) - 1):
              pkg1, pkg2 = mycycle[i], mycycle[i+1]
              cycles.setdefault(pkg1, {}).setdefault(pkg2, mycycle)
          elif not pkg_cycles or dep not in pkg_cycles:
            # Looks like we haven't seen this edge before.
            FindCyclesAtNode(dep, cycles, unresolved, resolved)
        unresolved.pop()
        resolved.add(pkg)

      cycles, unresolved, resolved = {}, [], set()
      for pkg in deps_map:
        FindCyclesAtNode(pkg, cycles, unresolved, resolved)
      return cycles

    def RemoveUnusedPackages():
      """Remove installed packages, propagating dependencies."""
      # Schedule packages that aren't on the install list for removal
      rm_pkgs = set(deps_map.keys()) - set(deps_info.keys())

      # Remove the packages we don't want, simplifying the graph and making
      # it easier for us to crack cycles.
      for pkg in sorted(rm_pkgs):
        this_pkg = deps_map[pkg]
        needs = this_pkg["needs"]
        provides = this_pkg["provides"]
        for dep in needs:
          dep_provides = deps_map[dep]["provides"]
          dep_provides.update(provides)
          dep_provides.discard(pkg)
          dep_provides.discard(dep)
        for target in provides:
          target_needs = deps_map[target]["needs"]
          target_needs.update(needs)
          target_needs.pop(pkg, None)
          target_needs.pop(target, None)
        del deps_map[pkg]

    def PrintCycleBreak(basedep, dep, mycycle):
      """Print details about a cycle that we are planning on breaking.

      We are breaking a cycle where dep needs basedep. mycycle is an
      example cycle which contains dep -> basedep.
      """

      needs = deps_map[dep]["needs"]
      depinfo = needs.get(basedep, "deleted")

      # It's OK to swap install order for blockers, as long as the two
      # packages aren't installed in parallel. If there is a cycle, then
      # we know the packages depend on each other already, so we can drop the
      # blocker safely without printing a warning.
      if depinfo == "blocker":
        return

      # Notify the user that we're breaking a cycle.
      print "Breaking %s -> %s (%s)" % (dep, basedep, depinfo)

      # Show cycle.
      for i in xrange(len(mycycle) - 1):
        pkg1, pkg2 = mycycle[i], mycycle[i+1]
        needs = deps_map[pkg1]["needs"]
        depinfo = needs.get(pkg2, "deleted")
        if pkg1 == dep and pkg2 == basedep:
          depinfo = depinfo + ", deleting"
        print " %s -> %s (%s)" % (pkg1, pkg2, depinfo)

    def SanitizeTree():
      """Remove circular dependencies.

      We prune all dependencies involved in cycles that go against the emerge
      ordering. This has a nice property: we're guaranteed to merge
      dependencies in the same order that portage does.

      Because we don't treat any dependencies as "soft" unless they're killed
      by a cycle, we pay attention to a larger number of dependencies when
      merging. This hurts performance a bit, but helps reliability.
      """
      start = time.time()
      cycles = FindCycles()
      while cycles:
        for dep, mycycles in cycles.iteritems():
          for basedep, mycycle in mycycles.iteritems():
            if deps_info[basedep]["idx"] >= deps_info[dep]["idx"]:
              if "--quiet" not in emerge.opts:
                PrintCycleBreak(basedep, dep, mycycle)
              del deps_map[dep]["needs"][basedep]
              deps_map[basedep]["provides"].remove(dep)
        cycles = FindCycles()
      seconds = time.time() - start
      if "--quiet" not in emerge.opts and seconds >= 0.1:
        print "Tree sanitized in %dm%.1fs" % (seconds / 60, seconds % 60)

    def FindRecursiveProvides(pkg, seen):
      """Find all nodes that require a particular package.

      Assumes that graph is acyclic.

      Args:
        pkg: Package identifier.
        seen: Nodes that have been visited so far.
      """
      if pkg in seen:
        return
      seen.add(pkg)
      info = deps_map[pkg]
      info["tprovides"] = info["provides"].copy()
      for dep in info["provides"]:
        FindRecursiveProvides(dep, seen)
        info["tprovides"].update(deps_map[dep]["tprovides"])

    ReverseTree(deps_tree)

    # We need to remove unused packages so that we can use the dependency
    # ordering of the install process to show us what cycles to crack.
    RemoveUnusedPackages()
    SanitizeTree()
    seen = set()
    for pkg in deps_map:
      FindRecursiveProvides(pkg, seen)
    return deps_map

  def PrintInstallPlan(self, deps_map):
    """Print an emerge-style install plan.

    The install plan lists what packages we're installing, in order.
    It's useful for understanding what parallel_emerge is doing.

    Args:
      deps_map: The dependency graph.
    """

    def InstallPlanAtNode(target, deps_map):
      nodes = []
      nodes.append(target)
      for dep in deps_map[target]["provides"]:
        del deps_map[dep]["needs"][target]
        if not deps_map[dep]["needs"]:
          nodes.extend(InstallPlanAtNode(dep, deps_map))
      return nodes

    deps_map = copy.deepcopy(deps_map)
    install_plan = []
    plan = set()
    for target, info in deps_map.iteritems():
      if not info["needs"] and target not in plan:
        for item in InstallPlanAtNode(target, deps_map):
          plan.add(item)
          install_plan.append(self.package_db[item])

    for pkg in plan:
      del deps_map[pkg]

    if deps_map:
      print "Cyclic dependencies:", " ".join(deps_map)
      PrintDepsMap(deps_map)
      sys.exit(1)

    self.emerge.depgraph.display(install_plan)


def PrintDepsMap(deps_map):
  """Print dependency graph; for each package, list its prerequisites."""
  for i in sorted(deps_map):
    print "%s: (%s) needs" % (i, deps_map[i]["action"])
    needs = deps_map[i]["needs"]
    for j in sorted(needs):
      print " %s" % (j)
    if not needs:
      print " no dependencies"


class EmergeJobState(object):
  """Structure describing the state of one emerge job."""

  __slots__ = ["done", "filename", "last_notify_timestamp", "last_output_seek",
               "last_output_timestamp", "pkgname", "retcode", "start_timestamp",
               "target", "fetch_only", "unpack_only"]

  def __init__(self, target, pkgname, done, filename, start_timestamp,
               retcode=None, fetch_only=False, unpack_only=False):

    # The full name of the target we're building (e.g.
    # virtual/target-os-1-r60)
    self.target = target

    # The short name of the target we're building (e.g. target-os-1-r60)
    self.pkgname = pkgname

    # Whether the job is done. (True if the job is done; false otherwise.)
    self.done = done

    # The filename where output is currently stored.
    self.filename = filename

    # The timestamp of the last time we printed the name of the log file. We
    # print this at the beginning of the job, so this starts at
    # start_timestamp.
    self.last_notify_timestamp = start_timestamp

    # The location (in bytes) of the end of the last complete line we printed.
    # This starts off at zero. We use this to jump to the right place when we
    # print output from the same ebuild multiple times.
    self.last_output_seek = 0

    # The timestamp of the last time we printed output. Since we haven't
    # printed output yet, this starts at zero.
    self.last_output_timestamp = 0

    # The return code of our job, if the job is actually finished.
    self.retcode = retcode

    # Was this just a fetch job?
    self.fetch_only = fetch_only

    # The timestamp when our job started.
    self.start_timestamp = start_timestamp

    # No emerge, only unpack packages.
    self.unpack_only = unpack_only


def KillHandler(_signum, _frame):
  # Kill self and all subprocesses.
  os.killpg(0, signal.SIGKILL)

def SetupWorkerSignals():
  def ExitHandler(_signum, _frame):
    # Set KILLED flag.
    KILLED.set()

    # Remove our signal handlers so we don't get called recursively.
    signal.signal(signal.SIGINT, KillHandler)
    signal.signal(signal.SIGTERM, KillHandler)

  # Ensure that we exit quietly and cleanly, if possible, when we receive
  # SIGTERM or SIGINT signals. By default, when the user hits CTRL-C, all
  # of the child processes will print details about KeyboardInterrupt
  # exceptions, which isn't very helpful.
  signal.signal(signal.SIGINT, ExitHandler)
  signal.signal(signal.SIGTERM, ExitHandler)

def EmergeProcess(output, *args, **kwargs):
  """Merge a package in a subprocess.

  Args:
    output: Temporary file to write output.
    *args: Arguments to pass to Scheduler constructor.
    **kwargs: Keyword arguments to pass to Scheduler constructor.

  Returns:
    The exit code returned by the subprocess.
  """
  pid = os.fork()
  if pid == 0:
    try:
      # Sanity checks.
      if sys.stdout.fileno() != 1:
        raise Exception("sys.stdout.fileno() != 1")
      if sys.stderr.fileno() != 2:
        raise Exception("sys.stderr.fileno() != 2")

      # - Redirect 1 (stdout) and 2 (stderr) at our temporary file.
      # - Redirect 0 to point at sys.stdin. In this case, sys.stdin
      #   points at a file reading os.devnull, because multiprocessing mucks
      #   with sys.stdin.
      # - Leave the sys.stdin and output filehandles alone.
      fd_pipes = {0: sys.stdin.fileno(),
                  1: output.fileno(),
                  2: output.fileno(),
                  sys.stdin.fileno(): sys.stdin.fileno(),
                  output.fileno(): output.fileno()}
      if 0 <= vercmp(portage.VERSION, "2.1.11.50"):
        # pylint: disable=W0212
        portage.process._setup_pipes(fd_pipes, close_fds=False)
      else:
        # pylint: disable=W0212
        portage.process._setup_pipes(fd_pipes)

      # Portage doesn't like when sys.stdin.fileno() != 0, so point sys.stdin
      # at the filehandle we just created in _setup_pipes.
      if sys.stdin.fileno() != 0:
        sys.__stdin__ = sys.stdin = os.fdopen(0, "r")

      scheduler = Scheduler(*args, **kwargs)

      # Enable blocker handling even though we're in --nodeps mode. This
      # allows us to unmerge the blocker after we've merged the replacement.
      scheduler._opts_ignore_blockers = frozenset()

      # Actually do the merge.
      retval = scheduler.merge()

    # We catch all exceptions here (including SystemExit, KeyboardInterrupt,
    # etc) so as to ensure that we don't confuse the multiprocessing module,
    # which expects that all forked children exit with os._exit().
    # pylint: disable=W0702
    except:
      traceback.print_exc(file=output)
      retval = 1
    sys.stdout.flush()
    sys.stderr.flush()
    output.flush()
    # pylint: disable=W0212
    os._exit(retval)
  else:
    # Return the exit code of the subprocess.
    return os.waitpid(pid, 0)[1]


def UnpackPackage(pkg_state):
  """Unpacks package described by pkg_state.

  Args:
    pkg_state: EmergeJobState object describing target.

  Returns:
    Exit code returned by subprocess.
  """
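  # Roughly equivalent to the shell pipeline below (assuming FindCompressor
  # picks pbzip2):
  #   pbzip2 -dc --ignore-trailing-garbage=1 $PKGDIR/<target>.tbz2 |
  #       sudo tar -xf - -C $ROOT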
  pkgdir = os.environ.get("PKGDIR",
                          os.path.join(os.environ["SYSROOT"], "packages"))
  root = os.environ.get("ROOT", os.environ["SYSROOT"])
  path = os.path.join(pkgdir, pkg_state.target + ".tbz2")
  comp = cros_build_lib.FindCompressor(cros_build_lib.COMP_BZIP2)
  cmd = [comp, "-dc"]
  if comp.endswith("pbzip2"):
    cmd.append("--ignore-trailing-garbage=1")
  cmd.append(path)

  result = cros_build_lib.RunCommand(cmd, cwd=root, stdout_to_pipe=True,
                                     print_cmd=False, error_code_ok=True)

  # If we were not successful, return now and don't attempt untar.
  if result.returncode:
    return result.returncode

  cmd = ["sudo", "tar", "-xf", "-", "-C", root]
  result = cros_build_lib.RunCommand(cmd, cwd=root, input=result.output,
                                     print_cmd=False, error_code_ok=True)

  return result.returncode


def EmergeWorker(task_queue, job_queue, emerge, package_db, fetch_only=False,
                 unpack_only=False):
  """This worker emerges any packages given to it on the task_queue.

  Args:
    task_queue: The queue of tasks for this worker to do.
    job_queue: The queue of results from the worker.
    emerge: An EmergeData() object.
    package_db: A dict, mapping package ids to portage Package objects.
    fetch_only: A bool, indicating if we should just fetch the target.
    unpack_only: A bool, indicating if we should just unpack the target.

  It expects package identifiers to be passed to it via task_queue. When
  a task is started, it pushes the (target, filename) to the started_queue.
  The output is stored in filename. When a merge starts or finishes, we push
  EmergeJobState objects to the job_queue.
  """

  SetupWorkerSignals()
  settings, trees, mtimedb = emerge.settings, emerge.trees, emerge.mtimedb

  # Disable flushing of caches to save on I/O.
  root = emerge.settings["ROOT"]
  vardb = emerge.trees[root]["vartree"].dbapi
  vardb._flush_cache_enabled = False
  bindb = emerge.trees[root]["bintree"].dbapi
  # Might be a set, might be a list, might be None; no clue, just use shallow
  # copy to ensure we can roll it back.
  # pylint: disable=W0212
  original_remotepkgs = copy.copy(bindb.bintree._remotepkgs)

  opts, spinner = emerge.opts, emerge.spinner
  opts["--nodeps"] = True
  if fetch_only:
    opts["--fetchonly"] = True

  while True:
    # Wait for a new item to show up on the queue. This is a blocking wait,
    # so if there's nothing to do, we just sit here.
    pkg_state = task_queue.get()
    if pkg_state is None:
      # If target is None, this means that the main thread wants us to quit.
      # The other workers need to exit too, so we'll push the message back on
      # to the queue so they'll get it too.
      task_queue.put(None)
      return
    if KILLED.is_set():
      return

    target = pkg_state.target

    db_pkg = package_db[target]

    if db_pkg.type_name == "binary":
      if not fetch_only and pkg_state.fetched_successfully:
        # Ensure portage doesn't think our pkg is remote -- else it'll force
        # a redownload of it (even if the on-disk file is fine). In-memory
        # caching basically, implemented dumbly.
        bindb.bintree._remotepkgs = None
      else:
        bindb.bintree._remotepkgs = original_remotepkgs

    db_pkg.root_config = emerge.root_config
    install_list = [db_pkg]
    pkgname = db_pkg.pf
    output = tempfile.NamedTemporaryFile(prefix=pkgname + "-", delete=False)
    os.chmod(output.name, 0o644)
    start_timestamp = time.time()
    job = EmergeJobState(target, pkgname, False, output.name, start_timestamp,
                         fetch_only=fetch_only, unpack_only=unpack_only)
    job_queue.put(job)
    if "--pretend" in opts:
      retcode = 0
    else:
      try:
        emerge.scheduler_graph.mergelist = install_list
        if unpack_only:
          retcode = UnpackPackage(pkg_state)
        else:
          retcode = EmergeProcess(output, settings, trees, mtimedb, opts,
                                  spinner, favorites=emerge.favorites,
                                  graph_config=emerge.scheduler_graph)
      except Exception:
        traceback.print_exc(file=output)
        retcode = 1
      output.close()

    if KILLED.is_set():
      return

    job = EmergeJobState(target, pkgname, True, output.name, start_timestamp,
                         retcode, fetch_only=fetch_only,
                         unpack_only=unpack_only)
    job_queue.put(job)


class LinePrinter(object):
  """Helper object to print a single line."""

  def __init__(self, line):
    self.line = line

  def Print(self, _seek_locations):
    print self.line


class JobPrinter(object):
  """Helper object to print output of a job."""

  def __init__(self, job, unlink=False):
    """Print output of job.

    If unlink is True, unlink the job output file when done.
    """
    self.current_time = time.time()
    self.job = job
    self.unlink = unlink

  def Print(self, seek_locations):

    job = self.job

    # Calculate how long the job has been running.
    seconds = self.current_time - job.start_timestamp

    # Note that we've printed out the job so far.
    job.last_output_timestamp = self.current_time

    # Note that we're starting the job
    info = "job %s (%dm%.1fs)" % (job.pkgname, seconds / 60, seconds % 60)
    last_output_seek = seek_locations.get(job.filename, 0)
    if last_output_seek:
      print "=== Continue output for %s ===" % info
    else:
      print "=== Start output for %s ===" % info

    # Print actual output from job
    f = codecs.open(job.filename, encoding='utf-8', errors='replace')
    f.seek(last_output_seek)
    prefix = job.pkgname + ":"
    for line in f:

      # Save off our position in the file
      if line and line[-1] == "\n":
        last_output_seek = f.tell()
        line = line[:-1]

      # Print our line
      print prefix, line.encode('utf-8', 'replace')
    f.close()

    # Save our last spot in the file so that we don't print out the same
    # location twice.
    seek_locations[job.filename] = last_output_seek

    # Note end of output section
    if job.done:
      print "=== Complete: %s ===" % info
    else:
      print "=== Still running: %s ===" % info

    if self.unlink:
      os.unlink(job.filename)


def PrintWorker(queue):
  """A worker that prints stuff to the screen as requested."""

  def ExitHandler(_signum, _frame):
    # Set KILLED flag.
    KILLED.set()

    # Switch to default signal handlers so that we'll die after two signals.
    signal.signal(signal.SIGINT, KillHandler)
    signal.signal(signal.SIGTERM, KillHandler)

  # Don't exit on the first SIGINT / SIGTERM, because the parent worker will
  # handle it and tell us when we need to exit.
  signal.signal(signal.SIGINT, ExitHandler)
  signal.signal(signal.SIGTERM, ExitHandler)

  # seek_locations is a map indicating the position we are at in each file.
  # It starts off empty, but is set by the various Print jobs as we go along
  # to indicate where we left off in each file.
  seek_locations = {}
  while True:
    try:
      job = queue.get()
      if job:
        job.Print(seek_locations)
        sys.stdout.flush()
      else:
        break
    except IOError as ex:
      if ex.errno == errno.EINTR:
        # Looks like we received a signal. Keep printing.
        continue
      raise


class TargetState(object):
  """Structure describing the state of a build target."""

  __slots__ = ("target", "info", "score", "prefetched", "fetched_successfully")

  def __init__(self, target, info):
    self.target, self.info = target, info
    self.fetched_successfully = False
    self.prefetched = False
    self.score = None
    self.update_score()

  def __cmp__(self, other):
    return cmp(self.score, other.score)

  def update_score(self):
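    # heapq is a min-heap, so smaller tuples sort first. Negating the counts
    # we want to maximize means packages that transitively unblock the most
    # work ("tprovides") come first, then packages with fewer unmet "needs",
    # then binary packages ahead of source builds, with portage's install
    # order ("idx") as a late tie-breaker.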
    self.score = (
        -len(self.info["tprovides"]),
        len(self.info["needs"]),
        not self.info["binary"],
        -len(self.info["provides"]),
        self.info["idx"],
        self.target,
    )


class ScoredHeap(object):
  """Implementation of a general purpose scored heap."""

  __slots__ = ("heap", "_heap_set")
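  # "heap" holds TargetState objects ordered by score via heapq (a min-heap),
  # while "_heap_set" mirrors the targets currently in the heap so that
  # __contains__ checks are O(1) instead of scanning the list.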

  def __init__(self, initial=()):
    self.heap = list()
    self._heap_set = set()
    if initial:
      self.multi_put(initial)

  def get(self):
    item = heapq.heappop(self.heap)
    self._heap_set.remove(item.target)
    return item

  def put(self, item):
    if not isinstance(item, TargetState):
      raise ValueError("Item %r isn't a TargetState" % (item,))
    heapq.heappush(self.heap, item)
    self._heap_set.add(item.target)

  def multi_put(self, sequence):
    sequence = list(sequence)
    self.heap.extend(sequence)
    self._heap_set.update(x.target for x in sequence)
    self.sort()

  def sort(self):
    heapq.heapify(self.heap)

  def __contains__(self, target):
    return target in self._heap_set

  def __nonzero__(self):
    return bool(self.heap)

  def __len__(self):
    return len(self.heap)


class EmergeQueue(object):
  """Class to schedule emerge jobs according to a dependency graph."""

  def __init__(self, deps_map, emerge, package_db, show_output, unpack_only):
    # Store the dependency graph.
    self._deps_map = deps_map
    self._state_map = {}
    # Initialize the running job maps and ready heaps to empty.
    self._build_jobs = {}
    self._build_ready = ScoredHeap()
    self._fetch_jobs = {}
    self._fetch_ready = ScoredHeap()
    self._unpack_jobs = {}
    self._unpack_ready = ScoredHeap()
    # List of total package installs represented in deps_map.
    install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"]
    self._total_jobs = len(install_jobs)
    self._show_output = show_output
    self._unpack_only = unpack_only

    if "--pretend" in emerge.opts:
      print "Skipping merge because of --pretend mode."
      sys.exit(0)

    # Set a process group so we can easily terminate all children.
    os.setsid()

    # Set up the scheduler graph object. This is used by the child processes
    # to help schedule jobs.
    emerge.scheduler_graph = emerge.depgraph.schedulerGraph()

    # Calculate how many jobs we can run in parallel. We don't want to pass
    # the --jobs flag over to emerge itself, because that'll tell emerge to
    # hide its output, and said output is quite useful for debugging hung
    # jobs.
    procs = min(self._total_jobs,
                emerge.opts.pop("--jobs", multiprocessing.cpu_count()))
    self._build_procs = self._unpack_procs = self._fetch_procs = max(1, procs)
    self._load_avg = emerge.opts.pop("--load-average", None)
    self._job_queue = multiprocessing.Queue()
    self._print_queue = multiprocessing.Queue()

    self._fetch_queue = multiprocessing.Queue()
    args = (self._fetch_queue, self._job_queue, emerge, package_db, True)
    self._fetch_pool = multiprocessing.Pool(self._fetch_procs, EmergeWorker,
                                            args)

    self._build_queue = multiprocessing.Queue()
    args = (self._build_queue, self._job_queue, emerge, package_db)
    self._build_pool = multiprocessing.Pool(self._build_procs, EmergeWorker,
                                            args)

    if self._unpack_only:
      # The unpack pool is only required for unpack_only jobs.
      self._unpack_queue = multiprocessing.Queue()
      args = (self._unpack_queue, self._job_queue, emerge, package_db, False,
              True)
      self._unpack_pool = multiprocessing.Pool(self._unpack_procs, EmergeWorker,
                                               args)

    self._print_worker = multiprocessing.Process(target=PrintWorker,
                                                 args=[self._print_queue])
    self._print_worker.start()

    # Initialize the failed queue to empty.
    self._retry_queue = []
    self._failed = set()

    # Set up an exit handler so that we print nice messages if we are
    # terminated.
    self._SetupExitHandler()

    # Schedule our jobs.
    self._state_map.update(
        (pkg, TargetState(pkg, data)) for pkg, data in deps_map.iteritems())
    self._fetch_ready.multi_put(self._state_map.itervalues())

  def _SetupExitHandler(self):
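    """Install SIGINT/SIGTERM handlers that report status before exiting."""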

    def ExitHandler(signum, _frame):
      # Set KILLED flag.
      KILLED.set()

      # Kill our signal handlers so we don't get called recursively
      signal.signal(signal.SIGINT, KillHandler)
      signal.signal(signal.SIGTERM, KillHandler)

      # Print our current job status
      for job in self._build_jobs.itervalues():
        if job:
          self._print_queue.put(JobPrinter(job, unlink=True))

      # Notify the user that we are exiting
      self._Print("Exiting on signal %s" % signum)
      self._print_queue.put(None)
      self._print_worker.join()

      # Kill child threads, then exit.
      os.killpg(0, signal.SIGKILL)
      sys.exit(1)

    # Print out job status when we are killed
    signal.signal(signal.SIGINT, ExitHandler)
    signal.signal(signal.SIGTERM, ExitHandler)

  def _ScheduleUnpack(self, pkg_state):
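    """Hand a fetched package off to the unpack workers."""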
    self._unpack_jobs[pkg_state.target] = None
    self._unpack_queue.put(pkg_state)

  def _Schedule(self, pkg_state):
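    """Schedule a package to build if it still needs to be merged.

    Returns True if a build job was queued, None otherwise.
    """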
    # We maintain a tree of all deps; if this package doesn't need to be
    # installed, just free up its children and continue.
    # It is possible to reinstall deps of deps, without reinstalling
    # first level deps, like so:
    # virtual/target-os (merge) -> eselect (nomerge) -> python (merge)
    this_pkg = pkg_state.info
    target = pkg_state.target
    if pkg_state.info is not None:
      if this_pkg["action"] == "nomerge":
        self._Finish(target)
      elif target not in self._build_jobs:
        # Kick off the build if it's marked to be built.
        self._build_jobs[target] = None
        self._build_queue.put(pkg_state)
        return True

  def _ScheduleLoop(self, unpack_only=False):
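    """Fill the build (or unpack) worker pool from its ready heap.

    Schedules at most one new job when the current load average exceeds
    the requested --load-average limit.
    """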
    if unpack_only:
      ready_queue = self._unpack_ready
      jobs_queue = self._unpack_jobs
      procs = self._unpack_procs
    else:
      ready_queue = self._build_ready
      jobs_queue = self._build_jobs
      procs = self._build_procs

    # If the current load exceeds our desired load average, don't schedule
    # more than one job.
    if self._load_avg and os.getloadavg()[0] > self._load_avg:
      needed_jobs = 1
    else:
      needed_jobs = procs

    # Schedule more jobs.
    while ready_queue and len(jobs_queue) < needed_jobs:
      state = ready_queue.get()
      if unpack_only:
        self._ScheduleUnpack(state)
      else:
        if state.target not in self._failed:
          self._Schedule(state)

  def _Print(self, line):
    """Print a single line."""
    self._print_queue.put(LinePrinter(line))

  def _Status(self):
    """Print a periodic status update on fetch/unpack/build progress."""
    current_time = time.time()
    no_output = True

    # Print interim output every minute if --show-output is used. Otherwise,
    # print notifications about running packages every 2 minutes, and print
    # full output for jobs that have been running for 60 minutes or more.
    if self._show_output:
      interval = 60
      notify_interval = 0
    else:
      interval = 60 * 60
      notify_interval = 60 * 2
    for job in self._build_jobs.itervalues():
      if job:
        last_timestamp = max(job.start_timestamp, job.last_output_timestamp)
        if last_timestamp + interval < current_time:
          self._print_queue.put(JobPrinter(job))
          job.last_output_timestamp = current_time
          no_output = False
        elif (notify_interval and
              job.last_notify_timestamp + notify_interval < current_time):
          job_seconds = current_time - job.start_timestamp
          args = (job.pkgname, job_seconds / 60, job_seconds % 60, job.filename)
          info = "Still building %s (%dm%.1fs). Logs in %s" % args
          job.last_notify_timestamp = current_time
          self._Print(info)
          no_output = False

    # If we haven't printed any messages yet, print a general status message
    # here.
    if no_output:
      seconds = current_time - GLOBAL_START
      fjobs, fready = len(self._fetch_jobs), len(self._fetch_ready)
      ujobs, uready = len(self._unpack_jobs), len(self._unpack_ready)
      bjobs, bready = len(self._build_jobs), len(self._build_ready)
      retries = len(self._retry_queue)
      pending = max(0, len(self._deps_map) - fjobs - bjobs)
      line = "Pending %s/%s, " % (pending, self._total_jobs)
      if fjobs or fready:
        line += "Fetching %s/%s, " % (fjobs, fready + fjobs)
      if ujobs or uready:
        line += "Unpacking %s/%s, " % (ujobs, uready + ujobs)
      if bjobs or bready or retries:
        line += "Building %s/%s, " % (bjobs, bready + bjobs)
        if retries:
          line += "Retrying %s, " % (retries,)
      load = " ".join(str(x) for x in os.getloadavg())
      line += "[Time %dm%.1fs Load %s]" % (seconds / 60, seconds % 60, load)
      self._Print(line)

  def _Finish(self, target):
    """Mark a target as completed and unblock dependencies."""
    this_pkg = self._deps_map[target]
    if this_pkg["needs"] and this_pkg["nodeps"]:
      # We got installed, but our deps have not been installed yet. Dependent
      # packages should only be installed when our needs have been fully met.
      this_pkg["action"] = "nomerge"
    else:
      for dep in this_pkg["provides"]:
        dep_pkg = self._deps_map[dep]
        state = self._state_map[dep]
        del dep_pkg["needs"][target]
        state.update_score()
        if not state.prefetched:
          if dep in self._fetch_ready:
            # If it's not currently being fetched, update the prioritization
            self._fetch_ready.sort()
        elif not dep_pkg["needs"]:
          if dep_pkg["nodeps"] and dep_pkg["action"] == "nomerge":
            self._Finish(dep)
          else:
            self._build_ready.put(self._state_map[dep])
      self._deps_map.pop(target)

  def _Retry(self):
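    """Pop the retry queue until one failed package can be rescheduled.

    Only one previously failed package is retried at a time.
    """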
    while self._retry_queue:
      state = self._retry_queue.pop(0)
      if self._Schedule(state):
        self._Print("Retrying emerge of %s." % state.target)
        break

  def _Shutdown(self):
    # Tell emerge workers to exit. They all exit when 'None' is pushed
    # to the queue.

    # Shut down the workers first, then the job queue (which is how they feed
    # results back), and finally the print queue.

    def _stop(queue, pool):
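      # Push the 'None' sentinel, wait for the pool to drain, and make sure
      # the pool is terminated even if the shutdown raises.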
      if pool is None:
        return
      try:
        queue.put(None)
        pool.close()
        pool.join()
      finally:
        pool.terminate()

    _stop(self._fetch_queue, self._fetch_pool)
    self._fetch_queue = self._fetch_pool = None

    _stop(self._build_queue, self._build_pool)
    self._build_queue = self._build_pool = None

    if self._unpack_only:
      _stop(self._unpack_queue, self._unpack_pool)
      self._unpack_queue = self._unpack_pool = None

    if self._job_queue is not None:
      self._job_queue.close()
      self._job_queue = None

    # Now that our workers are finished, we can kill the print queue.
    if self._print_worker is not None:
      try:
        self._print_queue.put(None)
        self._print_queue.close()
        self._print_worker.join()
      finally:
        self._print_worker.terminate()
      self._print_queue = self._print_worker = None

  def Run(self):
    """Run through the scheduled ebuilds.

    Keep running so long as we have uninstalled packages in the
    dependency graph to merge.
    """
    if not self._deps_map:
      return

    # Start the fetchers.
    for _ in xrange(min(self._fetch_procs, len(self._fetch_ready))):
      state = self._fetch_ready.get()
      self._fetch_jobs[state.target] = None
      self._fetch_queue.put(state)

    # Print an update, then get going.
    self._Status()

    retried = set()
    while self._deps_map:
      # Check here that we are actually waiting for something.
      if (self._build_queue.empty() and
          self._job_queue.empty() and
          not self._fetch_jobs and
          not self._fetch_ready and
          not self._unpack_jobs and
          not self._unpack_ready and
          not self._build_jobs and
          not self._build_ready and
          self._deps_map):
        # If we have failed on a package, retry it now.
        if self._retry_queue:
          self._Retry()
        else:
          # Tell the user why we're exiting.
          if self._failed:
            print 'Packages failed:\n\t%s' % '\n\t'.join(self._failed)
            status_file = os.environ.get("PARALLEL_EMERGE_STATUS_FILE")
            if status_file:
              failed_pkgs = set(portage.versions.cpv_getkey(x)
                                for x in self._failed)
              with open(status_file, "a") as f:
                f.write("%s\n" % " ".join(failed_pkgs))
          else:
            print "Deadlock! Circular dependencies!"
          sys.exit(1)

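      # Poll the job queue in twelve 5-second slices, rescheduling between
      # polls; if no job arrives within roughly a minute, the for/else falls
      # through to print a status update instead.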
      for _ in xrange(12):
        try:
          job = self._job_queue.get(timeout=5)
          break
        except Queue.Empty:
          # Check if any more jobs can be scheduled.
          self._ScheduleLoop()
      else:
        # Print an update every 60 seconds.
        self._Status()
        continue

      target = job.target

      if job.fetch_only:
        if not job.done:
          self._fetch_jobs[job.target] = job
        else:
          state = self._state_map[job.target]
          state.prefetched = True
          state.fetched_successfully = (job.retcode == 0)
          del self._fetch_jobs[job.target]
          self._Print("Fetched %s in %2.2fs"
                      % (target, time.time() - job.start_timestamp))

          if self._show_output or job.retcode != 0:
            self._print_queue.put(JobPrinter(job, unlink=True))
          else:
            os.unlink(job.filename)
          # Failure or not, let build work with it next.
          if not self._deps_map[job.target]["needs"]:
            self._build_ready.put(state)
            self._ScheduleLoop()

          if self._unpack_only and job.retcode == 0:
            self._unpack_ready.put(state)
            self._ScheduleLoop(unpack_only=True)

        if self._fetch_ready:
          state = self._fetch_ready.get()
          self._fetch_queue.put(state)
          self._fetch_jobs[state.target] = None
        else:
          # Minor optimization; shut down fetchers early since we know
          # the queue is empty.
          self._fetch_queue.put(None)
        continue

      if job.unpack_only:
        if not job.done:
          self._unpack_jobs[target] = job
        else:
          del self._unpack_jobs[target]
          self._Print("Unpacked %s in %2.2fs"
                      % (target, time.time() - job.start_timestamp))
          if self._show_output or job.retcode != 0:
            self._print_queue.put(JobPrinter(job, unlink=True))
          else:
            os.unlink(job.filename)
          if self._unpack_ready:
            state = self._unpack_ready.get()
            self._unpack_queue.put(state)
            self._unpack_jobs[state.target] = None
        continue

      if not job.done:
        self._build_jobs[target] = job
        self._Print("Started %s (logged in %s)" % (target, job.filename))
        continue

      # Print output of job
      if self._show_output or job.retcode != 0:
        self._print_queue.put(JobPrinter(job, unlink=True))
      else:
        os.unlink(job.filename)
      del self._build_jobs[target]

      seconds = time.time() - job.start_timestamp
      details = "%s (in %dm%.1fs)" % (target, seconds / 60, seconds % 60)
      previously_failed = target in self._failed

      # Complain if necessary.
      if job.retcode != 0:
        # Handle job failure.
        if previously_failed:
          # If this job has failed previously, give up.
          self._Print("Failed %s. Your build has failed." % details)
        else:
          # Queue up this build to try again after a long while.
          retried.add(target)
          self._retry_queue.append(self._state_map[target])
          self._failed.add(target)
          self._Print("Failed %s, retrying later." % details)
      else:
        if previously_failed:
          # Remove target from list of failed packages.
          self._failed.remove(target)

        self._Print("Completed %s" % details)

        # Mark as completed and unblock waiting ebuilds.
        self._Finish(target)

        if previously_failed and self._retry_queue:
          # If we have successfully retried a failed package, and there
          # are more failed packages, try the next one. We will only have
          # one retrying package actively running at a time.
          self._Retry()

      # Schedule pending jobs and print an update.
      self._ScheduleLoop()
      self._Status()

    # If packages were retried, output a warning.
    if retried:
      self._Print("")
      self._Print("WARNING: The following packages failed the first time,")
      self._Print("but succeeded upon retry. This might indicate incorrect")
      self._Print("dependencies.")
      for pkg in retried:
        self._Print("  %s" % pkg)
      self._Print("@@@STEP_WARNINGS@@@")
      self._Print("")

    # Tell child threads to exit.
    self._Print("Merge complete")


def main(argv):
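  """Entry point; wraps real_main() and reaps multiprocessing threads."""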
  try:
    return real_main(argv)
  finally:
    # Work around multiprocessing sucking and not cleaning up after itself.
    # http://bugs.python.org/issue4106
    # Step one: ensure GC is run *prior* to the VM starting shutdown.
    gc.collect()
    # Step two: go looking for those threads and try to manually reap
    # them if we can.
    for x in threading.enumerate():
      # Filter on the name, and ident; if ident is None, the thread
      # wasn't started.
      if x.name == 'QueueFeederThread' and x.ident is not None:
        x.join(1)


def real_main(argv):
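  """Parse arguments, build the dependency graph, and run the merge."""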
  parallel_emerge_args = argv[:]
  deps = DepGraphGenerator()
  deps.Initialize(parallel_emerge_args)
  emerge = deps.emerge

  if emerge.action is not None:
    argv = deps.ParseParallelEmergeArgs(argv)
    return emerge_main(argv)
  elif not emerge.cmdline_packages:
    Usage()
    return 1

  # Unless we're in pretend mode, there's not much point running without
  # root access. We need to be able to install packages.
  #
  # NOTE: Even if you're running --pretend, it's a good idea to run
  # parallel_emerge with root access so that portage can write to the
  # dependency cache. This is important for performance.
  if "--pretend" not in emerge.opts and portage.data.secpass < 2:
    print "parallel_emerge: superuser access is required."
    return 1

  if "--quiet" not in emerge.opts:
    cmdline_packages = " ".join(emerge.cmdline_packages)
    print "Starting fast-emerge."
    print " Building package %s on %s" % (cmdline_packages,
                                          deps.board or "root")

  deps_tree, deps_info = deps.GenDependencyTree()

  # You want me to be verbose? I'll give you two trees! Twice as much value.
  if "--tree" in emerge.opts and "--verbose" in emerge.opts:
    deps.PrintTree(deps_tree)

  deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)

  # OK, time to print out our progress so far.
  deps.PrintInstallPlan(deps_graph)
  if "--tree" in emerge.opts:
    PrintDepsMap(deps_graph)

  # Are we upgrading portage? If so, and there are more packages to merge,
  # schedule a restart of parallel_emerge to merge the rest. This ensures that
  # we pick up all updates to portage settings before merging any more
  # packages.
  portage_upgrade = False
  root = emerge.settings["ROOT"]
  # pylint: disable=W0212
  final_db = emerge.depgraph._dynamic_config.mydbapi[root]
  if root == "/":
    for db_pkg in final_db.match_pkgs("sys-apps/portage"):
      portage_pkg = deps_graph.get(db_pkg.cpv)
      if portage_pkg:
        portage_upgrade = True
        if "--quiet" not in emerge.opts:
          print "Upgrading portage first, then restarting..."

  # Upgrade Portage first, then the rest of the packages.
  #
  # In order to grant the child permission to run setsid, we need to run sudo
  # again. We preserve SUDO_USER here in case an ebuild depends on it.
  if portage_upgrade:
    # Calculate what arguments to use when re-invoking.
    args = ["sudo", "-E", "SUDO_USER=%s" % os.environ.get("SUDO_USER", "")]
    args += [os.path.abspath(sys.argv[0])] + parallel_emerge_args
    args += ["--exclude=sys-apps/portage"]

    # First upgrade Portage.
    passthrough_args = ("--quiet", "--pretend", "--verbose")
    emerge_args = [k for k in emerge.opts if k in passthrough_args]
    ret = emerge_main(emerge_args + ["portage"])
    if ret != 0:
      return ret

    # Now upgrade the rest.
    os.execvp(args[0], args)

  # Run the queued emerges.
  scheduler = EmergeQueue(deps_graph, emerge, deps.package_db, deps.show_output,
                          deps.unpack_only)
  try:
    scheduler.Run()
  finally:
    # pylint: disable=W0212
    scheduler._Shutdown()
  scheduler = None

  clean_logs(emerge.settings)

  print "Done"
  return 0