# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Program to run emerge in parallel, for significant speedup.

Usage:
 ./parallel_emerge [--board=BOARD] [--workon=PKGS]
  [--force-remote-binary=PKGS] [emerge args] package

This script runs multiple emerge processes in parallel, using appropriate
Portage APIs. It is faster than standard emerge because it has a
multiprocess model instead of an asynchronous model.
"""

from __future__ import print_function

import codecs
import copy
import errno
import gc
import heapq
import multiprocessing
import os
try:
  import Queue
except ImportError:
  # Python-3 renamed to "queue". We still use Queue to avoid collisions
  # with naming variables as "queue". Maybe we'll transition at some point.
  # pylint: disable=F0401
  import queue as Queue
import signal
import subprocess
import sys
import tempfile
import threading
import time
import traceback

from chromite.lib import cros_build_lib
from chromite.lib import process_util
from chromite.lib import proctitle

# If PORTAGE_USERNAME isn't specified, scrape it from the $HOME variable. On
# Chromium OS, the default "portage" user doesn't have the necessary
# permissions. It'd be easier if we could default to $USERNAME, but $USERNAME
# is "root" here because we get called through sudo.
#
# We need to set this before importing any portage modules, because portage
# looks up "PORTAGE_USERNAME" at import time.
#
# NOTE: .bashrc sets PORTAGE_USERNAME = $USERNAME, so most people won't
# encounter this case unless they have an old chroot or blow away the
# environment by running sudo without the -E specifier.
if "PORTAGE_USERNAME" not in os.environ:
  homedir = os.environ.get("HOME")
  if homedir:
    os.environ["PORTAGE_USERNAME"] = os.path.basename(homedir)

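# For example, if HOME=/home/build, the snippet above sets
# PORTAGE_USERNAME=build before any portage module is imported (illustrative
# value; the actual username depends on the calling environment).
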
# Wrap Popen with a lock to ensure no two Popen calls are executed
# simultaneously in the same process.
# Two Popen calls at the same time might be the cause of crbug.com/433482.
_popen_lock = threading.Lock()
_old_popen = subprocess.Popen

def _LockedPopen(*args, **kwargs):
  with _popen_lock:
    return _old_popen(*args, **kwargs)

subprocess.Popen = _LockedPopen

# Portage doesn't expose dependency trees in its public API, so we have to
# make use of some private APIs here. These modules are found under
# /usr/lib/portage/pym/.
#
# TODO(davidjames): Update Portage to expose public APIs for these features.
# pylint: disable=F0401
from _emerge.actions import adjust_configs
from _emerge.actions import load_emerge_config
from _emerge.create_depgraph_params import create_depgraph_params
from _emerge.depgraph import backtrack_depgraph
from _emerge.main import emerge_main
from _emerge.main import parse_opts
from _emerge.Package import Package
from _emerge.post_emerge import clean_logs
from _emerge.Scheduler import Scheduler
from _emerge.stdout_spinner import stdout_spinner
from portage._global_updates import _global_updates
import portage
import portage.debug
# pylint: enable=F0401


def Usage():
  """Print usage."""
  print("Usage:")
  print(" ./parallel_emerge [--board=BOARD] [--workon=PKGS]")
  print(" [--rebuild] [emerge args] package")
  print()
  print("Packages specified as workon packages are always built from source.")
  print()
  print("The --workon argument is mainly useful when you want to build and")
  print("install packages that you are working on unconditionally, but do not")
  print("want to have to rev the package to indicate you want to build it")
  print("from source. The build_packages script will automatically supply the")
  print("workon argument to emerge, ensuring that packages selected using")
  print("cros-workon are rebuilt.")
  print()
  print("The --rebuild option rebuilds packages whenever their dependencies")
  print("are changed. This ensures that your build is correct.")


# Global start time
GLOBAL_START = time.time()

# Whether process has been killed by a signal.
KILLED = multiprocessing.Event()


120class EmergeData(object):
121 """This simple struct holds various emerge variables.
122
123 This struct helps us easily pass emerge variables around as a unit.
124 These variables are used for calculating dependencies and installing
125 packages.
126 """
127
David Jamesbf1e3442011-05-28 07:44:20 -0700128 __slots__ = ["action", "cmdline_packages", "depgraph", "favorites",
129 "mtimedb", "opts", "root_config", "scheduler_graph",
130 "settings", "spinner", "trees"]
David Jamesfcb70ef2011-02-02 16:02:30 -0800131
132 def __init__(self):
133 # The action the user requested. If the user is installing packages, this
134 # is None. If the user is doing anything other than installing packages,
135 # this will contain the action name, which will map exactly to the
136 # long-form name of the associated emerge option.
137 #
138 # Example: If you call parallel_emerge --unmerge package, the action name
139 # will be "unmerge"
140 self.action = None
141
142 # The list of packages the user passed on the command-line.
143 self.cmdline_packages = None
144
145 # The emerge dependency graph. It'll contain all the packages involved in
146 # this merge, along with their versions.
147 self.depgraph = None
148
David Jamesbf1e3442011-05-28 07:44:20 -0700149 # The list of candidates to add to the world file.
150 self.favorites = None
151
David Jamesfcb70ef2011-02-02 16:02:30 -0800152 # A dict of the options passed to emerge. This dict has been cleaned up
153 # a bit by parse_opts, so that it's a bit easier for the emerge code to
154 # look at the options.
155 #
156 # Emerge takes a few shortcuts in its cleanup process to make parsing of
157 # the options dict easier. For example, if you pass in "--usepkg=n", the
158 # "--usepkg" flag is just left out of the dictionary altogether. Because
159 # --usepkg=n is the default, this makes parsing easier, because emerge
160 # can just assume that if "--usepkg" is in the dictionary, it's enabled.
161 #
162 # These cleanup processes aren't applied to all options. For example, the
163 # --with-bdeps flag is passed in as-is. For a full list of the cleanups
164 # applied by emerge, see the parse_opts function in the _emerge.main
165 # package.
166 self.opts = None
167
168 # A dictionary used by portage to maintain global state. This state is
169 # loaded from disk when portage starts up, and saved to disk whenever we
170 # call mtimedb.commit().
171 #
172 # This database contains information about global updates (i.e., what
173 # version of portage we have) and what we're currently doing. Portage
174 # saves what it is currently doing in this database so that it can be
175 # resumed when you call it with the --resume option.
176 #
177 # parallel_emerge does not save what it is currently doing in the mtimedb,
178 # so we do not support the --resume option.
179 self.mtimedb = None
180
181 # The portage configuration for our current root. This contains the portage
182 # settings (see below) and the three portage trees for our current root.
183 # (The three portage trees are explained below, in the documentation for
184 # the "trees" member.)
185 self.root_config = None
186
187 # The scheduler graph is used by emerge to calculate what packages to
188 # install. We don't actually install any deps, so this isn't really used,
189 # but we pass it in to the Scheduler object anyway.
190 self.scheduler_graph = None
191
192 # Portage settings for our current session. Most of these settings are set
193 # in make.conf inside our current install root.
194 self.settings = None
195
196 # The spinner, which spews stuff to stdout to indicate that portage is
197 # doing something. We maintain our own spinner, so we set the portage
198 # spinner to "silent" mode.
199 self.spinner = None
200
201 # The portage trees. There are separate portage trees for each root. To get
202 # the portage tree for the current root, you can look in self.trees[root],
203 # where root = self.settings["ROOT"].
204 #
205 # In each root, there are three trees: vartree, porttree, and bintree.
206 # - vartree: A database of the currently-installed packages.
207 # - porttree: A database of ebuilds, that can be used to build packages.
208 # - bintree: A database of binary packages.
209 self.trees = None
210
211
212class DepGraphGenerator(object):
213 """Grab dependency information about packages from portage.
214
215 Typical usage:
216 deps = DepGraphGenerator()
217 deps.Initialize(sys.argv[1:])
218 deps_tree, deps_info = deps.GenDependencyTree()
219 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
220 deps.PrintTree(deps_tree)
221 PrintDepsMap(deps_graph)
222 """
223
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700224 __slots__ = ["board", "emerge", "package_db", "show_output", "unpack_only"]
David Jamesfcb70ef2011-02-02 16:02:30 -0800225
226 def __init__(self):
227 self.board = None
228 self.emerge = EmergeData()
David Jamesfcb70ef2011-02-02 16:02:30 -0800229 self.package_db = {}
David Jamesfcb70ef2011-02-02 16:02:30 -0800230 self.show_output = False
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700231 self.unpack_only = False
David Jamesfcb70ef2011-02-02 16:02:30 -0800232
233 def ParseParallelEmergeArgs(self, argv):
234 """Read the parallel emerge arguments from the command-line.
235
236 We need to be compatible with emerge arg format. We scrape arguments that
237 are specific to parallel_emerge, and pass through the rest directly to
238 emerge.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500239
David Jamesfcb70ef2011-02-02 16:02:30 -0800240 Args:
241 argv: arguments list
Mike Frysinger1a736a82013-12-12 01:50:59 -0500242
David Jamesfcb70ef2011-02-02 16:02:30 -0800243 Returns:
244 Arguments that don't belong to parallel_emerge
245 """
246 emerge_args = []
247 for arg in argv:
248 # Specifically match arguments that are specific to parallel_emerge, and
249 # pass through the rest.
250 if arg.startswith("--board="):
251 self.board = arg.replace("--board=", "")
252 elif arg.startswith("--workon="):
253 workon_str = arg.replace("--workon=", "")
David James7a1ea4b2011-10-13 15:06:41 -0700254 emerge_args.append("--reinstall-atoms=%s" % workon_str)
255 emerge_args.append("--usepkg-exclude=%s" % workon_str)
David Jamesfcb70ef2011-02-02 16:02:30 -0800256 elif arg.startswith("--force-remote-binary="):
257 force_remote_binary = arg.replace("--force-remote-binary=", "")
David James7a1ea4b2011-10-13 15:06:41 -0700258 emerge_args.append("--useoldpkg-atoms=%s" % force_remote_binary)
David Jamesfcb70ef2011-02-02 16:02:30 -0800259 elif arg == "--show-output":
260 self.show_output = True
David James386ccd12011-05-04 20:17:42 -0700261 elif arg == "--rebuild":
David James7a1ea4b2011-10-13 15:06:41 -0700262 emerge_args.append("--rebuild-if-unbuilt")
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700263 elif arg == "--unpackonly":
264 emerge_args.append("--fetchonly")
265 self.unpack_only = True
David Jamesfcb70ef2011-02-02 16:02:30 -0800266 else:
267 # Not one of our options, so pass through to emerge.
268 emerge_args.append(arg)
269
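    # For illustration (package name invented): "--workon=chromeos-base/shill"
    # is translated above into "--reinstall-atoms=chromeos-base/shill" plus
    # "--usepkg-exclude=chromeos-base/shill", forcing that package to be
    # rebuilt from source instead of being satisfied from a binary package.
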
    # These packages take a really long time to build, so, for expediency, we
    # exclude them from the automatic rebuilds that would otherwise be
    # triggered whenever one of their dependencies is recompiled.
    for pkg in ("chromeos-base/chromeos-chrome",):
      emerge_args.append("--rebuild-exclude=%s" % pkg)
David Jamesfcb70ef2011-02-02 16:02:30 -0800275
276 return emerge_args
277
278 def Initialize(self, args):
279 """Initializer. Parses arguments and sets up portage state."""
280
281 # Parse and strip out args that are just intended for parallel_emerge.
282 emerge_args = self.ParseParallelEmergeArgs(args)
283
284 # Setup various environment variables based on our current board. These
285 # variables are normally setup inside emerge-${BOARD}, but since we don't
286 # call that script, we have to set it up here. These variables serve to
287 # point our tools at /build/BOARD and to setup cross compiles to the
288 # appropriate board as configured in toolchain.conf.
289 if self.board:
Bertrand SIMONNETf6febab2014-10-03 10:59:43 -0700290 sysroot = os.environ.get('SYSROOT', cros_build_lib.GetSysroot(self.board))
Yu-Ju Hongdd9bb2b2014-01-03 17:08:26 -0800291 os.environ["PORTAGE_CONFIGROOT"] = sysroot
Yu-Ju Hongdd9bb2b2014-01-03 17:08:26 -0800292 os.environ["SYSROOT"] = sysroot
David Jamesfcb70ef2011-02-02 16:02:30 -0800293
294 # Although CHROMEOS_ROOT isn't specific to boards, it's normally setup
295 # inside emerge-${BOARD}, so we set it up here for compatibility. It
296 # will be going away soon as we migrate to CROS_WORKON_SRCROOT.
297 os.environ.setdefault("CHROMEOS_ROOT", os.environ["HOME"] + "/trunk")
298
299 # Turn off interactive delays
300 os.environ["EBEEP_IGNORE"] = "1"
301 os.environ["EPAUSE_IGNORE"] = "1"
Mike Frysinger0a647fc2012-08-06 14:36:05 -0400302 os.environ["CLEAN_DELAY"] = "0"
David Jamesfcb70ef2011-02-02 16:02:30 -0800303
304 # Parse the emerge options.
David Jamesea3ca332011-05-26 11:48:29 -0700305 action, opts, cmdline_packages = parse_opts(emerge_args, silent=True)
David Jamesfcb70ef2011-02-02 16:02:30 -0800306
307 # Set environment variables based on options. Portage normally sets these
308 # environment variables in emerge_main, but we can't use that function,
309 # because it also does a bunch of other stuff that we don't want.
310 # TODO(davidjames): Patch portage to move this logic into a function we can
311 # reuse here.
312 if "--debug" in opts:
313 os.environ["PORTAGE_DEBUG"] = "1"
314 if "--config-root" in opts:
315 os.environ["PORTAGE_CONFIGROOT"] = opts["--config-root"]
316 if "--root" in opts:
317 os.environ["ROOT"] = opts["--root"]
318 if "--accept-properties" in opts:
319 os.environ["ACCEPT_PROPERTIES"] = opts["--accept-properties"]
320
David James88d780c2014-02-05 13:03:29 -0800321 # If we're installing packages to the board, we can disable vardb locks.
322 # This is safe because we only run up to one instance of parallel_emerge in
323 # parallel.
324 # TODO(davidjames): Enable this for the host too.
325 if self.board:
David Jamesfcb70ef2011-02-02 16:02:30 -0800326 os.environ.setdefault("PORTAGE_LOCKS", "false")
David Jamesfcb70ef2011-02-02 16:02:30 -0800327
328 # Now that we've setup the necessary environment variables, we can load the
329 # emerge config from disk.
330 settings, trees, mtimedb = load_emerge_config()
331
David Jamesea3ca332011-05-26 11:48:29 -0700332 # Add in EMERGE_DEFAULT_OPTS, if specified.
333 tmpcmdline = []
334 if "--ignore-default-opts" not in opts:
335 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
336 tmpcmdline.extend(emerge_args)
337 action, opts, cmdline_packages = parse_opts(tmpcmdline)
338
339 # If we're installing to the board, we want the --root-deps option so that
340 # portage will install the build dependencies to that location as well.
341 if self.board:
342 opts.setdefault("--root-deps", True)
343
David Jamesfcb70ef2011-02-02 16:02:30 -0800344 # Check whether our portage tree is out of date. Typically, this happens
345 # when you're setting up a new portage tree, such as in setup_board and
346 # make_chroot. In that case, portage applies a bunch of global updates
347 # here. Once the updates are finished, we need to commit any changes
348 # that the global update made to our mtimedb, and reload the config.
349 #
350 # Portage normally handles this logic in emerge_main, but again, we can't
351 # use that function here.
352 if _global_updates(trees, mtimedb["updates"]):
353 mtimedb.commit()
354 settings, trees, mtimedb = load_emerge_config(trees=trees)
355
356 # Setup implied options. Portage normally handles this logic in
357 # emerge_main.
358 if "--buildpkgonly" in opts or "buildpkg" in settings.features:
359 opts.setdefault("--buildpkg", True)
360 if "--getbinpkgonly" in opts:
361 opts.setdefault("--usepkgonly", True)
362 opts.setdefault("--getbinpkg", True)
363 if "getbinpkg" in settings.features:
364 # Per emerge_main, FEATURES=getbinpkg overrides --getbinpkg=n
365 opts["--getbinpkg"] = True
366 if "--getbinpkg" in opts or "--usepkgonly" in opts:
367 opts.setdefault("--usepkg", True)
368 if "--fetch-all-uri" in opts:
369 opts.setdefault("--fetchonly", True)
370 if "--skipfirst" in opts:
371 opts.setdefault("--resume", True)
372 if "--buildpkgonly" in opts:
373 # --buildpkgonly will not merge anything, so it overrides all binary
374 # package options.
375 for opt in ("--getbinpkg", "--getbinpkgonly",
376 "--usepkg", "--usepkgonly"):
377 opts.pop(opt, None)
378 if (settings.get("PORTAGE_DEBUG", "") == "1" and
379 "python-trace" in settings.features):
380 portage.debug.set_trace(True)
381
382 # Complain about unsupported options
David James386ccd12011-05-04 20:17:42 -0700383 for opt in ("--ask", "--ask-enter-invalid", "--resume", "--skipfirst"):
David Jamesfcb70ef2011-02-02 16:02:30 -0800384 if opt in opts:
Mike Frysinger383367e2014-09-16 15:06:17 -0400385 print("%s is not supported by parallel_emerge" % opt)
David Jamesfcb70ef2011-02-02 16:02:30 -0800386 sys.exit(1)
387
388 # Make emerge specific adjustments to the config (e.g. colors!)
389 adjust_configs(opts, trees)
390
391 # Save our configuration so far in the emerge object
392 emerge = self.emerge
393 emerge.action, emerge.opts = action, opts
394 emerge.settings, emerge.trees, emerge.mtimedb = settings, trees, mtimedb
395 emerge.cmdline_packages = cmdline_packages
396 root = settings["ROOT"]
397 emerge.root_config = trees[root]["root_config"]
398
David James386ccd12011-05-04 20:17:42 -0700399 if "--usepkg" in opts:
David Jamesfcb70ef2011-02-02 16:02:30 -0800400 emerge.trees[root]["bintree"].populate("--getbinpkg" in opts)
401
David Jamesfcb70ef2011-02-02 16:02:30 -0800402 def CreateDepgraph(self, emerge, packages):
403 """Create an emerge depgraph object."""
404 # Setup emerge options.
405 emerge_opts = emerge.opts.copy()
406
    # Ask portage to build a dependency graph with the options we specified
    # above.
David Jamesfcb70ef2011-02-02 16:02:30 -0800409 params = create_depgraph_params(emerge_opts, emerge.action)
David Jamesbf1e3442011-05-28 07:44:20 -0700410 success, depgraph, favorites = backtrack_depgraph(
David James386ccd12011-05-04 20:17:42 -0700411 emerge.settings, emerge.trees, emerge_opts, params, emerge.action,
412 packages, emerge.spinner)
413 emerge.depgraph = depgraph
David Jamesfcb70ef2011-02-02 16:02:30 -0800414
David James386ccd12011-05-04 20:17:42 -0700415 # Is it impossible to honor the user's request? Bail!
416 if not success:
417 depgraph.display_problems()
418 sys.exit(1)
David Jamesfcb70ef2011-02-02 16:02:30 -0800419
420 emerge.depgraph = depgraph
David Jamesbf1e3442011-05-28 07:44:20 -0700421 emerge.favorites = favorites
David Jamesfcb70ef2011-02-02 16:02:30 -0800422
David Jamesdeebd692011-05-09 17:02:52 -0700423 # Prime and flush emerge caches.
424 root = emerge.settings["ROOT"]
425 vardb = emerge.trees[root]["vartree"].dbapi
David James0bdc5de2011-05-12 16:22:26 -0700426 if "--pretend" not in emerge.opts:
427 vardb.counter_tick()
David Jamesdeebd692011-05-09 17:02:52 -0700428 vardb.flush_cache()
429
David James386ccd12011-05-04 20:17:42 -0700430 def GenDependencyTree(self):
David Jamesfcb70ef2011-02-02 16:02:30 -0800431 """Get dependency tree info from emerge.
432
David Jamesfcb70ef2011-02-02 16:02:30 -0800433 Returns:
434 Dependency tree
435 """
436 start = time.time()
437
438 emerge = self.emerge
439
440 # Create a list of packages to merge
441 packages = set(emerge.cmdline_packages[:])
David Jamesfcb70ef2011-02-02 16:02:30 -0800442
443 # Tell emerge to be quiet. We print plenty of info ourselves so we don't
444 # need any extra output from portage.
445 portage.util.noiselimit = -1
446
447 # My favorite feature: The silent spinner. It doesn't spin. Ever.
448 # I'd disable the colors by default too, but they look kind of cool.
449 emerge.spinner = stdout_spinner()
450 emerge.spinner.update = emerge.spinner.update_quiet
451
452 if "--quiet" not in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -0400453 print("Calculating deps...")
David Jamesfcb70ef2011-02-02 16:02:30 -0800454
455 self.CreateDepgraph(emerge, packages)
456 depgraph = emerge.depgraph
457
458 # Build our own tree from the emerge digraph.
459 deps_tree = {}
Don Garrett25f309a2014-03-19 14:02:12 -0700460 # pylint: disable=W0212
David Jamesfcb70ef2011-02-02 16:02:30 -0800461 digraph = depgraph._dynamic_config.digraph
David James3f778802011-08-25 19:31:45 -0700462 root = emerge.settings["ROOT"]
Bertrand SIMONNETa15b5072014-10-23 15:27:52 -0700463 final_db = depgraph._dynamic_config._filtered_trees[root]['graph_db']
David Jamesfcb70ef2011-02-02 16:02:30 -0800464 for node, node_deps in digraph.nodes.items():
465 # Calculate dependency packages that need to be installed first. Each
466 # child on the digraph is a dependency. The "operation" field specifies
467 # what we're doing (e.g. merge, uninstall, etc.). The "priorities" array
468 # contains the type of dependency (e.g. build, runtime, runtime_post,
469 # etc.)
470 #
      # Portage refers to the identifiers for packages as a CPV. This acronym
      # stands for Category/Package/Version.
      #
      # Here's an example CPV: chromeos-base/power_manager-0.0.1-r1
      # Split up, this CPV would be:
      # C -- Category: chromeos-base
      # P -- Package: power_manager
      # V -- Version: 0.0.1-r1
      #
      # We just refer to CPVs as packages here because it's easier.
481 deps = {}
482 for child, priorities in node_deps[0].items():
David James3f778802011-08-25 19:31:45 -0700483 if isinstance(child, Package) and child.root == root:
484 cpv = str(child.cpv)
485 action = str(child.operation)
486
487 # If we're uninstalling a package, check whether Portage is
488 # installing a replacement. If so, just depend on the installation
489 # of the new package, because the old package will automatically
490 # be uninstalled at that time.
491 if action == "uninstall":
492 for pkg in final_db.match_pkgs(child.slot_atom):
493 cpv = str(pkg.cpv)
494 action = "merge"
495 break
496
497 deps[cpv] = dict(action=action,
498 deptypes=[str(x) for x in priorities],
499 deps={})
David Jamesfcb70ef2011-02-02 16:02:30 -0800500
501 # We've built our list of deps, so we can add our package to the tree.
David James3f778802011-08-25 19:31:45 -0700502 if isinstance(node, Package) and node.root == root:
David Jamesfcb70ef2011-02-02 16:02:30 -0800503 deps_tree[str(node.cpv)] = dict(action=str(node.operation),
504 deps=deps)
505
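    # At this point each deps_tree entry looks roughly like the sketch below
    # (illustrative only; the package names and dep types here are invented):
    #
    #   deps_tree["chromeos-base/power_manager-0.0.1-r1"] = {
    #       "action": "merge",
    #       "deps": {
    #           "dev-libs/libxml2-2.9.1": {
    #               "action": "merge",
    #               "deptypes": ["buildtime", "runtime"],
    #               "deps": {},
    #           },
    #       },
    #   }
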
David Jamesfcb70ef2011-02-02 16:02:30 -0800506 # Ask portage for its install plan, so that we can only throw out
David James386ccd12011-05-04 20:17:42 -0700507 # dependencies that portage throws out.
David Jamesfcb70ef2011-02-02 16:02:30 -0800508 deps_info = {}
509 for pkg in depgraph.altlist():
510 if isinstance(pkg, Package):
David James3f778802011-08-25 19:31:45 -0700511 assert pkg.root == root
David Jamesfcb70ef2011-02-02 16:02:30 -0800512 self.package_db[pkg.cpv] = pkg
513
David Jamesfcb70ef2011-02-02 16:02:30 -0800514 # Save off info about the package
David James386ccd12011-05-04 20:17:42 -0700515 deps_info[str(pkg.cpv)] = {"idx": len(deps_info)}
David Jamesfcb70ef2011-02-02 16:02:30 -0800516
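    # deps_info now records, for each package, its position in portage's own
    # install order (the "idx" field); SanitizeTree() later relies on this
    # ordering when deciding which edge of a dependency cycle to break.
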
517 seconds = time.time() - start
518 if "--quiet" not in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -0400519 print("Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60))
David Jamesfcb70ef2011-02-02 16:02:30 -0800520
521 return deps_tree, deps_info
522
523 def PrintTree(self, deps, depth=""):
524 """Print the deps we have seen in the emerge output.
525
526 Args:
527 deps: Dependency tree structure.
528 depth: Allows printing the tree recursively, with indentation.
529 """
530 for entry in sorted(deps):
531 action = deps[entry]["action"]
Mike Frysinger383367e2014-09-16 15:06:17 -0400532 print("%s %s (%s)" % (depth, entry, action))
David Jamesfcb70ef2011-02-02 16:02:30 -0800533 self.PrintTree(deps[entry]["deps"], depth=depth + " ")
534
David James386ccd12011-05-04 20:17:42 -0700535 def GenDependencyGraph(self, deps_tree, deps_info):
David Jamesfcb70ef2011-02-02 16:02:30 -0800536 """Generate a doubly linked dependency graph.
537
538 Args:
539 deps_tree: Dependency tree structure.
540 deps_info: More details on the dependencies.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500541
David Jamesfcb70ef2011-02-02 16:02:30 -0800542 Returns:
543 Deps graph in the form of a dict of packages, with each package
544 specifying a "needs" list and "provides" list.
545 """
546 emerge = self.emerge
David Jamesfcb70ef2011-02-02 16:02:30 -0800547
David Jamesfcb70ef2011-02-02 16:02:30 -0800548 # deps_map is the actual dependency graph.
549 #
550 # Each package specifies a "needs" list and a "provides" list. The "needs"
551 # list indicates which packages we depend on. The "provides" list
552 # indicates the reverse dependencies -- what packages need us.
553 #
554 # We also provide some other information in the dependency graph:
555 # - action: What we're planning on doing with this package. Generally,
556 # "merge", "nomerge", or "uninstall"
David Jamesfcb70ef2011-02-02 16:02:30 -0800557 deps_map = {}
558
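    # Each deps_map entry ends up looking roughly like the sketch below
    # (illustrative only; the package names shown here are invented):
    #
    #   deps_map["chromeos-base/shill-0.0.1-r100"] = {
    #       "action": "merge",
    #       "needs": {"chromeos-base/libchrome-242728": "buildtime/runtime"},
    #       "provides": set(["virtual/target-os-1-r60"]),
    #       "nodeps": False,
    #       "binary": False,
    #   }
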
559 def ReverseTree(packages):
560 """Convert tree to digraph.
561
562 Take the tree of package -> requirements and reverse it to a digraph of
563 buildable packages -> packages they unblock.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500564
David Jamesfcb70ef2011-02-02 16:02:30 -0800565 Args:
566 packages: Tree(s) of dependencies.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500567
David Jamesfcb70ef2011-02-02 16:02:30 -0800568 Returns:
569 Unsanitized digraph.
570 """
David James8c7e5e32011-06-28 11:26:03 -0700571 binpkg_phases = set(["setup", "preinst", "postinst"])
David Jamese5e1c0a2014-09-29 17:19:41 -0700572 needed_dep_types = set(["blocker", "buildtime", "buildtime_slot_op",
573 "runtime", "runtime_slot_op"])
574 ignored_dep_types = set(["ignored", "optional", "runtime_post", "soft"])
575 all_dep_types = ignored_dep_types | needed_dep_types
David Jamesfcb70ef2011-02-02 16:02:30 -0800576 for pkg in packages:
577
578 # Create an entry for the package
579 action = packages[pkg]["action"]
David James8c7e5e32011-06-28 11:26:03 -0700580 default_pkg = {"needs": {}, "provides": set(), "action": action,
581 "nodeps": False, "binary": False}
David Jamesfcb70ef2011-02-02 16:02:30 -0800582 this_pkg = deps_map.setdefault(pkg, default_pkg)
583
David James8c7e5e32011-06-28 11:26:03 -0700584 if pkg in deps_info:
585 this_pkg["idx"] = deps_info[pkg]["idx"]
586
587 # If a package doesn't have any defined phases that might use the
588 # dependent packages (i.e. pkg_setup, pkg_preinst, or pkg_postinst),
589 # we can install this package before its deps are ready.
590 emerge_pkg = self.package_db.get(pkg)
591 if emerge_pkg and emerge_pkg.type_name == "binary":
592 this_pkg["binary"] = True
Mike Frysinger66652ec2014-04-24 11:42:25 -0400593 defined_phases = emerge_pkg.defined_phases
David James8c7e5e32011-06-28 11:26:03 -0700594 defined_binpkg_phases = binpkg_phases.intersection(defined_phases)
595 if not defined_binpkg_phases:
596 this_pkg["nodeps"] = True
597
David Jamesfcb70ef2011-02-02 16:02:30 -0800598 # Create entries for dependencies of this package first.
599 ReverseTree(packages[pkg]["deps"])
600
601 # Add dependencies to this package.
602 for dep, dep_item in packages[pkg]["deps"].iteritems():
David James8c7e5e32011-06-28 11:26:03 -0700603 # We only need to enforce strict ordering of dependencies if the
David James3f778802011-08-25 19:31:45 -0700604 # dependency is a blocker, or is a buildtime or runtime dependency.
605 # (I.e., ignored, optional, and runtime_post dependencies don't
606 # depend on ordering.)
David James8c7e5e32011-06-28 11:26:03 -0700607 dep_types = dep_item["deptypes"]
608 if needed_dep_types.intersection(dep_types):
609 deps_map[dep]["provides"].add(pkg)
610 this_pkg["needs"][dep] = "/".join(dep_types)
David Jamesfcb70ef2011-02-02 16:02:30 -0800611
David Jamese5e1c0a2014-09-29 17:19:41 -0700612 # Verify we processed all appropriate dependency types.
613 unknown_dep_types = set(dep_types) - all_dep_types
614 if unknown_dep_types:
615 print("Unknown dependency types found:")
616 print(" %s -> %s (%s)" % (pkg, dep, "/".join(unknown_dep_types)))
617 sys.exit(1)
618
David James3f778802011-08-25 19:31:45 -0700619 # If there's a blocker, Portage may need to move files from one
620 # package to another, which requires editing the CONTENTS files of
621 # both packages. To avoid race conditions while editing this file,
622 # the two packages must not be installed in parallel, so we can't
623 # safely ignore dependencies. See http://crosbug.com/19328
624 if "blocker" in dep_types:
625 this_pkg["nodeps"] = False
626
David Jamesfcb70ef2011-02-02 16:02:30 -0800627 def FindCycles():
628 """Find cycles in the dependency tree.
629
630 Returns:
631 A dict mapping cyclic packages to a dict of the deps that cause
632 cycles. For each dep that causes cycles, it returns an example
633 traversal of the graph that shows the cycle.
634 """
635
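      # Illustrative example (package names invented): if A needs B, B needs
      # C, and C needs A, the returned dict contains one sample traversal for
      # every edge that participates in the cycle, e.g.
      #   cycles["A"]["B"] = ["A", "B", "C", "A"]
      #   cycles["B"]["C"] = ["A", "B", "C", "A"]
      #   cycles["C"]["A"] = ["A", "B", "C", "A"]
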
636 def FindCyclesAtNode(pkg, cycles, unresolved, resolved):
637 """Find cycles in cyclic dependencies starting at specified package.
638
639 Args:
640 pkg: Package identifier.
641 cycles: A dict mapping cyclic packages to a dict of the deps that
642 cause cycles. For each dep that causes cycles, it returns an
643 example traversal of the graph that shows the cycle.
644 unresolved: Nodes that have been visited but are not fully processed.
645 resolved: Nodes that have been visited and are fully processed.
646 """
647 pkg_cycles = cycles.get(pkg)
648 if pkg in resolved and not pkg_cycles:
649 # If we already looked at this package, and found no cyclic
650 # dependencies, we can stop now.
651 return
652 unresolved.append(pkg)
653 for dep in deps_map[pkg]["needs"]:
654 if dep in unresolved:
655 idx = unresolved.index(dep)
656 mycycle = unresolved[idx:] + [dep]
David James321490a2012-12-17 12:05:56 -0800657 for i in xrange(len(mycycle) - 1):
David Jamesfcb70ef2011-02-02 16:02:30 -0800658 pkg1, pkg2 = mycycle[i], mycycle[i+1]
659 cycles.setdefault(pkg1, {}).setdefault(pkg2, mycycle)
660 elif not pkg_cycles or dep not in pkg_cycles:
661 # Looks like we haven't seen this edge before.
662 FindCyclesAtNode(dep, cycles, unresolved, resolved)
663 unresolved.pop()
664 resolved.add(pkg)
665
666 cycles, unresolved, resolved = {}, [], set()
667 for pkg in deps_map:
668 FindCyclesAtNode(pkg, cycles, unresolved, resolved)
669 return cycles
670
David James386ccd12011-05-04 20:17:42 -0700671 def RemoveUnusedPackages():
David Jamesfcb70ef2011-02-02 16:02:30 -0800672 """Remove installed packages, propagating dependencies."""
David Jamesfcb70ef2011-02-02 16:02:30 -0800673 # Schedule packages that aren't on the install list for removal
674 rm_pkgs = set(deps_map.keys()) - set(deps_info.keys())
675
David Jamesfcb70ef2011-02-02 16:02:30 -0800676 # Remove the packages we don't want, simplifying the graph and making
677 # it easier for us to crack cycles.
678 for pkg in sorted(rm_pkgs):
679 this_pkg = deps_map[pkg]
680 needs = this_pkg["needs"]
681 provides = this_pkg["provides"]
682 for dep in needs:
683 dep_provides = deps_map[dep]["provides"]
684 dep_provides.update(provides)
685 dep_provides.discard(pkg)
686 dep_provides.discard(dep)
687 for target in provides:
688 target_needs = deps_map[target]["needs"]
689 target_needs.update(needs)
690 target_needs.pop(pkg, None)
691 target_needs.pop(target, None)
692 del deps_map[pkg]
693
694 def PrintCycleBreak(basedep, dep, mycycle):
695 """Print details about a cycle that we are planning on breaking.
696
Mike Frysinger02e1e072013-11-10 22:11:34 -0500697 We are breaking a cycle where dep needs basedep. mycycle is an
698 example cycle which contains dep -> basedep.
699 """
David Jamesfcb70ef2011-02-02 16:02:30 -0800700
David Jamesfcb70ef2011-02-02 16:02:30 -0800701 needs = deps_map[dep]["needs"]
702 depinfo = needs.get(basedep, "deleted")
David Jamesfcb70ef2011-02-02 16:02:30 -0800703
David James3f778802011-08-25 19:31:45 -0700704 # It's OK to swap install order for blockers, as long as the two
705 # packages aren't installed in parallel. If there is a cycle, then
706 # we know the packages depend on each other already, so we can drop the
707 # blocker safely without printing a warning.
708 if depinfo == "blocker":
709 return
710
David Jamesfcb70ef2011-02-02 16:02:30 -0800711 # Notify the user that we're breaking a cycle.
Mike Frysinger383367e2014-09-16 15:06:17 -0400712 print("Breaking %s -> %s (%s)" % (dep, basedep, depinfo))
David Jamesfcb70ef2011-02-02 16:02:30 -0800713
714 # Show cycle.
David James321490a2012-12-17 12:05:56 -0800715 for i in xrange(len(mycycle) - 1):
David Jamesfcb70ef2011-02-02 16:02:30 -0800716 pkg1, pkg2 = mycycle[i], mycycle[i+1]
717 needs = deps_map[pkg1]["needs"]
718 depinfo = needs.get(pkg2, "deleted")
719 if pkg1 == dep and pkg2 == basedep:
720 depinfo = depinfo + ", deleting"
Mike Frysinger383367e2014-09-16 15:06:17 -0400721 print(" %s -> %s (%s)" % (pkg1, pkg2, depinfo))
David Jamesfcb70ef2011-02-02 16:02:30 -0800722
723 def SanitizeTree():
724 """Remove circular dependencies.
725
726 We prune all dependencies involved in cycles that go against the emerge
727 ordering. This has a nice property: we're guaranteed to merge
728 dependencies in the same order that portage does.
729
730 Because we don't treat any dependencies as "soft" unless they're killed
731 by a cycle, we pay attention to a larger number of dependencies when
732 merging. This hurts performance a bit, but helps reliability.
733 """
734 start = time.time()
735 cycles = FindCycles()
736 while cycles:
737 for dep, mycycles in cycles.iteritems():
738 for basedep, mycycle in mycycles.iteritems():
739 if deps_info[basedep]["idx"] >= deps_info[dep]["idx"]:
Matt Tennant08797302011-10-17 16:18:45 -0700740 if "--quiet" not in emerge.opts:
741 PrintCycleBreak(basedep, dep, mycycle)
David Jamesfcb70ef2011-02-02 16:02:30 -0800742 del deps_map[dep]["needs"][basedep]
743 deps_map[basedep]["provides"].remove(dep)
744 cycles = FindCycles()
745 seconds = time.time() - start
746 if "--quiet" not in emerge.opts and seconds >= 0.1:
Mike Frysinger383367e2014-09-16 15:06:17 -0400747 print("Tree sanitized in %dm%.1fs" % (seconds / 60, seconds % 60))
David Jamesfcb70ef2011-02-02 16:02:30 -0800748
David James8c7e5e32011-06-28 11:26:03 -0700749 def FindRecursiveProvides(pkg, seen):
750 """Find all nodes that require a particular package.
751
752 Assumes that graph is acyclic.
753
754 Args:
755 pkg: Package identifier.
756 seen: Nodes that have been visited so far.
757 """
758 if pkg in seen:
759 return
760 seen.add(pkg)
761 info = deps_map[pkg]
762 info["tprovides"] = info["provides"].copy()
763 for dep in info["provides"]:
764 FindRecursiveProvides(dep, seen)
765 info["tprovides"].update(deps_map[dep]["tprovides"])
766
David Jamesa22906f2011-05-04 19:53:26 -0700767 ReverseTree(deps_tree)
David Jamesa22906f2011-05-04 19:53:26 -0700768
David James386ccd12011-05-04 20:17:42 -0700769 # We need to remove unused packages so that we can use the dependency
770 # ordering of the install process to show us what cycles to crack.
771 RemoveUnusedPackages()
David Jamesfcb70ef2011-02-02 16:02:30 -0800772 SanitizeTree()
David James8c7e5e32011-06-28 11:26:03 -0700773 seen = set()
774 for pkg in deps_map:
775 FindRecursiveProvides(pkg, seen)
David Jamesfcb70ef2011-02-02 16:02:30 -0800776 return deps_map
777
778 def PrintInstallPlan(self, deps_map):
779 """Print an emerge-style install plan.
780
781 The install plan lists what packages we're installing, in order.
782 It's useful for understanding what parallel_emerge is doing.
783
784 Args:
785 deps_map: The dependency graph.
786 """
787
788 def InstallPlanAtNode(target, deps_map):
789 nodes = []
790 nodes.append(target)
791 for dep in deps_map[target]["provides"]:
792 del deps_map[dep]["needs"][target]
793 if not deps_map[dep]["needs"]:
794 nodes.extend(InstallPlanAtNode(dep, deps_map))
795 return nodes
796
797 deps_map = copy.deepcopy(deps_map)
798 install_plan = []
799 plan = set()
800 for target, info in deps_map.iteritems():
801 if not info["needs"] and target not in plan:
802 for item in InstallPlanAtNode(target, deps_map):
803 plan.add(item)
804 install_plan.append(self.package_db[item])
805
806 for pkg in plan:
807 del deps_map[pkg]
808
809 if deps_map:
Mike Frysinger383367e2014-09-16 15:06:17 -0400810 print("Cyclic dependencies:", " ".join(deps_map))
David Jamesfcb70ef2011-02-02 16:02:30 -0800811 PrintDepsMap(deps_map)
812 sys.exit(1)
813
814 self.emerge.depgraph.display(install_plan)
815
816
def PrintDepsMap(deps_map):
  """Print dependency graph; for each package list its prerequisites."""
  for i in sorted(deps_map):
    print("%s: (%s) needs" % (i, deps_map[i]["action"]))
    needs = deps_map[i]["needs"]
    for j in sorted(needs):
      print("  %s" % (j))
    if not needs:
      print("    no dependencies")


class EmergeJobState(object):
  """Structure describing the state of a single emerge job."""

David Jamesfcb70ef2011-02-02 16:02:30 -0800831 __slots__ = ["done", "filename", "last_notify_timestamp", "last_output_seek",
832 "last_output_timestamp", "pkgname", "retcode", "start_timestamp",
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700833 "target", "fetch_only", "unpack_only"]
David Jamesfcb70ef2011-02-02 16:02:30 -0800834
835 def __init__(self, target, pkgname, done, filename, start_timestamp,
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700836 retcode=None, fetch_only=False, unpack_only=False):
David Jamesfcb70ef2011-02-02 16:02:30 -0800837
838 # The full name of the target we're building (e.g.
Mike Frysingerfd969312014-04-02 22:16:42 -0400839 # virtual/target-os-1-r60)
David Jamesfcb70ef2011-02-02 16:02:30 -0800840 self.target = target
841
Mike Frysingerfd969312014-04-02 22:16:42 -0400842 # The short name of the target we're building (e.g. target-os-1-r60)
David Jamesfcb70ef2011-02-02 16:02:30 -0800843 self.pkgname = pkgname
844
845 # Whether the job is done. (True if the job is done; false otherwise.)
846 self.done = done
847
848 # The filename where output is currently stored.
849 self.filename = filename
850
851 # The timestamp of the last time we printed the name of the log file. We
852 # print this at the beginning of the job, so this starts at
853 # start_timestamp.
854 self.last_notify_timestamp = start_timestamp
855
856 # The location (in bytes) of the end of the last complete line we printed.
857 # This starts off at zero. We use this to jump to the right place when we
858 # print output from the same ebuild multiple times.
859 self.last_output_seek = 0
860
861 # The timestamp of the last time we printed output. Since we haven't
862 # printed output yet, this starts at zero.
863 self.last_output_timestamp = 0
864
865 # The return code of our job, if the job is actually finished.
866 self.retcode = retcode
867
Brian Harring0be85c62012-03-17 19:52:12 -0700868 # Was this just a fetch job?
869 self.fetch_only = fetch_only
870
David Jamesfcb70ef2011-02-02 16:02:30 -0800871 # The timestamp when our job started.
872 self.start_timestamp = start_timestamp
873
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700874 # No emerge, only unpack packages.
875 self.unpack_only = unpack_only
876
David Jamesfcb70ef2011-02-02 16:02:30 -0800877
David James321490a2012-12-17 12:05:56 -0800878def KillHandler(_signum, _frame):
David James7358d032011-05-19 10:40:03 -0700879 # Kill self and all subprocesses.
880 os.killpg(0, signal.SIGKILL)
881
Mike Frysingercc838832014-05-24 13:10:30 -0400882
David Jamesfcb70ef2011-02-02 16:02:30 -0800883def SetupWorkerSignals():
David James321490a2012-12-17 12:05:56 -0800884 def ExitHandler(_signum, _frame):
David James7358d032011-05-19 10:40:03 -0700885 # Set KILLED flag.
886 KILLED.set()
David James13cead42011-05-18 16:22:01 -0700887
David James7358d032011-05-19 10:40:03 -0700888 # Remove our signal handlers so we don't get called recursively.
889 signal.signal(signal.SIGINT, KillHandler)
890 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -0800891
892 # Ensure that we exit quietly and cleanly, if possible, when we receive
893 # SIGTERM or SIGINT signals. By default, when the user hits CTRL-C, all
894 # of the child processes will print details about KeyboardInterrupt
895 # exceptions, which isn't very helpful.
896 signal.signal(signal.SIGINT, ExitHandler)
897 signal.signal(signal.SIGTERM, ExitHandler)
898
Mike Frysingerd74fe4a2014-04-24 11:43:38 -0400899
900def EmergeProcess(output, target, *args, **kwargs):
David James1ed3e252011-10-05 20:26:15 -0700901 """Merge a package in a subprocess.
902
903 Args:
David James1ed3e252011-10-05 20:26:15 -0700904 output: Temporary file to write output.
Mike Frysingerd74fe4a2014-04-24 11:43:38 -0400905 target: The package we'll be processing (for display purposes).
David James6b29d052012-11-02 10:27:27 -0700906 *args: Arguments to pass to Scheduler constructor.
907 **kwargs: Keyword arguments to pass to Scheduler constructor.
David James1ed3e252011-10-05 20:26:15 -0700908
909 Returns:
910 The exit code returned by the subprocess.
911 """
912 pid = os.fork()
913 if pid == 0:
914 try:
Mike Frysingerd74fe4a2014-04-24 11:43:38 -0400915 proctitle.settitle('EmergeProcess', target)
916
David James1ed3e252011-10-05 20:26:15 -0700917 # Sanity checks.
Mike Frysingerf02736e2013-11-08 15:27:00 -0500918 if sys.stdout.fileno() != 1:
919 raise Exception("sys.stdout.fileno() != 1")
920 if sys.stderr.fileno() != 2:
921 raise Exception("sys.stderr.fileno() != 2")
David James1ed3e252011-10-05 20:26:15 -0700922
923 # - Redirect 1 (stdout) and 2 (stderr) at our temporary file.
924 # - Redirect 0 to point at sys.stdin. In this case, sys.stdin
925 # points at a file reading os.devnull, because multiprocessing mucks
926 # with sys.stdin.
927 # - Leave the sys.stdin and output filehandles alone.
928 fd_pipes = {0: sys.stdin.fileno(),
929 1: output.fileno(),
930 2: output.fileno(),
931 sys.stdin.fileno(): sys.stdin.fileno(),
932 output.fileno(): output.fileno()}
Mike Frysinger66652ec2014-04-24 11:42:25 -0400933 # pylint: disable=W0212
934 portage.process._setup_pipes(fd_pipes, close_fds=False)
David James1ed3e252011-10-05 20:26:15 -0700935
936 # Portage doesn't like when sys.stdin.fileno() != 0, so point sys.stdin
937 # at the filehandle we just created in _setup_pipes.
938 if sys.stdin.fileno() != 0:
David James6b29d052012-11-02 10:27:27 -0700939 sys.__stdin__ = sys.stdin = os.fdopen(0, "r")
940
941 scheduler = Scheduler(*args, **kwargs)
942
943 # Enable blocker handling even though we're in --nodeps mode. This
944 # allows us to unmerge the blocker after we've merged the replacement.
945 scheduler._opts_ignore_blockers = frozenset()
David James1ed3e252011-10-05 20:26:15 -0700946
947 # Actually do the merge.
948 retval = scheduler.merge()
949
950 # We catch all exceptions here (including SystemExit, KeyboardInterrupt,
951 # etc) so as to ensure that we don't confuse the multiprocessing module,
952 # which expects that all forked children exit with os._exit().
David James321490a2012-12-17 12:05:56 -0800953 # pylint: disable=W0702
David James1ed3e252011-10-05 20:26:15 -0700954 except:
955 traceback.print_exc(file=output)
956 retval = 1
957 sys.stdout.flush()
958 sys.stderr.flush()
959 output.flush()
Don Garrett25f309a2014-03-19 14:02:12 -0700960 # pylint: disable=W0212
David James1ed3e252011-10-05 20:26:15 -0700961 os._exit(retval)
962 else:
963 # Return the exit code of the subprocess.
964 return os.waitpid(pid, 0)[1]
David Jamesfcb70ef2011-02-02 16:02:30 -0800965
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700966
967def UnpackPackage(pkg_state):
968 """Unpacks package described by pkg_state.
969
970 Args:
971 pkg_state: EmergeJobState object describing target.
972
973 Returns:
974 Exit code returned by subprocess.
975 """
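  # Roughly the equivalent of this shell pipeline (illustrative only; the
  # compressor may be bzip2 rather than pbzip2, per FindCompressor below):
  #   pbzip2 -dc --ignore-trailing-garbage=1 "$PKGDIR/<target>.tbz2" \
  #     | sudo tar -xf - -C "$ROOT"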
976 pkgdir = os.environ.get("PKGDIR",
977 os.path.join(os.environ["SYSROOT"], "packages"))
978 root = os.environ.get("ROOT", os.environ["SYSROOT"])
979 path = os.path.join(pkgdir, pkg_state.target + ".tbz2")
980 comp = cros_build_lib.FindCompressor(cros_build_lib.COMP_BZIP2)
981 cmd = [comp, "-dc"]
982 if comp.endswith("pbzip2"):
983 cmd.append("--ignore-trailing-garbage=1")
984 cmd.append(path)
985
986 result = cros_build_lib.RunCommand(cmd, cwd=root, stdout_to_pipe=True,
987 print_cmd=False, error_code_ok=True)
988
989 # If we were not successful, return now and don't attempt untar.
990 if result.returncode:
991 return result.returncode
992
993 cmd = ["sudo", "tar", "-xf", "-", "-C", root]
994 result = cros_build_lib.RunCommand(cmd, cwd=root, input=result.output,
995 print_cmd=False, error_code_ok=True)
996
997 return result.returncode
998
999
1000def EmergeWorker(task_queue, job_queue, emerge, package_db, fetch_only=False,
1001 unpack_only=False):
David Jamesfcb70ef2011-02-02 16:02:30 -08001002 """This worker emerges any packages given to it on the task_queue.
1003
1004 Args:
1005 task_queue: The queue of tasks for this worker to do.
1006 job_queue: The queue of results from the worker.
1007 emerge: An EmergeData() object.
1008 package_db: A dict, mapping package ids to portage Package objects.
Brian Harring0be85c62012-03-17 19:52:12 -07001009 fetch_only: A bool, indicating if we should just fetch the target.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001010 unpack_only: A bool, indicating if we should just unpack the target.
David Jamesfcb70ef2011-02-02 16:02:30 -08001011
1012 It expects package identifiers to be passed to it via task_queue. When
1013 a task is started, it pushes the (target, filename) to the started_queue.
1014 The output is stored in filename. When a merge starts or finishes, we push
1015 EmergeJobState objects to the job_queue.
1016 """
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001017 if fetch_only:
1018 mode = 'fetch'
1019 elif unpack_only:
1020 mode = 'unpack'
1021 else:
1022 mode = 'emerge'
1023 proctitle.settitle('EmergeWorker', mode, '[idle]')
David Jamesfcb70ef2011-02-02 16:02:30 -08001024
1025 SetupWorkerSignals()
1026 settings, trees, mtimedb = emerge.settings, emerge.trees, emerge.mtimedb
David Jamesdeebd692011-05-09 17:02:52 -07001027
1028 # Disable flushing of caches to save on I/O.
David James7a1ea4b2011-10-13 15:06:41 -07001029 root = emerge.settings["ROOT"]
1030 vardb = emerge.trees[root]["vartree"].dbapi
Mike Frysingere56debd2014-11-19 01:54:36 -05001031 vardb._flush_cache_enabled = False # pylint: disable=protected-access
Brian Harring0be85c62012-03-17 19:52:12 -07001032 bindb = emerge.trees[root]["bintree"].dbapi
1033 # Might be a set, might be a list, might be None; no clue, just use shallow
1034 # copy to ensure we can roll it back.
Don Garrett25f309a2014-03-19 14:02:12 -07001035 # pylint: disable=W0212
Brian Harring0be85c62012-03-17 19:52:12 -07001036 original_remotepkgs = copy.copy(bindb.bintree._remotepkgs)
David Jamesdeebd692011-05-09 17:02:52 -07001037
David Jamesfcb70ef2011-02-02 16:02:30 -08001038 opts, spinner = emerge.opts, emerge.spinner
1039 opts["--nodeps"] = True
Brian Harring0be85c62012-03-17 19:52:12 -07001040 if fetch_only:
1041 opts["--fetchonly"] = True
1042
David Jamesfcb70ef2011-02-02 16:02:30 -08001043 while True:
1044 # Wait for a new item to show up on the queue. This is a blocking wait,
1045 # so if there's nothing to do, we just sit here.
Brian Harring0be85c62012-03-17 19:52:12 -07001046 pkg_state = task_queue.get()
1047 if pkg_state is None:
David Jamesfcb70ef2011-02-02 16:02:30 -08001048 # If target is None, this means that the main thread wants us to quit.
1049 # The other workers need to exit too, so we'll push the message back on
1050 # to the queue so they'll get it too.
Brian Harring0be85c62012-03-17 19:52:12 -07001051 task_queue.put(None)
David Jamesfcb70ef2011-02-02 16:02:30 -08001052 return
David James7358d032011-05-19 10:40:03 -07001053 if KILLED.is_set():
1054 return
1055
Brian Harring0be85c62012-03-17 19:52:12 -07001056 target = pkg_state.target
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001057 proctitle.settitle('EmergeWorker', mode, target)
Brian Harring0be85c62012-03-17 19:52:12 -07001058
David Jamesfcb70ef2011-02-02 16:02:30 -08001059 db_pkg = package_db[target]
Brian Harring0be85c62012-03-17 19:52:12 -07001060
1061 if db_pkg.type_name == "binary":
1062 if not fetch_only and pkg_state.fetched_successfully:
1063 # Ensure portage doesn't think our pkg is remote- else it'll force
1064 # a redownload of it (even if the on-disk file is fine). In-memory
1065 # caching basically, implemented dumbly.
1066 bindb.bintree._remotepkgs = None
1067 else:
        bindb.bintree._remotepkgs = original_remotepkgs
1069
David Jamesfcb70ef2011-02-02 16:02:30 -08001070 db_pkg.root_config = emerge.root_config
1071 install_list = [db_pkg]
1072 pkgname = db_pkg.pf
1073 output = tempfile.NamedTemporaryFile(prefix=pkgname + "-", delete=False)
    os.chmod(output.name, 0o644)
David Jamesfcb70ef2011-02-02 16:02:30 -08001075 start_timestamp = time.time()
Brian Harring0be85c62012-03-17 19:52:12 -07001076 job = EmergeJobState(target, pkgname, False, output.name, start_timestamp,
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001077 fetch_only=fetch_only, unpack_only=unpack_only)
David Jamesfcb70ef2011-02-02 16:02:30 -08001078 job_queue.put(job)
1079 if "--pretend" in opts:
1080 retcode = 0
1081 else:
David Jamesfcb70ef2011-02-02 16:02:30 -08001082 try:
David James386ccd12011-05-04 20:17:42 -07001083 emerge.scheduler_graph.mergelist = install_list
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001084 if unpack_only:
1085 retcode = UnpackPackage(pkg_state)
1086 else:
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001087 retcode = EmergeProcess(output, target, settings, trees, mtimedb,
1088 opts, spinner, favorites=emerge.favorites,
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001089 graph_config=emerge.scheduler_graph)
David Jamesfcb70ef2011-02-02 16:02:30 -08001090 except Exception:
1091 traceback.print_exc(file=output)
1092 retcode = 1
David James1ed3e252011-10-05 20:26:15 -07001093 output.close()
David Jamesfcb70ef2011-02-02 16:02:30 -08001094
David James7358d032011-05-19 10:40:03 -07001095 if KILLED.is_set():
1096 return
1097
David Jamesfcb70ef2011-02-02 16:02:30 -08001098 job = EmergeJobState(target, pkgname, True, output.name, start_timestamp,
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001099 retcode, fetch_only=fetch_only,
1100 unpack_only=unpack_only)
David Jamesfcb70ef2011-02-02 16:02:30 -08001101 job_queue.put(job)
1102
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001103 # Set the title back to idle as the multiprocess pool won't destroy us;
1104 # when another job comes up, it'll re-use this process.
1105 proctitle.settitle('EmergeWorker', mode, '[idle]')
1106
David Jamesfcb70ef2011-02-02 16:02:30 -08001107
1108class LinePrinter(object):
1109 """Helper object to print a single line."""
1110
1111 def __init__(self, line):
1112 self.line = line
1113
David James321490a2012-12-17 12:05:56 -08001114 def Print(self, _seek_locations):
Mike Frysinger383367e2014-09-16 15:06:17 -04001115 print(self.line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001116
1117
1118class JobPrinter(object):
1119 """Helper object to print output of a job."""
1120
1121 def __init__(self, job, unlink=False):
1122 """Print output of job.
1123
Mike Frysinger02e1e072013-11-10 22:11:34 -05001124 If unlink is True, unlink the job output file when done.
1125 """
David Jamesfcb70ef2011-02-02 16:02:30 -08001126 self.current_time = time.time()
1127 self.job = job
1128 self.unlink = unlink
1129
1130 def Print(self, seek_locations):
1131
1132 job = self.job
1133
1134 # Calculate how long the job has been running.
1135 seconds = self.current_time - job.start_timestamp
1136
1137 # Note that we've printed out the job so far.
1138 job.last_output_timestamp = self.current_time
1139
1140 # Note that we're starting the job
1141 info = "job %s (%dm%.1fs)" % (job.pkgname, seconds / 60, seconds % 60)
1142 last_output_seek = seek_locations.get(job.filename, 0)
1143 if last_output_seek:
Mike Frysinger383367e2014-09-16 15:06:17 -04001144 print("=== Continue output for %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001145 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001146 print("=== Start output for %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001147
1148 # Print actual output from job
1149 f = codecs.open(job.filename, encoding='utf-8', errors='replace')
1150 f.seek(last_output_seek)
1151 prefix = job.pkgname + ":"
1152 for line in f:
1153
1154 # Save off our position in the file
1155 if line and line[-1] == "\n":
1156 last_output_seek = f.tell()
1157 line = line[:-1]
1158
1159 # Print our line
Mike Frysinger383367e2014-09-16 15:06:17 -04001160 print(prefix, line.encode('utf-8', 'replace'))
David Jamesfcb70ef2011-02-02 16:02:30 -08001161 f.close()
1162
1163 # Save our last spot in the file so that we don't print out the same
1164 # location twice.
1165 seek_locations[job.filename] = last_output_seek
1166
1167 # Note end of output section
1168 if job.done:
Mike Frysinger383367e2014-09-16 15:06:17 -04001169 print("=== Complete: %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001170 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001171 print("=== Still running: %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001172
1173 if self.unlink:
1174 os.unlink(job.filename)
1175
1176
1177def PrintWorker(queue):
1178 """A worker that prints stuff to the screen as requested."""
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001179 proctitle.settitle('PrintWorker')
David Jamesfcb70ef2011-02-02 16:02:30 -08001180
David James321490a2012-12-17 12:05:56 -08001181 def ExitHandler(_signum, _frame):
David James7358d032011-05-19 10:40:03 -07001182 # Set KILLED flag.
1183 KILLED.set()
1184
David Jamesfcb70ef2011-02-02 16:02:30 -08001185 # Switch to default signal handlers so that we'll die after two signals.
David James7358d032011-05-19 10:40:03 -07001186 signal.signal(signal.SIGINT, KillHandler)
1187 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -08001188
1189 # Don't exit on the first SIGINT / SIGTERM, because the parent worker will
1190 # handle it and tell us when we need to exit.
1191 signal.signal(signal.SIGINT, ExitHandler)
1192 signal.signal(signal.SIGTERM, ExitHandler)
1193
1194 # seek_locations is a map indicating the position we are at in each file.
1195 # It starts off empty, but is set by the various Print jobs as we go along
1196 # to indicate where we left off in each file.
1197 seek_locations = {}
1198 while True:
1199 try:
1200 job = queue.get()
1201 if job:
1202 job.Print(seek_locations)
David Jamesbccf8eb2011-07-27 14:06:06 -07001203 sys.stdout.flush()
David Jamesfcb70ef2011-02-02 16:02:30 -08001204 else:
1205 break
1206 except IOError as ex:
1207 if ex.errno == errno.EINTR:
1208 # Looks like we received a signal. Keep printing.
1209 continue
1210 raise
1211
Brian Harring867e2362012-03-17 04:05:17 -07001212
Brian Harring0be85c62012-03-17 19:52:12 -07001213class TargetState(object):
Don Garrett25f309a2014-03-19 14:02:12 -07001214 """Structure describing the state of a single build target."""
Brian Harring867e2362012-03-17 04:05:17 -07001215
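# target: the package atom; info: its entry in the deps map; score: the
# current heap priority; prefetched / fetched_successfully: prefetch status.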
Brian Harring0be85c62012-03-17 19:52:12 -07001216 __slots__ = ("target", "info", "score", "prefetched", "fetched_successfully")
Brian Harring867e2362012-03-17 04:05:17 -07001217
David James321490a2012-12-17 12:05:56 -08001218 def __init__(self, target, info):
Brian Harring867e2362012-03-17 04:05:17 -07001219 self.target, self.info = target, info
Brian Harring0be85c62012-03-17 19:52:12 -07001220 self.fetched_successfully = False
1221 self.prefetched = False
David James321490a2012-12-17 12:05:56 -08001222 self.score = None
Brian Harring867e2362012-03-17 04:05:17 -07001223 self.update_score()
1224
1225 def __cmp__(self, other):
1226 return cmp(self.score, other.score)
1227
1228 def update_score(self):
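# heapq pops the smallest tuple first, so this ordering prefers targets
# with the largest "tprovides" list, then the fewest unmet "needs", then
# binary packages over source builds, then the largest "provides" list,
# with the original index and target name as final tiebreakers.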
1229 self.score = (
1230 -len(self.info["tprovides"]),
Brian Harring0be85c62012-03-17 19:52:12 -07001231 len(self.info["needs"]),
Brian Harring11c5eeb2012-03-18 11:02:39 -07001232 not self.info["binary"],
Brian Harring867e2362012-03-17 04:05:17 -07001233 -len(self.info["provides"]),
1234 self.info["idx"],
1235 self.target,
1236 )
1237
1238
1239class ScoredHeap(object):
Don Garrett25f309a2014-03-19 14:02:12 -07001240 """Implementation of a general-purpose scored heap."""
Brian Harring867e2362012-03-17 04:05:17 -07001241
Brian Harring0be85c62012-03-17 19:52:12 -07001242 __slots__ = ("heap", "_heap_set")
1243
Brian Harring867e2362012-03-17 04:05:17 -07001244 def __init__(self, initial=()):
Brian Harring0be85c62012-03-17 19:52:12 -07001245 self.heap = list()
1246 self._heap_set = set()
1247 if initial:
1248 self.multi_put(initial)
Brian Harring867e2362012-03-17 04:05:17 -07001249
1250 def get(self):
Brian Harring0be85c62012-03-17 19:52:12 -07001251 item = heapq.heappop(self.heap)
1252 self._heap_set.remove(item.target)
1253 return item
Brian Harring867e2362012-03-17 04:05:17 -07001254
Brian Harring0be85c62012-03-17 19:52:12 -07001255 def put(self, item):
1256 if not isinstance(item, TargetState):
1257 raise ValueError("Item %r isn't a TargetState" % (item,))
1258 heapq.heappush(self.heap, item)
1259 self._heap_set.add(item.target)
Brian Harring867e2362012-03-17 04:05:17 -07001260
Brian Harring0be85c62012-03-17 19:52:12 -07001261 def multi_put(self, sequence):
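# Bulk-insert the new items, then restore the heap invariant in one pass.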
1262 sequence = list(sequence)
1263 self.heap.extend(sequence)
1264 self._heap_set.update(x.target for x in sequence)
Brian Harring867e2362012-03-17 04:05:17 -07001265 self.sort()
1266
David James5c9996d2012-03-24 10:50:46 -07001267 def sort(self):
1268 heapq.heapify(self.heap)
1269
Brian Harring0be85c62012-03-17 19:52:12 -07001270 def __contains__(self, target):
1271 return target in self._heap_set
1272
1273 def __nonzero__(self):
1274 return bool(self.heap)
1275
Brian Harring867e2362012-03-17 04:05:17 -07001276 def __len__(self):
1277 return len(self.heap)
1278
1279
David Jamesfcb70ef2011-02-02 16:02:30 -08001280class EmergeQueue(object):
1281 """Class to schedule emerge jobs according to a dependency graph."""
1282
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001283 def __init__(self, deps_map, emerge, package_db, show_output, unpack_only):
David Jamesfcb70ef2011-02-02 16:02:30 -08001284 # Store the dependency graph.
1285 self._deps_map = deps_map
Brian Harring0be85c62012-03-17 19:52:12 -07001286 self._state_map = {}
David Jamesfcb70ef2011-02-02 16:02:30 -08001287 # Initialize the job maps and ready heaps to empty.
Brian Harring0be85c62012-03-17 19:52:12 -07001288 self._build_jobs = {}
1289 self._build_ready = ScoredHeap()
1290 self._fetch_jobs = {}
1291 self._fetch_ready = ScoredHeap()
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001292 self._unpack_jobs = {}
1293 self._unpack_ready = ScoredHeap()
David Jamesfcb70ef2011-02-02 16:02:30 -08001294 # List of all packages in deps_map that are scheduled to be merged.
1295 install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"]
1296 self._total_jobs = len(install_jobs)
1297 self._show_output = show_output
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001298 self._unpack_only = unpack_only
David Jamesfcb70ef2011-02-02 16:02:30 -08001299
1300 if "--pretend" in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001301 print("Skipping merge because of --pretend mode.")
David Jamesfcb70ef2011-02-02 16:02:30 -08001302 sys.exit(0)
1303
David Jamesaaf49e42014-04-24 09:40:05 -07001304 # Set up a session so we can easily terminate all children.
1305 self._SetupSession()
David James7358d032011-05-19 10:40:03 -07001306
David Jamesfcb70ef2011-02-02 16:02:30 -08001307 # Set up the scheduler graph object. This is used by the child processes
1308 # to help schedule jobs.
1309 emerge.scheduler_graph = emerge.depgraph.schedulerGraph()
1310
1311 # Calculate how many jobs we can run in parallel. We don't want to pass
1312 # the --jobs flag over to emerge itself, because that'll tell emerge to
1313 # hide its output, and said output is quite useful for debugging hung
1314 # jobs.
1315 procs = min(self._total_jobs,
1316 emerge.opts.pop("--jobs", multiprocessing.cpu_count()))
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001317 self._build_procs = self._unpack_procs = self._fetch_procs = max(1, procs)
David James8c7e5e32011-06-28 11:26:03 -07001318 self._load_avg = emerge.opts.pop("--load-average", None)
David Jamesfcb70ef2011-02-02 16:02:30 -08001319 self._job_queue = multiprocessing.Queue()
1320 self._print_queue = multiprocessing.Queue()
Brian Harring0be85c62012-03-17 19:52:12 -07001321
1322 self._fetch_queue = multiprocessing.Queue()
1323 args = (self._fetch_queue, self._job_queue, emerge, package_db, True)
1324 self._fetch_pool = multiprocessing.Pool(self._fetch_procs, EmergeWorker,
1325 args)
1326
1327 self._build_queue = multiprocessing.Queue()
1328 args = (self._build_queue, self._job_queue, emerge, package_db)
1329 self._build_pool = multiprocessing.Pool(self._build_procs, EmergeWorker,
1330 args)
1331
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001332 if self._unpack_only:
1333 # Unpack pool only required on unpack_only jobs.
1334 self._unpack_queue = multiprocessing.Queue()
1335 args = (self._unpack_queue, self._job_queue, emerge, package_db, False,
1336 True)
1337 self._unpack_pool = multiprocessing.Pool(self._unpack_procs, EmergeWorker,
1338 args)
1339
David Jamesfcb70ef2011-02-02 16:02:30 -08001340 self._print_worker = multiprocessing.Process(target=PrintWorker,
1341 args=[self._print_queue])
1342 self._print_worker.start()
1343
1344 # Initialize the retry queue and the set of failed packages to empty.
1345 self._retry_queue = []
1346 self._failed = set()
1347
David Jamesfcb70ef2011-02-02 16:02:30 -08001348 # Set up an exit handler so that we print nice messages if we are
1349 # terminated.
1350 self._SetupExitHandler()
1351
1352 # Schedule our jobs.
Brian Harring0be85c62012-03-17 19:52:12 -07001353 self._state_map.update(
1354 (pkg, TargetState(pkg, data)) for pkg, data in deps_map.iteritems())
1355 self._fetch_ready.multi_put(self._state_map.itervalues())
David Jamesfcb70ef2011-02-02 16:02:30 -08001356
David Jamesaaf49e42014-04-24 09:40:05 -07001357 def _SetupSession(self):
1358 """Set up a session so we can easily terminate all children."""
1359 # When we call os.setsid(), this sets up a session / process group for this
1360 # process and all children. These session groups are needed so that we can
1361 # easily kill all children (including processes launched by emerge) before
1362 # we exit.
1363 #
1364 # One unfortunate side effect of os.setsid() is that it blocks CTRL-C from
1365 # being received. To work around this, we only call os.setsid() in a forked
1366 # process, so that the parent can still watch for CTRL-C. The parent will
1367 # just sit around, watching for signals and propagating them to the child,
1368 # until the child exits.
1369 #
1370 # TODO(davidjames): It would be nice if we could replace this with cgroups.
1371 pid = os.fork()
1372 if pid == 0:
1373 os.setsid()
1374 else:
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001375 proctitle.settitle('SessionManager')
1376
David Jamesaaf49e42014-04-24 09:40:05 -07001377 def PropagateToChildren(signum, _frame):
1378 # Just propagate the signals down to the child. We'll exit when the
1379 # child does.
1380 try:
1381 os.kill(pid, signum)
1382 except OSError as ex:
1383 if ex.errno != errno.ESRCH:
1384 raise
1385 signal.signal(signal.SIGINT, PropagateToChildren)
1386 signal.signal(signal.SIGTERM, PropagateToChildren)
1387
1388 def StopGroup(_signum, _frame):
1389 # When we get stopped, stop the children.
1390 try:
1391 os.killpg(pid, signal.SIGSTOP)
1392 os.kill(0, signal.SIGSTOP)
1393 except OSError as ex:
1394 if ex.errno != errno.ESRCH:
1395 raise
1396 signal.signal(signal.SIGTSTP, StopGroup)
1397
1398 def ContinueGroup(_signum, _frame):
1399 # Launch the children again after being stopped.
1400 try:
1401 os.killpg(pid, signal.SIGCONT)
1402 except OSError as ex:
1403 if ex.errno != errno.ESRCH:
1404 raise
1405 signal.signal(signal.SIGCONT, ContinueGroup)
1406
1407 # Loop until the children exit. We exit with os._exit to be sure we
1408 # don't run any finalizers (those will be run by the child process).
1409 # pylint: disable=W0212
1410 while True:
1411 try:
1412 # Wait for the process to exit. When it does, exit with the return
1413 # value of the subprocess.
Mike Frysingere2d8f0d2014-11-01 13:09:26 -04001414 os._exit(process_util.GetExitStatus(os.waitpid(pid, 0)[1]))
David Jamesaaf49e42014-04-24 09:40:05 -07001415 except OSError as ex:
1416 if ex.errno == errno.EINTR:
1417 continue
1418 traceback.print_exc()
1419 os._exit(1)
1420 except BaseException:
1421 traceback.print_exc()
1422 os._exit(1)
1423
David Jamesfcb70ef2011-02-02 16:02:30 -08001424 def _SetupExitHandler(self):
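"""Register SIGINT/SIGTERM handlers that dump job status before exiting."""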
1425
David James321490a2012-12-17 12:05:56 -08001426 def ExitHandler(signum, _frame):
David James7358d032011-05-19 10:40:03 -07001427 # Set KILLED flag.
1428 KILLED.set()
David Jamesfcb70ef2011-02-02 16:02:30 -08001429
1430 # Kill our signal handlers so we don't get called recursively
David James7358d032011-05-19 10:40:03 -07001431 signal.signal(signal.SIGINT, KillHandler)
1432 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -08001433
1434 # Print our current job status
Brian Harring0be85c62012-03-17 19:52:12 -07001435 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001436 if job:
1437 self._print_queue.put(JobPrinter(job, unlink=True))
1438
1439 # Notify the user that we are exiting
1440 self._Print("Exiting on signal %s" % signum)
David James7358d032011-05-19 10:40:03 -07001441 self._print_queue.put(None)
1442 self._print_worker.join()
David Jamesfcb70ef2011-02-02 16:02:30 -08001443
1444 # Kill everything in our process group, then exit.
David James7358d032011-05-19 10:40:03 -07001445 os.killpg(0, signal.SIGKILL)
David Jamesfcb70ef2011-02-02 16:02:30 -08001446 sys.exit(1)
1447
1448 # Print out job status when we are killed
1449 signal.signal(signal.SIGINT, ExitHandler)
1450 signal.signal(signal.SIGTERM, ExitHandler)
1451
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001452 def _ScheduleUnpack(self, pkg_state):
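# Record that an unpack job is outstanding for this target and hand it to
# the unpack workers.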
1453 self._unpack_jobs[pkg_state.target] = None
1454 self._unpack_queue.put(pkg_state)
1455
Brian Harring0be85c62012-03-17 19:52:12 -07001456 def _Schedule(self, pkg_state):
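# Returns True if a build job was actually kicked off; _Retry relies on
# this to tell whether a retried package was rescheduled.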
David Jamesfcb70ef2011-02-02 16:02:30 -08001457 # We maintain a tree of all deps; if this package doesn't need
David James8c7e5e32011-06-28 11:26:03 -07001458 # to be installed, just free up its children and continue.
David Jamesfcb70ef2011-02-02 16:02:30 -08001459 # It is possible to reinstall deps of deps without reinstalling
1460 # first-level deps, like so:
Mike Frysingerfd969312014-04-02 22:16:42 -04001461 # virtual/target-os (merge) -> eselect (nomerge) -> python (merge)
Brian Harring0be85c62012-03-17 19:52:12 -07001462 this_pkg = pkg_state.info
1463 target = pkg_state.target
1464 if pkg_state.info is not None:
1465 if this_pkg["action"] == "nomerge":
1466 self._Finish(target)
1467 elif target not in self._build_jobs:
1468 # Kick off the build if it's marked to be built.
1469 self._build_jobs[target] = None
1470 self._build_queue.put(pkg_state)
1471 return True
David Jamesfcb70ef2011-02-02 16:02:30 -08001472
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001473 def _ScheduleLoop(self, unpack_only=False):
1474 if unpack_only:
1475 ready_queue = self._unpack_ready
1476 jobs_queue = self._unpack_jobs
1477 procs = self._unpack_procs
1478 else:
1479 ready_queue = self._build_ready
1480 jobs_queue = self._build_jobs
1481 procs = self._build_procs
1482
David James8c7e5e32011-06-28 11:26:03 -07001483 # If the current load exceeds our desired load average, don't schedule
1484 # more than one job.
1485 if self._load_avg and os.getloadavg()[0] > self._load_avg:
1486 needed_jobs = 1
1487 else:
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001488 needed_jobs = procs
David James8c7e5e32011-06-28 11:26:03 -07001489
1490 # Schedule more jobs.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001491 while ready_queue and len(jobs_queue) < needed_jobs:
1492 state = ready_queue.get()
1493 if unpack_only:
1494 self._ScheduleUnpack(state)
1495 else:
1496 if state.target not in self._failed:
1497 self._Schedule(state)
David Jamesfcb70ef2011-02-02 16:02:30 -08001498
1499 def _Print(self, line):
1500 """Print a single line."""
1501 self._print_queue.put(LinePrinter(line))
1502
1503 def _Status(self):
1504 """Print status."""
1505 current_time = time.time()
1506 no_output = True
1507
1508 # Print interim output every minute if --show-output is used. Otherwise,
1509 # print notifications about running packages every 2 minutes, and print
1510 # full output for jobs that have been running for 60 minutes or more.
1511 if self._show_output:
1512 interval = 60
1513 notify_interval = 0
1514 else:
1515 interval = 60 * 60
1516 notify_interval = 60 * 2
David James321490a2012-12-17 12:05:56 -08001517 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001518 if job:
1519 last_timestamp = max(job.start_timestamp, job.last_output_timestamp)
1520 if last_timestamp + interval < current_time:
1521 self._print_queue.put(JobPrinter(job))
1522 job.last_output_timestamp = current_time
1523 no_output = False
1524 elif (notify_interval and
1525 job.last_notify_timestamp + notify_interval < current_time):
1526 job_seconds = current_time - job.start_timestamp
1527 args = (job.pkgname, job_seconds / 60, job_seconds % 60, job.filename)
1528 info = "Still building %s (%dm%.1fs). Logs in %s" % args
1529 job.last_notify_timestamp = current_time
1530 self._Print(info)
1531 no_output = False
1532
1533 # If we haven't printed any messages yet, print a general status message
1534 # here.
1535 if no_output:
1536 seconds = current_time - GLOBAL_START
Brian Harring0be85c62012-03-17 19:52:12 -07001537 fjobs, fready = len(self._fetch_jobs), len(self._fetch_ready)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001538 ujobs, uready = len(self._unpack_jobs), len(self._unpack_ready)
Brian Harring0be85c62012-03-17 19:52:12 -07001539 bjobs, bready = len(self._build_jobs), len(self._build_ready)
1540 retries = len(self._retry_queue)
1541 pending = max(0, len(self._deps_map) - fjobs - bjobs)
1542 line = "Pending %s/%s, " % (pending, self._total_jobs)
1543 if fjobs or fready:
1544 line += "Fetching %s/%s, " % (fjobs, fready + fjobs)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001545 if ujobs or uready:
1546 line += "Unpacking %s/%s, " % (ujobs, uready + ujobs)
Brian Harring0be85c62012-03-17 19:52:12 -07001547 if bjobs or bready or retries:
1548 line += "Building %s/%s, " % (bjobs, bready + bjobs)
1549 if retries:
1550 line += "Retrying %s, " % (retries,)
Mike Frysingerd6e2df02014-11-26 02:55:04 -05001551 load = " ".join(str(x) for x in os.getloadavg())
1552 line += ("[Time %dm%.1fs Load %s]" % (seconds / 60, seconds % 60, load))
Brian Harring0be85c62012-03-17 19:52:12 -07001553 self._Print(line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001554
1555 def _Finish(self, target):
David James8c7e5e32011-06-28 11:26:03 -07001556 """Mark a target as completed and unblock dependencies."""
1557 this_pkg = self._deps_map[target]
1558 if this_pkg["needs"] and this_pkg["nodeps"]:
1559 # We got installed, but our deps have not been installed yet. Dependent
1560 # packages should only be installed when our needs have been fully met.
1561 this_pkg["action"] = "nomerge"
1562 else:
David James8c7e5e32011-06-28 11:26:03 -07001563 for dep in this_pkg["provides"]:
1564 dep_pkg = self._deps_map[dep]
Brian Harring0be85c62012-03-17 19:52:12 -07001565 state = self._state_map[dep]
David James8c7e5e32011-06-28 11:26:03 -07001566 del dep_pkg["needs"][target]
Brian Harring0be85c62012-03-17 19:52:12 -07001567 state.update_score()
1568 if not state.prefetched:
1569 if dep in self._fetch_ready:
1570 # If it's not currently being fetched, update the prioritization
1571 self._fetch_ready.sort()
1572 elif not dep_pkg["needs"]:
David James8c7e5e32011-06-28 11:26:03 -07001573 if dep_pkg["nodeps"] and dep_pkg["action"] == "nomerge":
1574 self._Finish(dep)
1575 else:
Brian Harring0be85c62012-03-17 19:52:12 -07001576 self._build_ready.put(self._state_map[dep])
David James8c7e5e32011-06-28 11:26:03 -07001577 self._deps_map.pop(target)
David Jamesfcb70ef2011-02-02 16:02:30 -08001578
1579 def _Retry(self):
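# Pop failed packages off the retry queue until one can actually be
# rescheduled; only one retried package is kicked off at a time.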
David James8c7e5e32011-06-28 11:26:03 -07001580 while self._retry_queue:
Brian Harring0be85c62012-03-17 19:52:12 -07001581 state = self._retry_queue.pop(0)
1582 if self._Schedule(state):
1583 self._Print("Retrying emerge of %s." % state.target)
David James8c7e5e32011-06-28 11:26:03 -07001584 break
David Jamesfcb70ef2011-02-02 16:02:30 -08001585
Brian Harringa43f5952012-04-12 01:19:34 -07001586 def _Shutdown(self):
David Jamesfcb70ef2011-02-02 16:02:30 -08001587 # Tell emerge workers to exit. They all exit when 'None' is pushed
1588 # to the queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001589
Brian Harringa43f5952012-04-12 01:19:34 -07001590 # Shut down the workers first, then the job queue (which is how they feed
1591 # results back), and finally the print queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001592
Brian Harringa43f5952012-04-12 01:19:34 -07001593 def _stop(queue, pool):
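# Ask the pool's workers to exit by sending None, wait for them to finish,
# and terminate the pool as a backstop.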
1594 if pool is None:
1595 return
1596 try:
1597 queue.put(None)
1598 pool.close()
1599 pool.join()
1600 finally:
1601 pool.terminate()
Brian Harring0be85c62012-03-17 19:52:12 -07001602
Brian Harringa43f5952012-04-12 01:19:34 -07001603 _stop(self._fetch_queue, self._fetch_pool)
1604 self._fetch_queue = self._fetch_pool = None
Brian Harring0be85c62012-03-17 19:52:12 -07001605
Brian Harringa43f5952012-04-12 01:19:34 -07001606 _stop(self._build_queue, self._build_pool)
1607 self._build_queue = self._build_pool = None
1608
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001609 if self._unpack_only:
1610 _stop(self._unpack_queue, self._unpack_pool)
1611 self._unpack_queue = self._unpack_pool = None
1612
Brian Harringa43f5952012-04-12 01:19:34 -07001613 if self._job_queue is not None:
1614 self._job_queue.close()
1615 self._job_queue = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001616
1617 # Now that our workers are finished, we can kill the print queue.
Brian Harringa43f5952012-04-12 01:19:34 -07001618 if self._print_worker is not None:
1619 try:
1620 self._print_queue.put(None)
1621 self._print_queue.close()
1622 self._print_worker.join()
1623 finally:
1624 self._print_worker.terminate()
1625 self._print_queue = self._print_worker = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001626
1627 def Run(self):
1628 """Run through the scheduled ebuilds.
1629
1630 Keep running so long as we have uninstalled packages in the
1631 dependency graph to merge.
1632 """
Brian Harringa43f5952012-04-12 01:19:34 -07001633 if not self._deps_map:
1634 return
1635
Brian Harring0be85c62012-03-17 19:52:12 -07001636 # Start the fetchers.
1637 for _ in xrange(min(self._fetch_procs, len(self._fetch_ready))):
1638 state = self._fetch_ready.get()
1639 self._fetch_jobs[state.target] = None
1640 self._fetch_queue.put(state)
1641
1642 # Print an update, then get going.
1643 self._Status()
1644
David Jamese703d0f2012-01-12 16:27:45 -08001645 retried = set()
David Jamesfcb70ef2011-02-02 16:02:30 -08001646 while self._deps_map:
1647 # Check here that we are actually waiting for something.
Brian Harring0be85c62012-03-17 19:52:12 -07001648 if (self._build_queue.empty() and
David Jamesfcb70ef2011-02-02 16:02:30 -08001649 self._job_queue.empty() and
Brian Harring0be85c62012-03-17 19:52:12 -07001650 not self._fetch_jobs and
1651 not self._fetch_ready and
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001652 not self._unpack_jobs and
1653 not self._unpack_ready and
Brian Harring0be85c62012-03-17 19:52:12 -07001654 not self._build_jobs and
1655 not self._build_ready and
David Jamesfcb70ef2011-02-02 16:02:30 -08001656 self._deps_map):
1657 # If we have failed on a package, retry it now.
1658 if self._retry_queue:
1659 self._Retry()
1660 else:
David Jamesfcb70ef2011-02-02 16:02:30 -08001661 # Tell the user why we're exiting.
1662 if self._failed:
Mike Frysinger383367e2014-09-16 15:06:17 -04001663 print('Packages failed:\n\t%s' % '\n\t'.join(self._failed))
David James0eae23e2012-07-03 15:04:25 -07001664 status_file = os.environ.get("PARALLEL_EMERGE_STATUS_FILE")
1665 if status_file:
David James321490a2012-12-17 12:05:56 -08001666 failed_pkgs = set(portage.versions.cpv_getkey(x)
1667 for x in self._failed)
David James0eae23e2012-07-03 15:04:25 -07001668 with open(status_file, "a") as f:
1669 f.write("%s\n" % " ".join(failed_pkgs))
David Jamesfcb70ef2011-02-02 16:02:30 -08001670 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001671 print("Deadlock! Circular dependencies!")
David Jamesfcb70ef2011-02-02 16:02:30 -08001672 sys.exit(1)
1673
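# Poll the job queue in 5-second slices so we can keep scheduling work;
# after 12 empty polls (about a minute), fall through to the else clause
# below and print a status update.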
David James321490a2012-12-17 12:05:56 -08001674 for _ in xrange(12):
David Jamesa74289a2011-08-12 10:41:24 -07001675 try:
1676 job = self._job_queue.get(timeout=5)
1677 break
1678 except Queue.Empty:
1679 # Check if any more jobs can be scheduled.
1680 self._ScheduleLoop()
1681 else:
Brian Harring706747c2012-03-16 03:04:31 -07001682 # Print an update every 60 seconds.
David Jamesfcb70ef2011-02-02 16:02:30 -08001683 self._Status()
1684 continue
1685
1686 target = job.target
1687
Brian Harring0be85c62012-03-17 19:52:12 -07001688 if job.fetch_only:
1689 if not job.done:
1690 self._fetch_jobs[job.target] = job
1691 else:
1692 state = self._state_map[job.target]
1693 state.prefetched = True
1694 state.fetched_successfully = (job.retcode == 0)
1695 del self._fetch_jobs[job.target]
1696 self._Print("Fetched %s in %2.2fs"
1697 % (target, time.time() - job.start_timestamp))
1698
1699 if self._show_output or job.retcode != 0:
1700 self._print_queue.put(JobPrinter(job, unlink=True))
1701 else:
1702 os.unlink(job.filename)
1703 # Failure or not, hand the package to the build stage next.
1704 if not self._deps_map[job.target]["needs"]:
1705 self._build_ready.put(state)
1706 self._ScheduleLoop()
1707
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001708 if self._unpack_only and job.retcode == 0:
1709 self._unpack_ready.put(state)
1710 self._ScheduleLoop(unpack_only=True)
1711
Brian Harring0be85c62012-03-17 19:52:12 -07001712 if self._fetch_ready:
1713 state = self._fetch_ready.get()
1714 self._fetch_queue.put(state)
1715 self._fetch_jobs[state.target] = None
1716 else:
1717 # Minor optimization; shut down fetchers early since we know
1718 # the queue is empty.
1719 self._fetch_queue.put(None)
1720 continue
1721
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001722 if job.unpack_only:
1723 if not job.done:
1724 self._unpack_jobs[target] = job
1725 else:
1726 del self._unpack_jobs[target]
1727 self._Print("Unpacked %s in %2.2fs"
1728 % (target, time.time() - job.start_timestamp))
1729 if self._show_output or job.retcode != 0:
1730 self._print_queue.put(JobPrinter(job, unlink=True))
1731 else:
1732 os.unlink(job.filename)
1733 if self._unpack_ready:
1734 state = self._unpack_ready.get()
1735 self._unpack_queue.put(state)
1736 self._unpack_jobs[state.target] = None
1737 continue
1738
David Jamesfcb70ef2011-02-02 16:02:30 -08001739 if not job.done:
Brian Harring0be85c62012-03-17 19:52:12 -07001740 self._build_jobs[target] = job
David Jamesfcb70ef2011-02-02 16:02:30 -08001741 self._Print("Started %s (logged in %s)" % (target, job.filename))
1742 continue
1743
1744 # Print output of job
1745 if self._show_output or job.retcode != 0:
1746 self._print_queue.put(JobPrinter(job, unlink=True))
1747 else:
1748 os.unlink(job.filename)
Brian Harring0be85c62012-03-17 19:52:12 -07001749 del self._build_jobs[target]
David Jamesfcb70ef2011-02-02 16:02:30 -08001750
1751 seconds = time.time() - job.start_timestamp
1752 details = "%s (in %dm%.1fs)" % (target, seconds / 60, seconds % 60)
David James32420cc2011-08-25 21:32:46 -07001753 previously_failed = target in self._failed
David Jamesfcb70ef2011-02-02 16:02:30 -08001754
1755 # Complain if necessary.
1756 if job.retcode != 0:
1757 # Handle job failure.
David James32420cc2011-08-25 21:32:46 -07001758 if previously_failed:
David Jamesfcb70ef2011-02-02 16:02:30 -08001759 # If this job has failed previously, give up.
1760 self._Print("Failed %s. Your build has failed." % details)
1761 else:
1762 # Queue up this build to try again after a long while.
David Jamese703d0f2012-01-12 16:27:45 -08001763 retried.add(target)
Brian Harring0be85c62012-03-17 19:52:12 -07001764 self._retry_queue.append(self._state_map[target])
David Jamesfcb70ef2011-02-02 16:02:30 -08001765 self._failed.add(target)
1766 self._Print("Failed %s, retrying later." % details)
1767 else:
David James32420cc2011-08-25 21:32:46 -07001768 if previously_failed:
1769 # Remove target from list of failed packages.
1770 self._failed.remove(target)
1771
1772 self._Print("Completed %s" % details)
1773
1774 # Mark as completed and unblock waiting ebuilds.
1775 self._Finish(target)
1776
1777 if previously_failed and self._retry_queue:
David Jamesfcb70ef2011-02-02 16:02:30 -08001778 # If we have successfully retried a failed package, and there
1779 # are more failed packages, try the next one. We will only have
1780 # one retrying package actively running at a time.
1781 self._Retry()
1782
David Jamesfcb70ef2011-02-02 16:02:30 -08001783
David James8c7e5e32011-06-28 11:26:03 -07001784 # Schedule pending jobs and print an update.
1785 self._ScheduleLoop()
1786 self._Status()
David Jamesfcb70ef2011-02-02 16:02:30 -08001787
David Jamese703d0f2012-01-12 16:27:45 -08001788 # If packages were retried, output a warning.
1789 if retried:
1790 self._Print("")
1791 self._Print("WARNING: The following packages failed the first time,")
1792 self._Print("but succeeded upon retry. This might indicate incorrect")
1793 self._Print("dependencies.")
1794 for pkg in retried:
1795 self._Print(" %s" % pkg)
1796 self._Print("@@@STEP_WARNINGS@@@")
1797 self._Print("")
1798
David Jamesfcb70ef2011-02-02 16:02:30 -08001799 # All jobs are done; let the user know the merge is complete.
1800 self._Print("Merge complete")
David Jamesfcb70ef2011-02-02 16:02:30 -08001801
1802
Brian Harring30675052012-02-29 12:18:22 -08001803def main(argv):
Brian Harring8294d652012-05-23 02:20:52 -07001804 try:
1805 return real_main(argv)
1806 finally:
1807 # Work around multiprocessing failing to clean up after itself.
1808 # http://bugs.python.org/issue4106
1809 # Step one: ensure GC is run *prior* to the VM starting shutdown.
1810 gc.collect()
1811 # Step two: go looking for those threads and try to manually reap
1812 # them if we can.
1813 for x in threading.enumerate():
1814 # Filter on the name and ident; if ident is None, the thread
1815 # wasn't started.
1816 if x.name == 'QueueFeederThread' and x.ident is not None:
1817 x.join(1)
David Jamesfcb70ef2011-02-02 16:02:30 -08001818
Brian Harring8294d652012-05-23 02:20:52 -07001819
1820def real_main(argv):
Brian Harring30675052012-02-29 12:18:22 -08001821 parallel_emerge_args = argv[:]
David Jamesfcb70ef2011-02-02 16:02:30 -08001822 deps = DepGraphGenerator()
Brian Harring30675052012-02-29 12:18:22 -08001823 deps.Initialize(parallel_emerge_args)
David Jamesfcb70ef2011-02-02 16:02:30 -08001824 emerge = deps.emerge
1825
1826 if emerge.action is not None:
Brian Harring30675052012-02-29 12:18:22 -08001827 argv = deps.ParseParallelEmergeArgs(argv)
Brian Harring8294d652012-05-23 02:20:52 -07001828 return emerge_main(argv)
David Jamesfcb70ef2011-02-02 16:02:30 -08001829 elif not emerge.cmdline_packages:
1830 Usage()
Brian Harring8294d652012-05-23 02:20:52 -07001831 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001832
1833 # Unless we're in pretend mode, there's not much point running without
1834 # root access. We need to be able to install packages.
1835 #
1836 # NOTE: Even if you're running --pretend, it's a good idea to run
1837 # parallel_emerge with root access so that portage can write to the
1838 # dependency cache. This is important for performance.
David James321490a2012-12-17 12:05:56 -08001839 if "--pretend" not in emerge.opts and portage.data.secpass < 2:
Mike Frysinger383367e2014-09-16 15:06:17 -04001840 print("parallel_emerge: superuser access is required.")
Brian Harring8294d652012-05-23 02:20:52 -07001841 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001842
1843 if "--quiet" not in emerge.opts:
1844 cmdline_packages = " ".join(emerge.cmdline_packages)
Mike Frysinger383367e2014-09-16 15:06:17 -04001845 print("Starting fast-emerge.")
1846 print(" Building package %s on %s" % (cmdline_packages,
1847 deps.board or "root"))
David Jamesfcb70ef2011-02-02 16:02:30 -08001848
David James386ccd12011-05-04 20:17:42 -07001849 deps_tree, deps_info = deps.GenDependencyTree()
David Jamesfcb70ef2011-02-02 16:02:30 -08001850
1851 # You want me to be verbose? I'll give you two trees! Twice as much value.
1852 if "--tree" in emerge.opts and "--verbose" in emerge.opts:
1853 deps.PrintTree(deps_tree)
1854
David James386ccd12011-05-04 20:17:42 -07001855 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001856
1857 # OK, time to print out our progress so far.
1858 deps.PrintInstallPlan(deps_graph)
1859 if "--tree" in emerge.opts:
1860 PrintDepsMap(deps_graph)
1861
1862 # Are we upgrading portage? If so, and there are more packages to merge,
1863 # schedule a restart of parallel_emerge to merge the rest. This ensures that
1864 # we pick up all updates to portage settings before merging any more
1865 # packages.
1866 portage_upgrade = False
1867 root = emerge.settings["ROOT"]
Don Garrett25f309a2014-03-19 14:02:12 -07001868 # pylint: disable=W0212
Bertrand SIMONNETa15b5072014-10-23 15:27:52 -07001869 final_db = emerge.depgraph._dynamic_config._filtered_trees[root]['graph_db']
David Jamesfcb70ef2011-02-02 16:02:30 -08001870 if root == "/":
1871 for db_pkg in final_db.match_pkgs("sys-apps/portage"):
1872 portage_pkg = deps_graph.get(db_pkg.cpv)
David James0ff16f22012-11-02 14:18:07 -07001873 if portage_pkg:
David Jamesfcb70ef2011-02-02 16:02:30 -08001874 portage_upgrade = True
1875 if "--quiet" not in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001876 print("Upgrading portage first, then restarting...")
David Jamesfcb70ef2011-02-02 16:02:30 -08001877
David James0ff16f22012-11-02 14:18:07 -07001878 # Upgrade Portage first, then the rest of the packages.
1879 #
1880 # In order to grant the child permission to run setsid, we need to run sudo
1881 # again. We preserve SUDO_USER here in case an ebuild depends on it.
1882 if portage_upgrade:
1883 # Calculate what arguments to use when re-invoking.
1884 args = ["sudo", "-E", "SUDO_USER=%s" % os.environ.get("SUDO_USER", "")]
1885 args += [os.path.abspath(sys.argv[0])] + parallel_emerge_args
1886 args += ["--exclude=sys-apps/portage"]
1887
1888 # First upgrade Portage.
1889 passthrough_args = ("--quiet", "--pretend", "--verbose")
1890 emerge_args = [k for k in emerge.opts if k in passthrough_args]
1891 ret = emerge_main(emerge_args + ["portage"])
1892 if ret != 0:
1893 return ret
1894
1895 # Now upgrade the rest.
1896 os.execvp(args[0], args)
1897
Bertrand SIMONNETc03c8ee2014-12-10 17:02:55 -08001898 # Attempt to solve crbug.com/433482
1899 # The file descriptor error appears only when getting userpriv_groups
1900 # (lazily generated). Loading userpriv_groups here will reduce the number of
1901 # calls from a few hundred to one.
1902 portage.data._get_global('userpriv_groups')
1903
David Jamesfcb70ef2011-02-02 16:02:30 -08001904 # Run the queued emerges.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001905 scheduler = EmergeQueue(deps_graph, emerge, deps.package_db, deps.show_output,
1906 deps.unpack_only)
Brian Harringa43f5952012-04-12 01:19:34 -07001907 try:
1908 scheduler.Run()
1909 finally:
Don Garrett25f309a2014-03-19 14:02:12 -07001910 # pylint: disable=W0212
Brian Harringa43f5952012-04-12 01:19:34 -07001911 scheduler._Shutdown()
David James97ce8902011-08-16 09:51:05 -07001912 scheduler = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001913
Mike Frysingerd20a6e22012-10-04 19:01:10 -04001914 clean_logs(emerge.settings)
1915
Mike Frysinger383367e2014-09-16 15:06:17 -04001916 print("Done")
Brian Harring8294d652012-05-23 02:20:52 -07001917 return 0