# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Program to run emerge in parallel, for significant speedup.

Usage:
 ./parallel_emerge [--board=BOARD] [--workon=PKGS]
 [--force-remote-binary=PKGS] [emerge args] package

This script runs multiple emerge processes in parallel, using appropriate
Portage APIs. It is faster than standard emerge because it has a
multiprocess model instead of an asynchronous model.
"""

from __future__ import print_function

import codecs
import copy
import errno
import gc
import heapq
import multiprocessing
import os
try:
  import Queue
except ImportError:
  # Python-3 renamed to "queue". We still use Queue to avoid collisions
  # with naming variables as "queue". Maybe we'll transition at some point.
  # pylint: disable=F0401
  import queue as Queue
import signal
import subprocess
import sys
import tempfile
import threading
import time
import traceback

from chromite.lib import cros_build_lib
from chromite.lib import process_util
from chromite.lib import proctitle

# If PORTAGE_USERNAME isn't specified, scrape it from the $HOME variable. On
# Chromium OS, the default "portage" user doesn't have the necessary
# permissions. It'd be easier if we could default to $USERNAME, but $USERNAME
# is "root" here because we get called through sudo.
#
# We need to set this before importing any portage modules, because portage
# looks up "PORTAGE_USERNAME" at import time.
#
# NOTE: .bashrc sets PORTAGE_USERNAME = $USERNAME, so most people won't
# encounter this case unless they have an old chroot or blow away the
# environment by running sudo without the -E specifier.
if "PORTAGE_USERNAME" not in os.environ:
  homedir = os.environ.get("HOME")
  if homedir:
    os.environ["PORTAGE_USERNAME"] = os.path.basename(homedir)

# Wrap Popen with a lock to ensure no two Popen are executed simultaneously in
# the same process.
# Two Popen calls at the same time might be the cause of crbug.com/433482.
_popen_lock = threading.Lock()
_old_popen = subprocess.Popen

def _LockedPopen(*args, **kwargs):
  with _popen_lock:
    return _old_popen(*args, **kwargs)

subprocess.Popen = _LockedPopen

# Portage doesn't expose dependency trees in its public API, so we have to
# make use of some private APIs here. These modules are found under
# /usr/lib/portage/pym/.
#
# TODO(davidjames): Update Portage to expose public APIs for these features.
# pylint: disable=F0401
from _emerge.actions import adjust_configs
from _emerge.actions import load_emerge_config
from _emerge.create_depgraph_params import create_depgraph_params
from _emerge.depgraph import backtrack_depgraph
from _emerge.main import emerge_main
from _emerge.main import parse_opts
from _emerge.Package import Package
from _emerge.post_emerge import clean_logs
from _emerge.Scheduler import Scheduler
from _emerge.stdout_spinner import stdout_spinner
from portage._global_updates import _global_updates
import portage
import portage.debug
# pylint: enable=F0401


def Usage():
  """Print usage."""
  print("Usage:")
  print(" ./parallel_emerge [--board=BOARD] [--workon=PKGS]")
  print(" [--rebuild] [emerge args] package")
  print()
  print("Packages specified as workon packages are always built from source.")
  print()
  print("The --workon argument is mainly useful when you want to build and")
  print("install packages that you are working on unconditionally, but do not")
  print("want to have to rev the package to indicate you want to build it from")
  print("source. The build_packages script will automatically supply the")
  print("workon argument to emerge, ensuring that packages selected using")
  print("cros-workon are rebuilt.")
  print()
  print("The --rebuild option rebuilds packages whenever their dependencies")
  print("are changed. This ensures that your build is correct.")


# Global start time
GLOBAL_START = time.time()

# Whether process has been killed by a signal.
KILLED = multiprocessing.Event()


class EmergeData(object):
  """This simple struct holds various emerge variables.

  This struct helps us easily pass emerge variables around as a unit.
  These variables are used for calculating dependencies and installing
  packages.
  """

  __slots__ = ["action", "cmdline_packages", "depgraph", "favorites",
               "mtimedb", "opts", "root_config", "scheduler_graph",
               "settings", "spinner", "trees"]

  def __init__(self):
    # The action the user requested. If the user is installing packages, this
    # is None. If the user is doing anything other than installing packages,
    # this will contain the action name, which will map exactly to the
    # long-form name of the associated emerge option.
    #
    # Example: If you call parallel_emerge --unmerge package, the action name
    # will be "unmerge"
    self.action = None

    # The list of packages the user passed on the command-line.
    self.cmdline_packages = None

    # The emerge dependency graph. It'll contain all the packages involved in
    # this merge, along with their versions.
    self.depgraph = None

    # The list of candidates to add to the world file.
    self.favorites = None

    # A dict of the options passed to emerge. This dict has been cleaned up
    # a bit by parse_opts, so that it's a bit easier for the emerge code to
    # look at the options.
    #
    # Emerge takes a few shortcuts in its cleanup process to make parsing of
    # the options dict easier. For example, if you pass in "--usepkg=n", the
    # "--usepkg" flag is just left out of the dictionary altogether. Because
    # --usepkg=n is the default, this makes parsing easier, because emerge
    # can just assume that if "--usepkg" is in the dictionary, it's enabled.
    #
    # These cleanup processes aren't applied to all options. For example, the
    # --with-bdeps flag is passed in as-is. For a full list of the cleanups
    # applied by emerge, see the parse_opts function in the _emerge.main
    # package.
    self.opts = None

    # A dictionary used by portage to maintain global state. This state is
    # loaded from disk when portage starts up, and saved to disk whenever we
    # call mtimedb.commit().
    #
    # This database contains information about global updates (i.e., what
    # version of portage we have) and what we're currently doing. Portage
    # saves what it is currently doing in this database so that it can be
    # resumed when you call it with the --resume option.
    #
    # parallel_emerge does not save what it is currently doing in the mtimedb,
    # so we do not support the --resume option.
    self.mtimedb = None

    # The portage configuration for our current root. This contains the portage
    # settings (see below) and the three portage trees for our current root.
    # (The three portage trees are explained below, in the documentation for
    # the "trees" member.)
    self.root_config = None

    # The scheduler graph is used by emerge to calculate what packages to
    # install. We don't actually install any deps, so this isn't really used,
    # but we pass it in to the Scheduler object anyway.
    self.scheduler_graph = None

    # Portage settings for our current session. Most of these settings are set
    # in make.conf inside our current install root.
    self.settings = None

    # The spinner, which spews stuff to stdout to indicate that portage is
    # doing something. We maintain our own spinner, so we set the portage
    # spinner to "silent" mode.
    self.spinner = None

    # The portage trees. There are separate portage trees for each root. To get
    # the portage tree for the current root, you can look in self.trees[root],
    # where root = self.settings["ROOT"].
    #
    # In each root, there are three trees: vartree, porttree, and bintree.
    # - vartree: A database of the currently-installed packages.
    # - porttree: A database of ebuilds, that can be used to build packages.
    # - bintree: A database of binary packages.
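    #
    # Illustrative access pattern (this mirrors how trees is used later in
    # this script, e.g. in CreateDepgraph and EmergeWorker):
    #   root = self.settings["ROOT"]
    #   vardb = self.trees[root]["vartree"].dbapi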
    self.trees = None


class DepGraphGenerator(object):
  """Grab dependency information about packages from portage.

  Typical usage:
    deps = DepGraphGenerator()
    deps.Initialize(sys.argv[1:])
    deps_tree, deps_info = deps.GenDependencyTree()
    deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
    deps.PrintTree(deps_tree)
    PrintDepsMap(deps_graph)
  """

  __slots__ = ["board", "emerge", "package_db", "show_output", "unpack_only"]

  def __init__(self):
    self.board = None
    self.emerge = EmergeData()
    self.package_db = {}
    self.show_output = False
    self.unpack_only = False

  def ParseParallelEmergeArgs(self, argv):
    """Read the parallel emerge arguments from the command-line.

    We need to be compatible with emerge arg format. We scrape arguments that
    are specific to parallel_emerge, and pass through the rest directly to
    emerge.

    Args:
      argv: arguments list

    Returns:
      Arguments that don't belong to parallel_emerge
    """
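    # Illustrative translations (the option names are real; the values are
    # made-up examples):
    #   --board=amd64-generic  -> remembered in self.board, not passed on
    #   --workon=foo/bar       -> --reinstall-atoms=foo/bar
    #                             --usepkg-exclude=foo/bar
    #   --rebuild              -> --rebuild-if-unbuilt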
    emerge_args = []
    for arg in argv:
      # Specifically match arguments that are specific to parallel_emerge, and
      # pass through the rest.
      if arg.startswith("--board="):
        self.board = arg.replace("--board=", "")
      elif arg.startswith("--workon="):
        workon_str = arg.replace("--workon=", "")
        emerge_args.append("--reinstall-atoms=%s" % workon_str)
        emerge_args.append("--usepkg-exclude=%s" % workon_str)
      elif arg.startswith("--force-remote-binary="):
        force_remote_binary = arg.replace("--force-remote-binary=", "")
        emerge_args.append("--useoldpkg-atoms=%s" % force_remote_binary)
      elif arg == "--show-output":
        self.show_output = True
      elif arg == "--rebuild":
        emerge_args.append("--rebuild-if-unbuilt")
      elif arg == "--unpackonly":
        emerge_args.append("--fetchonly")
        self.unpack_only = True
      else:
        # Not one of our options, so pass through to emerge.
        emerge_args.append(arg)

    # These packages take a really long time to build, so, for expediency, we
    # are blacklisting them from the automatic rebuilds that would otherwise
    # be triggered when one of their dependencies is recompiled.
    for pkg in ("chromeos-base/chromeos-chrome",):
      emerge_args.append("--rebuild-exclude=%s" % pkg)

    return emerge_args

  def Initialize(self, args):
    """Initializer. Parses arguments and sets up portage state."""

    # Parse and strip out args that are just intended for parallel_emerge.
    emerge_args = self.ParseParallelEmergeArgs(args)

    # Setup various environment variables based on our current board. These
    # variables are normally setup inside emerge-${BOARD}, but since we don't
    # call that script, we have to set it up here. These variables serve to
    # point our tools at /build/BOARD and to setup cross compiles to the
    # appropriate board as configured in toolchain.conf.
    if self.board:
      sysroot = os.environ.get('SYSROOT', cros_build_lib.GetSysroot(self.board))
      os.environ["PORTAGE_CONFIGROOT"] = sysroot
      os.environ["PORTAGE_SYSROOT"] = sysroot
      os.environ["SYSROOT"] = sysroot

    # Although CHROMEOS_ROOT isn't specific to boards, it's normally setup
    # inside emerge-${BOARD}, so we set it up here for compatibility. It
    # will be going away soon as we migrate to CROS_WORKON_SRCROOT.
    os.environ.setdefault("CHROMEOS_ROOT", os.environ["HOME"] + "/trunk")

    # Turn off interactive delays
    os.environ["EBEEP_IGNORE"] = "1"
    os.environ["EPAUSE_IGNORE"] = "1"
    os.environ["CLEAN_DELAY"] = "0"

    # Parse the emerge options.
    action, opts, cmdline_packages = parse_opts(emerge_args, silent=True)

    # Set environment variables based on options. Portage normally sets these
    # environment variables in emerge_main, but we can't use that function,
    # because it also does a bunch of other stuff that we don't want.
    # TODO(davidjames): Patch portage to move this logic into a function we can
    # reuse here.
    if "--debug" in opts:
      os.environ["PORTAGE_DEBUG"] = "1"
    if "--config-root" in opts:
      os.environ["PORTAGE_CONFIGROOT"] = opts["--config-root"]
    if "--root" in opts:
      os.environ["ROOT"] = opts["--root"]
    if "--accept-properties" in opts:
      os.environ["ACCEPT_PROPERTIES"] = opts["--accept-properties"]

    # If we're installing packages to the board, we can disable vardb locks.
    # This is safe because we only run up to one instance of parallel_emerge in
    # parallel.
    # TODO(davidjames): Enable this for the host too.
    if self.board:
      os.environ.setdefault("PORTAGE_LOCKS", "false")

    # Now that we've setup the necessary environment variables, we can load the
    # emerge config from disk.
    settings, trees, mtimedb = load_emerge_config()

    # Add in EMERGE_DEFAULT_OPTS, if specified.
    tmpcmdline = []
    if "--ignore-default-opts" not in opts:
      tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
    tmpcmdline.extend(emerge_args)
    action, opts, cmdline_packages = parse_opts(tmpcmdline)

    # If we're installing to the board, we want the --root-deps option so that
    # portage will install the build dependencies to that location as well.
    if self.board:
      opts.setdefault("--root-deps", True)

    # Check whether our portage tree is out of date. Typically, this happens
    # when you're setting up a new portage tree, such as in setup_board and
    # make_chroot. In that case, portage applies a bunch of global updates
    # here. Once the updates are finished, we need to commit any changes
    # that the global update made to our mtimedb, and reload the config.
    #
    # Portage normally handles this logic in emerge_main, but again, we can't
    # use that function here.
    if _global_updates(trees, mtimedb["updates"]):
      mtimedb.commit()
      settings, trees, mtimedb = load_emerge_config(trees=trees)

    # Setup implied options. Portage normally handles this logic in
    # emerge_main.
    if "--buildpkgonly" in opts or "buildpkg" in settings.features:
      opts.setdefault("--buildpkg", True)
    if "--getbinpkgonly" in opts:
      opts.setdefault("--usepkgonly", True)
      opts.setdefault("--getbinpkg", True)
    if "getbinpkg" in settings.features:
      # Per emerge_main, FEATURES=getbinpkg overrides --getbinpkg=n
      opts["--getbinpkg"] = True
    if "--getbinpkg" in opts or "--usepkgonly" in opts:
      opts.setdefault("--usepkg", True)
    if "--fetch-all-uri" in opts:
      opts.setdefault("--fetchonly", True)
    if "--skipfirst" in opts:
      opts.setdefault("--resume", True)
    if "--buildpkgonly" in opts:
      # --buildpkgonly will not merge anything, so it overrides all binary
      # package options.
      for opt in ("--getbinpkg", "--getbinpkgonly",
                  "--usepkg", "--usepkgonly"):
        opts.pop(opt, None)
    if (settings.get("PORTAGE_DEBUG", "") == "1" and
        "python-trace" in settings.features):
      portage.debug.set_trace(True)

    # Complain about unsupported options
    for opt in ("--ask", "--ask-enter-invalid", "--resume", "--skipfirst"):
      if opt in opts:
        print("%s is not supported by parallel_emerge" % opt)
        sys.exit(1)

    # Make emerge specific adjustments to the config (e.g. colors!)
    adjust_configs(opts, trees)

    # Save our configuration so far in the emerge object
    emerge = self.emerge
    emerge.action, emerge.opts = action, opts
    emerge.settings, emerge.trees, emerge.mtimedb = settings, trees, mtimedb
    emerge.cmdline_packages = cmdline_packages
    root = settings["ROOT"]
    emerge.root_config = trees[root]["root_config"]

    if "--usepkg" in opts:
      emerge.trees[root]["bintree"].populate("--getbinpkg" in opts)

  def CreateDepgraph(self, emerge, packages):
    """Create an emerge depgraph object."""
    # Setup emerge options.
    emerge_opts = emerge.opts.copy()

    # Ask portage to build a dependency graph with the options we specified
    # above.
    params = create_depgraph_params(emerge_opts, emerge.action)
    success, depgraph, favorites = backtrack_depgraph(
        emerge.settings, emerge.trees, emerge_opts, params, emerge.action,
        packages, emerge.spinner)
    emerge.depgraph = depgraph

    # Is it impossible to honor the user's request? Bail!
    if not success:
      depgraph.display_problems()
      sys.exit(1)

    emerge.depgraph = depgraph
    emerge.favorites = favorites

    # Prime and flush emerge caches.
    root = emerge.settings["ROOT"]
    vardb = emerge.trees[root]["vartree"].dbapi
    if "--pretend" not in emerge.opts:
      vardb.counter_tick()
    vardb.flush_cache()

  def GenDependencyTree(self):
    """Get dependency tree info from emerge.

    Returns:
      Dependency tree
    """
    start = time.time()

    emerge = self.emerge

    # Create a list of packages to merge
    packages = set(emerge.cmdline_packages[:])

    # Tell emerge to be quiet. We print plenty of info ourselves so we don't
    # need any extra output from portage.
    portage.util.noiselimit = -1

    # My favorite feature: The silent spinner. It doesn't spin. Ever.
    # I'd disable the colors by default too, but they look kind of cool.
    emerge.spinner = stdout_spinner()
    emerge.spinner.update = emerge.spinner.update_quiet

    if "--quiet" not in emerge.opts:
      print("Calculating deps...")

    self.CreateDepgraph(emerge, packages)
    depgraph = emerge.depgraph

    # Build our own tree from the emerge digraph.
    deps_tree = {}
    # pylint: disable=W0212
    digraph = depgraph._dynamic_config.digraph
    root = emerge.settings["ROOT"]
    final_db = depgraph._dynamic_config._filtered_trees[root]['graph_db']
    for node, node_deps in digraph.nodes.items():
      # Calculate dependency packages that need to be installed first. Each
      # child on the digraph is a dependency. The "operation" field specifies
      # what we're doing (e.g. merge, uninstall, etc.). The "priorities" array
      # contains the type of dependency (e.g. build, runtime, runtime_post,
      # etc.)
      #
      # Portage refers to the identifiers for packages as a CPV. This acronym
      # stands for Category/Package/Version.
      #
      # Here's an example CPV: chromeos-base/power_manager-0.0.1-r1
      # Split up, this CPV would be:
      # C -- Category: chromeos-base
      # P -- Package: power_manager
      # V -- Version: 0.0.1-r1
      #
      # We just refer to CPVs as packages here because it's easier.
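      #
      # Illustrative shape of a deps entry built below, using the example CPV
      # above (the deptypes shown are just one possible combination):
      #   deps["chromeos-base/power_manager-0.0.1-r1"] = {
      #       "action": "merge",
      #       "deptypes": ["buildtime", "runtime"],
      #       "deps": {},
      #   }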
      deps = {}
      for child, priorities in node_deps[0].items():
        if isinstance(child, Package) and child.root == root:
          cpv = str(child.cpv)
          action = str(child.operation)

          # If we're uninstalling a package, check whether Portage is
          # installing a replacement. If so, just depend on the installation
          # of the new package, because the old package will automatically
          # be uninstalled at that time.
          if action == "uninstall":
            for pkg in final_db.match_pkgs(child.slot_atom):
              cpv = str(pkg.cpv)
              action = "merge"
              break

          deps[cpv] = dict(action=action,
                           deptypes=[str(x) for x in priorities],
                           deps={})

      # We've built our list of deps, so we can add our package to the tree.
      if isinstance(node, Package) and node.root == root:
        deps_tree[str(node.cpv)] = dict(action=str(node.operation),
                                        deps=deps)

    # Ask portage for its install plan, so that we can only throw out
    # dependencies that portage throws out.
    deps_info = {}
    for pkg in depgraph.altlist():
      if isinstance(pkg, Package):
        assert pkg.root == root
        self.package_db[pkg.cpv] = pkg

        # Save off info about the package
        deps_info[str(pkg.cpv)] = {"idx": len(deps_info)}

    seconds = time.time() - start
    if "--quiet" not in emerge.opts:
      print("Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60))

    return deps_tree, deps_info

  def PrintTree(self, deps, depth=""):
    """Print the deps we have seen in the emerge output.

    Args:
      deps: Dependency tree structure.
      depth: Allows printing the tree recursively, with indentation.
    """
    for entry in sorted(deps):
      action = deps[entry]["action"]
      print("%s %s (%s)" % (depth, entry, action))
      self.PrintTree(deps[entry]["deps"], depth=depth + " ")

  def GenDependencyGraph(self, deps_tree, deps_info):
    """Generate a doubly linked dependency graph.

    Args:
      deps_tree: Dependency tree structure.
      deps_info: More details on the dependencies.

    Returns:
      Deps graph in the form of a dict of packages, with each package
      specifying a "needs" list and "provides" list.
    """
    emerge = self.emerge

    # deps_map is the actual dependency graph.
    #
    # Each package specifies a "needs" list and a "provides" list. The "needs"
    # list indicates which packages we depend on. The "provides" list
    # indicates the reverse dependencies -- what packages need us.
    #
    # We also provide some other information in the dependency graph:
    # - action: What we're planning on doing with this package. Generally,
    #   "merge", "nomerge", or "uninstall"
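    #
    # Illustrative shape of a deps_map entry (mirrors default_pkg in
    # ReverseTree below; the package name is hypothetical):
    #   deps_map["app-misc/foo-1.0"] = {
    #       "needs": {},        # dep package -> "/"-joined deptype string
    #       "provides": set(),  # reverse deps: packages that need us
    #       "action": "merge",
    #       "nodeps": False,
    #       "binary": False,
    #   }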
    deps_map = {}

    def ReverseTree(packages):
      """Convert tree to digraph.

      Take the tree of package -> requirements and reverse it to a digraph of
      buildable packages -> packages they unblock.

      Args:
        packages: Tree(s) of dependencies.

      Returns:
        Unsanitized digraph.
      """
      binpkg_phases = set(["setup", "preinst", "postinst"])
      needed_dep_types = set(["blocker", "buildtime", "buildtime_slot_op",
                              "runtime", "runtime_slot_op"])
      ignored_dep_types = set(["ignored", "optional", "runtime_post", "soft"])
      all_dep_types = ignored_dep_types | needed_dep_types
      for pkg in packages:

        # Create an entry for the package
        action = packages[pkg]["action"]
        default_pkg = {"needs": {}, "provides": set(), "action": action,
                       "nodeps": False, "binary": False}
        this_pkg = deps_map.setdefault(pkg, default_pkg)

        if pkg in deps_info:
          this_pkg["idx"] = deps_info[pkg]["idx"]

        # If a package doesn't have any defined phases that might use the
        # dependent packages (i.e. pkg_setup, pkg_preinst, or pkg_postinst),
        # we can install this package before its deps are ready.
        emerge_pkg = self.package_db.get(pkg)
        if emerge_pkg and emerge_pkg.type_name == "binary":
          this_pkg["binary"] = True
          defined_phases = emerge_pkg.defined_phases
          defined_binpkg_phases = binpkg_phases.intersection(defined_phases)
          if not defined_binpkg_phases:
            this_pkg["nodeps"] = True

        # Create entries for dependencies of this package first.
        ReverseTree(packages[pkg]["deps"])

        # Add dependencies to this package.
        for dep, dep_item in packages[pkg]["deps"].iteritems():
          # We only need to enforce strict ordering of dependencies if the
          # dependency is a blocker, or is a buildtime or runtime dependency.
          # (I.e., ignored, optional, and runtime_post dependencies don't
          # depend on ordering.)
          dep_types = dep_item["deptypes"]
          if needed_dep_types.intersection(dep_types):
            deps_map[dep]["provides"].add(pkg)
            this_pkg["needs"][dep] = "/".join(dep_types)

          # Verify we processed all appropriate dependency types.
          unknown_dep_types = set(dep_types) - all_dep_types
          if unknown_dep_types:
            print("Unknown dependency types found:")
            print(" %s -> %s (%s)" % (pkg, dep, "/".join(unknown_dep_types)))
            sys.exit(1)

          # If there's a blocker, Portage may need to move files from one
          # package to another, which requires editing the CONTENTS files of
          # both packages. To avoid race conditions while editing this file,
          # the two packages must not be installed in parallel, so we can't
          # safely ignore dependencies. See http://crosbug.com/19328
          if "blocker" in dep_types:
            this_pkg["nodeps"] = False

    def FindCycles():
      """Find cycles in the dependency tree.

      Returns:
        A dict mapping cyclic packages to a dict of the deps that cause
        cycles. For each dep that causes cycles, it returns an example
        traversal of the graph that shows the cycle.
      """

      def FindCyclesAtNode(pkg, cycles, unresolved, resolved):
        """Find cycles in cyclic dependencies starting at specified package.

        Args:
          pkg: Package identifier.
          cycles: A dict mapping cyclic packages to a dict of the deps that
            cause cycles. For each dep that causes cycles, it returns an
            example traversal of the graph that shows the cycle.
          unresolved: Nodes that have been visited but are not fully processed.
          resolved: Nodes that have been visited and are fully processed.
        """
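        # Implementation note: this is a recursive depth-first search where
        # `unresolved` acts as the current DFS stack. Reaching a dependency
        # that is already on that stack means we've found a back edge, i.e. a
        # cycle, and we record one example traversal of it.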
        pkg_cycles = cycles.get(pkg)
        if pkg in resolved and not pkg_cycles:
          # If we already looked at this package, and found no cyclic
          # dependencies, we can stop now.
          return
        unresolved.append(pkg)
        for dep in deps_map[pkg]["needs"]:
          if dep in unresolved:
            idx = unresolved.index(dep)
            mycycle = unresolved[idx:] + [dep]
            for i in xrange(len(mycycle) - 1):
              pkg1, pkg2 = mycycle[i], mycycle[i+1]
              cycles.setdefault(pkg1, {}).setdefault(pkg2, mycycle)
          elif not pkg_cycles or dep not in pkg_cycles:
            # Looks like we haven't seen this edge before.
            FindCyclesAtNode(dep, cycles, unresolved, resolved)
        unresolved.pop()
        resolved.add(pkg)

      cycles, unresolved, resolved = {}, [], set()
      for pkg in deps_map:
        FindCyclesAtNode(pkg, cycles, unresolved, resolved)
      return cycles

    def RemoveUnusedPackages():
      """Remove installed packages, propagating dependencies."""
      # Schedule packages that aren't on the install list for removal
      rm_pkgs = set(deps_map.keys()) - set(deps_info.keys())

      # Remove the packages we don't want, simplifying the graph and making
      # it easier for us to crack cycles.
      for pkg in sorted(rm_pkgs):
        this_pkg = deps_map[pkg]
        needs = this_pkg["needs"]
        provides = this_pkg["provides"]
        for dep in needs:
          dep_provides = deps_map[dep]["provides"]
          dep_provides.update(provides)
          dep_provides.discard(pkg)
          dep_provides.discard(dep)
        for target in provides:
          target_needs = deps_map[target]["needs"]
          target_needs.update(needs)
          target_needs.pop(pkg, None)
          target_needs.pop(target, None)
        del deps_map[pkg]

    def PrintCycleBreak(basedep, dep, mycycle):
      """Print details about a cycle that we are planning on breaking.

      We are breaking a cycle where dep needs basedep. mycycle is an
      example cycle which contains dep -> basedep.
      """

      needs = deps_map[dep]["needs"]
      depinfo = needs.get(basedep, "deleted")

      # It's OK to swap install order for blockers, as long as the two
      # packages aren't installed in parallel. If there is a cycle, then
      # we know the packages depend on each other already, so we can drop the
      # blocker safely without printing a warning.
      if depinfo == "blocker":
        return

      # Notify the user that we're breaking a cycle.
      print("Breaking %s -> %s (%s)" % (dep, basedep, depinfo))

      # Show cycle.
      for i in xrange(len(mycycle) - 1):
        pkg1, pkg2 = mycycle[i], mycycle[i+1]
        needs = deps_map[pkg1]["needs"]
        depinfo = needs.get(pkg2, "deleted")
        if pkg1 == dep and pkg2 == basedep:
          depinfo = depinfo + ", deleting"
        print(" %s -> %s (%s)" % (pkg1, pkg2, depinfo))

    def SanitizeTree():
      """Remove circular dependencies.

      We prune all dependencies involved in cycles that go against the emerge
      ordering. This has a nice property: we're guaranteed to merge
      dependencies in the same order that portage does.

      Because we don't treat any dependencies as "soft" unless they're killed
      by a cycle, we pay attention to a larger number of dependencies when
      merging. This hurts performance a bit, but helps reliability.
      """
      start = time.time()
      cycles = FindCycles()
      while cycles:
        for dep, mycycles in cycles.iteritems():
          for basedep, mycycle in mycycles.iteritems():
            if deps_info[basedep]["idx"] >= deps_info[dep]["idx"]:
              if "--quiet" not in emerge.opts:
                PrintCycleBreak(basedep, dep, mycycle)
              del deps_map[dep]["needs"][basedep]
              deps_map[basedep]["provides"].remove(dep)
        cycles = FindCycles()
      seconds = time.time() - start
      if "--quiet" not in emerge.opts and seconds >= 0.1:
        print("Tree sanitized in %dm%.1fs" % (seconds / 60, seconds % 60))

    def FindRecursiveProvides(pkg, seen):
      """Find all nodes that require a particular package.

      Assumes that graph is acyclic.

      Args:
        pkg: Package identifier.
        seen: Nodes that have been visited so far.
      """
      if pkg in seen:
        return
      seen.add(pkg)
      info = deps_map[pkg]
      info["tprovides"] = info["provides"].copy()
      for dep in info["provides"]:
        FindRecursiveProvides(dep, seen)
        info["tprovides"].update(deps_map[dep]["tprovides"])

    ReverseTree(deps_tree)

    # We need to remove unused packages so that we can use the dependency
    # ordering of the install process to show us what cycles to crack.
    RemoveUnusedPackages()
    SanitizeTree()
    seen = set()
    for pkg in deps_map:
      FindRecursiveProvides(pkg, seen)
    return deps_map

  def PrintInstallPlan(self, deps_map):
    """Print an emerge-style install plan.

    The install plan lists what packages we're installing, in order.
    It's useful for understanding what parallel_emerge is doing.

    Args:
      deps_map: The dependency graph.
    """

    def InstallPlanAtNode(target, deps_map):
      nodes = []
      nodes.append(target)
      for dep in deps_map[target]["provides"]:
        del deps_map[dep]["needs"][target]
        if not deps_map[dep]["needs"]:
          nodes.extend(InstallPlanAtNode(dep, deps_map))
      return nodes

    deps_map = copy.deepcopy(deps_map)
    install_plan = []
    plan = set()
    for target, info in deps_map.iteritems():
      if not info["needs"] and target not in plan:
        for item in InstallPlanAtNode(target, deps_map):
          plan.add(item)
          install_plan.append(self.package_db[item])

    for pkg in plan:
      del deps_map[pkg]

    if deps_map:
      print("Cyclic dependencies:", " ".join(deps_map))
      PrintDepsMap(deps_map)
      sys.exit(1)

    self.emerge.depgraph.display(install_plan)


def PrintDepsMap(deps_map):
  """Print dependency graph; for each package list its prerequisites."""
  for i in sorted(deps_map):
    print("%s: (%s) needs" % (i, deps_map[i]["action"]))
    needs = deps_map[i]["needs"]
    for j in sorted(needs):
      print(" %s" % (j))
    if not needs:
      print(" no dependencies")


class EmergeJobState(object):
  """Structure describing the EmergeJobState."""

  __slots__ = ["done", "filename", "last_notify_timestamp", "last_output_seek",
               "last_output_timestamp", "pkgname", "retcode", "start_timestamp",
               "target", "fetch_only", "unpack_only"]

  def __init__(self, target, pkgname, done, filename, start_timestamp,
               retcode=None, fetch_only=False, unpack_only=False):

    # The full name of the target we're building (e.g.
    # virtual/target-os-1-r60)
    self.target = target

    # The short name of the target we're building (e.g. target-os-1-r60)
    self.pkgname = pkgname

    # Whether the job is done. (True if the job is done; false otherwise.)
    self.done = done

    # The filename where output is currently stored.
    self.filename = filename

    # The timestamp of the last time we printed the name of the log file. We
    # print this at the beginning of the job, so this starts at
    # start_timestamp.
    self.last_notify_timestamp = start_timestamp

    # The location (in bytes) of the end of the last complete line we printed.
    # This starts off at zero. We use this to jump to the right place when we
    # print output from the same ebuild multiple times.
    self.last_output_seek = 0

    # The timestamp of the last time we printed output. Since we haven't
    # printed output yet, this starts at zero.
    self.last_output_timestamp = 0

    # The return code of our job, if the job is actually finished.
    self.retcode = retcode

    # Was this just a fetch job?
    self.fetch_only = fetch_only

    # The timestamp when our job started.
    self.start_timestamp = start_timestamp

    # No emerge, only unpack packages.
    self.unpack_only = unpack_only


def KillHandler(_signum, _frame):
  # Kill self and all subprocesses.
  os.killpg(0, signal.SIGKILL)


def SetupWorkerSignals():
  def ExitHandler(_signum, _frame):
    # Set KILLED flag.
    KILLED.set()

    # Remove our signal handlers so we don't get called recursively.
    signal.signal(signal.SIGINT, KillHandler)
    signal.signal(signal.SIGTERM, KillHandler)

  # Ensure that we exit quietly and cleanly, if possible, when we receive
  # SIGTERM or SIGINT signals. By default, when the user hits CTRL-C, all
  # of the child processes will print details about KeyboardInterrupt
  # exceptions, which isn't very helpful.
  signal.signal(signal.SIGINT, ExitHandler)
  signal.signal(signal.SIGTERM, ExitHandler)


def EmergeProcess(output, target, *args, **kwargs):
  """Merge a package in a subprocess.

  Args:
    output: Temporary file to write output.
    target: The package we'll be processing (for display purposes).
    *args: Arguments to pass to Scheduler constructor.
    **kwargs: Keyword arguments to pass to Scheduler constructor.

  Returns:
    The exit code returned by the subprocess.
  """
  pid = os.fork()
  if pid == 0:
    try:
      proctitle.settitle('EmergeProcess', target)

      # Sanity checks.
      if sys.stdout.fileno() != 1:
        raise Exception("sys.stdout.fileno() != 1")
      if sys.stderr.fileno() != 2:
        raise Exception("sys.stderr.fileno() != 2")

      # - Redirect 1 (stdout) and 2 (stderr) at our temporary file.
      # - Redirect 0 to point at sys.stdin. In this case, sys.stdin
      #   points at a file reading os.devnull, because multiprocessing mucks
      #   with sys.stdin.
      # - Leave the sys.stdin and output filehandles alone.
      fd_pipes = {0: sys.stdin.fileno(),
                  1: output.fileno(),
                  2: output.fileno(),
                  sys.stdin.fileno(): sys.stdin.fileno(),
                  output.fileno(): output.fileno()}
      # pylint: disable=W0212
      portage.process._setup_pipes(fd_pipes, close_fds=False)

      # Portage doesn't like when sys.stdin.fileno() != 0, so point sys.stdin
      # at the filehandle we just created in _setup_pipes.
      if sys.stdin.fileno() != 0:
        sys.__stdin__ = sys.stdin = os.fdopen(0, "r")

      scheduler = Scheduler(*args, **kwargs)

      # Enable blocker handling even though we're in --nodeps mode. This
      # allows us to unmerge the blocker after we've merged the replacement.
      scheduler._opts_ignore_blockers = frozenset()

      # Actually do the merge.
      retval = scheduler.merge()

    # We catch all exceptions here (including SystemExit, KeyboardInterrupt,
    # etc) so as to ensure that we don't confuse the multiprocessing module,
    # which expects that all forked children exit with os._exit().
    # pylint: disable=W0702
    except:
      traceback.print_exc(file=output)
      retval = 1
    sys.stdout.flush()
    sys.stderr.flush()
    output.flush()
    # pylint: disable=W0212
    os._exit(retval)
  else:
    # Return the exit code of the subprocess.
    return os.waitpid(pid, 0)[1]


def UnpackPackage(pkg_state):
  """Unpacks package described by pkg_state.

  Args:
    pkg_state: EmergeJobState object describing target.

  Returns:
    Exit code returned by subprocess.
  """
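  # The commands below amount to roughly this shell pipeline (assuming the
  # compressor chosen by FindCompressor is pbzip2):
  #   pbzip2 -dc --ignore-trailing-garbage=1 $PKGDIR/<target>.tbz2 \
  #     | sudo tar -xf - -C $ROOT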
  pkgdir = os.environ.get("PKGDIR",
                          os.path.join(os.environ["SYSROOT"], "packages"))
  root = os.environ.get("ROOT", os.environ["SYSROOT"])
  path = os.path.join(pkgdir, pkg_state.target + ".tbz2")
  comp = cros_build_lib.FindCompressor(cros_build_lib.COMP_BZIP2)
  cmd = [comp, "-dc"]
  if comp.endswith("pbzip2"):
    cmd.append("--ignore-trailing-garbage=1")
  cmd.append(path)

  result = cros_build_lib.RunCommand(cmd, cwd=root, stdout_to_pipe=True,
                                     print_cmd=False, error_code_ok=True)

  # If we were not successful, return now and don't attempt untar.
  if result.returncode:
    return result.returncode

  cmd = ["sudo", "tar", "-xf", "-", "-C", root]
  result = cros_build_lib.RunCommand(cmd, cwd=root, input=result.output,
                                     print_cmd=False, error_code_ok=True)

  return result.returncode


def EmergeWorker(task_queue, job_queue, emerge, package_db, fetch_only=False,
                 unpack_only=False):
  """This worker emerges any packages given to it on the task_queue.

  Args:
    task_queue: The queue of tasks for this worker to do.
    job_queue: The queue of results from the worker.
    emerge: An EmergeData() object.
    package_db: A dict, mapping package ids to portage Package objects.
    fetch_only: A bool, indicating if we should just fetch the target.
    unpack_only: A bool, indicating if we should just unpack the target.

  It expects package identifiers to be passed to it via task_queue. When
  a task is started, it pushes the (target, filename) to the started_queue.
  The output is stored in filename. When a merge starts or finishes, we push
  EmergeJobState objects to the job_queue.
  """
  if fetch_only:
    mode = 'fetch'
  elif unpack_only:
    mode = 'unpack'
  else:
    mode = 'emerge'
  proctitle.settitle('EmergeWorker', mode, '[idle]')

  SetupWorkerSignals()
  settings, trees, mtimedb = emerge.settings, emerge.trees, emerge.mtimedb

  # Disable flushing of caches to save on I/O.
  root = emerge.settings["ROOT"]
  vardb = emerge.trees[root]["vartree"].dbapi
  vardb._flush_cache_enabled = False  # pylint: disable=protected-access
  bindb = emerge.trees[root]["bintree"].dbapi
  # Might be a set, might be a list, might be None; no clue, just use shallow
  # copy to ensure we can roll it back.
  # pylint: disable=W0212
  original_remotepkgs = copy.copy(bindb.bintree._remotepkgs)

  opts, spinner = emerge.opts, emerge.spinner
  opts["--nodeps"] = True
  if fetch_only:
    opts["--fetchonly"] = True

  while True:
    # Wait for a new item to show up on the queue. This is a blocking wait,
    # so if there's nothing to do, we just sit here.
    pkg_state = task_queue.get()
    if pkg_state is None:
      # If target is None, this means that the main thread wants us to quit.
      # The other workers need to exit too, so we'll push the message back on
      # to the queue so they'll get it too.
      task_queue.put(None)
      return
    if KILLED.is_set():
      return

    target = pkg_state.target
    proctitle.settitle('EmergeWorker', mode, target)

    db_pkg = package_db[target]

    if db_pkg.type_name == "binary":
      if not fetch_only and pkg_state.fetched_successfully:
        # Ensure portage doesn't think our pkg is remote- else it'll force
        # a redownload of it (even if the on-disk file is fine). In-memory
        # caching basically, implemented dumbly.
        bindb.bintree._remotepkgs = None
      else:
        bindb.bintree._remotepkgs = original_remotepkgs
1070
David Jamesfcb70ef2011-02-02 16:02:30 -08001071 db_pkg.root_config = emerge.root_config
1072 install_list = [db_pkg]
1073 pkgname = db_pkg.pf
1074 output = tempfile.NamedTemporaryFile(prefix=pkgname + "-", delete=False)
David James01b1e0f2012-06-07 17:18:05 -07001075 os.chmod(output.name, 644)
David Jamesfcb70ef2011-02-02 16:02:30 -08001076 start_timestamp = time.time()
Brian Harring0be85c62012-03-17 19:52:12 -07001077 job = EmergeJobState(target, pkgname, False, output.name, start_timestamp,
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001078 fetch_only=fetch_only, unpack_only=unpack_only)
David Jamesfcb70ef2011-02-02 16:02:30 -08001079 job_queue.put(job)
1080 if "--pretend" in opts:
1081 retcode = 0
1082 else:
David Jamesfcb70ef2011-02-02 16:02:30 -08001083 try:
David James386ccd12011-05-04 20:17:42 -07001084 emerge.scheduler_graph.mergelist = install_list
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001085 if unpack_only:
1086 retcode = UnpackPackage(pkg_state)
1087 else:
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001088 retcode = EmergeProcess(output, target, settings, trees, mtimedb,
1089 opts, spinner, favorites=emerge.favorites,
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001090 graph_config=emerge.scheduler_graph)
David Jamesfcb70ef2011-02-02 16:02:30 -08001091 except Exception:
1092 traceback.print_exc(file=output)
1093 retcode = 1
David James1ed3e252011-10-05 20:26:15 -07001094 output.close()
David Jamesfcb70ef2011-02-02 16:02:30 -08001095
David James7358d032011-05-19 10:40:03 -07001096 if KILLED.is_set():
1097 return
1098
David Jamesfcb70ef2011-02-02 16:02:30 -08001099 job = EmergeJobState(target, pkgname, True, output.name, start_timestamp,
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001100 retcode, fetch_only=fetch_only,
1101 unpack_only=unpack_only)
David Jamesfcb70ef2011-02-02 16:02:30 -08001102 job_queue.put(job)
1103
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001104 # Set the title back to idle as the multiprocess pool won't destroy us;
1105 # when another job comes up, it'll re-use this process.
1106 proctitle.settitle('EmergeWorker', mode, '[idle]')
1107
David Jamesfcb70ef2011-02-02 16:02:30 -08001108
1109class LinePrinter(object):
1110 """Helper object to print a single line."""
1111
1112 def __init__(self, line):
1113 self.line = line
1114
David James321490a2012-12-17 12:05:56 -08001115 def Print(self, _seek_locations):
Mike Frysinger383367e2014-09-16 15:06:17 -04001116 print(self.line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001117
1118
1119class JobPrinter(object):
1120 """Helper object to print output of a job."""
1121
1122 def __init__(self, job, unlink=False):
1123 """Print output of job.
1124
Mike Frysinger02e1e072013-11-10 22:11:34 -05001125 If unlink is True, unlink the job output file when done.
1126 """
David Jamesfcb70ef2011-02-02 16:02:30 -08001127 self.current_time = time.time()
1128 self.job = job
1129 self.unlink = unlink
1130
1131 def Print(self, seek_locations):
1132
1133 job = self.job
1134
1135 # Calculate how long the job has been running.
1136 seconds = self.current_time - job.start_timestamp
1137
1138 # Note that we've printed out the job so far.
1139 job.last_output_timestamp = self.current_time
1140
1141 # Note that we're starting the job
1142 info = "job %s (%dm%.1fs)" % (job.pkgname, seconds / 60, seconds % 60)
1143 last_output_seek = seek_locations.get(job.filename, 0)
1144 if last_output_seek:
Mike Frysinger383367e2014-09-16 15:06:17 -04001145 print("=== Continue output for %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001146 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001147 print("=== Start output for %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001148
1149 # Print actual output from job
1150 f = codecs.open(job.filename, encoding='utf-8', errors='replace')
1151 f.seek(last_output_seek)
1152 prefix = job.pkgname + ":"
1153 for line in f:
1154
1155 # Save off our position in the file
1156 if line and line[-1] == "\n":
1157 last_output_seek = f.tell()
1158 line = line[:-1]
1159
1160 # Print our line
Mike Frysinger383367e2014-09-16 15:06:17 -04001161 print(prefix, line.encode('utf-8', 'replace'))
David Jamesfcb70ef2011-02-02 16:02:30 -08001162 f.close()
1163
1164 # Save our last spot in the file so that we don't print out the same
1165 # location twice.
1166 seek_locations[job.filename] = last_output_seek
1167
1168 # Note end of output section
1169 if job.done:
Mike Frysinger383367e2014-09-16 15:06:17 -04001170 print("=== Complete: %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001171 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001172 print("=== Still running: %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001173
1174 if self.unlink:
1175 os.unlink(job.filename)
1176
1177
1178def PrintWorker(queue):
1179 """A worker that prints stuff to the screen as requested."""
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001180 proctitle.settitle('PrintWorker')
David Jamesfcb70ef2011-02-02 16:02:30 -08001181
David James321490a2012-12-17 12:05:56 -08001182 def ExitHandler(_signum, _frame):
David James7358d032011-05-19 10:40:03 -07001183 # Set KILLED flag.
1184 KILLED.set()
1185
David Jamesfcb70ef2011-02-02 16:02:30 -08001186 # Switch to default signal handlers so that we'll die after two signals.
David James7358d032011-05-19 10:40:03 -07001187 signal.signal(signal.SIGINT, KillHandler)
1188 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -08001189
1190 # Don't exit on the first SIGINT / SIGTERM, because the parent worker will
1191 # handle it and tell us when we need to exit.
1192 signal.signal(signal.SIGINT, ExitHandler)
1193 signal.signal(signal.SIGTERM, ExitHandler)
1194
1195 # seek_locations is a map indicating the position we are at in each file.
1196 # It starts off empty, but is set by the various Print jobs as we go along
1197 # to indicate where we left off in each file.
1198 seek_locations = {}
1199 while True:
1200 try:
1201 job = queue.get()
1202 if job:
1203 job.Print(seek_locations)
David Jamesbccf8eb2011-07-27 14:06:06 -07001204 sys.stdout.flush()
David Jamesfcb70ef2011-02-02 16:02:30 -08001205 else:
1206 break
1207 except IOError as ex:
1208 if ex.errno == errno.EINTR:
1209 # Looks like we received a signal. Keep printing.
1210 continue
1211 raise
1212
Brian Harring867e2362012-03-17 04:05:17 -07001213
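# Illustrative sketch, not called by this script: the protocol for the print
# queue defined above.  The parent puts LinePrinter / JobPrinter objects on a
# multiprocessing.Queue, a single PrintWorker process drains it, and a final
# None tells the worker to exit (this mirrors what EmergeQueue does below).
def _example_print_queue_usage():
  """Hedged example of driving PrintWorker with LinePrinter objects."""
  queue = multiprocessing.Queue()
  worker = multiprocessing.Process(target=PrintWorker, args=[queue])
  worker.start()
  queue.put(LinePrinter("hello from the print worker"))
  queue.put(None)  # Sentinel: PrintWorker breaks out of its loop on None.
  worker.join()
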
Brian Harring0be85c62012-03-17 19:52:12 -07001214class TargetState(object):
Don Garrett25f309a2014-03-19 14:02:12 -07001215  """Structure describing the state of a single build target."""
Brian Harring867e2362012-03-17 04:05:17 -07001216
Brian Harring0be85c62012-03-17 19:52:12 -07001217 __slots__ = ("target", "info", "score", "prefetched", "fetched_successfully")
Brian Harring867e2362012-03-17 04:05:17 -07001218
David James321490a2012-12-17 12:05:56 -08001219 def __init__(self, target, info):
Brian Harring867e2362012-03-17 04:05:17 -07001220 self.target, self.info = target, info
Brian Harring0be85c62012-03-17 19:52:12 -07001221 self.fetched_successfully = False
1222 self.prefetched = False
David James321490a2012-12-17 12:05:56 -08001223 self.score = None
Brian Harring867e2362012-03-17 04:05:17 -07001224 self.update_score()
1225
1226 def __cmp__(self, other):
1227 return cmp(self.score, other.score)
1228
1229 def update_score(self):
1230 self.score = (
1231 -len(self.info["tprovides"]),
Brian Harring0be85c62012-03-17 19:52:12 -07001232 len(self.info["needs"]),
Brian Harring11c5eeb2012-03-18 11:02:39 -07001233 not self.info["binary"],
Brian Harring867e2362012-03-17 04:05:17 -07001234 -len(self.info["provides"]),
1235 self.info["idx"],
1236 self.target,
1237 )
1238
1239
1240class ScoredHeap(object):
Don Garrett25f309a2014-03-19 14:02:12 -07001241  """Implementation of a general-purpose scored heap."""
Brian Harring867e2362012-03-17 04:05:17 -07001242
Brian Harring0be85c62012-03-17 19:52:12 -07001243 __slots__ = ("heap", "_heap_set")
1244
Brian Harring867e2362012-03-17 04:05:17 -07001245 def __init__(self, initial=()):
Brian Harring0be85c62012-03-17 19:52:12 -07001246 self.heap = list()
1247 self._heap_set = set()
1248 if initial:
1249 self.multi_put(initial)
Brian Harring867e2362012-03-17 04:05:17 -07001250
1251 def get(self):
Brian Harring0be85c62012-03-17 19:52:12 -07001252 item = heapq.heappop(self.heap)
1253 self._heap_set.remove(item.target)
1254 return item
Brian Harring867e2362012-03-17 04:05:17 -07001255
Brian Harring0be85c62012-03-17 19:52:12 -07001256 def put(self, item):
1257 if not isinstance(item, TargetState):
1258 raise ValueError("Item %r isn't a TargetState" % (item,))
1259 heapq.heappush(self.heap, item)
1260 self._heap_set.add(item.target)
Brian Harring867e2362012-03-17 04:05:17 -07001261
Brian Harring0be85c62012-03-17 19:52:12 -07001262 def multi_put(self, sequence):
1263 sequence = list(sequence)
1264 self.heap.extend(sequence)
1265 self._heap_set.update(x.target for x in sequence)
Brian Harring867e2362012-03-17 04:05:17 -07001266 self.sort()
1267
David James5c9996d2012-03-24 10:50:46 -07001268 def sort(self):
1269 heapq.heapify(self.heap)
1270
Brian Harring0be85c62012-03-17 19:52:12 -07001271 def __contains__(self, target):
1272 return target in self._heap_set
1273
1274 def __nonzero__(self):
1275 return bool(self.heap)
1276
Brian Harring867e2362012-03-17 04:05:17 -07001277 def __len__(self):
1278 return len(self.heap)
1279
1280
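# Illustrative sketch, not part of the original flow: basic ScoredHeap usage
# with hypothetical targets.  get() pops the lowest-scored (highest priority)
# TargetState, and membership and len() are tracked by target name.
def _example_scored_heap_usage():
  """Hedged example of ScoredHeap put / get / membership."""
  heap = ScoredHeap()
  heap.multi_put([
      TargetState("dev-lang/python", {
          "tprovides": [], "needs": {}, "binary": False,
          "provides": [], "idx": 0}),
      TargetState("sys-apps/portage", {
          "tprovides": [], "needs": {}, "binary": True,
          "provides": [], "idx": 1}),
  ])
  assert "dev-lang/python" in heap and len(heap) == 2
  # The binary package scores lower (not binary == False), so it pops first.
  return heap.get().target

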
David Jamesfcb70ef2011-02-02 16:02:30 -08001281class EmergeQueue(object):
1282 """Class to schedule emerge jobs according to a dependency graph."""
1283
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001284 def __init__(self, deps_map, emerge, package_db, show_output, unpack_only):
David Jamesfcb70ef2011-02-02 16:02:30 -08001285 # Store the dependency graph.
1286 self._deps_map = deps_map
Brian Harring0be85c62012-03-17 19:52:12 -07001287 self._state_map = {}
David Jamesfcb70ef2011-02-02 16:02:30 -08001288 # Initialize the running queue to empty
Brian Harring0be85c62012-03-17 19:52:12 -07001289 self._build_jobs = {}
1290 self._build_ready = ScoredHeap()
1291 self._fetch_jobs = {}
1292 self._fetch_ready = ScoredHeap()
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001293 self._unpack_jobs = {}
1294 self._unpack_ready = ScoredHeap()
David Jamesfcb70ef2011-02-02 16:02:30 -08001295 # List of total package installs represented in deps_map.
1296 install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"]
1297 self._total_jobs = len(install_jobs)
1298 self._show_output = show_output
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001299 self._unpack_only = unpack_only
David Jamesfcb70ef2011-02-02 16:02:30 -08001300
1301 if "--pretend" in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001302 print("Skipping merge because of --pretend mode.")
David Jamesfcb70ef2011-02-02 16:02:30 -08001303 sys.exit(0)
1304
David Jamesaaf49e42014-04-24 09:40:05 -07001305 # Set up a session so we can easily terminate all children.
1306 self._SetupSession()
David James7358d032011-05-19 10:40:03 -07001307
David Jamesfcb70ef2011-02-02 16:02:30 -08001308 # Setup scheduler graph object. This is used by the child processes
1309 # to help schedule jobs.
1310 emerge.scheduler_graph = emerge.depgraph.schedulerGraph()
1311
1312 # Calculate how many jobs we can run in parallel. We don't want to pass
1313 # the --jobs flag over to emerge itself, because that'll tell emerge to
1314 # hide its output, and said output is quite useful for debugging hung
1315 # jobs.
1316 procs = min(self._total_jobs,
1317 emerge.opts.pop("--jobs", multiprocessing.cpu_count()))
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001318 self._build_procs = self._unpack_procs = self._fetch_procs = max(1, procs)
David James8c7e5e32011-06-28 11:26:03 -07001319 self._load_avg = emerge.opts.pop("--load-average", None)
David Jamesfcb70ef2011-02-02 16:02:30 -08001320 self._job_queue = multiprocessing.Queue()
1321 self._print_queue = multiprocessing.Queue()
Brian Harring0be85c62012-03-17 19:52:12 -07001322
1323 self._fetch_queue = multiprocessing.Queue()
    # The trailing True marks these workers as fetch-only.
1324    args = (self._fetch_queue, self._job_queue, emerge, package_db, True)
1325 self._fetch_pool = multiprocessing.Pool(self._fetch_procs, EmergeWorker,
1326 args)
1327
1328 self._build_queue = multiprocessing.Queue()
1329 args = (self._build_queue, self._job_queue, emerge, package_db)
1330 self._build_pool = multiprocessing.Pool(self._build_procs, EmergeWorker,
1331 args)
1332
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001333 if self._unpack_only:
1334 # Unpack pool only required on unpack_only jobs.
1335 self._unpack_queue = multiprocessing.Queue()
      # The trailing False, True mark these workers as unpack-only rather
      # than fetch-only.
1336      args = (self._unpack_queue, self._job_queue, emerge, package_db, False,
1337              True)
1338 self._unpack_pool = multiprocessing.Pool(self._unpack_procs, EmergeWorker,
1339 args)
1340
David Jamesfcb70ef2011-02-02 16:02:30 -08001341 self._print_worker = multiprocessing.Process(target=PrintWorker,
1342 args=[self._print_queue])
1343 self._print_worker.start()
1344
1345 # Initialize the failed queue to empty.
1346 self._retry_queue = []
1347 self._failed = set()
1348
David Jamesfcb70ef2011-02-02 16:02:30 -08001349 # Setup an exit handler so that we print nice messages if we are
1350 # terminated.
1351 self._SetupExitHandler()
1352
1353 # Schedule our jobs.
Brian Harring0be85c62012-03-17 19:52:12 -07001354 self._state_map.update(
1355 (pkg, TargetState(pkg, data)) for pkg, data in deps_map.iteritems())
1356 self._fetch_ready.multi_put(self._state_map.itervalues())
David Jamesfcb70ef2011-02-02 16:02:30 -08001357
David Jamesaaf49e42014-04-24 09:40:05 -07001358 def _SetupSession(self):
1359 """Set up a session so we can easily terminate all children."""
1360 # When we call os.setsid(), this sets up a session / process group for this
1361 # process and all children. These session groups are needed so that we can
1362 # easily kill all children (including processes launched by emerge) before
1363 # we exit.
1364 #
1365 # One unfortunate side effect of os.setsid() is that it blocks CTRL-C from
1366 # being received. To work around this, we only call os.setsid() in a forked
1367 # process, so that the parent can still watch for CTRL-C. The parent will
1368 # just sit around, watching for signals and propagating them to the child,
1369 # until the child exits.
1370 #
1371 # TODO(davidjames): It would be nice if we could replace this with cgroups.
1372 pid = os.fork()
1373 if pid == 0:
1374 os.setsid()
1375 else:
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001376 proctitle.settitle('SessionManager')
1377
David Jamesaaf49e42014-04-24 09:40:05 -07001378 def PropagateToChildren(signum, _frame):
1379 # Just propagate the signals down to the child. We'll exit when the
1380 # child does.
1381 try:
1382 os.kill(pid, signum)
1383 except OSError as ex:
1384 if ex.errno != errno.ESRCH:
1385 raise
1386 signal.signal(signal.SIGINT, PropagateToChildren)
1387 signal.signal(signal.SIGTERM, PropagateToChildren)
1388
1389 def StopGroup(_signum, _frame):
1390 # When we get stopped, stop the children.
1391 try:
1392 os.killpg(pid, signal.SIGSTOP)
1393 os.kill(0, signal.SIGSTOP)
1394 except OSError as ex:
1395 if ex.errno != errno.ESRCH:
1396 raise
1397 signal.signal(signal.SIGTSTP, StopGroup)
1398
1399 def ContinueGroup(_signum, _frame):
1400 # Launch the children again after being stopped.
1401 try:
1402 os.killpg(pid, signal.SIGCONT)
1403 except OSError as ex:
1404 if ex.errno != errno.ESRCH:
1405 raise
1406 signal.signal(signal.SIGCONT, ContinueGroup)
1407
1408 # Loop until the children exit. We exit with os._exit to be sure we
1409      # don't run any finalizers (those will be run by the child process).
1410 # pylint: disable=W0212
1411 while True:
1412 try:
1413 # Wait for the process to exit. When it does, exit with the return
1414 # value of the subprocess.
Mike Frysingere2d8f0d2014-11-01 13:09:26 -04001415 os._exit(process_util.GetExitStatus(os.waitpid(pid, 0)[1]))
David Jamesaaf49e42014-04-24 09:40:05 -07001416 except OSError as ex:
1417 if ex.errno == errno.EINTR:
1418 continue
1419 traceback.print_exc()
1420 os._exit(1)
1421 except BaseException:
1422 traceback.print_exc()
1423 os._exit(1)
1424
David Jamesfcb70ef2011-02-02 16:02:30 -08001425 def _SetupExitHandler(self):
1426
David James321490a2012-12-17 12:05:56 -08001427 def ExitHandler(signum, _frame):
David James7358d032011-05-19 10:40:03 -07001428 # Set KILLED flag.
1429 KILLED.set()
David Jamesfcb70ef2011-02-02 16:02:30 -08001430
1431 # Kill our signal handlers so we don't get called recursively
David James7358d032011-05-19 10:40:03 -07001432 signal.signal(signal.SIGINT, KillHandler)
1433 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -08001434
1435 # Print our current job status
Brian Harring0be85c62012-03-17 19:52:12 -07001436 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001437 if job:
1438 self._print_queue.put(JobPrinter(job, unlink=True))
1439
1440 # Notify the user that we are exiting
1441 self._Print("Exiting on signal %s" % signum)
David James7358d032011-05-19 10:40:03 -07001442 self._print_queue.put(None)
1443 self._print_worker.join()
David Jamesfcb70ef2011-02-02 16:02:30 -08001444
1445 # Kill child threads, then exit.
David James7358d032011-05-19 10:40:03 -07001446 os.killpg(0, signal.SIGKILL)
David Jamesfcb70ef2011-02-02 16:02:30 -08001447 sys.exit(1)
1448
1449 # Print out job status when we are killed
1450 signal.signal(signal.SIGINT, ExitHandler)
1451 signal.signal(signal.SIGTERM, ExitHandler)
1452
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001453 def _ScheduleUnpack(self, pkg_state):
1454 self._unpack_jobs[pkg_state.target] = None
1455 self._unpack_queue.put(pkg_state)
1456
Brian Harring0be85c62012-03-17 19:52:12 -07001457 def _Schedule(self, pkg_state):
David Jamesfcb70ef2011-02-02 16:02:30 -08001458    # We maintain a tree of all deps; if this package doesn't need
David James8c7e5e32011-06-28 11:26:03 -07001459    # to be installed, just free up its children and continue.
David Jamesfcb70ef2011-02-02 16:02:30 -08001460 # It is possible to reinstall deps of deps, without reinstalling
1461 # first level deps, like so:
Mike Frysingerfd969312014-04-02 22:16:42 -04001462 # virtual/target-os (merge) -> eselect (nomerge) -> python (merge)
Brian Harring0be85c62012-03-17 19:52:12 -07001463 this_pkg = pkg_state.info
1464 target = pkg_state.target
1465 if pkg_state.info is not None:
1466 if this_pkg["action"] == "nomerge":
1467 self._Finish(target)
1468 elif target not in self._build_jobs:
1469 # Kick off the build if it's marked to be built.
1470 self._build_jobs[target] = None
1471 self._build_queue.put(pkg_state)
1472 return True
David Jamesfcb70ef2011-02-02 16:02:30 -08001473
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001474 def _ScheduleLoop(self, unpack_only=False):
1475 if unpack_only:
1476 ready_queue = self._unpack_ready
1477 jobs_queue = self._unpack_jobs
1478 procs = self._unpack_procs
1479 else:
1480 ready_queue = self._build_ready
1481 jobs_queue = self._build_jobs
1482 procs = self._build_procs
1483
David James8c7e5e32011-06-28 11:26:03 -07001484 # If the current load exceeds our desired load average, don't schedule
1485 # more than one job.
1486 if self._load_avg and os.getloadavg()[0] > self._load_avg:
1487 needed_jobs = 1
1488 else:
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001489 needed_jobs = procs
David James8c7e5e32011-06-28 11:26:03 -07001490
1491 # Schedule more jobs.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001492 while ready_queue and len(jobs_queue) < needed_jobs:
1493 state = ready_queue.get()
1494 if unpack_only:
1495 self._ScheduleUnpack(state)
1496 else:
1497 if state.target not in self._failed:
1498 self._Schedule(state)
David Jamesfcb70ef2011-02-02 16:02:30 -08001499
1500 def _Print(self, line):
1501 """Print a single line."""
1502 self._print_queue.put(LinePrinter(line))
1503
1504 def _Status(self):
1505 """Print status."""
1506 current_time = time.time()
1507 no_output = True
1508
1509 # Print interim output every minute if --show-output is used. Otherwise,
1510 # print notifications about running packages every 2 minutes, and print
1511 # full output for jobs that have been running for 60 minutes or more.
1512 if self._show_output:
1513 interval = 60
1514 notify_interval = 0
1515 else:
1516 interval = 60 * 60
1517 notify_interval = 60 * 2
David James321490a2012-12-17 12:05:56 -08001518 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001519 if job:
1520 last_timestamp = max(job.start_timestamp, job.last_output_timestamp)
1521 if last_timestamp + interval < current_time:
1522 self._print_queue.put(JobPrinter(job))
1523 job.last_output_timestamp = current_time
1524 no_output = False
1525 elif (notify_interval and
1526 job.last_notify_timestamp + notify_interval < current_time):
1527 job_seconds = current_time - job.start_timestamp
1528 args = (job.pkgname, job_seconds / 60, job_seconds % 60, job.filename)
1529 info = "Still building %s (%dm%.1fs). Logs in %s" % args
1530 job.last_notify_timestamp = current_time
1531 self._Print(info)
1532 no_output = False
1533
1534 # If we haven't printed any messages yet, print a general status message
1535 # here.
1536 if no_output:
1537 seconds = current_time - GLOBAL_START
Brian Harring0be85c62012-03-17 19:52:12 -07001538 fjobs, fready = len(self._fetch_jobs), len(self._fetch_ready)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001539 ujobs, uready = len(self._unpack_jobs), len(self._unpack_ready)
Brian Harring0be85c62012-03-17 19:52:12 -07001540 bjobs, bready = len(self._build_jobs), len(self._build_ready)
1541 retries = len(self._retry_queue)
1542 pending = max(0, len(self._deps_map) - fjobs - bjobs)
1543 line = "Pending %s/%s, " % (pending, self._total_jobs)
1544 if fjobs or fready:
1545 line += "Fetching %s/%s, " % (fjobs, fready + fjobs)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001546 if ujobs or uready:
1547 line += "Unpacking %s/%s, " % (ujobs, uready + ujobs)
Brian Harring0be85c62012-03-17 19:52:12 -07001548 if bjobs or bready or retries:
1549 line += "Building %s/%s, " % (bjobs, bready + bjobs)
1550 if retries:
1551 line += "Retrying %s, " % (retries,)
Mike Frysingerd6e2df02014-11-26 02:55:04 -05001552 load = " ".join(str(x) for x in os.getloadavg())
1553 line += ("[Time %dm%.1fs Load %s]" % (seconds / 60, seconds % 60, load))
Brian Harring0be85c62012-03-17 19:52:12 -07001554 self._Print(line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001555
1556 def _Finish(self, target):
David James8c7e5e32011-06-28 11:26:03 -07001557 """Mark a target as completed and unblock dependencies."""
1558 this_pkg = self._deps_map[target]
1559 if this_pkg["needs"] and this_pkg["nodeps"]:
1560 # We got installed, but our deps have not been installed yet. Dependent
1561 # packages should only be installed when our needs have been fully met.
1562 this_pkg["action"] = "nomerge"
1563 else:
David James8c7e5e32011-06-28 11:26:03 -07001564 for dep in this_pkg["provides"]:
1565 dep_pkg = self._deps_map[dep]
Brian Harring0be85c62012-03-17 19:52:12 -07001566 state = self._state_map[dep]
David James8c7e5e32011-06-28 11:26:03 -07001567 del dep_pkg["needs"][target]
Brian Harring0be85c62012-03-17 19:52:12 -07001568 state.update_score()
1569 if not state.prefetched:
1570 if dep in self._fetch_ready:
1571 # If it's not currently being fetched, update the prioritization
1572 self._fetch_ready.sort()
1573 elif not dep_pkg["needs"]:
David James8c7e5e32011-06-28 11:26:03 -07001574 if dep_pkg["nodeps"] and dep_pkg["action"] == "nomerge":
1575 self._Finish(dep)
1576 else:
Brian Harring0be85c62012-03-17 19:52:12 -07001577 self._build_ready.put(self._state_map[dep])
David James8c7e5e32011-06-28 11:26:03 -07001578 self._deps_map.pop(target)
David Jamesfcb70ef2011-02-02 16:02:30 -08001579
1580 def _Retry(self):
David James8c7e5e32011-06-28 11:26:03 -07001581 while self._retry_queue:
Brian Harring0be85c62012-03-17 19:52:12 -07001582 state = self._retry_queue.pop(0)
1583 if self._Schedule(state):
1584 self._Print("Retrying emerge of %s." % state.target)
David James8c7e5e32011-06-28 11:26:03 -07001585 break
David Jamesfcb70ef2011-02-02 16:02:30 -08001586
Brian Harringa43f5952012-04-12 01:19:34 -07001587 def _Shutdown(self):
David Jamesfcb70ef2011-02-02 16:02:30 -08001588 # Tell emerge workers to exit. They all exit when 'None' is pushed
1589 # to the queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001590
Brian Harringa43f5952012-04-12 01:19:34 -07001591    # Shut down the workers first, then the job queue (which is how they feed
1592    # results back), and finally the print queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001593
Brian Harringa43f5952012-04-12 01:19:34 -07001594 def _stop(queue, pool):
1595 if pool is None:
1596 return
1597 try:
1598 queue.put(None)
1599 pool.close()
1600 pool.join()
1601 finally:
1602 pool.terminate()
Brian Harring0be85c62012-03-17 19:52:12 -07001603
Brian Harringa43f5952012-04-12 01:19:34 -07001604 _stop(self._fetch_queue, self._fetch_pool)
1605 self._fetch_queue = self._fetch_pool = None
Brian Harring0be85c62012-03-17 19:52:12 -07001606
Brian Harringa43f5952012-04-12 01:19:34 -07001607 _stop(self._build_queue, self._build_pool)
1608 self._build_queue = self._build_pool = None
1609
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001610 if self._unpack_only:
1611 _stop(self._unpack_queue, self._unpack_pool)
1612 self._unpack_queue = self._unpack_pool = None
1613
Brian Harringa43f5952012-04-12 01:19:34 -07001614 if self._job_queue is not None:
1615 self._job_queue.close()
1616 self._job_queue = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001617
1618 # Now that our workers are finished, we can kill the print queue.
Brian Harringa43f5952012-04-12 01:19:34 -07001619 if self._print_worker is not None:
1620 try:
1621 self._print_queue.put(None)
1622 self._print_queue.close()
1623 self._print_worker.join()
1624 finally:
1625 self._print_worker.terminate()
1626 self._print_queue = self._print_worker = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001627
1628 def Run(self):
1629 """Run through the scheduled ebuilds.
1630
1631 Keep running so long as we have uninstalled packages in the
1632 dependency graph to merge.
1633 """
Brian Harringa43f5952012-04-12 01:19:34 -07001634 if not self._deps_map:
1635 return
1636
Brian Harring0be85c62012-03-17 19:52:12 -07001637 # Start the fetchers.
1638 for _ in xrange(min(self._fetch_procs, len(self._fetch_ready))):
1639 state = self._fetch_ready.get()
1640 self._fetch_jobs[state.target] = None
1641 self._fetch_queue.put(state)
1642
1643 # Print an update, then get going.
1644 self._Status()
1645
David Jamese703d0f2012-01-12 16:27:45 -08001646 retried = set()
David Jamesfcb70ef2011-02-02 16:02:30 -08001647 while self._deps_map:
1648 # Check here that we are actually waiting for something.
Brian Harring0be85c62012-03-17 19:52:12 -07001649 if (self._build_queue.empty() and
David Jamesfcb70ef2011-02-02 16:02:30 -08001650 self._job_queue.empty() and
Brian Harring0be85c62012-03-17 19:52:12 -07001651 not self._fetch_jobs and
1652 not self._fetch_ready and
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001653 not self._unpack_jobs and
1654 not self._unpack_ready and
Brian Harring0be85c62012-03-17 19:52:12 -07001655 not self._build_jobs and
1656 not self._build_ready and
David Jamesfcb70ef2011-02-02 16:02:30 -08001657 self._deps_map):
1658 # If we have failed on a package, retry it now.
1659 if self._retry_queue:
1660 self._Retry()
1661 else:
David Jamesfcb70ef2011-02-02 16:02:30 -08001662 # Tell the user why we're exiting.
1663 if self._failed:
Mike Frysinger383367e2014-09-16 15:06:17 -04001664 print('Packages failed:\n\t%s' % '\n\t'.join(self._failed))
David James0eae23e2012-07-03 15:04:25 -07001665 status_file = os.environ.get("PARALLEL_EMERGE_STATUS_FILE")
1666 if status_file:
David James321490a2012-12-17 12:05:56 -08001667 failed_pkgs = set(portage.versions.cpv_getkey(x)
1668 for x in self._failed)
David James0eae23e2012-07-03 15:04:25 -07001669 with open(status_file, "a") as f:
1670 f.write("%s\n" % " ".join(failed_pkgs))
David Jamesfcb70ef2011-02-02 16:02:30 -08001671 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001672 print("Deadlock! Circular dependencies!")
David Jamesfcb70ef2011-02-02 16:02:30 -08001673 sys.exit(1)
1674
David James321490a2012-12-17 12:05:56 -08001675 for _ in xrange(12):
David Jamesa74289a2011-08-12 10:41:24 -07001676 try:
1677 job = self._job_queue.get(timeout=5)
1678 break
1679 except Queue.Empty:
1680 # Check if any more jobs can be scheduled.
1681 self._ScheduleLoop()
1682 else:
Brian Harring706747c2012-03-16 03:04:31 -07001683 # Print an update every 60 seconds.
David Jamesfcb70ef2011-02-02 16:02:30 -08001684 self._Status()
1685 continue
1686
1687 target = job.target
1688
Brian Harring0be85c62012-03-17 19:52:12 -07001689 if job.fetch_only:
1690 if not job.done:
1691 self._fetch_jobs[job.target] = job
1692 else:
1693 state = self._state_map[job.target]
1694 state.prefetched = True
1695 state.fetched_successfully = (job.retcode == 0)
1696 del self._fetch_jobs[job.target]
1697 self._Print("Fetched %s in %2.2fs"
1698 % (target, time.time() - job.start_timestamp))
1699
1700 if self._show_output or job.retcode != 0:
1701 self._print_queue.put(JobPrinter(job, unlink=True))
1702 else:
1703 os.unlink(job.filename)
1704 # Failure or not, let build work with it next.
1705 if not self._deps_map[job.target]["needs"]:
1706 self._build_ready.put(state)
1707 self._ScheduleLoop()
1708
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001709 if self._unpack_only and job.retcode == 0:
1710 self._unpack_ready.put(state)
1711 self._ScheduleLoop(unpack_only=True)
1712
Brian Harring0be85c62012-03-17 19:52:12 -07001713 if self._fetch_ready:
1714 state = self._fetch_ready.get()
1715 self._fetch_queue.put(state)
1716 self._fetch_jobs[state.target] = None
1717 else:
1718 # Minor optimization; shut down fetchers early since we know
1719 # the queue is empty.
1720 self._fetch_queue.put(None)
1721 continue
1722
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001723 if job.unpack_only:
1724 if not job.done:
1725 self._unpack_jobs[target] = job
1726 else:
1727 del self._unpack_jobs[target]
1728 self._Print("Unpacked %s in %2.2fs"
1729 % (target, time.time() - job.start_timestamp))
1730 if self._show_output or job.retcode != 0:
1731 self._print_queue.put(JobPrinter(job, unlink=True))
1732 else:
1733 os.unlink(job.filename)
1734 if self._unpack_ready:
1735 state = self._unpack_ready.get()
1736 self._unpack_queue.put(state)
1737 self._unpack_jobs[state.target] = None
1738 continue
1739
David Jamesfcb70ef2011-02-02 16:02:30 -08001740 if not job.done:
Brian Harring0be85c62012-03-17 19:52:12 -07001741 self._build_jobs[target] = job
David Jamesfcb70ef2011-02-02 16:02:30 -08001742 self._Print("Started %s (logged in %s)" % (target, job.filename))
1743 continue
1744
1745 # Print output of job
1746 if self._show_output or job.retcode != 0:
1747 self._print_queue.put(JobPrinter(job, unlink=True))
1748 else:
1749 os.unlink(job.filename)
Brian Harring0be85c62012-03-17 19:52:12 -07001750 del self._build_jobs[target]
David Jamesfcb70ef2011-02-02 16:02:30 -08001751
1752 seconds = time.time() - job.start_timestamp
1753 details = "%s (in %dm%.1fs)" % (target, seconds / 60, seconds % 60)
David James32420cc2011-08-25 21:32:46 -07001754 previously_failed = target in self._failed
David Jamesfcb70ef2011-02-02 16:02:30 -08001755
1756 # Complain if necessary.
1757 if job.retcode != 0:
1758 # Handle job failure.
David James32420cc2011-08-25 21:32:46 -07001759 if previously_failed:
David Jamesfcb70ef2011-02-02 16:02:30 -08001760 # If this job has failed previously, give up.
1761 self._Print("Failed %s. Your build has failed." % details)
1762 else:
1763 # Queue up this build to try again after a long while.
David Jamese703d0f2012-01-12 16:27:45 -08001764 retried.add(target)
Brian Harring0be85c62012-03-17 19:52:12 -07001765 self._retry_queue.append(self._state_map[target])
David Jamesfcb70ef2011-02-02 16:02:30 -08001766 self._failed.add(target)
1767 self._Print("Failed %s, retrying later." % details)
1768 else:
David James32420cc2011-08-25 21:32:46 -07001769 if previously_failed:
1770 # Remove target from list of failed packages.
1771 self._failed.remove(target)
1772
1773 self._Print("Completed %s" % details)
1774
1775 # Mark as completed and unblock waiting ebuilds.
1776 self._Finish(target)
1777
1778 if previously_failed and self._retry_queue:
David Jamesfcb70ef2011-02-02 16:02:30 -08001779 # If we have successfully retried a failed package, and there
1780 # are more failed packages, try the next one. We will only have
1781 # one retrying package actively running at a time.
1782 self._Retry()
1783
David Jamesfcb70ef2011-02-02 16:02:30 -08001784
David James8c7e5e32011-06-28 11:26:03 -07001785 # Schedule pending jobs and print an update.
1786 self._ScheduleLoop()
1787 self._Status()
David Jamesfcb70ef2011-02-02 16:02:30 -08001788
David Jamese703d0f2012-01-12 16:27:45 -08001789 # If packages were retried, output a warning.
1790 if retried:
1791 self._Print("")
1792 self._Print("WARNING: The following packages failed the first time,")
1793 self._Print("but succeeded upon retry. This might indicate incorrect")
1794 self._Print("dependencies.")
1795 for pkg in retried:
1796 self._Print(" %s" % pkg)
1797 self._Print("@@@STEP_WARNINGS@@@")
1798 self._Print("")
1799
David Jamesfcb70ef2011-02-02 16:02:30 -08001800 # Tell child threads to exit.
1801 self._Print("Merge complete")
David Jamesfcb70ef2011-02-02 16:02:30 -08001802
1803
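# Illustrative, standalone sketch of the session trick used by
# EmergeQueue._SetupSession above: the child calls os.setsid() so that every
# descendant lands in one easily-killable session, while the parent merely
# forwards signals and waits.  Simplified to SIGINT/SIGTERM only and not
# called anywhere; child_func is a hypothetical callable returning an exit
# status.
def _example_session_wrapper(child_func):
  """Hedged sketch: run child_func in its own session, forwarding signals."""
  pid = os.fork()
  if pid == 0:
    os.setsid()
    # pylint: disable=W0212
    os._exit(child_func() or 0)

  def Forward(signum, _frame):
    # Parent: forward the signal to the child process.
    try:
      os.kill(pid, signum)
    except OSError as ex:
      if ex.errno != errno.ESRCH:
        raise
  signal.signal(signal.SIGINT, Forward)
  signal.signal(signal.SIGTERM, Forward)

  while True:
    try:
      return process_util.GetExitStatus(os.waitpid(pid, 0)[1])
    except OSError as ex:
      if ex.errno != errno.EINTR:
        raise

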
Brian Harring30675052012-02-29 12:18:22 -08001804def main(argv):
Brian Harring8294d652012-05-23 02:20:52 -07001805 try:
1806 return real_main(argv)
1807 finally:
1808 # Work around multiprocessing sucking and not cleaning up after itself.
1809 # http://bugs.python.org/issue4106;
1810    # Step one: ensure GC is run *prior* to the VM starting shutdown.
1811    gc.collect()
1812    # Step two: go looking for those threads and try to manually reap
1813    # them if we can.
1814 for x in threading.enumerate():
1815 # Filter on the name, and ident; if ident is None, the thread
1816 # wasn't started.
1817 if x.name == 'QueueFeederThread' and x.ident is not None:
1818 x.join(1)
David Jamesfcb70ef2011-02-02 16:02:30 -08001819
Brian Harring8294d652012-05-23 02:20:52 -07001820
1821def real_main(argv):
Brian Harring30675052012-02-29 12:18:22 -08001822 parallel_emerge_args = argv[:]
David Jamesfcb70ef2011-02-02 16:02:30 -08001823 deps = DepGraphGenerator()
Brian Harring30675052012-02-29 12:18:22 -08001824 deps.Initialize(parallel_emerge_args)
David Jamesfcb70ef2011-02-02 16:02:30 -08001825 emerge = deps.emerge
1826
1827 if emerge.action is not None:
Brian Harring30675052012-02-29 12:18:22 -08001828 argv = deps.ParseParallelEmergeArgs(argv)
Brian Harring8294d652012-05-23 02:20:52 -07001829 return emerge_main(argv)
David Jamesfcb70ef2011-02-02 16:02:30 -08001830 elif not emerge.cmdline_packages:
1831 Usage()
Brian Harring8294d652012-05-23 02:20:52 -07001832 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001833
1834 # Unless we're in pretend mode, there's not much point running without
1835 # root access. We need to be able to install packages.
1836 #
1837 # NOTE: Even if you're running --pretend, it's a good idea to run
1838 # parallel_emerge with root access so that portage can write to the
1839 # dependency cache. This is important for performance.
David James321490a2012-12-17 12:05:56 -08001840 if "--pretend" not in emerge.opts and portage.data.secpass < 2:
Mike Frysinger383367e2014-09-16 15:06:17 -04001841 print("parallel_emerge: superuser access is required.")
Brian Harring8294d652012-05-23 02:20:52 -07001842 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001843
1844 if "--quiet" not in emerge.opts:
1845 cmdline_packages = " ".join(emerge.cmdline_packages)
Mike Frysinger383367e2014-09-16 15:06:17 -04001846 print("Starting fast-emerge.")
1847 print(" Building package %s on %s" % (cmdline_packages,
1848 deps.board or "root"))
David Jamesfcb70ef2011-02-02 16:02:30 -08001849
David James386ccd12011-05-04 20:17:42 -07001850 deps_tree, deps_info = deps.GenDependencyTree()
David Jamesfcb70ef2011-02-02 16:02:30 -08001851
1852 # You want me to be verbose? I'll give you two trees! Twice as much value.
1853 if "--tree" in emerge.opts and "--verbose" in emerge.opts:
1854 deps.PrintTree(deps_tree)
1855
David James386ccd12011-05-04 20:17:42 -07001856 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001857
1858 # OK, time to print out our progress so far.
1859 deps.PrintInstallPlan(deps_graph)
1860 if "--tree" in emerge.opts:
1861 PrintDepsMap(deps_graph)
1862
1863 # Are we upgrading portage? If so, and there are more packages to merge,
1864 # schedule a restart of parallel_emerge to merge the rest. This ensures that
1865 # we pick up all updates to portage settings before merging any more
1866 # packages.
1867 portage_upgrade = False
1868 root = emerge.settings["ROOT"]
Don Garrett25f309a2014-03-19 14:02:12 -07001869 # pylint: disable=W0212
Bertrand SIMONNETa15b5072014-10-23 15:27:52 -07001870 final_db = emerge.depgraph._dynamic_config._filtered_trees[root]['graph_db']
David Jamesfcb70ef2011-02-02 16:02:30 -08001871 if root == "/":
1872 for db_pkg in final_db.match_pkgs("sys-apps/portage"):
1873 portage_pkg = deps_graph.get(db_pkg.cpv)
David James0ff16f22012-11-02 14:18:07 -07001874 if portage_pkg:
David Jamesfcb70ef2011-02-02 16:02:30 -08001875 portage_upgrade = True
1876 if "--quiet" not in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001877 print("Upgrading portage first, then restarting...")
David Jamesfcb70ef2011-02-02 16:02:30 -08001878
David James0ff16f22012-11-02 14:18:07 -07001879 # Upgrade Portage first, then the rest of the packages.
1880 #
1881 # In order to grant the child permission to run setsid, we need to run sudo
1882 # again. We preserve SUDO_USER here in case an ebuild depends on it.
1883 if portage_upgrade:
1884 # Calculate what arguments to use when re-invoking.
1885 args = ["sudo", "-E", "SUDO_USER=%s" % os.environ.get("SUDO_USER", "")]
1886 args += [os.path.abspath(sys.argv[0])] + parallel_emerge_args
1887 args += ["--exclude=sys-apps/portage"]
1888
1889 # First upgrade Portage.
1890 passthrough_args = ("--quiet", "--pretend", "--verbose")
1891 emerge_args = [k for k in emerge.opts if k in passthrough_args]
1892 ret = emerge_main(emerge_args + ["portage"])
1893 if ret != 0:
1894 return ret
1895
1896 # Now upgrade the rest.
1897 os.execvp(args[0], args)
1898
David Jamesfcb70ef2011-02-02 16:02:30 -08001899 # Run the queued emerges.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001900 scheduler = EmergeQueue(deps_graph, emerge, deps.package_db, deps.show_output,
1901 deps.unpack_only)
Brian Harringa43f5952012-04-12 01:19:34 -07001902 try:
1903 scheduler.Run()
1904 finally:
Don Garrett25f309a2014-03-19 14:02:12 -07001905 # pylint: disable=W0212
Brian Harringa43f5952012-04-12 01:19:34 -07001906 scheduler._Shutdown()
David James97ce8902011-08-16 09:51:05 -07001907 scheduler = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001908
Mike Frysingerd20a6e22012-10-04 19:01:10 -04001909 clean_logs(emerge.settings)
1910
Mike Frysinger383367e2014-09-16 15:06:17 -04001911 print("Done")
Brian Harring8294d652012-05-23 02:20:52 -07001912 return 0
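

# Illustrative sketch of a hypothetical consumer, not part of this script:
# when PARALLEL_EMERGE_STATUS_FILE is set, EmergeQueue.Run() above appends one
# space-separated line of failed package names per failed run.  A caller
# could read them back roughly like this.
def _example_read_status_file(path):
  """Hedged example: return the set of failed packages recorded in |path|."""
  failed = set()
  try:
    with open(path) as f:
      for line in f:
        failed.update(line.split())
  except IOError as ex:
    if ex.errno != errno.ENOENT:
      raise
  return failed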