# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Program to run emerge in parallel, for significant speedup.

Usage:
 ./parallel_emerge [--board=BOARD] [--workon=PKGS]
                   [--force-remote-binary=PKGS] [emerge args] package

This script runs multiple emerge processes in parallel, using appropriate
Portage APIs. It is faster than standard emerge because it has a
multiprocess model instead of an asynchronous model.
"""

from __future__ import print_function

import codecs
import copy
import errno
import gc
import heapq
import multiprocessing
import os
try:
  import Queue
except ImportError:
  # Python-3 renamed to "queue". We still use Queue to avoid collisions
  # with naming variables as "queue". Maybe we'll transition at some point.
  # pylint: disable=F0401
  import queue as Queue
import signal
import sys
import tempfile
import threading
import time
import traceback

from chromite.lib import cros_build_lib
from chromite.lib import process_util
from chromite.lib import proctitle

# If PORTAGE_USERNAME isn't specified, scrape it from the $HOME variable. On
# Chromium OS, the default "portage" user doesn't have the necessary
# permissions. It'd be easier if we could default to $USERNAME, but $USERNAME
# is "root" here because we get called through sudo.
#
# We need to set this before importing any portage modules, because portage
# looks up "PORTAGE_USERNAME" at import time.
#
# NOTE: .bashrc sets PORTAGE_USERNAME = $USERNAME, so most people won't
# encounter this case unless they have an old chroot or blow away the
# environment by running sudo without the -E specifier.
if "PORTAGE_USERNAME" not in os.environ:
  homedir = os.environ.get("HOME")
  if homedir:
    os.environ["PORTAGE_USERNAME"] = os.path.basename(homedir)

# Portage doesn't expose dependency trees in its public API, so we have to
# make use of some private APIs here. These modules are found under
# /usr/lib/portage/pym/.
#
# TODO(davidjames): Update Portage to expose public APIs for these features.
# pylint: disable=F0401
from _emerge.actions import adjust_configs
from _emerge.actions import load_emerge_config
from _emerge.create_depgraph_params import create_depgraph_params
from _emerge.depgraph import backtrack_depgraph
from _emerge.main import emerge_main
from _emerge.main import parse_opts
from _emerge.Package import Package
from _emerge.post_emerge import clean_logs
from _emerge.Scheduler import Scheduler
from _emerge.stdout_spinner import stdout_spinner
from portage._global_updates import _global_updates
import portage
import portage.debug
# pylint: enable=F0401


def Usage():
  """Print usage."""
  print("Usage:")
  print(" ./parallel_emerge [--board=BOARD] [--workon=PKGS]")
  print("                   [--rebuild] [emerge args] package")
  print()
  print("Packages specified as workon packages are always built from source.")
  print()
  print("The --workon argument is mainly useful when you want to build and")
  print("install packages that you are working on unconditionally, but do not")
  print("want to have to rev the package to indicate you want to build it from")
  print("source. The build_packages script will automatically supply the")
  print("workon argument to emerge, ensuring that packages selected using")
  print("cros-workon are rebuilt.")
  print()
  print("The --rebuild option rebuilds packages whenever their dependencies")
  print("are changed. This ensures that your build is correct.")


# Global start time
GLOBAL_START = time.time()

# Whether process has been killed by a signal.
KILLED = multiprocessing.Event()


class EmergeData(object):
  """This simple struct holds various emerge variables.

  This struct helps us easily pass emerge variables around as a unit.
  These variables are used for calculating dependencies and installing
  packages.
  """

  __slots__ = ["action", "cmdline_packages", "depgraph", "favorites",
               "mtimedb", "opts", "root_config", "scheduler_graph",
               "settings", "spinner", "trees"]

  def __init__(self):
    # The action the user requested. If the user is installing packages, this
    # is None. If the user is doing anything other than installing packages,
    # this will contain the action name, which will map exactly to the
    # long-form name of the associated emerge option.
    #
    # Example: If you call parallel_emerge --unmerge package, the action name
    # will be "unmerge"
    self.action = None

    # The list of packages the user passed on the command-line.
    self.cmdline_packages = None

    # The emerge dependency graph. It'll contain all the packages involved in
    # this merge, along with their versions.
    self.depgraph = None

    # The list of candidates to add to the world file.
    self.favorites = None

    # A dict of the options passed to emerge. This dict has been cleaned up
    # a bit by parse_opts, so that it's a bit easier for the emerge code to
    # look at the options.
    #
    # Emerge takes a few shortcuts in its cleanup process to make parsing of
    # the options dict easier. For example, if you pass in "--usepkg=n", the
    # "--usepkg" flag is just left out of the dictionary altogether. Because
    # --usepkg=n is the default, this makes parsing easier, because emerge
    # can just assume that if "--usepkg" is in the dictionary, it's enabled.
    #
    # These cleanup processes aren't applied to all options. For example, the
    # --with-bdeps flag is passed in as-is. For a full list of the cleanups
    # applied by emerge, see the parse_opts function in the _emerge.main
    # package.
    self.opts = None

    # A dictionary used by portage to maintain global state. This state is
    # loaded from disk when portage starts up, and saved to disk whenever we
    # call mtimedb.commit().
    #
    # This database contains information about global updates (i.e., what
    # version of portage we have) and what we're currently doing. Portage
    # saves what it is currently doing in this database so that it can be
    # resumed when you call it with the --resume option.
    #
    # parallel_emerge does not save what it is currently doing in the mtimedb,
    # so we do not support the --resume option.
    self.mtimedb = None

    # The portage configuration for our current root. This contains the portage
    # settings (see below) and the three portage trees for our current root.
    # (The three portage trees are explained below, in the documentation for
    # the "trees" member.)
    self.root_config = None

    # The scheduler graph is used by emerge to calculate what packages to
    # install. We don't actually install any deps, so this isn't really used,
    # but we pass it in to the Scheduler object anyway.
    self.scheduler_graph = None

    # Portage settings for our current session. Most of these settings are set
    # in make.conf inside our current install root.
    self.settings = None

    # The spinner, which spews stuff to stdout to indicate that portage is
    # doing something. We maintain our own spinner, so we set the portage
    # spinner to "silent" mode.
    self.spinner = None

    # The portage trees. There are separate portage trees for each root. To get
    # the portage tree for the current root, you can look in self.trees[root],
    # where root = self.settings["ROOT"].
    #
    # In each root, there are three trees: vartree, porttree, and bintree.
    #  - vartree: A database of the currently-installed packages.
    #  - porttree: A database of ebuilds, that can be used to build packages.
    #  - bintree: A database of binary packages.
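    #
    # Illustrative access pattern (the root path is an example):
    #   vartree = self.trees["/build/amd64-generic"]["vartree"]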
    self.trees = None


class DepGraphGenerator(object):
  """Grab dependency information about packages from portage.

  Typical usage:
    deps = DepGraphGenerator()
    deps.Initialize(sys.argv[1:])
    deps_tree, deps_info = deps.GenDependencyTree()
    deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
    deps.PrintTree(deps_tree)
    PrintDepsMap(deps_graph)
  """

  __slots__ = ["board", "emerge", "package_db", "show_output", "unpack_only"]

  def __init__(self):
    self.board = None
    self.emerge = EmergeData()
    self.package_db = {}
    self.show_output = False
    self.unpack_only = False

  def ParseParallelEmergeArgs(self, argv):
    """Read the parallel emerge arguments from the command-line.

    We need to be compatible with emerge arg format. We scrape arguments that
    are specific to parallel_emerge, and pass through the rest directly to
    emerge.

    Args:
      argv: arguments list

    Returns:
      Arguments that don't belong to parallel_emerge
    """
    emerge_args = []
    for arg in argv:
      # Specifically match arguments that are specific to parallel_emerge, and
      # pass through the rest.
      if arg.startswith("--board="):
        self.board = arg.replace("--board=", "")
      elif arg.startswith("--workon="):
        workon_str = arg.replace("--workon=", "")
        emerge_args.append("--reinstall-atoms=%s" % workon_str)
        emerge_args.append("--usepkg-exclude=%s" % workon_str)
      elif arg.startswith("--force-remote-binary="):
        force_remote_binary = arg.replace("--force-remote-binary=", "")
        emerge_args.append("--useoldpkg-atoms=%s" % force_remote_binary)
      elif arg == "--show-output":
        self.show_output = True
      elif arg == "--rebuild":
        emerge_args.append("--rebuild-if-unbuilt")
      elif arg == "--unpackonly":
        emerge_args.append("--fetchonly")
        self.unpack_only = True
      else:
        # Not one of our options, so pass through to emerge.
        emerge_args.append(arg)

    # These packages take a really long time to build, so, for expediency, we
    # are blacklisting them from automatic rebuilds because one of their
    # dependencies needs to be recompiled.
    for pkg in ("chromeos-base/chromeos-chrome",):
      emerge_args.append("--rebuild-exclude=%s" % pkg)

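    # For illustration (board and package names are examples):
    #   ParseParallelEmergeArgs(["--board=amd64-generic", "--workon=foo",
    #                            "--jobs=8", "pkg"])
    # sets self.board and returns ["--reinstall-atoms=foo",
    # "--usepkg-exclude=foo", "--jobs=8", "pkg"] plus the --rebuild-exclude
    # entries appended above.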
    return emerge_args

  def Initialize(self, args):
    """Initializer. Parses arguments and sets up portage state."""

    # Parse and strip out args that are just intended for parallel_emerge.
    emerge_args = self.ParseParallelEmergeArgs(args)

    # Setup various environment variables based on our current board. These
    # variables are normally setup inside emerge-${BOARD}, but since we don't
    # call that script, we have to set it up here. These variables serve to
    # point our tools at /build/BOARD and to setup cross compiles to the
    # appropriate board as configured in toolchain.conf.
    if self.board:
      sysroot = os.environ.get('SYSROOT',
                               cros_build_lib.GetSysroot(self.board))
      os.environ["PORTAGE_CONFIGROOT"] = sysroot
      os.environ["PORTAGE_SYSROOT"] = sysroot
      os.environ["SYSROOT"] = sysroot

    # Although CHROMEOS_ROOT isn't specific to boards, it's normally setup
    # inside emerge-${BOARD}, so we set it up here for compatibility. It
    # will be going away soon as we migrate to CROS_WORKON_SRCROOT.
    os.environ.setdefault("CHROMEOS_ROOT", os.environ["HOME"] + "/trunk")

    # Turn off interactive delays
    os.environ["EBEEP_IGNORE"] = "1"
    os.environ["EPAUSE_IGNORE"] = "1"
    os.environ["CLEAN_DELAY"] = "0"

    # Parse the emerge options.
    action, opts, cmdline_packages = parse_opts(emerge_args, silent=True)

    # Set environment variables based on options. Portage normally sets these
    # environment variables in emerge_main, but we can't use that function,
    # because it also does a bunch of other stuff that we don't want.
    # TODO(davidjames): Patch portage to move this logic into a function we can
    # reuse here.
    if "--debug" in opts:
      os.environ["PORTAGE_DEBUG"] = "1"
    if "--config-root" in opts:
      os.environ["PORTAGE_CONFIGROOT"] = opts["--config-root"]
    if "--root" in opts:
      os.environ["ROOT"] = opts["--root"]
    if "--accept-properties" in opts:
      os.environ["ACCEPT_PROPERTIES"] = opts["--accept-properties"]

    # If we're installing packages to the board, we can disable vardb locks.
    # This is safe because we only run up to one instance of parallel_emerge in
    # parallel.
    # TODO(davidjames): Enable this for the host too.
    if self.board:
      os.environ.setdefault("PORTAGE_LOCKS", "false")

    # Now that we've setup the necessary environment variables, we can load the
    # emerge config from disk.
    settings, trees, mtimedb = load_emerge_config()

    # Add in EMERGE_DEFAULT_OPTS, if specified.
    tmpcmdline = []
    if "--ignore-default-opts" not in opts:
      tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
    tmpcmdline.extend(emerge_args)
    action, opts, cmdline_packages = parse_opts(tmpcmdline)

    # If we're installing to the board, we want the --root-deps option so that
    # portage will install the build dependencies to that location as well.
    if self.board:
      opts.setdefault("--root-deps", True)

    # Check whether our portage tree is out of date. Typically, this happens
    # when you're setting up a new portage tree, such as in setup_board and
    # make_chroot. In that case, portage applies a bunch of global updates
    # here. Once the updates are finished, we need to commit any changes
    # that the global update made to our mtimedb, and reload the config.
    #
    # Portage normally handles this logic in emerge_main, but again, we can't
    # use that function here.
    if _global_updates(trees, mtimedb["updates"]):
      mtimedb.commit()
      settings, trees, mtimedb = load_emerge_config(trees=trees)

    # Setup implied options. Portage normally handles this logic in
    # emerge_main.
    if "--buildpkgonly" in opts or "buildpkg" in settings.features:
      opts.setdefault("--buildpkg", True)
    if "--getbinpkgonly" in opts:
      opts.setdefault("--usepkgonly", True)
      opts.setdefault("--getbinpkg", True)
    if "getbinpkg" in settings.features:
      # Per emerge_main, FEATURES=getbinpkg overrides --getbinpkg=n
      opts["--getbinpkg"] = True
    if "--getbinpkg" in opts or "--usepkgonly" in opts:
      opts.setdefault("--usepkg", True)
    if "--fetch-all-uri" in opts:
      opts.setdefault("--fetchonly", True)
    if "--skipfirst" in opts:
      opts.setdefault("--resume", True)
    if "--buildpkgonly" in opts:
      # --buildpkgonly will not merge anything, so it overrides all binary
      # package options.
      for opt in ("--getbinpkg", "--getbinpkgonly",
                  "--usepkg", "--usepkgonly"):
        opts.pop(opt, None)
    if (settings.get("PORTAGE_DEBUG", "") == "1" and
        "python-trace" in settings.features):
      portage.debug.set_trace(True)

    # Complain about unsupported options
    for opt in ("--ask", "--ask-enter-invalid", "--resume", "--skipfirst"):
      if opt in opts:
        print("%s is not supported by parallel_emerge" % opt)
        sys.exit(1)

    # Make emerge specific adjustments to the config (e.g. colors!)
    adjust_configs(opts, trees)

    # Save our configuration so far in the emerge object
    emerge = self.emerge
    emerge.action, emerge.opts = action, opts
    emerge.settings, emerge.trees, emerge.mtimedb = settings, trees, mtimedb
    emerge.cmdline_packages = cmdline_packages
    root = settings["ROOT"]
    emerge.root_config = trees[root]["root_config"]

    if "--usepkg" in opts:
      emerge.trees[root]["bintree"].populate("--getbinpkg" in opts)

  def CreateDepgraph(self, emerge, packages):
    """Create an emerge depgraph object."""
    # Setup emerge options.
    emerge_opts = emerge.opts.copy()

    # Ask portage to build a dependency graph with the options we specified
    # above.
    params = create_depgraph_params(emerge_opts, emerge.action)
    success, depgraph, favorites = backtrack_depgraph(
        emerge.settings, emerge.trees, emerge_opts, params, emerge.action,
        packages, emerge.spinner)
    emerge.depgraph = depgraph

    # Is it impossible to honor the user's request? Bail!
    if not success:
      depgraph.display_problems()
      sys.exit(1)

    emerge.depgraph = depgraph
    emerge.favorites = favorites

    # Prime and flush emerge caches.
    root = emerge.settings["ROOT"]
    vardb = emerge.trees[root]["vartree"].dbapi
    if "--pretend" not in emerge.opts:
      vardb.counter_tick()
    vardb.flush_cache()

  def GenDependencyTree(self):
    """Get dependency tree info from emerge.

    Returns:
      Dependency tree
    """
    start = time.time()

    emerge = self.emerge

    # Create a list of packages to merge
    packages = set(emerge.cmdline_packages[:])

    # Tell emerge to be quiet. We print plenty of info ourselves so we don't
    # need any extra output from portage.
    portage.util.noiselimit = -1

    # My favorite feature: The silent spinner. It doesn't spin. Ever.
    # I'd disable the colors by default too, but they look kind of cool.
    emerge.spinner = stdout_spinner()
    emerge.spinner.update = emerge.spinner.update_quiet

    if "--quiet" not in emerge.opts:
      print("Calculating deps...")

    self.CreateDepgraph(emerge, packages)
    depgraph = emerge.depgraph

    # Build our own tree from the emerge digraph.
    deps_tree = {}
    # pylint: disable=W0212
    digraph = depgraph._dynamic_config.digraph
    root = emerge.settings["ROOT"]
    final_db = depgraph._dynamic_config._filtered_trees[root]['graph_db']
    for node, node_deps in digraph.nodes.items():
      # Calculate dependency packages that need to be installed first. Each
      # child on the digraph is a dependency. The "operation" field specifies
      # what we're doing (e.g. merge, uninstall, etc.). The "priorities" array
      # contains the type of dependency (e.g. build, runtime, runtime_post,
      # etc.)
      #
      # Portage refers to the identifiers for packages as a CPV. This acronym
      # stands for Category/Package/Version.
      #
      # Here's an example CPV: chromeos-base/power_manager-0.0.1-r1
      # Split up, this CPV would be:
      #   C -- Category: chromeos-base
      #   P -- Package: power_manager
      #   V -- Version: 0.0.1-r1
      #
      # We just refer to CPVs as packages here because it's easier.
      deps = {}
      for child, priorities in node_deps[0].items():
        if isinstance(child, Package) and child.root == root:
          cpv = str(child.cpv)
          action = str(child.operation)

          # If we're uninstalling a package, check whether Portage is
          # installing a replacement. If so, just depend on the installation
          # of the new package, because the old package will automatically
          # be uninstalled at that time.
          if action == "uninstall":
            for pkg in final_db.match_pkgs(child.slot_atom):
              cpv = str(pkg.cpv)
              action = "merge"
              break

          deps[cpv] = dict(action=action,
                           deptypes=[str(x) for x in priorities],
                           deps={})

      # We've built our list of deps, so we can add our package to the tree.
      if isinstance(node, Package) and node.root == root:
        deps_tree[str(node.cpv)] = dict(action=str(node.operation),
                                        deps=deps)

    # Ask portage for its install plan, so that we can only throw out
    # dependencies that portage throws out.
    deps_info = {}
    for pkg in depgraph.altlist():
      if isinstance(pkg, Package):
        assert pkg.root == root
        self.package_db[pkg.cpv] = pkg

        # Save off info about the package
        deps_info[str(pkg.cpv)] = {"idx": len(deps_info)}

    seconds = time.time() - start
    if "--quiet" not in emerge.opts:
      print("Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60))

    return deps_tree, deps_info

  def PrintTree(self, deps, depth=""):
    """Print the deps we have seen in the emerge output.

    Args:
      deps: Dependency tree structure.
      depth: Allows printing the tree recursively, with indentation.
    """
    for entry in sorted(deps):
      action = deps[entry]["action"]
      print("%s %s (%s)" % (depth, entry, action))
      self.PrintTree(deps[entry]["deps"], depth=depth + " ")

  def GenDependencyGraph(self, deps_tree, deps_info):
    """Generate a doubly linked dependency graph.

    Args:
      deps_tree: Dependency tree structure.
      deps_info: More details on the dependencies.

    Returns:
      Deps graph in the form of a dict of packages, with each package
      specifying a "needs" list and "provides" list.
    """
    emerge = self.emerge

    # deps_map is the actual dependency graph.
    #
    # Each package specifies a "needs" list and a "provides" list. The "needs"
    # list indicates which packages we depend on. The "provides" list
    # indicates the reverse dependencies -- what packages need us.
    #
    # We also provide some other information in the dependency graph:
    #  - action: What we're planning on doing with this package. Generally,
    #    "merge", "nomerge", or "uninstall"
    deps_map = {}

    def ReverseTree(packages):
      """Convert tree to digraph.

      Take the tree of package -> requirements and reverse it to a digraph of
      buildable packages -> packages they unblock.

      Args:
        packages: Tree(s) of dependencies.

      Returns:
        Unsanitized digraph.
      """
      binpkg_phases = set(["setup", "preinst", "postinst"])
      needed_dep_types = set(["blocker", "buildtime", "buildtime_slot_op",
                              "runtime", "runtime_slot_op"])
      ignored_dep_types = set(["ignored", "optional", "runtime_post", "soft"])
      all_dep_types = ignored_dep_types | needed_dep_types
      for pkg in packages:

        # Create an entry for the package
        action = packages[pkg]["action"]
        default_pkg = {"needs": {}, "provides": set(), "action": action,
                       "nodeps": False, "binary": False}
        this_pkg = deps_map.setdefault(pkg, default_pkg)

        if pkg in deps_info:
          this_pkg["idx"] = deps_info[pkg]["idx"]

        # If a package doesn't have any defined phases that might use the
        # dependent packages (i.e. pkg_setup, pkg_preinst, or pkg_postinst),
        # we can install this package before its deps are ready.
        emerge_pkg = self.package_db.get(pkg)
        if emerge_pkg and emerge_pkg.type_name == "binary":
          this_pkg["binary"] = True
          defined_phases = emerge_pkg.defined_phases
          defined_binpkg_phases = binpkg_phases.intersection(defined_phases)
          if not defined_binpkg_phases:
            this_pkg["nodeps"] = True

        # Create entries for dependencies of this package first.
        ReverseTree(packages[pkg]["deps"])

        # Add dependencies to this package.
        for dep, dep_item in packages[pkg]["deps"].iteritems():
          # We only need to enforce strict ordering of dependencies if the
          # dependency is a blocker, or is a buildtime or runtime dependency.
          # (I.e., ignored, optional, and runtime_post dependencies don't
          # depend on ordering.)
          dep_types = dep_item["deptypes"]
          if needed_dep_types.intersection(dep_types):
            deps_map[dep]["provides"].add(pkg)
            this_pkg["needs"][dep] = "/".join(dep_types)

          # Verify we processed all appropriate dependency types.
          unknown_dep_types = set(dep_types) - all_dep_types
          if unknown_dep_types:
            print("Unknown dependency types found:")
            print("  %s -> %s (%s)" % (pkg, dep, "/".join(unknown_dep_types)))
            sys.exit(1)

          # If there's a blocker, Portage may need to move files from one
          # package to another, which requires editing the CONTENTS files of
          # both packages. To avoid race conditions while editing this file,
          # the two packages must not be installed in parallel, so we can't
          # safely ignore dependencies. See http://crosbug.com/19328
          if "blocker" in dep_types:
            this_pkg["nodeps"] = False

    def FindCycles():
      """Find cycles in the dependency tree.

      Returns:
        A dict mapping cyclic packages to a dict of the deps that cause
        cycles. For each dep that causes cycles, it returns an example
        traversal of the graph that shows the cycle.
      """

      def FindCyclesAtNode(pkg, cycles, unresolved, resolved):
        """Find cycles in cyclic dependencies starting at specified package.

        Args:
          pkg: Package identifier.
          cycles: A dict mapping cyclic packages to a dict of the deps that
            cause cycles. For each dep that causes cycles, it returns an
            example traversal of the graph that shows the cycle.
          unresolved: Nodes that have been visited but are not fully processed.
          resolved: Nodes that have been visited and are fully processed.
        """
        pkg_cycles = cycles.get(pkg)
        if pkg in resolved and not pkg_cycles:
          # If we already looked at this package, and found no cyclic
          # dependencies, we can stop now.
          return
        unresolved.append(pkg)
        for dep in deps_map[pkg]["needs"]:
          if dep in unresolved:
            idx = unresolved.index(dep)
            mycycle = unresolved[idx:] + [dep]
            for i in xrange(len(mycycle) - 1):
              pkg1, pkg2 = mycycle[i], mycycle[i+1]
              cycles.setdefault(pkg1, {}).setdefault(pkg2, mycycle)
          elif not pkg_cycles or dep not in pkg_cycles:
            # Looks like we haven't seen this edge before.
            FindCyclesAtNode(dep, cycles, unresolved, resolved)
        unresolved.pop()
        resolved.add(pkg)

      cycles, unresolved, resolved = {}, [], set()
      for pkg in deps_map:
        FindCyclesAtNode(pkg, cycles, unresolved, resolved)
      return cycles

    def RemoveUnusedPackages():
      """Remove installed packages, propagating dependencies."""
      # Schedule packages that aren't on the install list for removal
      rm_pkgs = set(deps_map.keys()) - set(deps_info.keys())

      # Remove the packages we don't want, simplifying the graph and making
      # it easier for us to crack cycles.
      for pkg in sorted(rm_pkgs):
        this_pkg = deps_map[pkg]
        needs = this_pkg["needs"]
        provides = this_pkg["provides"]
        for dep in needs:
          dep_provides = deps_map[dep]["provides"]
          dep_provides.update(provides)
          dep_provides.discard(pkg)
          dep_provides.discard(dep)
        for target in provides:
          target_needs = deps_map[target]["needs"]
          target_needs.update(needs)
          target_needs.pop(pkg, None)
          target_needs.pop(target, None)
        del deps_map[pkg]

    def PrintCycleBreak(basedep, dep, mycycle):
      """Print details about a cycle that we are planning on breaking.

      We are breaking a cycle where dep needs basedep. mycycle is an
      example cycle which contains dep -> basedep.
      """

      needs = deps_map[dep]["needs"]
      depinfo = needs.get(basedep, "deleted")

      # It's OK to swap install order for blockers, as long as the two
      # packages aren't installed in parallel. If there is a cycle, then
      # we know the packages depend on each other already, so we can drop the
      # blocker safely without printing a warning.
      if depinfo == "blocker":
        return

      # Notify the user that we're breaking a cycle.
      print("Breaking %s -> %s (%s)" % (dep, basedep, depinfo))

      # Show cycle.
      for i in xrange(len(mycycle) - 1):
        pkg1, pkg2 = mycycle[i], mycycle[i+1]
        needs = deps_map[pkg1]["needs"]
        depinfo = needs.get(pkg2, "deleted")
        if pkg1 == dep and pkg2 == basedep:
          depinfo = depinfo + ", deleting"
        print("  %s -> %s (%s)" % (pkg1, pkg2, depinfo))

    def SanitizeTree():
      """Remove circular dependencies.

      We prune all dependencies involved in cycles that go against the emerge
      ordering. This has a nice property: we're guaranteed to merge
      dependencies in the same order that portage does.

      Because we don't treat any dependencies as "soft" unless they're killed
      by a cycle, we pay attention to a larger number of dependencies when
      merging. This hurts performance a bit, but helps reliability.
      """
      start = time.time()
      cycles = FindCycles()
      while cycles:
        for dep, mycycles in cycles.iteritems():
          for basedep, mycycle in mycycles.iteritems():
            if deps_info[basedep]["idx"] >= deps_info[dep]["idx"]:
              if "--quiet" not in emerge.opts:
                PrintCycleBreak(basedep, dep, mycycle)
              del deps_map[dep]["needs"][basedep]
              deps_map[basedep]["provides"].remove(dep)
        cycles = FindCycles()
      seconds = time.time() - start
      if "--quiet" not in emerge.opts and seconds >= 0.1:
        print("Tree sanitized in %dm%.1fs" % (seconds / 60, seconds % 60))

    def FindRecursiveProvides(pkg, seen):
      """Find all nodes that require a particular package.

      Assumes that graph is acyclic.

      Args:
        pkg: Package identifier.
        seen: Nodes that have been visited so far.
      """
      if pkg in seen:
        return
      seen.add(pkg)
      info = deps_map[pkg]
      info["tprovides"] = info["provides"].copy()
      for dep in info["provides"]:
        FindRecursiveProvides(dep, seen)
        info["tprovides"].update(deps_map[dep]["tprovides"])

    ReverseTree(deps_tree)

    # We need to remove unused packages so that we can use the dependency
    # ordering of the install process to show us what cycles to crack.
    RemoveUnusedPackages()
    SanitizeTree()
    seen = set()
    for pkg in deps_map:
      FindRecursiveProvides(pkg, seen)
    return deps_map

  def PrintInstallPlan(self, deps_map):
    """Print an emerge-style install plan.

    The install plan lists what packages we're installing, in order.
    It's useful for understanding what parallel_emerge is doing.

    Args:
      deps_map: The dependency graph.
    """

    def InstallPlanAtNode(target, deps_map):
      nodes = []
      nodes.append(target)
      for dep in deps_map[target]["provides"]:
        del deps_map[dep]["needs"][target]
        if not deps_map[dep]["needs"]:
          nodes.extend(InstallPlanAtNode(dep, deps_map))
      return nodes

    deps_map = copy.deepcopy(deps_map)
    install_plan = []
    plan = set()
    for target, info in deps_map.iteritems():
      if not info["needs"] and target not in plan:
        for item in InstallPlanAtNode(target, deps_map):
          plan.add(item)
          install_plan.append(self.package_db[item])

    for pkg in plan:
      del deps_map[pkg]

    if deps_map:
      print("Cyclic dependencies:", " ".join(deps_map))
      PrintDepsMap(deps_map)
      sys.exit(1)

    self.emerge.depgraph.display(install_plan)


def PrintDepsMap(deps_map):
  """Print dependency graph; for each package, list its prerequisites."""
  for i in sorted(deps_map):
    print("%s: (%s) needs" % (i, deps_map[i]["action"]))
    needs = deps_map[i]["needs"]
    for j in sorted(needs):
      print("  %s" % (j))
    if not needs:
      print("   no dependencies")


class EmergeJobState(object):
  """Structure describing the EmergeJobState."""

  __slots__ = ["done", "filename", "last_notify_timestamp", "last_output_seek",
               "last_output_timestamp", "pkgname", "retcode", "start_timestamp",
               "target", "fetch_only", "unpack_only"]

  def __init__(self, target, pkgname, done, filename, start_timestamp,
               retcode=None, fetch_only=False, unpack_only=False):

    # The full name of the target we're building (e.g.
    # virtual/target-os-1-r60)
    self.target = target

    # The short name of the target we're building (e.g. target-os-1-r60)
    self.pkgname = pkgname

    # Whether the job is done. (True if the job is done; false otherwise.)
    self.done = done

    # The filename where output is currently stored.
    self.filename = filename

    # The timestamp of the last time we printed the name of the log file. We
    # print this at the beginning of the job, so this starts at
    # start_timestamp.
    self.last_notify_timestamp = start_timestamp

    # The location (in bytes) of the end of the last complete line we printed.
    # This starts off at zero. We use this to jump to the right place when we
    # print output from the same ebuild multiple times.
    self.last_output_seek = 0

    # The timestamp of the last time we printed output. Since we haven't
    # printed output yet, this starts at zero.
    self.last_output_timestamp = 0

    # The return code of our job, if the job is actually finished.
    self.retcode = retcode

    # Was this just a fetch job?
    self.fetch_only = fetch_only

    # The timestamp when our job started.
    self.start_timestamp = start_timestamp

    # No emerge, only unpack packages.
    self.unpack_only = unpack_only


def KillHandler(_signum, _frame):
  # Kill self and all subprocesses.
  os.killpg(0, signal.SIGKILL)


def SetupWorkerSignals():
  def ExitHandler(_signum, _frame):
    # Set KILLED flag.
    KILLED.set()

    # Remove our signal handlers so we don't get called recursively.
    signal.signal(signal.SIGINT, KillHandler)
    signal.signal(signal.SIGTERM, KillHandler)

  # Ensure that we exit quietly and cleanly, if possible, when we receive
  # SIGTERM or SIGINT signals. By default, when the user hits CTRL-C, all
  # of the child processes will print details about KeyboardInterrupt
  # exceptions, which isn't very helpful.
  signal.signal(signal.SIGINT, ExitHandler)
  signal.signal(signal.SIGTERM, ExitHandler)


def EmergeProcess(output, target, *args, **kwargs):
  """Merge a package in a subprocess.

  Args:
    output: Temporary file to write output.
    target: The package we'll be processing (for display purposes).
    *args: Arguments to pass to Scheduler constructor.
    **kwargs: Keyword arguments to pass to Scheduler constructor.

  Returns:
    The exit code returned by the subprocess.
  """
  pid = os.fork()
  if pid == 0:
    try:
      proctitle.settitle('EmergeProcess', target)

      # Sanity checks.
      if sys.stdout.fileno() != 1:
        raise Exception("sys.stdout.fileno() != 1")
      if sys.stderr.fileno() != 2:
        raise Exception("sys.stderr.fileno() != 2")

      # - Redirect 1 (stdout) and 2 (stderr) at our temporary file.
      # - Redirect 0 to point at sys.stdin. In this case, sys.stdin
      #   points at a file reading os.devnull, because multiprocessing mucks
      #   with sys.stdin.
      # - Leave the sys.stdin and output filehandles alone.
      fd_pipes = {0: sys.stdin.fileno(),
                  1: output.fileno(),
                  2: output.fileno(),
                  sys.stdin.fileno(): sys.stdin.fileno(),
                  output.fileno(): output.fileno()}
      # pylint: disable=W0212
      portage.process._setup_pipes(fd_pipes, close_fds=False)

      # Portage doesn't like when sys.stdin.fileno() != 0, so point sys.stdin
      # at the filehandle we just created in _setup_pipes.
      if sys.stdin.fileno() != 0:
        sys.__stdin__ = sys.stdin = os.fdopen(0, "r")

      scheduler = Scheduler(*args, **kwargs)

      # Enable blocker handling even though we're in --nodeps mode. This
      # allows us to unmerge the blocker after we've merged the replacement.
      scheduler._opts_ignore_blockers = frozenset()

      # Actually do the merge.
      retval = scheduler.merge()

    # We catch all exceptions here (including SystemExit, KeyboardInterrupt,
    # etc) so as to ensure that we don't confuse the multiprocessing module,
    # which expects that all forked children exit with os._exit().
    # pylint: disable=W0702
    except:
      traceback.print_exc(file=output)
      retval = 1
    sys.stdout.flush()
    sys.stderr.flush()
    output.flush()
    # pylint: disable=W0212
    os._exit(retval)
  else:
    # Return the exit code of the subprocess.
    return os.waitpid(pid, 0)[1]


def UnpackPackage(pkg_state):
  """Unpacks package described by pkg_state.

  Args:
    pkg_state: EmergeJobState object describing target.

  Returns:
    Exit code returned by subprocess.
  """
  pkgdir = os.environ.get("PKGDIR",
                          os.path.join(os.environ["SYSROOT"], "packages"))
  root = os.environ.get("ROOT", os.environ["SYSROOT"])
  path = os.path.join(pkgdir, pkg_state.target + ".tbz2")
  comp = cros_build_lib.FindCompressor(cros_build_lib.COMP_BZIP2)
  cmd = [comp, "-dc"]
  if comp.endswith("pbzip2"):
    cmd.append("--ignore-trailing-garbage=1")
  cmd.append(path)

  result = cros_build_lib.RunCommand(cmd, cwd=root, stdout_to_pipe=True,
                                     print_cmd=False, error_code_ok=True)

  # If we were not successful, return now and don't attempt untar.
  if result.returncode:
    return result.returncode

  cmd = ["sudo", "tar", "-xf", "-", "-C", root]
  result = cros_build_lib.RunCommand(cmd, cwd=root, input=result.output,
                                     print_cmd=False, error_code_ok=True)

  return result.returncode


def EmergeWorker(task_queue, job_queue, emerge, package_db, fetch_only=False,
                 unpack_only=False):
  """This worker emerges any packages given to it on the task_queue.

  Args:
    task_queue: The queue of tasks for this worker to do.
    job_queue: The queue of results from the worker.
    emerge: An EmergeData() object.
    package_db: A dict, mapping package ids to portage Package objects.
    fetch_only: A bool, indicating if we should just fetch the target.
    unpack_only: A bool, indicating if we should just unpack the target.

  It expects package identifiers to be passed to it via task_queue. When
  a task is started, it pushes the (target, filename) to the started_queue.
  The output is stored in filename. When a merge starts or finishes, we push
  EmergeJobState objects to the job_queue.
  """
  if fetch_only:
    mode = 'fetch'
  elif unpack_only:
    mode = 'unpack'
  else:
    mode = 'emerge'
  proctitle.settitle('EmergeWorker', mode, '[idle]')

  SetupWorkerSignals()
  settings, trees, mtimedb = emerge.settings, emerge.trees, emerge.mtimedb

  # Disable flushing of caches to save on I/O.
  root = emerge.settings["ROOT"]
  vardb = emerge.trees[root]["vartree"].dbapi
  vardb._flush_cache_enabled = False  # pylint: disable=protected-access
  bindb = emerge.trees[root]["bintree"].dbapi
  # Might be a set, might be a list, might be None; no clue, just use shallow
  # copy to ensure we can roll it back.
  # pylint: disable=W0212
  original_remotepkgs = copy.copy(bindb.bintree._remotepkgs)

  opts, spinner = emerge.opts, emerge.spinner
  opts["--nodeps"] = True
  if fetch_only:
    opts["--fetchonly"] = True

  while True:
    # Wait for a new item to show up on the queue. This is a blocking wait,
    # so if there's nothing to do, we just sit here.
    pkg_state = task_queue.get()
    if pkg_state is None:
      # If target is None, this means that the main thread wants us to quit.
      # The other workers need to exit too, so we'll push the message back on
      # to the queue so they'll get it too.
      task_queue.put(None)
      return
    if KILLED.is_set():
      return

    target = pkg_state.target
    proctitle.settitle('EmergeWorker', mode, target)

    db_pkg = package_db[target]

1049 if db_pkg.type_name == "binary":
1050 if not fetch_only and pkg_state.fetched_successfully:
1051 # Ensure portage doesn't think our pkg is remote- else it'll force
1052 # a redownload of it (even if the on-disk file is fine). In-memory
1053 # caching basically, implemented dumbly.
1054 bindb.bintree._remotepkgs = None
1055 else:
1056 bindb.bintree_remotepkgs = original_remotepkgs
1057
David Jamesfcb70ef2011-02-02 16:02:30 -08001058 db_pkg.root_config = emerge.root_config
1059 install_list = [db_pkg]
1060 pkgname = db_pkg.pf
1061 output = tempfile.NamedTemporaryFile(prefix=pkgname + "-", delete=False)
    os.chmod(output.name, 0o644)
    start_timestamp = time.time()
    job = EmergeJobState(target, pkgname, False, output.name, start_timestamp,
                         fetch_only=fetch_only, unpack_only=unpack_only)
    job_queue.put(job)
    if "--pretend" in opts:
      retcode = 0
    else:
      try:
        emerge.scheduler_graph.mergelist = install_list
        if unpack_only:
          retcode = UnpackPackage(pkg_state)
        else:
          retcode = EmergeProcess(output, target, settings, trees, mtimedb,
                                  opts, spinner, favorites=emerge.favorites,
                                  graph_config=emerge.scheduler_graph)
      except Exception:
        traceback.print_exc(file=output)
        retcode = 1
      output.close()

    if KILLED.is_set():
      return

    job = EmergeJobState(target, pkgname, True, output.name, start_timestamp,
                         retcode, fetch_only=fetch_only,
                         unpack_only=unpack_only)
    job_queue.put(job)

    # Set the title back to idle as the multiprocess pool won't destroy us;
    # when another job comes up, it'll re-use this process.
    proctitle.settitle('EmergeWorker', mode, '[idle]')


class LinePrinter(object):
  """Helper object to print a single line."""

  def __init__(self, line):
    self.line = line

  def Print(self, _seek_locations):
    print(self.line)


class JobPrinter(object):
  """Helper object to print output of a job."""

  def __init__(self, job, unlink=False):
    """Print output of job.

    If unlink is True, unlink the job output file when done.
    """
    self.current_time = time.time()
    self.job = job
    self.unlink = unlink

  def Print(self, seek_locations):

    job = self.job

    # Calculate how long the job has been running.
    seconds = self.current_time - job.start_timestamp

    # Note that we've printed out the job so far.
    job.last_output_timestamp = self.current_time

    # Note that we're starting the job
    info = "job %s (%dm%.1fs)" % (job.pkgname, seconds / 60, seconds % 60)
    last_output_seek = seek_locations.get(job.filename, 0)
    if last_output_seek:
      print("=== Continue output for %s ===" % info)
    else:
      print("=== Start output for %s ===" % info)

    # Print actual output from job
    f = codecs.open(job.filename, encoding='utf-8', errors='replace')
    f.seek(last_output_seek)
    prefix = job.pkgname + ":"
    for line in f:

      # Save off our position in the file
      if line and line[-1] == "\n":
        last_output_seek = f.tell()
        line = line[:-1]

      # Print our line
      print(prefix, line.encode('utf-8', 'replace'))
    f.close()

    # Save our last spot in the file so that we don't print out the same
    # location twice.
    seek_locations[job.filename] = last_output_seek

    # Note end of output section
    if job.done:
      print("=== Complete: %s ===" % info)
    else:
      print("=== Still running: %s ===" % info)

    if self.unlink:
      os.unlink(job.filename)


def PrintWorker(queue):
  """A worker that prints stuff to the screen as requested."""
  proctitle.settitle('PrintWorker')

  def ExitHandler(_signum, _frame):
    # Set KILLED flag.
    KILLED.set()

    # Switch to default signal handlers so that we'll die after two signals.
    signal.signal(signal.SIGINT, KillHandler)
    signal.signal(signal.SIGTERM, KillHandler)

  # Don't exit on the first SIGINT / SIGTERM, because the parent worker will
  # handle it and tell us when we need to exit.
  signal.signal(signal.SIGINT, ExitHandler)
  signal.signal(signal.SIGTERM, ExitHandler)

  # seek_locations is a map indicating the position we are at in each file.
  # It starts off empty, but is set by the various Print jobs as we go along
  # to indicate where we left off in each file.
  seek_locations = {}
  while True:
    try:
      job = queue.get()
      if job:
        job.Print(seek_locations)
        sys.stdout.flush()
      else:
        break
    except IOError as ex:
      if ex.errno == errno.EINTR:
        # Looks like we received a signal. Keep printing.
        continue
      raise


Brian Harring0be85c62012-03-17 19:52:12 -07001201class TargetState(object):
Don Garrett25f309a2014-03-19 14:02:12 -07001202  """Structure describing the state of a single build target."""
Brian Harring867e2362012-03-17 04:05:17 -07001203
Brian Harring0be85c62012-03-17 19:52:12 -07001204 __slots__ = ("target", "info", "score", "prefetched", "fetched_successfully")
Brian Harring867e2362012-03-17 04:05:17 -07001205
David James321490a2012-12-17 12:05:56 -08001206 def __init__(self, target, info):
Brian Harring867e2362012-03-17 04:05:17 -07001207 self.target, self.info = target, info
Brian Harring0be85c62012-03-17 19:52:12 -07001208 self.fetched_successfully = False
1209 self.prefetched = False
David James321490a2012-12-17 12:05:56 -08001210 self.score = None
Brian Harring867e2362012-03-17 04:05:17 -07001211 self.update_score()
1212
1213 def __cmp__(self, other):
1214 return cmp(self.score, other.score)
1215
1216 def update_score(self):
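    # Lower tuples sort first in the heap, so targets with more tprovides
    # entries, fewer unmet needs, a binary package available, and more
    # provides are scheduled earlier; idx and target break ties.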
1217 self.score = (
1218 -len(self.info["tprovides"]),
Brian Harring0be85c62012-03-17 19:52:12 -07001219 len(self.info["needs"]),
Brian Harring11c5eeb2012-03-18 11:02:39 -07001220 not self.info["binary"],
Brian Harring867e2362012-03-17 04:05:17 -07001221 -len(self.info["provides"]),
1222 self.info["idx"],
1223 self.target,
1224 )
1225
1226
1227class ScoredHeap(object):
Don Garrett25f309a2014-03-19 14:02:12 -07001228  """Implementation of a general-purpose scored heap."""
Brian Harring867e2362012-03-17 04:05:17 -07001229
Brian Harring0be85c62012-03-17 19:52:12 -07001230 __slots__ = ("heap", "_heap_set")
1231
Brian Harring867e2362012-03-17 04:05:17 -07001232 def __init__(self, initial=()):
Brian Harring0be85c62012-03-17 19:52:12 -07001233 self.heap = list()
1234 self._heap_set = set()
1235 if initial:
1236 self.multi_put(initial)
Brian Harring867e2362012-03-17 04:05:17 -07001237
1238 def get(self):
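    """Pop and return the lowest-scored item from the heap."""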
Brian Harring0be85c62012-03-17 19:52:12 -07001239 item = heapq.heappop(self.heap)
1240 self._heap_set.remove(item.target)
1241 return item
Brian Harring867e2362012-03-17 04:05:17 -07001242
Brian Harring0be85c62012-03-17 19:52:12 -07001243 def put(self, item):
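    """Add a single TargetState to the heap."""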
1244 if not isinstance(item, TargetState):
1245 raise ValueError("Item %r isn't a TargetState" % (item,))
1246 heapq.heappush(self.heap, item)
1247 self._heap_set.add(item.target)
Brian Harring867e2362012-03-17 04:05:17 -07001248
Brian Harring0be85c62012-03-17 19:52:12 -07001249 def multi_put(self, sequence):
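    """Add a sequence of TargetStates and restore the heap invariant."""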
1250 sequence = list(sequence)
1251 self.heap.extend(sequence)
1252 self._heap_set.update(x.target for x in sequence)
Brian Harring867e2362012-03-17 04:05:17 -07001253 self.sort()
1254
David James5c9996d2012-03-24 10:50:46 -07001255 def sort(self):
1256 heapq.heapify(self.heap)
1257
Brian Harring0be85c62012-03-17 19:52:12 -07001258 def __contains__(self, target):
1259 return target in self._heap_set
1260
1261 def __nonzero__(self):
1262 return bool(self.heap)
1263
Brian Harring867e2362012-03-17 04:05:17 -07001264 def __len__(self):
1265 return len(self.heap)
1266
1267
David Jamesfcb70ef2011-02-02 16:02:30 -08001268class EmergeQueue(object):
1269 """Class to schedule emerge jobs according to a dependency graph."""
1270
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001271 def __init__(self, deps_map, emerge, package_db, show_output, unpack_only):
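    """Set up the queues, worker pools, and signal handlers for this build."""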
David Jamesfcb70ef2011-02-02 16:02:30 -08001272 # Store the dependency graph.
1273 self._deps_map = deps_map
Brian Harring0be85c62012-03-17 19:52:12 -07001274 self._state_map = {}
David Jamesfcb70ef2011-02-02 16:02:30 -08001275 # Initialize the running queue to empty
Brian Harring0be85c62012-03-17 19:52:12 -07001276 self._build_jobs = {}
1277 self._build_ready = ScoredHeap()
1278 self._fetch_jobs = {}
1279 self._fetch_ready = ScoredHeap()
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001280 self._unpack_jobs = {}
1281 self._unpack_ready = ScoredHeap()
David Jamesfcb70ef2011-02-02 16:02:30 -08001282    # List of the packages in deps_map that will actually be installed (merged).
1283 install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"]
1284 self._total_jobs = len(install_jobs)
1285 self._show_output = show_output
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001286 self._unpack_only = unpack_only
David Jamesfcb70ef2011-02-02 16:02:30 -08001287
1288 if "--pretend" in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001289 print("Skipping merge because of --pretend mode.")
David Jamesfcb70ef2011-02-02 16:02:30 -08001290 sys.exit(0)
1291
David Jamesaaf49e42014-04-24 09:40:05 -07001292 # Set up a session so we can easily terminate all children.
1293 self._SetupSession()
David James7358d032011-05-19 10:40:03 -07001294
David Jamesfcb70ef2011-02-02 16:02:30 -08001295    # Set up the scheduler graph object. This is used by the child processes
1296 # to help schedule jobs.
1297 emerge.scheduler_graph = emerge.depgraph.schedulerGraph()
1298
1299 # Calculate how many jobs we can run in parallel. We don't want to pass
1300 # the --jobs flag over to emerge itself, because that'll tell emerge to
1301 # hide its output, and said output is quite useful for debugging hung
1302 # jobs.
1303 procs = min(self._total_jobs,
1304 emerge.opts.pop("--jobs", multiprocessing.cpu_count()))
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001305 self._build_procs = self._unpack_procs = self._fetch_procs = max(1, procs)
David James8c7e5e32011-06-28 11:26:03 -07001306 self._load_avg = emerge.opts.pop("--load-average", None)
David Jamesfcb70ef2011-02-02 16:02:30 -08001307 self._job_queue = multiprocessing.Queue()
1308 self._print_queue = multiprocessing.Queue()
Brian Harring0be85c62012-03-17 19:52:12 -07001309
1310 self._fetch_queue = multiprocessing.Queue()
1311 args = (self._fetch_queue, self._job_queue, emerge, package_db, True)
1312 self._fetch_pool = multiprocessing.Pool(self._fetch_procs, EmergeWorker,
1313 args)
1314
1315 self._build_queue = multiprocessing.Queue()
1316 args = (self._build_queue, self._job_queue, emerge, package_db)
1317 self._build_pool = multiprocessing.Pool(self._build_procs, EmergeWorker,
1318 args)
1319
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001320 if self._unpack_only:
1321 # Unpack pool only required on unpack_only jobs.
1322 self._unpack_queue = multiprocessing.Queue()
1323 args = (self._unpack_queue, self._job_queue, emerge, package_db, False,
1324 True)
1325 self._unpack_pool = multiprocessing.Pool(self._unpack_procs, EmergeWorker,
1326 args)
1327
David Jamesfcb70ef2011-02-02 16:02:30 -08001328 self._print_worker = multiprocessing.Process(target=PrintWorker,
1329 args=[self._print_queue])
1330 self._print_worker.start()
1331
1332 # Initialize the failed queue to empty.
1333 self._retry_queue = []
1334 self._failed = set()
1335
David Jamesfcb70ef2011-02-02 16:02:30 -08001336    # Set up an exit handler so that we print nice messages if we are
1337 # terminated.
1338 self._SetupExitHandler()
1339
1340 # Schedule our jobs.
Brian Harring0be85c62012-03-17 19:52:12 -07001341 self._state_map.update(
1342 (pkg, TargetState(pkg, data)) for pkg, data in deps_map.iteritems())
1343 self._fetch_ready.multi_put(self._state_map.itervalues())
David Jamesfcb70ef2011-02-02 16:02:30 -08001344
David Jamesaaf49e42014-04-24 09:40:05 -07001345 def _SetupSession(self):
1346 """Set up a session so we can easily terminate all children."""
1347 # When we call os.setsid(), this sets up a session / process group for this
1348 # process and all children. These session groups are needed so that we can
1349 # easily kill all children (including processes launched by emerge) before
1350 # we exit.
1351 #
1352 # One unfortunate side effect of os.setsid() is that it blocks CTRL-C from
1353 # being received. To work around this, we only call os.setsid() in a forked
1354 # process, so that the parent can still watch for CTRL-C. The parent will
1355 # just sit around, watching for signals and propagating them to the child,
1356 # until the child exits.
1357 #
1358 # TODO(davidjames): It would be nice if we could replace this with cgroups.
1359 pid = os.fork()
1360 if pid == 0:
1361 os.setsid()
1362 else:
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001363 proctitle.settitle('SessionManager')
1364
David Jamesaaf49e42014-04-24 09:40:05 -07001365 def PropagateToChildren(signum, _frame):
1366 # Just propagate the signals down to the child. We'll exit when the
1367 # child does.
1368 try:
1369 os.kill(pid, signum)
1370 except OSError as ex:
1371 if ex.errno != errno.ESRCH:
1372 raise
1373 signal.signal(signal.SIGINT, PropagateToChildren)
1374 signal.signal(signal.SIGTERM, PropagateToChildren)
1375
1376 def StopGroup(_signum, _frame):
1377 # When we get stopped, stop the children.
1378 try:
1379 os.killpg(pid, signal.SIGSTOP)
1380 os.kill(0, signal.SIGSTOP)
1381 except OSError as ex:
1382 if ex.errno != errno.ESRCH:
1383 raise
1384 signal.signal(signal.SIGTSTP, StopGroup)
1385
1386 def ContinueGroup(_signum, _frame):
1387 # Launch the children again after being stopped.
1388 try:
1389 os.killpg(pid, signal.SIGCONT)
1390 except OSError as ex:
1391 if ex.errno != errno.ESRCH:
1392 raise
1393 signal.signal(signal.SIGCONT, ContinueGroup)
1394
1395 # Loop until the children exit. We exit with os._exit to be sure we
1396 # don't run any finalizers (those will be run by the child process.)
1397 # pylint: disable=W0212
1398 while True:
1399 try:
1400 # Wait for the process to exit. When it does, exit with the return
1401 # value of the subprocess.
Mike Frysingere2d8f0d2014-11-01 13:09:26 -04001402 os._exit(process_util.GetExitStatus(os.waitpid(pid, 0)[1]))
David Jamesaaf49e42014-04-24 09:40:05 -07001403 except OSError as ex:
1404 if ex.errno == errno.EINTR:
1405 continue
1406 traceback.print_exc()
1407 os._exit(1)
1408 except BaseException:
1409 traceback.print_exc()
1410 os._exit(1)
1411
David Jamesfcb70ef2011-02-02 16:02:30 -08001412 def _SetupExitHandler(self):
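    """Install SIGINT/SIGTERM handlers that print job status before exiting."""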
1413
David James321490a2012-12-17 12:05:56 -08001414 def ExitHandler(signum, _frame):
David James7358d032011-05-19 10:40:03 -07001415 # Set KILLED flag.
1416 KILLED.set()
David Jamesfcb70ef2011-02-02 16:02:30 -08001417
1418 # Kill our signal handlers so we don't get called recursively
David James7358d032011-05-19 10:40:03 -07001419 signal.signal(signal.SIGINT, KillHandler)
1420 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -08001421
1422 # Print our current job status
Brian Harring0be85c62012-03-17 19:52:12 -07001423 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001424 if job:
1425 self._print_queue.put(JobPrinter(job, unlink=True))
1426
1427 # Notify the user that we are exiting
1428 self._Print("Exiting on signal %s" % signum)
David James7358d032011-05-19 10:40:03 -07001429 self._print_queue.put(None)
1430 self._print_worker.join()
David Jamesfcb70ef2011-02-02 16:02:30 -08001431
1432 # Kill child threads, then exit.
David James7358d032011-05-19 10:40:03 -07001433 os.killpg(0, signal.SIGKILL)
David Jamesfcb70ef2011-02-02 16:02:30 -08001434 sys.exit(1)
1435
1436 # Print out job status when we are killed
1437 signal.signal(signal.SIGINT, ExitHandler)
1438 signal.signal(signal.SIGTERM, ExitHandler)
1439
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001440 def _ScheduleUnpack(self, pkg_state):
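    """Queue up an unpack job for the given package state."""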
1441 self._unpack_jobs[pkg_state.target] = None
1442 self._unpack_queue.put(pkg_state)
1443
Brian Harring0be85c62012-03-17 19:52:12 -07001444 def _Schedule(self, pkg_state):
David Jamesfcb70ef2011-02-02 16:02:30 -08001445 # We maintain a tree of all deps, if this doesn't need
David James8c7e5e32011-06-28 11:26:03 -07001446 # to be installed just free up its children and continue.
David Jamesfcb70ef2011-02-02 16:02:30 -08001447 # It is possible to reinstall deps of deps, without reinstalling
1448 # first level deps, like so:
Mike Frysingerfd969312014-04-02 22:16:42 -04001449 # virtual/target-os (merge) -> eselect (nomerge) -> python (merge)
Brian Harring0be85c62012-03-17 19:52:12 -07001450 this_pkg = pkg_state.info
1451 target = pkg_state.target
1452 if pkg_state.info is not None:
1453 if this_pkg["action"] == "nomerge":
1454 self._Finish(target)
1455 elif target not in self._build_jobs:
1456 # Kick off the build if it's marked to be built.
1457 self._build_jobs[target] = None
1458 self._build_queue.put(pkg_state)
1459 return True
David Jamesfcb70ef2011-02-02 16:02:30 -08001460
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001461 def _ScheduleLoop(self, unpack_only=False):
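    """Kick off ready jobs, up to our process count and load average limits."""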
1462 if unpack_only:
1463 ready_queue = self._unpack_ready
1464 jobs_queue = self._unpack_jobs
1465 procs = self._unpack_procs
1466 else:
1467 ready_queue = self._build_ready
1468 jobs_queue = self._build_jobs
1469 procs = self._build_procs
1470
David James8c7e5e32011-06-28 11:26:03 -07001471 # If the current load exceeds our desired load average, don't schedule
1472 # more than one job.
1473 if self._load_avg and os.getloadavg()[0] > self._load_avg:
1474 needed_jobs = 1
1475 else:
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001476 needed_jobs = procs
David James8c7e5e32011-06-28 11:26:03 -07001477
1478 # Schedule more jobs.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001479 while ready_queue and len(jobs_queue) < needed_jobs:
1480 state = ready_queue.get()
1481 if unpack_only:
1482 self._ScheduleUnpack(state)
1483 else:
1484 if state.target not in self._failed:
1485 self._Schedule(state)
David Jamesfcb70ef2011-02-02 16:02:30 -08001486
1487 def _Print(self, line):
1488 """Print a single line."""
1489 self._print_queue.put(LinePrinter(line))
1490
1491 def _Status(self):
1492 """Print status."""
1493 current_time = time.time()
1494 no_output = True
1495
1496 # Print interim output every minute if --show-output is used. Otherwise,
1497 # print notifications about running packages every 2 minutes, and print
1498 # full output for jobs that have been running for 60 minutes or more.
1499 if self._show_output:
1500 interval = 60
1501 notify_interval = 0
1502 else:
1503 interval = 60 * 60
1504 notify_interval = 60 * 2
David James321490a2012-12-17 12:05:56 -08001505 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001506 if job:
1507 last_timestamp = max(job.start_timestamp, job.last_output_timestamp)
1508 if last_timestamp + interval < current_time:
1509 self._print_queue.put(JobPrinter(job))
1510 job.last_output_timestamp = current_time
1511 no_output = False
1512 elif (notify_interval and
1513 job.last_notify_timestamp + notify_interval < current_time):
1514 job_seconds = current_time - job.start_timestamp
1515 args = (job.pkgname, job_seconds / 60, job_seconds % 60, job.filename)
1516 info = "Still building %s (%dm%.1fs). Logs in %s" % args
1517 job.last_notify_timestamp = current_time
1518 self._Print(info)
1519 no_output = False
1520
1521 # If we haven't printed any messages yet, print a general status message
1522 # here.
1523 if no_output:
1524 seconds = current_time - GLOBAL_START
Brian Harring0be85c62012-03-17 19:52:12 -07001525 fjobs, fready = len(self._fetch_jobs), len(self._fetch_ready)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001526 ujobs, uready = len(self._unpack_jobs), len(self._unpack_ready)
Brian Harring0be85c62012-03-17 19:52:12 -07001527 bjobs, bready = len(self._build_jobs), len(self._build_ready)
1528 retries = len(self._retry_queue)
1529 pending = max(0, len(self._deps_map) - fjobs - bjobs)
1530 line = "Pending %s/%s, " % (pending, self._total_jobs)
1531 if fjobs or fready:
1532 line += "Fetching %s/%s, " % (fjobs, fready + fjobs)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001533 if ujobs or uready:
1534 line += "Unpacking %s/%s, " % (ujobs, uready + ujobs)
Brian Harring0be85c62012-03-17 19:52:12 -07001535 if bjobs or bready or retries:
1536 line += "Building %s/%s, " % (bjobs, bready + bjobs)
1537 if retries:
1538 line += "Retrying %s, " % (retries,)
Mike Frysingerd6e2df02014-11-26 02:55:04 -05001539 load = " ".join(str(x) for x in os.getloadavg())
1540 line += ("[Time %dm%.1fs Load %s]" % (seconds / 60, seconds % 60, load))
Brian Harring0be85c62012-03-17 19:52:12 -07001541 self._Print(line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001542
1543 def _Finish(self, target):
David James8c7e5e32011-06-28 11:26:03 -07001544 """Mark a target as completed and unblock dependencies."""
1545 this_pkg = self._deps_map[target]
1546 if this_pkg["needs"] and this_pkg["nodeps"]:
1547 # We got installed, but our deps have not been installed yet. Dependent
1548 # packages should only be installed when our needs have been fully met.
1549 this_pkg["action"] = "nomerge"
1550 else:
David James8c7e5e32011-06-28 11:26:03 -07001551 for dep in this_pkg["provides"]:
1552 dep_pkg = self._deps_map[dep]
Brian Harring0be85c62012-03-17 19:52:12 -07001553 state = self._state_map[dep]
David James8c7e5e32011-06-28 11:26:03 -07001554 del dep_pkg["needs"][target]
Brian Harring0be85c62012-03-17 19:52:12 -07001555 state.update_score()
1556 if not state.prefetched:
1557 if dep in self._fetch_ready:
1558 # If it's not currently being fetched, update the prioritization
1559 self._fetch_ready.sort()
1560 elif not dep_pkg["needs"]:
David James8c7e5e32011-06-28 11:26:03 -07001561 if dep_pkg["nodeps"] and dep_pkg["action"] == "nomerge":
1562 self._Finish(dep)
1563 else:
Brian Harring0be85c62012-03-17 19:52:12 -07001564 self._build_ready.put(self._state_map[dep])
David James8c7e5e32011-06-28 11:26:03 -07001565 self._deps_map.pop(target)
David Jamesfcb70ef2011-02-02 16:02:30 -08001566
1567 def _Retry(self):
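    """Reschedule failed packages from the retry queue, one at a time."""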
David James8c7e5e32011-06-28 11:26:03 -07001568 while self._retry_queue:
Brian Harring0be85c62012-03-17 19:52:12 -07001569 state = self._retry_queue.pop(0)
1570 if self._Schedule(state):
1571 self._Print("Retrying emerge of %s." % state.target)
David James8c7e5e32011-06-28 11:26:03 -07001572 break
David Jamesfcb70ef2011-02-02 16:02:30 -08001573
Brian Harringa43f5952012-04-12 01:19:34 -07001574 def _Shutdown(self):
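    """Shut down the worker pools and close all of our queues."""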
David Jamesfcb70ef2011-02-02 16:02:30 -08001575 # Tell emerge workers to exit. They all exit when 'None' is pushed
1576 # to the queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001577
Brian Harringa43f5952012-04-12 01:19:34 -07001578    # Shut down the workers first, then the job queue (which is how they feed
1579    # results back), and finally the print queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001580
Brian Harringa43f5952012-04-12 01:19:34 -07001581 def _stop(queue, pool):
1582 if pool is None:
1583 return
1584 try:
1585 queue.put(None)
1586 pool.close()
1587 pool.join()
1588 finally:
1589 pool.terminate()
Brian Harring0be85c62012-03-17 19:52:12 -07001590
Brian Harringa43f5952012-04-12 01:19:34 -07001591 _stop(self._fetch_queue, self._fetch_pool)
1592 self._fetch_queue = self._fetch_pool = None
Brian Harring0be85c62012-03-17 19:52:12 -07001593
Brian Harringa43f5952012-04-12 01:19:34 -07001594 _stop(self._build_queue, self._build_pool)
1595 self._build_queue = self._build_pool = None
1596
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001597 if self._unpack_only:
1598 _stop(self._unpack_queue, self._unpack_pool)
1599 self._unpack_queue = self._unpack_pool = None
1600
Brian Harringa43f5952012-04-12 01:19:34 -07001601 if self._job_queue is not None:
1602 self._job_queue.close()
1603 self._job_queue = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001604
1605 # Now that our workers are finished, we can kill the print queue.
Brian Harringa43f5952012-04-12 01:19:34 -07001606 if self._print_worker is not None:
1607 try:
1608 self._print_queue.put(None)
1609 self._print_queue.close()
1610 self._print_worker.join()
1611 finally:
1612 self._print_worker.terminate()
1613 self._print_queue = self._print_worker = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001614
1615 def Run(self):
1616 """Run through the scheduled ebuilds.
1617
1618 Keep running so long as we have uninstalled packages in the
1619 dependency graph to merge.
1620 """
Brian Harringa43f5952012-04-12 01:19:34 -07001621 if not self._deps_map:
1622 return
1623
Brian Harring0be85c62012-03-17 19:52:12 -07001624 # Start the fetchers.
1625 for _ in xrange(min(self._fetch_procs, len(self._fetch_ready))):
1626 state = self._fetch_ready.get()
1627 self._fetch_jobs[state.target] = None
1628 self._fetch_queue.put(state)
1629
1630 # Print an update, then get going.
1631 self._Status()
1632
David Jamese703d0f2012-01-12 16:27:45 -08001633 retried = set()
David Jamesfcb70ef2011-02-02 16:02:30 -08001634 while self._deps_map:
1635 # Check here that we are actually waiting for something.
Brian Harring0be85c62012-03-17 19:52:12 -07001636 if (self._build_queue.empty() and
David Jamesfcb70ef2011-02-02 16:02:30 -08001637 self._job_queue.empty() and
Brian Harring0be85c62012-03-17 19:52:12 -07001638 not self._fetch_jobs and
1639 not self._fetch_ready and
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001640 not self._unpack_jobs and
1641 not self._unpack_ready and
Brian Harring0be85c62012-03-17 19:52:12 -07001642 not self._build_jobs and
1643 not self._build_ready and
David Jamesfcb70ef2011-02-02 16:02:30 -08001644 self._deps_map):
1645 # If we have failed on a package, retry it now.
1646 if self._retry_queue:
1647 self._Retry()
1648 else:
David Jamesfcb70ef2011-02-02 16:02:30 -08001649 # Tell the user why we're exiting.
1650 if self._failed:
Mike Frysinger383367e2014-09-16 15:06:17 -04001651 print('Packages failed:\n\t%s' % '\n\t'.join(self._failed))
David James0eae23e2012-07-03 15:04:25 -07001652 status_file = os.environ.get("PARALLEL_EMERGE_STATUS_FILE")
1653 if status_file:
David James321490a2012-12-17 12:05:56 -08001654 failed_pkgs = set(portage.versions.cpv_getkey(x)
1655 for x in self._failed)
David James0eae23e2012-07-03 15:04:25 -07001656 with open(status_file, "a") as f:
1657 f.write("%s\n" % " ".join(failed_pkgs))
David Jamesfcb70ef2011-02-02 16:02:30 -08001658 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001659 print("Deadlock! Circular dependencies!")
David Jamesfcb70ef2011-02-02 16:02:30 -08001660 sys.exit(1)
1661
David James321490a2012-12-17 12:05:56 -08001662 for _ in xrange(12):
David Jamesa74289a2011-08-12 10:41:24 -07001663 try:
1664 job = self._job_queue.get(timeout=5)
1665 break
1666 except Queue.Empty:
1667 # Check if any more jobs can be scheduled.
1668 self._ScheduleLoop()
1669 else:
Brian Harring706747c2012-03-16 03:04:31 -07001670 # Print an update every 60 seconds.
David Jamesfcb70ef2011-02-02 16:02:30 -08001671 self._Status()
1672 continue
1673
1674 target = job.target
1675
Brian Harring0be85c62012-03-17 19:52:12 -07001676 if job.fetch_only:
1677 if not job.done:
1678 self._fetch_jobs[job.target] = job
1679 else:
1680 state = self._state_map[job.target]
1681 state.prefetched = True
1682 state.fetched_successfully = (job.retcode == 0)
1683 del self._fetch_jobs[job.target]
1684 self._Print("Fetched %s in %2.2fs"
1685 % (target, time.time() - job.start_timestamp))
1686
1687 if self._show_output or job.retcode != 0:
1688 self._print_queue.put(JobPrinter(job, unlink=True))
1689 else:
1690 os.unlink(job.filename)
1691 # Failure or not, let build work with it next.
1692 if not self._deps_map[job.target]["needs"]:
1693 self._build_ready.put(state)
1694 self._ScheduleLoop()
1695
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001696 if self._unpack_only and job.retcode == 0:
1697 self._unpack_ready.put(state)
1698 self._ScheduleLoop(unpack_only=True)
1699
Brian Harring0be85c62012-03-17 19:52:12 -07001700 if self._fetch_ready:
1701 state = self._fetch_ready.get()
1702 self._fetch_queue.put(state)
1703 self._fetch_jobs[state.target] = None
1704 else:
1705 # Minor optimization; shut down fetchers early since we know
1706 # the queue is empty.
1707 self._fetch_queue.put(None)
1708 continue
1709
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001710 if job.unpack_only:
1711 if not job.done:
1712 self._unpack_jobs[target] = job
1713 else:
1714 del self._unpack_jobs[target]
1715 self._Print("Unpacked %s in %2.2fs"
1716 % (target, time.time() - job.start_timestamp))
1717 if self._show_output or job.retcode != 0:
1718 self._print_queue.put(JobPrinter(job, unlink=True))
1719 else:
1720 os.unlink(job.filename)
1721 if self._unpack_ready:
1722 state = self._unpack_ready.get()
1723 self._unpack_queue.put(state)
1724 self._unpack_jobs[state.target] = None
1725 continue
1726
David Jamesfcb70ef2011-02-02 16:02:30 -08001727 if not job.done:
Brian Harring0be85c62012-03-17 19:52:12 -07001728 self._build_jobs[target] = job
David Jamesfcb70ef2011-02-02 16:02:30 -08001729 self._Print("Started %s (logged in %s)" % (target, job.filename))
1730 continue
1731
1732 # Print output of job
1733 if self._show_output or job.retcode != 0:
1734 self._print_queue.put(JobPrinter(job, unlink=True))
1735 else:
1736 os.unlink(job.filename)
Brian Harring0be85c62012-03-17 19:52:12 -07001737 del self._build_jobs[target]
David Jamesfcb70ef2011-02-02 16:02:30 -08001738
1739 seconds = time.time() - job.start_timestamp
1740 details = "%s (in %dm%.1fs)" % (target, seconds / 60, seconds % 60)
David James32420cc2011-08-25 21:32:46 -07001741 previously_failed = target in self._failed
David Jamesfcb70ef2011-02-02 16:02:30 -08001742
1743 # Complain if necessary.
1744 if job.retcode != 0:
1745 # Handle job failure.
David James32420cc2011-08-25 21:32:46 -07001746 if previously_failed:
David Jamesfcb70ef2011-02-02 16:02:30 -08001747 # If this job has failed previously, give up.
1748 self._Print("Failed %s. Your build has failed." % details)
1749 else:
1750 # Queue up this build to try again after a long while.
David Jamese703d0f2012-01-12 16:27:45 -08001751 retried.add(target)
Brian Harring0be85c62012-03-17 19:52:12 -07001752 self._retry_queue.append(self._state_map[target])
David Jamesfcb70ef2011-02-02 16:02:30 -08001753 self._failed.add(target)
1754 self._Print("Failed %s, retrying later." % details)
1755 else:
David James32420cc2011-08-25 21:32:46 -07001756 if previously_failed:
1757 # Remove target from list of failed packages.
1758 self._failed.remove(target)
1759
1760 self._Print("Completed %s" % details)
1761
1762 # Mark as completed and unblock waiting ebuilds.
1763 self._Finish(target)
1764
1765 if previously_failed and self._retry_queue:
David Jamesfcb70ef2011-02-02 16:02:30 -08001766 # If we have successfully retried a failed package, and there
1767 # are more failed packages, try the next one. We will only have
1768 # one retrying package actively running at a time.
1769 self._Retry()
1770
David Jamesfcb70ef2011-02-02 16:02:30 -08001771
David James8c7e5e32011-06-28 11:26:03 -07001772 # Schedule pending jobs and print an update.
1773 self._ScheduleLoop()
1774 self._Status()
David Jamesfcb70ef2011-02-02 16:02:30 -08001775
David Jamese703d0f2012-01-12 16:27:45 -08001776 # If packages were retried, output a warning.
1777 if retried:
1778 self._Print("")
1779 self._Print("WARNING: The following packages failed the first time,")
1780 self._Print("but succeeded upon retry. This might indicate incorrect")
1781 self._Print("dependencies.")
1782 for pkg in retried:
1783 self._Print(" %s" % pkg)
1784 self._Print("@@@STEP_WARNINGS@@@")
1785 self._Print("")
1786
David Jamesfcb70ef2011-02-02 16:02:30 -08001787 # Tell child threads to exit.
1788 self._Print("Merge complete")
David Jamesfcb70ef2011-02-02 16:02:30 -08001789
1790
Brian Harring30675052012-02-29 12:18:22 -08001791def main(argv):
Brian Harring8294d652012-05-23 02:20:52 -07001792 try:
1793 return real_main(argv)
1794 finally:
1795 # Work around multiprocessing sucking and not cleaning up after itself.
1796    # http://bugs.python.org/issue4106
1797    # Step one: ensure GC is run *prior* to the VM starting shutdown.
1798 gc.collect()
1799    # Step two: go looking for those threads and try to manually reap
1800 # them if we can.
1801 for x in threading.enumerate():
1802 # Filter on the name, and ident; if ident is None, the thread
1803 # wasn't started.
1804 if x.name == 'QueueFeederThread' and x.ident is not None:
1805 x.join(1)
David Jamesfcb70ef2011-02-02 16:02:30 -08001806
Brian Harring8294d652012-05-23 02:20:52 -07001807
1808def real_main(argv):
Brian Harring30675052012-02-29 12:18:22 -08001809 parallel_emerge_args = argv[:]
David Jamesfcb70ef2011-02-02 16:02:30 -08001810 deps = DepGraphGenerator()
Brian Harring30675052012-02-29 12:18:22 -08001811 deps.Initialize(parallel_emerge_args)
David Jamesfcb70ef2011-02-02 16:02:30 -08001812 emerge = deps.emerge
1813
1814 if emerge.action is not None:
Brian Harring30675052012-02-29 12:18:22 -08001815 argv = deps.ParseParallelEmergeArgs(argv)
Brian Harring8294d652012-05-23 02:20:52 -07001816 return emerge_main(argv)
David Jamesfcb70ef2011-02-02 16:02:30 -08001817 elif not emerge.cmdline_packages:
1818 Usage()
Brian Harring8294d652012-05-23 02:20:52 -07001819 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001820
1821 # Unless we're in pretend mode, there's not much point running without
1822 # root access. We need to be able to install packages.
1823 #
1824 # NOTE: Even if you're running --pretend, it's a good idea to run
1825 # parallel_emerge with root access so that portage can write to the
1826 # dependency cache. This is important for performance.
David James321490a2012-12-17 12:05:56 -08001827 if "--pretend" not in emerge.opts and portage.data.secpass < 2:
Mike Frysinger383367e2014-09-16 15:06:17 -04001828 print("parallel_emerge: superuser access is required.")
Brian Harring8294d652012-05-23 02:20:52 -07001829 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001830
1831 if "--quiet" not in emerge.opts:
1832 cmdline_packages = " ".join(emerge.cmdline_packages)
Mike Frysinger383367e2014-09-16 15:06:17 -04001833 print("Starting fast-emerge.")
1834 print(" Building package %s on %s" % (cmdline_packages,
1835 deps.board or "root"))
David Jamesfcb70ef2011-02-02 16:02:30 -08001836
David James386ccd12011-05-04 20:17:42 -07001837 deps_tree, deps_info = deps.GenDependencyTree()
David Jamesfcb70ef2011-02-02 16:02:30 -08001838
1839 # You want me to be verbose? I'll give you two trees! Twice as much value.
1840 if "--tree" in emerge.opts and "--verbose" in emerge.opts:
1841 deps.PrintTree(deps_tree)
1842
David James386ccd12011-05-04 20:17:42 -07001843 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001844
1845 # OK, time to print out our progress so far.
1846 deps.PrintInstallPlan(deps_graph)
1847 if "--tree" in emerge.opts:
1848 PrintDepsMap(deps_graph)
1849
1850 # Are we upgrading portage? If so, and there are more packages to merge,
1851 # schedule a restart of parallel_emerge to merge the rest. This ensures that
1852 # we pick up all updates to portage settings before merging any more
1853 # packages.
1854 portage_upgrade = False
1855 root = emerge.settings["ROOT"]
Don Garrett25f309a2014-03-19 14:02:12 -07001856 # pylint: disable=W0212
Bertrand SIMONNETa15b5072014-10-23 15:27:52 -07001857 final_db = emerge.depgraph._dynamic_config._filtered_trees[root]['graph_db']
David Jamesfcb70ef2011-02-02 16:02:30 -08001858 if root == "/":
1859 for db_pkg in final_db.match_pkgs("sys-apps/portage"):
1860 portage_pkg = deps_graph.get(db_pkg.cpv)
David James0ff16f22012-11-02 14:18:07 -07001861 if portage_pkg:
David Jamesfcb70ef2011-02-02 16:02:30 -08001862 portage_upgrade = True
1863 if "--quiet" not in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001864 print("Upgrading portage first, then restarting...")
David Jamesfcb70ef2011-02-02 16:02:30 -08001865
David James0ff16f22012-11-02 14:18:07 -07001866 # Upgrade Portage first, then the rest of the packages.
1867 #
1868 # In order to grant the child permission to run setsid, we need to run sudo
1869 # again. We preserve SUDO_USER here in case an ebuild depends on it.
1870 if portage_upgrade:
1871 # Calculate what arguments to use when re-invoking.
1872 args = ["sudo", "-E", "SUDO_USER=%s" % os.environ.get("SUDO_USER", "")]
1873 args += [os.path.abspath(sys.argv[0])] + parallel_emerge_args
1874 args += ["--exclude=sys-apps/portage"]
1875
1876 # First upgrade Portage.
1877 passthrough_args = ("--quiet", "--pretend", "--verbose")
1878 emerge_args = [k for k in emerge.opts if k in passthrough_args]
1879 ret = emerge_main(emerge_args + ["portage"])
1880 if ret != 0:
1881 return ret
1882
1883 # Now upgrade the rest.
1884 os.execvp(args[0], args)
1885
David Jamesfcb70ef2011-02-02 16:02:30 -08001886 # Run the queued emerges.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001887 scheduler = EmergeQueue(deps_graph, emerge, deps.package_db, deps.show_output,
1888 deps.unpack_only)
Brian Harringa43f5952012-04-12 01:19:34 -07001889 try:
1890 scheduler.Run()
1891 finally:
Don Garrett25f309a2014-03-19 14:02:12 -07001892 # pylint: disable=W0212
Brian Harringa43f5952012-04-12 01:19:34 -07001893 scheduler._Shutdown()
David James97ce8902011-08-16 09:51:05 -07001894 scheduler = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001895
Mike Frysingerd20a6e22012-10-04 19:01:10 -04001896 clean_logs(emerge.settings)
1897
Mike Frysinger383367e2014-09-16 15:06:17 -04001898 print("Done")
Brian Harring8294d652012-05-23 02:20:52 -07001899 return 0