# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Program to run emerge in parallel, for significant speedup.

Usage:
 ./parallel_emerge [--board=BOARD] [--workon=PKGS]
                   [--force-remote-binary=PKGS] [emerge args] package

This script runs multiple emerge processes in parallel, using appropriate
Portage APIs. It is faster than standard emerge because it has a
multiprocess model instead of an asynchronous model.
"""

from __future__ import print_function

import codecs
import copy
import errno
import gc
import heapq
import multiprocessing
import os
try:
  import Queue
except ImportError:
  # Python 3 renamed the module to "queue". We still import it as Queue to
  # avoid collisions with variables named "queue". Maybe we'll transition at
  # some point.
  # pylint: disable=F0401
  import queue as Queue
import signal
import sys
import tempfile
import threading
import time
import traceback

from chromite.lib import cros_build_lib
from chromite.lib import osutils
from chromite.lib import proctitle

# If PORTAGE_USERNAME isn't specified, scrape it from the $HOME variable. On
# Chromium OS, the default "portage" user doesn't have the necessary
# permissions. It'd be easier if we could default to $USERNAME, but $USERNAME
# is "root" here because we get called through sudo.
#
# We need to set this before importing any portage modules, because portage
# looks up "PORTAGE_USERNAME" at import time.
#
# NOTE: .bashrc sets PORTAGE_USERNAME = $USERNAME, so most people won't
# encounter this case unless they have an old chroot or blow away the
# environment by running sudo without the -E specifier.
if "PORTAGE_USERNAME" not in os.environ:
  homedir = os.environ.get("HOME")
  if homedir:
    os.environ["PORTAGE_USERNAME"] = os.path.basename(homedir)

# Portage doesn't expose dependency trees in its public API, so we have to
# make use of some private APIs here. These modules are found under
# /usr/lib/portage/pym/.
#
# TODO(davidjames): Update Portage to expose public APIs for these features.
# pylint: disable=F0401
from _emerge.actions import adjust_configs
from _emerge.actions import load_emerge_config
from _emerge.create_depgraph_params import create_depgraph_params
from _emerge.depgraph import backtrack_depgraph
try:
  from _emerge.main import clean_logs
except ImportError:
  # Older portage versions did not provide clean_logs, so stub it.
  # We need this if running in an older chroot that hasn't yet upgraded
  # the portage version.
  clean_logs = lambda x: None
from _emerge.main import emerge_main
from _emerge.main import parse_opts
from _emerge.Package import Package
from _emerge.Scheduler import Scheduler
from _emerge.stdout_spinner import stdout_spinner
from portage._global_updates import _global_updates
import portage
import portage.debug
# pylint: enable=F0401


def Usage():
  """Print usage."""
  print("Usage:")
  print(" ./parallel_emerge [--board=BOARD] [--workon=PKGS]")
  print("                   [--rebuild] [emerge args] package")
  print()
  print("Packages specified as workon packages are always built from source.")
  print()
  print("The --workon argument is mainly useful when you want to build and")
  print("install packages that you are working on unconditionally, but do not")
  print("want to have to rev the package to indicate you want to build it from")
  print("source. The build_packages script will automatically supply the")
  print("workon argument to emerge, ensuring that packages selected using")
  print("cros-workon are rebuilt.")
  print()
  print("The --rebuild option rebuilds packages whenever their dependencies")
  print("are changed. This ensures that your build is correct.")


# Global start time
GLOBAL_START = time.time()

# Whether process has been killed by a signal.
KILLED = multiprocessing.Event()


class EmergeData(object):
  """This simple struct holds various emerge variables.

  This struct helps us easily pass emerge variables around as a unit.
  These variables are used for calculating dependencies and installing
  packages.
  """

  __slots__ = ["action", "cmdline_packages", "depgraph", "favorites",
               "mtimedb", "opts", "root_config", "scheduler_graph",
               "settings", "spinner", "trees"]

  def __init__(self):
    # The action the user requested. If the user is installing packages, this
    # is None. If the user is doing anything other than installing packages,
    # this will contain the action name, which will map exactly to the
    # long-form name of the associated emerge option.
    #
    # Example: If you call parallel_emerge --unmerge package, the action name
    # will be "unmerge"
    self.action = None

    # The list of packages the user passed on the command-line.
    self.cmdline_packages = None

    # The emerge dependency graph. It'll contain all the packages involved in
    # this merge, along with their versions.
    self.depgraph = None

    # The list of candidates to add to the world file.
    self.favorites = None

    # A dict of the options passed to emerge. This dict has been cleaned up
    # a bit by parse_opts, so that it's a bit easier for the emerge code to
    # look at the options.
    #
    # Emerge takes a few shortcuts in its cleanup process to make parsing of
    # the options dict easier. For example, if you pass in "--usepkg=n", the
    # "--usepkg" flag is just left out of the dictionary altogether. Because
    # --usepkg=n is the default, this makes parsing easier, because emerge
    # can just assume that if "--usepkg" is in the dictionary, it's enabled.
    #
    # These cleanup processes aren't applied to all options. For example, the
    # --with-bdeps flag is passed in as-is. For a full list of the cleanups
    # applied by emerge, see the parse_opts function in the _emerge.main
    # package.
    self.opts = None

    # A dictionary used by portage to maintain global state. This state is
    # loaded from disk when portage starts up, and saved to disk whenever we
    # call mtimedb.commit().
    #
    # This database contains information about global updates (i.e., what
    # version of portage we have) and what we're currently doing. Portage
    # saves what it is currently doing in this database so that it can be
    # resumed when you call it with the --resume option.
    #
    # parallel_emerge does not save what it is currently doing in the mtimedb,
    # so we do not support the --resume option.
    self.mtimedb = None

    # The portage configuration for our current root. This contains the portage
    # settings (see below) and the three portage trees for our current root.
    # (The three portage trees are explained below, in the documentation for
    # the "trees" member.)
    self.root_config = None

    # The scheduler graph is used by emerge to calculate what packages to
    # install. We don't actually install any deps, so this isn't really used,
    # but we pass it in to the Scheduler object anyway.
    self.scheduler_graph = None

    # Portage settings for our current session. Most of these settings are set
    # in make.conf inside our current install root.
    self.settings = None

    # The spinner, which spews stuff to stdout to indicate that portage is
    # doing something. We maintain our own spinner, so we set the portage
    # spinner to "silent" mode.
    self.spinner = None

    # The portage trees. There are separate portage trees for each root. To get
    # the portage tree for the current root, you can look in self.trees[root],
    # where root = self.settings["ROOT"].
    #
    # In each root, there are three trees: vartree, porttree, and bintree.
    #  - vartree: A database of the currently-installed packages.
    #  - porttree: A database of ebuilds, that can be used to build packages.
    #  - bintree: A database of binary packages.
    self.trees = None


class DepGraphGenerator(object):
  """Grab dependency information about packages from portage.

  Typical usage:
    deps = DepGraphGenerator()
    deps.Initialize(sys.argv[1:])
    deps_tree, deps_info = deps.GenDependencyTree()
    deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
    deps.PrintTree(deps_tree)
    PrintDepsMap(deps_graph)
  """

  __slots__ = ["board", "emerge", "package_db", "show_output", "unpack_only"]

  def __init__(self):
    self.board = None
    self.emerge = EmergeData()
    self.package_db = {}
    self.show_output = False
    self.unpack_only = False

  def ParseParallelEmergeArgs(self, argv):
    """Read the parallel emerge arguments from the command-line.

    We need to be compatible with emerge arg format. We scrape arguments that
    are specific to parallel_emerge, and pass through the rest directly to
    emerge.

    Args:
      argv: arguments list

    Returns:
      Arguments that don't belong to parallel_emerge
    """
    emerge_args = []
    for arg in argv:
      # Specifically match arguments that are specific to parallel_emerge, and
      # pass through the rest.
      if arg.startswith("--board="):
        self.board = arg.replace("--board=", "")
      elif arg.startswith("--workon="):
        workon_str = arg.replace("--workon=", "")
        emerge_args.append("--reinstall-atoms=%s" % workon_str)
        emerge_args.append("--usepkg-exclude=%s" % workon_str)
      elif arg.startswith("--force-remote-binary="):
        force_remote_binary = arg.replace("--force-remote-binary=", "")
        emerge_args.append("--useoldpkg-atoms=%s" % force_remote_binary)
      elif arg == "--show-output":
        self.show_output = True
      elif arg == "--rebuild":
        emerge_args.append("--rebuild-if-unbuilt")
      elif arg == "--unpackonly":
        emerge_args.append("--fetchonly")
        self.unpack_only = True
      else:
        # Not one of our options, so pass through to emerge.
        emerge_args.append(arg)

    # These packages take a really long time to build, so, for expediency, we
    # are blacklisting them from the automatic rebuilds that would otherwise
    # be triggered when one of their dependencies is recompiled.
    for pkg in ("chromeos-base/chromeos-chrome",):
      emerge_args.append("--rebuild-exclude=%s" % pkg)

    return emerge_args

  def Initialize(self, args):
    """Initializer. Parses arguments and sets up portage state."""

    # Parse and strip out args that are just intended for parallel_emerge.
    emerge_args = self.ParseParallelEmergeArgs(args)

    # Setup various environment variables based on our current board. These
    # variables are normally setup inside emerge-${BOARD}, but since we don't
    # call that script, we have to set it up here. These variables serve to
    # point our tools at /build/BOARD and to setup cross compiles to the
    # appropriate board as configured in toolchain.conf.
    if self.board:
      sysroot = cros_build_lib.GetSysroot(board=self.board)
      os.environ["PORTAGE_CONFIGROOT"] = sysroot
      os.environ["PORTAGE_SYSROOT"] = sysroot
      os.environ["SYSROOT"] = sysroot

    # Although CHROMEOS_ROOT isn't specific to boards, it's normally setup
    # inside emerge-${BOARD}, so we set it up here for compatibility. It
    # will be going away soon as we migrate to CROS_WORKON_SRCROOT.
    os.environ.setdefault("CHROMEOS_ROOT", os.environ["HOME"] + "/trunk")

    # Turn off interactive delays
    os.environ["EBEEP_IGNORE"] = "1"
    os.environ["EPAUSE_IGNORE"] = "1"
    os.environ["CLEAN_DELAY"] = "0"

    # Parse the emerge options.
    action, opts, cmdline_packages = parse_opts(emerge_args, silent=True)

    # Set environment variables based on options. Portage normally sets these
    # environment variables in emerge_main, but we can't use that function,
    # because it also does a bunch of other stuff that we don't want.
    # TODO(davidjames): Patch portage to move this logic into a function we can
    # reuse here.
    if "--debug" in opts:
      os.environ["PORTAGE_DEBUG"] = "1"
    if "--config-root" in opts:
      os.environ["PORTAGE_CONFIGROOT"] = opts["--config-root"]
    if "--root" in opts:
      os.environ["ROOT"] = opts["--root"]
    if "--accept-properties" in opts:
      os.environ["ACCEPT_PROPERTIES"] = opts["--accept-properties"]

    # If we're installing packages to the board, we can disable vardb locks.
    # This is safe because we only run up to one instance of parallel_emerge in
    # parallel.
    # TODO(davidjames): Enable this for the host too.
    if self.board:
      os.environ.setdefault("PORTAGE_LOCKS", "false")

    # Now that we've setup the necessary environment variables, we can load the
    # emerge config from disk.
    settings, trees, mtimedb = load_emerge_config()

    # Add in EMERGE_DEFAULT_OPTS, if specified.
    tmpcmdline = []
    if "--ignore-default-opts" not in opts:
      tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
    tmpcmdline.extend(emerge_args)
    action, opts, cmdline_packages = parse_opts(tmpcmdline)

    # If we're installing to the board, we want the --root-deps option so that
    # portage will install the build dependencies to that location as well.
    if self.board:
      opts.setdefault("--root-deps", True)

    # Check whether our portage tree is out of date. Typically, this happens
    # when you're setting up a new portage tree, such as in setup_board and
    # make_chroot. In that case, portage applies a bunch of global updates
    # here. Once the updates are finished, we need to commit any changes
    # that the global update made to our mtimedb, and reload the config.
    #
    # Portage normally handles this logic in emerge_main, but again, we can't
    # use that function here.
    if _global_updates(trees, mtimedb["updates"]):
      mtimedb.commit()
      settings, trees, mtimedb = load_emerge_config(trees=trees)

    # Setup implied options. Portage normally handles this logic in
    # emerge_main.
    if "--buildpkgonly" in opts or "buildpkg" in settings.features:
      opts.setdefault("--buildpkg", True)
    if "--getbinpkgonly" in opts:
      opts.setdefault("--usepkgonly", True)
      opts.setdefault("--getbinpkg", True)
    if "getbinpkg" in settings.features:
      # Per emerge_main, FEATURES=getbinpkg overrides --getbinpkg=n
      opts["--getbinpkg"] = True
    if "--getbinpkg" in opts or "--usepkgonly" in opts:
      opts.setdefault("--usepkg", True)
    if "--fetch-all-uri" in opts:
      opts.setdefault("--fetchonly", True)
    if "--skipfirst" in opts:
      opts.setdefault("--resume", True)
    if "--buildpkgonly" in opts:
      # --buildpkgonly will not merge anything, so it overrides all binary
      # package options.
      for opt in ("--getbinpkg", "--getbinpkgonly",
                  "--usepkg", "--usepkgonly"):
        opts.pop(opt, None)
    if (settings.get("PORTAGE_DEBUG", "") == "1" and
        "python-trace" in settings.features):
      portage.debug.set_trace(True)

    # Complain about unsupported options
    for opt in ("--ask", "--ask-enter-invalid", "--resume", "--skipfirst"):
      if opt in opts:
        print("%s is not supported by parallel_emerge" % opt)
        sys.exit(1)

    # Make emerge specific adjustments to the config (e.g. colors!)
    adjust_configs(opts, trees)

    # Save our configuration so far in the emerge object
    emerge = self.emerge
    emerge.action, emerge.opts = action, opts
    emerge.settings, emerge.trees, emerge.mtimedb = settings, trees, mtimedb
    emerge.cmdline_packages = cmdline_packages
    root = settings["ROOT"]
    emerge.root_config = trees[root]["root_config"]

    if "--usepkg" in opts:
      emerge.trees[root]["bintree"].populate("--getbinpkg" in opts)

  def CreateDepgraph(self, emerge, packages):
    """Create an emerge depgraph object."""
    # Setup emerge options.
    emerge_opts = emerge.opts.copy()

    # Ask portage to build a dependency graph with the options we specified
    # above.
    params = create_depgraph_params(emerge_opts, emerge.action)
    success, depgraph, favorites = backtrack_depgraph(
        emerge.settings, emerge.trees, emerge_opts, params, emerge.action,
        packages, emerge.spinner)
    emerge.depgraph = depgraph

    # Is it impossible to honor the user's request? Bail!
    if not success:
      depgraph.display_problems()
      sys.exit(1)

    emerge.depgraph = depgraph
    emerge.favorites = favorites

    # Prime and flush emerge caches.
    root = emerge.settings["ROOT"]
    vardb = emerge.trees[root]["vartree"].dbapi
    if "--pretend" not in emerge.opts:
      vardb.counter_tick()
    vardb.flush_cache()

  def GenDependencyTree(self):
    """Get dependency tree info from emerge.

    Returns:
      Dependency tree
    """
    start = time.time()

    emerge = self.emerge

    # Create a list of packages to merge
    packages = set(emerge.cmdline_packages[:])

    # Tell emerge to be quiet. We print plenty of info ourselves so we don't
    # need any extra output from portage.
    portage.util.noiselimit = -1

    # My favorite feature: The silent spinner. It doesn't spin. Ever.
    # I'd disable the colors by default too, but they look kind of cool.
    emerge.spinner = stdout_spinner()
    emerge.spinner.update = emerge.spinner.update_quiet

    if "--quiet" not in emerge.opts:
      print("Calculating deps...")

    self.CreateDepgraph(emerge, packages)
    depgraph = emerge.depgraph

    # Build our own tree from the emerge digraph.
    deps_tree = {}
    # pylint: disable=W0212
    digraph = depgraph._dynamic_config.digraph
    root = emerge.settings["ROOT"]
    final_db = get_db(depgraph._dynamic_config, root)
    for node, node_deps in digraph.nodes.items():
      # Calculate dependency packages that need to be installed first. Each
      # child on the digraph is a dependency. The "operation" field specifies
      # what we're doing (e.g. merge, uninstall, etc.). The "priorities" array
      # contains the type of dependency (e.g. build, runtime, runtime_post,
      # etc.)
      #
      # Portage refers to the identifiers for packages as a CPV. This acronym
      # stands for Category/Package/Version.
      #
      # Here's an example CPV: chromeos-base/power_manager-0.0.1-r1
      # Split up, this CPV would be:
      #   C -- Category: chromeos-base
      #   P -- Package: power_manager
      #   V -- Version: 0.0.1-r1
      #
      # We just refer to CPVs as packages here because it's easier.
      deps = {}
      for child, priorities in node_deps[0].items():
        if isinstance(child, Package) and child.root == root:
          cpv = str(child.cpv)
          action = str(child.operation)

          # If we're uninstalling a package, check whether Portage is
          # installing a replacement. If so, just depend on the installation
          # of the new package, because the old package will automatically
          # be uninstalled at that time.
          if action == "uninstall":
            for pkg in final_db.match_pkgs(child.slot_atom):
              cpv = str(pkg.cpv)
              action = "merge"
              break

          deps[cpv] = dict(action=action,
                           deptypes=[str(x) for x in priorities],
                           deps={})

      # We've built our list of deps, so we can add our package to the tree.
      if isinstance(node, Package) and node.root == root:
        deps_tree[str(node.cpv)] = dict(action=str(node.operation),
                                        deps=deps)

    # Ask portage for its install plan, so that we can only throw out
    # dependencies that portage throws out.
    deps_info = {}
    for pkg in depgraph.altlist():
      if isinstance(pkg, Package):
        assert pkg.root == root
        self.package_db[pkg.cpv] = pkg

        # Save off info about the package
        deps_info[str(pkg.cpv)] = {"idx": len(deps_info)}

    seconds = time.time() - start
    if "--quiet" not in emerge.opts:
      print("Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60))

    return deps_tree, deps_info

  def PrintTree(self, deps, depth=""):
    """Print the deps we have seen in the emerge output.

    Args:
      deps: Dependency tree structure.
      depth: Allows printing the tree recursively, with indentation.
    """
    for entry in sorted(deps):
      action = deps[entry]["action"]
      print("%s %s (%s)" % (depth, entry, action))
      self.PrintTree(deps[entry]["deps"], depth=depth + " ")

  def GenDependencyGraph(self, deps_tree, deps_info):
    """Generate a doubly linked dependency graph.

    Args:
      deps_tree: Dependency tree structure.
      deps_info: More details on the dependencies.

    Returns:
      Deps graph in the form of a dict of packages, with each package
      specifying a "needs" list and "provides" list.
    """
    emerge = self.emerge

    # deps_map is the actual dependency graph.
    #
    # Each package specifies a "needs" list and a "provides" list. The "needs"
    # list indicates which packages we depend on. The "provides" list
    # indicates the reverse dependencies -- what packages need us.
    #
    # We also provide some other information in the dependency graph:
    #  - action: What we're planning on doing with this package. Generally,
    #    "merge", "nomerge", or "uninstall"
    deps_map = {}

    def ReverseTree(packages):
      """Convert tree to digraph.

      Take the tree of package -> requirements and reverse it to a digraph of
      buildable packages -> packages they unblock.

      Args:
        packages: Tree(s) of dependencies.

      Returns:
        Unsanitized digraph.
      """
      binpkg_phases = set(["setup", "preinst", "postinst"])
      needed_dep_types = set(["blocker", "buildtime", "buildtime_slot_op",
                              "runtime", "runtime_slot_op"])
      ignored_dep_types = set(["ignored", "optional", "runtime_post", "soft"])
      all_dep_types = ignored_dep_types | needed_dep_types
      for pkg in packages:

        # Create an entry for the package
        action = packages[pkg]["action"]
        default_pkg = {"needs": {}, "provides": set(), "action": action,
                       "nodeps": False, "binary": False}
        this_pkg = deps_map.setdefault(pkg, default_pkg)

        if pkg in deps_info:
          this_pkg["idx"] = deps_info[pkg]["idx"]

        # If a package doesn't have any defined phases that might use the
        # dependent packages (i.e. pkg_setup, pkg_preinst, or pkg_postinst),
        # we can install this package before its deps are ready.
        emerge_pkg = self.package_db.get(pkg)
        if emerge_pkg and emerge_pkg.type_name == "binary":
          this_pkg["binary"] = True
          defined_phases = emerge_pkg.defined_phases
          defined_binpkg_phases = binpkg_phases.intersection(defined_phases)
          if not defined_binpkg_phases:
            this_pkg["nodeps"] = True

        # Create entries for dependencies of this package first.
        ReverseTree(packages[pkg]["deps"])

        # Add dependencies to this package.
        for dep, dep_item in packages[pkg]["deps"].iteritems():
          # We only need to enforce strict ordering of dependencies if the
          # dependency is a blocker, or is a buildtime or runtime dependency.
          # (I.e., ignored, optional, and runtime_post dependencies don't
          # depend on ordering.)
          dep_types = dep_item["deptypes"]
          if needed_dep_types.intersection(dep_types):
            deps_map[dep]["provides"].add(pkg)
            this_pkg["needs"][dep] = "/".join(dep_types)

          # Verify we processed all appropriate dependency types.
          unknown_dep_types = set(dep_types) - all_dep_types
          if unknown_dep_types:
            print("Unknown dependency types found:")
            print("  %s -> %s (%s)" % (pkg, dep, "/".join(unknown_dep_types)))
            sys.exit(1)

          # If there's a blocker, Portage may need to move files from one
          # package to another, which requires editing the CONTENTS files of
          # both packages. To avoid race conditions while editing this file,
          # the two packages must not be installed in parallel, so we can't
          # safely ignore dependencies. See http://crosbug.com/19328
          if "blocker" in dep_types:
            this_pkg["nodeps"] = False

    def FindCycles():
      """Find cycles in the dependency tree.

      Returns:
        A dict mapping cyclic packages to a dict of the deps that cause
        cycles. For each dep that causes cycles, it returns an example
        traversal of the graph that shows the cycle.
      """

      def FindCyclesAtNode(pkg, cycles, unresolved, resolved):
        """Find cycles in cyclic dependencies starting at specified package.

        Args:
          pkg: Package identifier.
          cycles: A dict mapping cyclic packages to a dict of the deps that
            cause cycles. For each dep that causes cycles, it returns an
            example traversal of the graph that shows the cycle.
          unresolved: Nodes that have been visited but are not fully processed.
          resolved: Nodes that have been visited and are fully processed.
        """
        pkg_cycles = cycles.get(pkg)
        if pkg in resolved and not pkg_cycles:
          # If we already looked at this package, and found no cyclic
          # dependencies, we can stop now.
          return
        unresolved.append(pkg)
        for dep in deps_map[pkg]["needs"]:
          if dep in unresolved:
            idx = unresolved.index(dep)
            mycycle = unresolved[idx:] + [dep]
            for i in xrange(len(mycycle) - 1):
              pkg1, pkg2 = mycycle[i], mycycle[i+1]
              cycles.setdefault(pkg1, {}).setdefault(pkg2, mycycle)
          elif not pkg_cycles or dep not in pkg_cycles:
            # Looks like we haven't seen this edge before.
            FindCyclesAtNode(dep, cycles, unresolved, resolved)
        unresolved.pop()
        resolved.add(pkg)

      cycles, unresolved, resolved = {}, [], set()
      for pkg in deps_map:
        FindCyclesAtNode(pkg, cycles, unresolved, resolved)
      return cycles

    def RemoveUnusedPackages():
      """Remove installed packages, propagating dependencies."""
      # Schedule packages that aren't on the install list for removal
      rm_pkgs = set(deps_map.keys()) - set(deps_info.keys())

      # Remove the packages we don't want, simplifying the graph and making
      # it easier for us to crack cycles.
      for pkg in sorted(rm_pkgs):
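        # Splice this package out of the graph: each package it needed now
        # directly provides to (unblocks) each package it provided, and each
        # of those packages now needs this package's dependencies instead, so
        # ordering constraints are preserved after the node is deleted.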
        this_pkg = deps_map[pkg]
        needs = this_pkg["needs"]
        provides = this_pkg["provides"]
        for dep in needs:
          dep_provides = deps_map[dep]["provides"]
          dep_provides.update(provides)
          dep_provides.discard(pkg)
          dep_provides.discard(dep)
        for target in provides:
          target_needs = deps_map[target]["needs"]
          target_needs.update(needs)
          target_needs.pop(pkg, None)
          target_needs.pop(target, None)
        del deps_map[pkg]

    def PrintCycleBreak(basedep, dep, mycycle):
      """Print details about a cycle that we are planning on breaking.

      We are breaking a cycle where dep needs basedep. mycycle is an
      example cycle which contains dep -> basedep.
      """

      needs = deps_map[dep]["needs"]
      depinfo = needs.get(basedep, "deleted")

      # It's OK to swap install order for blockers, as long as the two
      # packages aren't installed in parallel. If there is a cycle, then
      # we know the packages depend on each other already, so we can drop the
      # blocker safely without printing a warning.
      if depinfo == "blocker":
        return

      # Notify the user that we're breaking a cycle.
      print("Breaking %s -> %s (%s)" % (dep, basedep, depinfo))

      # Show cycle.
      for i in xrange(len(mycycle) - 1):
        pkg1, pkg2 = mycycle[i], mycycle[i+1]
        needs = deps_map[pkg1]["needs"]
        depinfo = needs.get(pkg2, "deleted")
        if pkg1 == dep and pkg2 == basedep:
          depinfo = depinfo + ", deleting"
        print("  %s -> %s (%s)" % (pkg1, pkg2, depinfo))

    def SanitizeTree():
      """Remove circular dependencies.

      We prune all dependencies involved in cycles that go against the emerge
      ordering. This has a nice property: we're guaranteed to merge
      dependencies in the same order that portage does.

      Because we don't treat any dependencies as "soft" unless they're killed
      by a cycle, we pay attention to a larger number of dependencies when
      merging. This hurts performance a bit, but helps reliability.
      """
      start = time.time()
      cycles = FindCycles()
      while cycles:
        for dep, mycycles in cycles.iteritems():
          for basedep, mycycle in mycycles.iteritems():
            if deps_info[basedep]["idx"] >= deps_info[dep]["idx"]:
              if "--quiet" not in emerge.opts:
                PrintCycleBreak(basedep, dep, mycycle)
              del deps_map[dep]["needs"][basedep]
              deps_map[basedep]["provides"].remove(dep)
        cycles = FindCycles()
      seconds = time.time() - start
      if "--quiet" not in emerge.opts and seconds >= 0.1:
        print("Tree sanitized in %dm%.1fs" % (seconds / 60, seconds % 60))

    def FindRecursiveProvides(pkg, seen):
      """Find all nodes that require a particular package.

      Assumes that graph is acyclic.

      Args:
        pkg: Package identifier.
        seen: Nodes that have been visited so far.
      """
      if pkg in seen:
        return
      seen.add(pkg)
      info = deps_map[pkg]
      info["tprovides"] = info["provides"].copy()
      for dep in info["provides"]:
        FindRecursiveProvides(dep, seen)
        info["tprovides"].update(deps_map[dep]["tprovides"])

    ReverseTree(deps_tree)

    # We need to remove unused packages so that we can use the dependency
    # ordering of the install process to show us what cycles to crack.
    RemoveUnusedPackages()
    SanitizeTree()
    seen = set()
    for pkg in deps_map:
      FindRecursiveProvides(pkg, seen)
    return deps_map

  def PrintInstallPlan(self, deps_map):
    """Print an emerge-style install plan.

    The install plan lists what packages we're installing, in order.
    It's useful for understanding what parallel_emerge is doing.

    Args:
      deps_map: The dependency graph.
    """

    def InstallPlanAtNode(target, deps_map):
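      # Emit this installable target, then drop it from the "needs" of every
      # package it provides; any package whose needs become empty as a result
      # is itself installable and is emitted recursively.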
      nodes = []
      nodes.append(target)
      for dep in deps_map[target]["provides"]:
        del deps_map[dep]["needs"][target]
        if not deps_map[dep]["needs"]:
          nodes.extend(InstallPlanAtNode(dep, deps_map))
      return nodes

    deps_map = copy.deepcopy(deps_map)
    install_plan = []
    plan = set()
    for target, info in deps_map.iteritems():
      if not info["needs"] and target not in plan:
        for item in InstallPlanAtNode(target, deps_map):
          plan.add(item)
          install_plan.append(self.package_db[item])

    for pkg in plan:
      del deps_map[pkg]

    if deps_map:
      print("Cyclic dependencies:", " ".join(deps_map))
      PrintDepsMap(deps_map)
      sys.exit(1)

    self.emerge.depgraph.display(install_plan)


def PrintDepsMap(deps_map):
  """Print dependency graph; for each package, list its prerequisites."""
  for i in sorted(deps_map):
    print("%s: (%s) needs" % (i, deps_map[i]["action"]))
    needs = deps_map[i]["needs"]
    for j in sorted(needs):
      print("  %s" % (j))
    if not needs:
      print("   no dependencies")


class EmergeJobState(object):
  """Structure describing the EmergeJobState."""

  __slots__ = ["done", "filename", "last_notify_timestamp", "last_output_seek",
               "last_output_timestamp", "pkgname", "retcode", "start_timestamp",
               "target", "fetch_only", "unpack_only"]

  def __init__(self, target, pkgname, done, filename, start_timestamp,
               retcode=None, fetch_only=False, unpack_only=False):

    # The full name of the target we're building (e.g.
    # virtual/target-os-1-r60)
    self.target = target

    # The short name of the target we're building (e.g. target-os-1-r60)
    self.pkgname = pkgname

    # Whether the job is done. (True if the job is done; false otherwise.)
    self.done = done

    # The filename where output is currently stored.
    self.filename = filename

    # The timestamp of the last time we printed the name of the log file. We
    # print this at the beginning of the job, so this starts at
    # start_timestamp.
    self.last_notify_timestamp = start_timestamp

    # The location (in bytes) of the end of the last complete line we printed.
    # This starts off at zero. We use this to jump to the right place when we
    # print output from the same ebuild multiple times.
    self.last_output_seek = 0

    # The timestamp of the last time we printed output. Since we haven't
    # printed output yet, this starts at zero.
    self.last_output_timestamp = 0

    # The return code of our job, if the job is actually finished.
    self.retcode = retcode

    # Was this just a fetch job?
    self.fetch_only = fetch_only

    # The timestamp when our job started.
    self.start_timestamp = start_timestamp

    # No emerge, only unpack packages.
    self.unpack_only = unpack_only


def KillHandler(_signum, _frame):
  # Kill self and all subprocesses.
  os.killpg(0, signal.SIGKILL)


def SetupWorkerSignals():
  def ExitHandler(_signum, _frame):
    # Set KILLED flag.
    KILLED.set()

    # Remove our signal handlers so we don't get called recursively.
    signal.signal(signal.SIGINT, KillHandler)
    signal.signal(signal.SIGTERM, KillHandler)

  # Ensure that we exit quietly and cleanly, if possible, when we receive
  # SIGTERM or SIGINT signals. By default, when the user hits CTRL-C, all
  # of the child processes will print details about KeyboardInterrupt
  # exceptions, which isn't very helpful.
  signal.signal(signal.SIGINT, ExitHandler)
  signal.signal(signal.SIGTERM, ExitHandler)


def EmergeProcess(output, target, *args, **kwargs):
  """Merge a package in a subprocess.

  Args:
    output: Temporary file to write output.
    target: The package we'll be processing (for display purposes).
    *args: Arguments to pass to Scheduler constructor.
    **kwargs: Keyword arguments to pass to Scheduler constructor.

  Returns:
    The exit code returned by the subprocess.
  """
  pid = os.fork()
  if pid == 0:
    try:
      proctitle.settitle('EmergeProcess', target)

      # Sanity checks.
      if sys.stdout.fileno() != 1:
        raise Exception("sys.stdout.fileno() != 1")
      if sys.stderr.fileno() != 2:
        raise Exception("sys.stderr.fileno() != 2")

      # - Redirect 1 (stdout) and 2 (stderr) at our temporary file.
      # - Redirect 0 to point at sys.stdin. In this case, sys.stdin
      #   points at a file reading os.devnull, because multiprocessing mucks
      #   with sys.stdin.
      # - Leave the sys.stdin and output filehandles alone.
      fd_pipes = {0: sys.stdin.fileno(),
                  1: output.fileno(),
                  2: output.fileno(),
                  sys.stdin.fileno(): sys.stdin.fileno(),
                  output.fileno(): output.fileno()}
      # pylint: disable=W0212
      portage.process._setup_pipes(fd_pipes, close_fds=False)

      # Portage doesn't like when sys.stdin.fileno() != 0, so point sys.stdin
      # at the filehandle we just created in _setup_pipes.
      if sys.stdin.fileno() != 0:
        sys.__stdin__ = sys.stdin = os.fdopen(0, "r")

      scheduler = Scheduler(*args, **kwargs)

      # Enable blocker handling even though we're in --nodeps mode. This
      # allows us to unmerge the blocker after we've merged the replacement.
      scheduler._opts_ignore_blockers = frozenset()

      # Actually do the merge.
      retval = scheduler.merge()

    # We catch all exceptions here (including SystemExit, KeyboardInterrupt,
    # etc) so as to ensure that we don't confuse the multiprocessing module,
    # which expects that all forked children exit with os._exit().
    # pylint: disable=W0702
    except:
      traceback.print_exc(file=output)
      retval = 1
    sys.stdout.flush()
    sys.stderr.flush()
    output.flush()
    # pylint: disable=W0212
    os._exit(retval)
  else:
    # Return the exit code of the subprocess.
    return os.waitpid(pid, 0)[1]


def UnpackPackage(pkg_state):
  """Unpacks package described by pkg_state.

  Args:
    pkg_state: EmergeJobState object describing target.

  Returns:
    Exit code returned by subprocess.
  """
  pkgdir = os.environ.get("PKGDIR",
                          os.path.join(os.environ["SYSROOT"], "packages"))
  root = os.environ.get("ROOT", os.environ["SYSROOT"])
  path = os.path.join(pkgdir, pkg_state.target + ".tbz2")
  comp = cros_build_lib.FindCompressor(cros_build_lib.COMP_BZIP2)
  cmd = [comp, "-dc"]
  if comp.endswith("pbzip2"):
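    # Portage .tbz2 binpkgs have an xpak metadata trailer appended after the
    # bzip2 stream; unlike plain bzip2, pbzip2 refuses that trailing data
    # unless told to ignore it.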
    cmd.append("--ignore-trailing-garbage=1")
  cmd.append(path)

  result = cros_build_lib.RunCommand(cmd, cwd=root, stdout_to_pipe=True,
                                     print_cmd=False, error_code_ok=True)

  # If we were not successful, return now and don't attempt untar.
  if result.returncode:
    return result.returncode

  cmd = ["sudo", "tar", "-xf", "-", "-C", root]
  result = cros_build_lib.RunCommand(cmd, cwd=root, input=result.output,
                                     print_cmd=False, error_code_ok=True)

  return result.returncode


def EmergeWorker(task_queue, job_queue, emerge, package_db, fetch_only=False,
                 unpack_only=False):
  """This worker emerges any packages given to it on the task_queue.

  Args:
    task_queue: The queue of tasks for this worker to do.
    job_queue: The queue of results from the worker.
    emerge: An EmergeData() object.
    package_db: A dict, mapping package ids to portage Package objects.
    fetch_only: A bool, indicating if we should just fetch the target.
    unpack_only: A bool, indicating if we should just unpack the target.

  It expects package identifiers to be passed to it via task_queue. When a
  merge starts or finishes, it pushes EmergeJobState objects onto job_queue;
  each job's output is stored in the file named by that state's filename.
  """
  if fetch_only:
    mode = 'fetch'
  elif unpack_only:
    mode = 'unpack'
  else:
    mode = 'emerge'
  proctitle.settitle('EmergeWorker', mode, '[idle]')

  SetupWorkerSignals()
  settings, trees, mtimedb = emerge.settings, emerge.trees, emerge.mtimedb

  # Disable flushing of caches to save on I/O.
  root = emerge.settings["ROOT"]
  vardb = emerge.trees[root]["vartree"].dbapi
  vardb._flush_cache_enabled = False
  bindb = emerge.trees[root]["bintree"].dbapi
  # Might be a set, might be a list, might be None; no clue, just use shallow
  # copy to ensure we can roll it back.
  # pylint: disable=W0212
  original_remotepkgs = copy.copy(bindb.bintree._remotepkgs)

  opts, spinner = emerge.opts, emerge.spinner
  opts["--nodeps"] = True
  if fetch_only:
    opts["--fetchonly"] = True

  while True:
    # Wait for a new item to show up on the queue. This is a blocking wait,
    # so if there's nothing to do, we just sit here.
    pkg_state = task_queue.get()
    if pkg_state is None:
      # If target is None, this means that the main thread wants us to quit.
      # The other workers need to exit too, so we'll push the message back on
      # to the queue so they'll get it too.
      task_queue.put(None)
      return
    if KILLED.is_set():
      return

    target = pkg_state.target
    proctitle.settitle('EmergeWorker', mode, target)

    db_pkg = package_db[target]

    if db_pkg.type_name == "binary":
      if not fetch_only and pkg_state.fetched_successfully:
        # Ensure portage doesn't think our pkg is remote- else it'll force
        # a redownload of it (even if the on-disk file is fine). In-memory
        # caching basically, implemented dumbly.
        bindb.bintree._remotepkgs = None
      else:
        bindb.bintree._remotepkgs = original_remotepkgs

    db_pkg.root_config = emerge.root_config
    install_list = [db_pkg]
    pkgname = db_pkg.pf
    output = tempfile.NamedTemporaryFile(prefix=pkgname + "-", delete=False)
    os.chmod(output.name, 0o644)
    start_timestamp = time.time()
    job = EmergeJobState(target, pkgname, False, output.name, start_timestamp,
                         fetch_only=fetch_only, unpack_only=unpack_only)
    job_queue.put(job)
    if "--pretend" in opts:
      retcode = 0
    else:
      try:
        emerge.scheduler_graph.mergelist = install_list
        if unpack_only:
          retcode = UnpackPackage(pkg_state)
        else:
          retcode = EmergeProcess(output, target, settings, trees, mtimedb,
                                  opts, spinner, favorites=emerge.favorites,
                                  graph_config=emerge.scheduler_graph)
      except Exception:
        traceback.print_exc(file=output)
        retcode = 1
      output.close()

    if KILLED.is_set():
      return

    job = EmergeJobState(target, pkgname, True, output.name, start_timestamp,
                         retcode, fetch_only=fetch_only,
                         unpack_only=unpack_only)
    job_queue.put(job)

    # Set the title back to idle as the multiprocess pool won't destroy us;
    # when another job comes up, it'll re-use this process.
    proctitle.settitle('EmergeWorker', mode, '[idle]')


class LinePrinter(object):
  """Helper object to print a single line."""

  def __init__(self, line):
    self.line = line

  def Print(self, _seek_locations):
    print(self.line)


class JobPrinter(object):
  """Helper object to print output of a job."""

  def __init__(self, job, unlink=False):
    """Print output of job.

    If unlink is True, unlink the job output file when done.
    """
    self.current_time = time.time()
    self.job = job
    self.unlink = unlink

  def Print(self, seek_locations):

    job = self.job

    # Calculate how long the job has been running.
    seconds = self.current_time - job.start_timestamp

    # Note that we've printed out the job so far.
    job.last_output_timestamp = self.current_time

    # Note that we're starting the job
    info = "job %s (%dm%.1fs)" % (job.pkgname, seconds / 60, seconds % 60)
    last_output_seek = seek_locations.get(job.filename, 0)
    if last_output_seek:
      print("=== Continue output for %s ===" % info)
    else:
      print("=== Start output for %s ===" % info)

    # Print actual output from job
    f = codecs.open(job.filename, encoding='utf-8', errors='replace')
    f.seek(last_output_seek)
    prefix = job.pkgname + ":"
    for line in f:

      # Save off our position in the file
      if line and line[-1] == "\n":
        last_output_seek = f.tell()
        line = line[:-1]

      # Print our line
      print(prefix, line.encode('utf-8', 'replace'))
    f.close()

    # Save our last spot in the file so that we don't print out the same
    # location twice.
    seek_locations[job.filename] = last_output_seek

    # Note end of output section
    if job.done:
      print("=== Complete: %s ===" % info)
    else:
      print("=== Still running: %s ===" % info)

    if self.unlink:
      os.unlink(job.filename)


def PrintWorker(queue):
  """A worker that prints stuff to the screen as requested."""
  proctitle.settitle('PrintWorker')

  def ExitHandler(_signum, _frame):
    # Set KILLED flag.
    KILLED.set()

    # Switch to default signal handlers so that we'll die after two signals.
    signal.signal(signal.SIGINT, KillHandler)
    signal.signal(signal.SIGTERM, KillHandler)

  # Don't exit on the first SIGINT / SIGTERM, because the parent worker will
  # handle it and tell us when we need to exit.
  signal.signal(signal.SIGINT, ExitHandler)
  signal.signal(signal.SIGTERM, ExitHandler)

  # seek_locations is a map indicating the position we are at in each file.
  # It starts off empty, but is set by the various Print jobs as we go along
  # to indicate where we left off in each file.
  seek_locations = {}
  while True:
    try:
      job = queue.get()
      if job:
        job.Print(seek_locations)
        sys.stdout.flush()
      else:
        break
    except IOError as ex:
      if ex.errno == errno.EINTR:
        # Looks like we received a signal. Keep printing.
        continue
      raise


class TargetState(object):
Don Garrett25f309a2014-03-19 14:02:12 -07001208 """Structure describing the state of a single build target."""
Brian Harring867e2362012-03-17 04:05:17 -07001209
Brian Harring0be85c62012-03-17 19:52:12 -07001210 __slots__ = ("target", "info", "score", "prefetched", "fetched_successfully")
Brian Harring867e2362012-03-17 04:05:17 -07001211
David James321490a2012-12-17 12:05:56 -08001212 def __init__(self, target, info):
Brian Harring867e2362012-03-17 04:05:17 -07001213 self.target, self.info = target, info
Brian Harring0be85c62012-03-17 19:52:12 -07001214 self.fetched_successfully = False
1215 self.prefetched = False
David James321490a2012-12-17 12:05:56 -08001216 self.score = None
Brian Harring867e2362012-03-17 04:05:17 -07001217 self.update_score()
1218
1219 def __cmp__(self, other):
1220 return cmp(self.score, other.score)
1221
1222 def update_score(self):
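    """Recompute the sort key; lower keys are popped from ScoredHeap first.

    Targets with more entries in "tprovides", fewer outstanding "needs", and
    an available binary package sort earlier.
    """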
1223 self.score = (
1224 -len(self.info["tprovides"]),
Brian Harring0be85c62012-03-17 19:52:12 -07001225 len(self.info["needs"]),
Brian Harring11c5eeb2012-03-18 11:02:39 -07001226 not self.info["binary"],
Brian Harring867e2362012-03-17 04:05:17 -07001227 -len(self.info["provides"]),
1228 self.info["idx"],
1229 self.target,
1230 )
1231
1232
1233class ScoredHeap(object):
Don Garrett25f309a2014-03-19 14:02:12 -07001234 """Implementation of a general-purpose scored heap."""
Brian Harring867e2362012-03-17 04:05:17 -07001235
Brian Harring0be85c62012-03-17 19:52:12 -07001236 __slots__ = ("heap", "_heap_set")
1237
Brian Harring867e2362012-03-17 04:05:17 -07001238 def __init__(self, initial=()):
Brian Harring0be85c62012-03-17 19:52:12 -07001239 self.heap = list()
1240 self._heap_set = set()
1241 if initial:
1242 self.multi_put(initial)
Brian Harring867e2362012-03-17 04:05:17 -07001243
1244 def get(self):
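    """Pop and return the lowest-scored item, removing it from the index set."""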
Brian Harring0be85c62012-03-17 19:52:12 -07001245 item = heapq.heappop(self.heap)
1246 self._heap_set.remove(item.target)
1247 return item
Brian Harring867e2362012-03-17 04:05:17 -07001248
Brian Harring0be85c62012-03-17 19:52:12 -07001249 def put(self, item):
1250 if not isinstance(item, TargetState):
1251 raise ValueError("Item %r isn't a TargetState" % (item,))
1252 heapq.heappush(self.heap, item)
1253 self._heap_set.add(item.target)
Brian Harring867e2362012-03-17 04:05:17 -07001254
Brian Harring0be85c62012-03-17 19:52:12 -07001255 def multi_put(self, sequence):
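    """Add a sequence of TargetState items and restore the heap invariant."""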
1256 sequence = list(sequence)
1257 self.heap.extend(sequence)
1258 self._heap_set.update(x.target for x in sequence)
Brian Harring867e2362012-03-17 04:05:17 -07001259 self.sort()
1260
David James5c9996d2012-03-24 10:50:46 -07001261 def sort(self):
1262 heapq.heapify(self.heap)
1263
Brian Harring0be85c62012-03-17 19:52:12 -07001264 def __contains__(self, target):
1265 return target in self._heap_set
1266
1267 def __nonzero__(self):
1268 return bool(self.heap)
1269
Brian Harring867e2362012-03-17 04:05:17 -07001270 def __len__(self):
1271 return len(self.heap)
1272
1273
David Jamesfcb70ef2011-02-02 16:02:30 -08001274class EmergeQueue(object):
1275 """Class to schedule emerge jobs according to a dependency graph."""
1276
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001277 def __init__(self, deps_map, emerge, package_db, show_output, unpack_only):
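    """Set up scheduling state and start the fetch, build, and print workers.

    An unpack worker pool is also created when unpack_only is set.
    """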
David Jamesfcb70ef2011-02-02 16:02:30 -08001278 # Store the dependency graph.
1279 self._deps_map = deps_map
Brian Harring0be85c62012-03-17 19:52:12 -07001280 self._state_map = {}
David Jamesfcb70ef2011-02-02 16:02:30 -08001281 # Initialize the running queue to empty
Brian Harring0be85c62012-03-17 19:52:12 -07001282 self._build_jobs = {}
1283 self._build_ready = ScoredHeap()
1284 self._fetch_jobs = {}
1285 self._fetch_ready = ScoredHeap()
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001286 self._unpack_jobs = {}
1287 self._unpack_ready = ScoredHeap()
David Jamesfcb70ef2011-02-02 16:02:30 -08001288 # List of all packages to be installed (merged) from deps_map.
1289 install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"]
1290 self._total_jobs = len(install_jobs)
1291 self._show_output = show_output
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001292 self._unpack_only = unpack_only
David Jamesfcb70ef2011-02-02 16:02:30 -08001293
1294 if "--pretend" in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001295 print("Skipping merge because of --pretend mode.")
David Jamesfcb70ef2011-02-02 16:02:30 -08001296 sys.exit(0)
1297
David Jamesaaf49e42014-04-24 09:40:05 -07001298 # Set up a session so we can easily terminate all children.
1299 self._SetupSession()
David James7358d032011-05-19 10:40:03 -07001300
David Jamesfcb70ef2011-02-02 16:02:30 -08001301 # Set up the scheduler graph object. This is used by the child processes
1302 # to help schedule jobs.
1303 emerge.scheduler_graph = emerge.depgraph.schedulerGraph()
1304
1305 # Calculate how many jobs we can run in parallel. We don't want to pass
1306 # the --jobs flag over to emerge itself, because that'll tell emerge to
1307 # hide its output, and said output is quite useful for debugging hung
1308 # jobs.
1309 procs = min(self._total_jobs,
1310 emerge.opts.pop("--jobs", multiprocessing.cpu_count()))
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001311 self._build_procs = self._unpack_procs = self._fetch_procs = max(1, procs)
David James8c7e5e32011-06-28 11:26:03 -07001312 self._load_avg = emerge.opts.pop("--load-average", None)
David Jamesfcb70ef2011-02-02 16:02:30 -08001313 self._job_queue = multiprocessing.Queue()
1314 self._print_queue = multiprocessing.Queue()
Brian Harring0be85c62012-03-17 19:52:12 -07001315
1316 self._fetch_queue = multiprocessing.Queue()
1317 args = (self._fetch_queue, self._job_queue, emerge, package_db, True)
1318 self._fetch_pool = multiprocessing.Pool(self._fetch_procs, EmergeWorker,
1319 args)
1320
1321 self._build_queue = multiprocessing.Queue()
1322 args = (self._build_queue, self._job_queue, emerge, package_db)
1323 self._build_pool = multiprocessing.Pool(self._build_procs, EmergeWorker,
1324 args)
1325
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001326 if self._unpack_only:
1327 # The unpack pool is only required for unpack_only jobs.
1328 self._unpack_queue = multiprocessing.Queue()
1329 args = (self._unpack_queue, self._job_queue, emerge, package_db, False,
1330 True)
1331 self._unpack_pool = multiprocessing.Pool(self._unpack_procs, EmergeWorker,
1332 args)
1333
David Jamesfcb70ef2011-02-02 16:02:30 -08001334 self._print_worker = multiprocessing.Process(target=PrintWorker,
1335 args=[self._print_queue])
1336 self._print_worker.start()
1337
1338 # Initialize the failed queue to empty.
1339 self._retry_queue = []
1340 self._failed = set()
1341
David Jamesfcb70ef2011-02-02 16:02:30 -08001342 # Set up an exit handler so that we print nice messages if we are
1343 # terminated.
1344 self._SetupExitHandler()
1345
1346 # Schedule our jobs.
Brian Harring0be85c62012-03-17 19:52:12 -07001347 self._state_map.update(
1348 (pkg, TargetState(pkg, data)) for pkg, data in deps_map.iteritems())
1349 self._fetch_ready.multi_put(self._state_map.itervalues())
David Jamesfcb70ef2011-02-02 16:02:30 -08001350
David Jamesaaf49e42014-04-24 09:40:05 -07001351 def _SetupSession(self):
1352 """Set up a session so we can easily terminate all children."""
1353 # When we call os.setsid(), this sets up a session / process group for this
1354 # process and all children. These session groups are needed so that we can
1355 # easily kill all children (including processes launched by emerge) before
1356 # we exit.
1357 #
1358 # One unfortunate side effect of os.setsid() is that it blocks CTRL-C from
1359 # being received. To work around this, we only call os.setsid() in a forked
1360 # process, so that the parent can still watch for CTRL-C. The parent will
1361 # just sit around, watching for signals and propagating them to the child,
1362 # until the child exits.
1363 #
1364 # TODO(davidjames): It would be nice if we could replace this with cgroups.
1365 pid = os.fork()
1366 if pid == 0:
1367 os.setsid()
1368 else:
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001369 proctitle.settitle('SessionManager')
1370
David Jamesaaf49e42014-04-24 09:40:05 -07001371 def PropagateToChildren(signum, _frame):
1372 # Just propagate the signals down to the child. We'll exit when the
1373 # child does.
1374 try:
1375 os.kill(pid, signum)
1376 except OSError as ex:
1377 if ex.errno != errno.ESRCH:
1378 raise
1379 signal.signal(signal.SIGINT, PropagateToChildren)
1380 signal.signal(signal.SIGTERM, PropagateToChildren)
1381
1382 def StopGroup(_signum, _frame):
1383 # When we get stopped, stop the children.
1384 try:
1385 os.killpg(pid, signal.SIGSTOP)
1386 os.kill(0, signal.SIGSTOP)
1387 except OSError as ex:
1388 if ex.errno != errno.ESRCH:
1389 raise
1390 signal.signal(signal.SIGTSTP, StopGroup)
1391
1392 def ContinueGroup(_signum, _frame):
1393 # Launch the children again after being stopped.
1394 try:
1395 os.killpg(pid, signal.SIGCONT)
1396 except OSError as ex:
1397 if ex.errno != errno.ESRCH:
1398 raise
1399 signal.signal(signal.SIGCONT, ContinueGroup)
1400
1401 # Loop until the children exit. We exit with os._exit to be sure we
1402 # don't run any finalizers (those will be run by the child process).
1403 # pylint: disable=W0212
1404 while True:
1405 try:
1406 # Wait for the process to exit. When it does, exit with the return
1407 # value of the subprocess.
1408 os._exit(osutils.GetExitStatus(os.waitpid(pid, 0)[1]))
1409 except OSError as ex:
1410 if ex.errno == errno.EINTR:
1411 continue
1412 traceback.print_exc()
1413 os._exit(1)
1414 except BaseException:
1415 traceback.print_exc()
1416 os._exit(1)
1417
David Jamesfcb70ef2011-02-02 16:02:30 -08001418 def _SetupExitHandler(self):
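    """Install SIGINT/SIGTERM handlers that dump job status before exiting."""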
1419
David James321490a2012-12-17 12:05:56 -08001420 def ExitHandler(signum, _frame):
David James7358d032011-05-19 10:40:03 -07001421 # Set KILLED flag.
1422 KILLED.set()
David Jamesfcb70ef2011-02-02 16:02:30 -08001423
1424 # Kill our signal handlers so we don't get called recursively
David James7358d032011-05-19 10:40:03 -07001425 signal.signal(signal.SIGINT, KillHandler)
1426 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -08001427
1428 # Print our current job status
Brian Harring0be85c62012-03-17 19:52:12 -07001429 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001430 if job:
1431 self._print_queue.put(JobPrinter(job, unlink=True))
1432
1433 # Notify the user that we are exiting
1434 self._Print("Exiting on signal %s" % signum)
David James7358d032011-05-19 10:40:03 -07001435 self._print_queue.put(None)
1436 self._print_worker.join()
David Jamesfcb70ef2011-02-02 16:02:30 -08001437
1438 # Kill child threads, then exit.
David James7358d032011-05-19 10:40:03 -07001439 os.killpg(0, signal.SIGKILL)
David Jamesfcb70ef2011-02-02 16:02:30 -08001440 sys.exit(1)
1441
1442 # Print out job status when we are killed
1443 signal.signal(signal.SIGINT, ExitHandler)
1444 signal.signal(signal.SIGTERM, ExitHandler)
1445
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001446 def _ScheduleUnpack(self, pkg_state):
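    """Send a package to the unpack workers and mark it as in flight."""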
1447 self._unpack_jobs[pkg_state.target] = None
1448 self._unpack_queue.put(pkg_state)
1449
Brian Harring0be85c62012-03-17 19:52:12 -07001450 def _Schedule(self, pkg_state):
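    """Schedule a package: finish nomerge entries, otherwise queue a build.

    Returns:
      True if a build job was queued for this package.
    """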
David Jamesfcb70ef2011-02-02 16:02:30 -08001451 # We maintain a tree of all deps; if this one doesn't need
David James8c7e5e32011-06-28 11:26:03 -07001452 # to be installed, just free up its children and continue.
David Jamesfcb70ef2011-02-02 16:02:30 -08001453 # It is possible to reinstall deps of deps without reinstalling
1454 # first-level deps, like so:
Mike Frysingerfd969312014-04-02 22:16:42 -04001455 # virtual/target-os (merge) -> eselect (nomerge) -> python (merge)
Brian Harring0be85c62012-03-17 19:52:12 -07001456 this_pkg = pkg_state.info
1457 target = pkg_state.target
1458 if pkg_state.info is not None:
1459 if this_pkg["action"] == "nomerge":
1460 self._Finish(target)
1461 elif target not in self._build_jobs:
1462 # Kick off the build if it's marked to be built.
1463 self._build_jobs[target] = None
1464 self._build_queue.put(pkg_state)
1465 return True
David Jamesfcb70ef2011-02-02 16:02:30 -08001466
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001467 def _ScheduleLoop(self, unpack_only=False):
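    """Move ready packages to the build (or unpack) workers.

    Respects the configured number of worker processes and, if set, the
    --load-average limit.
    """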
1468 if unpack_only:
1469 ready_queue = self._unpack_ready
1470 jobs_queue = self._unpack_jobs
1471 procs = self._unpack_procs
1472 else:
1473 ready_queue = self._build_ready
1474 jobs_queue = self._build_jobs
1475 procs = self._build_procs
1476
David James8c7e5e32011-06-28 11:26:03 -07001477 # If the current load exceeds our desired load average, don't schedule
1478 # more than one job.
1479 if self._load_avg and os.getloadavg()[0] > self._load_avg:
1480 needed_jobs = 1
1481 else:
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001482 needed_jobs = procs
David James8c7e5e32011-06-28 11:26:03 -07001483
1484 # Schedule more jobs.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001485 while ready_queue and len(jobs_queue) < needed_jobs:
1486 state = ready_queue.get()
1487 if unpack_only:
1488 self._ScheduleUnpack(state)
1489 else:
1490 if state.target not in self._failed:
1491 self._Schedule(state)
David Jamesfcb70ef2011-02-02 16:02:30 -08001492
1493 def _Print(self, line):
1494 """Print a single line."""
1495 self._print_queue.put(LinePrinter(line))
1496
1497 def _Status(self):
1498 """Print status."""
1499 current_time = time.time()
1500 no_output = True
1501
1502 # Print interim output every minute if --show-output is used. Otherwise,
1503 # print notifications about running packages every 2 minutes, and print
1504 # full output for jobs that have been running for 60 minutes or more.
1505 if self._show_output:
1506 interval = 60
1507 notify_interval = 0
1508 else:
1509 interval = 60 * 60
1510 notify_interval = 60 * 2
David James321490a2012-12-17 12:05:56 -08001511 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001512 if job:
1513 last_timestamp = max(job.start_timestamp, job.last_output_timestamp)
1514 if last_timestamp + interval < current_time:
1515 self._print_queue.put(JobPrinter(job))
1516 job.last_output_timestamp = current_time
1517 no_output = False
1518 elif (notify_interval and
1519 job.last_notify_timestamp + notify_interval < current_time):
1520 job_seconds = current_time - job.start_timestamp
1521 args = (job.pkgname, job_seconds / 60, job_seconds % 60, job.filename)
1522 info = "Still building %s (%dm%.1fs). Logs in %s" % args
1523 job.last_notify_timestamp = current_time
1524 self._Print(info)
1525 no_output = False
1526
1527 # If we haven't printed any messages yet, print a general status message
1528 # here.
1529 if no_output:
1530 seconds = current_time - GLOBAL_START
Brian Harring0be85c62012-03-17 19:52:12 -07001531 fjobs, fready = len(self._fetch_jobs), len(self._fetch_ready)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001532 ujobs, uready = len(self._unpack_jobs), len(self._unpack_ready)
Brian Harring0be85c62012-03-17 19:52:12 -07001533 bjobs, bready = len(self._build_jobs), len(self._build_ready)
1534 retries = len(self._retry_queue)
1535 pending = max(0, len(self._deps_map) - fjobs - bjobs)
1536 line = "Pending %s/%s, " % (pending, self._total_jobs)
1537 if fjobs or fready:
1538 line += "Fetching %s/%s, " % (fjobs, fready + fjobs)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001539 if ujobs or uready:
1540 line += "Unpacking %s/%s, " % (ujobs, uready + ujobs)
Brian Harring0be85c62012-03-17 19:52:12 -07001541 if bjobs or bready or retries:
1542 line += "Building %s/%s, " % (bjobs, bready + bjobs)
1543 if retries:
1544 line += "Retrying %s, " % (retries,)
David James8c7e5e32011-06-28 11:26:03 -07001545 load = " ".join(str(x) for x in os.getloadavg())
Brian Harring0be85c62012-03-17 19:52:12 -07001546 line += ("[Time %dm%.1fs Load %s]" % (seconds / 60, seconds % 60, load))
1547 self._Print(line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001548
1549 def _Finish(self, target):
David James8c7e5e32011-06-28 11:26:03 -07001550 """Mark a target as completed and unblock dependencies."""
1551 this_pkg = self._deps_map[target]
1552 if this_pkg["needs"] and this_pkg["nodeps"]:
1553 # We got installed, but our deps have not been installed yet. Dependent
1554 # packages should only be installed when our needs have been fully met.
1555 this_pkg["action"] = "nomerge"
1556 else:
David James8c7e5e32011-06-28 11:26:03 -07001557 for dep in this_pkg["provides"]:
1558 dep_pkg = self._deps_map[dep]
Brian Harring0be85c62012-03-17 19:52:12 -07001559 state = self._state_map[dep]
David James8c7e5e32011-06-28 11:26:03 -07001560 del dep_pkg["needs"][target]
Brian Harring0be85c62012-03-17 19:52:12 -07001561 state.update_score()
1562 if not state.prefetched:
1563 if dep in self._fetch_ready:
1564 # If it's not currently being fetched, update the prioritization
1565 self._fetch_ready.sort()
1566 elif not dep_pkg["needs"]:
David James8c7e5e32011-06-28 11:26:03 -07001567 if dep_pkg["nodeps"] and dep_pkg["action"] == "nomerge":
1568 self._Finish(dep)
1569 else:
Brian Harring0be85c62012-03-17 19:52:12 -07001570 self._build_ready.put(self._state_map[dep])
David James8c7e5e32011-06-28 11:26:03 -07001571 self._deps_map.pop(target)
David Jamesfcb70ef2011-02-02 16:02:30 -08001572
1573 def _Retry(self):
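    """Reschedule previously failed packages, one at a time."""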
David James8c7e5e32011-06-28 11:26:03 -07001574 while self._retry_queue:
Brian Harring0be85c62012-03-17 19:52:12 -07001575 state = self._retry_queue.pop(0)
1576 if self._Schedule(state):
1577 self._Print("Retrying emerge of %s." % state.target)
David James8c7e5e32011-06-28 11:26:03 -07001578 break
David Jamesfcb70ef2011-02-02 16:02:30 -08001579
Brian Harringa43f5952012-04-12 01:19:34 -07001580 def _Shutdown(self):
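    """Stop the worker pools and close their queues, then the print worker."""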
David Jamesfcb70ef2011-02-02 16:02:30 -08001581 # Tell emerge workers to exit. They all exit when 'None' is pushed
1582 # to the queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001583
Brian Harringa43f5952012-04-12 01:19:34 -07001584 # Shut down the workers first, then the job queue (which is how they feed
1585 # things back), then finally the print queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001586
Brian Harringa43f5952012-04-12 01:19:34 -07001587 def _stop(queue, pool):
1588 if pool is None:
1589 return
1590 try:
1591 queue.put(None)
1592 pool.close()
1593 pool.join()
1594 finally:
1595 pool.terminate()
Brian Harring0be85c62012-03-17 19:52:12 -07001596
Brian Harringa43f5952012-04-12 01:19:34 -07001597 _stop(self._fetch_queue, self._fetch_pool)
1598 self._fetch_queue = self._fetch_pool = None
Brian Harring0be85c62012-03-17 19:52:12 -07001599
Brian Harringa43f5952012-04-12 01:19:34 -07001600 _stop(self._build_queue, self._build_pool)
1601 self._build_queue = self._build_pool = None
1602
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001603 if self._unpack_only:
1604 _stop(self._unpack_queue, self._unpack_pool)
1605 self._unpack_queue = self._unpack_pool = None
1606
Brian Harringa43f5952012-04-12 01:19:34 -07001607 if self._job_queue is not None:
1608 self._job_queue.close()
1609 self._job_queue = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001610
1611 # Now that our workers are finished, we can kill the print queue.
Brian Harringa43f5952012-04-12 01:19:34 -07001612 if self._print_worker is not None:
1613 try:
1614 self._print_queue.put(None)
1615 self._print_queue.close()
1616 self._print_worker.join()
1617 finally:
1618 self._print_worker.terminate()
1619 self._print_queue = self._print_worker = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001620
1621 def Run(self):
1622 """Run through the scheduled ebuilds.
1623
1624 Keep running so long as we have uninstalled packages in the
1625 dependency graph to merge.
1626 """
Brian Harringa43f5952012-04-12 01:19:34 -07001627 if not self._deps_map:
1628 return
1629
Brian Harring0be85c62012-03-17 19:52:12 -07001630 # Start the fetchers.
1631 for _ in xrange(min(self._fetch_procs, len(self._fetch_ready))):
1632 state = self._fetch_ready.get()
1633 self._fetch_jobs[state.target] = None
1634 self._fetch_queue.put(state)
1635
1636 # Print an update, then get going.
1637 self._Status()
1638
David Jamese703d0f2012-01-12 16:27:45 -08001639 retried = set()
David Jamesfcb70ef2011-02-02 16:02:30 -08001640 while self._deps_map:
1641 # Check here that we are actually waiting for something.
Brian Harring0be85c62012-03-17 19:52:12 -07001642 if (self._build_queue.empty() and
David Jamesfcb70ef2011-02-02 16:02:30 -08001643 self._job_queue.empty() and
Brian Harring0be85c62012-03-17 19:52:12 -07001644 not self._fetch_jobs and
1645 not self._fetch_ready and
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001646 not self._unpack_jobs and
1647 not self._unpack_ready and
Brian Harring0be85c62012-03-17 19:52:12 -07001648 not self._build_jobs and
1649 not self._build_ready and
David Jamesfcb70ef2011-02-02 16:02:30 -08001650 self._deps_map):
1651 # If we have failed on a package, retry it now.
1652 if self._retry_queue:
1653 self._Retry()
1654 else:
David Jamesfcb70ef2011-02-02 16:02:30 -08001655 # Tell the user why we're exiting.
1656 if self._failed:
Mike Frysinger383367e2014-09-16 15:06:17 -04001657 print('Packages failed:\n\t%s' % '\n\t'.join(self._failed))
David James0eae23e2012-07-03 15:04:25 -07001658 status_file = os.environ.get("PARALLEL_EMERGE_STATUS_FILE")
1659 if status_file:
David James321490a2012-12-17 12:05:56 -08001660 failed_pkgs = set(portage.versions.cpv_getkey(x)
1661 for x in self._failed)
David James0eae23e2012-07-03 15:04:25 -07001662 with open(status_file, "a") as f:
1663 f.write("%s\n" % " ".join(failed_pkgs))
David Jamesfcb70ef2011-02-02 16:02:30 -08001664 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001665 print("Deadlock! Circular dependencies!")
David Jamesfcb70ef2011-02-02 16:02:30 -08001666 sys.exit(1)
1667
David James321490a2012-12-17 12:05:56 -08001668 for _ in xrange(12):
David Jamesa74289a2011-08-12 10:41:24 -07001669 try:
1670 job = self._job_queue.get(timeout=5)
1671 break
1672 except Queue.Empty:
1673 # Check if any more jobs can be scheduled.
1674 self._ScheduleLoop()
1675 else:
Brian Harring706747c2012-03-16 03:04:31 -07001676 # Print an update every 60 seconds.
David Jamesfcb70ef2011-02-02 16:02:30 -08001677 self._Status()
1678 continue
1679
1680 target = job.target
1681
Brian Harring0be85c62012-03-17 19:52:12 -07001682 if job.fetch_only:
1683 if not job.done:
1684 self._fetch_jobs[job.target] = job
1685 else:
1686 state = self._state_map[job.target]
1687 state.prefetched = True
1688 state.fetched_successfully = (job.retcode == 0)
1689 del self._fetch_jobs[job.target]
1690 self._Print("Fetched %s in %2.2fs"
1691 % (target, time.time() - job.start_timestamp))
1692
1693 if self._show_output or job.retcode != 0:
1694 self._print_queue.put(JobPrinter(job, unlink=True))
1695 else:
1696 os.unlink(job.filename)
1697 # Failure or not, let the build stage work with it next.
1698 if not self._deps_map[job.target]["needs"]:
1699 self._build_ready.put(state)
1700 self._ScheduleLoop()
1701
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001702 if self._unpack_only and job.retcode == 0:
1703 self._unpack_ready.put(state)
1704 self._ScheduleLoop(unpack_only=True)
1705
Brian Harring0be85c62012-03-17 19:52:12 -07001706 if self._fetch_ready:
1707 state = self._fetch_ready.get()
1708 self._fetch_queue.put(state)
1709 self._fetch_jobs[state.target] = None
1710 else:
1711 # Minor optimization; shut down fetchers early since we know
1712 # the queue is empty.
1713 self._fetch_queue.put(None)
1714 continue
1715
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001716 if job.unpack_only:
1717 if not job.done:
1718 self._unpack_jobs[target] = job
1719 else:
1720 del self._unpack_jobs[target]
1721 self._Print("Unpacked %s in %2.2fs"
1722 % (target, time.time() - job.start_timestamp))
1723 if self._show_output or job.retcode != 0:
1724 self._print_queue.put(JobPrinter(job, unlink=True))
1725 else:
1726 os.unlink(job.filename)
1727 if self._unpack_ready:
1728 state = self._unpack_ready.get()
1729 self._unpack_queue.put(state)
1730 self._unpack_jobs[state.target] = None
1731 continue
1732
David Jamesfcb70ef2011-02-02 16:02:30 -08001733 if not job.done:
Brian Harring0be85c62012-03-17 19:52:12 -07001734 self._build_jobs[target] = job
David Jamesfcb70ef2011-02-02 16:02:30 -08001735 self._Print("Started %s (logged in %s)" % (target, job.filename))
1736 continue
1737
1738 # Print output of job
1739 if self._show_output or job.retcode != 0:
1740 self._print_queue.put(JobPrinter(job, unlink=True))
1741 else:
1742 os.unlink(job.filename)
Brian Harring0be85c62012-03-17 19:52:12 -07001743 del self._build_jobs[target]
David Jamesfcb70ef2011-02-02 16:02:30 -08001744
1745 seconds = time.time() - job.start_timestamp
1746 details = "%s (in %dm%.1fs)" % (target, seconds / 60, seconds % 60)
David James32420cc2011-08-25 21:32:46 -07001747 previously_failed = target in self._failed
David Jamesfcb70ef2011-02-02 16:02:30 -08001748
1749 # Complain if necessary.
1750 if job.retcode != 0:
1751 # Handle job failure.
David James32420cc2011-08-25 21:32:46 -07001752 if previously_failed:
David Jamesfcb70ef2011-02-02 16:02:30 -08001753 # If this job has failed previously, give up.
1754 self._Print("Failed %s. Your build has failed." % details)
1755 else:
1756 # Queue up this build to try again after a long while.
David Jamese703d0f2012-01-12 16:27:45 -08001757 retried.add(target)
Brian Harring0be85c62012-03-17 19:52:12 -07001758 self._retry_queue.append(self._state_map[target])
David Jamesfcb70ef2011-02-02 16:02:30 -08001759 self._failed.add(target)
1760 self._Print("Failed %s, retrying later." % details)
1761 else:
David James32420cc2011-08-25 21:32:46 -07001762 if previously_failed:
1763 # Remove target from list of failed packages.
1764 self._failed.remove(target)
1765
1766 self._Print("Completed %s" % details)
1767
1768 # Mark as completed and unblock waiting ebuilds.
1769 self._Finish(target)
1770
1771 if previously_failed and self._retry_queue:
David Jamesfcb70ef2011-02-02 16:02:30 -08001772 # If we have successfully retried a failed package, and there
1773 # are more failed packages, try the next one. We will only have
1774 # one retrying package actively running at a time.
1775 self._Retry()
1776
David Jamesfcb70ef2011-02-02 16:02:30 -08001777
David James8c7e5e32011-06-28 11:26:03 -07001778 # Schedule pending jobs and print an update.
1779 self._ScheduleLoop()
1780 self._Status()
David Jamesfcb70ef2011-02-02 16:02:30 -08001781
David Jamese703d0f2012-01-12 16:27:45 -08001782 # If packages were retried, output a warning.
1783 if retried:
1784 self._Print("")
1785 self._Print("WARNING: The following packages failed the first time,")
1786 self._Print("but succeeded upon retry. This might indicate incorrect")
1787 self._Print("dependencies.")
1788 for pkg in retried:
1789 self._Print(" %s" % pkg)
1790 self._Print("@@@STEP_WARNINGS@@@")
1791 self._Print("")
1792
David Jamesfcb70ef2011-02-02 16:02:30 -08001793 # Tell child threads to exit.
1794 self._Print("Merge complete")
David Jamesfcb70ef2011-02-02 16:02:30 -08001795
1796
Brian Harring30675052012-02-29 12:18:22 -08001797def main(argv):
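  """Run real_main, then work around multiprocessing's cleanup shortcomings."""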
Brian Harring8294d652012-05-23 02:20:52 -07001798 try:
1799 return real_main(argv)
1800 finally:
1801 # Work around multiprocessing sucking and not cleaning up after itself.
1802 # http://bugs.python.org/issue4106;
1803 # Step one: ensure GC is run *prior* to the VM starting shutdown.
1804 gc.collect()
1805 # Step two: go looking for those threads and try to manually reap
1806 # them if we can.
1807 for x in threading.enumerate():
1808 # Filter on the name, and ident; if ident is None, the thread
1809 # wasn't started.
1810 if x.name == 'QueueFeederThread' and x.ident is not None:
1811 x.join(1)
David Jamesfcb70ef2011-02-02 16:02:30 -08001812
Brian Harring8294d652012-05-23 02:20:52 -07001813
Bertrand SIMONNETb35e19e2014-07-28 16:29:58 -07001814def get_db(config, root):
Mike Frysinger33fbccb2014-09-05 17:09:07 -04001815 """Return the dbapi.

Bertrand SIMONNETb35e19e2014-07-28 16:29:58 -07001816 Handles both portage 2.1.11 and 2.2.10 (where mydbapi has been removed).
1817
1818 TODO(bsimonnet): Remove this once portage has been uprevd.
1819 """
1820 try:
1821 return config.mydbapi[root]
1822 except AttributeError:
1823 # pylint: disable=W0212
1824 return config._filtered_trees[root]['graph_db']
1825
1826
Brian Harring8294d652012-05-23 02:20:52 -07001827def real_main(argv):
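  """Generate the dependency graph and run the scheduled emerge jobs."""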
Brian Harring30675052012-02-29 12:18:22 -08001828 parallel_emerge_args = argv[:]
David Jamesfcb70ef2011-02-02 16:02:30 -08001829 deps = DepGraphGenerator()
Brian Harring30675052012-02-29 12:18:22 -08001830 deps.Initialize(parallel_emerge_args)
David Jamesfcb70ef2011-02-02 16:02:30 -08001831 emerge = deps.emerge
1832
1833 if emerge.action is not None:
Brian Harring30675052012-02-29 12:18:22 -08001834 argv = deps.ParseParallelEmergeArgs(argv)
Brian Harring8294d652012-05-23 02:20:52 -07001835 return emerge_main(argv)
David Jamesfcb70ef2011-02-02 16:02:30 -08001836 elif not emerge.cmdline_packages:
1837 Usage()
Brian Harring8294d652012-05-23 02:20:52 -07001838 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001839
1840 # Unless we're in pretend mode, there's not much point running without
1841 # root access. We need to be able to install packages.
1842 #
1843 # NOTE: Even if you're running --pretend, it's a good idea to run
1844 # parallel_emerge with root access so that portage can write to the
1845 # dependency cache. This is important for performance.
David James321490a2012-12-17 12:05:56 -08001846 if "--pretend" not in emerge.opts and portage.data.secpass < 2:
Mike Frysinger383367e2014-09-16 15:06:17 -04001847 print("parallel_emerge: superuser access is required.")
Brian Harring8294d652012-05-23 02:20:52 -07001848 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001849
1850 if "--quiet" not in emerge.opts:
1851 cmdline_packages = " ".join(emerge.cmdline_packages)
Mike Frysinger383367e2014-09-16 15:06:17 -04001852 print("Starting fast-emerge.")
1853 print(" Building package %s on %s" % (cmdline_packages,
1854 deps.board or "root"))
David Jamesfcb70ef2011-02-02 16:02:30 -08001855
David James386ccd12011-05-04 20:17:42 -07001856 deps_tree, deps_info = deps.GenDependencyTree()
David Jamesfcb70ef2011-02-02 16:02:30 -08001857
1858 # You want me to be verbose? I'll give you two trees! Twice as much value.
1859 if "--tree" in emerge.opts and "--verbose" in emerge.opts:
1860 deps.PrintTree(deps_tree)
1861
David James386ccd12011-05-04 20:17:42 -07001862 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001863
1864 # OK, time to print out our progress so far.
1865 deps.PrintInstallPlan(deps_graph)
1866 if "--tree" in emerge.opts:
1867 PrintDepsMap(deps_graph)
1868
1869 # Are we upgrading portage? If so, and there are more packages to merge,
1870 # schedule a restart of parallel_emerge to merge the rest. This ensures that
1871 # we pick up all updates to portage settings before merging any more
1872 # packages.
1873 portage_upgrade = False
1874 root = emerge.settings["ROOT"]
Don Garrett25f309a2014-03-19 14:02:12 -07001875 # pylint: disable=W0212
Bertrand SIMONNETb35e19e2014-07-28 16:29:58 -07001876 final_db = get_db(emerge.depgraph._dynamic_config, root)
David Jamesfcb70ef2011-02-02 16:02:30 -08001877 if root == "/":
1878 for db_pkg in final_db.match_pkgs("sys-apps/portage"):
1879 portage_pkg = deps_graph.get(db_pkg.cpv)
David James0ff16f22012-11-02 14:18:07 -07001880 if portage_pkg:
David Jamesfcb70ef2011-02-02 16:02:30 -08001881 portage_upgrade = True
1882 if "--quiet" not in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001883 print("Upgrading portage first, then restarting...")
David Jamesfcb70ef2011-02-02 16:02:30 -08001884
David James0ff16f22012-11-02 14:18:07 -07001885 # Upgrade Portage first, then the rest of the packages.
1886 #
1887 # In order to grant the child permission to run setsid, we need to run sudo
1888 # again. We preserve SUDO_USER here in case an ebuild depends on it.
1889 if portage_upgrade:
1890 # Calculate what arguments to use when re-invoking.
1891 args = ["sudo", "-E", "SUDO_USER=%s" % os.environ.get("SUDO_USER", "")]
1892 args += [os.path.abspath(sys.argv[0])] + parallel_emerge_args
1893 args += ["--exclude=sys-apps/portage"]
1894
1895 # First upgrade Portage.
1896 passthrough_args = ("--quiet", "--pretend", "--verbose")
1897 emerge_args = [k for k in emerge.opts if k in passthrough_args]
1898 ret = emerge_main(emerge_args + ["portage"])
1899 if ret != 0:
1900 return ret
1901
1902 # Now upgrade the rest.
1903 os.execvp(args[0], args)
1904
David Jamesfcb70ef2011-02-02 16:02:30 -08001905 # Run the queued emerges.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001906 scheduler = EmergeQueue(deps_graph, emerge, deps.package_db, deps.show_output,
1907 deps.unpack_only)
Brian Harringa43f5952012-04-12 01:19:34 -07001908 try:
1909 scheduler.Run()
1910 finally:
Don Garrett25f309a2014-03-19 14:02:12 -07001911 # pylint: disable=W0212
Brian Harringa43f5952012-04-12 01:19:34 -07001912 scheduler._Shutdown()
David James97ce8902011-08-16 09:51:05 -07001913 scheduler = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001914
Mike Frysingerd20a6e22012-10-04 19:01:10 -04001915 clean_logs(emerge.settings)
1916
Mike Frysinger383367e2014-09-16 15:06:17 -04001917 print("Done")
Brian Harring8294d652012-05-23 02:20:52 -07001918 return 0