blob: 196363206114957e7a2e6011940535d5706e4102 [file] [log] [blame]
Mike Frysinger0a647fc2012-08-06 14:36:05 -04001# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
David Jamesfcb70ef2011-02-02 16:02:30 -08002# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5"""Program to run emerge in parallel, for significant speedup.
6
7Usage:
David James386ccd12011-05-04 20:17:42 -07008 ./parallel_emerge [--board=BOARD] [--workon=PKGS]
David Jamesfcb70ef2011-02-02 16:02:30 -08009 [--force-remote-binary=PKGS] [emerge args] package
10
David James78b6cd92012-04-02 21:36:12 -070011This script runs multiple emerge processes in parallel, using appropriate
12Portage APIs. It is faster than standard emerge because it has a
13multiprocess model instead of an asynchronous model.
David Jamesfcb70ef2011-02-02 16:02:30 -080014"""
15
Mike Frysinger383367e2014-09-16 15:06:17 -040016from __future__ import print_function
17
David Jamesfcb70ef2011-02-02 16:02:30 -080018import codecs
19import copy
20import errno
Brian Harring8294d652012-05-23 02:20:52 -070021import gc
David James8c7e5e32011-06-28 11:26:03 -070022import heapq
David Jamesfcb70ef2011-02-02 16:02:30 -080023import multiprocessing
24import os
Mike Frysinger1ae28092013-10-17 17:17:22 -040025try:
26 import Queue
27except ImportError:
28 # Python-3 renamed to "queue". We still use Queue to avoid collisions
29 # with naming variables as "queue". Maybe we'll transition at some point.
30 # pylint: disable=F0401
31 import queue as Queue
David Jamesfcb70ef2011-02-02 16:02:30 -080032import signal
33import sys
34import tempfile
Brian Harring8294d652012-05-23 02:20:52 -070035import threading
David Jamesfcb70ef2011-02-02 16:02:30 -080036import time
37import traceback
David Jamesfcb70ef2011-02-02 16:02:30 -080038
Thiago Goncalesf4acc422013-07-17 10:26:35 -070039from chromite.lib import cros_build_lib
David Jamesaaf49e42014-04-24 09:40:05 -070040from chromite.lib import osutils
Mike Frysingerd74fe4a2014-04-24 11:43:38 -040041from chromite.lib import proctitle
Thiago Goncalesf4acc422013-07-17 10:26:35 -070042
David Jamesfcb70ef2011-02-02 16:02:30 -080043# If PORTAGE_USERNAME isn't specified, scrape it from the $HOME variable. On
44# Chromium OS, the default "portage" user doesn't have the necessary
45# permissions. It'd be easier if we could default to $USERNAME, but $USERNAME
46# is "root" here because we get called through sudo.
47#
48# We need to set this before importing any portage modules, because portage
49# looks up "PORTAGE_USERNAME" at import time.
50#
51# NOTE: .bashrc sets PORTAGE_USERNAME = $USERNAME, so most people won't
52# encounter this case unless they have an old chroot or blow away the
53# environment by running sudo without the -E specifier.
# Derive PORTAGE_USERNAME from $HOME when it was not provided (see the
# explanation above for why this must happen before portage is imported).
home = os.environ.get("HOME")
if "PORTAGE_USERNAME" not in os.environ and home:
  os.environ["PORTAGE_USERNAME"] = os.path.basename(home)
58
59# Portage doesn't expose dependency trees in its public API, so we have to
60# make use of some private APIs here. These modules are found under
61# /usr/lib/portage/pym/.
62#
63# TODO(davidjames): Update Portage to expose public APIs for these features.
Don Garrett25f309a2014-03-19 14:02:12 -070064# pylint: disable=F0401
David Jamesfcb70ef2011-02-02 16:02:30 -080065from _emerge.actions import adjust_configs
66from _emerge.actions import load_emerge_config
67from _emerge.create_depgraph_params import create_depgraph_params
David James386ccd12011-05-04 20:17:42 -070068from _emerge.depgraph import backtrack_depgraph
Mike Frysinger901eaad2012-10-10 18:18:03 -040069try:
70 from _emerge.main import clean_logs
71except ImportError:
72 # Older portage versions did not provide clean_logs, so stub it.
73 # We need this if running in an older chroot that hasn't yet upgraded
74 # the portage version.
75 clean_logs = lambda x: None
David Jamesfcb70ef2011-02-02 16:02:30 -080076from _emerge.main import emerge_main
77from _emerge.main import parse_opts
78from _emerge.Package import Package
79from _emerge.Scheduler import Scheduler
David Jamesfcb70ef2011-02-02 16:02:30 -080080from _emerge.stdout_spinner import stdout_spinner
David James386ccd12011-05-04 20:17:42 -070081from portage._global_updates import _global_updates
David Jamesfcb70ef2011-02-02 16:02:30 -080082import portage
83import portage.debug
Don Garrettf8bf7842014-03-20 17:03:42 -070084# pylint: enable=F0401
Mike Frysinger91d7da92013-02-19 15:53:46 -050085
David Jamesfcb70ef2011-02-02 16:02:30 -080086
def Usage():
  """Print a usage/help message for parallel_emerge to stdout."""
  # NOTE: the original help text contained a garbled sentence ("but do not
  # to have to rev the package"); reworded to "without having to rev".
  print("Usage:")
  print(" ./parallel_emerge [--board=BOARD] [--workon=PKGS]")
  print(" [--rebuild] [emerge args] package")
  print()
  print("Packages specified as workon packages are always built from source.")
  print()
  print("The --workon argument is mainly useful when you want to build and")
  print("install packages that you are working on unconditionally, without")
  print("having to rev the package to indicate you want to build it from")
  print("source. The build_packages script will automatically supply the")
  print("workon argument to emerge, ensuring that packages selected using")
  print("cros-workon are rebuilt.")
  print()
  print("The --rebuild option rebuilds packages whenever their dependencies")
  print("are changed. This ensures that your build is correct.")
David Jamesfcb70ef2011-02-02 16:02:30 -0800104
105
# Global start time of this invocation, captured at module import.
GLOBAL_START = time.time()

# Whether process has been killed by a signal. Stored in a
# multiprocessing.Event so the flag is also visible to child processes.
KILLED = multiprocessing.Event()
111
David Jamesfcb70ef2011-02-02 16:02:30 -0800112
class EmergeData(object):
  """Bundle of emerge variables passed around this script as a unit.

  All attributes start as None and are filled in while calculating
  dependencies and installing packages.

  Attributes:
    action: Long-form name of the requested emerge action (e.g. "unmerge"),
      or None when the user is simply installing packages. Example:
      "parallel_emerge --unmerge package" yields action "unmerge".
    cmdline_packages: The packages the user passed on the command line.
    depgraph: The emerge dependency graph — every package involved in this
      merge, along with its version.
    favorites: Candidate atoms to add to the world file.
    opts: Options passed to emerge, after cleanup by parse_opts. Note that
      parse_opts drops flags that are at their default (e.g. "--usepkg=n"
      simply disappears from the dict), so presence generally means
      "enabled"; a few flags such as --with-bdeps are passed through as-is.
      See parse_opts in the _emerge.main package for the full cleanup list.
    mtimedb: Portage's persistent global-state database, loaded at startup
      and saved via mtimedb.commit(). Portage records its in-progress work
      here to support --resume; parallel_emerge never records its own state,
      so --resume is unsupported.
    root_config: Portage configuration for the current root: the settings
      plus the three portage trees for that root (see "trees" below).
    scheduler_graph: Graph handed to portage's Scheduler object. We don't
      actually install deps through it, but the Scheduler requires it.
    settings: Portage settings for this session, mostly from make.conf in
      the current install root.
    spinner: Portage's stdout spinner. We keep our own spinner, so this one
      is put in silent mode.
    trees: Per-root portage trees, indexed as trees[settings["ROOT"]]. Each
      root holds three trees: vartree (installed packages), porttree
      (ebuilds) and bintree (binary packages).
  """

  __slots__ = ["action", "cmdline_packages", "depgraph", "favorites",
               "mtimedb", "opts", "root_config", "scheduler_graph",
               "settings", "spinner", "trees"]

  def __init__(self):
    # Everything starts out unset; the DepGraphGenerator fills these in.
    for slot in self.__slots__:
      setattr(self, slot, None)
203
204
205class DepGraphGenerator(object):
206 """Grab dependency information about packages from portage.
207
208 Typical usage:
209 deps = DepGraphGenerator()
210 deps.Initialize(sys.argv[1:])
211 deps_tree, deps_info = deps.GenDependencyTree()
212 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
213 deps.PrintTree(deps_tree)
214 PrintDepsMap(deps_graph)
215 """
216
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700217 __slots__ = ["board", "emerge", "package_db", "show_output", "unpack_only"]
David Jamesfcb70ef2011-02-02 16:02:30 -0800218
  def __init__(self):
    # Target board name (from --board), or None when emerging for the host.
    self.board = None
    # Shared emerge/portage state; filled in by Initialize().
    self.emerge = EmergeData()
    # Maps package CPV -> portage Package object (see GenDependencyTree).
    self.package_db = {}
    # Set by the --show-output flag.
    self.show_output = False
    # Set by the --unpackonly flag.
    self.unpack_only = False
David Jamesfcb70ef2011-02-02 16:02:30 -0800225
226 def ParseParallelEmergeArgs(self, argv):
227 """Read the parallel emerge arguments from the command-line.
228
229 We need to be compatible with emerge arg format. We scrape arguments that
230 are specific to parallel_emerge, and pass through the rest directly to
231 emerge.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500232
David Jamesfcb70ef2011-02-02 16:02:30 -0800233 Args:
234 argv: arguments list
Mike Frysinger1a736a82013-12-12 01:50:59 -0500235
David Jamesfcb70ef2011-02-02 16:02:30 -0800236 Returns:
237 Arguments that don't belong to parallel_emerge
238 """
239 emerge_args = []
240 for arg in argv:
241 # Specifically match arguments that are specific to parallel_emerge, and
242 # pass through the rest.
243 if arg.startswith("--board="):
244 self.board = arg.replace("--board=", "")
245 elif arg.startswith("--workon="):
246 workon_str = arg.replace("--workon=", "")
David James7a1ea4b2011-10-13 15:06:41 -0700247 emerge_args.append("--reinstall-atoms=%s" % workon_str)
248 emerge_args.append("--usepkg-exclude=%s" % workon_str)
David Jamesfcb70ef2011-02-02 16:02:30 -0800249 elif arg.startswith("--force-remote-binary="):
250 force_remote_binary = arg.replace("--force-remote-binary=", "")
David James7a1ea4b2011-10-13 15:06:41 -0700251 emerge_args.append("--useoldpkg-atoms=%s" % force_remote_binary)
David Jamesfcb70ef2011-02-02 16:02:30 -0800252 elif arg == "--show-output":
253 self.show_output = True
David James386ccd12011-05-04 20:17:42 -0700254 elif arg == "--rebuild":
David James7a1ea4b2011-10-13 15:06:41 -0700255 emerge_args.append("--rebuild-if-unbuilt")
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700256 elif arg == "--unpackonly":
257 emerge_args.append("--fetchonly")
258 self.unpack_only = True
David Jamesfcb70ef2011-02-02 16:02:30 -0800259 else:
260 # Not one of our options, so pass through to emerge.
261 emerge_args.append(arg)
262
David James386ccd12011-05-04 20:17:42 -0700263 # These packages take a really long time to build, so, for expediency, we
264 # are blacklisting them from automatic rebuilds because one of their
265 # dependencies needs to be recompiled.
Mike Frysinger5c2a9052014-04-15 15:52:04 -0400266 for pkg in ("chromeos-base/chromeos-chrome",):
David James7a1ea4b2011-10-13 15:06:41 -0700267 emerge_args.append("--rebuild-exclude=%s" % pkg)
David Jamesfcb70ef2011-02-02 16:02:30 -0800268
269 return emerge_args
270
  def Initialize(self, args):
    """Initializer. Parses arguments and sets up portage state.

    Args:
      args: Full command line (parallel_emerge flags, emerge flags, and
        package atoms), as accepted by ParseParallelEmergeArgs.

    Side effects:
      Mutates os.environ (board sysroot and portage knobs), loads the emerge
      configuration from disk into self.emerge, and calls sys.exit(1) if an
      unsupported emerge option was passed.
    """

    # Parse and strip out args that are just intended for parallel_emerge.
    emerge_args = self.ParseParallelEmergeArgs(args)

    # Setup various environment variables based on our current board. These
    # variables are normally setup inside emerge-${BOARD}, but since we don't
    # call that script, we have to set it up here. These variables serve to
    # point our tools at /build/BOARD and to setup cross compiles to the
    # appropriate board as configured in toolchain.conf.
    if self.board:
      sysroot = cros_build_lib.GetSysroot(board=self.board)
      os.environ["PORTAGE_CONFIGROOT"] = sysroot
      os.environ["PORTAGE_SYSROOT"] = sysroot
      os.environ["SYSROOT"] = sysroot
      # This enables licensing in gen-package-licenses.sh.
      # TODO(dgarrett): Remove when it's no longer optional.
      os.environ["ENABLE_LICENSING"] = "Y"

    # Although CHROMEOS_ROOT isn't specific to boards, it's normally setup
    # inside emerge-${BOARD}, so we set it up here for compatibility. It
    # will be going away soon as we migrate to CROS_WORKON_SRCROOT.
    os.environ.setdefault("CHROMEOS_ROOT", os.environ["HOME"] + "/trunk")

    # Turn off interactive delays
    os.environ["EBEEP_IGNORE"] = "1"
    os.environ["EPAUSE_IGNORE"] = "1"
    os.environ["CLEAN_DELAY"] = "0"

    # Parse the emerge options. This first pass is only used for the
    # environment setup and option checks below; action/opts/cmdline_packages
    # are re-parsed later once EMERGE_DEFAULT_OPTS has been read from the
    # on-disk config.
    action, opts, cmdline_packages = parse_opts(emerge_args, silent=True)

    # Set environment variables based on options. Portage normally sets these
    # environment variables in emerge_main, but we can't use that function,
    # because it also does a bunch of other stuff that we don't want.
    # TODO(davidjames): Patch portage to move this logic into a function we can
    # reuse here.
    if "--debug" in opts:
      os.environ["PORTAGE_DEBUG"] = "1"
    if "--config-root" in opts:
      os.environ["PORTAGE_CONFIGROOT"] = opts["--config-root"]
    if "--root" in opts:
      os.environ["ROOT"] = opts["--root"]
    if "--accept-properties" in opts:
      os.environ["ACCEPT_PROPERTIES"] = opts["--accept-properties"]

    # If we're installing packages to the board, we can disable vardb locks.
    # This is safe because we only run up to one instance of parallel_emerge in
    # parallel.
    # TODO(davidjames): Enable this for the host too.
    if self.board:
      os.environ.setdefault("PORTAGE_LOCKS", "false")

    # Now that we've setup the necessary environment variables, we can load the
    # emerge config from disk.
    settings, trees, mtimedb = load_emerge_config()

    # Add in EMERGE_DEFAULT_OPTS, if specified, and re-parse the full
    # command line.
    tmpcmdline = []
    if "--ignore-default-opts" not in opts:
      tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
    tmpcmdline.extend(emerge_args)
    action, opts, cmdline_packages = parse_opts(tmpcmdline)

    # If we're installing to the board, we want the --root-deps option so that
    # portage will install the build dependencies to that location as well.
    if self.board:
      opts.setdefault("--root-deps", True)

    # Check whether our portage tree is out of date. Typically, this happens
    # when you're setting up a new portage tree, such as in setup_board and
    # make_chroot. In that case, portage applies a bunch of global updates
    # here. Once the updates are finished, we need to commit any changes
    # that the global update made to our mtimedb, and reload the config.
    #
    # Portage normally handles this logic in emerge_main, but again, we can't
    # use that function here.
    if _global_updates(trees, mtimedb["updates"]):
      mtimedb.commit()
      settings, trees, mtimedb = load_emerge_config(trees=trees)

    # Setup implied options. Portage normally handles this logic in
    # emerge_main.
    if "--buildpkgonly" in opts or "buildpkg" in settings.features:
      opts.setdefault("--buildpkg", True)
    if "--getbinpkgonly" in opts:
      opts.setdefault("--usepkgonly", True)
      opts.setdefault("--getbinpkg", True)
    if "getbinpkg" in settings.features:
      # Per emerge_main, FEATURES=getbinpkg overrides --getbinpkg=n
      opts["--getbinpkg"] = True
    if "--getbinpkg" in opts or "--usepkgonly" in opts:
      opts.setdefault("--usepkg", True)
    if "--fetch-all-uri" in opts:
      opts.setdefault("--fetchonly", True)
    if "--skipfirst" in opts:
      opts.setdefault("--resume", True)
    if "--buildpkgonly" in opts:
      # --buildpkgonly will not merge anything, so it overrides all binary
      # package options.
      for opt in ("--getbinpkg", "--getbinpkgonly",
                  "--usepkg", "--usepkgonly"):
        opts.pop(opt, None)
    if (settings.get("PORTAGE_DEBUG", "") == "1" and
        "python-trace" in settings.features):
      portage.debug.set_trace(True)

    # Complain about unsupported options
    for opt in ("--ask", "--ask-enter-invalid", "--resume", "--skipfirst"):
      if opt in opts:
        print("%s is not supported by parallel_emerge" % opt)
        sys.exit(1)

    # Make emerge specific adjustments to the config (e.g. colors!)
    adjust_configs(opts, trees)

    # Save our configuration so far in the emerge object
    emerge = self.emerge
    emerge.action, emerge.opts = action, opts
    emerge.settings, emerge.trees, emerge.mtimedb = settings, trees, mtimedb
    emerge.cmdline_packages = cmdline_packages
    root = settings["ROOT"]
    emerge.root_config = trees[root]["root_config"]

    if "--usepkg" in opts:
      # Populate the binary package tree; passing True also fetches remote
      # binhost metadata when --getbinpkg is set.
      emerge.trees[root]["bintree"].populate("--getbinpkg" in opts)
398
David Jamesfcb70ef2011-02-02 16:02:30 -0800399 def CreateDepgraph(self, emerge, packages):
400 """Create an emerge depgraph object."""
401 # Setup emerge options.
402 emerge_opts = emerge.opts.copy()
403
David James386ccd12011-05-04 20:17:42 -0700404 # Ask portage to build a dependency graph. with the options we specified
405 # above.
David Jamesfcb70ef2011-02-02 16:02:30 -0800406 params = create_depgraph_params(emerge_opts, emerge.action)
David Jamesbf1e3442011-05-28 07:44:20 -0700407 success, depgraph, favorites = backtrack_depgraph(
David James386ccd12011-05-04 20:17:42 -0700408 emerge.settings, emerge.trees, emerge_opts, params, emerge.action,
409 packages, emerge.spinner)
410 emerge.depgraph = depgraph
David Jamesfcb70ef2011-02-02 16:02:30 -0800411
David James386ccd12011-05-04 20:17:42 -0700412 # Is it impossible to honor the user's request? Bail!
413 if not success:
414 depgraph.display_problems()
415 sys.exit(1)
David Jamesfcb70ef2011-02-02 16:02:30 -0800416
417 emerge.depgraph = depgraph
David Jamesbf1e3442011-05-28 07:44:20 -0700418 emerge.favorites = favorites
David Jamesfcb70ef2011-02-02 16:02:30 -0800419
David Jamesdeebd692011-05-09 17:02:52 -0700420 # Prime and flush emerge caches.
421 root = emerge.settings["ROOT"]
422 vardb = emerge.trees[root]["vartree"].dbapi
David James0bdc5de2011-05-12 16:22:26 -0700423 if "--pretend" not in emerge.opts:
424 vardb.counter_tick()
David Jamesdeebd692011-05-09 17:02:52 -0700425 vardb.flush_cache()
426
  def GenDependencyTree(self):
    """Get dependency tree info from emerge.

    Runs the depgraph calculation (CreateDepgraph) and converts portage's
    digraph into a plain nested-dict structure.

    Returns:
      A (deps_tree, deps_info) pair:
        deps_tree: dict mapping each CPV to
          {"action": ..., "deps": {dep_cpv: {"action", "deptypes", "deps"}}}.
        deps_info: dict mapping each CPV in portage's own install plan to
          {"idx": its position in depgraph.altlist()}.
    """
    start = time.time()

    emerge = self.emerge

    # Create a list of packages to merge
    packages = set(emerge.cmdline_packages[:])

    # Tell emerge to be quiet. We print plenty of info ourselves so we don't
    # need any extra output from portage.
    portage.util.noiselimit = -1

    # My favorite feature: The silent spinner. It doesn't spin. Ever.
    # I'd disable the colors by default too, but they look kind of cool.
    emerge.spinner = stdout_spinner()
    emerge.spinner.update = emerge.spinner.update_quiet

    if "--quiet" not in emerge.opts:
      print("Calculating deps...")

    self.CreateDepgraph(emerge, packages)
    depgraph = emerge.depgraph

    # Build our own tree from the emerge digraph.
    deps_tree = {}
    # pylint: disable=W0212
    digraph = depgraph._dynamic_config.digraph
    root = emerge.settings["ROOT"]
    # NOTE(review): get_db is a helper defined elsewhere in this file;
    # presumably it returns the final package database for |root| — confirm.
    final_db = get_db(depgraph._dynamic_config, root)
    for node, node_deps in digraph.nodes.items():
      # Calculate dependency packages that need to be installed first. Each
      # child on the digraph is a dependency. The "operation" field specifies
      # what we're doing (e.g. merge, uninstall, etc.). The "priorities" array
      # contains the type of dependency (e.g. build, runtime, runtime_post,
      # etc.)
      #
      # Portage refers to the identifiers for packages as a CPV. This acronym
      # stands for Component/Path/Version.
      #
      # Here's an example CPV: chromeos-base/power_manager-0.0.1-r1
      # Split up, this CPV would be:
      # C -- Component: chromeos-base
      # P -- Path: power_manager
      # V -- Version: 0.0.1-r1
      #
      # We just refer to CPVs as packages here because it's easier.
      deps = {}
      for child, priorities in node_deps[0].items():
        if isinstance(child, Package) and child.root == root:
          cpv = str(child.cpv)
          action = str(child.operation)

          # If we're uninstalling a package, check whether Portage is
          # installing a replacement. If so, just depend on the installation
          # of the new package, because the old package will automatically
          # be uninstalled at that time.
          if action == "uninstall":
            for pkg in final_db.match_pkgs(child.slot_atom):
              cpv = str(pkg.cpv)
              action = "merge"
              break

          deps[cpv] = dict(action=action,
                           deptypes=[str(x) for x in priorities],
                           deps={})

      # We've built our list of deps, so we can add our package to the tree.
      if isinstance(node, Package) and node.root == root:
        deps_tree[str(node.cpv)] = dict(action=str(node.operation),
                                        deps=deps)

    # Ask portage for its install plan, so that we can only throw out
    # dependencies that portage throws out.
    deps_info = {}
    for pkg in depgraph.altlist():
      if isinstance(pkg, Package):
        assert pkg.root == root
        self.package_db[pkg.cpv] = pkg

        # Save off info about the package
        deps_info[str(pkg.cpv)] = {"idx": len(deps_info)}

    seconds = time.time() - start
    if "--quiet" not in emerge.opts:
      print("Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60))

    return deps_tree, deps_info
519
520 def PrintTree(self, deps, depth=""):
521 """Print the deps we have seen in the emerge output.
522
523 Args:
524 deps: Dependency tree structure.
525 depth: Allows printing the tree recursively, with indentation.
526 """
527 for entry in sorted(deps):
528 action = deps[entry]["action"]
Mike Frysinger383367e2014-09-16 15:06:17 -0400529 print("%s %s (%s)" % (depth, entry, action))
David Jamesfcb70ef2011-02-02 16:02:30 -0800530 self.PrintTree(deps[entry]["deps"], depth=depth + " ")
531
David James386ccd12011-05-04 20:17:42 -0700532 def GenDependencyGraph(self, deps_tree, deps_info):
David Jamesfcb70ef2011-02-02 16:02:30 -0800533 """Generate a doubly linked dependency graph.
534
535 Args:
536 deps_tree: Dependency tree structure.
537 deps_info: More details on the dependencies.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500538
David Jamesfcb70ef2011-02-02 16:02:30 -0800539 Returns:
540 Deps graph in the form of a dict of packages, with each package
541 specifying a "needs" list and "provides" list.
542 """
543 emerge = self.emerge
David Jamesfcb70ef2011-02-02 16:02:30 -0800544
David Jamesfcb70ef2011-02-02 16:02:30 -0800545 # deps_map is the actual dependency graph.
546 #
547 # Each package specifies a "needs" list and a "provides" list. The "needs"
548 # list indicates which packages we depend on. The "provides" list
549 # indicates the reverse dependencies -- what packages need us.
550 #
551 # We also provide some other information in the dependency graph:
552 # - action: What we're planning on doing with this package. Generally,
553 # "merge", "nomerge", or "uninstall"
David Jamesfcb70ef2011-02-02 16:02:30 -0800554 deps_map = {}
555
556 def ReverseTree(packages):
557 """Convert tree to digraph.
558
559 Take the tree of package -> requirements and reverse it to a digraph of
560 buildable packages -> packages they unblock.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500561
David Jamesfcb70ef2011-02-02 16:02:30 -0800562 Args:
563 packages: Tree(s) of dependencies.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500564
David Jamesfcb70ef2011-02-02 16:02:30 -0800565 Returns:
566 Unsanitized digraph.
567 """
David James8c7e5e32011-06-28 11:26:03 -0700568 binpkg_phases = set(["setup", "preinst", "postinst"])
David James3f778802011-08-25 19:31:45 -0700569 needed_dep_types = set(["blocker", "buildtime", "runtime"])
David Jamesfcb70ef2011-02-02 16:02:30 -0800570 for pkg in packages:
571
572 # Create an entry for the package
573 action = packages[pkg]["action"]
David James8c7e5e32011-06-28 11:26:03 -0700574 default_pkg = {"needs": {}, "provides": set(), "action": action,
575 "nodeps": False, "binary": False}
David Jamesfcb70ef2011-02-02 16:02:30 -0800576 this_pkg = deps_map.setdefault(pkg, default_pkg)
577
David James8c7e5e32011-06-28 11:26:03 -0700578 if pkg in deps_info:
579 this_pkg["idx"] = deps_info[pkg]["idx"]
580
581 # If a package doesn't have any defined phases that might use the
582 # dependent packages (i.e. pkg_setup, pkg_preinst, or pkg_postinst),
583 # we can install this package before its deps are ready.
584 emerge_pkg = self.package_db.get(pkg)
585 if emerge_pkg and emerge_pkg.type_name == "binary":
586 this_pkg["binary"] = True
Mike Frysinger66652ec2014-04-24 11:42:25 -0400587 defined_phases = emerge_pkg.defined_phases
David James8c7e5e32011-06-28 11:26:03 -0700588 defined_binpkg_phases = binpkg_phases.intersection(defined_phases)
589 if not defined_binpkg_phases:
590 this_pkg["nodeps"] = True
591
David Jamesfcb70ef2011-02-02 16:02:30 -0800592 # Create entries for dependencies of this package first.
593 ReverseTree(packages[pkg]["deps"])
594
595 # Add dependencies to this package.
596 for dep, dep_item in packages[pkg]["deps"].iteritems():
David James8c7e5e32011-06-28 11:26:03 -0700597 # We only need to enforce strict ordering of dependencies if the
David James3f778802011-08-25 19:31:45 -0700598 # dependency is a blocker, or is a buildtime or runtime dependency.
599 # (I.e., ignored, optional, and runtime_post dependencies don't
600 # depend on ordering.)
David James8c7e5e32011-06-28 11:26:03 -0700601 dep_types = dep_item["deptypes"]
602 if needed_dep_types.intersection(dep_types):
603 deps_map[dep]["provides"].add(pkg)
604 this_pkg["needs"][dep] = "/".join(dep_types)
David Jamesfcb70ef2011-02-02 16:02:30 -0800605
David James3f778802011-08-25 19:31:45 -0700606 # If there's a blocker, Portage may need to move files from one
607 # package to another, which requires editing the CONTENTS files of
608 # both packages. To avoid race conditions while editing this file,
609 # the two packages must not be installed in parallel, so we can't
610 # safely ignore dependencies. See http://crosbug.com/19328
611 if "blocker" in dep_types:
612 this_pkg["nodeps"] = False
613
David Jamesfcb70ef2011-02-02 16:02:30 -0800614 def FindCycles():
615 """Find cycles in the dependency tree.
616
617 Returns:
618 A dict mapping cyclic packages to a dict of the deps that cause
619 cycles. For each dep that causes cycles, it returns an example
620 traversal of the graph that shows the cycle.
621 """
622
623 def FindCyclesAtNode(pkg, cycles, unresolved, resolved):
624 """Find cycles in cyclic dependencies starting at specified package.
625
626 Args:
627 pkg: Package identifier.
628 cycles: A dict mapping cyclic packages to a dict of the deps that
629 cause cycles. For each dep that causes cycles, it returns an
630 example traversal of the graph that shows the cycle.
631 unresolved: Nodes that have been visited but are not fully processed.
632 resolved: Nodes that have been visited and are fully processed.
633 """
634 pkg_cycles = cycles.get(pkg)
635 if pkg in resolved and not pkg_cycles:
636 # If we already looked at this package, and found no cyclic
637 # dependencies, we can stop now.
638 return
639 unresolved.append(pkg)
640 for dep in deps_map[pkg]["needs"]:
641 if dep in unresolved:
642 idx = unresolved.index(dep)
643 mycycle = unresolved[idx:] + [dep]
David James321490a2012-12-17 12:05:56 -0800644 for i in xrange(len(mycycle) - 1):
David Jamesfcb70ef2011-02-02 16:02:30 -0800645 pkg1, pkg2 = mycycle[i], mycycle[i+1]
646 cycles.setdefault(pkg1, {}).setdefault(pkg2, mycycle)
647 elif not pkg_cycles or dep not in pkg_cycles:
648 # Looks like we haven't seen this edge before.
649 FindCyclesAtNode(dep, cycles, unresolved, resolved)
650 unresolved.pop()
651 resolved.add(pkg)
652
653 cycles, unresolved, resolved = {}, [], set()
654 for pkg in deps_map:
655 FindCyclesAtNode(pkg, cycles, unresolved, resolved)
656 return cycles
657
David James386ccd12011-05-04 20:17:42 -0700658 def RemoveUnusedPackages():
David Jamesfcb70ef2011-02-02 16:02:30 -0800659 """Remove installed packages, propagating dependencies."""
David Jamesfcb70ef2011-02-02 16:02:30 -0800660 # Schedule packages that aren't on the install list for removal
661 rm_pkgs = set(deps_map.keys()) - set(deps_info.keys())
662
David Jamesfcb70ef2011-02-02 16:02:30 -0800663 # Remove the packages we don't want, simplifying the graph and making
664 # it easier for us to crack cycles.
665 for pkg in sorted(rm_pkgs):
666 this_pkg = deps_map[pkg]
667 needs = this_pkg["needs"]
668 provides = this_pkg["provides"]
669 for dep in needs:
670 dep_provides = deps_map[dep]["provides"]
671 dep_provides.update(provides)
672 dep_provides.discard(pkg)
673 dep_provides.discard(dep)
674 for target in provides:
675 target_needs = deps_map[target]["needs"]
676 target_needs.update(needs)
677 target_needs.pop(pkg, None)
678 target_needs.pop(target, None)
679 del deps_map[pkg]
680
681 def PrintCycleBreak(basedep, dep, mycycle):
682 """Print details about a cycle that we are planning on breaking.
683
Mike Frysinger02e1e072013-11-10 22:11:34 -0500684 We are breaking a cycle where dep needs basedep. mycycle is an
685 example cycle which contains dep -> basedep.
686 """
David Jamesfcb70ef2011-02-02 16:02:30 -0800687
David Jamesfcb70ef2011-02-02 16:02:30 -0800688 needs = deps_map[dep]["needs"]
689 depinfo = needs.get(basedep, "deleted")
David Jamesfcb70ef2011-02-02 16:02:30 -0800690
David James3f778802011-08-25 19:31:45 -0700691 # It's OK to swap install order for blockers, as long as the two
692 # packages aren't installed in parallel. If there is a cycle, then
693 # we know the packages depend on each other already, so we can drop the
694 # blocker safely without printing a warning.
695 if depinfo == "blocker":
696 return
697
David Jamesfcb70ef2011-02-02 16:02:30 -0800698 # Notify the user that we're breaking a cycle.
Mike Frysinger383367e2014-09-16 15:06:17 -0400699 print("Breaking %s -> %s (%s)" % (dep, basedep, depinfo))
David Jamesfcb70ef2011-02-02 16:02:30 -0800700
701 # Show cycle.
David James321490a2012-12-17 12:05:56 -0800702 for i in xrange(len(mycycle) - 1):
David Jamesfcb70ef2011-02-02 16:02:30 -0800703 pkg1, pkg2 = mycycle[i], mycycle[i+1]
704 needs = deps_map[pkg1]["needs"]
705 depinfo = needs.get(pkg2, "deleted")
706 if pkg1 == dep and pkg2 == basedep:
707 depinfo = depinfo + ", deleting"
Mike Frysinger383367e2014-09-16 15:06:17 -0400708 print(" %s -> %s (%s)" % (pkg1, pkg2, depinfo))
David Jamesfcb70ef2011-02-02 16:02:30 -0800709
710 def SanitizeTree():
711 """Remove circular dependencies.
712
713 We prune all dependencies involved in cycles that go against the emerge
714 ordering. This has a nice property: we're guaranteed to merge
715 dependencies in the same order that portage does.
716
717 Because we don't treat any dependencies as "soft" unless they're killed
718 by a cycle, we pay attention to a larger number of dependencies when
719 merging. This hurts performance a bit, but helps reliability.
720 """
721 start = time.time()
722 cycles = FindCycles()
723 while cycles:
724 for dep, mycycles in cycles.iteritems():
725 for basedep, mycycle in mycycles.iteritems():
726 if deps_info[basedep]["idx"] >= deps_info[dep]["idx"]:
Matt Tennant08797302011-10-17 16:18:45 -0700727 if "--quiet" not in emerge.opts:
728 PrintCycleBreak(basedep, dep, mycycle)
David Jamesfcb70ef2011-02-02 16:02:30 -0800729 del deps_map[dep]["needs"][basedep]
730 deps_map[basedep]["provides"].remove(dep)
731 cycles = FindCycles()
732 seconds = time.time() - start
733 if "--quiet" not in emerge.opts and seconds >= 0.1:
Mike Frysinger383367e2014-09-16 15:06:17 -0400734 print("Tree sanitized in %dm%.1fs" % (seconds / 60, seconds % 60))
David Jamesfcb70ef2011-02-02 16:02:30 -0800735
David James8c7e5e32011-06-28 11:26:03 -0700736 def FindRecursiveProvides(pkg, seen):
737 """Find all nodes that require a particular package.
738
739 Assumes that graph is acyclic.
740
741 Args:
742 pkg: Package identifier.
743 seen: Nodes that have been visited so far.
744 """
745 if pkg in seen:
746 return
747 seen.add(pkg)
748 info = deps_map[pkg]
749 info["tprovides"] = info["provides"].copy()
750 for dep in info["provides"]:
751 FindRecursiveProvides(dep, seen)
752 info["tprovides"].update(deps_map[dep]["tprovides"])
753
David Jamesa22906f2011-05-04 19:53:26 -0700754 ReverseTree(deps_tree)
David Jamesa22906f2011-05-04 19:53:26 -0700755
David James386ccd12011-05-04 20:17:42 -0700756 # We need to remove unused packages so that we can use the dependency
757 # ordering of the install process to show us what cycles to crack.
758 RemoveUnusedPackages()
David Jamesfcb70ef2011-02-02 16:02:30 -0800759 SanitizeTree()
David James8c7e5e32011-06-28 11:26:03 -0700760 seen = set()
761 for pkg in deps_map:
762 FindRecursiveProvides(pkg, seen)
David Jamesfcb70ef2011-02-02 16:02:30 -0800763 return deps_map
764
765 def PrintInstallPlan(self, deps_map):
766 """Print an emerge-style install plan.
767
768 The install plan lists what packages we're installing, in order.
769 It's useful for understanding what parallel_emerge is doing.
770
771 Args:
772 deps_map: The dependency graph.
773 """
774
775 def InstallPlanAtNode(target, deps_map):
776 nodes = []
777 nodes.append(target)
778 for dep in deps_map[target]["provides"]:
779 del deps_map[dep]["needs"][target]
780 if not deps_map[dep]["needs"]:
781 nodes.extend(InstallPlanAtNode(dep, deps_map))
782 return nodes
783
784 deps_map = copy.deepcopy(deps_map)
785 install_plan = []
786 plan = set()
787 for target, info in deps_map.iteritems():
788 if not info["needs"] and target not in plan:
789 for item in InstallPlanAtNode(target, deps_map):
790 plan.add(item)
791 install_plan.append(self.package_db[item])
792
793 for pkg in plan:
794 del deps_map[pkg]
795
796 if deps_map:
Mike Frysinger383367e2014-09-16 15:06:17 -0400797 print("Cyclic dependencies:", " ".join(deps_map))
David Jamesfcb70ef2011-02-02 16:02:30 -0800798 PrintDepsMap(deps_map)
799 sys.exit(1)
800
801 self.emerge.depgraph.display(install_plan)
802
803
def PrintDepsMap(deps_map):
  """Print dependency graph, for each package list it's prerequisites."""
  for pkg in sorted(deps_map):
    print("%s: (%s) needs" % (pkg, deps_map[pkg]["action"]))
    prereqs = deps_map[pkg]["needs"]
    if prereqs:
      for prereq in sorted(prereqs):
        print("  %s" % (prereq))
    else:
      print("    no dependencies")
David Jamesfcb70ef2011-02-02 16:02:30 -0800813
814
class EmergeJobState(object):
  """Structure describing the EmergeJobState."""

  __slots__ = ["done", "filename", "last_notify_timestamp", "last_output_seek",
               "last_output_timestamp", "pkgname", "retcode", "start_timestamp",
               "target", "fetch_only", "unpack_only"]

  def __init__(self, target, pkgname, done, filename, start_timestamp,
               retcode=None, fetch_only=False, unpack_only=False):
    """Record the state of a single emerge job.

    Args:
      target: Full name of the target we're building (e.g.
        virtual/target-os-1-r60).
      pkgname: Short name of the target we're building (e.g. target-os-1-r60).
      done: Whether the job is done (True if done; False otherwise).
      filename: File where the job's output is currently stored.
      start_timestamp: Timestamp when the job started.
      retcode: Return code of the job, if it has actually finished.
      fetch_only: Whether this was just a fetch job.
      unpack_only: Whether we only unpack packages (no emerge).
    """
    self.target = target
    self.pkgname = pkgname
    self.done = done
    self.filename = filename
    self.retcode = retcode
    self.fetch_only = fetch_only
    self.unpack_only = unpack_only
    self.start_timestamp = start_timestamp
    # We print the name of the log file at the beginning of the job, so the
    # last-notify time starts out equal to the job's start time.
    self.last_notify_timestamp = start_timestamp
    # Byte offset of the end of the last complete line we printed. Used to
    # jump to the right place when we print output from the same ebuild
    # multiple times. Starts at zero.
    self.last_output_seek = 0
    # Timestamp of the last time we printed output; zero means "never".
    self.last_output_timestamp = 0
863
David Jamesfcb70ef2011-02-02 16:02:30 -0800864
David James321490a2012-12-17 12:05:56 -0800865def KillHandler(_signum, _frame):
David James7358d032011-05-19 10:40:03 -0700866 # Kill self and all subprocesses.
867 os.killpg(0, signal.SIGKILL)
868
Mike Frysingercc838832014-05-24 13:10:30 -0400869
David Jamesfcb70ef2011-02-02 16:02:30 -0800870def SetupWorkerSignals():
David James321490a2012-12-17 12:05:56 -0800871 def ExitHandler(_signum, _frame):
David James7358d032011-05-19 10:40:03 -0700872 # Set KILLED flag.
873 KILLED.set()
David James13cead42011-05-18 16:22:01 -0700874
David James7358d032011-05-19 10:40:03 -0700875 # Remove our signal handlers so we don't get called recursively.
876 signal.signal(signal.SIGINT, KillHandler)
877 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -0800878
879 # Ensure that we exit quietly and cleanly, if possible, when we receive
880 # SIGTERM or SIGINT signals. By default, when the user hits CTRL-C, all
881 # of the child processes will print details about KeyboardInterrupt
882 # exceptions, which isn't very helpful.
883 signal.signal(signal.SIGINT, ExitHandler)
884 signal.signal(signal.SIGTERM, ExitHandler)
885
Mike Frysingerd74fe4a2014-04-24 11:43:38 -0400886
def EmergeProcess(output, target, *args, **kwargs):
  """Merge a package in a subprocess.

  Forks, runs portage's Scheduler in the child with stdout/stderr redirected
  to the given temporary file, and waits for the child in the parent.

  Args:
    output: Temporary file to write output.
    target: The package we'll be processing (for display purposes).
    *args: Arguments to pass to Scheduler constructor.
    **kwargs: Keyword arguments to pass to Scheduler constructor.

  Returns:
    The exit code returned by the subprocess.
    (NOTE(review): this is the raw 16-bit status from os.waitpid, not a
    plain exit code -- callers appear to treat nonzero as failure.)
  """
  pid = os.fork()
  if pid == 0:
    # Child process: perform the merge and leave only via os._exit below.
    try:
      proctitle.settitle('EmergeProcess', target)

      # Sanity checks.
      if sys.stdout.fileno() != 1:
        raise Exception("sys.stdout.fileno() != 1")
      if sys.stderr.fileno() != 2:
        raise Exception("sys.stderr.fileno() != 2")

      # - Redirect 1 (stdout) and 2 (stderr) at our temporary file.
      # - Redirect 0 to point at sys.stdin. In this case, sys.stdin
      #   points at a file reading os.devnull, because multiprocessing mucks
      #   with sys.stdin.
      # - Leave the sys.stdin and output filehandles alone.
      fd_pipes = {0: sys.stdin.fileno(),
                  1: output.fileno(),
                  2: output.fileno(),
                  sys.stdin.fileno(): sys.stdin.fileno(),
                  output.fileno(): output.fileno()}
      # pylint: disable=W0212
      portage.process._setup_pipes(fd_pipes, close_fds=False)

      # Portage doesn't like when sys.stdin.fileno() != 0, so point sys.stdin
      # at the filehandle we just created in _setup_pipes.
      if sys.stdin.fileno() != 0:
        sys.__stdin__ = sys.stdin = os.fdopen(0, "r")

      scheduler = Scheduler(*args, **kwargs)

      # Enable blocker handling even though we're in --nodeps mode. This
      # allows us to unmerge the blocker after we've merged the replacement.
      scheduler._opts_ignore_blockers = frozenset()

      # Actually do the merge.
      retval = scheduler.merge()

    # We catch all exceptions here (including SystemExit, KeyboardInterrupt,
    # etc) so as to ensure that we don't confuse the multiprocessing module,
    # which expects that all forked children exit with os._exit().
    # pylint: disable=W0702
    except:
      traceback.print_exc(file=output)
      retval = 1
    # Flush everything before exiting so no output is lost in the log file.
    sys.stdout.flush()
    sys.stderr.flush()
    output.flush()
    # pylint: disable=W0212
    os._exit(retval)
  else:
    # Return the exit code of the subprocess.
    return os.waitpid(pid, 0)[1]
David Jamesfcb70ef2011-02-02 16:02:30 -0800952
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700953
def UnpackPackage(pkg_state):
  """Unpacks package described by pkg_state.

  Args:
    pkg_state: EmergeJobState object describing target.

  Returns:
    Exit code returned by subprocess.
  """
  sysroot = os.environ["SYSROOT"]
  pkgdir = os.environ.get("PKGDIR", os.path.join(sysroot, "packages"))
  root = os.environ.get("ROOT", sysroot)
  tbz2_path = os.path.join(pkgdir, pkg_state.target + ".tbz2")

  # Decompress the binary package to a pipe.
  comp = cros_build_lib.FindCompressor(cros_build_lib.COMP_BZIP2)
  decompress_cmd = [comp, "-dc"]
  if comp.endswith("pbzip2"):
    decompress_cmd.append("--ignore-trailing-garbage=1")
  decompress_cmd.append(tbz2_path)

  result = cros_build_lib.RunCommand(decompress_cmd, cwd=root,
                                     stdout_to_pipe=True, print_cmd=False,
                                     error_code_ok=True)

  # If we were not successful, return now and don't attempt untar.
  if result.returncode:
    return result.returncode

  # Untar the decompressed stream into the root filesystem.
  untar_cmd = ["sudo", "tar", "-xf", "-", "-C", root]
  result = cros_build_lib.RunCommand(untar_cmd, cwd=root, input=result.output,
                                     print_cmd=False, error_code_ok=True)

  return result.returncode
985
986
def EmergeWorker(task_queue, job_queue, emerge, package_db, fetch_only=False,
                 unpack_only=False):
  """This worker emerges any packages given to it on the task_queue.

  Args:
    task_queue: The queue of tasks for this worker to do.
    job_queue: The queue of results from the worker.
    emerge: An EmergeData() object.
    package_db: A dict, mapping package ids to portage Package objects.
    fetch_only: A bool, indicating if we should just fetch the target.
    unpack_only: A bool, indicating if we should just unpack the target.

  It expects package identifiers to be passed to it via task_queue. When
  a task is started, it pushes the (target, filename) to the started_queue.
  The output is stored in filename. When a merge starts or finishes, we push
  EmergeJobState objects to the job_queue.
  """
  if fetch_only:
    mode = 'fetch'
  elif unpack_only:
    mode = 'unpack'
  else:
    mode = 'emerge'
  proctitle.settitle('EmergeWorker', mode, '[idle]')

  SetupWorkerSignals()
  settings, trees, mtimedb = emerge.settings, emerge.trees, emerge.mtimedb

  # Disable flushing of caches to save on I/O.
  root = emerge.settings["ROOT"]
  vardb = emerge.trees[root]["vartree"].dbapi
  vardb._flush_cache_enabled = False
  bindb = emerge.trees[root]["bintree"].dbapi
  # Might be a set, might be a list, might be None; no clue, just use shallow
  # copy to ensure we can roll it back.
  # pylint: disable=W0212
  original_remotepkgs = copy.copy(bindb.bintree._remotepkgs)

  opts, spinner = emerge.opts, emerge.spinner
  opts["--nodeps"] = True
  if fetch_only:
    opts["--fetchonly"] = True

  while True:
    # Wait for a new item to show up on the queue. This is a blocking wait,
    # so if there's nothing to do, we just sit here.
    pkg_state = task_queue.get()
    if pkg_state is None:
      # If target is None, this means that the main thread wants us to quit.
      # The other workers need to exit too, so we'll push the message back on
      # to the queue so they'll get it too.
      task_queue.put(None)
      return
    if KILLED.is_set():
      return

    target = pkg_state.target
    proctitle.settitle('EmergeWorker', mode, target)

    db_pkg = package_db[target]

    if db_pkg.type_name == "binary":
      if not fetch_only and pkg_state.fetched_successfully:
        # Ensure portage doesn't think our pkg is remote- else it'll force
        # a redownload of it (even if the on-disk file is fine). In-memory
        # caching basically, implemented dumbly.
        bindb.bintree._remotepkgs = None
      else:
        # BUGFIX: restore the saved remote package list on the real
        # attribute. This previously assigned to a bogus
        # "bintree_remotepkgs" attribute (missing "._"), so the rollback
        # never actually happened.
        bindb.bintree._remotepkgs = original_remotepkgs

    db_pkg.root_config = emerge.root_config
    install_list = [db_pkg]
    pkgname = db_pkg.pf
    output = tempfile.NamedTemporaryFile(prefix=pkgname + "-", delete=False)
    # BUGFIX: the mode must be octal. The old code passed decimal 644
    # (== 0o1204), which set the sticky bit and garbage permissions instead
    # of the intended world-readable 0o644.
    os.chmod(output.name, 0o644)
    start_timestamp = time.time()
    job = EmergeJobState(target, pkgname, False, output.name, start_timestamp,
                         fetch_only=fetch_only, unpack_only=unpack_only)
    job_queue.put(job)
    if "--pretend" in opts:
      retcode = 0
    else:
      try:
        emerge.scheduler_graph.mergelist = install_list
        if unpack_only:
          retcode = UnpackPackage(pkg_state)
        else:
          retcode = EmergeProcess(output, target, settings, trees, mtimedb,
                                  opts, spinner, favorites=emerge.favorites,
                                  graph_config=emerge.scheduler_graph)
      except Exception:
        traceback.print_exc(file=output)
        retcode = 1
    # Close the log file unconditionally (previously it was leaked in
    # --pretend mode, where the else branch above was skipped).
    output.close()

    if KILLED.is_set():
      return

    job = EmergeJobState(target, pkgname, True, output.name, start_timestamp,
                         retcode, fetch_only=fetch_only,
                         unpack_only=unpack_only)
    job_queue.put(job)

    # Set the title back to idle as the multiprocess pool won't destroy us;
    # when another job comes up, it'll re-use this process.
    proctitle.settitle('EmergeWorker', mode, '[idle]')
1093
David Jamesfcb70ef2011-02-02 16:02:30 -08001094
class LinePrinter(object):
  """Helper object to print a single line."""

  def __init__(self, line):
    # Text that Print() will emit verbatim.
    self.line = line

  def Print(self, _seek_locations):
    """Print the stored line; seek locations are not used here."""
    print(self.line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001103
1104
1105class JobPrinter(object):
1106 """Helper object to print output of a job."""
1107
1108 def __init__(self, job, unlink=False):
1109 """Print output of job.
1110
Mike Frysinger02e1e072013-11-10 22:11:34 -05001111 If unlink is True, unlink the job output file when done.
1112 """
David Jamesfcb70ef2011-02-02 16:02:30 -08001113 self.current_time = time.time()
1114 self.job = job
1115 self.unlink = unlink
1116
1117 def Print(self, seek_locations):
1118
1119 job = self.job
1120
1121 # Calculate how long the job has been running.
1122 seconds = self.current_time - job.start_timestamp
1123
1124 # Note that we've printed out the job so far.
1125 job.last_output_timestamp = self.current_time
1126
1127 # Note that we're starting the job
1128 info = "job %s (%dm%.1fs)" % (job.pkgname, seconds / 60, seconds % 60)
1129 last_output_seek = seek_locations.get(job.filename, 0)
1130 if last_output_seek:
Mike Frysinger383367e2014-09-16 15:06:17 -04001131 print("=== Continue output for %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001132 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001133 print("=== Start output for %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001134
1135 # Print actual output from job
1136 f = codecs.open(job.filename, encoding='utf-8', errors='replace')
1137 f.seek(last_output_seek)
1138 prefix = job.pkgname + ":"
1139 for line in f:
1140
1141 # Save off our position in the file
1142 if line and line[-1] == "\n":
1143 last_output_seek = f.tell()
1144 line = line[:-1]
1145
1146 # Print our line
Mike Frysinger383367e2014-09-16 15:06:17 -04001147 print(prefix, line.encode('utf-8', 'replace'))
David Jamesfcb70ef2011-02-02 16:02:30 -08001148 f.close()
1149
1150 # Save our last spot in the file so that we don't print out the same
1151 # location twice.
1152 seek_locations[job.filename] = last_output_seek
1153
1154 # Note end of output section
1155 if job.done:
Mike Frysinger383367e2014-09-16 15:06:17 -04001156 print("=== Complete: %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001157 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001158 print("=== Still running: %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001159
1160 if self.unlink:
1161 os.unlink(job.filename)
1162
1163
def PrintWorker(queue):
  """A worker that prints stuff to the screen as requested."""
  proctitle.settitle('PrintWorker')

  def ExitHandler(_signum, _frame):
    # Set KILLED flag.
    KILLED.set()

    # Switch to default signal handlers so that we'll die after two signals.
    signal.signal(signal.SIGINT, KillHandler)
    signal.signal(signal.SIGTERM, KillHandler)

  # Don't exit on the first SIGINT / SIGTERM, because the parent worker will
  # handle it and tell us when we need to exit.
  signal.signal(signal.SIGINT, ExitHandler)
  signal.signal(signal.SIGTERM, ExitHandler)

  # seek_locations maps each log file to the offset we've printed through,
  # so successive Print jobs resume where the previous one left off.
  seek_locations = {}
  while True:
    try:
      job = queue.get()
      # A falsy job is the sentinel telling us to shut down.
      if not job:
        break
      job.Print(seek_locations)
      sys.stdout.flush()
    except IOError as ex:
      if ex.errno == errno.EINTR:
        # Looks like we received a signal. Keep printing.
        continue
      raise
1198
Brian Harring867e2362012-03-17 04:05:17 -07001199
Brian Harring0be85c62012-03-17 19:52:12 -07001200class TargetState(object):
Don Garrett25f309a2014-03-19 14:02:12 -07001201 """Structure descriting the TargetState."""
Brian Harring867e2362012-03-17 04:05:17 -07001202
Brian Harring0be85c62012-03-17 19:52:12 -07001203 __slots__ = ("target", "info", "score", "prefetched", "fetched_successfully")
Brian Harring867e2362012-03-17 04:05:17 -07001204
David James321490a2012-12-17 12:05:56 -08001205 def __init__(self, target, info):
Brian Harring867e2362012-03-17 04:05:17 -07001206 self.target, self.info = target, info
Brian Harring0be85c62012-03-17 19:52:12 -07001207 self.fetched_successfully = False
1208 self.prefetched = False
David James321490a2012-12-17 12:05:56 -08001209 self.score = None
Brian Harring867e2362012-03-17 04:05:17 -07001210 self.update_score()
1211
1212 def __cmp__(self, other):
1213 return cmp(self.score, other.score)
1214
1215 def update_score(self):
1216 self.score = (
1217 -len(self.info["tprovides"]),
Brian Harring0be85c62012-03-17 19:52:12 -07001218 len(self.info["needs"]),
Brian Harring11c5eeb2012-03-18 11:02:39 -07001219 not self.info["binary"],
Brian Harring867e2362012-03-17 04:05:17 -07001220 -len(self.info["provides"]),
1221 self.info["idx"],
1222 self.target,
1223 )
1224
1225
class ScoredHeap(object):
  """Implementation of a general purpose scored heap."""

  __slots__ = ("heap", "_heap_set")

  def __init__(self, initial=()):
    """Create the heap, optionally seeding it from an initial sequence."""
    self.heap = []
    # Mirror of the targets currently in the heap, for O(1) membership.
    self._heap_set = set()
    if initial:
      self.multi_put(initial)

  def get(self):
    """Pop and return the lowest-scored item."""
    state = heapq.heappop(self.heap)
    self._heap_set.remove(state.target)
    return state

  def put(self, item):
    """Push a single TargetState onto the heap."""
    if not isinstance(item, TargetState):
      raise ValueError("Item %r isn't a TargetState" % (item,))
    heapq.heappush(self.heap, item)
    self._heap_set.add(item.target)

  def multi_put(self, sequence):
    """Add a sequence of items in bulk, then restore heap order."""
    items = list(sequence)
    self.heap.extend(items)
    self._heap_set.update(entry.target for entry in items)
    self.sort()

  def sort(self):
    """Re-establish the heap invariant after bulk insertion."""
    heapq.heapify(self.heap)

  def __contains__(self, target):
    return target in self._heap_set

  def __nonzero__(self):
    # Python 2 truthiness hook; Python 3 would fall back to __len__.
    return bool(self.heap)

  def __len__(self):
    return len(self.heap)
1265
1266
David Jamesfcb70ef2011-02-02 16:02:30 -08001267class EmergeQueue(object):
1268 """Class to schedule emerge jobs according to a dependency graph."""
1269
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001270 def __init__(self, deps_map, emerge, package_db, show_output, unpack_only):
David Jamesfcb70ef2011-02-02 16:02:30 -08001271 # Store the dependency graph.
1272 self._deps_map = deps_map
Brian Harring0be85c62012-03-17 19:52:12 -07001273 self._state_map = {}
David Jamesfcb70ef2011-02-02 16:02:30 -08001274 # Initialize the running queue to empty
Brian Harring0be85c62012-03-17 19:52:12 -07001275 self._build_jobs = {}
1276 self._build_ready = ScoredHeap()
1277 self._fetch_jobs = {}
1278 self._fetch_ready = ScoredHeap()
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001279 self._unpack_jobs = {}
1280 self._unpack_ready = ScoredHeap()
David Jamesfcb70ef2011-02-02 16:02:30 -08001281 # List of total package installs represented in deps_map.
1282 install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"]
1283 self._total_jobs = len(install_jobs)
1284 self._show_output = show_output
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001285 self._unpack_only = unpack_only
David Jamesfcb70ef2011-02-02 16:02:30 -08001286
1287 if "--pretend" in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001288 print("Skipping merge because of --pretend mode.")
David Jamesfcb70ef2011-02-02 16:02:30 -08001289 sys.exit(0)
1290
David Jamesaaf49e42014-04-24 09:40:05 -07001291 # Set up a session so we can easily terminate all children.
1292 self._SetupSession()
David James7358d032011-05-19 10:40:03 -07001293
David Jamesfcb70ef2011-02-02 16:02:30 -08001294 # Setup scheduler graph object. This is used by the child processes
1295 # to help schedule jobs.
1296 emerge.scheduler_graph = emerge.depgraph.schedulerGraph()
1297
1298 # Calculate how many jobs we can run in parallel. We don't want to pass
1299 # the --jobs flag over to emerge itself, because that'll tell emerge to
1300 # hide its output, and said output is quite useful for debugging hung
1301 # jobs.
1302 procs = min(self._total_jobs,
1303 emerge.opts.pop("--jobs", multiprocessing.cpu_count()))
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001304 self._build_procs = self._unpack_procs = self._fetch_procs = max(1, procs)
David James8c7e5e32011-06-28 11:26:03 -07001305 self._load_avg = emerge.opts.pop("--load-average", None)
David Jamesfcb70ef2011-02-02 16:02:30 -08001306 self._job_queue = multiprocessing.Queue()
1307 self._print_queue = multiprocessing.Queue()
Brian Harring0be85c62012-03-17 19:52:12 -07001308
1309 self._fetch_queue = multiprocessing.Queue()
1310 args = (self._fetch_queue, self._job_queue, emerge, package_db, True)
1311 self._fetch_pool = multiprocessing.Pool(self._fetch_procs, EmergeWorker,
1312 args)
1313
1314 self._build_queue = multiprocessing.Queue()
1315 args = (self._build_queue, self._job_queue, emerge, package_db)
1316 self._build_pool = multiprocessing.Pool(self._build_procs, EmergeWorker,
1317 args)
1318
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001319 if self._unpack_only:
1320 # Unpack pool only required on unpack_only jobs.
1321 self._unpack_queue = multiprocessing.Queue()
1322 args = (self._unpack_queue, self._job_queue, emerge, package_db, False,
1323 True)
1324 self._unpack_pool = multiprocessing.Pool(self._unpack_procs, EmergeWorker,
1325 args)
1326
David Jamesfcb70ef2011-02-02 16:02:30 -08001327 self._print_worker = multiprocessing.Process(target=PrintWorker,
1328 args=[self._print_queue])
1329 self._print_worker.start()
1330
1331 # Initialize the failed queue to empty.
1332 self._retry_queue = []
1333 self._failed = set()
1334
David Jamesfcb70ef2011-02-02 16:02:30 -08001335 # Setup an exit handler so that we print nice messages if we are
1336 # terminated.
1337 self._SetupExitHandler()
1338
1339 # Schedule our jobs.
Brian Harring0be85c62012-03-17 19:52:12 -07001340 self._state_map.update(
1341 (pkg, TargetState(pkg, data)) for pkg, data in deps_map.iteritems())
1342 self._fetch_ready.multi_put(self._state_map.itervalues())
David Jamesfcb70ef2011-02-02 16:02:30 -08001343
David Jamesaaf49e42014-04-24 09:40:05 -07001344 def _SetupSession(self):
1345 """Set up a session so we can easily terminate all children."""
1346 # When we call os.setsid(), this sets up a session / process group for this
1347 # process and all children. These session groups are needed so that we can
1348 # easily kill all children (including processes launched by emerge) before
1349 # we exit.
1350 #
1351 # One unfortunate side effect of os.setsid() is that it blocks CTRL-C from
1352 # being received. To work around this, we only call os.setsid() in a forked
1353 # process, so that the parent can still watch for CTRL-C. The parent will
1354 # just sit around, watching for signals and propagating them to the child,
1355 # until the child exits.
1356 #
1357 # TODO(davidjames): It would be nice if we could replace this with cgroups.
1358 pid = os.fork()
1359 if pid == 0:
1360 os.setsid()
1361 else:
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001362 proctitle.settitle('SessionManager')
1363
David Jamesaaf49e42014-04-24 09:40:05 -07001364 def PropagateToChildren(signum, _frame):
1365 # Just propagate the signals down to the child. We'll exit when the
1366 # child does.
1367 try:
1368 os.kill(pid, signum)
1369 except OSError as ex:
1370 if ex.errno != errno.ESRCH:
1371 raise
1372 signal.signal(signal.SIGINT, PropagateToChildren)
1373 signal.signal(signal.SIGTERM, PropagateToChildren)
1374
1375 def StopGroup(_signum, _frame):
1376 # When we get stopped, stop the children.
1377 try:
1378 os.killpg(pid, signal.SIGSTOP)
1379 os.kill(0, signal.SIGSTOP)
1380 except OSError as ex:
1381 if ex.errno != errno.ESRCH:
1382 raise
1383 signal.signal(signal.SIGTSTP, StopGroup)
1384
1385 def ContinueGroup(_signum, _frame):
1386 # Launch the children again after being stopped.
1387 try:
1388 os.killpg(pid, signal.SIGCONT)
1389 except OSError as ex:
1390 if ex.errno != errno.ESRCH:
1391 raise
1392 signal.signal(signal.SIGCONT, ContinueGroup)
1393
1394 # Loop until the children exit. We exit with os._exit to be sure we
1395 # don't run any finalizers (those will be run by the child process.)
1396 # pylint: disable=W0212
1397 while True:
1398 try:
1399 # Wait for the process to exit. When it does, exit with the return
1400 # value of the subprocess.
1401 os._exit(osutils.GetExitStatus(os.waitpid(pid, 0)[1]))
1402 except OSError as ex:
1403 if ex.errno == errno.EINTR:
1404 continue
1405 traceback.print_exc()
1406 os._exit(1)
1407 except BaseException:
1408 traceback.print_exc()
1409 os._exit(1)
1410
David Jamesfcb70ef2011-02-02 16:02:30 -08001411 def _SetupExitHandler(self):
1412
David James321490a2012-12-17 12:05:56 -08001413 def ExitHandler(signum, _frame):
David James7358d032011-05-19 10:40:03 -07001414 # Set KILLED flag.
1415 KILLED.set()
David Jamesfcb70ef2011-02-02 16:02:30 -08001416
1417 # Kill our signal handlers so we don't get called recursively
David James7358d032011-05-19 10:40:03 -07001418 signal.signal(signal.SIGINT, KillHandler)
1419 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -08001420
1421 # Print our current job status
Brian Harring0be85c62012-03-17 19:52:12 -07001422 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001423 if job:
1424 self._print_queue.put(JobPrinter(job, unlink=True))
1425
1426 # Notify the user that we are exiting
1427 self._Print("Exiting on signal %s" % signum)
David James7358d032011-05-19 10:40:03 -07001428 self._print_queue.put(None)
1429 self._print_worker.join()
David Jamesfcb70ef2011-02-02 16:02:30 -08001430
1431 # Kill child threads, then exit.
David James7358d032011-05-19 10:40:03 -07001432 os.killpg(0, signal.SIGKILL)
David Jamesfcb70ef2011-02-02 16:02:30 -08001433 sys.exit(1)
1434
1435 # Print out job status when we are killed
1436 signal.signal(signal.SIGINT, ExitHandler)
1437 signal.signal(signal.SIGTERM, ExitHandler)
1438
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001439 def _ScheduleUnpack(self, pkg_state):
1440 self._unpack_jobs[pkg_state.target] = None
1441 self._unpack_queue.put(pkg_state)
1442
Brian Harring0be85c62012-03-17 19:52:12 -07001443 def _Schedule(self, pkg_state):
David Jamesfcb70ef2011-02-02 16:02:30 -08001444 # We maintain a tree of all deps, if this doesn't need
David James8c7e5e32011-06-28 11:26:03 -07001445 # to be installed just free up its children and continue.
David Jamesfcb70ef2011-02-02 16:02:30 -08001446 # It is possible to reinstall deps of deps, without reinstalling
1447 # first level deps, like so:
Mike Frysingerfd969312014-04-02 22:16:42 -04001448 # virtual/target-os (merge) -> eselect (nomerge) -> python (merge)
Brian Harring0be85c62012-03-17 19:52:12 -07001449 this_pkg = pkg_state.info
1450 target = pkg_state.target
1451 if pkg_state.info is not None:
1452 if this_pkg["action"] == "nomerge":
1453 self._Finish(target)
1454 elif target not in self._build_jobs:
1455 # Kick off the build if it's marked to be built.
1456 self._build_jobs[target] = None
1457 self._build_queue.put(pkg_state)
1458 return True
David Jamesfcb70ef2011-02-02 16:02:30 -08001459
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001460 def _ScheduleLoop(self, unpack_only=False):
1461 if unpack_only:
1462 ready_queue = self._unpack_ready
1463 jobs_queue = self._unpack_jobs
1464 procs = self._unpack_procs
1465 else:
1466 ready_queue = self._build_ready
1467 jobs_queue = self._build_jobs
1468 procs = self._build_procs
1469
David James8c7e5e32011-06-28 11:26:03 -07001470 # If the current load exceeds our desired load average, don't schedule
1471 # more than one job.
1472 if self._load_avg and os.getloadavg()[0] > self._load_avg:
1473 needed_jobs = 1
1474 else:
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001475 needed_jobs = procs
David James8c7e5e32011-06-28 11:26:03 -07001476
1477 # Schedule more jobs.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001478 while ready_queue and len(jobs_queue) < needed_jobs:
1479 state = ready_queue.get()
1480 if unpack_only:
1481 self._ScheduleUnpack(state)
1482 else:
1483 if state.target not in self._failed:
1484 self._Schedule(state)
David Jamesfcb70ef2011-02-02 16:02:30 -08001485
1486 def _Print(self, line):
1487 """Print a single line."""
1488 self._print_queue.put(LinePrinter(line))
1489
  def _Status(self):
    """Print status.

    Emits either per-job output/notifications, or (when nothing job-specific
    was printed) a one-line summary of pipeline progress and system load.
    """
    current_time = time.time()
    no_output = True

    # Print interim output every minute if --show-output is used. Otherwise,
    # print notifications about running packages every 2 minutes, and print
    # full output for jobs that have been running for 60 minutes or more.
    if self._show_output:
      interval = 60
      notify_interval = 0
    else:
      interval = 60 * 60
      notify_interval = 60 * 2
    for job in self._build_jobs.itervalues():
      if job:
        last_timestamp = max(job.start_timestamp, job.last_output_timestamp)
        if last_timestamp + interval < current_time:
          self._print_queue.put(JobPrinter(job))
          job.last_output_timestamp = current_time
          no_output = False
        elif (notify_interval and
              job.last_notify_timestamp + notify_interval < current_time):
          job_seconds = current_time - job.start_timestamp
          args = (job.pkgname, job_seconds / 60, job_seconds % 60, job.filename)
          info = "Still building %s (%dm%.1fs). Logs in %s" % args
          job.last_notify_timestamp = current_time
          self._Print(info)
          no_output = False

    # If we haven't printed any messages yet, print a general status message
    # here.
    if no_output:
      seconds = current_time - GLOBAL_START
      fjobs, fready = len(self._fetch_jobs), len(self._fetch_ready)
      ujobs, uready = len(self._unpack_jobs), len(self._unpack_ready)
      bjobs, bready = len(self._build_jobs), len(self._build_ready)
      retries = len(self._retry_queue)
      # "Pending" counts packages not yet handed to a fetcher or a builder.
      pending = max(0, len(self._deps_map) - fjobs - bjobs)
      line = "Pending %s/%s, " % (pending, self._total_jobs)
      if fjobs or fready:
        line += "Fetching %s/%s, " % (fjobs, fready + fjobs)
      if ujobs or uready:
        line += "Unpacking %s/%s, " % (ujobs, uready + ujobs)
      if bjobs or bready or retries:
        line += "Building %s/%s, " % (bjobs, bready + bjobs)
      if retries:
        line += "Retrying %s, " % (retries,)
      load = " ".join(str(x) for x in os.getloadavg())
      line += ("[Time %dm%.1fs Load %s]" % (seconds/60, seconds %60, load))
      self._Print(line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001541
  def _Finish(self, target):
    """Mark a target as completed and unblock dependencies.

    Args:
      target: Package key (as stored in self._deps_map) that just finished.
    """
    this_pkg = self._deps_map[target]
    if this_pkg["needs"] and this_pkg["nodeps"]:
      # We got installed, but our deps have not been installed yet. Dependent
      # packages should only be installed when our needs have been fully met.
      this_pkg["action"] = "nomerge"
    else:
      # Tell every package that depended on us that one more of its needs is
      # now satisfied, and queue it for building once all needs are met.
      for dep in this_pkg["provides"]:
        dep_pkg = self._deps_map[dep]
        state = self._state_map[dep]
        del dep_pkg["needs"][target]
        state.update_score()
        if not state.prefetched:
          if dep in self._fetch_ready:
            # If it's not currently being fetched, update the prioritization
            self._fetch_ready.sort()
        elif not dep_pkg["needs"]:
          if dep_pkg["nodeps"] and dep_pkg["action"] == "nomerge":
            # The dependent was deferred earlier (see the nomerge branch
            # above); finishing it recursively unblocks its own dependents.
            self._Finish(dep)
          else:
            self._build_ready.put(self._state_map[dep])
      self._deps_map.pop(target)
David Jamesfcb70ef2011-02-02 16:02:30 -08001565
1566 def _Retry(self):
David James8c7e5e32011-06-28 11:26:03 -07001567 while self._retry_queue:
Brian Harring0be85c62012-03-17 19:52:12 -07001568 state = self._retry_queue.pop(0)
1569 if self._Schedule(state):
1570 self._Print("Retrying emerge of %s." % state.target)
David James8c7e5e32011-06-28 11:26:03 -07001571 break
David Jamesfcb70ef2011-02-02 16:02:30 -08001572
Brian Harringa43f5952012-04-12 01:19:34 -07001573 def _Shutdown(self):
David Jamesfcb70ef2011-02-02 16:02:30 -08001574 # Tell emerge workers to exit. They all exit when 'None' is pushed
1575 # to the queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001576
Brian Harringa43f5952012-04-12 01:19:34 -07001577 # Shutdown the workers first; then jobs (which is how they feed things back)
1578 # then finally the print queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001579
Brian Harringa43f5952012-04-12 01:19:34 -07001580 def _stop(queue, pool):
1581 if pool is None:
1582 return
1583 try:
1584 queue.put(None)
1585 pool.close()
1586 pool.join()
1587 finally:
1588 pool.terminate()
Brian Harring0be85c62012-03-17 19:52:12 -07001589
Brian Harringa43f5952012-04-12 01:19:34 -07001590 _stop(self._fetch_queue, self._fetch_pool)
1591 self._fetch_queue = self._fetch_pool = None
Brian Harring0be85c62012-03-17 19:52:12 -07001592
Brian Harringa43f5952012-04-12 01:19:34 -07001593 _stop(self._build_queue, self._build_pool)
1594 self._build_queue = self._build_pool = None
1595
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001596 if self._unpack_only:
1597 _stop(self._unpack_queue, self._unpack_pool)
1598 self._unpack_queue = self._unpack_pool = None
1599
Brian Harringa43f5952012-04-12 01:19:34 -07001600 if self._job_queue is not None:
1601 self._job_queue.close()
1602 self._job_queue = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001603
1604 # Now that our workers are finished, we can kill the print queue.
Brian Harringa43f5952012-04-12 01:19:34 -07001605 if self._print_worker is not None:
1606 try:
1607 self._print_queue.put(None)
1608 self._print_queue.close()
1609 self._print_worker.join()
1610 finally:
1611 self._print_worker.terminate()
1612 self._print_queue = self._print_worker = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001613
  def Run(self):
    """Run through the scheduled ebuilds.

    Keep running so long as we have uninstalled packages in the
    dependency graph to merge.
    """
    if not self._deps_map:
      return

    # Start the fetchers.
    for _ in xrange(min(self._fetch_procs, len(self._fetch_ready))):
      state = self._fetch_ready.get()
      self._fetch_jobs[state.target] = None
      self._fetch_queue.put(state)

    # Print an update, then get going.
    self._Status()

    # Packages that failed once but later succeeded; reported at the end.
    retried = set()
    while self._deps_map:
      # Check here that we are actually waiting for something.
      if (self._build_queue.empty() and
          self._job_queue.empty() and
          not self._fetch_jobs and
          not self._fetch_ready and
          not self._unpack_jobs and
          not self._unpack_ready and
          not self._build_jobs and
          not self._build_ready and
          self._deps_map):
        # If we have failed on a package, retry it now.
        if self._retry_queue:
          self._Retry()
        else:
          # Tell the user why we're exiting.
          if self._failed:
            print('Packages failed:\n\t%s' % '\n\t'.join(self._failed))
            status_file = os.environ.get("PARALLEL_EMERGE_STATUS_FILE")
            if status_file:
              failed_pkgs = set(portage.versions.cpv_getkey(x)
                                for x in self._failed)
              with open(status_file, "a") as f:
                f.write("%s\n" % " ".join(failed_pkgs))
          else:
            print("Deadlock! Circular dependencies!")
          sys.exit(1)

      # Poll the job queue for up to a minute (12 tries x 5s timeout),
      # scheduling any newly unblocked jobs between attempts.
      for _ in xrange(12):
        try:
          job = self._job_queue.get(timeout=5)
          break
        except Queue.Empty:
          # Check if any more jobs can be scheduled.
          self._ScheduleLoop()
      else:
        # Print an update every 60 seconds.
        self._Status()
        continue

      target = job.target

      if job.fetch_only:
        # Fetcher update: either a started notification (not done) or a
        # completed download.
        if not job.done:
          self._fetch_jobs[job.target] = job
        else:
          state = self._state_map[job.target]
          state.prefetched = True
          state.fetched_successfully = (job.retcode == 0)
          del self._fetch_jobs[job.target]
          self._Print("Fetched %s in %2.2fs"
                      % (target, time.time() - job.start_timestamp))

          if self._show_output or job.retcode != 0:
            self._print_queue.put(JobPrinter(job, unlink=True))
          else:
            os.unlink(job.filename)
          # Failure or not, let build work with it next.
          if not self._deps_map[job.target]["needs"]:
            self._build_ready.put(state)
            self._ScheduleLoop()

          if self._unpack_only and job.retcode == 0:
            self._unpack_ready.put(state)
            self._ScheduleLoop(unpack_only=True)

          if self._fetch_ready:
            state = self._fetch_ready.get()
            self._fetch_queue.put(state)
            self._fetch_jobs[state.target] = None
          else:
            # Minor optimization; shut down fetchers early since we know
            # the queue is empty.
            self._fetch_queue.put(None)
        continue

      if job.unpack_only:
        # Unpacker update: record started jobs, report completions, and keep
        # the unpack pipeline full.
        if not job.done:
          self._unpack_jobs[target] = job
        else:
          del self._unpack_jobs[target]
          self._Print("Unpacked %s in %2.2fs"
                      % (target, time.time() - job.start_timestamp))
          if self._show_output or job.retcode != 0:
            self._print_queue.put(JobPrinter(job, unlink=True))
          else:
            os.unlink(job.filename)
          if self._unpack_ready:
            state = self._unpack_ready.get()
            self._unpack_queue.put(state)
            self._unpack_jobs[state.target] = None
        continue

      if not job.done:
        self._build_jobs[target] = job
        self._Print("Started %s (logged in %s)" % (target, job.filename))
        continue

      # Print output of job
      if self._show_output or job.retcode != 0:
        self._print_queue.put(JobPrinter(job, unlink=True))
      else:
        os.unlink(job.filename)
      del self._build_jobs[target]

      seconds = time.time() - job.start_timestamp
      details = "%s (in %dm%.1fs)" % (target, seconds / 60, seconds % 60)
      previously_failed = target in self._failed

      # Complain if necessary.
      if job.retcode != 0:
        # Handle job failure.
        if previously_failed:
          # If this job has failed previously, give up.
          self._Print("Failed %s. Your build has failed." % details)
        else:
          # Queue up this build to try again after a long while.
          retried.add(target)
          self._retry_queue.append(self._state_map[target])
          self._failed.add(target)
          self._Print("Failed %s, retrying later." % details)
      else:
        if previously_failed:
          # Remove target from list of failed packages.
          self._failed.remove(target)

        self._Print("Completed %s" % details)

        # Mark as completed and unblock waiting ebuilds.
        self._Finish(target)

        if previously_failed and self._retry_queue:
          # If we have successfully retried a failed package, and there
          # are more failed packages, try the next one. We will only have
          # one retrying package actively running at a time.
          self._Retry()


      # Schedule pending jobs and print an update.
      self._ScheduleLoop()
      self._Status()

    # If packages were retried, output a warning.
    if retried:
      self._Print("")
      self._Print("WARNING: The following packages failed the first time,")
      self._Print("but succeeded upon retry. This might indicate incorrect")
      self._Print("dependencies.")
      for pkg in retried:
        self._Print(" %s" % pkg)
      self._Print("@@@STEP_WARNINGS@@@")
      self._Print("")

    # Tell child threads to exit.
    self._Print("Merge complete")
David Jamesfcb70ef2011-02-02 16:02:30 -08001788
1789
Brian Harring30675052012-02-29 12:18:22 -08001790def main(argv):
Brian Harring8294d652012-05-23 02:20:52 -07001791 try:
1792 return real_main(argv)
1793 finally:
1794 # Work around multiprocessing sucking and not cleaning up after itself.
1795 # http://bugs.python.org/issue4106;
1796 # Step one; ensure GC is ran *prior* to the VM starting shutdown.
1797 gc.collect()
1798 # Step two; go looking for those threads and try to manually reap
1799 # them if we can.
1800 for x in threading.enumerate():
1801 # Filter on the name, and ident; if ident is None, the thread
1802 # wasn't started.
1803 if x.name == 'QueueFeederThread' and x.ident is not None:
1804 x.join(1)
David Jamesfcb70ef2011-02-02 16:02:30 -08001805
Brian Harring8294d652012-05-23 02:20:52 -07001806
Bertrand SIMONNETb35e19e2014-07-28 16:29:58 -07001807def get_db(config, root):
Mike Frysinger33fbccb2014-09-05 17:09:07 -04001808 """Return the dbapi.
Bertrand SIMONNETb35e19e2014-07-28 16:29:58 -07001809 Handles both portage 2.1.11 and 2.2.10 (where mydbapi has been removed).
1810
1811 TODO(bsimonnet): Remove this once portage has been uprevd.
1812 """
1813 try:
1814 return config.mydbapi[root]
1815 except AttributeError:
1816 # pylint: disable=W0212
1817 return config._filtered_trees[root]['graph_db']
1818
1819
Brian Harring8294d652012-05-23 02:20:52 -07001820def real_main(argv):
Brian Harring30675052012-02-29 12:18:22 -08001821 parallel_emerge_args = argv[:]
David Jamesfcb70ef2011-02-02 16:02:30 -08001822 deps = DepGraphGenerator()
Brian Harring30675052012-02-29 12:18:22 -08001823 deps.Initialize(parallel_emerge_args)
David Jamesfcb70ef2011-02-02 16:02:30 -08001824 emerge = deps.emerge
1825
1826 if emerge.action is not None:
Brian Harring30675052012-02-29 12:18:22 -08001827 argv = deps.ParseParallelEmergeArgs(argv)
Brian Harring8294d652012-05-23 02:20:52 -07001828 return emerge_main(argv)
David Jamesfcb70ef2011-02-02 16:02:30 -08001829 elif not emerge.cmdline_packages:
1830 Usage()
Brian Harring8294d652012-05-23 02:20:52 -07001831 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001832
1833 # Unless we're in pretend mode, there's not much point running without
1834 # root access. We need to be able to install packages.
1835 #
1836 # NOTE: Even if you're running --pretend, it's a good idea to run
1837 # parallel_emerge with root access so that portage can write to the
1838 # dependency cache. This is important for performance.
David James321490a2012-12-17 12:05:56 -08001839 if "--pretend" not in emerge.opts and portage.data.secpass < 2:
Mike Frysinger383367e2014-09-16 15:06:17 -04001840 print("parallel_emerge: superuser access is required.")
Brian Harring8294d652012-05-23 02:20:52 -07001841 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001842
1843 if "--quiet" not in emerge.opts:
1844 cmdline_packages = " ".join(emerge.cmdline_packages)
Mike Frysinger383367e2014-09-16 15:06:17 -04001845 print("Starting fast-emerge.")
1846 print(" Building package %s on %s" % (cmdline_packages,
1847 deps.board or "root"))
David Jamesfcb70ef2011-02-02 16:02:30 -08001848
David James386ccd12011-05-04 20:17:42 -07001849 deps_tree, deps_info = deps.GenDependencyTree()
David Jamesfcb70ef2011-02-02 16:02:30 -08001850
1851 # You want me to be verbose? I'll give you two trees! Twice as much value.
1852 if "--tree" in emerge.opts and "--verbose" in emerge.opts:
1853 deps.PrintTree(deps_tree)
1854
David James386ccd12011-05-04 20:17:42 -07001855 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001856
1857 # OK, time to print out our progress so far.
1858 deps.PrintInstallPlan(deps_graph)
1859 if "--tree" in emerge.opts:
1860 PrintDepsMap(deps_graph)
1861
1862 # Are we upgrading portage? If so, and there are more packages to merge,
1863 # schedule a restart of parallel_emerge to merge the rest. This ensures that
1864 # we pick up all updates to portage settings before merging any more
1865 # packages.
1866 portage_upgrade = False
1867 root = emerge.settings["ROOT"]
Don Garrett25f309a2014-03-19 14:02:12 -07001868 # pylint: disable=W0212
Bertrand SIMONNETb35e19e2014-07-28 16:29:58 -07001869 final_db = get_db(emerge.depgraph._dynamic_config, root)
David Jamesfcb70ef2011-02-02 16:02:30 -08001870 if root == "/":
1871 for db_pkg in final_db.match_pkgs("sys-apps/portage"):
1872 portage_pkg = deps_graph.get(db_pkg.cpv)
David James0ff16f22012-11-02 14:18:07 -07001873 if portage_pkg:
David Jamesfcb70ef2011-02-02 16:02:30 -08001874 portage_upgrade = True
1875 if "--quiet" not in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001876 print("Upgrading portage first, then restarting...")
David Jamesfcb70ef2011-02-02 16:02:30 -08001877
David James0ff16f22012-11-02 14:18:07 -07001878 # Upgrade Portage first, then the rest of the packages.
1879 #
1880 # In order to grant the child permission to run setsid, we need to run sudo
1881 # again. We preserve SUDO_USER here in case an ebuild depends on it.
1882 if portage_upgrade:
1883 # Calculate what arguments to use when re-invoking.
1884 args = ["sudo", "-E", "SUDO_USER=%s" % os.environ.get("SUDO_USER", "")]
1885 args += [os.path.abspath(sys.argv[0])] + parallel_emerge_args
1886 args += ["--exclude=sys-apps/portage"]
1887
1888 # First upgrade Portage.
1889 passthrough_args = ("--quiet", "--pretend", "--verbose")
1890 emerge_args = [k for k in emerge.opts if k in passthrough_args]
1891 ret = emerge_main(emerge_args + ["portage"])
1892 if ret != 0:
1893 return ret
1894
1895 # Now upgrade the rest.
1896 os.execvp(args[0], args)
1897
David Jamesfcb70ef2011-02-02 16:02:30 -08001898 # Run the queued emerges.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001899 scheduler = EmergeQueue(deps_graph, emerge, deps.package_db, deps.show_output,
1900 deps.unpack_only)
Brian Harringa43f5952012-04-12 01:19:34 -07001901 try:
1902 scheduler.Run()
1903 finally:
Don Garrett25f309a2014-03-19 14:02:12 -07001904 # pylint: disable=W0212
Brian Harringa43f5952012-04-12 01:19:34 -07001905 scheduler._Shutdown()
David James97ce8902011-08-16 09:51:05 -07001906 scheduler = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001907
Mike Frysingerd20a6e22012-10-04 19:01:10 -04001908 clean_logs(emerge.settings)
1909
Mike Frysinger383367e2014-09-16 15:06:17 -04001910 print("Done")
Brian Harring8294d652012-05-23 02:20:52 -07001911 return 0