# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Program to run emerge in parallel, for significant speedup.

Usage:
 ./parallel_emerge [--board=BOARD] [--workon=PKGS]
                   [--force-remote-binary=PKGS] [emerge args] package

This script runs multiple emerge processes in parallel, using appropriate
Portage APIs. It is faster than standard emerge because it has a
multiprocess model instead of an asynchronous model.
"""

# pylint: disable=bad-whitespace

from __future__ import print_function

import codecs
import copy
import errno
import gc
import heapq
import multiprocessing
import os
try:
  import Queue
except ImportError:
  # Python-3 renamed to "queue".  We still use Queue to avoid collisions
  # with naming variables as "queue".  Maybe we'll transition at some point.
  # pylint: disable=F0401
  import queue as Queue
import signal
import sys
import tempfile
import threading
import time
import traceback

from chromite.lib import cros_build_lib
from chromite.lib import process_util
from chromite.lib import proctitle

# If PORTAGE_USERNAME isn't specified, scrape it from the $HOME variable. On
# Chromium OS, the default "portage" user doesn't have the necessary
# permissions. It'd be easier if we could default to $USERNAME, but $USERNAME
# is "root" here because we get called through sudo.
#
# We need to set this before importing any portage modules, because portage
# looks up "PORTAGE_USERNAME" at import time.
#
# NOTE: .bashrc sets PORTAGE_USERNAME = $USERNAME, so most people won't
# encounter this case unless they have an old chroot or blow away the
# environment by running sudo without the -E specifier.
if "PORTAGE_USERNAME" not in os.environ:
  homedir = os.environ.get("HOME")
  if homedir:
    os.environ["PORTAGE_USERNAME"] = os.path.basename(homedir)

# Portage doesn't expose dependency trees in its public API, so we have to
# make use of some private APIs here. These modules are found under
# /usr/lib/portage/pym/.
#
# TODO(davidjames): Update Portage to expose public APIs for these features.
# pylint: disable=F0401
from _emerge.actions import adjust_configs
from _emerge.actions import load_emerge_config
from _emerge.create_depgraph_params import create_depgraph_params
from _emerge.depgraph import backtrack_depgraph
from _emerge.main import emerge_main
from _emerge.main import parse_opts
from _emerge.Package import Package
from _emerge.post_emerge import clean_logs
from _emerge.Scheduler import Scheduler
from _emerge.stdout_spinner import stdout_spinner
from portage._global_updates import _global_updates
import portage
import portage.debug
# pylint: enable=F0401


def Usage():
  """Print usage."""
  print("Usage:")
  print(" ./parallel_emerge [--board=BOARD] [--workon=PKGS]")
  print("                   [--rebuild] [emerge args] package")
  print()
  print("Packages specified as workon packages are always built from source.")
  print()
  print("The --workon argument is mainly useful when you want to build and")
  print("install packages that you are working on unconditionally, but do not")
93 print("to have to rev the package to indicate you want to build it from")
94 print("source. The build_packages script will automatically supply the")
95 print("workon argument to emerge, ensuring that packages selected using")
96 print("cros-workon are rebuilt.")
97 print()
98 print("The --rebuild option rebuilds packages whenever their dependencies")
99 print("are changed. This ensures that your build is correct.")
David Jamesfcb70ef2011-02-02 16:02:30 -0800100
101
David Jamesfcb70ef2011-02-02 16:02:30 -0800102# Global start time
103GLOBAL_START = time.time()
104
David James7358d032011-05-19 10:40:03 -0700105# Whether process has been killed by a signal.
106KILLED = multiprocessing.Event()
107
David Jamesfcb70ef2011-02-02 16:02:30 -0800108
109class EmergeData(object):
110 """This simple struct holds various emerge variables.
111
112 This struct helps us easily pass emerge variables around as a unit.
113 These variables are used for calculating dependencies and installing
114 packages.
115 """
116
David Jamesbf1e3442011-05-28 07:44:20 -0700117 __slots__ = ["action", "cmdline_packages", "depgraph", "favorites",
118 "mtimedb", "opts", "root_config", "scheduler_graph",
119 "settings", "spinner", "trees"]
David Jamesfcb70ef2011-02-02 16:02:30 -0800120
121 def __init__(self):
122 # The action the user requested. If the user is installing packages, this
123 # is None. If the user is doing anything other than installing packages,
124 # this will contain the action name, which will map exactly to the
125 # long-form name of the associated emerge option.
126 #
127 # Example: If you call parallel_emerge --unmerge package, the action name
128 # will be "unmerge"
129 self.action = None
130
131 # The list of packages the user passed on the command-line.
132 self.cmdline_packages = None
133
134 # The emerge dependency graph. It'll contain all the packages involved in
135 # this merge, along with their versions.
136 self.depgraph = None
137
David Jamesbf1e3442011-05-28 07:44:20 -0700138 # The list of candidates to add to the world file.
139 self.favorites = None
140
David Jamesfcb70ef2011-02-02 16:02:30 -0800141 # A dict of the options passed to emerge. This dict has been cleaned up
142 # a bit by parse_opts, so that it's a bit easier for the emerge code to
143 # look at the options.
144 #
145 # Emerge takes a few shortcuts in its cleanup process to make parsing of
146 # the options dict easier. For example, if you pass in "--usepkg=n", the
147 # "--usepkg" flag is just left out of the dictionary altogether. Because
148 # --usepkg=n is the default, this makes parsing easier, because emerge
149 # can just assume that if "--usepkg" is in the dictionary, it's enabled.
150 #
151 # These cleanup processes aren't applied to all options. For example, the
152 # --with-bdeps flag is passed in as-is. For a full list of the cleanups
153 # applied by emerge, see the parse_opts function in the _emerge.main
154 # package.
155 self.opts = None
156
157 # A dictionary used by portage to maintain global state. This state is
158 # loaded from disk when portage starts up, and saved to disk whenever we
159 # call mtimedb.commit().
160 #
161 # This database contains information about global updates (i.e., what
162 # version of portage we have) and what we're currently doing. Portage
163 # saves what it is currently doing in this database so that it can be
164 # resumed when you call it with the --resume option.
165 #
166 # parallel_emerge does not save what it is currently doing in the mtimedb,
167 # so we do not support the --resume option.
168 self.mtimedb = None
169
170 # The portage configuration for our current root. This contains the portage
171 # settings (see below) and the three portage trees for our current root.
172 # (The three portage trees are explained below, in the documentation for
173 # the "trees" member.)
174 self.root_config = None
175
176 # The scheduler graph is used by emerge to calculate what packages to
177 # install. We don't actually install any deps, so this isn't really used,
178 # but we pass it in to the Scheduler object anyway.
179 self.scheduler_graph = None
180
181 # Portage settings for our current session. Most of these settings are set
182 # in make.conf inside our current install root.
183 self.settings = None
184
185 # The spinner, which spews stuff to stdout to indicate that portage is
186 # doing something. We maintain our own spinner, so we set the portage
187 # spinner to "silent" mode.
188 self.spinner = None
189
190 # The portage trees. There are separate portage trees for each root. To get
191 # the portage tree for the current root, you can look in self.trees[root],
192 # where root = self.settings["ROOT"].
193 #
194 # In each root, there are three trees: vartree, porttree, and bintree.
195 # - vartree: A database of the currently-installed packages.
196 # - porttree: A database of ebuilds, that can be used to build packages.
197 # - bintree: A database of binary packages.
198 self.trees = None
199
200
201class DepGraphGenerator(object):
202 """Grab dependency information about packages from portage.
203
204 Typical usage:
205 deps = DepGraphGenerator()
206 deps.Initialize(sys.argv[1:])
207 deps_tree, deps_info = deps.GenDependencyTree()
208 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
209 deps.PrintTree(deps_tree)
210 PrintDepsMap(deps_graph)
211 """
212
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700213 __slots__ = ["board", "emerge", "package_db", "show_output", "unpack_only"]
David Jamesfcb70ef2011-02-02 16:02:30 -0800214
215 def __init__(self):
216 self.board = None
217 self.emerge = EmergeData()
David Jamesfcb70ef2011-02-02 16:02:30 -0800218 self.package_db = {}
David Jamesfcb70ef2011-02-02 16:02:30 -0800219 self.show_output = False
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700220 self.unpack_only = False
David Jamesfcb70ef2011-02-02 16:02:30 -0800221
222 def ParseParallelEmergeArgs(self, argv):
223 """Read the parallel emerge arguments from the command-line.
224
225 We need to be compatible with emerge arg format. We scrape arguments that
226 are specific to parallel_emerge, and pass through the rest directly to
227 emerge.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500228
David Jamesfcb70ef2011-02-02 16:02:30 -0800229 Args:
230 argv: arguments list
Mike Frysinger1a736a82013-12-12 01:50:59 -0500231
David Jamesfcb70ef2011-02-02 16:02:30 -0800232 Returns:
233 Arguments that don't belong to parallel_emerge
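
    Example (illustrative only; the board and package names here are made up,
    and the exact flags come from the mapping in the loop below):
      ParseParallelEmergeArgs(["--board=foo", "--workon=sys-apps/dbus", "-uDNv"])
      sets self.board to "foo" and returns roughly
      ["--reinstall-atoms=sys-apps/dbus", "--usepkg-exclude=sys-apps/dbus",
       "-uDNv", "--rebuild-exclude=chromeos-base/chromeos-chrome"].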
234 """
235 emerge_args = []
236 for arg in argv:
237 # Specifically match arguments that are specific to parallel_emerge, and
238 # pass through the rest.
239 if arg.startswith("--board="):
240 self.board = arg.replace("--board=", "")
241 elif arg.startswith("--workon="):
242 workon_str = arg.replace("--workon=", "")
David James7a1ea4b2011-10-13 15:06:41 -0700243 emerge_args.append("--reinstall-atoms=%s" % workon_str)
244 emerge_args.append("--usepkg-exclude=%s" % workon_str)
David Jamesfcb70ef2011-02-02 16:02:30 -0800245 elif arg.startswith("--force-remote-binary="):
246 force_remote_binary = arg.replace("--force-remote-binary=", "")
David James7a1ea4b2011-10-13 15:06:41 -0700247 emerge_args.append("--useoldpkg-atoms=%s" % force_remote_binary)
David Jamesfcb70ef2011-02-02 16:02:30 -0800248 elif arg == "--show-output":
249 self.show_output = True
David James386ccd12011-05-04 20:17:42 -0700250 elif arg == "--rebuild":
David James7a1ea4b2011-10-13 15:06:41 -0700251 emerge_args.append("--rebuild-if-unbuilt")
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700252 elif arg == "--unpackonly":
253 emerge_args.append("--fetchonly")
254 self.unpack_only = True
David Jamesfcb70ef2011-02-02 16:02:30 -0800255 else:
256 # Not one of our options, so pass through to emerge.
257 emerge_args.append(arg)
258
David James386ccd12011-05-04 20:17:42 -0700259 # These packages take a really long time to build, so, for expediency, we
260 # are blacklisting them from automatic rebuilds because one of their
261 # dependencies needs to be recompiled.
    for pkg in ("chromeos-base/chromeos-chrome",):
      emerge_args.append("--rebuild-exclude=%s" % pkg)

    return emerge_args

  def Initialize(self, args):
    """Initializer. Parses arguments and sets up portage state."""

    # Parse and strip out args that are just intended for parallel_emerge.
    emerge_args = self.ParseParallelEmergeArgs(args)

    # Setup various environment variables based on our current board. These
    # variables are normally setup inside emerge-${BOARD}, but since we don't
    # call that script, we have to set it up here. These variables serve to
    # point our tools at /build/BOARD and to setup cross compiles to the
    # appropriate board as configured in toolchain.conf.
    if self.board:
      sysroot = os.environ.get('SYSROOT', cros_build_lib.GetSysroot(self.board))
      os.environ["PORTAGE_CONFIGROOT"] = sysroot
      os.environ["PORTAGE_SYSROOT"] = sysroot
      os.environ["SYSROOT"] = sysroot

    # Although CHROMEOS_ROOT isn't specific to boards, it's normally setup
    # inside emerge-${BOARD}, so we set it up here for compatibility. It
    # will be going away soon as we migrate to CROS_WORKON_SRCROOT.
    os.environ.setdefault("CHROMEOS_ROOT", os.environ["HOME"] + "/trunk")

    # Turn off interactive delays
    os.environ["EBEEP_IGNORE"] = "1"
    os.environ["EPAUSE_IGNORE"] = "1"
    os.environ["CLEAN_DELAY"] = "0"

    # Parse the emerge options.
    action, opts, cmdline_packages = parse_opts(emerge_args, silent=True)

    # Set environment variables based on options. Portage normally sets these
    # environment variables in emerge_main, but we can't use that function,
    # because it also does a bunch of other stuff that we don't want.
    # TODO(davidjames): Patch portage to move this logic into a function we can
    # reuse here.
    if "--debug" in opts:
      os.environ["PORTAGE_DEBUG"] = "1"
    if "--config-root" in opts:
      os.environ["PORTAGE_CONFIGROOT"] = opts["--config-root"]
    if "--root" in opts:
      os.environ["ROOT"] = opts["--root"]
    if "--accept-properties" in opts:
      os.environ["ACCEPT_PROPERTIES"] = opts["--accept-properties"]

    # If we're installing packages to the board, we can disable vardb locks.
    # This is safe because we only run up to one instance of parallel_emerge in
    # parallel.
    # TODO(davidjames): Enable this for the host too.
    if self.board:
      os.environ.setdefault("PORTAGE_LOCKS", "false")

    # Now that we've setup the necessary environment variables, we can load the
    # emerge config from disk.
    settings, trees, mtimedb = load_emerge_config()

    # Add in EMERGE_DEFAULT_OPTS, if specified.
    tmpcmdline = []
    if "--ignore-default-opts" not in opts:
      tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
    tmpcmdline.extend(emerge_args)
    action, opts, cmdline_packages = parse_opts(tmpcmdline)

    # If we're installing to the board, we want the --root-deps option so that
    # portage will install the build dependencies to that location as well.
    if self.board:
      opts.setdefault("--root-deps", True)

    # Check whether our portage tree is out of date. Typically, this happens
    # when you're setting up a new portage tree, such as in setup_board and
    # make_chroot. In that case, portage applies a bunch of global updates
    # here. Once the updates are finished, we need to commit any changes
    # that the global update made to our mtimedb, and reload the config.
    #
    # Portage normally handles this logic in emerge_main, but again, we can't
    # use that function here.
    if _global_updates(trees, mtimedb["updates"]):
      mtimedb.commit()
      settings, trees, mtimedb = load_emerge_config(trees=trees)

    # Setup implied options. Portage normally handles this logic in
    # emerge_main.
    if "--buildpkgonly" in opts or "buildpkg" in settings.features:
      opts.setdefault("--buildpkg", True)
    if "--getbinpkgonly" in opts:
      opts.setdefault("--usepkgonly", True)
      opts.setdefault("--getbinpkg", True)
    if "getbinpkg" in settings.features:
      # Per emerge_main, FEATURES=getbinpkg overrides --getbinpkg=n
      opts["--getbinpkg"] = True
    if "--getbinpkg" in opts or "--usepkgonly" in opts:
      opts.setdefault("--usepkg", True)
    if "--fetch-all-uri" in opts:
      opts.setdefault("--fetchonly", True)
    if "--skipfirst" in opts:
      opts.setdefault("--resume", True)
    if "--buildpkgonly" in opts:
      # --buildpkgonly will not merge anything, so it overrides all binary
      # package options.
      for opt in ("--getbinpkg", "--getbinpkgonly",
                  "--usepkg", "--usepkgonly"):
        opts.pop(opt, None)
    if (settings.get("PORTAGE_DEBUG", "") == "1" and
        "python-trace" in settings.features):
      portage.debug.set_trace(True)

    # Complain about unsupported options
    for opt in ("--ask", "--ask-enter-invalid", "--resume", "--skipfirst"):
      if opt in opts:
        print("%s is not supported by parallel_emerge" % opt)
        sys.exit(1)

    # Make emerge specific adjustments to the config (e.g. colors!)
    adjust_configs(opts, trees)

    # Save our configuration so far in the emerge object
    emerge = self.emerge
    emerge.action, emerge.opts = action, opts
    emerge.settings, emerge.trees, emerge.mtimedb = settings, trees, mtimedb
    emerge.cmdline_packages = cmdline_packages
    root = settings["ROOT"]
    emerge.root_config = trees[root]["root_config"]

    if "--usepkg" in opts:
      emerge.trees[root]["bintree"].populate("--getbinpkg" in opts)

  def CreateDepgraph(self, emerge, packages):
    """Create an emerge depgraph object."""
    # Setup emerge options.
    emerge_opts = emerge.opts.copy()

    # Ask portage to build a dependency graph with the options we specified
    # above.
    params = create_depgraph_params(emerge_opts, emerge.action)
    success, depgraph, favorites = backtrack_depgraph(
        emerge.settings, emerge.trees, emerge_opts, params, emerge.action,
        packages, emerge.spinner)
    emerge.depgraph = depgraph

    # Is it impossible to honor the user's request? Bail!
    if not success:
      depgraph.display_problems()
      sys.exit(1)

    emerge.depgraph = depgraph
    emerge.favorites = favorites

    # Prime and flush emerge caches.
    root = emerge.settings["ROOT"]
    vardb = emerge.trees[root]["vartree"].dbapi
    if "--pretend" not in emerge.opts:
      vardb.counter_tick()
    vardb.flush_cache()

  def GenDependencyTree(self):
    """Get dependency tree info from emerge.

    Returns:
      Dependency tree
    """
    start = time.time()

    emerge = self.emerge

    # Create a list of packages to merge
    packages = set(emerge.cmdline_packages[:])

    # Tell emerge to be quiet. We print plenty of info ourselves so we don't
    # need any extra output from portage.
    portage.util.noiselimit = -1

    # My favorite feature: The silent spinner. It doesn't spin. Ever.
    # I'd disable the colors by default too, but they look kind of cool.
    emerge.spinner = stdout_spinner()
    emerge.spinner.update = emerge.spinner.update_quiet

    if "--quiet" not in emerge.opts:
      print("Calculating deps...")

    self.CreateDepgraph(emerge, packages)
    depgraph = emerge.depgraph

    # Build our own tree from the emerge digraph.
    deps_tree = {}
    # pylint: disable=W0212
    digraph = depgraph._dynamic_config.digraph
    root = emerge.settings["ROOT"]
    final_db = depgraph._dynamic_config._filtered_trees[root]['graph_db']
    for node, node_deps in digraph.nodes.items():
      # Calculate dependency packages that need to be installed first. Each
      # child on the digraph is a dependency. The "operation" field specifies
      # what we're doing (e.g. merge, uninstall, etc.). The "priorities" array
      # contains the type of dependency (e.g. build, runtime, runtime_post,
      # etc.)
      #
      # Portage refers to the identifiers for packages as a CPV. This acronym
      # stands for Category/Package-Version.
      #
      # Here's an example CPV: chromeos-base/power_manager-0.0.1-r1
      # Split up, this CPV would be:
      #   C -- Category: chromeos-base
      #   P -- Package: power_manager
      #   V -- Version: 0.0.1-r1
      #
      # We just refer to CPVs as packages here because it's easier.
      deps = {}
      for child, priorities in node_deps[0].items():
        if isinstance(child, Package) and child.root == root:
          cpv = str(child.cpv)
          action = str(child.operation)

          # If we're uninstalling a package, check whether Portage is
          # installing a replacement. If so, just depend on the installation
          # of the new package, because the old package will automatically
          # be uninstalled at that time.
          if action == "uninstall":
            for pkg in final_db.match_pkgs(child.slot_atom):
              cpv = str(pkg.cpv)
              action = "merge"
              break

          deps[cpv] = dict(action=action,
                           deptypes=[str(x) for x in priorities],
                           deps={})

      # We've built our list of deps, so we can add our package to the tree.
      if isinstance(node, Package) and node.root == root:
        deps_tree[str(node.cpv)] = dict(action=str(node.operation),
                                        deps=deps)

    # Ask portage for its install plan, so that we can only throw out
    # dependencies that portage throws out.
    deps_info = {}
    for pkg in depgraph.altlist():
      if isinstance(pkg, Package):
        assert pkg.root == root
        self.package_db[pkg.cpv] = pkg

        # Save off info about the package
        deps_info[str(pkg.cpv)] = {"idx": len(deps_info)}

    seconds = time.time() - start
    if "--quiet" not in emerge.opts:
      print("Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60))

    return deps_tree, deps_info

  def PrintTree(self, deps, depth=""):
    """Print the deps we have seen in the emerge output.

    Args:
      deps: Dependency tree structure.
      depth: Allows printing the tree recursively, with indentation.
    """
    for entry in sorted(deps):
      action = deps[entry]["action"]
      print("%s %s (%s)" % (depth, entry, action))
      self.PrintTree(deps[entry]["deps"], depth=depth + " ")

  def GenDependencyGraph(self, deps_tree, deps_info):
    """Generate a doubly linked dependency graph.

    Args:
      deps_tree: Dependency tree structure.
      deps_info: More details on the dependencies.

    Returns:
      Deps graph in the form of a dict of packages, with each package
      specifying a "needs" list and "provides" list.
    """
    emerge = self.emerge

    # deps_map is the actual dependency graph.
    #
    # Each package specifies a "needs" list and a "provides" list. The "needs"
    # list indicates which packages we depend on. The "provides" list
    # indicates the reverse dependencies -- what packages need us.
    #
    # We also provide some other information in the dependency graph:
    #  - action: What we're planning on doing with this package. Generally,
    #    "merge", "nomerge", or "uninstall"
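    # An illustrative sketch of one deps_map entry (shape only; the package
    # names and numbers are made up, and "tprovides" is added later by
    # FindRecursiveProvides):
    #   deps_map["sys-apps/dbus-1.6.8-r3"] = {
    #       "action": "merge",
    #       "needs": {"dev-libs/expat-2.1.0": "buildtime/runtime"},
    #       "provides": set(["chromeos-base/metrics-0.0.1-r100"]),
    #       "nodeps": False, "binary": True, "idx": 42,
    #   }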
    deps_map = {}

    def ReverseTree(packages):
      """Convert tree to digraph.

      Take the tree of package -> requirements and reverse it to a digraph of
      buildable packages -> packages they unblock.

      Args:
        packages: Tree(s) of dependencies.

      Returns:
        Unsanitized digraph.
      """
      binpkg_phases = set(["setup", "preinst", "postinst"])
      needed_dep_types = set(["blocker", "buildtime", "buildtime_slot_op",
                              "runtime", "runtime_slot_op"])
      ignored_dep_types = set(["ignored", "optional", "runtime_post", "soft"])
      all_dep_types = ignored_dep_types | needed_dep_types
      for pkg in packages:

        # Create an entry for the package
        action = packages[pkg]["action"]
        default_pkg = {"needs": {}, "provides": set(), "action": action,
                       "nodeps": False, "binary": False}
        this_pkg = deps_map.setdefault(pkg, default_pkg)

        if pkg in deps_info:
          this_pkg["idx"] = deps_info[pkg]["idx"]

        # If a package doesn't have any defined phases that might use the
        # dependent packages (i.e. pkg_setup, pkg_preinst, or pkg_postinst),
        # we can install this package before its deps are ready.
        emerge_pkg = self.package_db.get(pkg)
        if emerge_pkg and emerge_pkg.type_name == "binary":
          this_pkg["binary"] = True
          defined_phases = emerge_pkg.defined_phases
          defined_binpkg_phases = binpkg_phases.intersection(defined_phases)
          if not defined_binpkg_phases:
            this_pkg["nodeps"] = True

        # Create entries for dependencies of this package first.
        ReverseTree(packages[pkg]["deps"])

        # Add dependencies to this package.
        for dep, dep_item in packages[pkg]["deps"].iteritems():
          # We only need to enforce strict ordering of dependencies if the
          # dependency is a blocker, or is a buildtime or runtime dependency.
          # (I.e., ignored, optional, and runtime_post dependencies don't
          # depend on ordering.)
          dep_types = dep_item["deptypes"]
          if needed_dep_types.intersection(dep_types):
            deps_map[dep]["provides"].add(pkg)
            this_pkg["needs"][dep] = "/".join(dep_types)

          # Verify we processed all appropriate dependency types.
          unknown_dep_types = set(dep_types) - all_dep_types
          if unknown_dep_types:
            print("Unknown dependency types found:")
            print("  %s -> %s (%s)" % (pkg, dep, "/".join(unknown_dep_types)))
            sys.exit(1)

          # If there's a blocker, Portage may need to move files from one
          # package to another, which requires editing the CONTENTS files of
          # both packages. To avoid race conditions while editing this file,
          # the two packages must not be installed in parallel, so we can't
          # safely ignore dependencies. See http://crosbug.com/19328
          if "blocker" in dep_types:
            this_pkg["nodeps"] = False

    def FindCycles():
      """Find cycles in the dependency tree.

      Returns:
        A dict mapping cyclic packages to a dict of the deps that cause
        cycles. For each dep that causes cycles, it returns an example
        traversal of the graph that shows the cycle.
      """

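      # A depth-first walk over the "needs" edges. `unresolved` acts as the
      # current DFS stack, so hitting a dep that is already on it means we
      # have closed a cycle.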
      def FindCyclesAtNode(pkg, cycles, unresolved, resolved):
        """Find cycles in cyclic dependencies starting at specified package.

        Args:
          pkg: Package identifier.
          cycles: A dict mapping cyclic packages to a dict of the deps that
            cause cycles. For each dep that causes cycles, it returns an
            example traversal of the graph that shows the cycle.
          unresolved: Nodes that have been visited but are not fully processed.
          resolved: Nodes that have been visited and are fully processed.
        """
        pkg_cycles = cycles.get(pkg)
        if pkg in resolved and not pkg_cycles:
          # If we already looked at this package, and found no cyclic
          # dependencies, we can stop now.
          return
        unresolved.append(pkg)
        for dep in deps_map[pkg]["needs"]:
          if dep in unresolved:
            idx = unresolved.index(dep)
            mycycle = unresolved[idx:] + [dep]
            for i in xrange(len(mycycle) - 1):
              pkg1, pkg2 = mycycle[i], mycycle[i+1]
              cycles.setdefault(pkg1, {}).setdefault(pkg2, mycycle)
          elif not pkg_cycles or dep not in pkg_cycles:
            # Looks like we haven't seen this edge before.
            FindCyclesAtNode(dep, cycles, unresolved, resolved)
        unresolved.pop()
        resolved.add(pkg)

      cycles, unresolved, resolved = {}, [], set()
      for pkg in deps_map:
        FindCyclesAtNode(pkg, cycles, unresolved, resolved)
      return cycles

    def RemoveUnusedPackages():
      """Remove installed packages, propagating dependencies."""
      # Schedule packages that aren't on the install list for removal
      rm_pkgs = set(deps_map.keys()) - set(deps_info.keys())

      # Remove the packages we don't want, simplifying the graph and making
      # it easier for us to crack cycles.
      for pkg in sorted(rm_pkgs):
        this_pkg = deps_map[pkg]
        needs = this_pkg["needs"]
        provides = this_pkg["provides"]
        for dep in needs:
          dep_provides = deps_map[dep]["provides"]
          dep_provides.update(provides)
          dep_provides.discard(pkg)
          dep_provides.discard(dep)
        for target in provides:
          target_needs = deps_map[target]["needs"]
          target_needs.update(needs)
          target_needs.pop(pkg, None)
          target_needs.pop(target, None)
        del deps_map[pkg]

    def PrintCycleBreak(basedep, dep, mycycle):
      """Print details about a cycle that we are planning on breaking.

      We are breaking a cycle where dep needs basedep. mycycle is an
      example cycle which contains dep -> basedep.
      """

      needs = deps_map[dep]["needs"]
      depinfo = needs.get(basedep, "deleted")

      # It's OK to swap install order for blockers, as long as the two
      # packages aren't installed in parallel. If there is a cycle, then
      # we know the packages depend on each other already, so we can drop the
      # blocker safely without printing a warning.
      if depinfo == "blocker":
        return

      # Notify the user that we're breaking a cycle.
      print("Breaking %s -> %s (%s)" % (dep, basedep, depinfo))

      # Show cycle.
      for i in xrange(len(mycycle) - 1):
        pkg1, pkg2 = mycycle[i], mycycle[i+1]
        needs = deps_map[pkg1]["needs"]
        depinfo = needs.get(pkg2, "deleted")
        if pkg1 == dep and pkg2 == basedep:
          depinfo = depinfo + ", deleting"
        print("  %s -> %s (%s)" % (pkg1, pkg2, depinfo))

    def SanitizeTree():
      """Remove circular dependencies.

      We prune all dependencies involved in cycles that go against the emerge
      ordering. This has a nice property: we're guaranteed to merge
      dependencies in the same order that portage does.

      Because we don't treat any dependencies as "soft" unless they're killed
      by a cycle, we pay attention to a larger number of dependencies when
      merging. This hurts performance a bit, but helps reliability.
      """
      start = time.time()
      cycles = FindCycles()
      while cycles:
        for dep, mycycles in cycles.iteritems():
          for basedep, mycycle in mycycles.iteritems():
            if deps_info[basedep]["idx"] >= deps_info[dep]["idx"]:
              if "--quiet" not in emerge.opts:
                PrintCycleBreak(basedep, dep, mycycle)
              del deps_map[dep]["needs"][basedep]
              deps_map[basedep]["provides"].remove(dep)
        cycles = FindCycles()
      seconds = time.time() - start
      if "--quiet" not in emerge.opts and seconds >= 0.1:
        print("Tree sanitized in %dm%.1fs" % (seconds / 60, seconds % 60))

    def FindRecursiveProvides(pkg, seen):
      """Find all nodes that require a particular package.

      Assumes that graph is acyclic.

      Args:
        pkg: Package identifier.
        seen: Nodes that have been visited so far.
      """
      if pkg in seen:
        return
      seen.add(pkg)
      info = deps_map[pkg]
      info["tprovides"] = info["provides"].copy()
      for dep in info["provides"]:
        FindRecursiveProvides(dep, seen)
        info["tprovides"].update(deps_map[dep]["tprovides"])

    ReverseTree(deps_tree)

    # We need to remove unused packages so that we can use the dependency
    # ordering of the install process to show us what cycles to crack.
    RemoveUnusedPackages()
    SanitizeTree()
    seen = set()
    for pkg in deps_map:
      FindRecursiveProvides(pkg, seen)
    return deps_map

  def PrintInstallPlan(self, deps_map):
    """Print an emerge-style install plan.

    The install plan lists what packages we're installing, in order.
    It's useful for understanding what parallel_emerge is doing.

    Args:
      deps_map: The dependency graph.
    """

    def InstallPlanAtNode(target, deps_map):
      nodes = []
      nodes.append(target)
      for dep in deps_map[target]["provides"]:
        del deps_map[dep]["needs"][target]
        if not deps_map[dep]["needs"]:
          nodes.extend(InstallPlanAtNode(dep, deps_map))
      return nodes

    deps_map = copy.deepcopy(deps_map)
    install_plan = []
    plan = set()
    for target, info in deps_map.iteritems():
      if not info["needs"] and target not in plan:
        for item in InstallPlanAtNode(target, deps_map):
          plan.add(item)
          install_plan.append(self.package_db[item])

    for pkg in plan:
      del deps_map[pkg]

    if deps_map:
      print("Cyclic dependencies:", " ".join(deps_map))
      PrintDepsMap(deps_map)
      sys.exit(1)

    self.emerge.depgraph.display(install_plan)


def PrintDepsMap(deps_map):
808 """Print dependency graph, for each package list it's prerequisites."""
  for i in sorted(deps_map):
    print("%s: (%s) needs" % (i, deps_map[i]["action"]))
    needs = deps_map[i]["needs"]
    for j in sorted(needs):
      print("  %s" % (j))
    if not needs:
      print("   no dependencies")


class EmergeJobState(object):
  """Structure describing the EmergeJobState."""

  __slots__ = ["done", "filename", "last_notify_timestamp", "last_output_seek",
               "last_output_timestamp", "pkgname", "retcode", "start_timestamp",
               "target", "fetch_only", "unpack_only"]

  def __init__(self, target, pkgname, done, filename, start_timestamp,
               retcode=None, fetch_only=False, unpack_only=False):

    # The full name of the target we're building (e.g.
    # virtual/target-os-1-r60)
    self.target = target

    # The short name of the target we're building (e.g. target-os-1-r60)
    self.pkgname = pkgname

    # Whether the job is done. (True if the job is done; false otherwise.)
    self.done = done

    # The filename where output is currently stored.
    self.filename = filename

    # The timestamp of the last time we printed the name of the log file. We
    # print this at the beginning of the job, so this starts at
    # start_timestamp.
    self.last_notify_timestamp = start_timestamp

    # The location (in bytes) of the end of the last complete line we printed.
    # This starts off at zero. We use this to jump to the right place when we
    # print output from the same ebuild multiple times.
    self.last_output_seek = 0

    # The timestamp of the last time we printed output. Since we haven't
    # printed output yet, this starts at zero.
    self.last_output_timestamp = 0

    # The return code of our job, if the job is actually finished.
    self.retcode = retcode

    # Was this just a fetch job?
    self.fetch_only = fetch_only

    # The timestamp when our job started.
    self.start_timestamp = start_timestamp

    # No emerge, only unpack packages.
    self.unpack_only = unpack_only


def KillHandler(_signum, _frame):
  # Kill self and all subprocesses.
  os.killpg(0, signal.SIGKILL)


def SetupWorkerSignals():
  def ExitHandler(_signum, _frame):
    # Set KILLED flag.
    KILLED.set()

    # Remove our signal handlers so we don't get called recursively.
    signal.signal(signal.SIGINT, KillHandler)
    signal.signal(signal.SIGTERM, KillHandler)

  # Ensure that we exit quietly and cleanly, if possible, when we receive
  # SIGTERM or SIGINT signals. By default, when the user hits CTRL-C, all
  # of the child processes will print details about KeyboardInterrupt
  # exceptions, which isn't very helpful.
  signal.signal(signal.SIGINT, ExitHandler)
  signal.signal(signal.SIGTERM, ExitHandler)


def EmergeProcess(output, target, *args, **kwargs):
  """Merge a package in a subprocess.

  Args:
    output: Temporary file to write output.
    target: The package we'll be processing (for display purposes).
    *args: Arguments to pass to Scheduler constructor.
    **kwargs: Keyword arguments to pass to Scheduler constructor.

  Returns:
    The exit code returned by the subprocess.
  """
  pid = os.fork()
  if pid == 0:
    try:
      proctitle.settitle('EmergeProcess', target)

      # Sanity checks.
      if sys.stdout.fileno() != 1:
        raise Exception("sys.stdout.fileno() != 1")
      if sys.stderr.fileno() != 2:
        raise Exception("sys.stderr.fileno() != 2")

      # - Redirect 1 (stdout) and 2 (stderr) at our temporary file.
      # - Redirect 0 to point at sys.stdin. In this case, sys.stdin
      #   points at a file reading os.devnull, because multiprocessing mucks
      #   with sys.stdin.
      # - Leave the sys.stdin and output filehandles alone.
      fd_pipes = {0: sys.stdin.fileno(),
                  1: output.fileno(),
                  2: output.fileno(),
                  sys.stdin.fileno(): sys.stdin.fileno(),
                  output.fileno(): output.fileno()}
      # pylint: disable=W0212
      portage.process._setup_pipes(fd_pipes, close_fds=False)

      # Portage doesn't like when sys.stdin.fileno() != 0, so point sys.stdin
      # at the filehandle we just created in _setup_pipes.
      if sys.stdin.fileno() != 0:
        sys.__stdin__ = sys.stdin = os.fdopen(0, "r")

      scheduler = Scheduler(*args, **kwargs)

      # Enable blocker handling even though we're in --nodeps mode. This
      # allows us to unmerge the blocker after we've merged the replacement.
      scheduler._opts_ignore_blockers = frozenset()

      # Actually do the merge.
      retval = scheduler.merge()

    # We catch all exceptions here (including SystemExit, KeyboardInterrupt,
    # etc) so as to ensure that we don't confuse the multiprocessing module,
    # which expects that all forked children exit with os._exit().
    # pylint: disable=W0702
    except:
      traceback.print_exc(file=output)
      retval = 1
    sys.stdout.flush()
    sys.stderr.flush()
    output.flush()
    # pylint: disable=W0212
    os._exit(retval)
  else:
    # Return the exit code of the subprocess.
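    # Note: this is the raw 16-bit status from os.waitpid() (the exit code is
    # encoded in the high byte), rather than a plain exit code.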
    return os.waitpid(pid, 0)[1]


def UnpackPackage(pkg_state):
  """Unpacks package described by pkg_state.

  Args:
    pkg_state: EmergeJobState object describing target.

  Returns:
    Exit code returned by subprocess.
  """
  pkgdir = os.environ.get("PKGDIR",
                          os.path.join(os.environ["SYSROOT"], "packages"))
  root = os.environ.get("ROOT", os.environ["SYSROOT"])
  path = os.path.join(pkgdir, pkg_state.target + ".tbz2")
  comp = cros_build_lib.FindCompressor(cros_build_lib.COMP_BZIP2)
  cmd = [comp, "-dc"]
  if comp.endswith("pbzip2"):
    cmd.append("--ignore-trailing-garbage=1")
  cmd.append(path)

  result = cros_build_lib.RunCommand(cmd, cwd=root, stdout_to_pipe=True,
                                     print_cmd=False, error_code_ok=True)

  # If we were not successful, return now and don't attempt untar.
  if result.returncode:
    return result.returncode

  cmd = ["sudo", "tar", "-xf", "-", "-C", root]
  result = cros_build_lib.RunCommand(cmd, cwd=root, input=result.output,
                                     print_cmd=False, error_code_ok=True)

  return result.returncode


def EmergeWorker(task_queue, job_queue, emerge, package_db, fetch_only=False,
                 unpack_only=False):
  """This worker emerges any packages given to it on the task_queue.

  Args:
    task_queue: The queue of tasks for this worker to do.
    job_queue: The queue of results from the worker.
    emerge: An EmergeData() object.
    package_db: A dict, mapping package ids to portage Package objects.
    fetch_only: A bool, indicating if we should just fetch the target.
    unpack_only: A bool, indicating if we should just unpack the target.

  It expects package identifiers to be passed to it via task_queue. When
  a task is started, it pushes the (target, filename) to the started_queue.
  The output is stored in filename. When a merge starts or finishes, we push
  EmergeJobState objects to the job_queue.
  """
  if fetch_only:
    mode = 'fetch'
  elif unpack_only:
    mode = 'unpack'
  else:
    mode = 'emerge'
  proctitle.settitle('EmergeWorker', mode, '[idle]')

  SetupWorkerSignals()
  settings, trees, mtimedb = emerge.settings, emerge.trees, emerge.mtimedb

  # Disable flushing of caches to save on I/O.
  root = emerge.settings["ROOT"]
  vardb = emerge.trees[root]["vartree"].dbapi
  vardb._flush_cache_enabled = False  # pylint: disable=protected-access
  bindb = emerge.trees[root]["bintree"].dbapi
  # Might be a set, might be a list, might be None; no clue, just use shallow
  # copy to ensure we can roll it back.
  # pylint: disable=W0212
  original_remotepkgs = copy.copy(bindb.bintree._remotepkgs)

  opts, spinner = emerge.opts, emerge.spinner
  opts["--nodeps"] = True
  if fetch_only:
    opts["--fetchonly"] = True

  while True:
    # Wait for a new item to show up on the queue. This is a blocking wait,
    # so if there's nothing to do, we just sit here.
    pkg_state = task_queue.get()
    if pkg_state is None:
      # If target is None, this means that the main thread wants us to quit.
      # The other workers need to exit too, so we'll push the message back on
      # to the queue so they'll get it too.
      task_queue.put(None)
      return
    if KILLED.is_set():
      return

    target = pkg_state.target
    proctitle.settitle('EmergeWorker', mode, target)

    db_pkg = package_db[target]

    if db_pkg.type_name == "binary":
      if not fetch_only and pkg_state.fetched_successfully:
        # Ensure portage doesn't think our pkg is remote- else it'll force
        # a redownload of it (even if the on-disk file is fine). In-memory
        # caching basically, implemented dumbly.
        bindb.bintree._remotepkgs = None
      else:
        bindb.bintree._remotepkgs = original_remotepkgs

    db_pkg.root_config = emerge.root_config
    install_list = [db_pkg]
    pkgname = db_pkg.pf
    output = tempfile.NamedTemporaryFile(prefix=pkgname + "-", delete=False)
    os.chmod(output.name, 0o644)
    start_timestamp = time.time()
    job = EmergeJobState(target, pkgname, False, output.name, start_timestamp,
                         fetch_only=fetch_only, unpack_only=unpack_only)
    job_queue.put(job)
    if "--pretend" in opts:
      retcode = 0
    else:
      try:
        emerge.scheduler_graph.mergelist = install_list
        if unpack_only:
          retcode = UnpackPackage(pkg_state)
        else:
          retcode = EmergeProcess(output, target, settings, trees, mtimedb,
                                  opts, spinner, favorites=emerge.favorites,
                                  graph_config=emerge.scheduler_graph)
      except Exception:
        traceback.print_exc(file=output)
        retcode = 1
      output.close()

    if KILLED.is_set():
      return

    job = EmergeJobState(target, pkgname, True, output.name, start_timestamp,
                         retcode, fetch_only=fetch_only,
                         unpack_only=unpack_only)
    job_queue.put(job)

    # Set the title back to idle as the multiprocess pool won't destroy us;
    # when another job comes up, it'll re-use this process.
    proctitle.settitle('EmergeWorker', mode, '[idle]')


class LinePrinter(object):
  """Helper object to print a single line."""

  def __init__(self, line):
    self.line = line

  def Print(self, _seek_locations):
    print(self.line)


class JobPrinter(object):
  """Helper object to print output of a job."""

  def __init__(self, job, unlink=False):
    """Print output of job.

    If unlink is True, unlink the job output file when done.
    """
    self.current_time = time.time()
    self.job = job
    self.unlink = unlink

  def Print(self, seek_locations):

    job = self.job

    # Calculate how long the job has been running.
    seconds = self.current_time - job.start_timestamp

    # Note that we've printed out the job so far.
    job.last_output_timestamp = self.current_time

    # Note that we're starting the job
    info = "job %s (%dm%.1fs)" % (job.pkgname, seconds / 60, seconds % 60)
    last_output_seek = seek_locations.get(job.filename, 0)
    if last_output_seek:
      print("=== Continue output for %s ===" % info)
    else:
      print("=== Start output for %s ===" % info)

    # Print actual output from job
    f = codecs.open(job.filename, encoding='utf-8', errors='replace')
    f.seek(last_output_seek)
    prefix = job.pkgname + ":"
    for line in f:

      # Save off our position in the file
      if line and line[-1] == "\n":
        last_output_seek = f.tell()
        line = line[:-1]

      # Print our line
      print(prefix, line.encode('utf-8', 'replace'))
    f.close()

    # Save our last spot in the file so that we don't print out the same
    # location twice.
    seek_locations[job.filename] = last_output_seek

    # Note end of output section
    if job.done:
      print("=== Complete: %s ===" % info)
    else:
      print("=== Still running: %s ===" % info)

    if self.unlink:
      os.unlink(job.filename)


def PrintWorker(queue):
  """A worker that prints stuff to the screen as requested."""
  proctitle.settitle('PrintWorker')

  def ExitHandler(_signum, _frame):
    # Set KILLED flag.
    KILLED.set()

    # Switch to default signal handlers so that we'll die after two signals.
    signal.signal(signal.SIGINT, KillHandler)
    signal.signal(signal.SIGTERM, KillHandler)

  # Don't exit on the first SIGINT / SIGTERM, because the parent worker will
  # handle it and tell us when we need to exit.
  signal.signal(signal.SIGINT, ExitHandler)
  signal.signal(signal.SIGTERM, ExitHandler)

  # seek_locations is a map indicating the position we are at in each file.
  # It starts off empty, but is set by the various Print jobs as we go along
  # to indicate where we left off in each file.
  seek_locations = {}
  while True:
    try:
      job = queue.get()
      if job:
        job.Print(seek_locations)
        sys.stdout.flush()
      else:
        break
    except IOError as ex:
      if ex.errno == errno.EINTR:
        # Looks like we received a signal. Keep printing.
        continue
      raise


class TargetState(object):
Don Garrett25f309a2014-03-19 14:02:12 -07001204  """Structure describing the scheduling state of a single build target."""
Brian Harring867e2362012-03-17 04:05:17 -07001205
Brian Harring0be85c62012-03-17 19:52:12 -07001206 __slots__ = ("target", "info", "score", "prefetched", "fetched_successfully")
Brian Harring867e2362012-03-17 04:05:17 -07001207
David James321490a2012-12-17 12:05:56 -08001208 def __init__(self, target, info):
Brian Harring867e2362012-03-17 04:05:17 -07001209 self.target, self.info = target, info
Brian Harring0be85c62012-03-17 19:52:12 -07001210 self.fetched_successfully = False
1211 self.prefetched = False
David James321490a2012-12-17 12:05:56 -08001212 self.score = None
Brian Harring867e2362012-03-17 04:05:17 -07001213 self.update_score()
1214
1215 def __cmp__(self, other):
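    # heapq compares TargetState objects directly, so a lower score means the
    # target is popped (and scheduled) sooner.  Note __cmp__ is Python 2 only.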
1216 return cmp(self.score, other.score)
1217
1218 def update_score(self):
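    # The score is compared lexicographically and lower sorts first.  Roughly:
    # prefer targets whose completion unblocks the most other packages
    # ("tprovides"), that have the fewest unmet needs, that are available as
    # binary packages, and that provide the most packages; the original index
    # and target name serve as stable tie-breakers.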
1219 self.score = (
1220 -len(self.info["tprovides"]),
Brian Harring0be85c62012-03-17 19:52:12 -07001221 len(self.info["needs"]),
Brian Harring11c5eeb2012-03-18 11:02:39 -07001222 not self.info["binary"],
Brian Harring867e2362012-03-17 04:05:17 -07001223 -len(self.info["provides"]),
1224 self.info["idx"],
1225 self.target,
1226 )
1227
1228
1229class ScoredHeap(object):
Don Garrett25f309a2014-03-19 14:02:12 -07001230  """Implementation of a general-purpose scored heap."""
Brian Harring867e2362012-03-17 04:05:17 -07001231
Brian Harring0be85c62012-03-17 19:52:12 -07001232 __slots__ = ("heap", "_heap_set")
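  # "heap" holds TargetState objects ordered by score via heapq, while
  # "_heap_set" mirrors their target names so __contains__ is O(1).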
1233
Brian Harring867e2362012-03-17 04:05:17 -07001234 def __init__(self, initial=()):
Brian Harring0be85c62012-03-17 19:52:12 -07001235 self.heap = list()
1236 self._heap_set = set()
1237 if initial:
1238 self.multi_put(initial)
Brian Harring867e2362012-03-17 04:05:17 -07001239
1240 def get(self):
Brian Harring0be85c62012-03-17 19:52:12 -07001241 item = heapq.heappop(self.heap)
1242 self._heap_set.remove(item.target)
1243 return item
Brian Harring867e2362012-03-17 04:05:17 -07001244
Brian Harring0be85c62012-03-17 19:52:12 -07001245 def put(self, item):
1246 if not isinstance(item, TargetState):
1247 raise ValueError("Item %r isn't a TargetState" % (item,))
1248 heapq.heappush(self.heap, item)
1249 self._heap_set.add(item.target)
Brian Harring867e2362012-03-17 04:05:17 -07001250
Brian Harring0be85c62012-03-17 19:52:12 -07001251 def multi_put(self, sequence):
1252 sequence = list(sequence)
1253 self.heap.extend(sequence)
1254 self._heap_set.update(x.target for x in sequence)
Brian Harring867e2362012-03-17 04:05:17 -07001255 self.sort()
1256
David James5c9996d2012-03-24 10:50:46 -07001257 def sort(self):
1258 heapq.heapify(self.heap)
1259
Brian Harring0be85c62012-03-17 19:52:12 -07001260 def __contains__(self, target):
1261 return target in self._heap_set
1262
1263 def __nonzero__(self):
1264 return bool(self.heap)
1265
Brian Harring867e2362012-03-17 04:05:17 -07001266 def __len__(self):
1267 return len(self.heap)
1268
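# A minimal sketch of how ScoredHeap and TargetState fit together
# (illustrative only -- the real scheduling is done by EmergeQueue below,
# and the deps_map shape is assumed from the dependency graph generator):
#
#   states = (TargetState(pkg, info) for pkg, info in deps_map.iteritems())
#   ready = ScoredHeap(states)
#   while ready:
#     state = ready.get()          # best-scored target not yet scheduled
#     if state.target not in failed_targets:
#       schedule_build(state)      # hypothetical scheduling hook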
1269
David Jamesfcb70ef2011-02-02 16:02:30 -08001270class EmergeQueue(object):
1271 """Class to schedule emerge jobs according to a dependency graph."""
1272
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001273 def __init__(self, deps_map, emerge, package_db, show_output, unpack_only):
David Jamesfcb70ef2011-02-02 16:02:30 -08001274 # Store the dependency graph.
1275 self._deps_map = deps_map
Brian Harring0be85c62012-03-17 19:52:12 -07001276 self._state_map = {}
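    # Jobs move through up to three stages -- fetch, (optionally) unpack, and
    # build.  Each stage keeps a dict of in-flight jobs keyed by target plus a
    # ScoredHeap of targets that are ready for that stage.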
David Jamesfcb70ef2011-02-02 16:02:30 -08001277 # Initialize the running queue to empty
Brian Harring0be85c62012-03-17 19:52:12 -07001278 self._build_jobs = {}
1279 self._build_ready = ScoredHeap()
1280 self._fetch_jobs = {}
1281 self._fetch_ready = ScoredHeap()
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001282 self._unpack_jobs = {}
1283 self._unpack_ready = ScoredHeap()
David Jamesfcb70ef2011-02-02 16:02:30 -08001284 # List of total package installs represented in deps_map.
1285 install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"]
1286 self._total_jobs = len(install_jobs)
1287 self._show_output = show_output
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001288 self._unpack_only = unpack_only
David Jamesfcb70ef2011-02-02 16:02:30 -08001289
1290 if "--pretend" in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001291 print("Skipping merge because of --pretend mode.")
David Jamesfcb70ef2011-02-02 16:02:30 -08001292 sys.exit(0)
1293
David Jamesaaf49e42014-04-24 09:40:05 -07001294 # Set up a session so we can easily terminate all children.
1295 self._SetupSession()
David James7358d032011-05-19 10:40:03 -07001296
David Jamesfcb70ef2011-02-02 16:02:30 -08001297 # Setup scheduler graph object. This is used by the child processes
1298 # to help schedule jobs.
1299 emerge.scheduler_graph = emerge.depgraph.schedulerGraph()
1300
1301 # Calculate how many jobs we can run in parallel. We don't want to pass
1302 # the --jobs flag over to emerge itself, because that'll tell emerge to
1303 # hide its output, and said output is quite useful for debugging hung
1304 # jobs.
1305 procs = min(self._total_jobs,
1306 emerge.opts.pop("--jobs", multiprocessing.cpu_count()))
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001307 self._build_procs = self._unpack_procs = self._fetch_procs = max(1, procs)
David James8c7e5e32011-06-28 11:26:03 -07001308 self._load_avg = emerge.opts.pop("--load-average", None)
David Jamesfcb70ef2011-02-02 16:02:30 -08001309 self._job_queue = multiprocessing.Queue()
1310 self._print_queue = multiprocessing.Queue()
Brian Harring0be85c62012-03-17 19:52:12 -07001311
1312 self._fetch_queue = multiprocessing.Queue()
1313 args = (self._fetch_queue, self._job_queue, emerge, package_db, True)
1314 self._fetch_pool = multiprocessing.Pool(self._fetch_procs, EmergeWorker,
1315 args)
1316
1317 self._build_queue = multiprocessing.Queue()
1318 args = (self._build_queue, self._job_queue, emerge, package_db)
1319 self._build_pool = multiprocessing.Pool(self._build_procs, EmergeWorker,
1320 args)
1321
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001322 if self._unpack_only:
1323 # Unpack pool only required on unpack_only jobs.
1324 self._unpack_queue = multiprocessing.Queue()
1325 args = (self._unpack_queue, self._job_queue, emerge, package_db, False,
1326 True)
1327 self._unpack_pool = multiprocessing.Pool(self._unpack_procs, EmergeWorker,
1328 args)
1329
David Jamesfcb70ef2011-02-02 16:02:30 -08001330 self._print_worker = multiprocessing.Process(target=PrintWorker,
1331 args=[self._print_queue])
1332 self._print_worker.start()
1333
1334 # Initialize the failed queue to empty.
1335 self._retry_queue = []
1336 self._failed = set()
1337
David Jamesfcb70ef2011-02-02 16:02:30 -08001338 # Setup an exit handler so that we print nice messages if we are
1339 # terminated.
1340 self._SetupExitHandler()
1341
1342 # Schedule our jobs.
Brian Harring0be85c62012-03-17 19:52:12 -07001343 self._state_map.update(
1344 (pkg, TargetState(pkg, data)) for pkg, data in deps_map.iteritems())
1345 self._fetch_ready.multi_put(self._state_map.itervalues())
David Jamesfcb70ef2011-02-02 16:02:30 -08001346
David Jamesaaf49e42014-04-24 09:40:05 -07001347 def _SetupSession(self):
1348 """Set up a session so we can easily terminate all children."""
1349 # When we call os.setsid(), this sets up a session / process group for this
1350 # process and all children. These session groups are needed so that we can
1351 # easily kill all children (including processes launched by emerge) before
1352 # we exit.
1353 #
1354 # One unfortunate side effect of os.setsid() is that it blocks CTRL-C from
1355 # being received. To work around this, we only call os.setsid() in a forked
1356 # process, so that the parent can still watch for CTRL-C. The parent will
1357 # just sit around, watching for signals and propagating them to the child,
1358 # until the child exits.
1359 #
1360 # TODO(davidjames): It would be nice if we could replace this with cgroups.
1361 pid = os.fork()
1362 if pid == 0:
1363 os.setsid()
1364 else:
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001365 proctitle.settitle('SessionManager')
1366
David Jamesaaf49e42014-04-24 09:40:05 -07001367 def PropagateToChildren(signum, _frame):
1368 # Just propagate the signals down to the child. We'll exit when the
1369 # child does.
1370 try:
1371 os.kill(pid, signum)
1372 except OSError as ex:
1373 if ex.errno != errno.ESRCH:
1374 raise
1375 signal.signal(signal.SIGINT, PropagateToChildren)
1376 signal.signal(signal.SIGTERM, PropagateToChildren)
1377
1378 def StopGroup(_signum, _frame):
1379 # When we get stopped, stop the children.
1380 try:
1381 os.killpg(pid, signal.SIGSTOP)
1382 os.kill(0, signal.SIGSTOP)
1383 except OSError as ex:
1384 if ex.errno != errno.ESRCH:
1385 raise
1386 signal.signal(signal.SIGTSTP, StopGroup)
1387
1388 def ContinueGroup(_signum, _frame):
1389 # Launch the children again after being stopped.
1390 try:
1391 os.killpg(pid, signal.SIGCONT)
1392 except OSError as ex:
1393 if ex.errno != errno.ESRCH:
1394 raise
1395 signal.signal(signal.SIGCONT, ContinueGroup)
1396
1397 # Loop until the children exit. We exit with os._exit to be sure we
1398      # don't run any finalizers (those will be run by the child process).
1399 # pylint: disable=W0212
1400 while True:
1401 try:
1402 # Wait for the process to exit. When it does, exit with the return
1403 # value of the subprocess.
Mike Frysingere2d8f0d2014-11-01 13:09:26 -04001404 os._exit(process_util.GetExitStatus(os.waitpid(pid, 0)[1]))
David Jamesaaf49e42014-04-24 09:40:05 -07001405 except OSError as ex:
1406 if ex.errno == errno.EINTR:
1407 continue
1408 traceback.print_exc()
1409 os._exit(1)
1410 except BaseException:
1411 traceback.print_exc()
1412 os._exit(1)
1413
David Jamesfcb70ef2011-02-02 16:02:30 -08001414 def _SetupExitHandler(self):
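    """Install SIGINT/SIGTERM handlers that report status before exiting."""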
1415
David James321490a2012-12-17 12:05:56 -08001416 def ExitHandler(signum, _frame):
David James7358d032011-05-19 10:40:03 -07001417 # Set KILLED flag.
1418 KILLED.set()
David Jamesfcb70ef2011-02-02 16:02:30 -08001419
1420 # Kill our signal handlers so we don't get called recursively
David James7358d032011-05-19 10:40:03 -07001421 signal.signal(signal.SIGINT, KillHandler)
1422 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -08001423
1424 # Print our current job status
Brian Harring0be85c62012-03-17 19:52:12 -07001425 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001426 if job:
1427 self._print_queue.put(JobPrinter(job, unlink=True))
1428
1429 # Notify the user that we are exiting
1430 self._Print("Exiting on signal %s" % signum)
David James7358d032011-05-19 10:40:03 -07001431 self._print_queue.put(None)
1432 self._print_worker.join()
David Jamesfcb70ef2011-02-02 16:02:30 -08001433
1434 # Kill child threads, then exit.
David James7358d032011-05-19 10:40:03 -07001435 os.killpg(0, signal.SIGKILL)
David Jamesfcb70ef2011-02-02 16:02:30 -08001436 sys.exit(1)
1437
1438 # Print out job status when we are killed
1439 signal.signal(signal.SIGINT, ExitHandler)
1440 signal.signal(signal.SIGTERM, ExitHandler)
1441
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001442 def _ScheduleUnpack(self, pkg_state):
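    """Queue a package up for the unpack worker pool."""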
1443 self._unpack_jobs[pkg_state.target] = None
1444 self._unpack_queue.put(pkg_state)
1445
Brian Harring0be85c62012-03-17 19:52:12 -07001446 def _Schedule(self, pkg_state):
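    """Schedule a package for building, or finish it if it needs no merge.

    Returns:
      True if a build job was queued; None otherwise.
    """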
David Jamesfcb70ef2011-02-02 16:02:30 -08001447    # We maintain a tree of all deps; if this package doesn't need to be
David James8c7e5e32011-06-28 11:26:03 -07001448    # installed, just free up its children and continue.
David Jamesfcb70ef2011-02-02 16:02:30 -08001449    # It is possible to reinstall deps of deps without reinstalling
1450    # first-level deps, like so:
Mike Frysingerfd969312014-04-02 22:16:42 -04001451 # virtual/target-os (merge) -> eselect (nomerge) -> python (merge)
Brian Harring0be85c62012-03-17 19:52:12 -07001452 this_pkg = pkg_state.info
1453 target = pkg_state.target
1454 if pkg_state.info is not None:
1455 if this_pkg["action"] == "nomerge":
1456 self._Finish(target)
1457 elif target not in self._build_jobs:
1458 # Kick off the build if it's marked to be built.
1459 self._build_jobs[target] = None
1460 self._build_queue.put(pkg_state)
1461 return True
David Jamesfcb70ef2011-02-02 16:02:30 -08001462
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001463 def _ScheduleLoop(self, unpack_only=False):
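    """Schedule as many ready jobs as workers and load average allow."""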
1464 if unpack_only:
1465 ready_queue = self._unpack_ready
1466 jobs_queue = self._unpack_jobs
1467 procs = self._unpack_procs
1468 else:
1469 ready_queue = self._build_ready
1470 jobs_queue = self._build_jobs
1471 procs = self._build_procs
1472
David James8c7e5e32011-06-28 11:26:03 -07001473 # If the current load exceeds our desired load average, don't schedule
1474 # more than one job.
1475 if self._load_avg and os.getloadavg()[0] > self._load_avg:
1476 needed_jobs = 1
1477 else:
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001478 needed_jobs = procs
David James8c7e5e32011-06-28 11:26:03 -07001479
1480 # Schedule more jobs.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001481 while ready_queue and len(jobs_queue) < needed_jobs:
1482 state = ready_queue.get()
1483 if unpack_only:
1484 self._ScheduleUnpack(state)
1485 else:
1486 if state.target not in self._failed:
1487 self._Schedule(state)
David Jamesfcb70ef2011-02-02 16:02:30 -08001488
1489 def _Print(self, line):
1490 """Print a single line."""
1491 self._print_queue.put(LinePrinter(line))
1492
1493 def _Status(self):
1494 """Print status."""
1495 current_time = time.time()
1496 no_output = True
1497
1498 # Print interim output every minute if --show-output is used. Otherwise,
1499 # print notifications about running packages every 2 minutes, and print
1500 # full output for jobs that have been running for 60 minutes or more.
1501 if self._show_output:
1502 interval = 60
1503 notify_interval = 0
1504 else:
1505 interval = 60 * 60
1506 notify_interval = 60 * 2
David James321490a2012-12-17 12:05:56 -08001507 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001508 if job:
1509 last_timestamp = max(job.start_timestamp, job.last_output_timestamp)
1510 if last_timestamp + interval < current_time:
1511 self._print_queue.put(JobPrinter(job))
1512 job.last_output_timestamp = current_time
1513 no_output = False
1514 elif (notify_interval and
1515 job.last_notify_timestamp + notify_interval < current_time):
1516 job_seconds = current_time - job.start_timestamp
1517 args = (job.pkgname, job_seconds / 60, job_seconds % 60, job.filename)
1518 info = "Still building %s (%dm%.1fs). Logs in %s" % args
1519 job.last_notify_timestamp = current_time
1520 self._Print(info)
1521 no_output = False
1522
1523 # If we haven't printed any messages yet, print a general status message
1524 # here.
1525 if no_output:
1526 seconds = current_time - GLOBAL_START
Brian Harring0be85c62012-03-17 19:52:12 -07001527 fjobs, fready = len(self._fetch_jobs), len(self._fetch_ready)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001528 ujobs, uready = len(self._unpack_jobs), len(self._unpack_ready)
Brian Harring0be85c62012-03-17 19:52:12 -07001529 bjobs, bready = len(self._build_jobs), len(self._build_ready)
1530 retries = len(self._retry_queue)
1531 pending = max(0, len(self._deps_map) - fjobs - bjobs)
1532 line = "Pending %s/%s, " % (pending, self._total_jobs)
1533 if fjobs or fready:
1534 line += "Fetching %s/%s, " % (fjobs, fready + fjobs)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001535 if ujobs or uready:
1536 line += "Unpacking %s/%s, " % (ujobs, uready + ujobs)
Brian Harring0be85c62012-03-17 19:52:12 -07001537 if bjobs or bready or retries:
1538 line += "Building %s/%s, " % (bjobs, bready + bjobs)
1539 if retries:
1540 line += "Retrying %s, " % (retries,)
David James8c7e5e32011-06-28 11:26:03 -07001541 load = " ".join(str(x) for x in os.getloadavg())
Brian Harring0be85c62012-03-17 19:52:12 -07001542      line += ("[Time %dm%.1fs Load %s]" % (seconds / 60, seconds % 60, load))
1543 self._Print(line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001544
1545 def _Finish(self, target):
David James8c7e5e32011-06-28 11:26:03 -07001546 """Mark a target as completed and unblock dependencies."""
1547 this_pkg = self._deps_map[target]
1548 if this_pkg["needs"] and this_pkg["nodeps"]:
1549 # We got installed, but our deps have not been installed yet. Dependent
1550 # packages should only be installed when our needs have been fully met.
1551 this_pkg["action"] = "nomerge"
1552 else:
David James8c7e5e32011-06-28 11:26:03 -07001553 for dep in this_pkg["provides"]:
1554 dep_pkg = self._deps_map[dep]
Brian Harring0be85c62012-03-17 19:52:12 -07001555 state = self._state_map[dep]
David James8c7e5e32011-06-28 11:26:03 -07001556 del dep_pkg["needs"][target]
Brian Harring0be85c62012-03-17 19:52:12 -07001557 state.update_score()
1558 if not state.prefetched:
1559 if dep in self._fetch_ready:
1560 # If it's not currently being fetched, update the prioritization
1561 self._fetch_ready.sort()
1562 elif not dep_pkg["needs"]:
David James8c7e5e32011-06-28 11:26:03 -07001563 if dep_pkg["nodeps"] and dep_pkg["action"] == "nomerge":
1564 self._Finish(dep)
1565 else:
Brian Harring0be85c62012-03-17 19:52:12 -07001566 self._build_ready.put(self._state_map[dep])
David James8c7e5e32011-06-28 11:26:03 -07001567 self._deps_map.pop(target)
David Jamesfcb70ef2011-02-02 16:02:30 -08001568
1569 def _Retry(self):
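    """Re-queue the oldest failed package that can be scheduled now."""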
David James8c7e5e32011-06-28 11:26:03 -07001570 while self._retry_queue:
Brian Harring0be85c62012-03-17 19:52:12 -07001571 state = self._retry_queue.pop(0)
1572 if self._Schedule(state):
1573 self._Print("Retrying emerge of %s." % state.target)
David James8c7e5e32011-06-28 11:26:03 -07001574 break
David Jamesfcb70ef2011-02-02 16:02:30 -08001575
Brian Harringa43f5952012-04-12 01:19:34 -07001576 def _Shutdown(self):
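    """Tell the worker pools and print worker to exit, then clean them up."""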
David Jamesfcb70ef2011-02-02 16:02:30 -08001577 # Tell emerge workers to exit. They all exit when 'None' is pushed
1578 # to the queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001579
Brian Harringa43f5952012-04-12 01:19:34 -07001580    # Shut down the workers first, then the job queue (which is how they feed
1581    # results back), and finally the print queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001582
Brian Harringa43f5952012-04-12 01:19:34 -07001583 def _stop(queue, pool):
1584 if pool is None:
1585 return
1586 try:
1587 queue.put(None)
1588 pool.close()
1589 pool.join()
1590 finally:
1591 pool.terminate()
Brian Harring0be85c62012-03-17 19:52:12 -07001592
Brian Harringa43f5952012-04-12 01:19:34 -07001593 _stop(self._fetch_queue, self._fetch_pool)
1594 self._fetch_queue = self._fetch_pool = None
Brian Harring0be85c62012-03-17 19:52:12 -07001595
Brian Harringa43f5952012-04-12 01:19:34 -07001596 _stop(self._build_queue, self._build_pool)
1597 self._build_queue = self._build_pool = None
1598
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001599 if self._unpack_only:
1600 _stop(self._unpack_queue, self._unpack_pool)
1601 self._unpack_queue = self._unpack_pool = None
1602
Brian Harringa43f5952012-04-12 01:19:34 -07001603 if self._job_queue is not None:
1604 self._job_queue.close()
1605 self._job_queue = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001606
1607 # Now that our workers are finished, we can kill the print queue.
Brian Harringa43f5952012-04-12 01:19:34 -07001608 if self._print_worker is not None:
1609 try:
1610 self._print_queue.put(None)
1611 self._print_queue.close()
1612 self._print_worker.join()
1613 finally:
1614 self._print_worker.terminate()
1615 self._print_queue = self._print_worker = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001616
1617 def Run(self):
1618 """Run through the scheduled ebuilds.
1619
1620 Keep running so long as we have uninstalled packages in the
1621 dependency graph to merge.
1622 """
Brian Harringa43f5952012-04-12 01:19:34 -07001623 if not self._deps_map:
1624 return
1625
Brian Harring0be85c62012-03-17 19:52:12 -07001626 # Start the fetchers.
1627 for _ in xrange(min(self._fetch_procs, len(self._fetch_ready))):
1628 state = self._fetch_ready.get()
1629 self._fetch_jobs[state.target] = None
1630 self._fetch_queue.put(state)
1631
1632 # Print an update, then get going.
1633 self._Status()
1634
David Jamese703d0f2012-01-12 16:27:45 -08001635 retried = set()
David Jamesfcb70ef2011-02-02 16:02:30 -08001636 while self._deps_map:
1637      # Check that we are actually waiting for something: if nothing is
      # queued, running, or ready but packages remain, retry a failure or
      # report a deadlock.
Brian Harring0be85c62012-03-17 19:52:12 -07001638 if (self._build_queue.empty() and
David Jamesfcb70ef2011-02-02 16:02:30 -08001639 self._job_queue.empty() and
Brian Harring0be85c62012-03-17 19:52:12 -07001640 not self._fetch_jobs and
1641 not self._fetch_ready and
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001642 not self._unpack_jobs and
1643 not self._unpack_ready and
Brian Harring0be85c62012-03-17 19:52:12 -07001644 not self._build_jobs and
1645 not self._build_ready and
David Jamesfcb70ef2011-02-02 16:02:30 -08001646 self._deps_map):
1647 # If we have failed on a package, retry it now.
1648 if self._retry_queue:
1649 self._Retry()
1650 else:
David Jamesfcb70ef2011-02-02 16:02:30 -08001651 # Tell the user why we're exiting.
1652 if self._failed:
Mike Frysinger383367e2014-09-16 15:06:17 -04001653 print('Packages failed:\n\t%s' % '\n\t'.join(self._failed))
David James0eae23e2012-07-03 15:04:25 -07001654 status_file = os.environ.get("PARALLEL_EMERGE_STATUS_FILE")
1655 if status_file:
David James321490a2012-12-17 12:05:56 -08001656 failed_pkgs = set(portage.versions.cpv_getkey(x)
1657 for x in self._failed)
David James0eae23e2012-07-03 15:04:25 -07001658 with open(status_file, "a") as f:
1659 f.write("%s\n" % " ".join(failed_pkgs))
David Jamesfcb70ef2011-02-02 16:02:30 -08001660 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001661 print("Deadlock! Circular dependencies!")
David Jamesfcb70ef2011-02-02 16:02:30 -08001662 sys.exit(1)
1663
David James321490a2012-12-17 12:05:56 -08001664 for _ in xrange(12):
David Jamesa74289a2011-08-12 10:41:24 -07001665 try:
1666 job = self._job_queue.get(timeout=5)
1667 break
1668 except Queue.Empty:
1669 # Check if any more jobs can be scheduled.
1670 self._ScheduleLoop()
1671 else:
Brian Harring706747c2012-03-16 03:04:31 -07001672 # Print an update every 60 seconds.
David Jamesfcb70ef2011-02-02 16:02:30 -08001673 self._Status()
1674 continue
1675
1676 target = job.target
1677
Brian Harring0be85c62012-03-17 19:52:12 -07001678 if job.fetch_only:
1679 if not job.done:
1680 self._fetch_jobs[job.target] = job
1681 else:
1682 state = self._state_map[job.target]
1683 state.prefetched = True
1684 state.fetched_successfully = (job.retcode == 0)
1685 del self._fetch_jobs[job.target]
1686 self._Print("Fetched %s in %2.2fs"
1687 % (target, time.time() - job.start_timestamp))
1688
1689 if self._show_output or job.retcode != 0:
1690 self._print_queue.put(JobPrinter(job, unlink=True))
1691 else:
1692 os.unlink(job.filename)
1693          # Whether the fetch succeeded or failed, let the build stage handle it next.
1694 if not self._deps_map[job.target]["needs"]:
1695 self._build_ready.put(state)
1696 self._ScheduleLoop()
1697
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001698 if self._unpack_only and job.retcode == 0:
1699 self._unpack_ready.put(state)
1700 self._ScheduleLoop(unpack_only=True)
1701
Brian Harring0be85c62012-03-17 19:52:12 -07001702 if self._fetch_ready:
1703 state = self._fetch_ready.get()
1704 self._fetch_queue.put(state)
1705 self._fetch_jobs[state.target] = None
1706 else:
1707 # Minor optimization; shut down fetchers early since we know
1708 # the queue is empty.
1709 self._fetch_queue.put(None)
1710 continue
1711
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001712 if job.unpack_only:
1713 if not job.done:
1714 self._unpack_jobs[target] = job
1715 else:
1716 del self._unpack_jobs[target]
1717 self._Print("Unpacked %s in %2.2fs"
1718 % (target, time.time() - job.start_timestamp))
1719 if self._show_output or job.retcode != 0:
1720 self._print_queue.put(JobPrinter(job, unlink=True))
1721 else:
1722 os.unlink(job.filename)
1723 if self._unpack_ready:
1724 state = self._unpack_ready.get()
1725 self._unpack_queue.put(state)
1726 self._unpack_jobs[state.target] = None
1727 continue
1728
David Jamesfcb70ef2011-02-02 16:02:30 -08001729 if not job.done:
Brian Harring0be85c62012-03-17 19:52:12 -07001730 self._build_jobs[target] = job
David Jamesfcb70ef2011-02-02 16:02:30 -08001731 self._Print("Started %s (logged in %s)" % (target, job.filename))
1732 continue
1733
1734 # Print output of job
1735 if self._show_output or job.retcode != 0:
1736 self._print_queue.put(JobPrinter(job, unlink=True))
1737 else:
1738 os.unlink(job.filename)
Brian Harring0be85c62012-03-17 19:52:12 -07001739 del self._build_jobs[target]
David Jamesfcb70ef2011-02-02 16:02:30 -08001740
1741 seconds = time.time() - job.start_timestamp
1742 details = "%s (in %dm%.1fs)" % (target, seconds / 60, seconds % 60)
David James32420cc2011-08-25 21:32:46 -07001743 previously_failed = target in self._failed
David Jamesfcb70ef2011-02-02 16:02:30 -08001744
1745 # Complain if necessary.
1746 if job.retcode != 0:
1747 # Handle job failure.
David James32420cc2011-08-25 21:32:46 -07001748 if previously_failed:
David Jamesfcb70ef2011-02-02 16:02:30 -08001749 # If this job has failed previously, give up.
1750 self._Print("Failed %s. Your build has failed." % details)
1751 else:
1752 # Queue up this build to try again after a long while.
David Jamese703d0f2012-01-12 16:27:45 -08001753 retried.add(target)
Brian Harring0be85c62012-03-17 19:52:12 -07001754 self._retry_queue.append(self._state_map[target])
David Jamesfcb70ef2011-02-02 16:02:30 -08001755 self._failed.add(target)
1756 self._Print("Failed %s, retrying later." % details)
1757 else:
David James32420cc2011-08-25 21:32:46 -07001758 if previously_failed:
1759 # Remove target from list of failed packages.
1760 self._failed.remove(target)
1761
1762 self._Print("Completed %s" % details)
1763
1764 # Mark as completed and unblock waiting ebuilds.
1765 self._Finish(target)
1766
1767 if previously_failed and self._retry_queue:
David Jamesfcb70ef2011-02-02 16:02:30 -08001768 # If we have successfully retried a failed package, and there
1769 # are more failed packages, try the next one. We will only have
1770 # one retrying package actively running at a time.
1771 self._Retry()
1772
David Jamesfcb70ef2011-02-02 16:02:30 -08001773
David James8c7e5e32011-06-28 11:26:03 -07001774 # Schedule pending jobs and print an update.
1775 self._ScheduleLoop()
1776 self._Status()
David Jamesfcb70ef2011-02-02 16:02:30 -08001777
David Jamese703d0f2012-01-12 16:27:45 -08001778 # If packages were retried, output a warning.
1779 if retried:
1780 self._Print("")
1781 self._Print("WARNING: The following packages failed the first time,")
1782 self._Print("but succeeded upon retry. This might indicate incorrect")
1783 self._Print("dependencies.")
1784 for pkg in retried:
1785 self._Print(" %s" % pkg)
1786 self._Print("@@@STEP_WARNINGS@@@")
1787 self._Print("")
1788
David Jamesfcb70ef2011-02-02 16:02:30 -08001789 # Tell child threads to exit.
1790 self._Print("Merge complete")
David Jamesfcb70ef2011-02-02 16:02:30 -08001791
1792
Brian Harring30675052012-02-29 12:18:22 -08001793def main(argv):
Brian Harring8294d652012-05-23 02:20:52 -07001794 try:
1795 return real_main(argv)
1796 finally:
1797 # Work around multiprocessing sucking and not cleaning up after itself.
1798    # http://bugs.python.org/issue4106
1799    # Step one: ensure GC is run *prior* to the VM starting shutdown.
1800 gc.collect()
1801    # Step two: go looking for those threads and try to manually reap
1802 # them if we can.
1803 for x in threading.enumerate():
1804      # Filter on the name and ident; if ident is None, the thread
1805 # wasn't started.
1806 if x.name == 'QueueFeederThread' and x.ident is not None:
1807 x.join(1)
David Jamesfcb70ef2011-02-02 16:02:30 -08001808
Brian Harring8294d652012-05-23 02:20:52 -07001809
1810def real_main(argv):
Brian Harring30675052012-02-29 12:18:22 -08001811 parallel_emerge_args = argv[:]
David Jamesfcb70ef2011-02-02 16:02:30 -08001812 deps = DepGraphGenerator()
Brian Harring30675052012-02-29 12:18:22 -08001813 deps.Initialize(parallel_emerge_args)
David Jamesfcb70ef2011-02-02 16:02:30 -08001814 emerge = deps.emerge
1815
1816 if emerge.action is not None:
Brian Harring30675052012-02-29 12:18:22 -08001817 argv = deps.ParseParallelEmergeArgs(argv)
Brian Harring8294d652012-05-23 02:20:52 -07001818 return emerge_main(argv)
David Jamesfcb70ef2011-02-02 16:02:30 -08001819 elif not emerge.cmdline_packages:
1820 Usage()
Brian Harring8294d652012-05-23 02:20:52 -07001821 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001822
1823 # Unless we're in pretend mode, there's not much point running without
1824 # root access. We need to be able to install packages.
1825 #
1826 # NOTE: Even if you're running --pretend, it's a good idea to run
1827 # parallel_emerge with root access so that portage can write to the
1828 # dependency cache. This is important for performance.
David James321490a2012-12-17 12:05:56 -08001829 if "--pretend" not in emerge.opts and portage.data.secpass < 2:
Mike Frysinger383367e2014-09-16 15:06:17 -04001830 print("parallel_emerge: superuser access is required.")
Brian Harring8294d652012-05-23 02:20:52 -07001831 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001832
1833 if "--quiet" not in emerge.opts:
1834 cmdline_packages = " ".join(emerge.cmdline_packages)
Mike Frysinger383367e2014-09-16 15:06:17 -04001835 print("Starting fast-emerge.")
1836 print(" Building package %s on %s" % (cmdline_packages,
1837 deps.board or "root"))
David Jamesfcb70ef2011-02-02 16:02:30 -08001838
David James386ccd12011-05-04 20:17:42 -07001839 deps_tree, deps_info = deps.GenDependencyTree()
David Jamesfcb70ef2011-02-02 16:02:30 -08001840
1841 # You want me to be verbose? I'll give you two trees! Twice as much value.
1842 if "--tree" in emerge.opts and "--verbose" in emerge.opts:
1843 deps.PrintTree(deps_tree)
1844
David James386ccd12011-05-04 20:17:42 -07001845 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001846
1847 # OK, time to print out our progress so far.
1848 deps.PrintInstallPlan(deps_graph)
1849 if "--tree" in emerge.opts:
1850 PrintDepsMap(deps_graph)
1851
1852 # Are we upgrading portage? If so, and there are more packages to merge,
1853 # schedule a restart of parallel_emerge to merge the rest. This ensures that
1854 # we pick up all updates to portage settings before merging any more
1855 # packages.
1856 portage_upgrade = False
1857 root = emerge.settings["ROOT"]
Don Garrett25f309a2014-03-19 14:02:12 -07001858 # pylint: disable=W0212
Bertrand SIMONNETa15b5072014-10-23 15:27:52 -07001859 final_db = emerge.depgraph._dynamic_config._filtered_trees[root]['graph_db']
David Jamesfcb70ef2011-02-02 16:02:30 -08001860 if root == "/":
1861 for db_pkg in final_db.match_pkgs("sys-apps/portage"):
1862 portage_pkg = deps_graph.get(db_pkg.cpv)
David James0ff16f22012-11-02 14:18:07 -07001863 if portage_pkg:
David Jamesfcb70ef2011-02-02 16:02:30 -08001864 portage_upgrade = True
1865 if "--quiet" not in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001866 print("Upgrading portage first, then restarting...")
David Jamesfcb70ef2011-02-02 16:02:30 -08001867
David James0ff16f22012-11-02 14:18:07 -07001868 # Upgrade Portage first, then the rest of the packages.
1869 #
1870 # In order to grant the child permission to run setsid, we need to run sudo
1871 # again. We preserve SUDO_USER here in case an ebuild depends on it.
1872 if portage_upgrade:
1873 # Calculate what arguments to use when re-invoking.
1874 args = ["sudo", "-E", "SUDO_USER=%s" % os.environ.get("SUDO_USER", "")]
1875 args += [os.path.abspath(sys.argv[0])] + parallel_emerge_args
1876 args += ["--exclude=sys-apps/portage"]
1877
1878 # First upgrade Portage.
1879 passthrough_args = ("--quiet", "--pretend", "--verbose")
1880 emerge_args = [k for k in emerge.opts if k in passthrough_args]
1881 ret = emerge_main(emerge_args + ["portage"])
1882 if ret != 0:
1883 return ret
1884
1885 # Now upgrade the rest.
1886 os.execvp(args[0], args)
1887
David Jamesfcb70ef2011-02-02 16:02:30 -08001888 # Run the queued emerges.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001889 scheduler = EmergeQueue(deps_graph, emerge, deps.package_db, deps.show_output,
1890 deps.unpack_only)
Brian Harringa43f5952012-04-12 01:19:34 -07001891 try:
1892 scheduler.Run()
1893 finally:
Don Garrett25f309a2014-03-19 14:02:12 -07001894 # pylint: disable=W0212
Brian Harringa43f5952012-04-12 01:19:34 -07001895 scheduler._Shutdown()
David James97ce8902011-08-16 09:51:05 -07001896 scheduler = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001897
Mike Frysingerd20a6e22012-10-04 19:01:10 -04001898 clean_logs(emerge.settings)
1899
Mike Frysinger383367e2014-09-16 15:06:17 -04001900 print("Done")
Brian Harring8294d652012-05-23 02:20:52 -07001901 return 0