Mike Frysinger0a647fc2012-08-06 14:36:05 -04001# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
David Jamesfcb70ef2011-02-02 16:02:30 -08002# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5"""Program to run emerge in parallel, for significant speedup.
6
7Usage:
David James386ccd12011-05-04 20:17:42 -07008 ./parallel_emerge [--board=BOARD] [--workon=PKGS]
David Jamesfcb70ef2011-02-02 16:02:30 -08009 [--force-remote-binary=PKGS] [emerge args] package
10
David James78b6cd92012-04-02 21:36:12 -070011This script runs multiple emerge processes in parallel, using appropriate
12Portage APIs. It is faster than standard emerge because it has a
13multiprocess model instead of an asynchronous model.
David Jamesfcb70ef2011-02-02 16:02:30 -080014"""
15
Mike Frysinger383367e2014-09-16 15:06:17 -040016from __future__ import print_function
17
David Jamesfcb70ef2011-02-02 16:02:30 -080018import codecs
19import copy
20import errno
Brian Harring8294d652012-05-23 02:20:52 -070021import gc
David James8c7e5e32011-06-28 11:26:03 -070022import heapq
David Jamesfcb70ef2011-02-02 16:02:30 -080023import multiprocessing
24import os
Mike Frysinger1ae28092013-10-17 17:17:22 -040025try:
26 import Queue
27except ImportError:
 28 # Python 3 renamed the module to "queue". We still use Queue to avoid
 29 # collisions with variables named "queue". Maybe we'll transition at some point.
30 # pylint: disable=F0401
31 import queue as Queue
David Jamesfcb70ef2011-02-02 16:02:30 -080032import signal
33import sys
34import tempfile
Brian Harring8294d652012-05-23 02:20:52 -070035import threading
David Jamesfcb70ef2011-02-02 16:02:30 -080036import time
37import traceback
David Jamesfcb70ef2011-02-02 16:02:30 -080038
Thiago Goncalesf4acc422013-07-17 10:26:35 -070039from chromite.lib import cros_build_lib
David Jamesaaf49e42014-04-24 09:40:05 -070040from chromite.lib import osutils
Mike Frysingerd74fe4a2014-04-24 11:43:38 -040041from chromite.lib import proctitle
Thiago Goncalesf4acc422013-07-17 10:26:35 -070042
David Jamesfcb70ef2011-02-02 16:02:30 -080043# If PORTAGE_USERNAME isn't specified, scrape it from the $HOME variable. On
44# Chromium OS, the default "portage" user doesn't have the necessary
45# permissions. It'd be easier if we could default to $USERNAME, but $USERNAME
46# is "root" here because we get called through sudo.
47#
48# We need to set this before importing any portage modules, because portage
49# looks up "PORTAGE_USERNAME" at import time.
50#
51# NOTE: .bashrc sets PORTAGE_USERNAME = $USERNAME, so most people won't
52# encounter this case unless they have an old chroot or blow away the
53# environment by running sudo without the -E specifier.
54if "PORTAGE_USERNAME" not in os.environ:
55 homedir = os.environ.get("HOME")
56 if homedir:
57 os.environ["PORTAGE_USERNAME"] = os.path.basename(homedir)
58
59# Portage doesn't expose dependency trees in its public API, so we have to
60# make use of some private APIs here. These modules are found under
61# /usr/lib/portage/pym/.
62#
63# TODO(davidjames): Update Portage to expose public APIs for these features.
Don Garrett25f309a2014-03-19 14:02:12 -070064# pylint: disable=F0401
David Jamesfcb70ef2011-02-02 16:02:30 -080065from _emerge.actions import adjust_configs
66from _emerge.actions import load_emerge_config
67from _emerge.create_depgraph_params import create_depgraph_params
David James386ccd12011-05-04 20:17:42 -070068from _emerge.depgraph import backtrack_depgraph
Mike Frysinger901eaad2012-10-10 18:18:03 -040069try:
70 from _emerge.main import clean_logs
71except ImportError:
72 # Older portage versions did not provide clean_logs, so stub it.
73 # We need this if running in an older chroot that hasn't yet upgraded
74 # the portage version.
75 clean_logs = lambda x: None
David Jamesfcb70ef2011-02-02 16:02:30 -080076from _emerge.main import emerge_main
77from _emerge.main import parse_opts
78from _emerge.Package import Package
79from _emerge.Scheduler import Scheduler
David Jamesfcb70ef2011-02-02 16:02:30 -080080from _emerge.stdout_spinner import stdout_spinner
David James386ccd12011-05-04 20:17:42 -070081from portage._global_updates import _global_updates
David Jamesfcb70ef2011-02-02 16:02:30 -080082import portage
83import portage.debug
Don Garrettf8bf7842014-03-20 17:03:42 -070084# pylint: enable=F0401
Mike Frysinger91d7da92013-02-19 15:53:46 -050085
David Jamesfcb70ef2011-02-02 16:02:30 -080086
David Jamesfcb70ef2011-02-02 16:02:30 -080087def Usage():
88 """Print usage."""
Mike Frysinger383367e2014-09-16 15:06:17 -040089 print("Usage:")
90 print(" ./parallel_emerge [--board=BOARD] [--workon=PKGS]")
91 print(" [--rebuild] [emerge args] package")
92 print()
93 print("Packages specified as workon packages are always built from source.")
94 print()
95 print("The --workon argument is mainly useful when you want to build and")
96 print("install packages that you are working on unconditionally, but do not")
97 print("to have to rev the package to indicate you want to build it from")
98 print("source. The build_packages script will automatically supply the")
99 print("workon argument to emerge, ensuring that packages selected using")
100 print("cros-workon are rebuilt.")
101 print()
102 print("The --rebuild option rebuilds packages whenever their dependencies")
103 print("are changed. This ensures that your build is correct.")
David Jamesfcb70ef2011-02-02 16:02:30 -0800104
105
David Jamesfcb70ef2011-02-02 16:02:30 -0800106# Global start time
107GLOBAL_START = time.time()
108
David James7358d032011-05-19 10:40:03 -0700109# Whether process has been killed by a signal.
110KILLED = multiprocessing.Event()
111
David Jamesfcb70ef2011-02-02 16:02:30 -0800112
113class EmergeData(object):
114 """This simple struct holds various emerge variables.
115
116 This struct helps us easily pass emerge variables around as a unit.
117 These variables are used for calculating dependencies and installing
118 packages.
119 """
120
David Jamesbf1e3442011-05-28 07:44:20 -0700121 __slots__ = ["action", "cmdline_packages", "depgraph", "favorites",
122 "mtimedb", "opts", "root_config", "scheduler_graph",
123 "settings", "spinner", "trees"]
David Jamesfcb70ef2011-02-02 16:02:30 -0800124
125 def __init__(self):
126 # The action the user requested. If the user is installing packages, this
127 # is None. If the user is doing anything other than installing packages,
128 # this will contain the action name, which will map exactly to the
129 # long-form name of the associated emerge option.
130 #
131 # Example: If you call parallel_emerge --unmerge package, the action name
132 # will be "unmerge"
133 self.action = None
134
135 # The list of packages the user passed on the command-line.
136 self.cmdline_packages = None
137
138 # The emerge dependency graph. It'll contain all the packages involved in
139 # this merge, along with their versions.
140 self.depgraph = None
141
David Jamesbf1e3442011-05-28 07:44:20 -0700142 # The list of candidates to add to the world file.
143 self.favorites = None
144
David Jamesfcb70ef2011-02-02 16:02:30 -0800145 # A dict of the options passed to emerge. This dict has been cleaned up
146 # a bit by parse_opts, so that it's a bit easier for the emerge code to
147 # look at the options.
148 #
149 # Emerge takes a few shortcuts in its cleanup process to make parsing of
150 # the options dict easier. For example, if you pass in "--usepkg=n", the
151 # "--usepkg" flag is just left out of the dictionary altogether. Because
152 # --usepkg=n is the default, this makes parsing easier, because emerge
153 # can just assume that if "--usepkg" is in the dictionary, it's enabled.
154 #
155 # These cleanup processes aren't applied to all options. For example, the
156 # --with-bdeps flag is passed in as-is. For a full list of the cleanups
157 # applied by emerge, see the parse_opts function in the _emerge.main
158 # package.
159 self.opts = None
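    # Illustrative note (not from the original source): with the cleanup
    # described above, a command line such as "emerge --usepkg=n foo" would
    # typically leave "--usepkg" out of this dict entirely, while
    # "--usepkg" / "--usepkg=y" would show up as a key, so later code can
    # simply test membership with: if "--usepkg" in opts: ...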
160
161 # A dictionary used by portage to maintain global state. This state is
162 # loaded from disk when portage starts up, and saved to disk whenever we
163 # call mtimedb.commit().
164 #
165 # This database contains information about global updates (i.e., what
166 # version of portage we have) and what we're currently doing. Portage
167 # saves what it is currently doing in this database so that it can be
168 # resumed when you call it with the --resume option.
169 #
170 # parallel_emerge does not save what it is currently doing in the mtimedb,
171 # so we do not support the --resume option.
172 self.mtimedb = None
173
174 # The portage configuration for our current root. This contains the portage
175 # settings (see below) and the three portage trees for our current root.
176 # (The three portage trees are explained below, in the documentation for
177 # the "trees" member.)
178 self.root_config = None
179
180 # The scheduler graph is used by emerge to calculate what packages to
181 # install. We don't actually install any deps, so this isn't really used,
182 # but we pass it in to the Scheduler object anyway.
183 self.scheduler_graph = None
184
185 # Portage settings for our current session. Most of these settings are set
186 # in make.conf inside our current install root.
187 self.settings = None
188
189 # The spinner, which spews stuff to stdout to indicate that portage is
190 # doing something. We maintain our own spinner, so we set the portage
191 # spinner to "silent" mode.
192 self.spinner = None
193
194 # The portage trees. There are separate portage trees for each root. To get
195 # the portage tree for the current root, you can look in self.trees[root],
196 # where root = self.settings["ROOT"].
197 #
198 # In each root, there are three trees: vartree, porttree, and bintree.
199 # - vartree: A database of the currently-installed packages.
200 # - porttree: A database of ebuilds, that can be used to build packages.
201 # - bintree: A database of binary packages.
202 self.trees = None
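    # For example (mirroring how later code in this script uses these trees),
    # the installed-package database for the current root is reached via:
    #   root = self.settings["ROOT"]
    #   vardb = self.trees[root]["vartree"].dbapi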
203
204
205class DepGraphGenerator(object):
206 """Grab dependency information about packages from portage.
207
208 Typical usage:
209 deps = DepGraphGenerator()
210 deps.Initialize(sys.argv[1:])
211 deps_tree, deps_info = deps.GenDependencyTree()
212 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
213 deps.PrintTree(deps_tree)
214 PrintDepsMap(deps_graph)
215 """
216
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700217 __slots__ = ["board", "emerge", "package_db", "show_output", "unpack_only"]
David Jamesfcb70ef2011-02-02 16:02:30 -0800218
219 def __init__(self):
220 self.board = None
221 self.emerge = EmergeData()
David Jamesfcb70ef2011-02-02 16:02:30 -0800222 self.package_db = {}
David Jamesfcb70ef2011-02-02 16:02:30 -0800223 self.show_output = False
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700224 self.unpack_only = False
David Jamesfcb70ef2011-02-02 16:02:30 -0800225
226 def ParseParallelEmergeArgs(self, argv):
227 """Read the parallel emerge arguments from the command-line.
228
229 We need to be compatible with emerge arg format. We scrape arguments that
230 are specific to parallel_emerge, and pass through the rest directly to
231 emerge.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500232
David Jamesfcb70ef2011-02-02 16:02:30 -0800233 Args:
234 argv: arguments list
Mike Frysinger1a736a82013-12-12 01:50:59 -0500235
David Jamesfcb70ef2011-02-02 16:02:30 -0800236 Returns:
237 Arguments that don't belong to parallel_emerge
238 """
239 emerge_args = []
240 for arg in argv:
241 # Specifically match arguments that are specific to parallel_emerge, and
242 # pass through the rest.
243 if arg.startswith("--board="):
244 self.board = arg.replace("--board=", "")
245 elif arg.startswith("--workon="):
246 workon_str = arg.replace("--workon=", "")
David James7a1ea4b2011-10-13 15:06:41 -0700247 emerge_args.append("--reinstall-atoms=%s" % workon_str)
248 emerge_args.append("--usepkg-exclude=%s" % workon_str)
David Jamesfcb70ef2011-02-02 16:02:30 -0800249 elif arg.startswith("--force-remote-binary="):
250 force_remote_binary = arg.replace("--force-remote-binary=", "")
David James7a1ea4b2011-10-13 15:06:41 -0700251 emerge_args.append("--useoldpkg-atoms=%s" % force_remote_binary)
David Jamesfcb70ef2011-02-02 16:02:30 -0800252 elif arg == "--show-output":
253 self.show_output = True
David James386ccd12011-05-04 20:17:42 -0700254 elif arg == "--rebuild":
David James7a1ea4b2011-10-13 15:06:41 -0700255 emerge_args.append("--rebuild-if-unbuilt")
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700256 elif arg == "--unpackonly":
257 emerge_args.append("--fetchonly")
258 self.unpack_only = True
David Jamesfcb70ef2011-02-02 16:02:30 -0800259 else:
260 # Not one of our options, so pass through to emerge.
261 emerge_args.append(arg)
262
David James386ccd12011-05-04 20:17:42 -0700263 # These packages take a really long time to build, so, for expediency, we
 264 # are excluding them from the automatic rebuilds that would otherwise be
 265 # triggered when one of their dependencies is recompiled.
Mike Frysinger5c2a9052014-04-15 15:52:04 -0400266 for pkg in ("chromeos-base/chromeos-chrome",):
David James7a1ea4b2011-10-13 15:06:41 -0700267 emerge_args.append("--rebuild-exclude=%s" % pkg)
David Jamesfcb70ef2011-02-02 16:02:30 -0800268
269 return emerge_args
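    # Illustrative example (hypothetical package names, not real output):
    #   ParseParallelEmergeArgs(["--board=foo", "--workon=sys-apps/bar", "-uDN", "pkg"])
    # would set self.board to "foo" and return roughly
    #   ["--reinstall-atoms=sys-apps/bar", "--usepkg-exclude=sys-apps/bar",
    #    "-uDN", "pkg", "--rebuild-exclude=chromeos-base/chromeos-chrome"]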
270
271 def Initialize(self, args):
272 """Initializer. Parses arguments and sets up portage state."""
273
274 # Parse and strip out args that are just intended for parallel_emerge.
275 emerge_args = self.ParseParallelEmergeArgs(args)
276
 277 # Set up various environment variables based on our current board. These
 278 # variables are normally set up inside emerge-${BOARD}, but since we don't
279 # call that script, we have to set it up here. These variables serve to
280 # point our tools at /build/BOARD and to setup cross compiles to the
281 # appropriate board as configured in toolchain.conf.
282 if self.board:
Yu-Ju Hongdd9bb2b2014-01-03 17:08:26 -0800283 sysroot = cros_build_lib.GetSysroot(board=self.board)
284 os.environ["PORTAGE_CONFIGROOT"] = sysroot
285 os.environ["PORTAGE_SYSROOT"] = sysroot
286 os.environ["SYSROOT"] = sysroot
Don Garrett0760f242014-09-23 19:37:01 -0700287 # This enables licensing in gen-package-licenses.sh.
288 # TODO(dgarrett): Remove when it's no longer optional.
289 os.environ["ENABLE_LICENSING"] = "Y"
David Jamesfcb70ef2011-02-02 16:02:30 -0800290
 291 # Although CHROMEOS_ROOT isn't specific to boards, it's normally set up
292 # inside emerge-${BOARD}, so we set it up here for compatibility. It
293 # will be going away soon as we migrate to CROS_WORKON_SRCROOT.
294 os.environ.setdefault("CHROMEOS_ROOT", os.environ["HOME"] + "/trunk")
295
296 # Turn off interactive delays
297 os.environ["EBEEP_IGNORE"] = "1"
298 os.environ["EPAUSE_IGNORE"] = "1"
Mike Frysinger0a647fc2012-08-06 14:36:05 -0400299 os.environ["CLEAN_DELAY"] = "0"
David Jamesfcb70ef2011-02-02 16:02:30 -0800300
301 # Parse the emerge options.
David Jamesea3ca332011-05-26 11:48:29 -0700302 action, opts, cmdline_packages = parse_opts(emerge_args, silent=True)
David Jamesfcb70ef2011-02-02 16:02:30 -0800303
304 # Set environment variables based on options. Portage normally sets these
305 # environment variables in emerge_main, but we can't use that function,
306 # because it also does a bunch of other stuff that we don't want.
307 # TODO(davidjames): Patch portage to move this logic into a function we can
308 # reuse here.
309 if "--debug" in opts:
310 os.environ["PORTAGE_DEBUG"] = "1"
311 if "--config-root" in opts:
312 os.environ["PORTAGE_CONFIGROOT"] = opts["--config-root"]
313 if "--root" in opts:
314 os.environ["ROOT"] = opts["--root"]
315 if "--accept-properties" in opts:
316 os.environ["ACCEPT_PROPERTIES"] = opts["--accept-properties"]
317
David James88d780c2014-02-05 13:03:29 -0800318 # If we're installing packages to the board, we can disable vardb locks.
319 # This is safe because we only run up to one instance of parallel_emerge in
320 # parallel.
321 # TODO(davidjames): Enable this for the host too.
322 if self.board:
David Jamesfcb70ef2011-02-02 16:02:30 -0800323 os.environ.setdefault("PORTAGE_LOCKS", "false")
David Jamesfcb70ef2011-02-02 16:02:30 -0800324
325 # Now that we've setup the necessary environment variables, we can load the
326 # emerge config from disk.
327 settings, trees, mtimedb = load_emerge_config()
328
David Jamesea3ca332011-05-26 11:48:29 -0700329 # Add in EMERGE_DEFAULT_OPTS, if specified.
330 tmpcmdline = []
331 if "--ignore-default-opts" not in opts:
332 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
333 tmpcmdline.extend(emerge_args)
334 action, opts, cmdline_packages = parse_opts(tmpcmdline)
335
336 # If we're installing to the board, we want the --root-deps option so that
337 # portage will install the build dependencies to that location as well.
338 if self.board:
339 opts.setdefault("--root-deps", True)
340
David Jamesfcb70ef2011-02-02 16:02:30 -0800341 # Check whether our portage tree is out of date. Typically, this happens
342 # when you're setting up a new portage tree, such as in setup_board and
343 # make_chroot. In that case, portage applies a bunch of global updates
344 # here. Once the updates are finished, we need to commit any changes
345 # that the global update made to our mtimedb, and reload the config.
346 #
347 # Portage normally handles this logic in emerge_main, but again, we can't
348 # use that function here.
349 if _global_updates(trees, mtimedb["updates"]):
350 mtimedb.commit()
351 settings, trees, mtimedb = load_emerge_config(trees=trees)
352
 353 # Set up implied options. Portage normally handles this logic in
354 # emerge_main.
355 if "--buildpkgonly" in opts or "buildpkg" in settings.features:
356 opts.setdefault("--buildpkg", True)
357 if "--getbinpkgonly" in opts:
358 opts.setdefault("--usepkgonly", True)
359 opts.setdefault("--getbinpkg", True)
360 if "getbinpkg" in settings.features:
361 # Per emerge_main, FEATURES=getbinpkg overrides --getbinpkg=n
362 opts["--getbinpkg"] = True
363 if "--getbinpkg" in opts or "--usepkgonly" in opts:
364 opts.setdefault("--usepkg", True)
365 if "--fetch-all-uri" in opts:
366 opts.setdefault("--fetchonly", True)
367 if "--skipfirst" in opts:
368 opts.setdefault("--resume", True)
369 if "--buildpkgonly" in opts:
370 # --buildpkgonly will not merge anything, so it overrides all binary
371 # package options.
372 for opt in ("--getbinpkg", "--getbinpkgonly",
373 "--usepkg", "--usepkgonly"):
374 opts.pop(opt, None)
375 if (settings.get("PORTAGE_DEBUG", "") == "1" and
376 "python-trace" in settings.features):
377 portage.debug.set_trace(True)
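    # Illustrative chain of the implied options handled above: "--getbinpkgonly"
    # turns on "--usepkgonly" and "--getbinpkg", either of which turns on
    # "--usepkg"; "--buildpkgonly" then strips all of the binary-package
    # options again, since it never actually merges anything.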
378
379 # Complain about unsupported options
David James386ccd12011-05-04 20:17:42 -0700380 for opt in ("--ask", "--ask-enter-invalid", "--resume", "--skipfirst"):
David Jamesfcb70ef2011-02-02 16:02:30 -0800381 if opt in opts:
Mike Frysinger383367e2014-09-16 15:06:17 -0400382 print("%s is not supported by parallel_emerge" % opt)
David Jamesfcb70ef2011-02-02 16:02:30 -0800383 sys.exit(1)
384
385 # Make emerge specific adjustments to the config (e.g. colors!)
386 adjust_configs(opts, trees)
387
388 # Save our configuration so far in the emerge object
389 emerge = self.emerge
390 emerge.action, emerge.opts = action, opts
391 emerge.settings, emerge.trees, emerge.mtimedb = settings, trees, mtimedb
392 emerge.cmdline_packages = cmdline_packages
393 root = settings["ROOT"]
394 emerge.root_config = trees[root]["root_config"]
395
David James386ccd12011-05-04 20:17:42 -0700396 if "--usepkg" in opts:
David Jamesfcb70ef2011-02-02 16:02:30 -0800397 emerge.trees[root]["bintree"].populate("--getbinpkg" in opts)
398
David Jamesfcb70ef2011-02-02 16:02:30 -0800399 def CreateDepgraph(self, emerge, packages):
400 """Create an emerge depgraph object."""
 401 # Set up emerge options.
402 emerge_opts = emerge.opts.copy()
403
David James386ccd12011-05-04 20:17:42 -0700404 # Ask portage to build a dependency graph with the options we specified
405 # above.
David Jamesfcb70ef2011-02-02 16:02:30 -0800406 params = create_depgraph_params(emerge_opts, emerge.action)
David Jamesbf1e3442011-05-28 07:44:20 -0700407 success, depgraph, favorites = backtrack_depgraph(
David James386ccd12011-05-04 20:17:42 -0700408 emerge.settings, emerge.trees, emerge_opts, params, emerge.action,
409 packages, emerge.spinner)
410 emerge.depgraph = depgraph
David Jamesfcb70ef2011-02-02 16:02:30 -0800411
David James386ccd12011-05-04 20:17:42 -0700412 # Is it impossible to honor the user's request? Bail!
413 if not success:
414 depgraph.display_problems()
415 sys.exit(1)
David Jamesfcb70ef2011-02-02 16:02:30 -0800416
417 emerge.depgraph = depgraph
David Jamesbf1e3442011-05-28 07:44:20 -0700418 emerge.favorites = favorites
David Jamesfcb70ef2011-02-02 16:02:30 -0800419
David Jamesdeebd692011-05-09 17:02:52 -0700420 # Prime and flush emerge caches.
421 root = emerge.settings["ROOT"]
422 vardb = emerge.trees[root]["vartree"].dbapi
David James0bdc5de2011-05-12 16:22:26 -0700423 if "--pretend" not in emerge.opts:
424 vardb.counter_tick()
David Jamesdeebd692011-05-09 17:02:52 -0700425 vardb.flush_cache()
426
David James386ccd12011-05-04 20:17:42 -0700427 def GenDependencyTree(self):
David Jamesfcb70ef2011-02-02 16:02:30 -0800428 """Get dependency tree info from emerge.
429
David Jamesfcb70ef2011-02-02 16:02:30 -0800430 Returns:
431 Dependency tree
432 """
433 start = time.time()
434
435 emerge = self.emerge
436
437 # Create a list of packages to merge
438 packages = set(emerge.cmdline_packages[:])
David Jamesfcb70ef2011-02-02 16:02:30 -0800439
440 # Tell emerge to be quiet. We print plenty of info ourselves so we don't
441 # need any extra output from portage.
442 portage.util.noiselimit = -1
443
444 # My favorite feature: The silent spinner. It doesn't spin. Ever.
445 # I'd disable the colors by default too, but they look kind of cool.
446 emerge.spinner = stdout_spinner()
447 emerge.spinner.update = emerge.spinner.update_quiet
448
449 if "--quiet" not in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -0400450 print("Calculating deps...")
David Jamesfcb70ef2011-02-02 16:02:30 -0800451
452 self.CreateDepgraph(emerge, packages)
453 depgraph = emerge.depgraph
454
455 # Build our own tree from the emerge digraph.
456 deps_tree = {}
Don Garrett25f309a2014-03-19 14:02:12 -0700457 # pylint: disable=W0212
David Jamesfcb70ef2011-02-02 16:02:30 -0800458 digraph = depgraph._dynamic_config.digraph
David James3f778802011-08-25 19:31:45 -0700459 root = emerge.settings["ROOT"]
Bertrand SIMONNETb35e19e2014-07-28 16:29:58 -0700460 final_db = get_db(depgraph._dynamic_config, root)
David Jamesfcb70ef2011-02-02 16:02:30 -0800461 for node, node_deps in digraph.nodes.items():
462 # Calculate dependency packages that need to be installed first. Each
463 # child on the digraph is a dependency. The "operation" field specifies
464 # what we're doing (e.g. merge, uninstall, etc.). The "priorities" array
465 # contains the type of dependency (e.g. build, runtime, runtime_post,
466 # etc.)
467 #
David Jamesfcb70ef2011-02-02 16:02:30 -0800468 # Portage refers to the identifiers for packages as a CPV. This acronym
 469 # stands for Category/Package/Version.
470 #
471 # Here's an example CPV: chromeos-base/power_manager-0.0.1-r1
472 # Split up, this CPV would be:
 473 # C -- Category: chromeos-base
 474 # P -- Package: power_manager
475 # V -- Version: 0.0.1-r1
476 #
477 # We just refer to CPVs as packages here because it's easier.
478 deps = {}
479 for child, priorities in node_deps[0].items():
David James3f778802011-08-25 19:31:45 -0700480 if isinstance(child, Package) and child.root == root:
481 cpv = str(child.cpv)
482 action = str(child.operation)
483
484 # If we're uninstalling a package, check whether Portage is
485 # installing a replacement. If so, just depend on the installation
486 # of the new package, because the old package will automatically
487 # be uninstalled at that time.
488 if action == "uninstall":
489 for pkg in final_db.match_pkgs(child.slot_atom):
490 cpv = str(pkg.cpv)
491 action = "merge"
492 break
493
494 deps[cpv] = dict(action=action,
495 deptypes=[str(x) for x in priorities],
496 deps={})
David Jamesfcb70ef2011-02-02 16:02:30 -0800497
498 # We've built our list of deps, so we can add our package to the tree.
David James3f778802011-08-25 19:31:45 -0700499 if isinstance(node, Package) and node.root == root:
David Jamesfcb70ef2011-02-02 16:02:30 -0800500 deps_tree[str(node.cpv)] = dict(action=str(node.operation),
501 deps=deps)
502
David Jamesfcb70ef2011-02-02 16:02:30 -0800503 # Ask portage for its install plan, so that we can only throw out
David James386ccd12011-05-04 20:17:42 -0700504 # dependencies that portage throws out.
David Jamesfcb70ef2011-02-02 16:02:30 -0800505 deps_info = {}
506 for pkg in depgraph.altlist():
507 if isinstance(pkg, Package):
David James3f778802011-08-25 19:31:45 -0700508 assert pkg.root == root
David Jamesfcb70ef2011-02-02 16:02:30 -0800509 self.package_db[pkg.cpv] = pkg
510
David Jamesfcb70ef2011-02-02 16:02:30 -0800511 # Save off info about the package
David James386ccd12011-05-04 20:17:42 -0700512 deps_info[str(pkg.cpv)] = {"idx": len(deps_info)}
David Jamesfcb70ef2011-02-02 16:02:30 -0800513
514 seconds = time.time() - start
515 if "--quiet" not in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -0400516 print("Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60))
David Jamesfcb70ef2011-02-02 16:02:30 -0800517
518 return deps_tree, deps_info
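    # Sketch of the returned structures (illustrative CPVs, not real output):
    #   deps_tree = {"cat/pkg-1.0": {"action": "merge",
    #                                "deps": {"cat/dep-2.0": {"action": "merge",
    #                                                         "deptypes": ["buildtime"],
    #                                                         "deps": {}}}}}
    #   deps_info = {"cat/pkg-1.0": {"idx": 0}, ...}
    # where "idx" is the package's position in portage's own install ordering
    # (used later by SanitizeTree when deciding which cycle edges to break).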
519
520 def PrintTree(self, deps, depth=""):
521 """Print the deps we have seen in the emerge output.
522
523 Args:
524 deps: Dependency tree structure.
525 depth: Allows printing the tree recursively, with indentation.
526 """
527 for entry in sorted(deps):
528 action = deps[entry]["action"]
Mike Frysinger383367e2014-09-16 15:06:17 -0400529 print("%s %s (%s)" % (depth, entry, action))
David Jamesfcb70ef2011-02-02 16:02:30 -0800530 self.PrintTree(deps[entry]["deps"], depth=depth + " ")
531
David James386ccd12011-05-04 20:17:42 -0700532 def GenDependencyGraph(self, deps_tree, deps_info):
David Jamesfcb70ef2011-02-02 16:02:30 -0800533 """Generate a doubly linked dependency graph.
534
535 Args:
536 deps_tree: Dependency tree structure.
537 deps_info: More details on the dependencies.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500538
David Jamesfcb70ef2011-02-02 16:02:30 -0800539 Returns:
540 Deps graph in the form of a dict of packages, with each package
541 specifying a "needs" list and "provides" list.
542 """
543 emerge = self.emerge
David Jamesfcb70ef2011-02-02 16:02:30 -0800544
David Jamesfcb70ef2011-02-02 16:02:30 -0800545 # deps_map is the actual dependency graph.
546 #
547 # Each package specifies a "needs" list and a "provides" list. The "needs"
548 # list indicates which packages we depend on. The "provides" list
549 # indicates the reverse dependencies -- what packages need us.
550 #
551 # We also provide some other information in the dependency graph:
552 # - action: What we're planning on doing with this package. Generally,
553 # "merge", "nomerge", or "uninstall"
David Jamesfcb70ef2011-02-02 16:02:30 -0800554 deps_map = {}
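    # Sketch of a deps_map entry once ReverseTree below has run (illustrative
    # values): deps_map["cat/pkg-1.0"] ==
    #   {"action": "merge",
    #    "needs": {"cat/dep-2.0": "buildtime"},
    #    "provides": set(["cat/other-3.0"]),
    #    "nodeps": False, "binary": False, "idx": 5}
    # "needs" maps each prerequisite to its dependency types, and "provides"
    # is the reverse edge set: the packages that are waiting on this one.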
555
556 def ReverseTree(packages):
557 """Convert tree to digraph.
558
559 Take the tree of package -> requirements and reverse it to a digraph of
560 buildable packages -> packages they unblock.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500561
David Jamesfcb70ef2011-02-02 16:02:30 -0800562 Args:
563 packages: Tree(s) of dependencies.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500564
David Jamesfcb70ef2011-02-02 16:02:30 -0800565 Returns:
566 Unsanitized digraph.
567 """
David James8c7e5e32011-06-28 11:26:03 -0700568 binpkg_phases = set(["setup", "preinst", "postinst"])
David Jamese5e1c0a2014-09-29 17:19:41 -0700569 needed_dep_types = set(["blocker", "buildtime", "buildtime_slot_op",
570 "runtime", "runtime_slot_op"])
571 ignored_dep_types = set(["ignored", "optional", "runtime_post", "soft"])
572 all_dep_types = ignored_dep_types | needed_dep_types
David Jamesfcb70ef2011-02-02 16:02:30 -0800573 for pkg in packages:
574
575 # Create an entry for the package
576 action = packages[pkg]["action"]
David James8c7e5e32011-06-28 11:26:03 -0700577 default_pkg = {"needs": {}, "provides": set(), "action": action,
578 "nodeps": False, "binary": False}
David Jamesfcb70ef2011-02-02 16:02:30 -0800579 this_pkg = deps_map.setdefault(pkg, default_pkg)
580
David James8c7e5e32011-06-28 11:26:03 -0700581 if pkg in deps_info:
582 this_pkg["idx"] = deps_info[pkg]["idx"]
583
584 # If a package doesn't have any defined phases that might use the
585 # dependent packages (i.e. pkg_setup, pkg_preinst, or pkg_postinst),
586 # we can install this package before its deps are ready.
587 emerge_pkg = self.package_db.get(pkg)
588 if emerge_pkg and emerge_pkg.type_name == "binary":
589 this_pkg["binary"] = True
Mike Frysinger66652ec2014-04-24 11:42:25 -0400590 defined_phases = emerge_pkg.defined_phases
David James8c7e5e32011-06-28 11:26:03 -0700591 defined_binpkg_phases = binpkg_phases.intersection(defined_phases)
592 if not defined_binpkg_phases:
593 this_pkg["nodeps"] = True
594
David Jamesfcb70ef2011-02-02 16:02:30 -0800595 # Create entries for dependencies of this package first.
596 ReverseTree(packages[pkg]["deps"])
597
598 # Add dependencies to this package.
599 for dep, dep_item in packages[pkg]["deps"].iteritems():
David James8c7e5e32011-06-28 11:26:03 -0700600 # We only need to enforce strict ordering of dependencies if the
David James3f778802011-08-25 19:31:45 -0700601 # dependency is a blocker, or is a buildtime or runtime dependency.
602 # (I.e., ignored, optional, and runtime_post dependencies don't
603 # depend on ordering.)
David James8c7e5e32011-06-28 11:26:03 -0700604 dep_types = dep_item["deptypes"]
605 if needed_dep_types.intersection(dep_types):
606 deps_map[dep]["provides"].add(pkg)
607 this_pkg["needs"][dep] = "/".join(dep_types)
David Jamesfcb70ef2011-02-02 16:02:30 -0800608
David Jamese5e1c0a2014-09-29 17:19:41 -0700609 # Verify we processed all appropriate dependency types.
610 unknown_dep_types = set(dep_types) - all_dep_types
611 if unknown_dep_types:
612 print("Unknown dependency types found:")
613 print(" %s -> %s (%s)" % (pkg, dep, "/".join(unknown_dep_types)))
614 sys.exit(1)
615
David James3f778802011-08-25 19:31:45 -0700616 # If there's a blocker, Portage may need to move files from one
617 # package to another, which requires editing the CONTENTS files of
618 # both packages. To avoid race conditions while editing this file,
619 # the two packages must not be installed in parallel, so we can't
620 # safely ignore dependencies. See http://crosbug.com/19328
621 if "blocker" in dep_types:
622 this_pkg["nodeps"] = False
623
David Jamesfcb70ef2011-02-02 16:02:30 -0800624 def FindCycles():
625 """Find cycles in the dependency tree.
626
627 Returns:
628 A dict mapping cyclic packages to a dict of the deps that cause
629 cycles. For each dep that causes cycles, it returns an example
630 traversal of the graph that shows the cycle.
631 """
632
633 def FindCyclesAtNode(pkg, cycles, unresolved, resolved):
634 """Find cycles in cyclic dependencies starting at specified package.
635
636 Args:
637 pkg: Package identifier.
638 cycles: A dict mapping cyclic packages to a dict of the deps that
639 cause cycles. For each dep that causes cycles, it returns an
640 example traversal of the graph that shows the cycle.
641 unresolved: Nodes that have been visited but are not fully processed.
642 resolved: Nodes that have been visited and are fully processed.
643 """
644 pkg_cycles = cycles.get(pkg)
645 if pkg in resolved and not pkg_cycles:
646 # If we already looked at this package, and found no cyclic
647 # dependencies, we can stop now.
648 return
649 unresolved.append(pkg)
650 for dep in deps_map[pkg]["needs"]:
651 if dep in unresolved:
652 idx = unresolved.index(dep)
653 mycycle = unresolved[idx:] + [dep]
David James321490a2012-12-17 12:05:56 -0800654 for i in xrange(len(mycycle) - 1):
David Jamesfcb70ef2011-02-02 16:02:30 -0800655 pkg1, pkg2 = mycycle[i], mycycle[i+1]
656 cycles.setdefault(pkg1, {}).setdefault(pkg2, mycycle)
657 elif not pkg_cycles or dep not in pkg_cycles:
658 # Looks like we haven't seen this edge before.
659 FindCyclesAtNode(dep, cycles, unresolved, resolved)
660 unresolved.pop()
661 resolved.add(pkg)
662
663 cycles, unresolved, resolved = {}, [], set()
664 for pkg in deps_map:
665 FindCyclesAtNode(pkg, cycles, unresolved, resolved)
666 return cycles
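      # Worked example (hypothetical packages): if A needs B and B needs A,
      # the DFS above records an example traversal for both edges, so the
      # result looks roughly like
      #   {"A": {"B": ["A", "B", "A"]}, "B": {"A": ["A", "B", "A"]}}
      # (the recorded path may start at either package, depending on which
      # node the outer loop visits first).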
667
David James386ccd12011-05-04 20:17:42 -0700668 def RemoveUnusedPackages():
David Jamesfcb70ef2011-02-02 16:02:30 -0800669 """Remove installed packages, propagating dependencies."""
David Jamesfcb70ef2011-02-02 16:02:30 -0800670 # Schedule packages that aren't on the install list for removal
671 rm_pkgs = set(deps_map.keys()) - set(deps_info.keys())
672
David Jamesfcb70ef2011-02-02 16:02:30 -0800673 # Remove the packages we don't want, simplifying the graph and making
674 # it easier for us to crack cycles.
675 for pkg in sorted(rm_pkgs):
676 this_pkg = deps_map[pkg]
677 needs = this_pkg["needs"]
678 provides = this_pkg["provides"]
679 for dep in needs:
680 dep_provides = deps_map[dep]["provides"]
681 dep_provides.update(provides)
682 dep_provides.discard(pkg)
683 dep_provides.discard(dep)
684 for target in provides:
685 target_needs = deps_map[target]["needs"]
686 target_needs.update(needs)
687 target_needs.pop(pkg, None)
688 target_needs.pop(target, None)
689 del deps_map[pkg]
690
691 def PrintCycleBreak(basedep, dep, mycycle):
692 """Print details about a cycle that we are planning on breaking.
693
Mike Frysinger02e1e072013-11-10 22:11:34 -0500694 We are breaking a cycle where dep needs basedep. mycycle is an
695 example cycle which contains dep -> basedep.
696 """
David Jamesfcb70ef2011-02-02 16:02:30 -0800697
David Jamesfcb70ef2011-02-02 16:02:30 -0800698 needs = deps_map[dep]["needs"]
699 depinfo = needs.get(basedep, "deleted")
David Jamesfcb70ef2011-02-02 16:02:30 -0800700
David James3f778802011-08-25 19:31:45 -0700701 # It's OK to swap install order for blockers, as long as the two
702 # packages aren't installed in parallel. If there is a cycle, then
703 # we know the packages depend on each other already, so we can drop the
704 # blocker safely without printing a warning.
705 if depinfo == "blocker":
706 return
707
David Jamesfcb70ef2011-02-02 16:02:30 -0800708 # Notify the user that we're breaking a cycle.
Mike Frysinger383367e2014-09-16 15:06:17 -0400709 print("Breaking %s -> %s (%s)" % (dep, basedep, depinfo))
David Jamesfcb70ef2011-02-02 16:02:30 -0800710
711 # Show cycle.
David James321490a2012-12-17 12:05:56 -0800712 for i in xrange(len(mycycle) - 1):
David Jamesfcb70ef2011-02-02 16:02:30 -0800713 pkg1, pkg2 = mycycle[i], mycycle[i+1]
714 needs = deps_map[pkg1]["needs"]
715 depinfo = needs.get(pkg2, "deleted")
716 if pkg1 == dep and pkg2 == basedep:
717 depinfo = depinfo + ", deleting"
Mike Frysinger383367e2014-09-16 15:06:17 -0400718 print(" %s -> %s (%s)" % (pkg1, pkg2, depinfo))
David Jamesfcb70ef2011-02-02 16:02:30 -0800719
720 def SanitizeTree():
721 """Remove circular dependencies.
722
723 We prune all dependencies involved in cycles that go against the emerge
724 ordering. This has a nice property: we're guaranteed to merge
725 dependencies in the same order that portage does.
726
727 Because we don't treat any dependencies as "soft" unless they're killed
728 by a cycle, we pay attention to a larger number of dependencies when
729 merging. This hurts performance a bit, but helps reliability.
730 """
731 start = time.time()
732 cycles = FindCycles()
733 while cycles:
734 for dep, mycycles in cycles.iteritems():
735 for basedep, mycycle in mycycles.iteritems():
736 if deps_info[basedep]["idx"] >= deps_info[dep]["idx"]:
Matt Tennant08797302011-10-17 16:18:45 -0700737 if "--quiet" not in emerge.opts:
738 PrintCycleBreak(basedep, dep, mycycle)
David Jamesfcb70ef2011-02-02 16:02:30 -0800739 del deps_map[dep]["needs"][basedep]
740 deps_map[basedep]["provides"].remove(dep)
741 cycles = FindCycles()
742 seconds = time.time() - start
743 if "--quiet" not in emerge.opts and seconds >= 0.1:
Mike Frysinger383367e2014-09-16 15:06:17 -0400744 print("Tree sanitized in %dm%.1fs" % (seconds / 60, seconds % 60))
David Jamesfcb70ef2011-02-02 16:02:30 -0800745
David James8c7e5e32011-06-28 11:26:03 -0700746 def FindRecursiveProvides(pkg, seen):
747 """Find all nodes that require a particular package.
748
749 Assumes that graph is acyclic.
750
751 Args:
752 pkg: Package identifier.
753 seen: Nodes that have been visited so far.
754 """
755 if pkg in seen:
756 return
757 seen.add(pkg)
758 info = deps_map[pkg]
759 info["tprovides"] = info["provides"].copy()
760 for dep in info["provides"]:
761 FindRecursiveProvides(dep, seen)
762 info["tprovides"].update(deps_map[dep]["tprovides"])
763
David Jamesa22906f2011-05-04 19:53:26 -0700764 ReverseTree(deps_tree)
David Jamesa22906f2011-05-04 19:53:26 -0700765
David James386ccd12011-05-04 20:17:42 -0700766 # We need to remove unused packages so that we can use the dependency
767 # ordering of the install process to show us what cycles to crack.
768 RemoveUnusedPackages()
David Jamesfcb70ef2011-02-02 16:02:30 -0800769 SanitizeTree()
David James8c7e5e32011-06-28 11:26:03 -0700770 seen = set()
771 for pkg in deps_map:
772 FindRecursiveProvides(pkg, seen)
David Jamesfcb70ef2011-02-02 16:02:30 -0800773 return deps_map
774
775 def PrintInstallPlan(self, deps_map):
776 """Print an emerge-style install plan.
777
778 The install plan lists what packages we're installing, in order.
779 It's useful for understanding what parallel_emerge is doing.
780
781 Args:
782 deps_map: The dependency graph.
783 """
784
785 def InstallPlanAtNode(target, deps_map):
786 nodes = []
787 nodes.append(target)
788 for dep in deps_map[target]["provides"]:
789 del deps_map[dep]["needs"][target]
790 if not deps_map[dep]["needs"]:
791 nodes.extend(InstallPlanAtNode(dep, deps_map))
792 return nodes
793
794 deps_map = copy.deepcopy(deps_map)
795 install_plan = []
796 plan = set()
797 for target, info in deps_map.iteritems():
798 if not info["needs"] and target not in plan:
799 for item in InstallPlanAtNode(target, deps_map):
800 plan.add(item)
801 install_plan.append(self.package_db[item])
802
803 for pkg in plan:
804 del deps_map[pkg]
805
806 if deps_map:
Mike Frysinger383367e2014-09-16 15:06:17 -0400807 print("Cyclic dependencies:", " ".join(deps_map))
David Jamesfcb70ef2011-02-02 16:02:30 -0800808 PrintDepsMap(deps_map)
809 sys.exit(1)
810
811 self.emerge.depgraph.display(install_plan)
812
813
814def PrintDepsMap(deps_map):
815 """Print dependency graph, for each package list it's prerequisites."""
816 for i in sorted(deps_map):
Mike Frysinger383367e2014-09-16 15:06:17 -0400817 print("%s: (%s) needs" % (i, deps_map[i]["action"]))
David Jamesfcb70ef2011-02-02 16:02:30 -0800818 needs = deps_map[i]["needs"]
819 for j in sorted(needs):
Mike Frysinger383367e2014-09-16 15:06:17 -0400820 print(" %s" % (j))
David Jamesfcb70ef2011-02-02 16:02:30 -0800821 if not needs:
Mike Frysinger383367e2014-09-16 15:06:17 -0400822 print(" no dependencies")
David Jamesfcb70ef2011-02-02 16:02:30 -0800823
824
825class EmergeJobState(object):
Don Garrett25f309a2014-03-19 14:02:12 -0700826 """Structure describing the EmergeJobState."""
827
David Jamesfcb70ef2011-02-02 16:02:30 -0800828 __slots__ = ["done", "filename", "last_notify_timestamp", "last_output_seek",
829 "last_output_timestamp", "pkgname", "retcode", "start_timestamp",
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700830 "target", "fetch_only", "unpack_only"]
David Jamesfcb70ef2011-02-02 16:02:30 -0800831
832 def __init__(self, target, pkgname, done, filename, start_timestamp,
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700833 retcode=None, fetch_only=False, unpack_only=False):
David Jamesfcb70ef2011-02-02 16:02:30 -0800834
835 # The full name of the target we're building (e.g.
Mike Frysingerfd969312014-04-02 22:16:42 -0400836 # virtual/target-os-1-r60)
David Jamesfcb70ef2011-02-02 16:02:30 -0800837 self.target = target
838
Mike Frysingerfd969312014-04-02 22:16:42 -0400839 # The short name of the target we're building (e.g. target-os-1-r60)
David Jamesfcb70ef2011-02-02 16:02:30 -0800840 self.pkgname = pkgname
841
842 # Whether the job is done. (True if the job is done; false otherwise.)
843 self.done = done
844
845 # The filename where output is currently stored.
846 self.filename = filename
847
848 # The timestamp of the last time we printed the name of the log file. We
849 # print this at the beginning of the job, so this starts at
850 # start_timestamp.
851 self.last_notify_timestamp = start_timestamp
852
853 # The location (in bytes) of the end of the last complete line we printed.
854 # This starts off at zero. We use this to jump to the right place when we
855 # print output from the same ebuild multiple times.
856 self.last_output_seek = 0
857
858 # The timestamp of the last time we printed output. Since we haven't
859 # printed output yet, this starts at zero.
860 self.last_output_timestamp = 0
861
862 # The return code of our job, if the job is actually finished.
863 self.retcode = retcode
864
Brian Harring0be85c62012-03-17 19:52:12 -0700865 # Was this just a fetch job?
866 self.fetch_only = fetch_only
867
David Jamesfcb70ef2011-02-02 16:02:30 -0800868 # The timestamp when our job started.
869 self.start_timestamp = start_timestamp
870
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700871 # Whether we only unpack packages instead of emerging them.
872 self.unpack_only = unpack_only
873
David Jamesfcb70ef2011-02-02 16:02:30 -0800874
David James321490a2012-12-17 12:05:56 -0800875def KillHandler(_signum, _frame):
David James7358d032011-05-19 10:40:03 -0700876 # Kill self and all subprocesses.
877 os.killpg(0, signal.SIGKILL)
878
Mike Frysingercc838832014-05-24 13:10:30 -0400879
David Jamesfcb70ef2011-02-02 16:02:30 -0800880def SetupWorkerSignals():
David James321490a2012-12-17 12:05:56 -0800881 def ExitHandler(_signum, _frame):
David James7358d032011-05-19 10:40:03 -0700882 # Set KILLED flag.
883 KILLED.set()
David James13cead42011-05-18 16:22:01 -0700884
David James7358d032011-05-19 10:40:03 -0700885 # Replace our signal handlers so we don't get called recursively.
886 signal.signal(signal.SIGINT, KillHandler)
887 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -0800888
889 # Ensure that we exit quietly and cleanly, if possible, when we receive
890 # SIGTERM or SIGINT signals. By default, when the user hits CTRL-C, all
891 # of the child processes will print details about KeyboardInterrupt
892 # exceptions, which isn't very helpful.
893 signal.signal(signal.SIGINT, ExitHandler)
894 signal.signal(signal.SIGTERM, ExitHandler)
895
Mike Frysingerd74fe4a2014-04-24 11:43:38 -0400896
897def EmergeProcess(output, target, *args, **kwargs):
David James1ed3e252011-10-05 20:26:15 -0700898 """Merge a package in a subprocess.
899
900 Args:
David James1ed3e252011-10-05 20:26:15 -0700901 output: Temporary file to write output.
Mike Frysingerd74fe4a2014-04-24 11:43:38 -0400902 target: The package we'll be processing (for display purposes).
David James6b29d052012-11-02 10:27:27 -0700903 *args: Arguments to pass to Scheduler constructor.
904 **kwargs: Keyword arguments to pass to Scheduler constructor.
David James1ed3e252011-10-05 20:26:15 -0700905
906 Returns:
907 The exit code returned by the subprocess.
908 """
909 pid = os.fork()
910 if pid == 0:
911 try:
Mike Frysingerd74fe4a2014-04-24 11:43:38 -0400912 proctitle.settitle('EmergeProcess', target)
913
David James1ed3e252011-10-05 20:26:15 -0700914 # Sanity checks.
Mike Frysingerf02736e2013-11-08 15:27:00 -0500915 if sys.stdout.fileno() != 1:
916 raise Exception("sys.stdout.fileno() != 1")
917 if sys.stderr.fileno() != 2:
918 raise Exception("sys.stderr.fileno() != 2")
David James1ed3e252011-10-05 20:26:15 -0700919
920 # - Redirect 1 (stdout) and 2 (stderr) at our temporary file.
921 # - Redirect 0 to point at sys.stdin. In this case, sys.stdin
922 # points at a file reading os.devnull, because multiprocessing mucks
923 # with sys.stdin.
924 # - Leave the sys.stdin and output filehandles alone.
925 fd_pipes = {0: sys.stdin.fileno(),
926 1: output.fileno(),
927 2: output.fileno(),
928 sys.stdin.fileno(): sys.stdin.fileno(),
929 output.fileno(): output.fileno()}
Mike Frysinger66652ec2014-04-24 11:42:25 -0400930 # pylint: disable=W0212
931 portage.process._setup_pipes(fd_pipes, close_fds=False)
David James1ed3e252011-10-05 20:26:15 -0700932
933 # Portage doesn't like when sys.stdin.fileno() != 0, so point sys.stdin
934 # at the filehandle we just created in _setup_pipes.
935 if sys.stdin.fileno() != 0:
David James6b29d052012-11-02 10:27:27 -0700936 sys.__stdin__ = sys.stdin = os.fdopen(0, "r")
937
938 scheduler = Scheduler(*args, **kwargs)
939
940 # Enable blocker handling even though we're in --nodeps mode. This
941 # allows us to unmerge the blocker after we've merged the replacement.
942 scheduler._opts_ignore_blockers = frozenset()
David James1ed3e252011-10-05 20:26:15 -0700943
944 # Actually do the merge.
945 retval = scheduler.merge()
946
947 # We catch all exceptions here (including SystemExit, KeyboardInterrupt,
948 # etc) so as to ensure that we don't confuse the multiprocessing module,
949 # which expects that all forked children exit with os._exit().
David James321490a2012-12-17 12:05:56 -0800950 # pylint: disable=W0702
David James1ed3e252011-10-05 20:26:15 -0700951 except:
952 traceback.print_exc(file=output)
953 retval = 1
954 sys.stdout.flush()
955 sys.stderr.flush()
956 output.flush()
Don Garrett25f309a2014-03-19 14:02:12 -0700957 # pylint: disable=W0212
David James1ed3e252011-10-05 20:26:15 -0700958 os._exit(retval)
959 else:
960 # Return the exit code of the subprocess.
961 return os.waitpid(pid, 0)[1]
David Jamesfcb70ef2011-02-02 16:02:30 -0800962
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700963
964def UnpackPackage(pkg_state):
965 """Unpacks package described by pkg_state.
966
967 Args:
968 pkg_state: EmergeJobState object describing target.
969
970 Returns:
971 Exit code returned by subprocess.
972 """
973 pkgdir = os.environ.get("PKGDIR",
974 os.path.join(os.environ["SYSROOT"], "packages"))
975 root = os.environ.get("ROOT", os.environ["SYSROOT"])
976 path = os.path.join(pkgdir, pkg_state.target + ".tbz2")
977 comp = cros_build_lib.FindCompressor(cros_build_lib.COMP_BZIP2)
978 cmd = [comp, "-dc"]
979 if comp.endswith("pbzip2"):
980 cmd.append("--ignore-trailing-garbage=1")
981 cmd.append(path)
982
983 result = cros_build_lib.RunCommand(cmd, cwd=root, stdout_to_pipe=True,
984 print_cmd=False, error_code_ok=True)
985
986 # If we were not successful, return now and don't attempt untar.
987 if result.returncode:
988 return result.returncode
989
990 cmd = ["sudo", "tar", "-xf", "-", "-C", root]
991 result = cros_build_lib.RunCommand(cmd, cwd=root, input=result.output,
992 print_cmd=False, error_code_ok=True)
993
994 return result.returncode
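  # The above is roughly equivalent to this shell pipeline (sketch; assumes
  # pbzip2 was the compressor picked by FindCompressor):
  #   pbzip2 -dc --ignore-trailing-garbage=1 $PKGDIR/<cpv>.tbz2 | sudo tar -xf - -C $ROOT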
995
996
997def EmergeWorker(task_queue, job_queue, emerge, package_db, fetch_only=False,
998 unpack_only=False):
David Jamesfcb70ef2011-02-02 16:02:30 -0800999 """This worker emerges any packages given to it on the task_queue.
1000
1001 Args:
1002 task_queue: The queue of tasks for this worker to do.
1003 job_queue: The queue of results from the worker.
1004 emerge: An EmergeData() object.
1005 package_db: A dict, mapping package ids to portage Package objects.
Brian Harring0be85c62012-03-17 19:52:12 -07001006 fetch_only: A bool, indicating if we should just fetch the target.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001007 unpack_only: A bool, indicating if we should just unpack the target.
David Jamesfcb70ef2011-02-02 16:02:30 -08001008
1009 It expects package identifiers to be passed to it via task_queue. When
1010 a task is started, it pushes the (target, filename) to the started_queue.
1011 The output is stored in filename. When a merge starts or finishes, we push
1012 EmergeJobState objects to the job_queue.
1013 """
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001014 if fetch_only:
1015 mode = 'fetch'
1016 elif unpack_only:
1017 mode = 'unpack'
1018 else:
1019 mode = 'emerge'
1020 proctitle.settitle('EmergeWorker', mode, '[idle]')
David Jamesfcb70ef2011-02-02 16:02:30 -08001021
1022 SetupWorkerSignals()
1023 settings, trees, mtimedb = emerge.settings, emerge.trees, emerge.mtimedb
David Jamesdeebd692011-05-09 17:02:52 -07001024
1025 # Disable flushing of caches to save on I/O.
David James7a1ea4b2011-10-13 15:06:41 -07001026 root = emerge.settings["ROOT"]
1027 vardb = emerge.trees[root]["vartree"].dbapi
1028 vardb._flush_cache_enabled = False
Brian Harring0be85c62012-03-17 19:52:12 -07001029 bindb = emerge.trees[root]["bintree"].dbapi
1030 # Might be a set, might be a list, might be None; no clue, just use shallow
1031 # copy to ensure we can roll it back.
Don Garrett25f309a2014-03-19 14:02:12 -07001032 # pylint: disable=W0212
Brian Harring0be85c62012-03-17 19:52:12 -07001033 original_remotepkgs = copy.copy(bindb.bintree._remotepkgs)
David Jamesdeebd692011-05-09 17:02:52 -07001034
David Jamesfcb70ef2011-02-02 16:02:30 -08001035 opts, spinner = emerge.opts, emerge.spinner
1036 opts["--nodeps"] = True
Brian Harring0be85c62012-03-17 19:52:12 -07001037 if fetch_only:
1038 opts["--fetchonly"] = True
1039
David Jamesfcb70ef2011-02-02 16:02:30 -08001040 while True:
1041 # Wait for a new item to show up on the queue. This is a blocking wait,
1042 # so if there's nothing to do, we just sit here.
Brian Harring0be85c62012-03-17 19:52:12 -07001043 pkg_state = task_queue.get()
1044 if pkg_state is None:
David Jamesfcb70ef2011-02-02 16:02:30 -08001045 # If target is None, this means that the main thread wants us to quit.
1046 # The other workers need to exit too, so we'll push the message back on
1047 # to the queue so they'll get it too.
Brian Harring0be85c62012-03-17 19:52:12 -07001048 task_queue.put(None)
David Jamesfcb70ef2011-02-02 16:02:30 -08001049 return
David James7358d032011-05-19 10:40:03 -07001050 if KILLED.is_set():
1051 return
1052
Brian Harring0be85c62012-03-17 19:52:12 -07001053 target = pkg_state.target
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001054 proctitle.settitle('EmergeWorker', mode, target)
Brian Harring0be85c62012-03-17 19:52:12 -07001055
David Jamesfcb70ef2011-02-02 16:02:30 -08001056 db_pkg = package_db[target]
Brian Harring0be85c62012-03-17 19:52:12 -07001057
1058 if db_pkg.type_name == "binary":
1059 if not fetch_only and pkg_state.fetched_successfully:
1060 # Ensure portage doesn't think our pkg is remote- else it'll force
1061 # a redownload of it (even if the on-disk file is fine). In-memory
1062 # caching basically, implemented dumbly.
1063 bindb.bintree._remotepkgs = None
1064 else:
 1065 bindb.bintree._remotepkgs = original_remotepkgs
1066
David Jamesfcb70ef2011-02-02 16:02:30 -08001067 db_pkg.root_config = emerge.root_config
1068 install_list = [db_pkg]
1069 pkgname = db_pkg.pf
1070 output = tempfile.NamedTemporaryFile(prefix=pkgname + "-", delete=False)
David James01b1e0f2012-06-07 17:18:05 -07001071 os.chmod(output.name, 0o644)
David Jamesfcb70ef2011-02-02 16:02:30 -08001072 start_timestamp = time.time()
Brian Harring0be85c62012-03-17 19:52:12 -07001073 job = EmergeJobState(target, pkgname, False, output.name, start_timestamp,
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001074 fetch_only=fetch_only, unpack_only=unpack_only)
David Jamesfcb70ef2011-02-02 16:02:30 -08001075 job_queue.put(job)
1076 if "--pretend" in opts:
1077 retcode = 0
1078 else:
David Jamesfcb70ef2011-02-02 16:02:30 -08001079 try:
David James386ccd12011-05-04 20:17:42 -07001080 emerge.scheduler_graph.mergelist = install_list
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001081 if unpack_only:
1082 retcode = UnpackPackage(pkg_state)
1083 else:
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001084 retcode = EmergeProcess(output, target, settings, trees, mtimedb,
1085 opts, spinner, favorites=emerge.favorites,
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001086 graph_config=emerge.scheduler_graph)
David Jamesfcb70ef2011-02-02 16:02:30 -08001087 except Exception:
1088 traceback.print_exc(file=output)
1089 retcode = 1
David James1ed3e252011-10-05 20:26:15 -07001090 output.close()
David Jamesfcb70ef2011-02-02 16:02:30 -08001091
David James7358d032011-05-19 10:40:03 -07001092 if KILLED.is_set():
1093 return
1094
David Jamesfcb70ef2011-02-02 16:02:30 -08001095 job = EmergeJobState(target, pkgname, True, output.name, start_timestamp,
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001096 retcode, fetch_only=fetch_only,
1097 unpack_only=unpack_only)
David Jamesfcb70ef2011-02-02 16:02:30 -08001098 job_queue.put(job)
1099
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001100 # Set the title back to idle as the multiprocess pool won't destroy us;
1101 # when another job comes up, it'll re-use this process.
1102 proctitle.settitle('EmergeWorker', mode, '[idle]')
1103
David Jamesfcb70ef2011-02-02 16:02:30 -08001104
1105class LinePrinter(object):
1106 """Helper object to print a single line."""
1107
1108 def __init__(self, line):
1109 self.line = line
1110
David James321490a2012-12-17 12:05:56 -08001111 def Print(self, _seek_locations):
Mike Frysinger383367e2014-09-16 15:06:17 -04001112 print(self.line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001113
1114
1115class JobPrinter(object):
1116 """Helper object to print output of a job."""
1117
1118 def __init__(self, job, unlink=False):
1119 """Print output of job.
1120
Mike Frysinger02e1e072013-11-10 22:11:34 -05001121 If unlink is True, unlink the job output file when done.
1122 """
David Jamesfcb70ef2011-02-02 16:02:30 -08001123 self.current_time = time.time()
1124 self.job = job
1125 self.unlink = unlink
1126
1127 def Print(self, seek_locations):
1128
1129 job = self.job
1130
1131 # Calculate how long the job has been running.
1132 seconds = self.current_time - job.start_timestamp
1133
1134 # Note that we've printed out the job so far.
1135 job.last_output_timestamp = self.current_time
1136
1137 # Note that we're starting the job
1138 info = "job %s (%dm%.1fs)" % (job.pkgname, seconds / 60, seconds % 60)
1139 last_output_seek = seek_locations.get(job.filename, 0)
1140 if last_output_seek:
Mike Frysinger383367e2014-09-16 15:06:17 -04001141 print("=== Continue output for %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001142 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001143 print("=== Start output for %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001144
1145 # Print actual output from job
1146 f = codecs.open(job.filename, encoding='utf-8', errors='replace')
1147 f.seek(last_output_seek)
1148 prefix = job.pkgname + ":"
1149 for line in f:
1150
1151 # Save off our position in the file
1152 if line and line[-1] == "\n":
1153 last_output_seek = f.tell()
1154 line = line[:-1]
1155
1156 # Print our line
Mike Frysinger383367e2014-09-16 15:06:17 -04001157 print(prefix, line.encode('utf-8', 'replace'))
David Jamesfcb70ef2011-02-02 16:02:30 -08001158 f.close()
1159
1160 # Save our last spot in the file so that we don't print out the same
1161 # location twice.
1162 seek_locations[job.filename] = last_output_seek
1163
1164 # Note end of output section
1165 if job.done:
Mike Frysinger383367e2014-09-16 15:06:17 -04001166 print("=== Complete: %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001167 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001168 print("=== Still running: %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001169
1170 if self.unlink:
1171 os.unlink(job.filename)
1172
1173
1174def PrintWorker(queue):
1175 """A worker that prints stuff to the screen as requested."""
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001176 proctitle.settitle('PrintWorker')
David Jamesfcb70ef2011-02-02 16:02:30 -08001177
David James321490a2012-12-17 12:05:56 -08001178 def ExitHandler(_signum, _frame):
David James7358d032011-05-19 10:40:03 -07001179 # Set KILLED flag.
1180 KILLED.set()
1181
David Jamesfcb70ef2011-02-02 16:02:30 -08001182 # Switch to default signal handlers so that we'll die after two signals.
David James7358d032011-05-19 10:40:03 -07001183 signal.signal(signal.SIGINT, KillHandler)
1184 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -08001185
1186 # Don't exit on the first SIGINT / SIGTERM, because the parent worker will
1187 # handle it and tell us when we need to exit.
1188 signal.signal(signal.SIGINT, ExitHandler)
1189 signal.signal(signal.SIGTERM, ExitHandler)
1190
1191 # seek_locations is a map indicating the position we are at in each file.
1192 # It starts off empty, but is set by the various Print jobs as we go along
1193 # to indicate where we left off in each file.
1194 seek_locations = {}
1195 while True:
1196 try:
1197 job = queue.get()
1198 if job:
1199 job.Print(seek_locations)
David Jamesbccf8eb2011-07-27 14:06:06 -07001200 sys.stdout.flush()
David Jamesfcb70ef2011-02-02 16:02:30 -08001201 else:
1202 break
1203 except IOError as ex:
1204 if ex.errno == errno.EINTR:
1205 # Looks like we received a signal. Keep printing.
1206 continue
1207 raise
1208
Brian Harring867e2362012-03-17 04:05:17 -07001209
Brian Harring0be85c62012-03-17 19:52:12 -07001210class TargetState(object):
Don Garrett25f309a2014-03-19 14:02:12 -07001211 """Structure describing the state of a build target."""
Brian Harring867e2362012-03-17 04:05:17 -07001212
Brian Harring0be85c62012-03-17 19:52:12 -07001213 __slots__ = ("target", "info", "score", "prefetched", "fetched_successfully")
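# 'prefetched' and 'fetched_successfully' are updated by EmergeQueue.Run
# when the fetch stage finishes for this target; 'score' is the sort key
# maintained by update_score() and consumed by ScoredHeap.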
Brian Harring867e2362012-03-17 04:05:17 -07001214
David James321490a2012-12-17 12:05:56 -08001215 def __init__(self, target, info):
Brian Harring867e2362012-03-17 04:05:17 -07001216 self.target, self.info = target, info
Brian Harring0be85c62012-03-17 19:52:12 -07001217 self.fetched_successfully = False
1218 self.prefetched = False
David James321490a2012-12-17 12:05:56 -08001219 self.score = None
Brian Harring867e2362012-03-17 04:05:17 -07001220 self.update_score()
1221
1222 def __cmp__(self, other):
1223 return cmp(self.score, other.score)
1224
1225 def update_score(self):
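# Lower tuples sort first (ScoredHeap wraps heapq, a min-heap): prefer
# targets with more reverse dependencies ("tprovides"/"provides", hence
# the negation), fewer unmet "needs", and binary packages over source
# builds, breaking ties by the original index and target name.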
1226 self.score = (
1227 -len(self.info["tprovides"]),
Brian Harring0be85c62012-03-17 19:52:12 -07001228 len(self.info["needs"]),
Brian Harring11c5eeb2012-03-18 11:02:39 -07001229 not self.info["binary"],
Brian Harring867e2362012-03-17 04:05:17 -07001230 -len(self.info["provides"]),
1231 self.info["idx"],
1232 self.target,
1233 )
1234
1235
1236class ScoredHeap(object):
Don Garrett25f309a2014-03-19 14:02:12 -07001237 """Implementation of a general-purpose scored heap."""
Brian Harring867e2362012-03-17 04:05:17 -07001238
Brian Harring0be85c62012-03-17 19:52:12 -07001239 __slots__ = ("heap", "_heap_set")
1240
Brian Harring867e2362012-03-17 04:05:17 -07001241 def __init__(self, initial=()):
Brian Harring0be85c62012-03-17 19:52:12 -07001242 self.heap = list()
1243 self._heap_set = set()
1244 if initial:
1245 self.multi_put(initial)
Brian Harring867e2362012-03-17 04:05:17 -07001246
1247 def get(self):
Brian Harring0be85c62012-03-17 19:52:12 -07001248 item = heapq.heappop(self.heap)
1249 self._heap_set.remove(item.target)
1250 return item
Brian Harring867e2362012-03-17 04:05:17 -07001251
Brian Harring0be85c62012-03-17 19:52:12 -07001252 def put(self, item):
1253 if not isinstance(item, TargetState):
1254 raise ValueError("Item %r isn't a TargetState" % (item,))
1255 heapq.heappush(self.heap, item)
1256 self._heap_set.add(item.target)
Brian Harring867e2362012-03-17 04:05:17 -07001257
Brian Harring0be85c62012-03-17 19:52:12 -07001258 def multi_put(self, sequence):
1259 sequence = list(sequence)
1260 self.heap.extend(sequence)
1261 self._heap_set.update(x.target for x in sequence)
Brian Harring867e2362012-03-17 04:05:17 -07001262 self.sort()
1263
David James5c9996d2012-03-24 10:50:46 -07001264 def sort(self):
1265 heapq.heapify(self.heap)
1266
Brian Harring0be85c62012-03-17 19:52:12 -07001267 def __contains__(self, target):
1268 return target in self._heap_set
1269
1270 def __nonzero__(self):
1271 return bool(self.heap)
1272
Brian Harring867e2362012-03-17 04:05:17 -07001273 def __len__(self):
1274 return len(self.heap)
1275
1276
David Jamesfcb70ef2011-02-02 16:02:30 -08001277class EmergeQueue(object):
1278 """Class to schedule emerge jobs according to a dependency graph."""
1279
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001280 def __init__(self, deps_map, emerge, package_db, show_output, unpack_only):
David Jamesfcb70ef2011-02-02 16:02:30 -08001281 # Store the dependency graph.
1282 self._deps_map = deps_map
Brian Harring0be85c62012-03-17 19:52:12 -07001283 self._state_map = {}
David Jamesfcb70ef2011-02-02 16:02:30 -08001284 # Initialize the running queue to empty
Brian Harring0be85c62012-03-17 19:52:12 -07001285 self._build_jobs = {}
1286 self._build_ready = ScoredHeap()
1287 self._fetch_jobs = {}
1288 self._fetch_ready = ScoredHeap()
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001289 self._unpack_jobs = {}
1290 self._unpack_ready = ScoredHeap()
David Jamesfcb70ef2011-02-02 16:02:30 -08001291 # List of total package installs represented in deps_map.
1292 install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"]
1293 self._total_jobs = len(install_jobs)
1294 self._show_output = show_output
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001295 self._unpack_only = unpack_only
David Jamesfcb70ef2011-02-02 16:02:30 -08001296
1297 if "--pretend" in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001298 print("Skipping merge because of --pretend mode.")
David Jamesfcb70ef2011-02-02 16:02:30 -08001299 sys.exit(0)
1300
David Jamesaaf49e42014-04-24 09:40:05 -07001301 # Set up a session so we can easily terminate all children.
1302 self._SetupSession()
David James7358d032011-05-19 10:40:03 -07001303
David Jamesfcb70ef2011-02-02 16:02:30 -08001304 # Setup scheduler graph object. This is used by the child processes
1305 # to help schedule jobs.
1306 emerge.scheduler_graph = emerge.depgraph.schedulerGraph()
1307
1308 # Calculate how many jobs we can run in parallel. We don't want to pass
1309 # the --jobs flag over to emerge itself, because that'll tell emerge to
1310 # hide its output, and said output is quite useful for debugging hung
1311 # jobs.
1312 procs = min(self._total_jobs,
1313 emerge.opts.pop("--jobs", multiprocessing.cpu_count()))
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001314 self._build_procs = self._unpack_procs = self._fetch_procs = max(1, procs)
David James8c7e5e32011-06-28 11:26:03 -07001315 self._load_avg = emerge.opts.pop("--load-average", None)
David Jamesfcb70ef2011-02-02 16:02:30 -08001316 self._job_queue = multiprocessing.Queue()
1317 self._print_queue = multiprocessing.Queue()
Brian Harring0be85c62012-03-17 19:52:12 -07001318
1319 self._fetch_queue = multiprocessing.Queue()
1320 args = (self._fetch_queue, self._job_queue, emerge, package_db, True)
1321 self._fetch_pool = multiprocessing.Pool(self._fetch_procs, EmergeWorker,
1322 args)
1323
1324 self._build_queue = multiprocessing.Queue()
1325 args = (self._build_queue, self._job_queue, emerge, package_db)
1326 self._build_pool = multiprocessing.Pool(self._build_procs, EmergeWorker,
1327 args)
1328
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001329 if self._unpack_only:
1330 # Unpack pool only required on unpack_only jobs.
1331 self._unpack_queue = multiprocessing.Queue()
1332 args = (self._unpack_queue, self._job_queue, emerge, package_db, False,
1333 True)
1334 self._unpack_pool = multiprocessing.Pool(self._unpack_procs, EmergeWorker,
1335 args)
1336
David Jamesfcb70ef2011-02-02 16:02:30 -08001337 self._print_worker = multiprocessing.Process(target=PrintWorker,
1338 args=[self._print_queue])
1339 self._print_worker.start()
1340
1341 # Initialize the failed queue to empty.
1342 self._retry_queue = []
1343 self._failed = set()
1344
David Jamesfcb70ef2011-02-02 16:02:30 -08001345 # Setup an exit handler so that we print nice messages if we are
1346 # terminated.
1347 self._SetupExitHandler()
1348
1349 # Schedule our jobs.
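# Every target starts out in the fetch-ready heap; Run() and _Finish()
# move it to the build-ready heap once its fetch completes and its
# "needs" are satisfied.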
Brian Harring0be85c62012-03-17 19:52:12 -07001350 self._state_map.update(
1351 (pkg, TargetState(pkg, data)) for pkg, data in deps_map.iteritems())
1352 self._fetch_ready.multi_put(self._state_map.itervalues())
David Jamesfcb70ef2011-02-02 16:02:30 -08001353
David Jamesaaf49e42014-04-24 09:40:05 -07001354 def _SetupSession(self):
1355 """Set up a session so we can easily terminate all children."""
1356 # When we call os.setsid(), this sets up a session / process group for this
1357 # process and all children. These session groups are needed so that we can
1358 # easily kill all children (including processes launched by emerge) before
1359 # we exit.
1360 #
1361 # One unfortunate side effect of os.setsid() is that it blocks CTRL-C from
1362 # being received. To work around this, we only call os.setsid() in a forked
1363 # process, so that the parent can still watch for CTRL-C. The parent will
1364 # just sit around, watching for signals and propagating them to the child,
1365 # until the child exits.
1366 #
1367 # TODO(davidjames): It would be nice if we could replace this with cgroups.
1368 pid = os.fork()
1369 if pid == 0:
1370 os.setsid()
1371 else:
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001372 proctitle.settitle('SessionManager')
1373
David Jamesaaf49e42014-04-24 09:40:05 -07001374 def PropagateToChildren(signum, _frame):
1375 # Just propagate the signals down to the child. We'll exit when the
1376 # child does.
1377 try:
1378 os.kill(pid, signum)
1379 except OSError as ex:
1380 if ex.errno != errno.ESRCH:
1381 raise
1382 signal.signal(signal.SIGINT, PropagateToChildren)
1383 signal.signal(signal.SIGTERM, PropagateToChildren)
1384
1385 def StopGroup(_signum, _frame):
1386 # When we get stopped, stop the children.
1387 try:
1388 os.killpg(pid, signal.SIGSTOP)
1389 os.kill(0, signal.SIGSTOP)
1390 except OSError as ex:
1391 if ex.errno != errno.ESRCH:
1392 raise
1393 signal.signal(signal.SIGTSTP, StopGroup)
1394
1395 def ContinueGroup(_signum, _frame):
1396 # Launch the children again after being stopped.
1397 try:
1398 os.killpg(pid, signal.SIGCONT)
1399 except OSError as ex:
1400 if ex.errno != errno.ESRCH:
1401 raise
1402 signal.signal(signal.SIGCONT, ContinueGroup)
1403
1404 # Loop until the children exit. We exit with os._exit to be sure we
 1405 # don't run any finalizers (those will be run by the child process).
1406 # pylint: disable=W0212
1407 while True:
1408 try:
1409 # Wait for the process to exit. When it does, exit with the return
1410 # value of the subprocess.
1411 os._exit(osutils.GetExitStatus(os.waitpid(pid, 0)[1]))
1412 except OSError as ex:
1413 if ex.errno == errno.EINTR:
1414 continue
1415 traceback.print_exc()
1416 os._exit(1)
1417 except BaseException:
1418 traceback.print_exc()
1419 os._exit(1)
1420
David Jamesfcb70ef2011-02-02 16:02:30 -08001421 def _SetupExitHandler(self):
1422
David James321490a2012-12-17 12:05:56 -08001423 def ExitHandler(signum, _frame):
David James7358d032011-05-19 10:40:03 -07001424 # Set KILLED flag.
1425 KILLED.set()
David Jamesfcb70ef2011-02-02 16:02:30 -08001426
1427 # Kill our signal handlers so we don't get called recursively
David James7358d032011-05-19 10:40:03 -07001428 signal.signal(signal.SIGINT, KillHandler)
1429 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -08001430
1431 # Print our current job status
Brian Harring0be85c62012-03-17 19:52:12 -07001432 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001433 if job:
1434 self._print_queue.put(JobPrinter(job, unlink=True))
1435
1436 # Notify the user that we are exiting
1437 self._Print("Exiting on signal %s" % signum)
David James7358d032011-05-19 10:40:03 -07001438 self._print_queue.put(None)
1439 self._print_worker.join()
David Jamesfcb70ef2011-02-02 16:02:30 -08001440
1441 # Kill child threads, then exit.
David James7358d032011-05-19 10:40:03 -07001442 os.killpg(0, signal.SIGKILL)
David Jamesfcb70ef2011-02-02 16:02:30 -08001443 sys.exit(1)
1444
1445 # Print out job status when we are killed
1446 signal.signal(signal.SIGINT, ExitHandler)
1447 signal.signal(signal.SIGTERM, ExitHandler)
1448
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001449 def _ScheduleUnpack(self, pkg_state):
1450 self._unpack_jobs[pkg_state.target] = None
1451 self._unpack_queue.put(pkg_state)
1452
Brian Harring0be85c62012-03-17 19:52:12 -07001453 def _Schedule(self, pkg_state):
David Jamesfcb70ef2011-02-02 16:02:30 -08001454 # We maintain a tree of all deps; if this package doesn't need
David James8c7e5e32011-06-28 11:26:03 -07001455 # to be installed, just free up its children and continue.
 1456 # It is possible to reinstall deps of deps without reinstalling
 1457 # first-level deps, like so:
Mike Frysingerfd969312014-04-02 22:16:42 -04001458 # virtual/target-os (merge) -> eselect (nomerge) -> python (merge)
Brian Harring0be85c62012-03-17 19:52:12 -07001459 this_pkg = pkg_state.info
1460 target = pkg_state.target
1461 if pkg_state.info is not None:
1462 if this_pkg["action"] == "nomerge":
1463 self._Finish(target)
1464 elif target not in self._build_jobs:
1465 # Kick off the build if it's marked to be built.
1466 self._build_jobs[target] = None
1467 self._build_queue.put(pkg_state)
1468 return True
David Jamesfcb70ef2011-02-02 16:02:30 -08001469
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001470 def _ScheduleLoop(self, unpack_only=False):
1471 if unpack_only:
1472 ready_queue = self._unpack_ready
1473 jobs_queue = self._unpack_jobs
1474 procs = self._unpack_procs
1475 else:
1476 ready_queue = self._build_ready
1477 jobs_queue = self._build_jobs
1478 procs = self._build_procs
1479
David James8c7e5e32011-06-28 11:26:03 -07001480 # If the current load exceeds our desired load average, don't schedule
1481 # more than one job.
1482 if self._load_avg and os.getloadavg()[0] > self._load_avg:
1483 needed_jobs = 1
1484 else:
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001485 needed_jobs = procs
David James8c7e5e32011-06-28 11:26:03 -07001486
1487 # Schedule more jobs.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001488 while ready_queue and len(jobs_queue) < needed_jobs:
1489 state = ready_queue.get()
1490 if unpack_only:
1491 self._ScheduleUnpack(state)
1492 else:
1493 if state.target not in self._failed:
1494 self._Schedule(state)
David Jamesfcb70ef2011-02-02 16:02:30 -08001495
1496 def _Print(self, line):
1497 """Print a single line."""
1498 self._print_queue.put(LinePrinter(line))
1499
1500 def _Status(self):
1501 """Print status."""
1502 current_time = time.time()
1503 no_output = True
1504
1505 # Print interim output every minute if --show-output is used. Otherwise,
1506 # print notifications about running packages every 2 minutes, and print
1507 # full output for jobs that have been running for 60 minutes or more.
1508 if self._show_output:
1509 interval = 60
1510 notify_interval = 0
1511 else:
1512 interval = 60 * 60
1513 notify_interval = 60 * 2
David James321490a2012-12-17 12:05:56 -08001514 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001515 if job:
1516 last_timestamp = max(job.start_timestamp, job.last_output_timestamp)
1517 if last_timestamp + interval < current_time:
1518 self._print_queue.put(JobPrinter(job))
1519 job.last_output_timestamp = current_time
1520 no_output = False
1521 elif (notify_interval and
1522 job.last_notify_timestamp + notify_interval < current_time):
1523 job_seconds = current_time - job.start_timestamp
1524 args = (job.pkgname, job_seconds / 60, job_seconds % 60, job.filename)
1525 info = "Still building %s (%dm%.1fs). Logs in %s" % args
1526 job.last_notify_timestamp = current_time
1527 self._Print(info)
1528 no_output = False
1529
1530 # If we haven't printed any messages yet, print a general status message
1531 # here.
1532 if no_output:
1533 seconds = current_time - GLOBAL_START
Brian Harring0be85c62012-03-17 19:52:12 -07001534 fjobs, fready = len(self._fetch_jobs), len(self._fetch_ready)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001535 ujobs, uready = len(self._unpack_jobs), len(self._unpack_ready)
Brian Harring0be85c62012-03-17 19:52:12 -07001536 bjobs, bready = len(self._build_jobs), len(self._build_ready)
1537 retries = len(self._retry_queue)
1538 pending = max(0, len(self._deps_map) - fjobs - bjobs)
1539 line = "Pending %s/%s, " % (pending, self._total_jobs)
1540 if fjobs or fready:
1541 line += "Fetching %s/%s, " % (fjobs, fready + fjobs)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001542 if ujobs or uready:
1543 line += "Unpacking %s/%s, " % (ujobs, uready + ujobs)
Brian Harring0be85c62012-03-17 19:52:12 -07001544 if bjobs or bready or retries:
1545 line += "Building %s/%s, " % (bjobs, bready + bjobs)
1546 if retries:
1547 line += "Retrying %s, " % (retries,)
David James8c7e5e32011-06-28 11:26:03 -07001548 load = " ".join(str(x) for x in os.getloadavg())
Brian Harring0be85c62012-03-17 19:52:12 -07001549 line += ("[Time %dm%.1fs Load %s]" % (seconds / 60, seconds % 60, load))
1550 self._Print(line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001551
1552 def _Finish(self, target):
David James8c7e5e32011-06-28 11:26:03 -07001553 """Mark a target as completed and unblock dependencies."""
1554 this_pkg = self._deps_map[target]
1555 if this_pkg["needs"] and this_pkg["nodeps"]:
1556 # We got installed, but our deps have not been installed yet. Dependent
1557 # packages should only be installed when our needs have been fully met.
1558 this_pkg["action"] = "nomerge"
1559 else:
David James8c7e5e32011-06-28 11:26:03 -07001560 for dep in this_pkg["provides"]:
1561 dep_pkg = self._deps_map[dep]
Brian Harring0be85c62012-03-17 19:52:12 -07001562 state = self._state_map[dep]
David James8c7e5e32011-06-28 11:26:03 -07001563 del dep_pkg["needs"][target]
Brian Harring0be85c62012-03-17 19:52:12 -07001564 state.update_score()
1565 if not state.prefetched:
1566 if dep in self._fetch_ready:
1567 # If it's not currently being fetched, update the prioritization
1568 self._fetch_ready.sort()
1569 elif not dep_pkg["needs"]:
David James8c7e5e32011-06-28 11:26:03 -07001570 if dep_pkg["nodeps"] and dep_pkg["action"] == "nomerge":
1571 self._Finish(dep)
1572 else:
Brian Harring0be85c62012-03-17 19:52:12 -07001573 self._build_ready.put(self._state_map[dep])
David James8c7e5e32011-06-28 11:26:03 -07001574 self._deps_map.pop(target)
David Jamesfcb70ef2011-02-02 16:02:30 -08001575
1576 def _Retry(self):
David James8c7e5e32011-06-28 11:26:03 -07001577 while self._retry_queue:
Brian Harring0be85c62012-03-17 19:52:12 -07001578 state = self._retry_queue.pop(0)
1579 if self._Schedule(state):
1580 self._Print("Retrying emerge of %s." % state.target)
David James8c7e5e32011-06-28 11:26:03 -07001581 break
David Jamesfcb70ef2011-02-02 16:02:30 -08001582
Brian Harringa43f5952012-04-12 01:19:34 -07001583 def _Shutdown(self):
David Jamesfcb70ef2011-02-02 16:02:30 -08001584 # Tell emerge workers to exit. They all exit when 'None' is pushed
1585 # to the queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001586
Brian Harringa43f5952012-04-12 01:19:34 -07001587 # Shut down the workers first, then the job queue (which is how they feed
 1588 # results back), and finally the print queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001589
Brian Harringa43f5952012-04-12 01:19:34 -07001590 def _stop(queue, pool):
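# Push a None sentinel so the workers drain the queue and exit, then
# close and join the pool. terminate() runs in the finally block as a
# safety net in case join() fails.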
1591 if pool is None:
1592 return
1593 try:
1594 queue.put(None)
1595 pool.close()
1596 pool.join()
1597 finally:
1598 pool.terminate()
Brian Harring0be85c62012-03-17 19:52:12 -07001599
Brian Harringa43f5952012-04-12 01:19:34 -07001600 _stop(self._fetch_queue, self._fetch_pool)
1601 self._fetch_queue = self._fetch_pool = None
Brian Harring0be85c62012-03-17 19:52:12 -07001602
Brian Harringa43f5952012-04-12 01:19:34 -07001603 _stop(self._build_queue, self._build_pool)
1604 self._build_queue = self._build_pool = None
1605
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001606 if self._unpack_only:
1607 _stop(self._unpack_queue, self._unpack_pool)
1608 self._unpack_queue = self._unpack_pool = None
1609
Brian Harringa43f5952012-04-12 01:19:34 -07001610 if self._job_queue is not None:
1611 self._job_queue.close()
1612 self._job_queue = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001613
1614 # Now that our workers are finished, we can kill the print queue.
Brian Harringa43f5952012-04-12 01:19:34 -07001615 if self._print_worker is not None:
1616 try:
1617 self._print_queue.put(None)
1618 self._print_queue.close()
1619 self._print_worker.join()
1620 finally:
1621 self._print_worker.terminate()
1622 self._print_queue = self._print_worker = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001623
1624 def Run(self):
1625 """Run through the scheduled ebuilds.
1626
1627 Keep running so long as we have uninstalled packages in the
1628 dependency graph to merge.
1629 """
Brian Harringa43f5952012-04-12 01:19:34 -07001630 if not self._deps_map:
1631 return
1632
Brian Harring0be85c62012-03-17 19:52:12 -07001633 # Start the fetchers.
1634 for _ in xrange(min(self._fetch_procs, len(self._fetch_ready))):
1635 state = self._fetch_ready.get()
1636 self._fetch_jobs[state.target] = None
1637 self._fetch_queue.put(state)
1638
1639 # Print an update, then get going.
1640 self._Status()
1641
David Jamese703d0f2012-01-12 16:27:45 -08001642 retried = set()
David Jamesfcb70ef2011-02-02 16:02:30 -08001643 while self._deps_map:
1644 # Check here that we are actually waiting for something.
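# If nothing is fetching, unpacking, or building but deps_map is still
# non-empty, either retry a previously failed package or bail out and
# report the failures / dependency cycle below.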
Brian Harring0be85c62012-03-17 19:52:12 -07001645 if (self._build_queue.empty() and
David Jamesfcb70ef2011-02-02 16:02:30 -08001646 self._job_queue.empty() and
Brian Harring0be85c62012-03-17 19:52:12 -07001647 not self._fetch_jobs and
1648 not self._fetch_ready and
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001649 not self._unpack_jobs and
1650 not self._unpack_ready and
Brian Harring0be85c62012-03-17 19:52:12 -07001651 not self._build_jobs and
1652 not self._build_ready and
David Jamesfcb70ef2011-02-02 16:02:30 -08001653 self._deps_map):
1654 # If we have failed on a package, retry it now.
1655 if self._retry_queue:
1656 self._Retry()
1657 else:
David Jamesfcb70ef2011-02-02 16:02:30 -08001658 # Tell the user why we're exiting.
1659 if self._failed:
Mike Frysinger383367e2014-09-16 15:06:17 -04001660 print('Packages failed:\n\t%s' % '\n\t'.join(self._failed))
David James0eae23e2012-07-03 15:04:25 -07001661 status_file = os.environ.get("PARALLEL_EMERGE_STATUS_FILE")
1662 if status_file:
David James321490a2012-12-17 12:05:56 -08001663 failed_pkgs = set(portage.versions.cpv_getkey(x)
1664 for x in self._failed)
David James0eae23e2012-07-03 15:04:25 -07001665 with open(status_file, "a") as f:
1666 f.write("%s\n" % " ".join(failed_pkgs))
David Jamesfcb70ef2011-02-02 16:02:30 -08001667 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001668 print("Deadlock! Circular dependencies!")
David Jamesfcb70ef2011-02-02 16:02:30 -08001669 sys.exit(1)
1670
David James321490a2012-12-17 12:05:56 -08001671 for _ in xrange(12):
David Jamesa74289a2011-08-12 10:41:24 -07001672 try:
1673 job = self._job_queue.get(timeout=5)
1674 break
1675 except Queue.Empty:
1676 # Check if any more jobs can be scheduled.
1677 self._ScheduleLoop()
1678 else:
Brian Harring706747c2012-03-16 03:04:31 -07001679 # Print an update every 60 seconds.
David Jamesfcb70ef2011-02-02 16:02:30 -08001680 self._Status()
1681 continue
1682
1683 target = job.target
1684
Brian Harring0be85c62012-03-17 19:52:12 -07001685 if job.fetch_only:
1686 if not job.done:
1687 self._fetch_jobs[job.target] = job
1688 else:
1689 state = self._state_map[job.target]
1690 state.prefetched = True
1691 state.fetched_successfully = (job.retcode == 0)
1692 del self._fetch_jobs[job.target]
1693 self._Print("Fetched %s in %2.2fs"
1694 % (target, time.time() - job.start_timestamp))
1695
1696 if self._show_output or job.retcode != 0:
1697 self._print_queue.put(JobPrinter(job, unlink=True))
1698 else:
1699 os.unlink(job.filename)
1700 # Failure or not, let build work with it next.
1701 if not self._deps_map[job.target]["needs"]:
1702 self._build_ready.put(state)
1703 self._ScheduleLoop()
1704
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001705 if self._unpack_only and job.retcode == 0:
1706 self._unpack_ready.put(state)
1707 self._ScheduleLoop(unpack_only=True)
1708
Brian Harring0be85c62012-03-17 19:52:12 -07001709 if self._fetch_ready:
1710 state = self._fetch_ready.get()
1711 self._fetch_queue.put(state)
1712 self._fetch_jobs[state.target] = None
1713 else:
1714 # Minor optimization; shut down fetchers early since we know
1715 # the queue is empty.
1716 self._fetch_queue.put(None)
1717 continue
1718
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001719 if job.unpack_only:
1720 if not job.done:
1721 self._unpack_jobs[target] = job
1722 else:
1723 del self._unpack_jobs[target]
1724 self._Print("Unpacked %s in %2.2fs"
1725 % (target, time.time() - job.start_timestamp))
1726 if self._show_output or job.retcode != 0:
1727 self._print_queue.put(JobPrinter(job, unlink=True))
1728 else:
1729 os.unlink(job.filename)
1730 if self._unpack_ready:
1731 state = self._unpack_ready.get()
1732 self._unpack_queue.put(state)
1733 self._unpack_jobs[state.target] = None
1734 continue
1735
David Jamesfcb70ef2011-02-02 16:02:30 -08001736 if not job.done:
Brian Harring0be85c62012-03-17 19:52:12 -07001737 self._build_jobs[target] = job
David Jamesfcb70ef2011-02-02 16:02:30 -08001738 self._Print("Started %s (logged in %s)" % (target, job.filename))
1739 continue
1740
1741 # Print output of job
1742 if self._show_output or job.retcode != 0:
1743 self._print_queue.put(JobPrinter(job, unlink=True))
1744 else:
1745 os.unlink(job.filename)
Brian Harring0be85c62012-03-17 19:52:12 -07001746 del self._build_jobs[target]
David Jamesfcb70ef2011-02-02 16:02:30 -08001747
1748 seconds = time.time() - job.start_timestamp
1749 details = "%s (in %dm%.1fs)" % (target, seconds / 60, seconds % 60)
David James32420cc2011-08-25 21:32:46 -07001750 previously_failed = target in self._failed
David Jamesfcb70ef2011-02-02 16:02:30 -08001751
1752 # Complain if necessary.
1753 if job.retcode != 0:
1754 # Handle job failure.
David James32420cc2011-08-25 21:32:46 -07001755 if previously_failed:
David Jamesfcb70ef2011-02-02 16:02:30 -08001756 # If this job has failed previously, give up.
1757 self._Print("Failed %s. Your build has failed." % details)
1758 else:
1759 # Queue up this build to try again after a long while.
David Jamese703d0f2012-01-12 16:27:45 -08001760 retried.add(target)
Brian Harring0be85c62012-03-17 19:52:12 -07001761 self._retry_queue.append(self._state_map[target])
David Jamesfcb70ef2011-02-02 16:02:30 -08001762 self._failed.add(target)
1763 self._Print("Failed %s, retrying later." % details)
1764 else:
David James32420cc2011-08-25 21:32:46 -07001765 if previously_failed:
1766 # Remove target from list of failed packages.
1767 self._failed.remove(target)
1768
1769 self._Print("Completed %s" % details)
1770
1771 # Mark as completed and unblock waiting ebuilds.
1772 self._Finish(target)
1773
1774 if previously_failed and self._retry_queue:
David Jamesfcb70ef2011-02-02 16:02:30 -08001775 # If we have successfully retried a failed package, and there
1776 # are more failed packages, try the next one. We will only have
1777 # one retrying package actively running at a time.
1778 self._Retry()
1779
David Jamesfcb70ef2011-02-02 16:02:30 -08001780
David James8c7e5e32011-06-28 11:26:03 -07001781 # Schedule pending jobs and print an update.
1782 self._ScheduleLoop()
1783 self._Status()
David Jamesfcb70ef2011-02-02 16:02:30 -08001784
David Jamese703d0f2012-01-12 16:27:45 -08001785 # If packages were retried, output a warning.
1786 if retried:
1787 self._Print("")
1788 self._Print("WARNING: The following packages failed the first time,")
1789 self._Print("but succeeded upon retry. This might indicate incorrect")
1790 self._Print("dependencies.")
1791 for pkg in retried:
1792 self._Print(" %s" % pkg)
1793 self._Print("@@@STEP_WARNINGS@@@")
1794 self._Print("")
1795
David Jamesfcb70ef2011-02-02 16:02:30 -08001796 # Tell child threads to exit.
1797 self._Print("Merge complete")
David Jamesfcb70ef2011-02-02 16:02:30 -08001798
1799
Brian Harring30675052012-02-29 12:18:22 -08001800def main(argv):
Brian Harring8294d652012-05-23 02:20:52 -07001801 try:
1802 return real_main(argv)
1803 finally:
1804 # Work around multiprocessing sucking and not cleaning up after itself.
 1805 # http://bugs.python.org/issue4106
 1806 # Step one: ensure GC is run *prior* to the VM starting shutdown.
 1807 gc.collect()
 1808 # Step two: go looking for those threads and try to manually reap
 1809 # them if we can.
1810 for x in threading.enumerate():
 1811 # Filter on the name and ident; if ident is None, the thread was
 1812 # never started.
1813 if x.name == 'QueueFeederThread' and x.ident is not None:
1814 x.join(1)
David Jamesfcb70ef2011-02-02 16:02:30 -08001815
Brian Harring8294d652012-05-23 02:20:52 -07001816
Bertrand SIMONNETb35e19e2014-07-28 16:29:58 -07001817def get_db(config, root):
Mike Frysinger33fbccb2014-09-05 17:09:07 -04001818 """Return the dbapi.

Bertrand SIMONNETb35e19e2014-07-28 16:29:58 -07001819 Handles both portage 2.1.11 and 2.2.10 (where mydbapi has been removed).
1820
1821 TODO(bsimonnet): Remove this once portage has been uprevd.
1822 """
1823 try:
1824 return config.mydbapi[root]
1825 except AttributeError:
1826 # pylint: disable=W0212
1827 return config._filtered_trees[root]['graph_db']
1828
1829
Brian Harring8294d652012-05-23 02:20:52 -07001830def real_main(argv):
Brian Harring30675052012-02-29 12:18:22 -08001831 parallel_emerge_args = argv[:]
David Jamesfcb70ef2011-02-02 16:02:30 -08001832 deps = DepGraphGenerator()
Brian Harring30675052012-02-29 12:18:22 -08001833 deps.Initialize(parallel_emerge_args)
David Jamesfcb70ef2011-02-02 16:02:30 -08001834 emerge = deps.emerge
1835
1836 if emerge.action is not None:
Brian Harring30675052012-02-29 12:18:22 -08001837 argv = deps.ParseParallelEmergeArgs(argv)
Brian Harring8294d652012-05-23 02:20:52 -07001838 return emerge_main(argv)
David Jamesfcb70ef2011-02-02 16:02:30 -08001839 elif not emerge.cmdline_packages:
1840 Usage()
Brian Harring8294d652012-05-23 02:20:52 -07001841 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001842
1843 # Unless we're in pretend mode, there's not much point running without
1844 # root access. We need to be able to install packages.
1845 #
1846 # NOTE: Even if you're running --pretend, it's a good idea to run
1847 # parallel_emerge with root access so that portage can write to the
1848 # dependency cache. This is important for performance.
David James321490a2012-12-17 12:05:56 -08001849 if "--pretend" not in emerge.opts and portage.data.secpass < 2:
Mike Frysinger383367e2014-09-16 15:06:17 -04001850 print("parallel_emerge: superuser access is required.")
Brian Harring8294d652012-05-23 02:20:52 -07001851 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001852
1853 if "--quiet" not in emerge.opts:
1854 cmdline_packages = " ".join(emerge.cmdline_packages)
Mike Frysinger383367e2014-09-16 15:06:17 -04001855 print("Starting fast-emerge.")
1856 print(" Building package %s on %s" % (cmdline_packages,
1857 deps.board or "root"))
David Jamesfcb70ef2011-02-02 16:02:30 -08001858
David James386ccd12011-05-04 20:17:42 -07001859 deps_tree, deps_info = deps.GenDependencyTree()
David Jamesfcb70ef2011-02-02 16:02:30 -08001860
1861 # You want me to be verbose? I'll give you two trees! Twice as much value.
1862 if "--tree" in emerge.opts and "--verbose" in emerge.opts:
1863 deps.PrintTree(deps_tree)
1864
David James386ccd12011-05-04 20:17:42 -07001865 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001866
1867 # OK, time to print out our progress so far.
1868 deps.PrintInstallPlan(deps_graph)
1869 if "--tree" in emerge.opts:
1870 PrintDepsMap(deps_graph)
1871
1872 # Are we upgrading portage? If so, and there are more packages to merge,
1873 # schedule a restart of parallel_emerge to merge the rest. This ensures that
1874 # we pick up all updates to portage settings before merging any more
1875 # packages.
1876 portage_upgrade = False
1877 root = emerge.settings["ROOT"]
Don Garrett25f309a2014-03-19 14:02:12 -07001878 # pylint: disable=W0212
Bertrand SIMONNETb35e19e2014-07-28 16:29:58 -07001879 final_db = get_db(emerge.depgraph._dynamic_config, root)
David Jamesfcb70ef2011-02-02 16:02:30 -08001880 if root == "/":
1881 for db_pkg in final_db.match_pkgs("sys-apps/portage"):
1882 portage_pkg = deps_graph.get(db_pkg.cpv)
David James0ff16f22012-11-02 14:18:07 -07001883 if portage_pkg:
David Jamesfcb70ef2011-02-02 16:02:30 -08001884 portage_upgrade = True
1885 if "--quiet" not in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001886 print("Upgrading portage first, then restarting...")
David Jamesfcb70ef2011-02-02 16:02:30 -08001887
David James0ff16f22012-11-02 14:18:07 -07001888 # Upgrade Portage first, then the rest of the packages.
1889 #
1890 # In order to grant the child permission to run setsid, we need to run sudo
1891 # again. We preserve SUDO_USER here in case an ebuild depends on it.
1892 if portage_upgrade:
1893 # Calculate what arguments to use when re-invoking.
1894 args = ["sudo", "-E", "SUDO_USER=%s" % os.environ.get("SUDO_USER", "")]
1895 args += [os.path.abspath(sys.argv[0])] + parallel_emerge_args
1896 args += ["--exclude=sys-apps/portage"]
1897
1898 # First upgrade Portage.
1899 passthrough_args = ("--quiet", "--pretend", "--verbose")
1900 emerge_args = [k for k in emerge.opts if k in passthrough_args]
1901 ret = emerge_main(emerge_args + ["portage"])
1902 if ret != 0:
1903 return ret
1904
1905 # Now upgrade the rest.
1906 os.execvp(args[0], args)
1907
David Jamesfcb70ef2011-02-02 16:02:30 -08001908 # Run the queued emerges.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001909 scheduler = EmergeQueue(deps_graph, emerge, deps.package_db, deps.show_output,
1910 deps.unpack_only)
Brian Harringa43f5952012-04-12 01:19:34 -07001911 try:
1912 scheduler.Run()
1913 finally:
Don Garrett25f309a2014-03-19 14:02:12 -07001914 # pylint: disable=W0212
Brian Harringa43f5952012-04-12 01:19:34 -07001915 scheduler._Shutdown()
David James97ce8902011-08-16 09:51:05 -07001916 scheduler = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001917
Mike Frysingerd20a6e22012-10-04 19:01:10 -04001918 clean_logs(emerge.settings)
1919
Mike Frysinger383367e2014-09-16 15:06:17 -04001920 print("Done")
Brian Harring8294d652012-05-23 02:20:52 -07001921 return 0