# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Program to run emerge in parallel, for significant speedup.

Usage:
 ./parallel_emerge [--board=BOARD] [--workon=PKGS]
                   [--force-remote-binary=PKGS] [emerge args] package

This script runs multiple emerge processes in parallel, using appropriate
Portage APIs. It is faster than standard emerge because it has a
multiprocess model instead of an asynchronous model.
"""

from __future__ import print_function

import codecs
import copy
import errno
import gc
import heapq
import multiprocessing
import os
try:
  import Queue
except ImportError:
  # Python-3 renamed to "queue". We still use Queue to avoid collisions
  # with naming variables as "queue". Maybe we'll transition at some point.
  # pylint: disable=F0401
  import queue as Queue
import signal
import subprocess
import sys
import tempfile
import threading
import time
import traceback

from chromite.lib import cros_build_lib
from chromite.lib import process_util
from chromite.lib import proctitle

# If PORTAGE_USERNAME isn't specified, scrape it from the $HOME variable. On
# Chromium OS, the default "portage" user doesn't have the necessary
# permissions. It'd be easier if we could default to $USERNAME, but $USERNAME
# is "root" here because we get called through sudo.
#
# We need to set this before importing any portage modules, because portage
# looks up "PORTAGE_USERNAME" at import time.
#
# NOTE: .bashrc sets PORTAGE_USERNAME = $USERNAME, so most people won't
# encounter this case unless they have an old chroot or blow away the
# environment by running sudo without the -E specifier.
if "PORTAGE_USERNAME" not in os.environ:
  homedir = os.environ.get("HOME")
  if homedir:
    os.environ["PORTAGE_USERNAME"] = os.path.basename(homedir)

# Wrap Popen with a lock to ensure no two Popen are executed simultaneously in
# the same process.
# Two Popen calls at the same time might be the cause of crbug.com/433482.
_popen_lock = threading.Lock()
_old_popen = subprocess.Popen

def _LockedPopen(*args, **kwargs):
  with _popen_lock:
    return _old_popen(*args, **kwargs)

subprocess.Popen = _LockedPopen

# Portage doesn't expose dependency trees in its public API, so we have to
# make use of some private APIs here. These modules are found under
# /usr/lib/portage/pym/.
#
# TODO(davidjames): Update Portage to expose public APIs for these features.
# pylint: disable=F0401
from _emerge.actions import adjust_configs
from _emerge.actions import load_emerge_config
from _emerge.create_depgraph_params import create_depgraph_params
from _emerge.depgraph import backtrack_depgraph
from _emerge.main import emerge_main
from _emerge.main import parse_opts
from _emerge.Package import Package
from _emerge.post_emerge import clean_logs
from _emerge.Scheduler import Scheduler
from _emerge.stdout_spinner import stdout_spinner
from portage._global_updates import _global_updates
import portage
import portage.debug
# pylint: enable=F0401


def Usage():
  """Print usage."""
  print("Usage:")
  print(" ./parallel_emerge [--board=BOARD] [--workon=PKGS]")
  print("                   [--rebuild] [emerge args] package")
  print()
  print("Packages specified as workon packages are always built from source.")
  print()
  print("The --workon argument is mainly useful when you want to build and")
103 print("install packages that you are working on unconditionally, but do not")
104 print("to have to rev the package to indicate you want to build it from")
105 print("source. The build_packages script will automatically supply the")
106 print("workon argument to emerge, ensuring that packages selected using")
107 print("cros-workon are rebuilt.")
108 print()
109 print("The --rebuild option rebuilds packages whenever their dependencies")
110 print("are changed. This ensures that your build is correct.")
David Jamesfcb70ef2011-02-02 16:02:30 -0800111
112
David Jamesfcb70ef2011-02-02 16:02:30 -0800113# Global start time
114GLOBAL_START = time.time()
115
David James7358d032011-05-19 10:40:03 -0700116# Whether process has been killed by a signal.
117KILLED = multiprocessing.Event()
118
David Jamesfcb70ef2011-02-02 16:02:30 -0800119
120class EmergeData(object):
121 """This simple struct holds various emerge variables.
122
123 This struct helps us easily pass emerge variables around as a unit.
124 These variables are used for calculating dependencies and installing
125 packages.
126 """
127
David Jamesbf1e3442011-05-28 07:44:20 -0700128 __slots__ = ["action", "cmdline_packages", "depgraph", "favorites",
129 "mtimedb", "opts", "root_config", "scheduler_graph",
130 "settings", "spinner", "trees"]
David Jamesfcb70ef2011-02-02 16:02:30 -0800131
132 def __init__(self):
133 # The action the user requested. If the user is installing packages, this
134 # is None. If the user is doing anything other than installing packages,
135 # this will contain the action name, which will map exactly to the
136 # long-form name of the associated emerge option.
137 #
138 # Example: If you call parallel_emerge --unmerge package, the action name
139 # will be "unmerge"
140 self.action = None
141
142 # The list of packages the user passed on the command-line.
143 self.cmdline_packages = None
144
145 # The emerge dependency graph. It'll contain all the packages involved in
146 # this merge, along with their versions.
147 self.depgraph = None
148
David Jamesbf1e3442011-05-28 07:44:20 -0700149 # The list of candidates to add to the world file.
150 self.favorites = None
151
David Jamesfcb70ef2011-02-02 16:02:30 -0800152 # A dict of the options passed to emerge. This dict has been cleaned up
153 # a bit by parse_opts, so that it's a bit easier for the emerge code to
154 # look at the options.
155 #
156 # Emerge takes a few shortcuts in its cleanup process to make parsing of
157 # the options dict easier. For example, if you pass in "--usepkg=n", the
158 # "--usepkg" flag is just left out of the dictionary altogether. Because
159 # --usepkg=n is the default, this makes parsing easier, because emerge
160 # can just assume that if "--usepkg" is in the dictionary, it's enabled.
161 #
162 # These cleanup processes aren't applied to all options. For example, the
163 # --with-bdeps flag is passed in as-is. For a full list of the cleanups
164 # applied by emerge, see the parse_opts function in the _emerge.main
165 # package.
166 self.opts = None
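    # Illustrative sketch (option values hypothetical): a command line
    # containing "--usepkg=n --with-bdeps y" could end up in this dict as
    #   {"--with-bdeps": "y"}
    # with "--usepkg" simply absent, rather than present with a false value.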

    # A dictionary used by portage to maintain global state. This state is
    # loaded from disk when portage starts up, and saved to disk whenever we
    # call mtimedb.commit().
    #
    # This database contains information about global updates (i.e., what
    # version of portage we have) and what we're currently doing. Portage
    # saves what it is currently doing in this database so that it can be
    # resumed when you call it with the --resume option.
    #
    # parallel_emerge does not save what it is currently doing in the mtimedb,
    # so we do not support the --resume option.
    self.mtimedb = None

    # The portage configuration for our current root. This contains the portage
    # settings (see below) and the three portage trees for our current root.
    # (The three portage trees are explained below, in the documentation for
    # the "trees" member.)
    self.root_config = None

    # The scheduler graph is used by emerge to calculate what packages to
    # install. We don't actually install any deps, so this isn't really used,
    # but we pass it in to the Scheduler object anyway.
    self.scheduler_graph = None

    # Portage settings for our current session. Most of these settings are set
    # in make.conf inside our current install root.
    self.settings = None

    # The spinner, which spews stuff to stdout to indicate that portage is
    # doing something. We maintain our own spinner, so we set the portage
    # spinner to "silent" mode.
    self.spinner = None

    # The portage trees. There are separate portage trees for each root. To get
    # the portage tree for the current root, you can look in self.trees[root],
    # where root = self.settings["ROOT"].
    #
    # In each root, there are three trees: vartree, porttree, and bintree.
    # - vartree: A database of the currently-installed packages.
    # - porttree: A database of ebuilds, that can be used to build packages.
    # - bintree: A database of binary packages.
    self.trees = None
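    # Illustrative sketch (mirrors how the trees are used elsewhere in this
    # file): once populated, the trees are indexed by root and tree name, e.g.
    #   root = self.settings["ROOT"]
    #   vardb = self.trees[root]["vartree"].dbapi
    #   bindb = self.trees[root]["bintree"].dbapi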


class DepGraphGenerator(object):
  """Grab dependency information about packages from portage.

  Typical usage:
    deps = DepGraphGenerator()
    deps.Initialize(sys.argv[1:])
    deps_tree, deps_info = deps.GenDependencyTree()
    deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
    deps.PrintTree(deps_tree)
    PrintDepsMap(deps_graph)
  """

  __slots__ = ["board", "emerge", "package_db", "show_output", "sysroot",
               "unpack_only", "max_retries"]

  def __init__(self):
    self.board = None
    self.emerge = EmergeData()
    self.package_db = {}
    self.show_output = False
    self.sysroot = None
    self.unpack_only = False
    self.max_retries = 1

  def ParseParallelEmergeArgs(self, argv):
    """Read the parallel emerge arguments from the command-line.

    We need to be compatible with emerge arg format. We scrape arguments that
    are specific to parallel_emerge, and pass through the rest directly to
    emerge.

    Args:
      argv: arguments list

    Returns:
      Arguments that don't belong to parallel_emerge
    """
    emerge_args = []
    for arg in argv:
      # Specifically match arguments that are specific to parallel_emerge, and
      # pass through the rest.
      if arg.startswith("--board="):
        self.board = arg.replace("--board=", "")
      elif arg.startswith("--sysroot="):
        self.sysroot = arg.replace("--sysroot=", "")
      elif arg.startswith("--workon="):
        workon_str = arg.replace("--workon=", "")
        emerge_args.append("--reinstall-atoms=%s" % workon_str)
        emerge_args.append("--usepkg-exclude=%s" % workon_str)
      elif arg.startswith("--force-remote-binary="):
        force_remote_binary = arg.replace("--force-remote-binary=", "")
        emerge_args.append("--useoldpkg-atoms=%s" % force_remote_binary)
      elif arg.startswith("--retries="):
        self.max_retries = int(arg.replace("--retries=", ""))
      elif arg == "--show-output":
        self.show_output = True
      elif arg == "--rebuild":
        emerge_args.append("--rebuild-if-unbuilt")
      elif arg == "--unpackonly":
        emerge_args.append("--fetchonly")
        self.unpack_only = True
      else:
        # Not one of our options, so pass through to emerge.
        emerge_args.append(arg)

    # These packages take a really long time to build, so, for expediency, we
    # are blacklisting them from the automatic rebuilds that would otherwise
    # be triggered when one of their dependencies is recompiled.
    for pkg in ("chromeos-base/chromeos-chrome",):
      emerge_args.append("--rebuild-exclude=%s" % pkg)

    return emerge_args

  def Initialize(self, args):
    """Initializer. Parses arguments and sets up portage state."""

    # Parse and strip out args that are just intended for parallel_emerge.
    emerge_args = self.ParseParallelEmergeArgs(args)

    if self.sysroot and self.board:
      cros_build_lib.Die("--sysroot and --board are incompatible.")

    # Set up various environment variables based on our current board. These
    # variables are normally set up inside emerge-${BOARD}, but since we don't
    # call that script, we have to set them up here. These variables serve to
    # point our tools at /build/BOARD and to set up cross-compiles to the
    # appropriate board as configured in toolchain.conf.
    if self.board:
      self.sysroot = os.environ.get('SYSROOT',
                                    cros_build_lib.GetSysroot(self.board))

    if self.sysroot:
      os.environ["PORTAGE_CONFIGROOT"] = self.sysroot
      os.environ["SYSROOT"] = self.sysroot

    # Turn off interactive delays
    os.environ["EBEEP_IGNORE"] = "1"
    os.environ["EPAUSE_IGNORE"] = "1"
    os.environ["CLEAN_DELAY"] = "0"

    # Parse the emerge options.
    action, opts, cmdline_packages = parse_opts(emerge_args, silent=True)

    # Set environment variables based on options. Portage normally sets these
    # environment variables in emerge_main, but we can't use that function,
    # because it also does a bunch of other stuff that we don't want.
    # TODO(davidjames): Patch portage to move this logic into a function we can
    # reuse here.
    if "--debug" in opts:
      os.environ["PORTAGE_DEBUG"] = "1"
    if "--config-root" in opts:
      os.environ["PORTAGE_CONFIGROOT"] = opts["--config-root"]
    if "--root" in opts:
      os.environ["ROOT"] = opts["--root"]
    if "--accept-properties" in opts:
      os.environ["ACCEPT_PROPERTIES"] = opts["--accept-properties"]

    # If we're installing packages to the board, we can disable vardb locks.
    # This is safe because we only run up to one instance of parallel_emerge in
    # parallel.
    # TODO(davidjames): Enable this for the host too.
    if self.sysroot:
      os.environ.setdefault("PORTAGE_LOCKS", "false")

    # Now that we've set up the necessary environment variables, we can load the
    # emerge config from disk.
    # pylint: disable=unpacking-non-sequence
    settings, trees, mtimedb = load_emerge_config()

    # Add in EMERGE_DEFAULT_OPTS, if specified.
    tmpcmdline = []
    if "--ignore-default-opts" not in opts:
      tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
    tmpcmdline.extend(emerge_args)
    action, opts, cmdline_packages = parse_opts(tmpcmdline)

    # If we're installing to the board, we want the --root-deps option so that
    # portage will install the build dependencies to that location as well.
    if self.sysroot:
      opts.setdefault("--root-deps", True)

    # Check whether our portage tree is out of date. Typically, this happens
    # when you're setting up a new portage tree, such as in setup_board and
    # make_chroot. In that case, portage applies a bunch of global updates
    # here. Once the updates are finished, we need to commit any changes
    # that the global update made to our mtimedb, and reload the config.
    #
    # Portage normally handles this logic in emerge_main, but again, we can't
    # use that function here.
    if _global_updates(trees, mtimedb["updates"]):
      mtimedb.commit()
      # pylint: disable=unpacking-non-sequence
      settings, trees, mtimedb = load_emerge_config(trees=trees)

    # Setup implied options. Portage normally handles this logic in
    # emerge_main.
    if "--buildpkgonly" in opts or "buildpkg" in settings.features:
      opts.setdefault("--buildpkg", True)
    if "--getbinpkgonly" in opts:
      opts.setdefault("--usepkgonly", True)
      opts.setdefault("--getbinpkg", True)
    if "getbinpkg" in settings.features:
      # Per emerge_main, FEATURES=getbinpkg overrides --getbinpkg=n
      opts["--getbinpkg"] = True
    if "--getbinpkg" in opts or "--usepkgonly" in opts:
      opts.setdefault("--usepkg", True)
    if "--fetch-all-uri" in opts:
      opts.setdefault("--fetchonly", True)
    if "--skipfirst" in opts:
      opts.setdefault("--resume", True)
    if "--buildpkgonly" in opts:
      # --buildpkgonly will not merge anything, so it overrides all binary
      # package options.
      for opt in ("--getbinpkg", "--getbinpkgonly",
                  "--usepkg", "--usepkgonly"):
        opts.pop(opt, None)
    if (settings.get("PORTAGE_DEBUG", "") == "1" and
        "python-trace" in settings.features):
      portage.debug.set_trace(True)

    # Complain about unsupported options
    for opt in ("--ask", "--ask-enter-invalid", "--resume", "--skipfirst"):
      if opt in opts:
        print("%s is not supported by parallel_emerge" % opt)
        sys.exit(1)

    # Make emerge specific adjustments to the config (e.g. colors!)
    adjust_configs(opts, trees)

    # Save our configuration so far in the emerge object
    emerge = self.emerge
    emerge.action, emerge.opts = action, opts
    emerge.settings, emerge.trees, emerge.mtimedb = settings, trees, mtimedb
    emerge.cmdline_packages = cmdline_packages
    root = settings["ROOT"]
    emerge.root_config = trees[root]["root_config"]

    if "--usepkg" in opts:
      emerge.trees[root]["bintree"].populate("--getbinpkg" in opts)

  def CreateDepgraph(self, emerge, packages):
    """Create an emerge depgraph object."""
    # Setup emerge options.
    emerge_opts = emerge.opts.copy()

    # Ask portage to build a dependency graph with the options we specified
    # above.
    params = create_depgraph_params(emerge_opts, emerge.action)
    success, depgraph, favorites = backtrack_depgraph(
        emerge.settings, emerge.trees, emerge_opts, params, emerge.action,
        packages, emerge.spinner)
    emerge.depgraph = depgraph

    # Is it impossible to honor the user's request? Bail!
    if not success:
      depgraph.display_problems()
      sys.exit(1)

    emerge.depgraph = depgraph
    emerge.favorites = favorites

    # Prime and flush emerge caches.
    root = emerge.settings["ROOT"]
    vardb = emerge.trees[root]["vartree"].dbapi
    if "--pretend" not in emerge.opts:
      vardb.counter_tick()
    vardb.flush_cache()

  def GenDependencyTree(self):
    """Get dependency tree info from emerge.

    Returns:
      Dependency tree
    """
    start = time.time()

    emerge = self.emerge

    # Create a list of packages to merge
    packages = set(emerge.cmdline_packages[:])

    # Tell emerge to be quiet. We print plenty of info ourselves so we don't
    # need any extra output from portage.
    portage.util.noiselimit = -1

    # My favorite feature: The silent spinner. It doesn't spin. Ever.
    # I'd disable the colors by default too, but they look kind of cool.
    emerge.spinner = stdout_spinner()
    emerge.spinner.update = emerge.spinner.update_quiet

    if "--quiet" not in emerge.opts:
      print("Calculating deps...")

    self.CreateDepgraph(emerge, packages)
    depgraph = emerge.depgraph

    # Build our own tree from the emerge digraph.
    deps_tree = {}
    # pylint: disable=W0212
    digraph = depgraph._dynamic_config.digraph
    root = emerge.settings["ROOT"]
    final_db = depgraph._dynamic_config._filtered_trees[root]['graph_db']
    for node, node_deps in digraph.nodes.items():
      # Calculate dependency packages that need to be installed first. Each
      # child on the digraph is a dependency. The "operation" field specifies
      # what we're doing (e.g. merge, uninstall, etc.). The "priorities" array
      # contains the type of dependency (e.g. build, runtime, runtime_post,
      # etc.)
      #
      # Portage refers to the identifiers for packages as a CPV. This acronym
      # stands for Category/Package/Version.
      #
      # Here's an example CPV: chromeos-base/power_manager-0.0.1-r1
      # Split up, this CPV would be:
      #   C -- Category: chromeos-base
      #   P -- Package: power_manager
      #   V -- Version: 0.0.1-r1
      #
      # We just refer to CPVs as packages here because it's easier.
      deps = {}
      for child, priorities in node_deps[0].items():
        if isinstance(child, Package) and child.root == root:
          cpv = str(child.cpv)
          action = str(child.operation)

          # If we're uninstalling a package, check whether Portage is
          # installing a replacement. If so, just depend on the installation
          # of the new package, because the old package will automatically
          # be uninstalled at that time.
          if action == "uninstall":
            for pkg in final_db.match_pkgs(child.slot_atom):
              cpv = str(pkg.cpv)
              action = "merge"
              break

          deps[cpv] = dict(action=action,
                           deptypes=[str(x) for x in priorities],
                           deps={})

      # We've built our list of deps, so we can add our package to the tree.
      if isinstance(node, Package) and node.root == root:
        deps_tree[str(node.cpv)] = dict(action=str(node.operation),
                                        deps=deps)

    # Ask portage for its install plan, so that we can only throw out
    # dependencies that portage throws out.
    deps_info = {}
    for pkg in depgraph.altlist():
      if isinstance(pkg, Package):
        assert pkg.root == root
        self.package_db[pkg.cpv] = pkg

        # Save off info about the package
        deps_info[str(pkg.cpv)] = {"idx": len(deps_info)}

    seconds = time.time() - start
    if "--quiet" not in emerge.opts:
      print("Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60))

    return deps_tree, deps_info

  def PrintTree(self, deps, depth=""):
    """Print the deps we have seen in the emerge output.

    Args:
      deps: Dependency tree structure.
      depth: Allows printing the tree recursively, with indentation.
    """
    for entry in sorted(deps):
      action = deps[entry]["action"]
      print("%s %s (%s)" % (depth, entry, action))
      self.PrintTree(deps[entry]["deps"], depth=depth + " ")

  def GenDependencyGraph(self, deps_tree, deps_info):
    """Generate a doubly linked dependency graph.

    Args:
      deps_tree: Dependency tree structure.
      deps_info: More details on the dependencies.

    Returns:
      Deps graph in the form of a dict of packages, with each package
      specifying a "needs" list and "provides" list.
    """
    emerge = self.emerge

    # deps_map is the actual dependency graph.
    #
    # Each package specifies a "needs" list and a "provides" list. The "needs"
    # list indicates which packages we depend on. The "provides" list
    # indicates the reverse dependencies -- what packages need us.
    #
    # We also provide some other information in the dependency graph:
    # - action: What we're planning on doing with this package. Generally,
    #   "merge", "nomerge", or "uninstall"
    deps_map = {}
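    # Illustrative sketch of a single deps_map entry (package names
    # hypothetical); the fields mirror default_pkg in ReverseTree below:
    #   deps_map["sys-apps/foo-1.0"] = {
    #       "needs": {"sys-libs/bar-2.1": "buildtime/runtime"},
    #       "provides": set(["app-misc/baz-0.5"]),
    #       "action": "merge", "nodeps": False, "binary": False,
    #   }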

    def ReverseTree(packages):
      """Convert tree to digraph.

      Take the tree of package -> requirements and reverse it to a digraph of
      buildable packages -> packages they unblock.

      Args:
        packages: Tree(s) of dependencies.

      Returns:
        None; the unsanitized digraph is built up in the enclosing deps_map.
      """
      binpkg_phases = set(["setup", "preinst", "postinst"])
      needed_dep_types = set(["blocker", "buildtime", "buildtime_slot_op",
                              "runtime", "runtime_slot_op"])
      ignored_dep_types = set(["ignored", "optional", "runtime_post", "soft"])
      all_dep_types = ignored_dep_types | needed_dep_types
      for pkg in packages:

        # Create an entry for the package
        action = packages[pkg]["action"]
        default_pkg = {"needs": {}, "provides": set(), "action": action,
                       "nodeps": False, "binary": False}
        this_pkg = deps_map.setdefault(pkg, default_pkg)

        if pkg in deps_info:
          this_pkg["idx"] = deps_info[pkg]["idx"]

        # If a package doesn't have any defined phases that might use the
        # dependent packages (i.e. pkg_setup, pkg_preinst, or pkg_postinst),
        # we can install this package before its deps are ready.
        emerge_pkg = self.package_db.get(pkg)
        if emerge_pkg and emerge_pkg.type_name == "binary":
          this_pkg["binary"] = True
          defined_phases = emerge_pkg.defined_phases
          defined_binpkg_phases = binpkg_phases.intersection(defined_phases)
          if not defined_binpkg_phases:
            this_pkg["nodeps"] = True

        # Create entries for dependencies of this package first.
        ReverseTree(packages[pkg]["deps"])

        # Add dependencies to this package.
        for dep, dep_item in packages[pkg]["deps"].iteritems():
          # We only need to enforce strict ordering of dependencies if the
          # dependency is a blocker, or is a buildtime or runtime dependency.
          # (I.e., ignored, optional, and runtime_post dependencies don't
          # depend on ordering.)
          dep_types = dep_item["deptypes"]
          if needed_dep_types.intersection(dep_types):
            deps_map[dep]["provides"].add(pkg)
            this_pkg["needs"][dep] = "/".join(dep_types)

          # Verify we processed all appropriate dependency types.
          unknown_dep_types = set(dep_types) - all_dep_types
          if unknown_dep_types:
            print("Unknown dependency types found:")
            print("  %s -> %s (%s)" % (pkg, dep, "/".join(unknown_dep_types)))
            sys.exit(1)

          # If there's a blocker, Portage may need to move files from one
          # package to another, which requires editing the CONTENTS files of
          # both packages. To avoid race conditions while editing this file,
          # the two packages must not be installed in parallel, so we can't
          # safely ignore dependencies. See http://crosbug.com/19328
          if "blocker" in dep_types:
            this_pkg["nodeps"] = False

    def FindCycles():
      """Find cycles in the dependency tree.

      Returns:
        A dict mapping cyclic packages to a dict of the deps that cause
        cycles. For each dep that causes cycles, it returns an example
        traversal of the graph that shows the cycle.
      """

      def FindCyclesAtNode(pkg, cycles, unresolved, resolved):
        """Find cycles in cyclic dependencies starting at specified package.

        Args:
          pkg: Package identifier.
          cycles: A dict mapping cyclic packages to a dict of the deps that
            cause cycles. For each dep that causes cycles, it returns an
            example traversal of the graph that shows the cycle.
          unresolved: Nodes that have been visited but are not fully processed.
          resolved: Nodes that have been visited and are fully processed.
        """
        pkg_cycles = cycles.get(pkg)
        if pkg in resolved and not pkg_cycles:
          # If we already looked at this package, and found no cyclic
          # dependencies, we can stop now.
          return
        unresolved.append(pkg)
        for dep in deps_map[pkg]["needs"]:
          if dep in unresolved:
            idx = unresolved.index(dep)
            mycycle = unresolved[idx:] + [dep]
            for i in xrange(len(mycycle) - 1):
              pkg1, pkg2 = mycycle[i], mycycle[i+1]
              cycles.setdefault(pkg1, {}).setdefault(pkg2, mycycle)
          elif not pkg_cycles or dep not in pkg_cycles:
            # Looks like we haven't seen this edge before.
            FindCyclesAtNode(dep, cycles, unresolved, resolved)
        unresolved.pop()
        resolved.add(pkg)

      cycles, unresolved, resolved = {}, [], set()
      for pkg in deps_map:
        FindCyclesAtNode(pkg, cycles, unresolved, resolved)
      return cycles

    def RemoveUnusedPackages():
      """Remove installed packages, propagating dependencies."""
      # Schedule packages that aren't on the install list for removal
      rm_pkgs = set(deps_map.keys()) - set(deps_info.keys())

      # Remove the packages we don't want, simplifying the graph and making
      # it easier for us to crack cycles.
      for pkg in sorted(rm_pkgs):
        this_pkg = deps_map[pkg]
        needs = this_pkg["needs"]
        provides = this_pkg["provides"]
        for dep in needs:
          dep_provides = deps_map[dep]["provides"]
          dep_provides.update(provides)
          dep_provides.discard(pkg)
          dep_provides.discard(dep)
        for target in provides:
          target_needs = deps_map[target]["needs"]
          target_needs.update(needs)
          target_needs.pop(pkg, None)
          target_needs.pop(target, None)
        del deps_map[pkg]

    def PrintCycleBreak(basedep, dep, mycycle):
      """Print details about a cycle that we are planning on breaking.

      We are breaking a cycle where dep needs basedep. mycycle is an
      example cycle which contains dep -> basedep.
      """

      needs = deps_map[dep]["needs"]
      depinfo = needs.get(basedep, "deleted")

      # It's OK to swap install order for blockers, as long as the two
      # packages aren't installed in parallel. If there is a cycle, then
      # we know the packages depend on each other already, so we can drop the
      # blocker safely without printing a warning.
      if depinfo == "blocker":
        return

      # Notify the user that we're breaking a cycle.
      print("Breaking %s -> %s (%s)" % (dep, basedep, depinfo))

      # Show cycle.
      for i in xrange(len(mycycle) - 1):
        pkg1, pkg2 = mycycle[i], mycycle[i+1]
        needs = deps_map[pkg1]["needs"]
        depinfo = needs.get(pkg2, "deleted")
        if pkg1 == dep and pkg2 == basedep:
          depinfo = depinfo + ", deleting"
        print("  %s -> %s (%s)" % (pkg1, pkg2, depinfo))

    def SanitizeTree():
      """Remove circular dependencies.

      We prune all dependencies involved in cycles that go against the emerge
      ordering. This has a nice property: we're guaranteed to merge
      dependencies in the same order that portage does.

      Because we don't treat any dependencies as "soft" unless they're killed
      by a cycle, we pay attention to a larger number of dependencies when
      merging. This hurts performance a bit, but helps reliability.
      """
      start = time.time()
      cycles = FindCycles()
      while cycles:
        for dep, mycycles in cycles.iteritems():
          for basedep, mycycle in mycycles.iteritems():
            if deps_info[basedep]["idx"] >= deps_info[dep]["idx"]:
              if "--quiet" not in emerge.opts:
                PrintCycleBreak(basedep, dep, mycycle)
              del deps_map[dep]["needs"][basedep]
              deps_map[basedep]["provides"].remove(dep)
        cycles = FindCycles()
      seconds = time.time() - start
      if "--quiet" not in emerge.opts and seconds >= 0.1:
        print("Tree sanitized in %dm%.1fs" % (seconds / 60, seconds % 60))

    def FindRecursiveProvides(pkg, seen):
      """Find all nodes that require a particular package.

      Assumes that graph is acyclic.

      Args:
        pkg: Package identifier.
        seen: Nodes that have been visited so far.
      """
      if pkg in seen:
        return
      seen.add(pkg)
      info = deps_map[pkg]
      info["tprovides"] = info["provides"].copy()
      for dep in info["provides"]:
        FindRecursiveProvides(dep, seen)
        info["tprovides"].update(deps_map[dep]["tprovides"])

    ReverseTree(deps_tree)

    # We need to remove unused packages so that we can use the dependency
    # ordering of the install process to show us what cycles to crack.
    RemoveUnusedPackages()
    SanitizeTree()
    seen = set()
    for pkg in deps_map:
      FindRecursiveProvides(pkg, seen)
    return deps_map

  def PrintInstallPlan(self, deps_map):
    """Print an emerge-style install plan.

    The install plan lists what packages we're installing, in order.
    It's useful for understanding what parallel_emerge is doing.

    Args:
      deps_map: The dependency graph.
    """

    def InstallPlanAtNode(target, deps_map):
      nodes = []
      nodes.append(target)
      for dep in deps_map[target]["provides"]:
        del deps_map[dep]["needs"][target]
        if not deps_map[dep]["needs"]:
          nodes.extend(InstallPlanAtNode(dep, deps_map))
      return nodes

    deps_map = copy.deepcopy(deps_map)
    install_plan = []
    plan = set()
    for target, info in deps_map.iteritems():
      if not info["needs"] and target not in plan:
        for item in InstallPlanAtNode(target, deps_map):
          plan.add(item)
          install_plan.append(self.package_db[item])

    for pkg in plan:
      del deps_map[pkg]

    if deps_map:
      print("Cyclic dependencies:", " ".join(deps_map))
      PrintDepsMap(deps_map)
      sys.exit(1)

    self.emerge.depgraph.display(install_plan)


def PrintDepsMap(deps_map):
828 """Print dependency graph, for each package list it's prerequisites."""
  for i in sorted(deps_map):
    print("%s: (%s) needs" % (i, deps_map[i]["action"]))
    needs = deps_map[i]["needs"]
    for j in sorted(needs):
      print("  %s" % (j))
    if not needs:
      print("   no dependencies")


class EmergeJobState(object):
  """Structure describing the state of an emerge job."""

  __slots__ = ["done", "filename", "last_notify_timestamp", "last_output_seek",
               "last_output_timestamp", "pkgname", "retcode", "start_timestamp",
               "target", "fetch_only", "unpack_only"]

  def __init__(self, target, pkgname, done, filename, start_timestamp,
               retcode=None, fetch_only=False, unpack_only=False):

    # The full name of the target we're building (e.g.
    # virtual/target-os-1-r60)
    self.target = target

    # The short name of the target we're building (e.g. target-os-1-r60)
    self.pkgname = pkgname

    # Whether the job is done. (True if the job is done; False otherwise.)
    self.done = done

    # The filename where output is currently stored.
    self.filename = filename

    # The timestamp of the last time we printed the name of the log file. We
    # print this at the beginning of the job, so this starts at
    # start_timestamp.
    self.last_notify_timestamp = start_timestamp

    # The location (in bytes) of the end of the last complete line we printed.
    # This starts off at zero. We use this to jump to the right place when we
    # print output from the same ebuild multiple times.
    self.last_output_seek = 0

    # The timestamp of the last time we printed output. Since we haven't
    # printed output yet, this starts at zero.
    self.last_output_timestamp = 0

    # The return code of our job, if the job is actually finished.
    self.retcode = retcode

    # Was this just a fetch job?
    self.fetch_only = fetch_only

    # The timestamp when our job started.
    self.start_timestamp = start_timestamp

    # No emerge, only unpack packages.
    self.unpack_only = unpack_only


def KillHandler(_signum, _frame):
  # Kill self and all subprocesses.
  os.killpg(0, signal.SIGKILL)


def SetupWorkerSignals():
  def ExitHandler(_signum, _frame):
    # Set KILLED flag.
    KILLED.set()

    # Remove our signal handlers so we don't get called recursively.
    signal.signal(signal.SIGINT, KillHandler)
    signal.signal(signal.SIGTERM, KillHandler)

  # Ensure that we exit quietly and cleanly, if possible, when we receive
  # SIGTERM or SIGINT signals. By default, when the user hits CTRL-C, all
  # of the child processes will print details about KeyboardInterrupt
  # exceptions, which isn't very helpful.
  signal.signal(signal.SIGINT, ExitHandler)
  signal.signal(signal.SIGTERM, ExitHandler)


def EmergeProcess(output, target, *args, **kwargs):
  """Merge a package in a subprocess.

  Args:
    output: Temporary file to write output.
    target: The package we'll be processing (for display purposes).
    *args: Arguments to pass to Scheduler constructor.
    **kwargs: Keyword arguments to pass to Scheduler constructor.

  Returns:
    The exit code returned by the subprocess.
  """
  pid = os.fork()
  if pid == 0:
    try:
      proctitle.settitle('EmergeProcess', target)

      # Sanity checks.
      if sys.stdout.fileno() != 1:
        raise Exception("sys.stdout.fileno() != 1")
      if sys.stderr.fileno() != 2:
        raise Exception("sys.stderr.fileno() != 2")

      # - Redirect 1 (stdout) and 2 (stderr) at our temporary file.
      # - Redirect 0 to point at sys.stdin. In this case, sys.stdin
      #   points at a file reading os.devnull, because multiprocessing mucks
      #   with sys.stdin.
      # - Leave the sys.stdin and output filehandles alone.
      fd_pipes = {0: sys.stdin.fileno(),
                  1: output.fileno(),
                  2: output.fileno(),
                  sys.stdin.fileno(): sys.stdin.fileno(),
                  output.fileno(): output.fileno()}
      # pylint: disable=W0212
      portage.process._setup_pipes(fd_pipes, close_fds=False)

      # Portage doesn't like when sys.stdin.fileno() != 0, so point sys.stdin
      # at the filehandle we just created in _setup_pipes.
      if sys.stdin.fileno() != 0:
        sys.__stdin__ = sys.stdin = os.fdopen(0, "r")

      scheduler = Scheduler(*args, **kwargs)

      # Enable blocker handling even though we're in --nodeps mode. This
      # allows us to unmerge the blocker after we've merged the replacement.
      scheduler._opts_ignore_blockers = frozenset()

      # Actually do the merge.
      retval = scheduler.merge()

    # We catch all exceptions here (including SystemExit, KeyboardInterrupt,
    # etc) so as to ensure that we don't confuse the multiprocessing module,
    # which expects that all forked children exit with os._exit().
    # pylint: disable=W0702
    except:
      traceback.print_exc(file=output)
      retval = 1
    sys.stdout.flush()
    sys.stderr.flush()
    output.flush()
    # pylint: disable=W0212
    os._exit(retval)
  else:
    # Return the exit code of the subprocess.
    return os.waitpid(pid, 0)[1]


def UnpackPackage(pkg_state):
  """Unpacks package described by pkg_state.

  Args:
    pkg_state: EmergeJobState object describing target.

  Returns:
    Exit code returned by subprocess.
  """
  pkgdir = os.environ.get("PKGDIR",
                          os.path.join(os.environ["SYSROOT"], "packages"))
  root = os.environ.get("ROOT", os.environ["SYSROOT"])
  path = os.path.join(pkgdir, pkg_state.target + ".tbz2")
  comp = cros_build_lib.FindCompressor(cros_build_lib.COMP_BZIP2)
  cmd = [comp, "-dc"]
  if comp.endswith("pbzip2"):
    cmd.append("--ignore-trailing-garbage=1")
  cmd.append(path)

  result = cros_build_lib.RunCommand(cmd, cwd=root, stdout_to_pipe=True,
                                     print_cmd=False, error_code_ok=True)

  # If we were not successful, return now and don't attempt untar.
  if result.returncode:
    return result.returncode

  cmd = ["sudo", "tar", "-xf", "-", "-C", root]
  result = cros_build_lib.RunCommand(cmd, cwd=root, input=result.output,
                                     print_cmd=False, error_code_ok=True)

  return result.returncode


def EmergeWorker(task_queue, job_queue, emerge, package_db, fetch_only=False,
                 unpack_only=False):
  """This worker emerges any packages given to it on the task_queue.

  Args:
    task_queue: The queue of tasks for this worker to do.
    job_queue: The queue of results from the worker.
    emerge: An EmergeData() object.
    package_db: A dict, mapping package ids to portage Package objects.
    fetch_only: A bool, indicating if we should just fetch the target.
    unpack_only: A bool, indicating if we should just unpack the target.

  It expects package identifiers to be passed to it via task_queue. When a
  merge starts or finishes, we push EmergeJobState objects to the job_queue;
  each job's output is stored in the file named by the EmergeJobState's
  filename field.
1026 """
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001027 if fetch_only:
1028 mode = 'fetch'
1029 elif unpack_only:
1030 mode = 'unpack'
1031 else:
1032 mode = 'emerge'
1033 proctitle.settitle('EmergeWorker', mode, '[idle]')
David Jamesfcb70ef2011-02-02 16:02:30 -08001034
1035 SetupWorkerSignals()
1036 settings, trees, mtimedb = emerge.settings, emerge.trees, emerge.mtimedb
David Jamesdeebd692011-05-09 17:02:52 -07001037
1038 # Disable flushing of caches to save on I/O.
David James7a1ea4b2011-10-13 15:06:41 -07001039 root = emerge.settings["ROOT"]
1040 vardb = emerge.trees[root]["vartree"].dbapi
Mike Frysingere56debd2014-11-19 01:54:36 -05001041 vardb._flush_cache_enabled = False # pylint: disable=protected-access
Brian Harring0be85c62012-03-17 19:52:12 -07001042 bindb = emerge.trees[root]["bintree"].dbapi
1043 # Might be a set, might be a list, might be None; no clue, just use shallow
1044 # copy to ensure we can roll it back.
Don Garrett25f309a2014-03-19 14:02:12 -07001045 # pylint: disable=W0212
Brian Harring0be85c62012-03-17 19:52:12 -07001046 original_remotepkgs = copy.copy(bindb.bintree._remotepkgs)
David Jamesdeebd692011-05-09 17:02:52 -07001047
David Jamesfcb70ef2011-02-02 16:02:30 -08001048 opts, spinner = emerge.opts, emerge.spinner
1049 opts["--nodeps"] = True
Brian Harring0be85c62012-03-17 19:52:12 -07001050 if fetch_only:
1051 opts["--fetchonly"] = True
1052
David Jamesfcb70ef2011-02-02 16:02:30 -08001053 while True:
1054 # Wait for a new item to show up on the queue. This is a blocking wait,
1055 # so if there's nothing to do, we just sit here.
Brian Harring0be85c62012-03-17 19:52:12 -07001056 pkg_state = task_queue.get()
1057 if pkg_state is None:
David Jamesfcb70ef2011-02-02 16:02:30 -08001058 # If target is None, this means that the main thread wants us to quit.
1059 # The other workers need to exit too, so we'll push the message back on
1060 # to the queue so they'll get it too.
Brian Harring0be85c62012-03-17 19:52:12 -07001061 task_queue.put(None)
David Jamesfcb70ef2011-02-02 16:02:30 -08001062 return
David James7358d032011-05-19 10:40:03 -07001063 if KILLED.is_set():
1064 return
1065
Brian Harring0be85c62012-03-17 19:52:12 -07001066 target = pkg_state.target
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001067 proctitle.settitle('EmergeWorker', mode, target)
Brian Harring0be85c62012-03-17 19:52:12 -07001068
David Jamesfcb70ef2011-02-02 16:02:30 -08001069 db_pkg = package_db[target]
Brian Harring0be85c62012-03-17 19:52:12 -07001070
1071 if db_pkg.type_name == "binary":
1072 if not fetch_only and pkg_state.fetched_successfully:
        # Ensure portage doesn't think our pkg is remote -- else it'll force
        # a redownload of it (even if the on-disk file is fine). In-memory
        # caching basically, implemented dumbly.
        bindb.bintree._remotepkgs = None
      else:
        bindb.bintree._remotepkgs = original_remotepkgs

    db_pkg.root_config = emerge.root_config
    install_list = [db_pkg]
    pkgname = db_pkg.pf
    output = tempfile.NamedTemporaryFile(prefix=pkgname + "-", delete=False)
    os.chmod(output.name, 0o644)
    start_timestamp = time.time()
    job = EmergeJobState(target, pkgname, False, output.name, start_timestamp,
                         fetch_only=fetch_only, unpack_only=unpack_only)
    job_queue.put(job)
    if "--pretend" in opts:
      retcode = 0
    else:
      try:
        emerge.scheduler_graph.mergelist = install_list
        if unpack_only:
          retcode = UnpackPackage(pkg_state)
        else:
          retcode = EmergeProcess(output, target, settings, trees, mtimedb,
                                  opts, spinner, favorites=emerge.favorites,
                                  graph_config=emerge.scheduler_graph)
      except Exception:
        traceback.print_exc(file=output)
        retcode = 1
      output.close()

    if KILLED.is_set():
      return

    job = EmergeJobState(target, pkgname, True, output.name, start_timestamp,
                         retcode, fetch_only=fetch_only,
                         unpack_only=unpack_only)
    job_queue.put(job)

    # Set the title back to idle as the multiprocess pool won't destroy us;
    # when another job comes up, it'll re-use this process.
    proctitle.settitle('EmergeWorker', mode, '[idle]')


class LinePrinter(object):
  """Helper object to print a single line."""

  def __init__(self, line):
    self.line = line

  def Print(self, _seek_locations):
    print(self.line)


class JobPrinter(object):
  """Helper object to print output of a job."""

  def __init__(self, job, unlink=False):
    """Print output of job.

    If unlink is True, unlink the job output file when done.
    """
    self.current_time = time.time()
    self.job = job
    self.unlink = unlink

  def Print(self, seek_locations):

    job = self.job

    # Calculate how long the job has been running.
    seconds = self.current_time - job.start_timestamp

    # Note that we've printed out the job so far.
    job.last_output_timestamp = self.current_time

    # Note that we're starting the job
    info = "job %s (%dm%.1fs)" % (job.pkgname, seconds / 60, seconds % 60)
    last_output_seek = seek_locations.get(job.filename, 0)
    if last_output_seek:
      print("=== Continue output for %s ===" % info)
    else:
      print("=== Start output for %s ===" % info)

    # Print actual output from job
    f = codecs.open(job.filename, encoding='utf-8', errors='replace')
    f.seek(last_output_seek)
    prefix = job.pkgname + ":"
    for line in f:

      # Save off our position in the file
      if line and line[-1] == "\n":
        last_output_seek = f.tell()
        line = line[:-1]

      # Print our line
      print(prefix, line.encode('utf-8', 'replace'))
    f.close()

    # Save our last spot in the file so that we don't print out the same
    # location twice.
    seek_locations[job.filename] = last_output_seek

    # Note end of output section
    if job.done:
      print("=== Complete: %s ===" % info)
    else:
      print("=== Still running: %s ===" % info)

    if self.unlink:
      os.unlink(job.filename)


def PrintWorker(queue):
  """A worker that prints stuff to the screen as requested."""
  proctitle.settitle('PrintWorker')

  def ExitHandler(_signum, _frame):
    # Set KILLED flag.
    KILLED.set()

    # Switch to default signal handlers so that we'll die after two signals.
    signal.signal(signal.SIGINT, KillHandler)
    signal.signal(signal.SIGTERM, KillHandler)

  # Don't exit on the first SIGINT / SIGTERM, because the parent worker will
  # handle it and tell us when we need to exit.
  signal.signal(signal.SIGINT, ExitHandler)
  signal.signal(signal.SIGTERM, ExitHandler)

  # seek_locations is a map indicating the position we are at in each file.
  # It starts off empty, but is set by the various Print jobs as we go along
  # to indicate where we left off in each file.
  seek_locations = {}
  while True:
    try:
      job = queue.get()
      if job:
        job.Print(seek_locations)
        sys.stdout.flush()
      else:
        break
    except IOError as ex:
      if ex.errno == errno.EINTR:
        # Looks like we received a signal. Keep printing.
        continue
      raise


class TargetState(object):
Don Garrett25f309a2014-03-19 14:02:12 -07001224 """Structure describing the state of a build target."""
Brian Harring867e2362012-03-17 04:05:17 -07001225
Brian Harring0be85c62012-03-17 19:52:12 -07001226 __slots__ = ("target", "info", "score", "prefetched", "fetched_successfully")
Brian Harring867e2362012-03-17 04:05:17 -07001227
David James321490a2012-12-17 12:05:56 -08001228 def __init__(self, target, info):
Brian Harring867e2362012-03-17 04:05:17 -07001229 self.target, self.info = target, info
Brian Harring0be85c62012-03-17 19:52:12 -07001230 self.fetched_successfully = False
1231 self.prefetched = False
David James321490a2012-12-17 12:05:56 -08001232 self.score = None
Brian Harring867e2362012-03-17 04:05:17 -07001233 self.update_score()
1234
1235 def __cmp__(self, other):
1236 return cmp(self.score, other.score)
1237
1238 def update_score(self):
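# heapq keeps the smallest tuple at the front, so lower scores are scheduled
# first: prefer targets with more 'tprovides' entries, fewer unmet 'needs',
# an available binary package, more 'provides', and a lower 'idx'; the
# target name is the final tie-breaker.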
1239 self.score = (
1240 -len(self.info["tprovides"]),
Brian Harring0be85c62012-03-17 19:52:12 -07001241 len(self.info["needs"]),
Brian Harring11c5eeb2012-03-18 11:02:39 -07001242 not self.info["binary"],
Brian Harring867e2362012-03-17 04:05:17 -07001243 -len(self.info["provides"]),
1244 self.info["idx"],
1245 self.target,
1246 )
1247
1248
1249class ScoredHeap(object):
Don Garrett25f309a2014-03-19 14:02:12 -07001250 """Implementation of a general purpose scored heap."""
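# A rough usage sketch, mirroring how EmergeQueue drives this class below
# (the deps_map entries must carry the 'tprovides', 'needs', 'binary',
# 'provides' and 'idx' fields consumed by TargetState.update_score):
#
#   heap = ScoredHeap(TargetState(pkg, info)
#                     for pkg, info in deps_map.iteritems())
#   while heap:
#     state = heap.get()   # lowest score, i.e. highest priority, first
#     ...hand state.target to a worker queue...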
Brian Harring867e2362012-03-17 04:05:17 -07001251
Brian Harring0be85c62012-03-17 19:52:12 -07001252 __slots__ = ("heap", "_heap_set")
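# 'heap' holds the TargetState objects in heap order; '_heap_set' mirrors
# their .target names so that __contains__ checks are O(1).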
1253
Brian Harring867e2362012-03-17 04:05:17 -07001254 def __init__(self, initial=()):
Brian Harring0be85c62012-03-17 19:52:12 -07001255 self.heap = list()
1256 self._heap_set = set()
1257 if initial:
1258 self.multi_put(initial)
Brian Harring867e2362012-03-17 04:05:17 -07001259
1260 def get(self):
Brian Harring0be85c62012-03-17 19:52:12 -07001261 item = heapq.heappop(self.heap)
1262 self._heap_set.remove(item.target)
1263 return item
Brian Harring867e2362012-03-17 04:05:17 -07001264
Brian Harring0be85c62012-03-17 19:52:12 -07001265 def put(self, item):
1266 if not isinstance(item, TargetState):
1267 raise ValueError("Item %r isn't a TargetState" % (item,))
1268 heapq.heappush(self.heap, item)
1269 self._heap_set.add(item.target)
Brian Harring867e2362012-03-17 04:05:17 -07001270
Brian Harring0be85c62012-03-17 19:52:12 -07001271 def multi_put(self, sequence):
1272 sequence = list(sequence)
1273 self.heap.extend(sequence)
1274 self._heap_set.update(x.target for x in sequence)
Brian Harring867e2362012-03-17 04:05:17 -07001275 self.sort()
1276
David James5c9996d2012-03-24 10:50:46 -07001277 def sort(self):
1278 heapq.heapify(self.heap)
1279
Brian Harring0be85c62012-03-17 19:52:12 -07001280 def __contains__(self, target):
1281 return target in self._heap_set
1282
1283 def __nonzero__(self):
1284 return bool(self.heap)
1285
Brian Harring867e2362012-03-17 04:05:17 -07001286 def __len__(self):
1287 return len(self.heap)
1288
1289
David Jamesfcb70ef2011-02-02 16:02:30 -08001290class EmergeQueue(object):
1291 """Class to schedule emerge jobs according to a dependency graph."""
1292
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001293 def __init__(self, deps_map, emerge, package_db, show_output, unpack_only,
1294 max_retries):
David Jamesfcb70ef2011-02-02 16:02:30 -08001295 # Store the dependency graph.
1296 self._deps_map = deps_map
Brian Harring0be85c62012-03-17 19:52:12 -07001297 self._state_map = {}
David Jamesfcb70ef2011-02-02 16:02:30 -08001298 # Initialize the running queue to empty
Brian Harring0be85c62012-03-17 19:52:12 -07001299 self._build_jobs = {}
1300 self._build_ready = ScoredHeap()
1301 self._fetch_jobs = {}
1302 self._fetch_ready = ScoredHeap()
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001303 self._unpack_jobs = {}
1304 self._unpack_ready = ScoredHeap()
David Jamesfcb70ef2011-02-02 16:02:30 -08001305 # List of total package installs represented in deps_map.
1306 install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"]
1307 self._total_jobs = len(install_jobs)
1308 self._show_output = show_output
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001309 self._unpack_only = unpack_only
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001310 self._max_retries = max_retries
David Jamesfcb70ef2011-02-02 16:02:30 -08001311
1312 if "--pretend" in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001313 print("Skipping merge because of --pretend mode.")
David Jamesfcb70ef2011-02-02 16:02:30 -08001314 sys.exit(0)
1315
David Jamesaaf49e42014-04-24 09:40:05 -07001316 # Set up a session so we can easily terminate all children.
1317 self._SetupSession()
David James7358d032011-05-19 10:40:03 -07001318
David Jamesfcb70ef2011-02-02 16:02:30 -08001319 # Set up the scheduler graph object. This is used by the child processes
1320 # to help schedule jobs.
1321 emerge.scheduler_graph = emerge.depgraph.schedulerGraph()
1322
1323 # Calculate how many jobs we can run in parallel. We don't want to pass
1324 # the --jobs flag over to emerge itself, because that'll tell emerge to
1325 # hide its output, and said output is quite useful for debugging hung
1326 # jobs.
1327 procs = min(self._total_jobs,
1328 emerge.opts.pop("--jobs", multiprocessing.cpu_count()))
Nam T. Nguyenf7098b32015-05-08 11:04:48 -07001329 self._build_procs = self._unpack_procs = max(1, procs)
1330 # Fetch is IO bound, we can use more processes.
1331 self._fetch_procs = max(4, procs)
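# For example, with 8 CPUs, more than 8 pending jobs and no --jobs flag,
# this yields 8 build/unpack processes and 8 fetch processes; with only 2
# jobs it yields 2 and 4 respectively.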
David James8c7e5e32011-06-28 11:26:03 -07001332 self._load_avg = emerge.opts.pop("--load-average", None)
David Jamesfcb70ef2011-02-02 16:02:30 -08001333 self._job_queue = multiprocessing.Queue()
1334 self._print_queue = multiprocessing.Queue()
Brian Harring0be85c62012-03-17 19:52:12 -07001335
1336 self._fetch_queue = multiprocessing.Queue()
1337 args = (self._fetch_queue, self._job_queue, emerge, package_db, True)
1338 self._fetch_pool = multiprocessing.Pool(self._fetch_procs, EmergeWorker,
1339 args)
1340
1341 self._build_queue = multiprocessing.Queue()
1342 args = (self._build_queue, self._job_queue, emerge, package_db)
1343 self._build_pool = multiprocessing.Pool(self._build_procs, EmergeWorker,
1344 args)
1345
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001346 if self._unpack_only:
1347 # Unpack pool only required on unpack_only jobs.
1348 self._unpack_queue = multiprocessing.Queue()
1349 args = (self._unpack_queue, self._job_queue, emerge, package_db, False,
1350 True)
1351 self._unpack_pool = multiprocessing.Pool(self._unpack_procs, EmergeWorker,
1352 args)
1353
David Jamesfcb70ef2011-02-02 16:02:30 -08001354 self._print_worker = multiprocessing.Process(target=PrintWorker,
1355 args=[self._print_queue])
1356 self._print_worker.start()
1357
1358 # Initialize the retry queue and per-target failure counts to empty.
1359 self._retry_queue = []
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001360 self._failed_count = dict()
David Jamesfcb70ef2011-02-02 16:02:30 -08001361
David Jamesfcb70ef2011-02-02 16:02:30 -08001362 # Set up an exit handler so that we print nice messages if we are
1363 # terminated.
1364 self._SetupExitHandler()
1365
1366 # Schedule our jobs.
Brian Harring0be85c62012-03-17 19:52:12 -07001367 self._state_map.update(
1368 (pkg, TargetState(pkg, data)) for pkg, data in deps_map.iteritems())
1369 self._fetch_ready.multi_put(self._state_map.itervalues())
David Jamesfcb70ef2011-02-02 16:02:30 -08001370
David Jamesaaf49e42014-04-24 09:40:05 -07001371 def _SetupSession(self):
1372 """Set up a session so we can easily terminate all children."""
1373 # When we call os.setsid(), this sets up a session / process group for this
1374 # process and all children. These session groups are needed so that we can
1375 # easily kill all children (including processes launched by emerge) before
1376 # we exit.
1377 #
1378 # One unfortunate side effect of os.setsid() is that it blocks CTRL-C from
1379 # being received. To work around this, we only call os.setsid() in a forked
1380 # process, so that the parent can still watch for CTRL-C. The parent will
1381 # just sit around, watching for signals and propagating them to the child,
1382 # until the child exits.
1383 #
1384 # TODO(davidjames): It would be nice if we could replace this with cgroups.
1385 pid = os.fork()
1386 if pid == 0:
1387 os.setsid()
1388 else:
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001389 proctitle.settitle('SessionManager')
1390
David Jamesaaf49e42014-04-24 09:40:05 -07001391 def PropagateToChildren(signum, _frame):
1392 # Just propagate the signals down to the child. We'll exit when the
1393 # child does.
1394 try:
1395 os.kill(pid, signum)
1396 except OSError as ex:
1397 if ex.errno != errno.ESRCH:
1398 raise
1399 signal.signal(signal.SIGINT, PropagateToChildren)
1400 signal.signal(signal.SIGTERM, PropagateToChildren)
1401
1402 def StopGroup(_signum, _frame):
1403 # When we get stopped, stop the children.
1404 try:
1405 os.killpg(pid, signal.SIGSTOP)
1406 os.kill(0, signal.SIGSTOP)
1407 except OSError as ex:
1408 if ex.errno != errno.ESRCH:
1409 raise
1410 signal.signal(signal.SIGTSTP, StopGroup)
1411
1412 def ContinueGroup(_signum, _frame):
1413 # Launch the children again after being stopped.
1414 try:
1415 os.killpg(pid, signal.SIGCONT)
1416 except OSError as ex:
1417 if ex.errno != errno.ESRCH:
1418 raise
1419 signal.signal(signal.SIGCONT, ContinueGroup)
1420
1421 # Loop until the children exit. We exit with os._exit to be sure we
1422 # don't run any finalizers (those will be run by the child process).
1423 # pylint: disable=W0212
1424 while True:
1425 try:
1426 # Wait for the process to exit. When it does, exit with the return
1427 # value of the subprocess.
Mike Frysingere2d8f0d2014-11-01 13:09:26 -04001428 os._exit(process_util.GetExitStatus(os.waitpid(pid, 0)[1]))
David Jamesaaf49e42014-04-24 09:40:05 -07001429 except OSError as ex:
1430 if ex.errno == errno.EINTR:
1431 continue
1432 traceback.print_exc()
1433 os._exit(1)
1434 except BaseException:
1435 traceback.print_exc()
1436 os._exit(1)
1437
David Jamesfcb70ef2011-02-02 16:02:30 -08001438 def _SetupExitHandler(self):
1439
David James321490a2012-12-17 12:05:56 -08001440 def ExitHandler(signum, _frame):
David James7358d032011-05-19 10:40:03 -07001441 # Set KILLED flag.
1442 KILLED.set()
David Jamesfcb70ef2011-02-02 16:02:30 -08001443
1444 # Kill our signal handlers so we don't get called recursively
David James7358d032011-05-19 10:40:03 -07001445 signal.signal(signal.SIGINT, KillHandler)
1446 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -08001447
1448 # Print our current job status
Brian Harring0be85c62012-03-17 19:52:12 -07001449 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001450 if job:
1451 self._print_queue.put(JobPrinter(job, unlink=True))
1452
1453 # Notify the user that we are exiting
1454 self._Print("Exiting on signal %s" % signum)
David James7358d032011-05-19 10:40:03 -07001455 self._print_queue.put(None)
1456 self._print_worker.join()
David Jamesfcb70ef2011-02-02 16:02:30 -08001457
1458 # Kill child threads, then exit.
David James7358d032011-05-19 10:40:03 -07001459 os.killpg(0, signal.SIGKILL)
David Jamesfcb70ef2011-02-02 16:02:30 -08001460 sys.exit(1)
1461
1462 # Print out job status when we are killed
1463 signal.signal(signal.SIGINT, ExitHandler)
1464 signal.signal(signal.SIGTERM, ExitHandler)
1465
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001466 def _ScheduleUnpack(self, pkg_state):
1467 self._unpack_jobs[pkg_state.target] = None
1468 self._unpack_queue.put(pkg_state)
1469
Brian Harring0be85c62012-03-17 19:52:12 -07001470 def _Schedule(self, pkg_state):
David Jamesfcb70ef2011-02-02 16:02:30 -08001471 # We maintain a tree of all deps; if this package doesn't need
David James8c7e5e32011-06-28 11:26:03 -07001472 # to be installed, just free up its children and continue.
David Jamesfcb70ef2011-02-02 16:02:30 -08001473 # It is possible to reinstall deps of deps, without reinstalling
1474 # first level deps, like so:
Mike Frysingerfd969312014-04-02 22:16:42 -04001475 # virtual/target-os (merge) -> eselect (nomerge) -> python (merge)
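# Returns True only when a build job is actually kicked off for this target.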
Brian Harring0be85c62012-03-17 19:52:12 -07001476 this_pkg = pkg_state.info
1477 target = pkg_state.target
1478 if pkg_state.info is not None:
1479 if this_pkg["action"] == "nomerge":
1480 self._Finish(target)
1481 elif target not in self._build_jobs:
1482 # Kick off the build if it's marked to be built.
1483 self._build_jobs[target] = None
1484 self._build_queue.put(pkg_state)
1485 return True
David Jamesfcb70ef2011-02-02 16:02:30 -08001486
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001487 def _ScheduleLoop(self, unpack_only=False):
1488 if unpack_only:
1489 ready_queue = self._unpack_ready
1490 jobs_queue = self._unpack_jobs
1491 procs = self._unpack_procs
1492 else:
1493 ready_queue = self._build_ready
1494 jobs_queue = self._build_jobs
1495 procs = self._build_procs
1496
David James8c7e5e32011-06-28 11:26:03 -07001497 # If the current load exceeds our desired load average, don't schedule
1498 # more than one job.
1499 if self._load_avg and os.getloadavg()[0] > self._load_avg:
1500 needed_jobs = 1
1501 else:
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001502 needed_jobs = procs
David James8c7e5e32011-06-28 11:26:03 -07001503
1504 # Schedule more jobs.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001505 while ready_queue and len(jobs_queue) < needed_jobs:
1506 state = ready_queue.get()
1507 if unpack_only:
1508 self._ScheduleUnpack(state)
1509 else:
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001510 if state.target not in self._failed_count:
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001511 self._Schedule(state)
David Jamesfcb70ef2011-02-02 16:02:30 -08001512
1513 def _Print(self, line):
1514 """Print a single line."""
1515 self._print_queue.put(LinePrinter(line))
1516
1517 def _Status(self):
1518 """Print status."""
1519 current_time = time.time()
1520 no_output = True
1521
1522 # Print interim output every minute if --show-output is used. Otherwise,
1523 # print notifications about running packages every 2 minutes, and print
1524 # full output for jobs that have been running for 60 minutes or more.
1525 if self._show_output:
1526 interval = 60
1527 notify_interval = 0
1528 else:
1529 interval = 60 * 60
1530 notify_interval = 60 * 2
David James321490a2012-12-17 12:05:56 -08001531 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001532 if job:
1533 last_timestamp = max(job.start_timestamp, job.last_output_timestamp)
1534 if last_timestamp + interval < current_time:
1535 self._print_queue.put(JobPrinter(job))
1536 job.last_output_timestamp = current_time
1537 no_output = False
1538 elif (notify_interval and
1539 job.last_notify_timestamp + notify_interval < current_time):
1540 job_seconds = current_time - job.start_timestamp
1541 args = (job.pkgname, job_seconds / 60, job_seconds % 60, job.filename)
1542 info = "Still building %s (%dm%.1fs). Logs in %s" % args
1543 job.last_notify_timestamp = current_time
1544 self._Print(info)
1545 no_output = False
1546
1547 # If we haven't printed any messages yet, print a general status message
1548 # here.
1549 if no_output:
1550 seconds = current_time - GLOBAL_START
Brian Harring0be85c62012-03-17 19:52:12 -07001551 fjobs, fready = len(self._fetch_jobs), len(self._fetch_ready)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001552 ujobs, uready = len(self._unpack_jobs), len(self._unpack_ready)
Brian Harring0be85c62012-03-17 19:52:12 -07001553 bjobs, bready = len(self._build_jobs), len(self._build_ready)
1554 retries = len(self._retry_queue)
1555 pending = max(0, len(self._deps_map) - fjobs - bjobs)
1556 line = "Pending %s/%s, " % (pending, self._total_jobs)
1557 if fjobs or fready:
1558 line += "Fetching %s/%s, " % (fjobs, fready + fjobs)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001559 if ujobs or uready:
1560 line += "Unpacking %s/%s, " % (ujobs, uready + ujobs)
Brian Harring0be85c62012-03-17 19:52:12 -07001561 if bjobs or bready or retries:
1562 line += "Building %s/%s, " % (bjobs, bready + bjobs)
1563 if retries:
1564 line += "Retrying %s, " % (retries,)
Mike Frysingerd6e2df02014-11-26 02:55:04 -05001565 load = " ".join(str(x) for x in os.getloadavg())
1566 line += ("[Time %dm%.1fs Load %s]" % (seconds / 60, seconds % 60, load))
Brian Harring0be85c62012-03-17 19:52:12 -07001567 self._Print(line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001568
1569 def _Finish(self, target):
David James8c7e5e32011-06-28 11:26:03 -07001570 """Mark a target as completed and unblock dependencies."""
1571 this_pkg = self._deps_map[target]
1572 if this_pkg["needs"] and this_pkg["nodeps"]:
1573 # We got installed, but our deps have not been installed yet. Dependent
1574 # packages should only be installed when our needs have been fully met.
1575 this_pkg["action"] = "nomerge"
1576 else:
David James8c7e5e32011-06-28 11:26:03 -07001577 for dep in this_pkg["provides"]:
1578 dep_pkg = self._deps_map[dep]
Brian Harring0be85c62012-03-17 19:52:12 -07001579 state = self._state_map[dep]
David James8c7e5e32011-06-28 11:26:03 -07001580 del dep_pkg["needs"][target]
Brian Harring0be85c62012-03-17 19:52:12 -07001581 state.update_score()
1582 if not state.prefetched:
1583 if dep in self._fetch_ready:
1584 # If it's not currently being fetched, update the prioritization
1585 self._fetch_ready.sort()
1586 elif not dep_pkg["needs"]:
David James8c7e5e32011-06-28 11:26:03 -07001587 if dep_pkg["nodeps"] and dep_pkg["action"] == "nomerge":
1588 self._Finish(dep)
1589 else:
Brian Harring0be85c62012-03-17 19:52:12 -07001590 self._build_ready.put(self._state_map[dep])
David James8c7e5e32011-06-28 11:26:03 -07001591 self._deps_map.pop(target)
David Jamesfcb70ef2011-02-02 16:02:30 -08001592
1593 def _Retry(self):
David James8c7e5e32011-06-28 11:26:03 -07001594 while self._retry_queue:
Brian Harring0be85c62012-03-17 19:52:12 -07001595 state = self._retry_queue.pop(0)
1596 if self._Schedule(state):
1597 self._Print("Retrying emerge of %s." % state.target)
David James8c7e5e32011-06-28 11:26:03 -07001598 break
David Jamesfcb70ef2011-02-02 16:02:30 -08001599
Brian Harringa43f5952012-04-12 01:19:34 -07001600 def _Shutdown(self):
David Jamesfcb70ef2011-02-02 16:02:30 -08001601 # Tell emerge workers to exit. They all exit when 'None' is pushed
1602 # to the queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001603
Brian Harringa43f5952012-04-12 01:19:34 -07001604 # Shut down the workers first, then the job queue (which is how they feed
1605 # results back), and finally the print queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001606
Brian Harringa43f5952012-04-12 01:19:34 -07001607 def _stop(queue, pool):
1608 if pool is None:
1609 return
1610 try:
1611 queue.put(None)
1612 pool.close()
1613 pool.join()
1614 finally:
1615 pool.terminate()
Brian Harring0be85c62012-03-17 19:52:12 -07001616
Brian Harringa43f5952012-04-12 01:19:34 -07001617 _stop(self._fetch_queue, self._fetch_pool)
1618 self._fetch_queue = self._fetch_pool = None
Brian Harring0be85c62012-03-17 19:52:12 -07001619
Brian Harringa43f5952012-04-12 01:19:34 -07001620 _stop(self._build_queue, self._build_pool)
1621 self._build_queue = self._build_pool = None
1622
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001623 if self._unpack_only:
1624 _stop(self._unpack_queue, self._unpack_pool)
1625 self._unpack_queue = self._unpack_pool = None
1626
Brian Harringa43f5952012-04-12 01:19:34 -07001627 if self._job_queue is not None:
1628 self._job_queue.close()
1629 self._job_queue = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001630
1631 # Now that our workers are finished, we can kill the print queue.
Brian Harringa43f5952012-04-12 01:19:34 -07001632 if self._print_worker is not None:
1633 try:
1634 self._print_queue.put(None)
1635 self._print_queue.close()
1636 self._print_worker.join()
1637 finally:
1638 self._print_worker.terminate()
1639 self._print_queue = self._print_worker = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001640
1641 def Run(self):
1642 """Run through the scheduled ebuilds.
1643
1644 Keep running so long as we have uninstalled packages in the
1645 dependency graph to merge.
1646 """
Brian Harringa43f5952012-04-12 01:19:34 -07001647 if not self._deps_map:
1648 return
1649
Brian Harring0be85c62012-03-17 19:52:12 -07001650 # Start the fetchers.
1651 for _ in xrange(min(self._fetch_procs, len(self._fetch_ready))):
1652 state = self._fetch_ready.get()
1653 self._fetch_jobs[state.target] = None
1654 self._fetch_queue.put(state)
1655
1656 # Print an update, then get going.
1657 self._Status()
1658
David Jamesfcb70ef2011-02-02 16:02:30 -08001659 while self._deps_map:
1660 # Check here that we are actually waiting for something.
Brian Harring0be85c62012-03-17 19:52:12 -07001661 if (self._build_queue.empty() and
David Jamesfcb70ef2011-02-02 16:02:30 -08001662 self._job_queue.empty() and
Brian Harring0be85c62012-03-17 19:52:12 -07001663 not self._fetch_jobs and
1664 not self._fetch_ready and
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001665 not self._unpack_jobs and
1666 not self._unpack_ready and
Brian Harring0be85c62012-03-17 19:52:12 -07001667 not self._build_jobs and
1668 not self._build_ready and
David Jamesfcb70ef2011-02-02 16:02:30 -08001669 self._deps_map):
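# Nothing is queued, running, or ready, yet packages remain in the graph.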
1670 # If we have failed on a package, retry it now.
1671 if self._retry_queue:
1672 self._Retry()
1673 else:
David Jamesfcb70ef2011-02-02 16:02:30 -08001674 # Tell the user why we're exiting.
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001675 if self._failed_count:
1676 print('Packages failed:\n\t%s' %
1677 '\n\t'.join(self._failed_count.iterkeys()))
David James0eae23e2012-07-03 15:04:25 -07001678 status_file = os.environ.get("PARALLEL_EMERGE_STATUS_FILE")
1679 if status_file:
David James321490a2012-12-17 12:05:56 -08001680 failed_pkgs = set(portage.versions.cpv_getkey(x)
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001681 for x in self._failed_count.iterkeys())
David James0eae23e2012-07-03 15:04:25 -07001682 with open(status_file, "a") as f:
1683 f.write("%s\n" % " ".join(failed_pkgs))
David Jamesfcb70ef2011-02-02 16:02:30 -08001684 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001685 print("Deadlock! Circular dependencies!")
David Jamesfcb70ef2011-02-02 16:02:30 -08001686 sys.exit(1)
1687
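# Poll the job queue in twelve 5-second slices (60 seconds total) so that we
# can schedule more work between polls.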
David James321490a2012-12-17 12:05:56 -08001688 for _ in xrange(12):
David Jamesa74289a2011-08-12 10:41:24 -07001689 try:
1690 job = self._job_queue.get(timeout=5)
1691 break
1692 except Queue.Empty:
1693 # Check if any more jobs can be scheduled.
1694 self._ScheduleLoop()
1695 else:
Brian Harring706747c2012-03-16 03:04:31 -07001696 # Print an update every 60 seconds.
David Jamesfcb70ef2011-02-02 16:02:30 -08001697 self._Status()
1698 continue
1699
1700 target = job.target
1701
Brian Harring0be85c62012-03-17 19:52:12 -07001702 if job.fetch_only:
1703 if not job.done:
1704 self._fetch_jobs[job.target] = job
1705 else:
1706 state = self._state_map[job.target]
1707 state.prefetched = True
1708 state.fetched_successfully = (job.retcode == 0)
1709 del self._fetch_jobs[job.target]
1710 self._Print("Fetched %s in %2.2fs"
1711 % (target, time.time() - job.start_timestamp))
1712
1713 if self._show_output or job.retcode != 0:
1714 self._print_queue.put(JobPrinter(job, unlink=True))
1715 else:
1716 os.unlink(job.filename)
1717 # Failure or not, let the build stage work with it next.
1718 if not self._deps_map[job.target]["needs"]:
1719 self._build_ready.put(state)
1720 self._ScheduleLoop()
1721
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001722 if self._unpack_only and job.retcode == 0:
1723 self._unpack_ready.put(state)
1724 self._ScheduleLoop(unpack_only=True)
1725
Brian Harring0be85c62012-03-17 19:52:12 -07001726 if self._fetch_ready:
1727 state = self._fetch_ready.get()
1728 self._fetch_queue.put(state)
1729 self._fetch_jobs[state.target] = None
1730 else:
1731 # Minor optimization; shut down fetchers early since we know
1732 # the queue is empty.
1733 self._fetch_queue.put(None)
1734 continue
1735
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001736 if job.unpack_only:
1737 if not job.done:
1738 self._unpack_jobs[target] = job
1739 else:
1740 del self._unpack_jobs[target]
1741 self._Print("Unpacked %s in %2.2fs"
1742 % (target, time.time() - job.start_timestamp))
1743 if self._show_output or job.retcode != 0:
1744 self._print_queue.put(JobPrinter(job, unlink=True))
1745 else:
1746 os.unlink(job.filename)
1747 if self._unpack_ready:
1748 state = self._unpack_ready.get()
1749 self._unpack_queue.put(state)
1750 self._unpack_jobs[state.target] = None
1751 continue
1752
David Jamesfcb70ef2011-02-02 16:02:30 -08001753 if not job.done:
Brian Harring0be85c62012-03-17 19:52:12 -07001754 self._build_jobs[target] = job
David Jamesfcb70ef2011-02-02 16:02:30 -08001755 self._Print("Started %s (logged in %s)" % (target, job.filename))
1756 continue
1757
1758 # Print output of job
1759 if self._show_output or job.retcode != 0:
1760 self._print_queue.put(JobPrinter(job, unlink=True))
1761 else:
1762 os.unlink(job.filename)
Brian Harring0be85c62012-03-17 19:52:12 -07001763 del self._build_jobs[target]
David Jamesfcb70ef2011-02-02 16:02:30 -08001764
1765 seconds = time.time() - job.start_timestamp
1766 details = "%s (in %dm%.1fs)" % (target, seconds / 60, seconds % 60)
1767
1768 # Complain if necessary.
1769 if job.retcode != 0:
1770 # Handle job failure.
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001771 failed_count = self._failed_count.get(target, 0)
1772 if failed_count >= self._max_retries:
1773 # If this job has failed and can't be retried, give up.
David Jamesfcb70ef2011-02-02 16:02:30 -08001774 self._Print("Failed %s. Your build has failed." % details)
1775 else:
1776 # Queue up this build to try again after a long while.
Brian Harring0be85c62012-03-17 19:52:12 -07001777 self._retry_queue.append(self._state_map[target])
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001778 self._failed_count[target] = failed_count + 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001779 self._Print("Failed %s, retrying later." % details)
1780 else:
David James32420cc2011-08-25 21:32:46 -07001781 self._Print("Completed %s" % details)
1782
1783 # Mark as completed and unblock waiting ebuilds.
1784 self._Finish(target)
1785
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001786 if target in self._failed_count and self._retry_queue:
David Jamesfcb70ef2011-02-02 16:02:30 -08001787 # If we have successfully retried a failed package, and there
1788 # are more failed packages, try the next one. We will only have
1789 # one retrying package actively running at a time.
1790 self._Retry()
1791
David Jamesfcb70ef2011-02-02 16:02:30 -08001792
David James8c7e5e32011-06-28 11:26:03 -07001793 # Schedule pending jobs and print an update.
1794 self._ScheduleLoop()
1795 self._Status()
David Jamesfcb70ef2011-02-02 16:02:30 -08001796
David Jamese703d0f2012-01-12 16:27:45 -08001797 # If packages were retried, output a warning.
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001798 if self._failed_count:
David Jamese703d0f2012-01-12 16:27:45 -08001799 self._Print("")
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001800 self._Print("WARNING: The following packages failed once or more,")
David Jamese703d0f2012-01-12 16:27:45 -08001801 self._Print("but succeeded upon retry. This might indicate incorrect")
1802 self._Print("dependencies.")
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001803 for pkg in self._failed_count.iterkeys():
David Jamese703d0f2012-01-12 16:27:45 -08001804 self._Print(" %s" % pkg)
1805 self._Print("@@@STEP_WARNINGS@@@")
1806 self._Print("")
1807
David Jamesfcb70ef2011-02-02 16:02:30 -08001808 # Tell child threads to exit.
1809 self._Print("Merge complete")
David Jamesfcb70ef2011-02-02 16:02:30 -08001810
1811
Brian Harring30675052012-02-29 12:18:22 -08001812def main(argv):
Brian Harring8294d652012-05-23 02:20:52 -07001813 try:
1814 return real_main(argv)
1815 finally:
1816 # Work around multiprocessing failing to clean up after itself.
1817 # http://bugs.python.org/issue4106
1818 # Step one: ensure GC is run *prior* to the VM starting shutdown.
1819 gc.collect()
1820 # Step two: go looking for those threads and try to manually reap
1821 # them if we can.
1822 for x in threading.enumerate():
1823 # Filter on the name and ident; if ident is None, the thread
1824 # wasn't started.
1825 if x.name == 'QueueFeederThread' and x.ident is not None:
1826 x.join(1)
David Jamesfcb70ef2011-02-02 16:02:30 -08001827
Brian Harring8294d652012-05-23 02:20:52 -07001828
1829def real_main(argv):
Brian Harring30675052012-02-29 12:18:22 -08001830 parallel_emerge_args = argv[:]
David Jamesfcb70ef2011-02-02 16:02:30 -08001831 deps = DepGraphGenerator()
Brian Harring30675052012-02-29 12:18:22 -08001832 deps.Initialize(parallel_emerge_args)
David Jamesfcb70ef2011-02-02 16:02:30 -08001833 emerge = deps.emerge
1834
1835 if emerge.action is not None:
Brian Harring30675052012-02-29 12:18:22 -08001836 argv = deps.ParseParallelEmergeArgs(argv)
Brian Harring8294d652012-05-23 02:20:52 -07001837 return emerge_main(argv)
David Jamesfcb70ef2011-02-02 16:02:30 -08001838 elif not emerge.cmdline_packages:
1839 Usage()
Brian Harring8294d652012-05-23 02:20:52 -07001840 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001841
1842 # Unless we're in pretend mode, there's not much point running without
1843 # root access. We need to be able to install packages.
1844 #
1845 # NOTE: Even if you're running --pretend, it's a good idea to run
1846 # parallel_emerge with root access so that portage can write to the
1847 # dependency cache. This is important for performance.
David James321490a2012-12-17 12:05:56 -08001848 if "--pretend" not in emerge.opts and portage.data.secpass < 2:
Mike Frysinger383367e2014-09-16 15:06:17 -04001849 print("parallel_emerge: superuser access is required.")
Brian Harring8294d652012-05-23 02:20:52 -07001850 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001851
1852 if "--quiet" not in emerge.opts:
1853 cmdline_packages = " ".join(emerge.cmdline_packages)
Mike Frysinger383367e2014-09-16 15:06:17 -04001854 print("Starting fast-emerge.")
1855 print(" Building package %s on %s" % (cmdline_packages,
Gilad Arnold05f94b02015-05-22 10:41:05 -07001856 deps.sysroot or "root"))
David Jamesfcb70ef2011-02-02 16:02:30 -08001857
David James386ccd12011-05-04 20:17:42 -07001858 deps_tree, deps_info = deps.GenDependencyTree()
David Jamesfcb70ef2011-02-02 16:02:30 -08001859
1860 # You want me to be verbose? I'll give you two trees! Twice as much value.
1861 if "--tree" in emerge.opts and "--verbose" in emerge.opts:
1862 deps.PrintTree(deps_tree)
1863
David James386ccd12011-05-04 20:17:42 -07001864 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001865
1866 # OK, time to print out our progress so far.
1867 deps.PrintInstallPlan(deps_graph)
1868 if "--tree" in emerge.opts:
1869 PrintDepsMap(deps_graph)
1870
1871 # Are we upgrading portage? If so, and there are more packages to merge,
1872 # schedule a restart of parallel_emerge to merge the rest. This ensures that
1873 # we pick up all updates to portage settings before merging any more
1874 # packages.
1875 portage_upgrade = False
1876 root = emerge.settings["ROOT"]
Don Garrett25f309a2014-03-19 14:02:12 -07001877 # pylint: disable=W0212
David Jamesfcb70ef2011-02-02 16:02:30 -08001878 if root == "/":
Gilad Arnoldcead28a2015-05-22 10:45:02 -07001879 final_db = emerge.depgraph._dynamic_config._filtered_trees[root]['graph_db']
David Jamesfcb70ef2011-02-02 16:02:30 -08001880 for db_pkg in final_db.match_pkgs("sys-apps/portage"):
1881 portage_pkg = deps_graph.get(db_pkg.cpv)
David James0ff16f22012-11-02 14:18:07 -07001882 if portage_pkg:
David Jamesfcb70ef2011-02-02 16:02:30 -08001883 portage_upgrade = True
1884 if "--quiet" not in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001885 print("Upgrading portage first, then restarting...")
David Jamesfcb70ef2011-02-02 16:02:30 -08001886
David James0ff16f22012-11-02 14:18:07 -07001887 # Upgrade Portage first, then the rest of the packages.
1888 #
1889 # In order to grant the child permission to run setsid, we need to run sudo
1890 # again. We preserve SUDO_USER here in case an ebuild depends on it.
1891 if portage_upgrade:
1892 # Calculate what arguments to use when re-invoking.
1893 args = ["sudo", "-E", "SUDO_USER=%s" % os.environ.get("SUDO_USER", "")]
1894 args += [os.path.abspath(sys.argv[0])] + parallel_emerge_args
1895 args += ["--exclude=sys-apps/portage"]
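# The re-invocation ends up looking roughly like:
#   sudo -E SUDO_USER=<user> /path/to/parallel_emerge <original args> \
#     --exclude=sys-apps/portage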
1896
1897 # First upgrade Portage.
1898 passthrough_args = ("--quiet", "--pretend", "--verbose")
1899 emerge_args = [k for k in emerge.opts if k in passthrough_args]
1900 ret = emerge_main(emerge_args + ["portage"])
1901 if ret != 0:
1902 return ret
1903
1904 # Now upgrade the rest.
1905 os.execvp(args[0], args)
1906
Bertrand SIMONNETc03c8ee2014-12-10 17:02:55 -08001907 # Attempt to solve crbug.com/433482
1908 # The file descriptor error appears only when getting userpriv_groups
1909 # (lazily generated). Loading userpriv_groups here will reduce the number of
1910 # calls from few hundreds to one.
1911 portage.data._get_global('userpriv_groups')
1912
David Jamesfcb70ef2011-02-02 16:02:30 -08001913 # Run the queued emerges.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001914 scheduler = EmergeQueue(deps_graph, emerge, deps.package_db, deps.show_output,
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001915 deps.unpack_only, deps.max_retries)
Brian Harringa43f5952012-04-12 01:19:34 -07001916 try:
1917 scheduler.Run()
1918 finally:
Don Garrett25f309a2014-03-19 14:02:12 -07001919 # pylint: disable=W0212
Brian Harringa43f5952012-04-12 01:19:34 -07001920 scheduler._Shutdown()
David James97ce8902011-08-16 09:51:05 -07001921 scheduler = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001922
Mike Frysingerd20a6e22012-10-04 19:01:10 -04001923 clean_logs(emerge.settings)
1924
Mike Frysinger383367e2014-09-16 15:06:17 -04001925 print("Done")
Brian Harring8294d652012-05-23 02:20:52 -07001926 return 0