# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Program to run emerge in parallel, for significant speedup.

Usage:
 ./parallel_emerge [--board=BOARD] [--workon=PKGS]
                   [--force-remote-binary=PKGS] [emerge args] package

This script runs multiple emerge processes in parallel, using appropriate
Portage APIs. It is faster than standard emerge because it has a
multiprocess model instead of an asynchronous model.
"""

from __future__ import print_function

import codecs
import copy
import errno
import gc
import heapq
import multiprocessing
import os
try:
  import Queue
except ImportError:
  # Python-3 renamed to "queue". We still use Queue to avoid collisions
  # with naming variables as "queue". Maybe we'll transition at some point.
  # pylint: disable=F0401
  import queue as Queue
import signal
import subprocess
import sys
import tempfile
import threading
import time
import traceback

from chromite.lib import cros_build_lib
from chromite.lib import cros_event
from chromite.lib import process_util
from chromite.lib import proctitle

# If PORTAGE_USERNAME isn't specified, scrape it from the $HOME variable. On
# Chromium OS, the default "portage" user doesn't have the necessary
# permissions. It'd be easier if we could default to $USERNAME, but $USERNAME
# is "root" here because we get called through sudo.
#
# We need to set this before importing any portage modules, because portage
# looks up "PORTAGE_USERNAME" at import time.
#
# NOTE: .bashrc sets PORTAGE_USERNAME = $USERNAME, so most people won't
# encounter this case unless they have an old chroot or blow away the
# environment by running sudo without the -E specifier.
if "PORTAGE_USERNAME" not in os.environ:
  homedir = os.environ.get("HOME")
  if homedir:
    os.environ["PORTAGE_USERNAME"] = os.path.basename(homedir)

# Wrap Popen with a lock to ensure no two Popen calls execute simultaneously
# in the same process.
# Two Popen calls at the same time might be the cause of crbug.com/433482.
_popen_lock = threading.Lock()
_old_popen = subprocess.Popen

def _LockedPopen(*args, **kwargs):
  with _popen_lock:
    return _old_popen(*args, **kwargs)

subprocess.Popen = _LockedPopen

# Portage doesn't expose dependency trees in its public API, so we have to
# make use of some private APIs here. These modules are found under
# /usr/lib/portage/pym/.
#
# TODO(davidjames): Update Portage to expose public APIs for these features.
# pylint: disable=F0401
from _emerge.actions import adjust_configs
from _emerge.actions import load_emerge_config
from _emerge.create_depgraph_params import create_depgraph_params
from _emerge.depgraph import backtrack_depgraph
from _emerge.main import emerge_main
from _emerge.main import parse_opts
from _emerge.Package import Package
from _emerge.post_emerge import clean_logs
from _emerge.Scheduler import Scheduler
from _emerge.stdout_spinner import stdout_spinner
from portage._global_updates import _global_updates
import portage
import portage.debug
# pylint: enable=F0401


def Usage():
  """Print usage."""
  print("Usage:")
  print(" ./parallel_emerge [--board=BOARD] [--workon=PKGS] [--rebuild]")
  print(" [--eventlogfile=FILE] [emerge args] package")
  print()
  print("Packages specified as workon packages are always built from source.")
  print()
  print("The --workon argument is mainly useful when you want to build and")
  print("install packages that you are working on unconditionally, but do")
  print("not want to have to rev the package to indicate you want to build it")
  print("from source. The build_packages script will automatically supply the")
  print("workon argument to emerge, ensuring that packages selected using")
  print("cros-workon are rebuilt.")
  print()
  print("The --rebuild option rebuilds packages whenever their dependencies")
  print("are changed. This ensures that your build is correct.")
  print()
  print("The --eventlogfile option writes events to the given file. The file")
  print("is overwritten if it exists.")


# Global start time
GLOBAL_START = time.time()

# Whether process has been killed by a signal.
KILLED = multiprocessing.Event()


class EmergeData(object):
  """This simple struct holds various emerge variables.

  This struct helps us easily pass emerge variables around as a unit.
  These variables are used for calculating dependencies and installing
  packages.
  """

  __slots__ = ["action", "cmdline_packages", "depgraph", "favorites",
               "mtimedb", "opts", "root_config", "scheduler_graph",
               "settings", "spinner", "trees"]

  def __init__(self):
    # The action the user requested. If the user is installing packages, this
    # is None. If the user is doing anything other than installing packages,
    # this will contain the action name, which will map exactly to the
    # long-form name of the associated emerge option.
    #
    # Example: If you call parallel_emerge --unmerge package, the action name
    # will be "unmerge"
    self.action = None

    # The list of packages the user passed on the command-line.
    self.cmdline_packages = None

    # The emerge dependency graph. It'll contain all the packages involved in
    # this merge, along with their versions.
    self.depgraph = None

    # The list of candidates to add to the world file.
    self.favorites = None

    # A dict of the options passed to emerge. This dict has been cleaned up
    # a bit by parse_opts, so that it's a bit easier for the emerge code to
    # look at the options.
    #
    # Emerge takes a few shortcuts in its cleanup process to make parsing of
    # the options dict easier. For example, if you pass in "--usepkg=n", the
    # "--usepkg" flag is just left out of the dictionary altogether. Because
    # --usepkg=n is the default, this makes parsing easier, because emerge
    # can just assume that if "--usepkg" is in the dictionary, it's enabled.
    #
    # These cleanup processes aren't applied to all options. For example, the
    # --with-bdeps flag is passed in as-is. For a full list of the cleanups
    # applied by emerge, see the parse_opts function in the _emerge.main
    # package.
    self.opts = None

    # A dictionary used by portage to maintain global state. This state is
    # loaded from disk when portage starts up, and saved to disk whenever we
    # call mtimedb.commit().
    #
    # This database contains information about global updates (i.e., what
    # version of portage we have) and what we're currently doing. Portage
    # saves what it is currently doing in this database so that it can be
    # resumed when you call it with the --resume option.
    #
    # parallel_emerge does not save what it is currently doing in the mtimedb,
    # so we do not support the --resume option.
    self.mtimedb = None

    # The portage configuration for our current root. This contains the portage
    # settings (see below) and the three portage trees for our current root.
    # (The three portage trees are explained below, in the documentation for
    # the "trees" member.)
    self.root_config = None

    # The scheduler graph is used by emerge to calculate what packages to
    # install. We don't actually install any deps, so this isn't really used,
    # but we pass it in to the Scheduler object anyway.
    self.scheduler_graph = None

    # Portage settings for our current session. Most of these settings are set
    # in make.conf inside our current install root.
    self.settings = None

    # The spinner, which spews stuff to stdout to indicate that portage is
    # doing something. We maintain our own spinner, so we set the portage
    # spinner to "silent" mode.
    self.spinner = None

    # The portage trees. There are separate portage trees for each root. To get
    # the portage tree for the current root, you can look in self.trees[root],
    # where root = self.settings["ROOT"].
    #
    # In each root, there are three trees: vartree, porttree, and bintree.
    #  - vartree: A database of the currently-installed packages.
    #  - porttree: A database of ebuilds, that can be used to build packages.
    #  - bintree: A database of binary packages.
    self.trees = None
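    # Example access pattern (illustrative sketch; it mirrors how callers
    # below use these trees once they are populated):
    #   root = self.settings["ROOT"]
    #   vardb = self.trees[root]["vartree"].dbapi   # installed packages
    #   bintree = self.trees[root]["bintree"]       # binary packages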


class DepGraphGenerator(object):
  """Grab dependency information about packages from portage.

  Typical usage:
    deps = DepGraphGenerator()
    deps.Initialize(sys.argv[1:])
    deps_tree, deps_info = deps.GenDependencyTree()
    deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
    deps.PrintTree(deps_tree)
    PrintDepsMap(deps_graph)
  """

  __slots__ = ["board", "emerge", "package_db", "show_output", "sysroot",
               "unpack_only", "max_retries"]

  def __init__(self):
    self.board = None
    self.emerge = EmergeData()
    self.package_db = {}
    self.show_output = False
    self.sysroot = None
    self.unpack_only = False
    self.max_retries = 1

  def ParseParallelEmergeArgs(self, argv):
    """Read the parallel emerge arguments from the command-line.

    We need to be compatible with emerge arg format. We scrape arguments that
    are specific to parallel_emerge, and pass through the rest directly to
    emerge.

    Args:
      argv: arguments list

    Returns:
      Arguments that don't belong to parallel_emerge
    """
    emerge_args = []
    for arg in argv:
      # Specifically match arguments that are specific to parallel_emerge, and
      # pass through the rest.
      if arg.startswith("--board="):
        self.board = arg.replace("--board=", "")
      elif arg.startswith("--sysroot="):
        self.sysroot = arg.replace("--sysroot=", "")
      elif arg.startswith("--workon="):
        workon_str = arg.replace("--workon=", "")
        emerge_args.append("--reinstall-atoms=%s" % workon_str)
        emerge_args.append("--usepkg-exclude=%s" % workon_str)
      elif arg.startswith("--force-remote-binary="):
        force_remote_binary = arg.replace("--force-remote-binary=", "")
        emerge_args.append("--useoldpkg-atoms=%s" % force_remote_binary)
      elif arg.startswith("--retries="):
        self.max_retries = int(arg.replace("--retries=", ""))
      elif arg == "--show-output":
        self.show_output = True
      elif arg == "--rebuild":
        emerge_args.append("--rebuild-if-unbuilt")
      elif arg == "--unpackonly":
        emerge_args.append("--fetchonly")
        self.unpack_only = True
      elif arg.startswith("--eventlogfile="):
        log_file_name = arg.replace("--eventlogfile=", "")
        event_logger = cros_event.getEventFileLogger(log_file_name)
        event_logger.setKind('ParallelEmerge')
        cros_event.setEventLogger(event_logger)
      else:
        # Not one of our options, so pass through to emerge.
        emerge_args.append(arg)

    # These packages take a really long time to build, so, for expediency, we
    # are blacklisting them from automatic rebuilds because one of their
    # dependencies needs to be recompiled.
    for pkg in ("chromeos-base/chromeos-chrome",):
      emerge_args.append("--rebuild-exclude=%s" % pkg)

    return emerge_args

  def Initialize(self, args):
    """Initializer. Parses arguments and sets up portage state."""

    # Parse and strip out args that are just intended for parallel_emerge.
    emerge_args = self.ParseParallelEmergeArgs(args)

    if self.sysroot and self.board:
      cros_build_lib.Die("--sysroot and --board are incompatible.")

    # Setup various environment variables based on our current board. These
    # variables are normally setup inside emerge-${BOARD}, but since we don't
    # call that script, we have to set it up here. These variables serve to
    # point our tools at /build/BOARD and to setup cross compiles to the
    # appropriate board as configured in toolchain.conf.
    if self.board:
      self.sysroot = os.environ.get('SYSROOT',
                                    cros_build_lib.GetSysroot(self.board))

    if self.sysroot:
      os.environ["PORTAGE_CONFIGROOT"] = self.sysroot
      os.environ["SYSROOT"] = self.sysroot

    # Turn off interactive delays
    os.environ["EBEEP_IGNORE"] = "1"
    os.environ["EPAUSE_IGNORE"] = "1"
    os.environ["CLEAN_DELAY"] = "0"

    # Parse the emerge options.
    action, opts, cmdline_packages = parse_opts(emerge_args, silent=True)

    # Set environment variables based on options. Portage normally sets these
    # environment variables in emerge_main, but we can't use that function,
    # because it also does a bunch of other stuff that we don't want.
    # TODO(davidjames): Patch portage to move this logic into a function we can
    # reuse here.
    if "--debug" in opts:
      os.environ["PORTAGE_DEBUG"] = "1"
    if "--config-root" in opts:
      os.environ["PORTAGE_CONFIGROOT"] = opts["--config-root"]
    if "--root" in opts:
      os.environ["ROOT"] = opts["--root"]
    if "--accept-properties" in opts:
      os.environ["ACCEPT_PROPERTIES"] = opts["--accept-properties"]

    # If we're installing packages to the board, we can disable vardb locks.
    # This is safe because we only run up to one instance of parallel_emerge in
    # parallel.
    # TODO(davidjames): Enable this for the host too.
    if self.sysroot:
      os.environ.setdefault("PORTAGE_LOCKS", "false")

    # Now that we've setup the necessary environment variables, we can load the
    # emerge config from disk.
    # pylint: disable=unpacking-non-sequence
    settings, trees, mtimedb = load_emerge_config()

    # Add in EMERGE_DEFAULT_OPTS, if specified.
    tmpcmdline = []
    if "--ignore-default-opts" not in opts:
      tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
    tmpcmdline.extend(emerge_args)
    action, opts, cmdline_packages = parse_opts(tmpcmdline)

    # If we're installing to the board, we want the --root-deps option so that
    # portage will install the build dependencies to that location as well.
    if self.sysroot:
      opts.setdefault("--root-deps", True)

    # Check whether our portage tree is out of date. Typically, this happens
    # when you're setting up a new portage tree, such as in setup_board and
    # make_chroot. In that case, portage applies a bunch of global updates
    # here. Once the updates are finished, we need to commit any changes
    # that the global update made to our mtimedb, and reload the config.
    #
    # Portage normally handles this logic in emerge_main, but again, we can't
    # use that function here.
    if _global_updates(trees, mtimedb["updates"]):
      mtimedb.commit()
      # pylint: disable=unpacking-non-sequence
      settings, trees, mtimedb = load_emerge_config(trees=trees)

    # Setup implied options. Portage normally handles this logic in
    # emerge_main.
    if "--buildpkgonly" in opts or "buildpkg" in settings.features:
      opts.setdefault("--buildpkg", True)
    if "--getbinpkgonly" in opts:
      opts.setdefault("--usepkgonly", True)
      opts.setdefault("--getbinpkg", True)
    if "getbinpkg" in settings.features:
      # Per emerge_main, FEATURES=getbinpkg overrides --getbinpkg=n
      opts["--getbinpkg"] = True
    if "--getbinpkg" in opts or "--usepkgonly" in opts:
      opts.setdefault("--usepkg", True)
    if "--fetch-all-uri" in opts:
      opts.setdefault("--fetchonly", True)
    if "--skipfirst" in opts:
      opts.setdefault("--resume", True)
    if "--buildpkgonly" in opts:
      # --buildpkgonly will not merge anything, so it overrides all binary
      # package options.
      for opt in ("--getbinpkg", "--getbinpkgonly",
                  "--usepkg", "--usepkgonly"):
        opts.pop(opt, None)
    if (settings.get("PORTAGE_DEBUG", "") == "1" and
        "python-trace" in settings.features):
      portage.debug.set_trace(True)

    # Complain about unsupported options
    for opt in ("--ask", "--ask-enter-invalid", "--resume", "--skipfirst"):
      if opt in opts:
        print("%s is not supported by parallel_emerge" % opt)
        sys.exit(1)

    # Make emerge specific adjustments to the config (e.g. colors!)
    adjust_configs(opts, trees)

    # Save our configuration so far in the emerge object
    emerge = self.emerge
    emerge.action, emerge.opts = action, opts
    emerge.settings, emerge.trees, emerge.mtimedb = settings, trees, mtimedb
    emerge.cmdline_packages = cmdline_packages
    root = settings["ROOT"]
    emerge.root_config = trees[root]["root_config"]

    if "--usepkg" in opts:
      emerge.trees[root]["bintree"].populate("--getbinpkg" in opts)

  def CreateDepgraph(self, emerge, packages):
    """Create an emerge depgraph object."""
    # Setup emerge options.
    emerge_opts = emerge.opts.copy()

    # Ask portage to build a dependency graph with the options we specified
    # above.
    params = create_depgraph_params(emerge_opts, emerge.action)
    success, depgraph, favorites = backtrack_depgraph(
        emerge.settings, emerge.trees, emerge_opts, params, emerge.action,
        packages, emerge.spinner)
    emerge.depgraph = depgraph

    # Is it impossible to honor the user's request? Bail!
    if not success:
      depgraph.display_problems()
      sys.exit(1)

    emerge.depgraph = depgraph
    emerge.favorites = favorites

    # Prime and flush emerge caches.
    root = emerge.settings["ROOT"]
    vardb = emerge.trees[root]["vartree"].dbapi
    if "--pretend" not in emerge.opts:
      vardb.counter_tick()
    vardb.flush_cache()

  def GenDependencyTree(self):
    """Get dependency tree info from emerge.

    Returns:
      Dependency tree
    """
    start = time.time()

    emerge = self.emerge

    # Create a list of packages to merge
    packages = set(emerge.cmdline_packages[:])

    # Tell emerge to be quiet. We print plenty of info ourselves so we don't
    # need any extra output from portage.
    portage.util.noiselimit = -1

    # My favorite feature: The silent spinner. It doesn't spin. Ever.
    # I'd disable the colors by default too, but they look kind of cool.
    emerge.spinner = stdout_spinner()
    emerge.spinner.update = emerge.spinner.update_quiet

    if "--quiet" not in emerge.opts:
      print("Calculating deps...")

    with cros_event.newEvent(task_name="GenerateDepTree"):
      self.CreateDepgraph(emerge, packages)
      depgraph = emerge.depgraph

    # Build our own tree from the emerge digraph.
    deps_tree = {}
    # pylint: disable=W0212
    digraph = depgraph._dynamic_config.digraph
    root = emerge.settings["ROOT"]
    final_db = depgraph._dynamic_config._filtered_trees[root]['graph_db']
    for node, node_deps in digraph.nodes.items():
      # Calculate dependency packages that need to be installed first. Each
      # child on the digraph is a dependency. The "operation" field specifies
      # what we're doing (e.g. merge, uninstall, etc.). The "priorities" array
      # contains the type of dependency (e.g. build, runtime, runtime_post,
      # etc.)
      #
      # Portage refers to the identifiers for packages as a CPV. This acronym
      # stands for Category/Package/Version.
      #
      # Here's an example CPV: chromeos-base/power_manager-0.0.1-r1
      # Split up, this CPV would be:
      # C -- Category: chromeos-base
      # P -- Package: power_manager
      # V -- Version: 0.0.1-r1
      #
      # We just refer to CPVs as packages here because it's easier.
      deps = {}
      for child, priorities in node_deps[0].items():
        if isinstance(child, Package) and child.root == root:
          cpv = str(child.cpv)
          action = str(child.operation)

          # If we're uninstalling a package, check whether Portage is
          # installing a replacement. If so, just depend on the installation
          # of the new package, because the old package will automatically
          # be uninstalled at that time.
          if action == "uninstall":
            for pkg in final_db.match_pkgs(child.slot_atom):
              cpv = str(pkg.cpv)
              action = "merge"
              break

          deps[cpv] = dict(action=action,
                           deptypes=[str(x) for x in priorities],
                           deps={})

      # We've built our list of deps, so we can add our package to the tree.
      if isinstance(node, Package) and node.root == root:
        deps_tree[str(node.cpv)] = dict(action=str(node.operation),
                                        deps=deps)

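    # Illustrative sketch of the resulting deps_tree shape (the package names
    # below are examples only):
    #   deps_tree = {
    #     "chromeos-base/power_manager-0.0.1-r1": {
    #       "action": "merge",
    #       "deps": {
    #         "chromeos-base/libchrome-0.0.1-r1": {
    #           "action": "merge",
    #           "deptypes": ["buildtime", "runtime"],
    #           "deps": {},
    #         },
    #       },
    #     },
    #   }
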
    # Ask portage for its install plan, so that we can only throw out
    # dependencies that portage throws out.
    deps_info = {}
    for pkg in depgraph.altlist():
      if isinstance(pkg, Package):
        assert pkg.root == root
        self.package_db[pkg.cpv] = pkg

        # Save off info about the package
        deps_info[str(pkg.cpv)] = {"idx": len(deps_info)}

    seconds = time.time() - start
    if "--quiet" not in emerge.opts:
      print("Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60))

    return deps_tree, deps_info

  def PrintTree(self, deps, depth=""):
    """Print the deps we have seen in the emerge output.

    Args:
      deps: Dependency tree structure.
      depth: Allows printing the tree recursively, with indentation.
    """
    for entry in sorted(deps):
      action = deps[entry]["action"]
      print("%s %s (%s)" % (depth, entry, action))
      self.PrintTree(deps[entry]["deps"], depth=depth + " ")

  def GenDependencyGraph(self, deps_tree, deps_info):
    """Generate a doubly linked dependency graph.

    Args:
      deps_tree: Dependency tree structure.
      deps_info: More details on the dependencies.

    Returns:
      Deps graph in the form of a dict of packages, with each package
      specifying a "needs" list and "provides" list.
    """
    emerge = self.emerge

    # deps_map is the actual dependency graph.
    #
    # Each package specifies a "needs" list and a "provides" list. The "needs"
    # list indicates which packages we depend on. The "provides" list
    # indicates the reverse dependencies -- what packages need us.
    #
    # We also provide some other information in the dependency graph:
    #  - action: What we're planning on doing with this package. Generally,
    #    "merge", "nomerge", or "uninstall"
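    #
    # Illustrative sketch of one deps_map entry after ReverseTree (below) has
    # run; the package names are examples only. An "idx" key is added for
    # packages on the install list, and FindRecursiveProvides later adds a
    # transitive "tprovides" set.
    #   "chromeos-base/power_manager-0.0.1-r1": {
    #     "action": "merge",
    #     "needs": {"chromeos-base/libchrome-0.0.1-r1": "buildtime/runtime"},
    #     "provides": set(["virtual/target-os-1-r60"]),
    #     "nodeps": False,
    #     "binary": False,
    #   }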
    deps_map = {}

    def ReverseTree(packages):
      """Convert tree to digraph.

      Take the tree of package -> requirements and reverse it to a digraph of
      buildable packages -> packages they unblock.

      Args:
        packages: Tree(s) of dependencies.

      Returns:
        Unsanitized digraph.
      """
      binpkg_phases = set(["setup", "preinst", "postinst"])
      needed_dep_types = set(["blocker", "buildtime", "buildtime_slot_op",
                              "runtime", "runtime_slot_op"])
      ignored_dep_types = set(["ignored", "optional", "runtime_post", "soft"])
      all_dep_types = ignored_dep_types | needed_dep_types
      for pkg in packages:

        # Create an entry for the package
        action = packages[pkg]["action"]
        default_pkg = {"needs": {}, "provides": set(), "action": action,
                       "nodeps": False, "binary": False}
        this_pkg = deps_map.setdefault(pkg, default_pkg)

        if pkg in deps_info:
          this_pkg["idx"] = deps_info[pkg]["idx"]

        # If a package doesn't have any defined phases that might use the
        # dependent packages (i.e. pkg_setup, pkg_preinst, or pkg_postinst),
        # we can install this package before its deps are ready.
        emerge_pkg = self.package_db.get(pkg)
        if emerge_pkg and emerge_pkg.type_name == "binary":
          this_pkg["binary"] = True
          defined_phases = emerge_pkg.defined_phases
          defined_binpkg_phases = binpkg_phases.intersection(defined_phases)
          if not defined_binpkg_phases:
            this_pkg["nodeps"] = True

        # Create entries for dependencies of this package first.
        ReverseTree(packages[pkg]["deps"])

        # Add dependencies to this package.
        for dep, dep_item in packages[pkg]["deps"].iteritems():
          # We only need to enforce strict ordering of dependencies if the
          # dependency is a blocker, or is a buildtime or runtime dependency.
          # (I.e., ignored, optional, and runtime_post dependencies don't
          # depend on ordering.)
          dep_types = dep_item["deptypes"]
          if needed_dep_types.intersection(dep_types):
            deps_map[dep]["provides"].add(pkg)
            this_pkg["needs"][dep] = "/".join(dep_types)

          # Verify we processed all appropriate dependency types.
          unknown_dep_types = set(dep_types) - all_dep_types
          if unknown_dep_types:
            print("Unknown dependency types found:")
            print(" %s -> %s (%s)" % (pkg, dep, "/".join(unknown_dep_types)))
            sys.exit(1)

          # If there's a blocker, Portage may need to move files from one
          # package to another, which requires editing the CONTENTS files of
          # both packages. To avoid race conditions while editing this file,
          # the two packages must not be installed in parallel, so we can't
          # safely ignore dependencies. See http://crosbug.com/19328
          if "blocker" in dep_types:
            this_pkg["nodeps"] = False

    def FindCycles():
      """Find cycles in the dependency tree.

      Returns:
        A dict mapping cyclic packages to a dict of the deps that cause
        cycles. For each dep that causes cycles, it returns an example
        traversal of the graph that shows the cycle.
      """

      def FindCyclesAtNode(pkg, cycles, unresolved, resolved):
        """Find cycles in cyclic dependencies starting at specified package.

        Args:
          pkg: Package identifier.
          cycles: A dict mapping cyclic packages to a dict of the deps that
            cause cycles. For each dep that causes cycles, it returns an
            example traversal of the graph that shows the cycle.
          unresolved: Nodes that have been visited but are not fully processed.
          resolved: Nodes that have been visited and are fully processed.
        """
        pkg_cycles = cycles.get(pkg)
        if pkg in resolved and not pkg_cycles:
          # If we already looked at this package, and found no cyclic
          # dependencies, we can stop now.
          return
        unresolved.append(pkg)
        for dep in deps_map[pkg]["needs"]:
          if dep in unresolved:
            idx = unresolved.index(dep)
            mycycle = unresolved[idx:] + [dep]
            for i in xrange(len(mycycle) - 1):
              pkg1, pkg2 = mycycle[i], mycycle[i+1]
              cycles.setdefault(pkg1, {}).setdefault(pkg2, mycycle)
          elif not pkg_cycles or dep not in pkg_cycles:
            # Looks like we haven't seen this edge before.
            FindCyclesAtNode(dep, cycles, unresolved, resolved)
        unresolved.pop()
        resolved.add(pkg)

      cycles, unresolved, resolved = {}, [], set()
      for pkg in deps_map:
        FindCyclesAtNode(pkg, cycles, unresolved, resolved)
      return cycles

    def RemoveUnusedPackages():
      """Remove installed packages, propagating dependencies."""
      # Schedule packages that aren't on the install list for removal
      rm_pkgs = set(deps_map.keys()) - set(deps_info.keys())

      # Remove the packages we don't want, simplifying the graph and making
      # it easier for us to crack cycles.
      for pkg in sorted(rm_pkgs):
        this_pkg = deps_map[pkg]
        needs = this_pkg["needs"]
        provides = this_pkg["provides"]
        for dep in needs:
          dep_provides = deps_map[dep]["provides"]
          dep_provides.update(provides)
          dep_provides.discard(pkg)
          dep_provides.discard(dep)
        for target in provides:
          target_needs = deps_map[target]["needs"]
          target_needs.update(needs)
          target_needs.pop(pkg, None)
          target_needs.pop(target, None)
        del deps_map[pkg]

    def PrintCycleBreak(basedep, dep, mycycle):
      """Print details about a cycle that we are planning on breaking.

      We are breaking a cycle where dep needs basedep. mycycle is an
      example cycle which contains dep -> basedep.
      """

      needs = deps_map[dep]["needs"]
      depinfo = needs.get(basedep, "deleted")

      # It's OK to swap install order for blockers, as long as the two
      # packages aren't installed in parallel. If there is a cycle, then
      # we know the packages depend on each other already, so we can drop the
      # blocker safely without printing a warning.
      if depinfo == "blocker":
        return

      # Notify the user that we're breaking a cycle.
      print("Breaking %s -> %s (%s)" % (dep, basedep, depinfo))

      # Show cycle.
      for i in xrange(len(mycycle) - 1):
        pkg1, pkg2 = mycycle[i], mycycle[i+1]
        needs = deps_map[pkg1]["needs"]
        depinfo = needs.get(pkg2, "deleted")
        if pkg1 == dep and pkg2 == basedep:
          depinfo = depinfo + ", deleting"
        print(" %s -> %s (%s)" % (pkg1, pkg2, depinfo))

    def SanitizeTree():
      """Remove circular dependencies.

      We prune all dependencies involved in cycles that go against the emerge
      ordering. This has a nice property: we're guaranteed to merge
      dependencies in the same order that portage does.

      Because we don't treat any dependencies as "soft" unless they're killed
      by a cycle, we pay attention to a larger number of dependencies when
      merging. This hurts performance a bit, but helps reliability.
      """
      start = time.time()
      cycles = FindCycles()
      while cycles:
        for dep, mycycles in cycles.iteritems():
          for basedep, mycycle in mycycles.iteritems():
            if deps_info[basedep]["idx"] >= deps_info[dep]["idx"]:
              if "--quiet" not in emerge.opts:
                PrintCycleBreak(basedep, dep, mycycle)
              del deps_map[dep]["needs"][basedep]
              deps_map[basedep]["provides"].remove(dep)
        cycles = FindCycles()
      seconds = time.time() - start
      if "--quiet" not in emerge.opts and seconds >= 0.1:
        print("Tree sanitized in %dm%.1fs" % (seconds / 60, seconds % 60))

    def FindRecursiveProvides(pkg, seen):
      """Find all nodes that require a particular package.

      Assumes that graph is acyclic.

      Args:
        pkg: Package identifier.
        seen: Nodes that have been visited so far.
      """
      if pkg in seen:
        return
      seen.add(pkg)
      info = deps_map[pkg]
      info["tprovides"] = info["provides"].copy()
      for dep in info["provides"]:
        FindRecursiveProvides(dep, seen)
        info["tprovides"].update(deps_map[dep]["tprovides"])

    ReverseTree(deps_tree)

    # We need to remove unused packages so that we can use the dependency
    # ordering of the install process to show us what cycles to crack.
    RemoveUnusedPackages()
    SanitizeTree()
    seen = set()
    for pkg in deps_map:
      FindRecursiveProvides(pkg, seen)
    return deps_map

  def PrintInstallPlan(self, deps_map):
    """Print an emerge-style install plan.

    The install plan lists what packages we're installing, in order.
    It's useful for understanding what parallel_emerge is doing.

    Args:
      deps_map: The dependency graph.
    """

    def InstallPlanAtNode(target, deps_map):
      nodes = []
      nodes.append(target)
      for dep in deps_map[target]["provides"]:
        del deps_map[dep]["needs"][target]
        if not deps_map[dep]["needs"]:
          nodes.extend(InstallPlanAtNode(dep, deps_map))
      return nodes

    deps_map = copy.deepcopy(deps_map)
    install_plan = []
    plan = set()
    for target, info in deps_map.iteritems():
      if not info["needs"] and target not in plan:
        for item in InstallPlanAtNode(target, deps_map):
          plan.add(item)
          install_plan.append(self.package_db[item])

    for pkg in plan:
      del deps_map[pkg]

    if deps_map:
      print("Cyclic dependencies:", " ".join(deps_map))
      PrintDepsMap(deps_map)
      sys.exit(1)

    self.emerge.depgraph.display(install_plan)


def PrintDepsMap(deps_map):
  """Print dependency graph, for each package list its prerequisites."""
  for i in sorted(deps_map):
    print("%s: (%s) needs" % (i, deps_map[i]["action"]))
    needs = deps_map[i]["needs"]
    for j in sorted(needs):
      print(" %s" % (j))
    if not needs:
      print(" no dependencies")


class EmergeJobState(object):
  """Structure describing the EmergeJobState."""

  __slots__ = ["done", "filename", "last_notify_timestamp", "last_output_seek",
               "last_output_timestamp", "pkgname", "retcode", "start_timestamp",
               "target", "fetch_only", "unpack_only"]

  def __init__(self, target, pkgname, done, filename, start_timestamp,
               retcode=None, fetch_only=False, unpack_only=False):

    # The full name of the target we're building (e.g.
    # virtual/target-os-1-r60)
    self.target = target

    # The short name of the target we're building (e.g. target-os-1-r60)
    self.pkgname = pkgname

    # Whether the job is done. (True if the job is done; false otherwise.)
    self.done = done

    # The filename where output is currently stored.
    self.filename = filename

    # The timestamp of the last time we printed the name of the log file. We
    # print this at the beginning of the job, so this starts at
    # start_timestamp.
    self.last_notify_timestamp = start_timestamp

    # The location (in bytes) of the end of the last complete line we printed.
    # This starts off at zero. We use this to jump to the right place when we
    # print output from the same ebuild multiple times.
    self.last_output_seek = 0

    # The timestamp of the last time we printed output. Since we haven't
    # printed output yet, this starts at zero.
    self.last_output_timestamp = 0

    # The return code of our job, if the job is actually finished.
    self.retcode = retcode

    # Was this just a fetch job?
    self.fetch_only = fetch_only

    # The timestamp when our job started.
    self.start_timestamp = start_timestamp

    # No emerge, only unpack packages.
    self.unpack_only = unpack_only


def KillHandler(_signum, _frame):
  # Kill self and all subprocesses.
  os.killpg(0, signal.SIGKILL)


def SetupWorkerSignals():
  def ExitHandler(_signum, _frame):
    # Set KILLED flag.
    KILLED.set()

    # Remove our signal handlers so we don't get called recursively.
    signal.signal(signal.SIGINT, KillHandler)
    signal.signal(signal.SIGTERM, KillHandler)

  # Ensure that we exit quietly and cleanly, if possible, when we receive
  # SIGTERM or SIGINT signals. By default, when the user hits CTRL-C, all
  # of the child processes will print details about KeyboardInterrupt
  # exceptions, which isn't very helpful.
  signal.signal(signal.SIGINT, ExitHandler)
  signal.signal(signal.SIGTERM, ExitHandler)


def EmergeProcess(output, target, *args, **kwargs):
  """Merge a package in a subprocess.

  Args:
    output: Temporary file to write output.
    target: The package we'll be processing (for display purposes).
    *args: Arguments to pass to Scheduler constructor.
    **kwargs: Keyword arguments to pass to Scheduler constructor.

  Returns:
    The exit code returned by the subprocess.
  """
  event = cros_event.newEvent(task_name="EmergePackage",
                              name=target)
  pid = os.fork()
  if pid == 0:
    try:
      proctitle.settitle('EmergeProcess', target)

      # Sanity checks.
      if sys.stdout.fileno() != 1:
        raise Exception("sys.stdout.fileno() != 1")
      if sys.stderr.fileno() != 2:
        raise Exception("sys.stderr.fileno() != 2")

      # - Redirect 1 (stdout) and 2 (stderr) at our temporary file.
      # - Redirect 0 to point at sys.stdin. In this case, sys.stdin
      #   points at a file reading os.devnull, because multiprocessing mucks
      #   with sys.stdin.
      # - Leave the sys.stdin and output filehandles alone.
      fd_pipes = {0: sys.stdin.fileno(),
                  1: output.fileno(),
                  2: output.fileno(),
                  sys.stdin.fileno(): sys.stdin.fileno(),
                  output.fileno(): output.fileno()}
      # pylint: disable=W0212
      portage.process._setup_pipes(fd_pipes, close_fds=False)

      # Portage doesn't like when sys.stdin.fileno() != 0, so point sys.stdin
      # at the filehandle we just created in _setup_pipes.
      if sys.stdin.fileno() != 0:
        sys.__stdin__ = sys.stdin = os.fdopen(0, "r")

      scheduler = Scheduler(*args, **kwargs)

      # Enable blocker handling even though we're in --nodeps mode. This
      # allows us to unmerge the blocker after we've merged the replacement.
      scheduler._opts_ignore_blockers = frozenset()

      # Actually do the merge.
      with event:
        retval = scheduler.merge()
        if retval != 0:
          event.fail(message="non-zero value returned")

    # We catch all exceptions here (including SystemExit, KeyboardInterrupt,
    # etc) so as to ensure that we don't confuse the multiprocessing module,
    # which expects that all forked children exit with os._exit().
    # pylint: disable=W0702
    except:
      traceback.print_exc(file=output)
      retval = 1
    sys.stdout.flush()
    sys.stderr.flush()
    output.flush()
    # pylint: disable=W0212
    os._exit(retval)
  else:
    # Return the exit code of the subprocess.
    return os.waitpid(pid, 0)[1]


def UnpackPackage(pkg_state):
  """Unpacks package described by pkg_state.

  Args:
    pkg_state: EmergeJobState object describing target.

  Returns:
    Exit code returned by subprocess.
  """
  pkgdir = os.environ.get("PKGDIR",
                          os.path.join(os.environ["SYSROOT"], "packages"))
  root = os.environ.get("ROOT", os.environ["SYSROOT"])
  path = os.path.join(pkgdir, pkg_state.target + ".tbz2")
  comp = cros_build_lib.FindCompressor(cros_build_lib.COMP_BZIP2)
  cmd = [comp, "-dc"]
  if comp.endswith("pbzip2"):
    cmd.append("--ignore-trailing-garbage=1")
  cmd.append(path)
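  # Illustrative sketch of the resulting command (the exact compressor binary
  # and package path depend on the environment):
  #   ["pbzip2", "-dc", "--ignore-trailing-garbage=1",
  #    "/build/<board>/packages/<category>/<package>-<version>.tbz2"]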

  with cros_event.newEvent(task_name="UnpackPackage", **pkg_state) as event:
    result = cros_build_lib.RunCommand(cmd, cwd=root, stdout_to_pipe=True,
                                       print_cmd=False, error_code_ok=True)

    # If we were not successful, return now and don't attempt untar.
    if result.returncode != 0:
      event.fail("error decompressing: returned {}".format(result.returncode))
      return result.returncode

    cmd = ["sudo", "tar", "-xf", "-", "-C", root]

    result = cros_build_lib.RunCommand(cmd, cwd=root, input=result.output,
                                       print_cmd=False, error_code_ok=True)
    if result.returncode != 0:
      event.fail("error extracting: returned {}".format(result.returncode))

  return result.returncode


def EmergeWorker(task_queue, job_queue, emerge, package_db, fetch_only=False,
                 unpack_only=False):
  """This worker emerges any packages given to it on the task_queue.

  Args:
    task_queue: The queue of tasks for this worker to do.
    job_queue: The queue of results from the worker.
    emerge: An EmergeData() object.
    package_db: A dict, mapping package ids to portage Package objects.
    fetch_only: A bool, indicating if we should just fetch the target.
    unpack_only: A bool, indicating if we should just unpack the target.

  It expects package identifiers to be passed to it via task_queue. When
  a task is started, it pushes the (target, filename) to the started_queue.
  The output is stored in filename. When a merge starts or finishes, we push
  EmergeJobState objects to the job_queue.
  """
  if fetch_only:
    mode = 'fetch'
  elif unpack_only:
    mode = 'unpack'
  else:
    mode = 'emerge'
  proctitle.settitle('EmergeWorker', mode, '[idle]')

  SetupWorkerSignals()
  settings, trees, mtimedb = emerge.settings, emerge.trees, emerge.mtimedb

  # Disable flushing of caches to save on I/O.
  root = emerge.settings["ROOT"]
  vardb = emerge.trees[root]["vartree"].dbapi
  vardb._flush_cache_enabled = False  # pylint: disable=protected-access
  bindb = emerge.trees[root]["bintree"].dbapi
  # Might be a set, might be a list, might be None; no clue, just use shallow
  # copy to ensure we can roll it back.
  # pylint: disable=W0212
  original_remotepkgs = copy.copy(bindb.bintree._remotepkgs)

  opts, spinner = emerge.opts, emerge.spinner
  opts["--nodeps"] = True
  if fetch_only:
    opts["--fetchonly"] = True

  while True:
    # Wait for a new item to show up on the queue. This is a blocking wait,
    # so if there's nothing to do, we just sit here.
    pkg_state = task_queue.get()
    if pkg_state is None:
      # If target is None, this means that the main thread wants us to quit.
      # The other workers need to exit too, so we'll push the message back on
      # to the queue so they'll get it too.
      task_queue.put(None)
      return
    if KILLED.is_set():
      return

    target = pkg_state.target
    proctitle.settitle('EmergeWorker', mode, target)

    db_pkg = package_db[target]

    if db_pkg.type_name == "binary":
      if not fetch_only and pkg_state.fetched_successfully:
        # Ensure portage doesn't think our pkg is remote- else it'll force
        # a redownload of it (even if the on-disk file is fine). In-memory
        # caching basically, implemented dumbly.
        bindb.bintree._remotepkgs = None
      else:
        bindb.bintree._remotepkgs = original_remotepkgs

    db_pkg.root_config = emerge.root_config
    install_list = [db_pkg]
    pkgname = db_pkg.pf
    output = tempfile.NamedTemporaryFile(prefix=pkgname + "-", delete=False)
    os.chmod(output.name, 0o644)  # Octal mode, i.e. rw-r--r--.
    start_timestamp = time.time()
    job = EmergeJobState(target, pkgname, False, output.name, start_timestamp,
                         fetch_only=fetch_only, unpack_only=unpack_only)
    job_queue.put(job)
    if "--pretend" in opts:
      retcode = 0
    else:
      try:
        emerge.scheduler_graph.mergelist = install_list
        if unpack_only:
          retcode = UnpackPackage(pkg_state)
        else:
          retcode = EmergeProcess(output, target, settings, trees, mtimedb,
                                  opts, spinner, favorites=emerge.favorites,
                                  graph_config=emerge.scheduler_graph)
      except Exception:
        traceback.print_exc(file=output)
        retcode = 1
      output.close()

    if KILLED.is_set():
      return

    job = EmergeJobState(target, pkgname, True, output.name, start_timestamp,
                         retcode, fetch_only=fetch_only,
                         unpack_only=unpack_only)
    job_queue.put(job)

    # Set the title back to idle as the multiprocess pool won't destroy us;
    # when another job comes up, it'll re-use this process.
    proctitle.settitle('EmergeWorker', mode, '[idle]')


class LinePrinter(object):
  """Helper object to print a single line."""

  def __init__(self, line):
    self.line = line

  def Print(self, _seek_locations):
    print(self.line)


class JobPrinter(object):
  """Helper object to print output of a job."""

  def __init__(self, job, unlink=False):
    """Print output of job.

    If unlink is True, unlink the job output file when done.
    """
    self.current_time = time.time()
    self.job = job
    self.unlink = unlink

  def Print(self, seek_locations):

    job = self.job

    # Calculate how long the job has been running.
    seconds = self.current_time - job.start_timestamp

    # Note that we've printed out the job so far.
    job.last_output_timestamp = self.current_time

    # Note that we're starting the job
    info = "job %s (%dm%.1fs)" % (job.pkgname, seconds / 60, seconds % 60)
    last_output_seek = seek_locations.get(job.filename, 0)
    if last_output_seek:
      print("=== Continue output for %s ===" % info)
    else:
      print("=== Start output for %s ===" % info)

    # Print actual output from job
    f = codecs.open(job.filename, encoding='utf-8', errors='replace')
    f.seek(last_output_seek)
    prefix = job.pkgname + ":"
    for line in f:

      # Save off our position in the file
      if line and line[-1] == "\n":
        last_output_seek = f.tell()
        line = line[:-1]

      # Print our line
      print(prefix, line.encode('utf-8', 'replace'))
    f.close()

    # Save our last spot in the file so that we don't print out the same
    # location twice.
    seek_locations[job.filename] = last_output_seek

    # Note end of output section
    if job.done:
      print("=== Complete: %s ===" % info)
    else:
      print("=== Still running: %s ===" % info)

    if self.unlink:
      os.unlink(job.filename)


def PrintWorker(queue):
  """A worker that prints stuff to the screen as requested."""
  proctitle.settitle('PrintWorker')

  def ExitHandler(_signum, _frame):
    # Set KILLED flag.
    KILLED.set()

    # Switch to default signal handlers so that we'll die after two signals.
    signal.signal(signal.SIGINT, KillHandler)
    signal.signal(signal.SIGTERM, KillHandler)

  # Don't exit on the first SIGINT / SIGTERM, because the parent worker will
  # handle it and tell us when we need to exit.
  signal.signal(signal.SIGINT, ExitHandler)
  signal.signal(signal.SIGTERM, ExitHandler)

  # seek_locations is a map indicating the position we are at in each file.
  # It starts off empty, but is set by the various Print jobs as we go along
  # to indicate where we left off in each file.
  seek_locations = {}
  while True:
    try:
      job = queue.get()
      if job:
        job.Print(seek_locations)
        sys.stdout.flush()
      else:
        break
    except IOError as ex:
      if ex.errno == errno.EINTR:
        # Looks like we received a signal. Keep printing.
        continue
      raise


class TargetState(object):
Chris Ching5fcbd622016-11-28 09:22:15 -07001244  """Per-target scheduling state: fetch/build progress and priority score."""
Brian Harring867e2362012-03-17 04:05:17 -07001245
Brian Harring0be85c62012-03-17 19:52:12 -07001246 __slots__ = ("target", "info", "score", "prefetched", "fetched_successfully")
Brian Harring867e2362012-03-17 04:05:17 -07001247
David James321490a2012-12-17 12:05:56 -08001248 def __init__(self, target, info):
Brian Harring867e2362012-03-17 04:05:17 -07001249 self.target, self.info = target, info
Brian Harring0be85c62012-03-17 19:52:12 -07001250 self.fetched_successfully = False
1251 self.prefetched = False
David James321490a2012-12-17 12:05:56 -08001252 self.score = None
Brian Harring867e2362012-03-17 04:05:17 -07001253 self.update_score()
1254
1255 def __cmp__(self, other):
1256 return cmp(self.score, other.score)
1257
1258 def update_score(self):
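    # heapq is a min-heap, so lower tuples are scheduled first: targets that
    # provide more packages, have fewer unmet needs, and are already binaries
    # win; idx and target act as stable tie-breakers.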
1259 self.score = (
1260 -len(self.info["tprovides"]),
Brian Harring0be85c62012-03-17 19:52:12 -07001261 len(self.info["needs"]),
Brian Harring11c5eeb2012-03-18 11:02:39 -07001262 not self.info["binary"],
Brian Harring867e2362012-03-17 04:05:17 -07001263 -len(self.info["provides"]),
1264 self.info["idx"],
1265 self.target,
1266 )
1267
1268
1269class ScoredHeap(object):
Don Garrett25f309a2014-03-19 14:02:12 -07001270 """Implementation of a general purpose scored heap."""
Brian Harring867e2362012-03-17 04:05:17 -07001271
Brian Harring0be85c62012-03-17 19:52:12 -07001272 __slots__ = ("heap", "_heap_set")
1273
Brian Harring867e2362012-03-17 04:05:17 -07001274 def __init__(self, initial=()):
Brian Harring0be85c62012-03-17 19:52:12 -07001275 self.heap = list()
1276 self._heap_set = set()
1277 if initial:
1278 self.multi_put(initial)
Brian Harring867e2362012-03-17 04:05:17 -07001279
1280 def get(self):
Brian Harring0be85c62012-03-17 19:52:12 -07001281 item = heapq.heappop(self.heap)
1282 self._heap_set.remove(item.target)
1283 return item
Brian Harring867e2362012-03-17 04:05:17 -07001284
Brian Harring0be85c62012-03-17 19:52:12 -07001285 def put(self, item):
1286 if not isinstance(item, TargetState):
1287 raise ValueError("Item %r isn't a TargetState" % (item,))
1288 heapq.heappush(self.heap, item)
1289 self._heap_set.add(item.target)
Brian Harring867e2362012-03-17 04:05:17 -07001290
Brian Harring0be85c62012-03-17 19:52:12 -07001291 def multi_put(self, sequence):
1292 sequence = list(sequence)
1293 self.heap.extend(sequence)
1294 self._heap_set.update(x.target for x in sequence)
Brian Harring867e2362012-03-17 04:05:17 -07001295 self.sort()
1296
David James5c9996d2012-03-24 10:50:46 -07001297 def sort(self):
1298 heapq.heapify(self.heap)
1299
Brian Harring0be85c62012-03-17 19:52:12 -07001300 def __contains__(self, target):
1301 return target in self._heap_set
1302
1303 def __nonzero__(self):
1304 return bool(self.heap)
1305
Brian Harring867e2362012-03-17 04:05:17 -07001306 def __len__(self):
1307 return len(self.heap)
1308
1309
David Jamesfcb70ef2011-02-02 16:02:30 -08001310class EmergeQueue(object):
1311 """Class to schedule emerge jobs according to a dependency graph."""
1312
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001313 def __init__(self, deps_map, emerge, package_db, show_output, unpack_only,
1314 max_retries):
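    """Set up worker pools and scheduling state for the dependency graph."""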
David Jamesfcb70ef2011-02-02 16:02:30 -08001315 # Store the dependency graph.
1316 self._deps_map = deps_map
Brian Harring0be85c62012-03-17 19:52:12 -07001317 self._state_map = {}
David Jamesfcb70ef2011-02-02 16:02:30 -08001318 # Initialize the running queue to empty
Brian Harring0be85c62012-03-17 19:52:12 -07001319 self._build_jobs = {}
1320 self._build_ready = ScoredHeap()
1321 self._fetch_jobs = {}
1322 self._fetch_ready = ScoredHeap()
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001323 self._unpack_jobs = {}
1324 self._unpack_ready = ScoredHeap()
David Jamesfcb70ef2011-02-02 16:02:30 -08001325 # List of total package installs represented in deps_map.
1326 install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"]
1327 self._total_jobs = len(install_jobs)
1328 self._show_output = show_output
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001329 self._unpack_only = unpack_only
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001330 self._max_retries = max_retries
David Jamesfcb70ef2011-02-02 16:02:30 -08001331
1332 if "--pretend" in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001333 print("Skipping merge because of --pretend mode.")
David Jamesfcb70ef2011-02-02 16:02:30 -08001334 sys.exit(0)
1335
David Jamesaaf49e42014-04-24 09:40:05 -07001336 # Set up a session so we can easily terminate all children.
1337 self._SetupSession()
David James7358d032011-05-19 10:40:03 -07001338
David Jamesfcb70ef2011-02-02 16:02:30 -08001339 # Setup scheduler graph object. This is used by the child processes
1340 # to help schedule jobs.
1341 emerge.scheduler_graph = emerge.depgraph.schedulerGraph()
1342
1343 # Calculate how many jobs we can run in parallel. We don't want to pass
1344 # the --jobs flag over to emerge itself, because that'll tell emerge to
1345 # hide its output, and said output is quite useful for debugging hung
1346 # jobs.
1347 procs = min(self._total_jobs,
1348 emerge.opts.pop("--jobs", multiprocessing.cpu_count()))
Nam T. Nguyenf7098b32015-05-08 11:04:48 -07001349 self._build_procs = self._unpack_procs = max(1, procs)
1350 # Fetch is IO bound, we can use more processes.
1351 self._fetch_procs = max(4, procs)
David James8c7e5e32011-06-28 11:26:03 -07001352 self._load_avg = emerge.opts.pop("--load-average", None)
David Jamesfcb70ef2011-02-02 16:02:30 -08001353 self._job_queue = multiprocessing.Queue()
1354 self._print_queue = multiprocessing.Queue()
Brian Harring0be85c62012-03-17 19:52:12 -07001355
1356 self._fetch_queue = multiprocessing.Queue()
1357 args = (self._fetch_queue, self._job_queue, emerge, package_db, True)
1358 self._fetch_pool = multiprocessing.Pool(self._fetch_procs, EmergeWorker,
1359 args)
1360
1361 self._build_queue = multiprocessing.Queue()
1362 args = (self._build_queue, self._job_queue, emerge, package_db)
1363 self._build_pool = multiprocessing.Pool(self._build_procs, EmergeWorker,
1364 args)
1365
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001366 if self._unpack_only:
1367 # Unpack pool only required on unpack_only jobs.
1368 self._unpack_queue = multiprocessing.Queue()
1369 args = (self._unpack_queue, self._job_queue, emerge, package_db, False,
1370 True)
1371 self._unpack_pool = multiprocessing.Pool(self._unpack_procs, EmergeWorker,
1372 args)
1373
David Jamesfcb70ef2011-02-02 16:02:30 -08001374 self._print_worker = multiprocessing.Process(target=PrintWorker,
1375 args=[self._print_queue])
1376 self._print_worker.start()
1377
1378 # Initialize the failed queue to empty.
1379 self._retry_queue = []
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001380 self._failed_count = dict()
David Jamesfcb70ef2011-02-02 16:02:30 -08001381
David Jamesfcb70ef2011-02-02 16:02:30 -08001382 # Setup an exit handler so that we print nice messages if we are
1383 # terminated.
1384 self._SetupExitHandler()
1385
1386 # Schedule our jobs.
Brian Harring0be85c62012-03-17 19:52:12 -07001387 self._state_map.update(
1388 (pkg, TargetState(pkg, data)) for pkg, data in deps_map.iteritems())
1389 self._fetch_ready.multi_put(self._state_map.itervalues())
David Jamesfcb70ef2011-02-02 16:02:30 -08001390
David Jamesaaf49e42014-04-24 09:40:05 -07001391 def _SetupSession(self):
1392 """Set up a session so we can easily terminate all children."""
1393 # When we call os.setsid(), this sets up a session / process group for this
1394 # process and all children. These session groups are needed so that we can
1395 # easily kill all children (including processes launched by emerge) before
1396 # we exit.
1397 #
1398 # One unfortunate side effect of os.setsid() is that it blocks CTRL-C from
1399 # being received. To work around this, we only call os.setsid() in a forked
1400 # process, so that the parent can still watch for CTRL-C. The parent will
1401 # just sit around, watching for signals and propagating them to the child,
1402 # until the child exits.
1403 #
1404 # TODO(davidjames): It would be nice if we could replace this with cgroups.
1405 pid = os.fork()
1406 if pid == 0:
1407 os.setsid()
1408 else:
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001409 proctitle.settitle('SessionManager')
1410
David Jamesaaf49e42014-04-24 09:40:05 -07001411 def PropagateToChildren(signum, _frame):
1412 # Just propagate the signals down to the child. We'll exit when the
1413 # child does.
1414 try:
1415 os.kill(pid, signum)
1416 except OSError as ex:
1417 if ex.errno != errno.ESRCH:
1418 raise
1419 signal.signal(signal.SIGINT, PropagateToChildren)
1420 signal.signal(signal.SIGTERM, PropagateToChildren)
1421
1422 def StopGroup(_signum, _frame):
1423 # When we get stopped, stop the children.
1424 try:
1425 os.killpg(pid, signal.SIGSTOP)
1426 os.kill(0, signal.SIGSTOP)
1427 except OSError as ex:
1428 if ex.errno != errno.ESRCH:
1429 raise
1430 signal.signal(signal.SIGTSTP, StopGroup)
1431
1432 def ContinueGroup(_signum, _frame):
1433 # Launch the children again after being stopped.
1434 try:
1435 os.killpg(pid, signal.SIGCONT)
1436 except OSError as ex:
1437 if ex.errno != errno.ESRCH:
1438 raise
1439 signal.signal(signal.SIGCONT, ContinueGroup)
1440
1441 # Loop until the children exit. We exit with os._exit to be sure we
1442 # don't run any finalizers (those will be run by the child process.)
1443 # pylint: disable=W0212
1444 while True:
1445 try:
1446 # Wait for the process to exit. When it does, exit with the return
1447 # value of the subprocess.
Mike Frysingere2d8f0d2014-11-01 13:09:26 -04001448 os._exit(process_util.GetExitStatus(os.waitpid(pid, 0)[1]))
David Jamesaaf49e42014-04-24 09:40:05 -07001449 except OSError as ex:
1450 if ex.errno == errno.EINTR:
1451 continue
1452 traceback.print_exc()
1453 os._exit(1)
1454 except BaseException:
1455 traceback.print_exc()
1456 os._exit(1)
1457
David Jamesfcb70ef2011-02-02 16:02:30 -08001458 def _SetupExitHandler(self):
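    """Install SIGINT/SIGTERM handlers that dump job logs before exiting."""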
1459
David James321490a2012-12-17 12:05:56 -08001460 def ExitHandler(signum, _frame):
David James7358d032011-05-19 10:40:03 -07001461 # Set KILLED flag.
1462 KILLED.set()
David Jamesfcb70ef2011-02-02 16:02:30 -08001463
1464 # Kill our signal handlers so we don't get called recursively
David James7358d032011-05-19 10:40:03 -07001465 signal.signal(signal.SIGINT, KillHandler)
1466 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -08001467
1468 # Print our current job status
Brian Harring0be85c62012-03-17 19:52:12 -07001469 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001470 if job:
1471 self._print_queue.put(JobPrinter(job, unlink=True))
1472
1473 # Notify the user that we are exiting
1474 self._Print("Exiting on signal %s" % signum)
David James7358d032011-05-19 10:40:03 -07001475 self._print_queue.put(None)
1476 self._print_worker.join()
David Jamesfcb70ef2011-02-02 16:02:30 -08001477
1478 # Kill child threads, then exit.
David James7358d032011-05-19 10:40:03 -07001479 os.killpg(0, signal.SIGKILL)
David Jamesfcb70ef2011-02-02 16:02:30 -08001480 sys.exit(1)
1481
1482 # Print out job status when we are killed
1483 signal.signal(signal.SIGINT, ExitHandler)
1484 signal.signal(signal.SIGTERM, ExitHandler)
1485
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001486 def _ScheduleUnpack(self, pkg_state):
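    """Queue pkg_state for the unpack workers and track it as in flight."""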
1487 self._unpack_jobs[pkg_state.target] = None
1488 self._unpack_queue.put(pkg_state)
1489
Brian Harring0be85c62012-03-17 19:52:12 -07001490 def _Schedule(self, pkg_state):
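    """Kick off a build of pkg_state if it is marked for merge.

    Returns True when a build job was queued to the workers.
    """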
David Jamesfcb70ef2011-02-02 16:02:30 -08001491    # We maintain a tree of all deps; if this doesn't need
David James8c7e5e32011-06-28 11:26:03 -07001492    # to be installed, just free up its children and continue.
David Jamesfcb70ef2011-02-02 16:02:30 -08001493 # It is possible to reinstall deps of deps, without reinstalling
1494 # first level deps, like so:
Mike Frysingerfd969312014-04-02 22:16:42 -04001495 # virtual/target-os (merge) -> eselect (nomerge) -> python (merge)
Brian Harring0be85c62012-03-17 19:52:12 -07001496 this_pkg = pkg_state.info
1497 target = pkg_state.target
1498 if pkg_state.info is not None:
1499 if this_pkg["action"] == "nomerge":
1500 self._Finish(target)
1501 elif target not in self._build_jobs:
1502 # Kick off the build if it's marked to be built.
1503 self._build_jobs[target] = None
1504 self._build_queue.put(pkg_state)
1505 return True
David Jamesfcb70ef2011-02-02 16:02:30 -08001506
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001507 def _ScheduleLoop(self, unpack_only=False):
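    """Move ready targets onto idle workers, throttled by --load-average."""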
1508 if unpack_only:
1509 ready_queue = self._unpack_ready
1510 jobs_queue = self._unpack_jobs
1511 procs = self._unpack_procs
1512 else:
1513 ready_queue = self._build_ready
1514 jobs_queue = self._build_jobs
1515 procs = self._build_procs
1516
David James8c7e5e32011-06-28 11:26:03 -07001517 # If the current load exceeds our desired load average, don't schedule
1518 # more than one job.
1519 if self._load_avg and os.getloadavg()[0] > self._load_avg:
1520 needed_jobs = 1
1521 else:
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001522 needed_jobs = procs
David James8c7e5e32011-06-28 11:26:03 -07001523
1524 # Schedule more jobs.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001525 while ready_queue and len(jobs_queue) < needed_jobs:
1526 state = ready_queue.get()
1527 if unpack_only:
1528 self._ScheduleUnpack(state)
1529 else:
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001530 if state.target not in self._failed_count:
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001531 self._Schedule(state)
David Jamesfcb70ef2011-02-02 16:02:30 -08001532
1533 def _Print(self, line):
1534 """Print a single line."""
1535 self._print_queue.put(LinePrinter(line))
1536
1537 def _Status(self):
1538 """Print status."""
1539 current_time = time.time()
Aviv Keshet3b381682015-11-12 13:15:06 -08001540 current_time_struct = time.localtime(current_time)
David Jamesfcb70ef2011-02-02 16:02:30 -08001541 no_output = True
1542
1543 # Print interim output every minute if --show-output is used. Otherwise,
1544 # print notifications about running packages every 2 minutes, and print
1545 # full output for jobs that have been running for 60 minutes or more.
1546 if self._show_output:
1547 interval = 60
1548 notify_interval = 0
1549 else:
1550 interval = 60 * 60
1551 notify_interval = 60 * 2
David James321490a2012-12-17 12:05:56 -08001552 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001553 if job:
1554 last_timestamp = max(job.start_timestamp, job.last_output_timestamp)
1555 if last_timestamp + interval < current_time:
1556 self._print_queue.put(JobPrinter(job))
1557 job.last_output_timestamp = current_time
1558 no_output = False
1559 elif (notify_interval and
1560 job.last_notify_timestamp + notify_interval < current_time):
1561 job_seconds = current_time - job.start_timestamp
1562 args = (job.pkgname, job_seconds / 60, job_seconds % 60, job.filename)
1563 info = "Still building %s (%dm%.1fs). Logs in %s" % args
1564 job.last_notify_timestamp = current_time
1565 self._Print(info)
1566 no_output = False
1567
1568 # If we haven't printed any messages yet, print a general status message
1569 # here.
1570 if no_output:
1571 seconds = current_time - GLOBAL_START
Brian Harring0be85c62012-03-17 19:52:12 -07001572 fjobs, fready = len(self._fetch_jobs), len(self._fetch_ready)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001573 ujobs, uready = len(self._unpack_jobs), len(self._unpack_ready)
Brian Harring0be85c62012-03-17 19:52:12 -07001574 bjobs, bready = len(self._build_jobs), len(self._build_ready)
1575 retries = len(self._retry_queue)
1576 pending = max(0, len(self._deps_map) - fjobs - bjobs)
1577 line = "Pending %s/%s, " % (pending, self._total_jobs)
1578 if fjobs or fready:
1579 line += "Fetching %s/%s, " % (fjobs, fready + fjobs)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001580 if ujobs or uready:
1581 line += "Unpacking %s/%s, " % (ujobs, uready + ujobs)
Brian Harring0be85c62012-03-17 19:52:12 -07001582 if bjobs or bready or retries:
1583 line += "Building %s/%s, " % (bjobs, bready + bjobs)
1584 if retries:
1585 line += "Retrying %s, " % (retries,)
Mike Frysingerd6e2df02014-11-26 02:55:04 -05001586 load = " ".join(str(x) for x in os.getloadavg())
Aviv Keshet3b381682015-11-12 13:15:06 -08001587 line += ("[Time %s | Elapsed %dm%.1fs | Load %s]" % (
1588 time.strftime('%H:%M:%S', current_time_struct), seconds / 60,
1589 seconds % 60, load))
Brian Harring0be85c62012-03-17 19:52:12 -07001590 self._Print(line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001591
1592 def _Finish(self, target):
David James8c7e5e32011-06-28 11:26:03 -07001593 """Mark a target as completed and unblock dependencies."""
1594 this_pkg = self._deps_map[target]
1595 if this_pkg["needs"] and this_pkg["nodeps"]:
1596 # We got installed, but our deps have not been installed yet. Dependent
1597 # packages should only be installed when our needs have been fully met.
1598 this_pkg["action"] = "nomerge"
1599 else:
David James8c7e5e32011-06-28 11:26:03 -07001600 for dep in this_pkg["provides"]:
1601 dep_pkg = self._deps_map[dep]
Brian Harring0be85c62012-03-17 19:52:12 -07001602 state = self._state_map[dep]
David James8c7e5e32011-06-28 11:26:03 -07001603 del dep_pkg["needs"][target]
Brian Harring0be85c62012-03-17 19:52:12 -07001604 state.update_score()
1605 if not state.prefetched:
1606 if dep in self._fetch_ready:
1607 # If it's not currently being fetched, update the prioritization
1608 self._fetch_ready.sort()
1609 elif not dep_pkg["needs"]:
David James8c7e5e32011-06-28 11:26:03 -07001610 if dep_pkg["nodeps"] and dep_pkg["action"] == "nomerge":
1611 self._Finish(dep)
1612 else:
Brian Harring0be85c62012-03-17 19:52:12 -07001613 self._build_ready.put(self._state_map[dep])
David James8c7e5e32011-06-28 11:26:03 -07001614 self._deps_map.pop(target)
David Jamesfcb70ef2011-02-02 16:02:30 -08001615
1616 def _Retry(self):
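    """Re-schedule failed packages, starting at most one retry right now."""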
David James8c7e5e32011-06-28 11:26:03 -07001617 while self._retry_queue:
Brian Harring0be85c62012-03-17 19:52:12 -07001618 state = self._retry_queue.pop(0)
1619 if self._Schedule(state):
1620 self._Print("Retrying emerge of %s." % state.target)
David James8c7e5e32011-06-28 11:26:03 -07001621 break
David Jamesfcb70ef2011-02-02 16:02:30 -08001622
Brian Harringa43f5952012-04-12 01:19:34 -07001623 def _Shutdown(self):
David Jamesfcb70ef2011-02-02 16:02:30 -08001624 # Tell emerge workers to exit. They all exit when 'None' is pushed
1625 # to the queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001626
Brian Harringa43f5952012-04-12 01:19:34 -07001627    # Shut down the workers first, then the job queue (which is how they feed
1628    # results back), and finally the print queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001629
Brian Harringa43f5952012-04-12 01:19:34 -07001630 def _stop(queue, pool):
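      # Send the None sentinel so the pool's workers exit, wait for them, and
      # then terminate the pool unconditionally to clean up anything left.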
1631 if pool is None:
1632 return
1633 try:
1634 queue.put(None)
1635 pool.close()
1636 pool.join()
1637 finally:
1638 pool.terminate()
Brian Harring0be85c62012-03-17 19:52:12 -07001639
Brian Harringa43f5952012-04-12 01:19:34 -07001640 _stop(self._fetch_queue, self._fetch_pool)
1641 self._fetch_queue = self._fetch_pool = None
Brian Harring0be85c62012-03-17 19:52:12 -07001642
Brian Harringa43f5952012-04-12 01:19:34 -07001643 _stop(self._build_queue, self._build_pool)
1644 self._build_queue = self._build_pool = None
1645
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001646 if self._unpack_only:
1647 _stop(self._unpack_queue, self._unpack_pool)
1648 self._unpack_queue = self._unpack_pool = None
1649
Brian Harringa43f5952012-04-12 01:19:34 -07001650 if self._job_queue is not None:
1651 self._job_queue.close()
1652 self._job_queue = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001653
1654 # Now that our workers are finished, we can kill the print queue.
Brian Harringa43f5952012-04-12 01:19:34 -07001655 if self._print_worker is not None:
1656 try:
1657 self._print_queue.put(None)
1658 self._print_queue.close()
1659 self._print_worker.join()
1660 finally:
1661 self._print_worker.terminate()
1662 self._print_queue = self._print_worker = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001663
1664 def Run(self):
1665 """Run through the scheduled ebuilds.
1666
1667 Keep running so long as we have uninstalled packages in the
1668 dependency graph to merge.
1669 """
Brian Harringa43f5952012-04-12 01:19:34 -07001670 if not self._deps_map:
1671 return
1672
Brian Harring0be85c62012-03-17 19:52:12 -07001673 # Start the fetchers.
1674 for _ in xrange(min(self._fetch_procs, len(self._fetch_ready))):
1675 state = self._fetch_ready.get()
1676 self._fetch_jobs[state.target] = None
1677 self._fetch_queue.put(state)
1678
1679 # Print an update, then get going.
1680 self._Status()
1681
David Jamesfcb70ef2011-02-02 16:02:30 -08001682 while self._deps_map:
1683 # Check here that we are actually waiting for something.
Brian Harring0be85c62012-03-17 19:52:12 -07001684 if (self._build_queue.empty() and
David Jamesfcb70ef2011-02-02 16:02:30 -08001685 self._job_queue.empty() and
Brian Harring0be85c62012-03-17 19:52:12 -07001686 not self._fetch_jobs and
1687 not self._fetch_ready and
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001688 not self._unpack_jobs and
1689 not self._unpack_ready and
Brian Harring0be85c62012-03-17 19:52:12 -07001690 not self._build_jobs and
1691 not self._build_ready and
David Jamesfcb70ef2011-02-02 16:02:30 -08001692 self._deps_map):
1693 # If we have failed on a package, retry it now.
1694 if self._retry_queue:
1695 self._Retry()
1696 else:
David Jamesfcb70ef2011-02-02 16:02:30 -08001697 # Tell the user why we're exiting.
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001698 if self._failed_count:
1699 print('Packages failed:\n\t%s' %
1700 '\n\t'.join(self._failed_count.iterkeys()))
David James0eae23e2012-07-03 15:04:25 -07001701 status_file = os.environ.get("PARALLEL_EMERGE_STATUS_FILE")
1702 if status_file:
David James321490a2012-12-17 12:05:56 -08001703 failed_pkgs = set(portage.versions.cpv_getkey(x)
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001704 for x in self._failed_count.iterkeys())
David James0eae23e2012-07-03 15:04:25 -07001705 with open(status_file, "a") as f:
1706 f.write("%s\n" % " ".join(failed_pkgs))
David Jamesfcb70ef2011-02-02 16:02:30 -08001707 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001708 print("Deadlock! Circular dependencies!")
David Jamesfcb70ef2011-02-02 16:02:30 -08001709 sys.exit(1)
1710
David James321490a2012-12-17 12:05:56 -08001711 for _ in xrange(12):
David Jamesa74289a2011-08-12 10:41:24 -07001712 try:
1713 job = self._job_queue.get(timeout=5)
1714 break
1715 except Queue.Empty:
1716 # Check if any more jobs can be scheduled.
1717 self._ScheduleLoop()
1718 else:
Brian Harring706747c2012-03-16 03:04:31 -07001719 # Print an update every 60 seconds.
David Jamesfcb70ef2011-02-02 16:02:30 -08001720 self._Status()
1721 continue
1722
1723 target = job.target
1724
Brian Harring0be85c62012-03-17 19:52:12 -07001725 if job.fetch_only:
1726 if not job.done:
1727 self._fetch_jobs[job.target] = job
1728 else:
1729 state = self._state_map[job.target]
1730 state.prefetched = True
1731 state.fetched_successfully = (job.retcode == 0)
1732 del self._fetch_jobs[job.target]
1733 self._Print("Fetched %s in %2.2fs"
1734 % (target, time.time() - job.start_timestamp))
1735
1736 if self._show_output or job.retcode != 0:
1737 self._print_queue.put(JobPrinter(job, unlink=True))
1738 else:
1739 os.unlink(job.filename)
1740 # Failure or not, let build work with it next.
1741 if not self._deps_map[job.target]["needs"]:
1742 self._build_ready.put(state)
1743 self._ScheduleLoop()
1744
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001745 if self._unpack_only and job.retcode == 0:
1746 self._unpack_ready.put(state)
1747 self._ScheduleLoop(unpack_only=True)
1748
Brian Harring0be85c62012-03-17 19:52:12 -07001749 if self._fetch_ready:
1750 state = self._fetch_ready.get()
1751 self._fetch_queue.put(state)
1752 self._fetch_jobs[state.target] = None
1753 else:
1754 # Minor optimization; shut down fetchers early since we know
1755 # the queue is empty.
1756 self._fetch_queue.put(None)
1757 continue
1758
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001759 if job.unpack_only:
1760 if not job.done:
1761 self._unpack_jobs[target] = job
1762 else:
1763 del self._unpack_jobs[target]
1764 self._Print("Unpacked %s in %2.2fs"
1765 % (target, time.time() - job.start_timestamp))
1766 if self._show_output or job.retcode != 0:
1767 self._print_queue.put(JobPrinter(job, unlink=True))
1768 else:
1769 os.unlink(job.filename)
1770 if self._unpack_ready:
1771 state = self._unpack_ready.get()
1772 self._unpack_queue.put(state)
1773 self._unpack_jobs[state.target] = None
1774 continue
1775
David Jamesfcb70ef2011-02-02 16:02:30 -08001776 if not job.done:
Brian Harring0be85c62012-03-17 19:52:12 -07001777 self._build_jobs[target] = job
David Jamesfcb70ef2011-02-02 16:02:30 -08001778 self._Print("Started %s (logged in %s)" % (target, job.filename))
1779 continue
1780
1781 # Print output of job
1782 if self._show_output or job.retcode != 0:
1783 self._print_queue.put(JobPrinter(job, unlink=True))
1784 else:
1785 os.unlink(job.filename)
Brian Harring0be85c62012-03-17 19:52:12 -07001786 del self._build_jobs[target]
David Jamesfcb70ef2011-02-02 16:02:30 -08001787
1788 seconds = time.time() - job.start_timestamp
1789 details = "%s (in %dm%.1fs)" % (target, seconds / 60, seconds % 60)
1790
1791 # Complain if necessary.
1792 if job.retcode != 0:
1793 # Handle job failure.
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001794 failed_count = self._failed_count.get(target, 0)
1795 if failed_count >= self._max_retries:
1796 # If this job has failed and can't be retried, give up.
David Jamesfcb70ef2011-02-02 16:02:30 -08001797 self._Print("Failed %s. Your build has failed." % details)
1798 else:
1799 # Queue up this build to try again after a long while.
Brian Harring0be85c62012-03-17 19:52:12 -07001800 self._retry_queue.append(self._state_map[target])
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001801 self._failed_count[target] = failed_count + 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001802 self._Print("Failed %s, retrying later." % details)
1803 else:
David James32420cc2011-08-25 21:32:46 -07001804 self._Print("Completed %s" % details)
1805
1806 # Mark as completed and unblock waiting ebuilds.
1807 self._Finish(target)
1808
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001809 if target in self._failed_count and self._retry_queue:
David Jamesfcb70ef2011-02-02 16:02:30 -08001810 # If we have successfully retried a failed package, and there
1811 # are more failed packages, try the next one. We will only have
1812 # one retrying package actively running at a time.
1813 self._Retry()
1814
David Jamesfcb70ef2011-02-02 16:02:30 -08001815
David James8c7e5e32011-06-28 11:26:03 -07001816 # Schedule pending jobs and print an update.
1817 self._ScheduleLoop()
1818 self._Status()
David Jamesfcb70ef2011-02-02 16:02:30 -08001819
David Jamese703d0f2012-01-12 16:27:45 -08001820 # If packages were retried, output a warning.
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001821 if self._failed_count:
David Jamese703d0f2012-01-12 16:27:45 -08001822 self._Print("")
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001823 self._Print("WARNING: The following packages failed once or more,")
David Jamese703d0f2012-01-12 16:27:45 -08001824 self._Print("but succeeded upon retry. This might indicate incorrect")
1825 self._Print("dependencies.")
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001826 for pkg in self._failed_count.iterkeys():
David Jamese703d0f2012-01-12 16:27:45 -08001827 self._Print(" %s" % pkg)
1828 self._Print("@@@STEP_WARNINGS@@@")
1829 self._Print("")
1830
David Jamesfcb70ef2011-02-02 16:02:30 -08001831 # Tell child threads to exit.
1832 self._Print("Merge complete")
David Jamesfcb70ef2011-02-02 16:02:30 -08001833
1834
Brian Harring30675052012-02-29 12:18:22 -08001835def main(argv):
Brian Harring8294d652012-05-23 02:20:52 -07001836 try:
1837 return real_main(argv)
1838 finally:
1839 # Work around multiprocessing sucking and not cleaning up after itself.
1840 # http://bugs.python.org/issue4106;
1841    # Step one: ensure GC is run *prior* to the VM starting shutdown.
1842 gc.collect()
1843    # Step two: go looking for those threads and try to manually reap
1844 # them if we can.
1845 for x in threading.enumerate():
1846 # Filter on the name, and ident; if ident is None, the thread
1847 # wasn't started.
1848 if x.name == 'QueueFeederThread' and x.ident is not None:
1849 x.join(1)
David Jamesfcb70ef2011-02-02 16:02:30 -08001850
Brian Harring8294d652012-05-23 02:20:52 -07001851
1852def real_main(argv):
Brian Harring30675052012-02-29 12:18:22 -08001853 parallel_emerge_args = argv[:]
David Jamesfcb70ef2011-02-02 16:02:30 -08001854 deps = DepGraphGenerator()
Brian Harring30675052012-02-29 12:18:22 -08001855 deps.Initialize(parallel_emerge_args)
David Jamesfcb70ef2011-02-02 16:02:30 -08001856 emerge = deps.emerge
1857
1858 if emerge.action is not None:
Brian Harring30675052012-02-29 12:18:22 -08001859 argv = deps.ParseParallelEmergeArgs(argv)
Brian Harring8294d652012-05-23 02:20:52 -07001860 return emerge_main(argv)
David Jamesfcb70ef2011-02-02 16:02:30 -08001861 elif not emerge.cmdline_packages:
1862 Usage()
Brian Harring8294d652012-05-23 02:20:52 -07001863 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001864
1865 # Unless we're in pretend mode, there's not much point running without
1866 # root access. We need to be able to install packages.
1867 #
1868 # NOTE: Even if you're running --pretend, it's a good idea to run
1869 # parallel_emerge with root access so that portage can write to the
1870 # dependency cache. This is important for performance.
David James321490a2012-12-17 12:05:56 -08001871 if "--pretend" not in emerge.opts and portage.data.secpass < 2:
Mike Frysinger383367e2014-09-16 15:06:17 -04001872 print("parallel_emerge: superuser access is required.")
Brian Harring8294d652012-05-23 02:20:52 -07001873 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001874
1875 if "--quiet" not in emerge.opts:
1876 cmdline_packages = " ".join(emerge.cmdline_packages)
Mike Frysinger383367e2014-09-16 15:06:17 -04001877 print("Starting fast-emerge.")
1878 print(" Building package %s on %s" % (cmdline_packages,
Gilad Arnold05f94b02015-05-22 10:41:05 -07001879 deps.sysroot or "root"))
David Jamesfcb70ef2011-02-02 16:02:30 -08001880
David James386ccd12011-05-04 20:17:42 -07001881 deps_tree, deps_info = deps.GenDependencyTree()
David Jamesfcb70ef2011-02-02 16:02:30 -08001882
1883 # You want me to be verbose? I'll give you two trees! Twice as much value.
1884 if "--tree" in emerge.opts and "--verbose" in emerge.opts:
1885 deps.PrintTree(deps_tree)
1886
David James386ccd12011-05-04 20:17:42 -07001887 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001888
1889 # OK, time to print out our progress so far.
1890 deps.PrintInstallPlan(deps_graph)
1891 if "--tree" in emerge.opts:
1892 PrintDepsMap(deps_graph)
1893
1894 # Are we upgrading portage? If so, and there are more packages to merge,
1895 # schedule a restart of parallel_emerge to merge the rest. This ensures that
1896 # we pick up all updates to portage settings before merging any more
1897 # packages.
1898 portage_upgrade = False
1899 root = emerge.settings["ROOT"]
Don Garrett25f309a2014-03-19 14:02:12 -07001900 # pylint: disable=W0212
David Jamesfcb70ef2011-02-02 16:02:30 -08001901 if root == "/":
Gilad Arnoldcead28a2015-05-22 10:45:02 -07001902 final_db = emerge.depgraph._dynamic_config._filtered_trees[root]['graph_db']
David Jamesfcb70ef2011-02-02 16:02:30 -08001903 for db_pkg in final_db.match_pkgs("sys-apps/portage"):
1904 portage_pkg = deps_graph.get(db_pkg.cpv)
David James0ff16f22012-11-02 14:18:07 -07001905 if portage_pkg:
David Jamesfcb70ef2011-02-02 16:02:30 -08001906 portage_upgrade = True
1907 if "--quiet" not in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001908 print("Upgrading portage first, then restarting...")
David Jamesfcb70ef2011-02-02 16:02:30 -08001909
David James0ff16f22012-11-02 14:18:07 -07001910 # Upgrade Portage first, then the rest of the packages.
1911 #
1912 # In order to grant the child permission to run setsid, we need to run sudo
1913 # again. We preserve SUDO_USER here in case an ebuild depends on it.
1914 if portage_upgrade:
1915 # Calculate what arguments to use when re-invoking.
1916 args = ["sudo", "-E", "SUDO_USER=%s" % os.environ.get("SUDO_USER", "")]
1917 args += [os.path.abspath(sys.argv[0])] + parallel_emerge_args
1918 args += ["--exclude=sys-apps/portage"]
1919
1920 # First upgrade Portage.
1921 passthrough_args = ("--quiet", "--pretend", "--verbose")
1922 emerge_args = [k for k in emerge.opts if k in passthrough_args]
1923 ret = emerge_main(emerge_args + ["portage"])
1924 if ret != 0:
1925 return ret
1926
1927 # Now upgrade the rest.
1928 os.execvp(args[0], args)
1929
Bertrand SIMONNETc03c8ee2014-12-10 17:02:55 -08001930 # Attempt to solve crbug.com/433482
1931 # The file descriptor error appears only when getting userpriv_groups
1932 # (lazily generated). Loading userpriv_groups here will reduce the number of
1933 # calls from few hundreds to one.
1934 portage.data._get_global('userpriv_groups')
1935
David Jamesfcb70ef2011-02-02 16:02:30 -08001936 # Run the queued emerges.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001937 scheduler = EmergeQueue(deps_graph, emerge, deps.package_db, deps.show_output,
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001938 deps.unpack_only, deps.max_retries)
Brian Harringa43f5952012-04-12 01:19:34 -07001939 try:
1940 scheduler.Run()
1941 finally:
Don Garrett25f309a2014-03-19 14:02:12 -07001942 # pylint: disable=W0212
Brian Harringa43f5952012-04-12 01:19:34 -07001943 scheduler._Shutdown()
David James97ce8902011-08-16 09:51:05 -07001944 scheduler = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001945
Mike Frysingerd20a6e22012-10-04 19:01:10 -04001946 clean_logs(emerge.settings)
1947
Mike Frysinger383367e2014-09-16 15:06:17 -04001948 print("Done")
Brian Harring8294d652012-05-23 02:20:52 -07001949 return 0