# -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Program to run emerge in parallel, for significant speedup.

Usage:
 ./parallel_emerge [--board=BOARD] [--workon=PKGS]
 [--force-remote-binary=PKGS] [emerge args] package

This script runs multiple emerge processes in parallel, using appropriate
Portage APIs. It is faster than standard emerge because it has a
multiprocess model instead of an asynchronous model.
"""

from __future__ import print_function

import codecs
import copy
import errno
import gc
import heapq
import multiprocessing
import os
try:
  import Queue
except ImportError:
  # Python 3 renamed it to "queue". We still use Queue to avoid collisions
  # with naming variables as "queue". Maybe we'll transition at some point.
  # pylint: disable=import-error
  import queue as Queue
import signal
import subprocess
import sys
import tempfile
import threading
import time
import traceback

from chromite.lib import cros_build_lib
from chromite.lib import cros_event
from chromite.lib import osutils
from chromite.lib import portage_util
from chromite.lib import process_util
from chromite.lib import proctitle

# If PORTAGE_USERNAME isn't specified, scrape it from the $HOME variable. On
# Chromium OS, the default "portage" user doesn't have the necessary
# permissions. It'd be easier if we could default to $USERNAME, but $USERNAME
# is "root" here because we get called through sudo.
#
# We need to set this before importing any portage modules, because portage
# looks up "PORTAGE_USERNAME" at import time.
#
# NOTE: .bashrc sets PORTAGE_USERNAME = $USERNAME, so most people won't
# encounter this case unless they have an old chroot or blow away the
# environment by running sudo without the -E specifier.
if "PORTAGE_USERNAME" not in os.environ:
  homedir = os.environ.get("HOME")
  if homedir:
    os.environ["PORTAGE_USERNAME"] = os.path.basename(homedir)

# Wrap Popen with a lock to ensure no two Popen are executed simultaneously in
# the same process.
# Two Popen calls at the same time might be the cause of crbug.com/433482.
_popen_lock = threading.Lock()
_old_popen = subprocess.Popen

def _LockedPopen(*args, **kwargs):
  with _popen_lock:
    return _old_popen(*args, **kwargs)

subprocess.Popen = _LockedPopen

# Portage doesn't expose dependency trees in its public API, so we have to
# make use of some private APIs here. These modules are found under
# /usr/lib/portage/pym/.
#
# TODO(davidjames): Update Portage to expose public APIs for these features.
# pylint: disable=import-error
from _emerge.actions import adjust_configs
from _emerge.actions import load_emerge_config
from _emerge.create_depgraph_params import create_depgraph_params
from _emerge.depgraph import backtrack_depgraph
from _emerge.main import emerge_main
from _emerge.main import parse_opts
from _emerge.Package import Package
from _emerge.post_emerge import clean_logs
from _emerge.Scheduler import Scheduler
from _emerge.stdout_spinner import stdout_spinner
from portage._global_updates import _global_updates
import portage
import portage.debug
# pylint: enable=import-error


def Usage():
  """Print usage."""
  print("Usage:")
  print(" ./parallel_emerge [--board=BOARD] [--workon=PKGS] [--rebuild]")
  print(" [--eventlogfile=FILE] [emerge args] package")
  print()
  print("Packages specified as workon packages are always built from source.")
  print()
106 print("The --workon argument is mainly useful when you want to build and")
107 print("install packages that you are working on unconditionally, but do not")
108 print("to have to rev the package to indicate you want to build it from")
109 print("source. The build_packages script will automatically supply the")
110 print("workon argument to emerge, ensuring that packages selected using")
111 print("cros-workon are rebuilt.")
  print()
  print("The --rebuild option rebuilds packages whenever their dependencies")
  print("are changed. This ensures that your build is correct.")
  print()
116 print("The --eventlogfile writes events to the given file. File is")
117 print("is overwritten if it exists.")


# Global start time
GLOBAL_START = time.time()

# Whether process has been killed by a signal.
KILLED = multiprocessing.Event()


class EmergeData(object):
  """This simple struct holds various emerge variables.

  This struct helps us easily pass emerge variables around as a unit.
  These variables are used for calculating dependencies and installing
  packages.
  """

  __slots__ = ["action", "cmdline_packages", "depgraph", "favorites",
               "mtimedb", "opts", "root_config", "scheduler_graph",
               "settings", "spinner", "trees"]

  def __init__(self):
    # The action the user requested. If the user is installing packages, this
    # is None. If the user is doing anything other than installing packages,
    # this will contain the action name, which will map exactly to the
    # long-form name of the associated emerge option.
    #
    # Example: If you call parallel_emerge --unmerge package, the action name
    # will be "unmerge"
    self.action = None

    # The list of packages the user passed on the command-line.
    self.cmdline_packages = None

    # The emerge dependency graph. It'll contain all the packages involved in
    # this merge, along with their versions.
    self.depgraph = None

    # The list of candidates to add to the world file.
    self.favorites = None

    # A dict of the options passed to emerge. This dict has been cleaned up
    # a bit by parse_opts, so that it's a bit easier for the emerge code to
    # look at the options.
    #
    # Emerge takes a few shortcuts in its cleanup process to make parsing of
    # the options dict easier. For example, if you pass in "--usepkg=n", the
    # "--usepkg" flag is just left out of the dictionary altogether. Because
    # --usepkg=n is the default, this makes parsing easier, because emerge
    # can just assume that if "--usepkg" is in the dictionary, it's enabled.
    #
    # These cleanup processes aren't applied to all options. For example, the
    # --with-bdeps flag is passed in as-is. For a full list of the cleanups
    # applied by emerge, see the parse_opts function in the _emerge.main
    # package.
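    #
    # As a purely illustrative sketch (not output from a real run), opts for
    # an invocation passing "--usepkg --with-bdeps y" might look roughly like
    #   {"--usepkg": True, "--with-bdeps": "y"}
    # with "--usepkg=n" style flags simply left out, as described above.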
    self.opts = None

    # A dictionary used by portage to maintain global state. This state is
    # loaded from disk when portage starts up, and saved to disk whenever we
    # call mtimedb.commit().
    #
    # This database contains information about global updates (i.e., what
    # version of portage we have) and what we're currently doing. Portage
    # saves what it is currently doing in this database so that it can be
    # resumed when you call it with the --resume option.
    #
    # parallel_emerge does not save what it is currently doing in the mtimedb,
    # so we do not support the --resume option.
    self.mtimedb = None

    # The portage configuration for our current root. This contains the portage
    # settings (see below) and the three portage trees for our current root.
    # (The three portage trees are explained below, in the documentation for
    # the "trees" member.)
    self.root_config = None

    # The scheduler graph is used by emerge to calculate what packages to
    # install. We don't actually install any deps, so this isn't really used,
    # but we pass it in to the Scheduler object anyway.
    self.scheduler_graph = None

    # Portage settings for our current session. Most of these settings are set
    # in make.conf inside our current install root.
    self.settings = None

    # The spinner, which spews stuff to stdout to indicate that portage is
    # doing something. We maintain our own spinner, so we set the portage
    # spinner to "silent" mode.
    self.spinner = None

    # The portage trees. There are separate portage trees for each root. To get
    # the portage tree for the current root, you can look in self.trees[root],
    # where root = self.settings["ROOT"].
    #
    # In each root, there are three trees: vartree, porttree, and bintree.
    #  - vartree: A database of the currently-installed packages.
    #  - porttree: A database of ebuilds, that can be used to build packages.
    #  - bintree: A database of binary packages.
    self.trees = None


class DepGraphGenerator(object):
  """Grab dependency information about packages from portage.

  Typical usage:
    deps = DepGraphGenerator()
    deps.Initialize(sys.argv[1:])
    deps_tree, deps_info = deps.GenDependencyTree()
    deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
    deps.PrintTree(deps_tree)
    PrintDepsMap(deps_graph)
  """

  __slots__ = ["board", "emerge", "package_db", "show_output", "sysroot",
               "unpack_only", "max_retries", "install_plan_filename"]

  def __init__(self):
    self.board = None
    self.emerge = EmergeData()
    self.package_db = {}
    self.show_output = False
    self.sysroot = None
    self.unpack_only = False
    self.max_retries = 1
    self.install_plan_filename = None

  def ParseParallelEmergeArgs(self, argv):
    """Read the parallel emerge arguments from the command-line.

    We need to be compatible with emerge arg format. We scrape arguments that
    are specific to parallel_emerge, and pass through the rest directly to
    emerge.

    Args:
      argv: arguments list

    Returns:
      Arguments that don't belong to parallel_emerge
    """
    emerge_args = []
    for arg in argv:
      # Specifically match arguments that are specific to parallel_emerge, and
      # pass through the rest.
      if arg.startswith("--board="):
        self.board = arg.replace("--board=", "")
      elif arg.startswith("--sysroot="):
        self.sysroot = arg.replace("--sysroot=", "")
      elif arg.startswith("--workon="):
        workon_str = arg.replace("--workon=", "")
        emerge_args.append("--reinstall-atoms=%s" % workon_str)
        emerge_args.append("--usepkg-exclude=%s" % workon_str)
      elif arg.startswith("--force-remote-binary="):
        force_remote_binary = arg.replace("--force-remote-binary=", "")
        emerge_args.append("--useoldpkg-atoms=%s" % force_remote_binary)
      elif arg.startswith("--retries="):
        self.max_retries = int(arg.replace("--retries=", ""))
      elif arg == "--show-output":
        self.show_output = True
      elif arg == "--rebuild":
        emerge_args.append("--rebuild-if-unbuilt")
      elif arg == "--unpackonly":
        emerge_args.append("--fetchonly")
        self.unpack_only = True
      elif arg.startswith("--eventlogfile="):
        log_file_name = arg.replace("--eventlogfile=", "")
        event_logger = cros_event.getEventFileLogger(log_file_name)
        event_logger.setKind('ParallelEmerge')
        cros_event.setEventLogger(event_logger)
      elif arg.startswith("--install-plan-filename"):
        # No emerge equivalent, used to calculate the list of packages
        # that changed and we will need to calculate reverse dependencies.
        self.install_plan_filename = arg.replace("--install-plan-filename=", "")
      else:
        # Not one of our options, so pass through to emerge.
        emerge_args.append(arg)

    # These packages take a really long time to build, so, for expediency, we
    # are blacklisting them from the automatic rebuilds that would otherwise
    # be triggered when one of their dependencies needs to be recompiled.
    for pkg in ("chromeos-base/chromeos-chrome",):
      emerge_args.append("--rebuild-exclude=%s" % pkg)

    return emerge_args

  def Initialize(self, args):
    """Initializer. Parses arguments and sets up portage state."""

    # Parse and strip out args that are just intended for parallel_emerge.
    emerge_args = self.ParseParallelEmergeArgs(args)

    if self.sysroot and self.board:
      cros_build_lib.Die("--sysroot and --board are incompatible.")

    # Setup various environment variables based on our current board. These
    # variables are normally setup inside emerge-${BOARD}, but since we don't
    # call that script, we have to set it up here. These variables serve to
    # point our tools at /build/BOARD and to setup cross compiles to the
    # appropriate board as configured in toolchain.conf.
    if self.board:
      self.sysroot = os.environ.get('SYSROOT',
                                    cros_build_lib.GetSysroot(self.board))

    if self.sysroot:
      os.environ["PORTAGE_CONFIGROOT"] = self.sysroot
      os.environ["SYSROOT"] = self.sysroot

    # Turn off interactive delays
    os.environ["EBEEP_IGNORE"] = "1"
    os.environ["EPAUSE_IGNORE"] = "1"
    os.environ["CLEAN_DELAY"] = "0"

    # Parse the emerge options.
    action, opts, cmdline_packages = parse_opts(emerge_args, silent=True)

    # Set environment variables based on options. Portage normally sets these
    # environment variables in emerge_main, but we can't use that function,
    # because it also does a bunch of other stuff that we don't want.
    # TODO(davidjames): Patch portage to move this logic into a function we can
    # reuse here.
    if "--debug" in opts:
      os.environ["PORTAGE_DEBUG"] = "1"
    if "--config-root" in opts:
      os.environ["PORTAGE_CONFIGROOT"] = opts["--config-root"]
    if "--root" in opts:
      os.environ["ROOT"] = opts["--root"]
    if "--accept-properties" in opts:
      os.environ["ACCEPT_PROPERTIES"] = opts["--accept-properties"]

    # If we're installing packages to the board, we can disable vardb locks.
    # This is safe because we only run up to one instance of parallel_emerge in
    # parallel.
    # TODO(davidjames): Enable this for the host too.
    if self.sysroot:
      os.environ.setdefault("PORTAGE_LOCKS", "false")

    # Now that we've setup the necessary environment variables, we can load the
    # emerge config from disk.
    # pylint: disable=unpacking-non-sequence
    settings, trees, mtimedb = load_emerge_config()

    # Add in EMERGE_DEFAULT_OPTS, if specified.
    tmpcmdline = []
    if "--ignore-default-opts" not in opts:
      tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
    tmpcmdline.extend(emerge_args)
    action, opts, cmdline_packages = parse_opts(tmpcmdline)

    # If we're installing to the board, we want the --root-deps option so that
    # portage will install the build dependencies to that location as well.
    if self.sysroot:
      opts.setdefault("--root-deps", True)

    # Check whether our portage tree is out of date. Typically, this happens
    # when you're setting up a new portage tree, such as in setup_board and
    # make_chroot. In that case, portage applies a bunch of global updates
    # here. Once the updates are finished, we need to commit any changes
    # that the global update made to our mtimedb, and reload the config.
    #
    # Portage normally handles this logic in emerge_main, but again, we can't
    # use that function here.
    if _global_updates(trees, mtimedb["updates"]):
      mtimedb.commit()
      # pylint: disable=unpacking-non-sequence
      settings, trees, mtimedb = load_emerge_config(trees=trees)

    # Setup implied options. Portage normally handles this logic in
    # emerge_main.
    if "--buildpkgonly" in opts or "buildpkg" in settings.features:
      opts.setdefault("--buildpkg", True)
    if "--getbinpkgonly" in opts:
      opts.setdefault("--usepkgonly", True)
      opts.setdefault("--getbinpkg", True)
    if "getbinpkg" in settings.features:
      # Per emerge_main, FEATURES=getbinpkg overrides --getbinpkg=n
      opts["--getbinpkg"] = True
    if "--getbinpkg" in opts or "--usepkgonly" in opts:
      opts.setdefault("--usepkg", True)
    if "--fetch-all-uri" in opts:
      opts.setdefault("--fetchonly", True)
    if "--skipfirst" in opts:
      opts.setdefault("--resume", True)
    if "--buildpkgonly" in opts:
      # --buildpkgonly will not merge anything, so it overrides all binary
      # package options.
      for opt in ("--getbinpkg", "--getbinpkgonly",
                  "--usepkg", "--usepkgonly"):
        opts.pop(opt, None)
    if (settings.get("PORTAGE_DEBUG", "") == "1" and
        "python-trace" in settings.features):
      portage.debug.set_trace(True)

    # Complain about unsupported options
    for opt in ("--ask", "--ask-enter-invalid", "--resume", "--skipfirst"):
      if opt in opts:
        print("%s is not supported by parallel_emerge" % opt)
        sys.exit(1)

    # Make emerge specific adjustments to the config (e.g. colors!)
    adjust_configs(opts, trees)

    # Save our configuration so far in the emerge object
    emerge = self.emerge
    emerge.action, emerge.opts = action, opts
    emerge.settings, emerge.trees, emerge.mtimedb = settings, trees, mtimedb
    emerge.cmdline_packages = cmdline_packages
    root = settings["ROOT"]
    emerge.root_config = trees[root]["root_config"]

    if "--usepkg" in opts:
      emerge.trees[root]["bintree"].populate("--getbinpkg" in opts)

  def CreateDepgraph(self, emerge, packages):
    """Create an emerge depgraph object."""
    # Setup emerge options.
    emerge_opts = emerge.opts.copy()

    # Ask portage to build a dependency graph with the options we specified
    # above.
    params = create_depgraph_params(emerge_opts, emerge.action)
    success, depgraph, favorites = backtrack_depgraph(
        emerge.settings, emerge.trees, emerge_opts, params, emerge.action,
        packages, emerge.spinner)
    emerge.depgraph = depgraph

    # Is it impossible to honor the user's request? Bail!
    if not success:
      depgraph.display_problems()
      sys.exit(1)

    emerge.depgraph = depgraph
    emerge.favorites = favorites

    # Prime and flush emerge caches.
    root = emerge.settings["ROOT"]
    vardb = emerge.trees[root]["vartree"].dbapi
    if "--pretend" not in emerge.opts:
      vardb.counter_tick()
    vardb.flush_cache()

  def GenDependencyTree(self):
    """Get dependency tree info from emerge.

    Returns:
      Dependency tree
    """
    start = time.time()

    emerge = self.emerge

    # Create a list of packages to merge
    packages = set(emerge.cmdline_packages[:])

    # Tell emerge to be quiet. We print plenty of info ourselves so we don't
    # need any extra output from portage.
    portage.util.noiselimit = -1

    # My favorite feature: The silent spinner. It doesn't spin. Ever.
    # I'd disable the colors by default too, but they look kind of cool.
    emerge.spinner = stdout_spinner()
    emerge.spinner.update = emerge.spinner.update_quiet

    if "--quiet" not in emerge.opts:
      print("Calculating deps...")

    with cros_event.newEvent(task_name="GenerateDepTree"):
      self.CreateDepgraph(emerge, packages)
    depgraph = emerge.depgraph

    # Build our own tree from the emerge digraph.
    deps_tree = {}
    # pylint: disable=protected-access
    digraph = depgraph._dynamic_config.digraph
    root = emerge.settings["ROOT"]
    final_db = depgraph._dynamic_config._filtered_trees[root]['graph_db']
    for node, node_deps in digraph.nodes.items():
      # Calculate dependency packages that need to be installed first. Each
      # child on the digraph is a dependency. The "operation" field specifies
      # what we're doing (e.g. merge, uninstall, etc.). The "priorities" array
      # contains the type of dependency (e.g. build, runtime, runtime_post,
      # etc.)
      #
      # Portage refers to the identifiers for packages as a CPV. This acronym
      # stands for Category/Package-Version.
      #
      # Here's an example CPV: chromeos-base/power_manager-0.0.1-r1
      # Split up, this CPV would be:
      #   C -- Category: chromeos-base
      #   P -- Package: power_manager
      #   V -- Version: 0.0.1-r1
      #
      # We just refer to CPVs as packages here because it's easier.
      deps = {}
      for child, priorities in node_deps[0].items():
        if isinstance(child, Package) and child.root == root:
          cpv = str(child.cpv)
          action = str(child.operation)

          # If we're uninstalling a package, check whether Portage is
          # installing a replacement. If so, just depend on the installation
          # of the new package, because the old package will automatically
          # be uninstalled at that time.
          if action == "uninstall":
            for pkg in final_db.match_pkgs(child.slot_atom):
              cpv = str(pkg.cpv)
              action = "merge"
              break

          deps[cpv] = dict(action=action,
                           deptypes=[str(x) for x in priorities],
                           deps={})

      # We've built our list of deps, so we can add our package to the tree.
      if isinstance(node, Package) and node.root == root:
        deps_tree[str(node.cpv)] = dict(action=str(node.operation),
                                        deps=deps)

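    # As a rough illustration (hypothetical package names), an entry in
    # deps_tree looks like:
    #   deps_tree["chromeos-base/foo-0.0.1-r1"] = {
    #       "action": "merge",
    #       "deps": {"chromeos-base/libbar-1.2.3": {"action": "merge",
    #                                               "deptypes": ["buildtime"],
    #                                               "deps": {}}}}
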
    # Ask portage for its install plan, so that we can only throw out
    # dependencies that portage throws out.
    deps_info = {}
    for pkg in depgraph.altlist():
      if isinstance(pkg, Package):
        assert pkg.root == root
        self.package_db[pkg.cpv] = pkg

        # Save off info about the package
        deps_info[str(pkg.cpv)] = {"idx": len(deps_info)}

    seconds = time.time() - start
    if "--quiet" not in emerge.opts:
      print("Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60))

    # Calculate the install plan packages and append them to a temp file. They
    # will be used to calculate all the reverse dependencies of these changed
    # packages.
    if self.install_plan_filename:
      # Always write the file even if nothing to do, scripts expect existence.
      output = '\n'.join(deps_info)
      if len(output) > 0:
        # add a trailing newline only if the output is not empty.
        output += '\n'
      osutils.WriteFile(self.install_plan_filename,
                        output,
                        mode='a')
    return deps_tree, deps_info

  def PrintTree(self, deps, depth=""):
    """Print the deps we have seen in the emerge output.

    Args:
      deps: Dependency tree structure.
      depth: Allows printing the tree recursively, with indentation.
    """
    for entry in sorted(deps):
      action = deps[entry]["action"]
      print("%s %s (%s)" % (depth, entry, action))
      self.PrintTree(deps[entry]["deps"], depth=depth + " ")

  def GenDependencyGraph(self, deps_tree, deps_info):
    """Generate a doubly linked dependency graph.

    Args:
      deps_tree: Dependency tree structure.
      deps_info: More details on the dependencies.

    Returns:
      Deps graph in the form of a dict of packages, with each package
      specifying a "needs" list and "provides" list.
    """
    emerge = self.emerge

    # deps_map is the actual dependency graph.
    #
    # Each package specifies a "needs" list and a "provides" list. The "needs"
    # list indicates which packages we depend on. The "provides" list
    # indicates the reverse dependencies -- what packages need us.
    #
    # We also provide some other information in the dependency graph:
    # - action: What we're planning on doing with this package. Generally,
    #   "merge", "nomerge", or "uninstall"
    deps_map = {}

    def ReverseTree(packages):
      """Convert tree to digraph.

      Take the tree of package -> requirements and reverse it to a digraph of
      buildable packages -> packages they unblock.

      Args:
        packages: Tree(s) of dependencies.

      Returns:
        Unsanitized digraph.
      """
      binpkg_phases = set(["setup", "preinst", "postinst"])
      needed_dep_types = set(["blocker", "buildtime", "buildtime_slot_op",
                              "runtime", "runtime_slot_op"])
      ignored_dep_types = set(["ignored", "runtime_post", "soft"])

      # There's a bug in the Portage library where it always returns 'optional'
      # and never 'buildtime' for the digraph while --usepkg is enabled; even
      # when the package is being rebuilt. To work around this, we treat
      # 'optional' as needed when we are using --usepkg. See crbug.com/756240 .
      if "--usepkg" in self.emerge.opts:
        needed_dep_types.add("optional")
      else:
        ignored_dep_types.add("optional")

      all_dep_types = ignored_dep_types | needed_dep_types
      for pkg in packages:

        # Create an entry for the package
        action = packages[pkg]["action"]
        default_pkg = {"needs": {}, "provides": set(), "action": action,
                       "nodeps": False, "binary": False}
        this_pkg = deps_map.setdefault(pkg, default_pkg)

        if pkg in deps_info:
          this_pkg["idx"] = deps_info[pkg]["idx"]

        # If a package doesn't have any defined phases that might use the
        # dependent packages (i.e. pkg_setup, pkg_preinst, or pkg_postinst),
        # we can install this package before its deps are ready.
        emerge_pkg = self.package_db.get(pkg)
        if emerge_pkg and emerge_pkg.type_name == "binary":
          this_pkg["binary"] = True
          defined_phases = emerge_pkg.defined_phases
          defined_binpkg_phases = binpkg_phases.intersection(defined_phases)
          if not defined_binpkg_phases:
            this_pkg["nodeps"] = True

        # Create entries for dependencies of this package first.
        ReverseTree(packages[pkg]["deps"])

        # Add dependencies to this package.
        for dep, dep_item in packages[pkg]["deps"].iteritems():
          # We only need to enforce strict ordering of dependencies if the
          # dependency is a blocker, or is a buildtime or runtime dependency.
          # (I.e., ignored, optional, and runtime_post dependencies don't
          # depend on ordering.)
          dep_types = dep_item["deptypes"]
          if needed_dep_types.intersection(dep_types):
            deps_map[dep]["provides"].add(pkg)
            this_pkg["needs"][dep] = "/".join(dep_types)

          # Verify we processed all appropriate dependency types.
          unknown_dep_types = set(dep_types) - all_dep_types
          if unknown_dep_types:
            print("Unknown dependency types found:")
            print(" %s -> %s (%s)" % (pkg, dep, "/".join(unknown_dep_types)))
            sys.exit(1)

          # If there's a blocker, Portage may need to move files from one
          # package to another, which requires editing the CONTENTS files of
          # both packages. To avoid race conditions while editing this file,
          # the two packages must not be installed in parallel, so we can't
          # safely ignore dependencies. See http://crosbug.com/19328
          if "blocker" in dep_types:
            this_pkg["nodeps"] = False

    def FindCycles():
      """Find cycles in the dependency tree.

      Returns:
        A dict mapping cyclic packages to a dict of the deps that cause
        cycles. For each dep that causes cycles, it returns an example
        traversal of the graph that shows the cycle.
      """

      def FindCyclesAtNode(pkg, cycles, unresolved, resolved):
        """Find cycles in cyclic dependencies starting at specified package.

        Args:
          pkg: Package identifier.
          cycles: A dict mapping cyclic packages to a dict of the deps that
            cause cycles. For each dep that causes cycles, it returns an
            example traversal of the graph that shows the cycle.
          unresolved: Nodes that have been visited but are not fully processed.
          resolved: Nodes that have been visited and are fully processed.
        """
        pkg_cycles = cycles.get(pkg)
        if pkg in resolved and not pkg_cycles:
          # If we already looked at this package, and found no cyclic
          # dependencies, we can stop now.
          return
        unresolved.append(pkg)
        for dep in deps_map[pkg]["needs"]:
          if dep in unresolved:
            idx = unresolved.index(dep)
            mycycle = unresolved[idx:] + [dep]
            for i in xrange(len(mycycle) - 1):
              pkg1, pkg2 = mycycle[i], mycycle[i+1]
              cycles.setdefault(pkg1, {}).setdefault(pkg2, mycycle)
          elif not pkg_cycles or dep not in pkg_cycles:
            # Looks like we haven't seen this edge before.
            FindCyclesAtNode(dep, cycles, unresolved, resolved)
        unresolved.pop()
        resolved.add(pkg)

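      # Purely as an illustration (hypothetical packages): if foo and bar
      # depend on each other, the returned dict would map roughly to
      #   {"foo": {"bar": ["foo", "bar", "foo"]},
      #    "bar": {"foo": ["foo", "bar", "foo"]}}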
      cycles, unresolved, resolved = {}, [], set()
      for pkg in deps_map:
        FindCyclesAtNode(pkg, cycles, unresolved, resolved)
      return cycles

    def RemoveUnusedPackages():
      """Remove installed packages, propagating dependencies."""
      # Schedule packages that aren't on the install list for removal
      rm_pkgs = set(deps_map.keys()) - set(deps_info.keys())

      # Remove the packages we don't want, simplifying the graph and making
      # it easier for us to crack cycles.
      for pkg in sorted(rm_pkgs):
        this_pkg = deps_map[pkg]
        needs = this_pkg["needs"]
        provides = this_pkg["provides"]
        for dep in needs:
          dep_provides = deps_map[dep]["provides"]
          dep_provides.update(provides)
          dep_provides.discard(pkg)
          dep_provides.discard(dep)
        for target in provides:
          target_needs = deps_map[target]["needs"]
          target_needs.update(needs)
          target_needs.pop(pkg, None)
          target_needs.pop(target, None)
        del deps_map[pkg]

    def PrintCycleBreak(basedep, dep, mycycle):
      """Print details about a cycle that we are planning on breaking.

      We are breaking a cycle where dep needs basedep. mycycle is an
      example cycle which contains dep -> basedep.
      """

      needs = deps_map[dep]["needs"]
      depinfo = needs.get(basedep, "deleted")

      # It's OK to swap install order for blockers, as long as the two
      # packages aren't installed in parallel. If there is a cycle, then
      # we know the packages depend on each other already, so we can drop the
      # blocker safely without printing a warning.
      if depinfo == "blocker":
        return

      # Notify the user that we're breaking a cycle.
      print("Breaking %s -> %s (%s)" % (dep, basedep, depinfo))

      # Show cycle.
      for i in xrange(len(mycycle) - 1):
        pkg1, pkg2 = mycycle[i], mycycle[i+1]
        needs = deps_map[pkg1]["needs"]
        depinfo = needs.get(pkg2, "deleted")
        if pkg1 == dep and pkg2 == basedep:
          depinfo = depinfo + ", deleting"
        print(" %s -> %s (%s)" % (pkg1, pkg2, depinfo))

    def SanitizeTree():
      """Remove circular dependencies.

      We prune all dependencies involved in cycles that go against the emerge
      ordering. This has a nice property: we're guaranteed to merge
      dependencies in the same order that portage does.

      Because we don't treat any dependencies as "soft" unless they're killed
      by a cycle, we pay attention to a larger number of dependencies when
      merging. This hurts performance a bit, but helps reliability.
      """
      start = time.time()
      cycles = FindCycles()
      while cycles:
        for dep, mycycles in cycles.iteritems():
          for basedep, mycycle in mycycles.iteritems():
            if deps_info[basedep]["idx"] >= deps_info[dep]["idx"]:
              if "--quiet" not in emerge.opts:
                PrintCycleBreak(basedep, dep, mycycle)
              del deps_map[dep]["needs"][basedep]
              deps_map[basedep]["provides"].remove(dep)
        cycles = FindCycles()
      seconds = time.time() - start
      if "--quiet" not in emerge.opts and seconds >= 0.1:
        print("Tree sanitized in %dm%.1fs" % (seconds / 60, seconds % 60))

    def FindRecursiveProvides(pkg, seen):
      """Find all nodes that require a particular package.

      Assumes that graph is acyclic.

      Args:
        pkg: Package identifier.
        seen: Nodes that have been visited so far.
      """
      if pkg in seen:
        return
      seen.add(pkg)
      info = deps_map[pkg]
      info["tprovides"] = info["provides"].copy()
      for dep in info["provides"]:
        FindRecursiveProvides(dep, seen)
        info["tprovides"].update(deps_map[dep]["tprovides"])

    ReverseTree(deps_tree)

    # We need to remove unused packages so that we can use the dependency
    # ordering of the install process to show us what cycles to crack.
    RemoveUnusedPackages()
    SanitizeTree()
    seen = set()
    for pkg in deps_map:
      FindRecursiveProvides(pkg, seen)
    return deps_map

  def PrintInstallPlan(self, deps_map):
    """Print an emerge-style install plan.

    The install plan lists what packages we're installing, in order.
    It's useful for understanding what parallel_emerge is doing.

    Args:
      deps_map: The dependency graph.
    """

    def InstallPlanAtNode(target, deps_map):
      nodes = []
      nodes.append(target)
      for dep in deps_map[target]["provides"]:
        del deps_map[dep]["needs"][target]
        if not deps_map[dep]["needs"]:
          nodes.extend(InstallPlanAtNode(dep, deps_map))
      return nodes

    deps_map = copy.deepcopy(deps_map)
    install_plan = []
    plan = set()
    for target, info in deps_map.iteritems():
      if not info["needs"] and target not in plan:
        for item in InstallPlanAtNode(target, deps_map):
          plan.add(item)
          install_plan.append(self.package_db[item])

    for pkg in plan:
      del deps_map[pkg]

    if deps_map:
      print("Cyclic dependencies:", " ".join(deps_map))
      PrintDepsMap(deps_map)
      sys.exit(1)

    self.emerge.depgraph.display(install_plan)


def PrintDepsMap(deps_map):
867 """Print dependency graph, for each package list it's prerequisites."""
  for i in sorted(deps_map):
    print("%s: (%s) needs" % (i, deps_map[i]["action"]))
    needs = deps_map[i]["needs"]
    for j in sorted(needs):
      print(" %s" % (j))
    if not needs:
      print(" no dependencies")


class EmergeJobState(object):
  """Structure describing the EmergeJobState."""

  __slots__ = ["done", "filename", "last_notify_timestamp", "last_output_seek",
               "last_output_timestamp", "pkgname", "retcode", "start_timestamp",
               "target", "try_count", "fetch_only", "unpack_only"]

  def __init__(self, target, pkgname, done, filename, start_timestamp,
               retcode=None, fetch_only=False, try_count=0, unpack_only=False):

    # The full name of the target we're building (e.g.
    # virtual/target-os-1-r60)
    self.target = target

    # The short name of the target we're building (e.g. target-os-1-r60)
    self.pkgname = pkgname

    # Whether the job is done. (True if the job is done; false otherwise.)
    self.done = done

    # The filename where output is currently stored.
    self.filename = filename

    # The timestamp of the last time we printed the name of the log file. We
    # print this at the beginning of the job, so this starts at
    # start_timestamp.
    self.last_notify_timestamp = start_timestamp

    # The location (in bytes) of the end of the last complete line we printed.
    # This starts off at zero. We use this to jump to the right place when we
    # print output from the same ebuild multiple times.
    self.last_output_seek = 0

    # The timestamp of the last time we printed output. Since we haven't
    # printed output yet, this starts at zero.
    self.last_output_timestamp = 0

    # The return code of our job, if the job is actually finished.
    self.retcode = retcode

    # Number of tries for this job
    self.try_count = try_count

    # Was this just a fetch job?
    self.fetch_only = fetch_only

    # The timestamp when our job started.
    self.start_timestamp = start_timestamp

    # No emerge, only unpack packages.
    self.unpack_only = unpack_only


def KillHandler(_signum, _frame):
  # Kill self and all subprocesses.
  os.killpg(0, signal.SIGKILL)


def SetupWorkerSignals():
  def ExitHandler(_signum, _frame):
    # Set KILLED flag.
    KILLED.set()

    # Remove our signal handlers so we don't get called recursively.
    signal.signal(signal.SIGINT, KillHandler)
    signal.signal(signal.SIGTERM, KillHandler)

  # Ensure that we exit quietly and cleanly, if possible, when we receive
  # SIGTERM or SIGINT signals. By default, when the user hits CTRL-C, all
  # of the child processes will print details about KeyboardInterrupt
  # exceptions, which isn't very helpful.
  signal.signal(signal.SIGINT, ExitHandler)
  signal.signal(signal.SIGTERM, ExitHandler)


def EmergeProcess(output, job_state, *args, **kwargs):
  """Merge a package in a subprocess.

  Args:
    output: Temporary file to write output.
    job_state: Stored state of package
    *args: Arguments to pass to Scheduler constructor.
    **kwargs: Keyword arguments to pass to Scheduler constructor.

  Returns:
    The exit code returned by the subprocess.
  """

  target = job_state.target

  job_state.try_count += 1

  cpv = portage_util.SplitCPV(target)

  event = cros_event.newEvent(task_name="EmergePackage",
                              name=cpv.package,
                              category=cpv.category,
                              version=cpv.version,
                              try_count=job_state.try_count)
  pid = os.fork()
  if pid == 0:
    try:
      proctitle.settitle('EmergeProcess', target)

      # Sanity checks.
      if sys.stdout.fileno() != 1:
        raise Exception("sys.stdout.fileno() != 1")
      if sys.stderr.fileno() != 2:
        raise Exception("sys.stderr.fileno() != 2")

      # - Redirect 1 (stdout) and 2 (stderr) at our temporary file.
      # - Redirect 0 to point at sys.stdin. In this case, sys.stdin
      #   points at a file reading os.devnull, because multiprocessing mucks
      #   with sys.stdin.
      # - Leave the sys.stdin and output filehandles alone.
      fd_pipes = {0: sys.stdin.fileno(),
                  1: output.fileno(),
                  2: output.fileno(),
                  sys.stdin.fileno(): sys.stdin.fileno(),
                  output.fileno(): output.fileno()}
      # pylint: disable=protected-access
      portage.process._setup_pipes(fd_pipes, close_fds=False)

      # Portage doesn't like when sys.stdin.fileno() != 0, so point sys.stdin
      # at the filehandle we just created in _setup_pipes.
      if sys.stdin.fileno() != 0:
        sys.__stdin__ = sys.stdin = os.fdopen(0, "r")

      scheduler = Scheduler(*args, **kwargs)

      # Enable blocker handling even though we're in --nodeps mode. This
      # allows us to unmerge the blocker after we've merged the replacement.
      scheduler._opts_ignore_blockers = frozenset()

      # Actually do the merge.
      with event:
        job_state.retcode = scheduler.merge()
        if job_state.retcode != 0:
          event.fail(message="non-zero value returned")

    # We catch all exceptions here (including SystemExit, KeyboardInterrupt,
    # etc) so as to ensure that we don't confuse the multiprocessing module,
    # which expects that all forked children exit with os._exit().
    # pylint: disable=bare-except
    except:
      traceback.print_exc(file=output)
      job_state.retcode = 1
    sys.stdout.flush()
    sys.stderr.flush()
    output.flush()
    # pylint: disable=protected-access
    os._exit(job_state.retcode)
  else:
    # Return the exit code of the subprocess.
    return os.waitpid(pid, 0)[1]


def UnpackPackage(pkg_state):
  """Unpacks package described by pkg_state.

  Args:
    pkg_state: EmergeJobState object describing target.

  Returns:
    Exit code returned by subprocess.
  """
  pkgdir = os.environ.get("PKGDIR",
                          os.path.join(os.environ["SYSROOT"], "packages"))
  root = os.environ.get("ROOT", os.environ["SYSROOT"])
  path = os.path.join(pkgdir, pkg_state.target + ".tbz2")
  comp = cros_build_lib.FindCompressor(cros_build_lib.COMP_BZIP2)
  cmd = [comp, "-dc"]
  if comp.endswith("pbzip2"):
    cmd.append("--ignore-trailing-garbage=1")
  cmd.append(path)

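  # Taken together, the two RunCommand calls below behave roughly like this
  # shell pipeline (illustrative sketch only; the decompressed data is
  # actually captured first and then fed to tar as input):
  #   $comp -dc [--ignore-trailing-garbage=1] $PKGDIR/<target>.tbz2 \
  #     | sudo tar -xf - -C $ROOT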
  with cros_event.newEvent(task_name="UnpackPackage", **pkg_state) as event:
    result = cros_build_lib.RunCommand(cmd, cwd=root, stdout_to_pipe=True,
                                       print_cmd=False, error_code_ok=True)

    # If we were not successful, return now and don't attempt to untar.
    if result.returncode != 0:
      event.fail("error decompressing: returned {}".format(result.returncode))
      return result.returncode

    cmd = ["sudo", "tar", "-xf", "-", "-C", root]

    result = cros_build_lib.RunCommand(cmd, cwd=root, input=result.output,
                                       print_cmd=False, error_code_ok=True)
    if result.returncode != 0:
1067 event.fail("error extracting:returned {}".format(result.returncode))

  return result.returncode


def EmergeWorker(task_queue, job_queue, emerge, package_db, fetch_only=False,
                 unpack_only=False):
  """This worker emerges any packages given to it on the task_queue.

  Args:
    task_queue: The queue of tasks for this worker to do.
    job_queue: The queue of results from the worker.
    emerge: An EmergeData() object.
    package_db: A dict, mapping package ids to portage Package objects.
    fetch_only: A bool, indicating if we should just fetch the target.
    unpack_only: A bool, indicating if we should just unpack the target.

  It expects package identifiers to be passed to it via task_queue. When
  a task is started, it pushes the (target, filename) to the started_queue.
  The output is stored in filename. When a merge starts or finishes, we push
  EmergeJobState objects to the job_queue.
  """
  if fetch_only:
    mode = 'fetch'
  elif unpack_only:
    mode = 'unpack'
  else:
    mode = 'emerge'
  proctitle.settitle('EmergeWorker', mode, '[idle]')

  SetupWorkerSignals()
  settings, trees, mtimedb = emerge.settings, emerge.trees, emerge.mtimedb

  # Disable flushing of caches to save on I/O.
  root = emerge.settings["ROOT"]
  vardb = emerge.trees[root]["vartree"].dbapi
  vardb._flush_cache_enabled = False  # pylint: disable=protected-access
  bindb = emerge.trees[root]["bintree"].dbapi
  # Might be a set, might be a list, might be None; no clue, just use shallow
  # copy to ensure we can roll it back.
  # pylint: disable=protected-access
  original_remotepkgs = copy.copy(bindb.bintree._remotepkgs)

  opts, spinner = emerge.opts, emerge.spinner
  opts["--nodeps"] = True
  if fetch_only:
    opts["--fetchonly"] = True

  while True:
    # Wait for a new item to show up on the queue. This is a blocking wait,
    # so if there's nothing to do, we just sit here.
    pkg_state = task_queue.get()
    if pkg_state is None:
      # If target is None, this means that the main thread wants us to quit.
      # The other workers need to exit too, so we'll push the message back on
      # to the queue so they'll get it too.
      task_queue.put(None)
      return
    if KILLED.is_set():
      return

    target = pkg_state.target
    proctitle.settitle('EmergeWorker', mode, target)

    db_pkg = package_db[target]

    if db_pkg.type_name == "binary":
      if not fetch_only and pkg_state.fetched_successfully:
1135 # Ensure portage doesn't think our pkg is remote- else it'll force
1136 # a redownload of it (even if the on-disk file is fine). In-memory
1137 # caching basically, implemented dumbly.
1138 bindb.bintree._remotepkgs = None
1139 else:
1140        bindb.bintree._remotepkgs = original_remotepkgs
1141
David Jamesfcb70ef2011-02-02 16:02:30 -08001142 db_pkg.root_config = emerge.root_config
1143 install_list = [db_pkg]
1144 pkgname = db_pkg.pf
1145 output = tempfile.NamedTemporaryFile(prefix=pkgname + "-", delete=False)
Mike Frysinger0444f4c2018-08-03 15:12:46 -04001146 os.chmod(output.name, 0o644)
David Jamesfcb70ef2011-02-02 16:02:30 -08001147 start_timestamp = time.time()
Brian Harring0be85c62012-03-17 19:52:12 -07001148 job = EmergeJobState(target, pkgname, False, output.name, start_timestamp,
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001149 fetch_only=fetch_only, unpack_only=unpack_only)
David Jamesfcb70ef2011-02-02 16:02:30 -08001150 job_queue.put(job)
1151 if "--pretend" in opts:
Chris Ching73486ab2017-04-26 18:02:37 -06001152 job.retcode = 0
David Jamesfcb70ef2011-02-02 16:02:30 -08001153 else:
David Jamesfcb70ef2011-02-02 16:02:30 -08001154 try:
David James386ccd12011-05-04 20:17:42 -07001155 emerge.scheduler_graph.mergelist = install_list
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001156 if unpack_only:
Chris Ching73486ab2017-04-26 18:02:37 -06001157 job.retcode = UnpackPackage(pkg_state)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001158 else:
Chris Ching73486ab2017-04-26 18:02:37 -06001159 job.retcode = EmergeProcess(output, job, settings, trees, mtimedb,
1160 opts, spinner,
1161 favorites=emerge.favorites,
1162 graph_config=emerge.scheduler_graph)
David Jamesfcb70ef2011-02-02 16:02:30 -08001163 except Exception:
1164 traceback.print_exc(file=output)
Chris Ching73486ab2017-04-26 18:02:37 -06001165 job.retcode = 1
David James1ed3e252011-10-05 20:26:15 -07001166 output.close()
David Jamesfcb70ef2011-02-02 16:02:30 -08001167
David James7358d032011-05-19 10:40:03 -07001168 if KILLED.is_set():
1169 return
1170
David Jamesfcb70ef2011-02-02 16:02:30 -08001171 job = EmergeJobState(target, pkgname, True, output.name, start_timestamp,
Chris Ching73486ab2017-04-26 18:02:37 -06001172 job.retcode, fetch_only=fetch_only,
1173 try_count=job.try_count, unpack_only=unpack_only)
David Jamesfcb70ef2011-02-02 16:02:30 -08001174 job_queue.put(job)
1175
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001176 # Set the title back to idle as the multiprocess pool won't destroy us;
1177 # when another job comes up, it'll re-use this process.
1178 proctitle.settitle('EmergeWorker', mode, '[idle]')
1179
David Jamesfcb70ef2011-02-02 16:02:30 -08001180
1181class LinePrinter(object):
1182 """Helper object to print a single line."""
1183
1184 def __init__(self, line):
1185 self.line = line
1186
David James321490a2012-12-17 12:05:56 -08001187 def Print(self, _seek_locations):
Mike Frysinger383367e2014-09-16 15:06:17 -04001188 print(self.line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001189
1190
1191class JobPrinter(object):
1192 """Helper object to print output of a job."""
1193
1194 def __init__(self, job, unlink=False):
1195 """Print output of job.
1196
Mike Frysinger02e1e072013-11-10 22:11:34 -05001197 If unlink is True, unlink the job output file when done.
1198 """
David Jamesfcb70ef2011-02-02 16:02:30 -08001199 self.current_time = time.time()
1200 self.job = job
1201 self.unlink = unlink
1202
1203 def Print(self, seek_locations):
1204
1205 job = self.job
1206
1207 # Calculate how long the job has been running.
1208 seconds = self.current_time - job.start_timestamp
1209
1210 # Note that we've printed out the job so far.
1211 job.last_output_timestamp = self.current_time
1212
1213    # Describe the job and how long it has been running.
1214 info = "job %s (%dm%.1fs)" % (job.pkgname, seconds / 60, seconds % 60)
1215 last_output_seek = seek_locations.get(job.filename, 0)
1216 if last_output_seek:
Mike Frysinger383367e2014-09-16 15:06:17 -04001217 print("=== Continue output for %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001218 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001219 print("=== Start output for %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001220
1221 # Print actual output from job
1222 f = codecs.open(job.filename, encoding='utf-8', errors='replace')
1223 f.seek(last_output_seek)
1224 prefix = job.pkgname + ":"
1225 for line in f:
1226
1227 # Save off our position in the file
1228 if line and line[-1] == "\n":
1229 last_output_seek = f.tell()
1230 line = line[:-1]
1231
1232 # Print our line
Mike Frysinger383367e2014-09-16 15:06:17 -04001233 print(prefix, line.encode('utf-8', 'replace'))
David Jamesfcb70ef2011-02-02 16:02:30 -08001234 f.close()
1235
1236 # Save our last spot in the file so that we don't print out the same
1237 # location twice.
1238 seek_locations[job.filename] = last_output_seek
1239
1240 # Note end of output section
1241 if job.done:
Mike Frysinger383367e2014-09-16 15:06:17 -04001242 print("=== Complete: %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001243 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001244 print("=== Still running: %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001245
1246 if self.unlink:
1247 os.unlink(job.filename)
1248
1249
1250def PrintWorker(queue):
1251 """A worker that prints stuff to the screen as requested."""
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001252 proctitle.settitle('PrintWorker')
David Jamesfcb70ef2011-02-02 16:02:30 -08001253
David James321490a2012-12-17 12:05:56 -08001254 def ExitHandler(_signum, _frame):
David James7358d032011-05-19 10:40:03 -07001255 # Set KILLED flag.
1256 KILLED.set()
1257
David Jamesfcb70ef2011-02-02 16:02:30 -08001258 # Switch to default signal handlers so that we'll die after two signals.
David James7358d032011-05-19 10:40:03 -07001259 signal.signal(signal.SIGINT, KillHandler)
1260 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -08001261
1262 # Don't exit on the first SIGINT / SIGTERM, because the parent worker will
1263 # handle it and tell us when we need to exit.
1264 signal.signal(signal.SIGINT, ExitHandler)
1265 signal.signal(signal.SIGTERM, ExitHandler)
1266
1267 # seek_locations is a map indicating the position we are at in each file.
1268 # It starts off empty, but is set by the various Print jobs as we go along
1269 # to indicate where we left off in each file.
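  # For example, {"/tmp/foo-XXXX.log": 4096} would mean the first 4096 bytes
  # of that log have already been echoed to the screen (the path here is
  # purely illustrative).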
1270 seek_locations = {}
1271 while True:
1272 try:
1273 job = queue.get()
1274 if job:
1275 job.Print(seek_locations)
David Jamesbccf8eb2011-07-27 14:06:06 -07001276 sys.stdout.flush()
David Jamesfcb70ef2011-02-02 16:02:30 -08001277 else:
1278 break
1279 except IOError as ex:
1280 if ex.errno == errno.EINTR:
1281 # Looks like we received a signal. Keep printing.
1282 continue
1283 raise
1284
Brian Harring867e2362012-03-17 04:05:17 -07001285
Brian Harring0be85c62012-03-17 19:52:12 -07001286class TargetState(object):
Chris Ching5fcbd622016-11-28 09:22:15 -07001287  """Per-target scheduling state (score, fetch status) for one package."""
Brian Harring867e2362012-03-17 04:05:17 -07001288
Brian Harring0be85c62012-03-17 19:52:12 -07001289 __slots__ = ("target", "info", "score", "prefetched", "fetched_successfully")
Brian Harring867e2362012-03-17 04:05:17 -07001290
David James321490a2012-12-17 12:05:56 -08001291 def __init__(self, target, info):
Brian Harring867e2362012-03-17 04:05:17 -07001292 self.target, self.info = target, info
Brian Harring0be85c62012-03-17 19:52:12 -07001293 self.fetched_successfully = False
1294 self.prefetched = False
David James321490a2012-12-17 12:05:56 -08001295 self.score = None
Brian Harring867e2362012-03-17 04:05:17 -07001296 self.update_score()
1297
1298 def __cmp__(self, other):
1299 return cmp(self.score, other.score)
1300
1301 def update_score(self):
1302 self.score = (
1303 -len(self.info["tprovides"]),
Brian Harring0be85c62012-03-17 19:52:12 -07001304 len(self.info["needs"]),
Brian Harring11c5eeb2012-03-18 11:02:39 -07001305 not self.info["binary"],
Brian Harring867e2362012-03-17 04:05:17 -07001306 -len(self.info["provides"]),
1307 self.info["idx"],
1308 self.target,
1309 )
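    # Score tuples compare lexicographically, so heapq pops the "best" target
    # first: targets with more dependents (a longer tprovides list), fewer
    # outstanding needs, binary packages ahead of source builds, and so on.
    # A tiny illustration (the info dicts are made up, but contain only the
    # keys used above):
    #
    #   a = TargetState("sys-apps/a", {"tprovides": ["x", "y"], "needs": {},
    #                                  "binary": True, "provides": ["x", "y"],
    #                                  "idx": 0})
    #   b = TargetState("sys-apps/b", {"tprovides": ["x"], "needs": {},
    #                                  "binary": True, "provides": ["x"],
    #                                  "idx": 1})
    #   sorted([b, a])[0] is a      # a wins: it unblocks more packages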
1310
1311
1312class ScoredHeap(object):
Don Garrett25f309a2014-03-19 14:02:12 -07001313 """Implementation of a general purpose scored heap."""
Brian Harring867e2362012-03-17 04:05:17 -07001314
Brian Harring0be85c62012-03-17 19:52:12 -07001315 __slots__ = ("heap", "_heap_set")
1316
Brian Harring867e2362012-03-17 04:05:17 -07001317 def __init__(self, initial=()):
Brian Harring0be85c62012-03-17 19:52:12 -07001318 self.heap = list()
1319 self._heap_set = set()
1320 if initial:
1321 self.multi_put(initial)
Brian Harring867e2362012-03-17 04:05:17 -07001322
1323 def get(self):
Brian Harring0be85c62012-03-17 19:52:12 -07001324 item = heapq.heappop(self.heap)
1325 self._heap_set.remove(item.target)
1326 return item
Brian Harring867e2362012-03-17 04:05:17 -07001327
Brian Harring0be85c62012-03-17 19:52:12 -07001328 def put(self, item):
1329 if not isinstance(item, TargetState):
1330 raise ValueError("Item %r isn't a TargetState" % (item,))
1331 heapq.heappush(self.heap, item)
1332 self._heap_set.add(item.target)
Brian Harring867e2362012-03-17 04:05:17 -07001333
Brian Harring0be85c62012-03-17 19:52:12 -07001334 def multi_put(self, sequence):
1335 sequence = list(sequence)
1336 self.heap.extend(sequence)
1337 self._heap_set.update(x.target for x in sequence)
Brian Harring867e2362012-03-17 04:05:17 -07001338 self.sort()
1339
David James5c9996d2012-03-24 10:50:46 -07001340 def sort(self):
1341 heapq.heapify(self.heap)
1342
Brian Harring0be85c62012-03-17 19:52:12 -07001343 def __contains__(self, target):
1344 return target in self._heap_set
1345
1346 def __nonzero__(self):
1347 return bool(self.heap)
1348
Brian Harring867e2362012-03-17 04:05:17 -07001349 def __len__(self):
1350 return len(self.heap)
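  # Example usage (illustrative; this mirrors how EmergeQueue drives the heap):
  #
  #   heap = ScoredHeap(state_map.itervalues())
  #   while heap:                        # __nonzero__: any items left?
  #     state = heap.get()               # lowest score (highest priority) first
  #     assert state.target not in heap  # __contains__ tracks targets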
1351
1352
David Jamesfcb70ef2011-02-02 16:02:30 -08001353class EmergeQueue(object):
1354 """Class to schedule emerge jobs according to a dependency graph."""
1355
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001356 def __init__(self, deps_map, emerge, package_db, show_output, unpack_only,
1357 max_retries):
David Jamesfcb70ef2011-02-02 16:02:30 -08001358 # Store the dependency graph.
1359 self._deps_map = deps_map
Brian Harring0be85c62012-03-17 19:52:12 -07001360 self._state_map = {}
David Jamesfcb70ef2011-02-02 16:02:30 -08001361 # Initialize the running queue to empty
Brian Harring0be85c62012-03-17 19:52:12 -07001362 self._build_jobs = {}
1363 self._build_ready = ScoredHeap()
1364 self._fetch_jobs = {}
1365 self._fetch_ready = ScoredHeap()
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001366 self._unpack_jobs = {}
1367 self._unpack_ready = ScoredHeap()
David Jamesfcb70ef2011-02-02 16:02:30 -08001368 # List of total package installs represented in deps_map.
1369 install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"]
1370 self._total_jobs = len(install_jobs)
1371 self._show_output = show_output
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001372 self._unpack_only = unpack_only
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001373 self._max_retries = max_retries
David Jamesfcb70ef2011-02-02 16:02:30 -08001374
1375 if "--pretend" in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001376 print("Skipping merge because of --pretend mode.")
David Jamesfcb70ef2011-02-02 16:02:30 -08001377 sys.exit(0)
1378
David Jamesaaf49e42014-04-24 09:40:05 -07001379 # Set up a session so we can easily terminate all children.
1380 self._SetupSession()
David James7358d032011-05-19 10:40:03 -07001381
David Jamesfcb70ef2011-02-02 16:02:30 -08001382 # Setup scheduler graph object. This is used by the child processes
1383 # to help schedule jobs.
1384 emerge.scheduler_graph = emerge.depgraph.schedulerGraph()
1385
1386 # Calculate how many jobs we can run in parallel. We don't want to pass
1387 # the --jobs flag over to emerge itself, because that'll tell emerge to
1388 # hide its output, and said output is quite useful for debugging hung
1389 # jobs.
1390 procs = min(self._total_jobs,
1391 emerge.opts.pop("--jobs", multiprocessing.cpu_count()))
Nam T. Nguyenf7098b32015-05-08 11:04:48 -07001392 self._build_procs = self._unpack_procs = max(1, procs)
1393 # Fetch is IO bound, we can use more processes.
1394 self._fetch_procs = max(4, procs)
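    # For example (purely illustrative): with 100 jobs to run and --jobs=16,
    # we get 16 build/unpack workers and 16 fetchers; with --jobs=2 we still
    # get 4 fetchers, since fetching is IO bound rather than CPU bound.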
David James8c7e5e32011-06-28 11:26:03 -07001395 self._load_avg = emerge.opts.pop("--load-average", None)
David Jamesfcb70ef2011-02-02 16:02:30 -08001396 self._job_queue = multiprocessing.Queue()
1397 self._print_queue = multiprocessing.Queue()
Brian Harring0be85c62012-03-17 19:52:12 -07001398
1399 self._fetch_queue = multiprocessing.Queue()
1400 args = (self._fetch_queue, self._job_queue, emerge, package_db, True)
1401 self._fetch_pool = multiprocessing.Pool(self._fetch_procs, EmergeWorker,
1402 args)
1403
1404 self._build_queue = multiprocessing.Queue()
1405 args = (self._build_queue, self._job_queue, emerge, package_db)
1406 self._build_pool = multiprocessing.Pool(self._build_procs, EmergeWorker,
1407 args)
1408
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001409 if self._unpack_only:
1410 # Unpack pool only required on unpack_only jobs.
1411 self._unpack_queue = multiprocessing.Queue()
1412 args = (self._unpack_queue, self._job_queue, emerge, package_db, False,
1413 True)
1414 self._unpack_pool = multiprocessing.Pool(self._unpack_procs, EmergeWorker,
1415 args)
1416
David Jamesfcb70ef2011-02-02 16:02:30 -08001417 self._print_worker = multiprocessing.Process(target=PrintWorker,
1418 args=[self._print_queue])
1419 self._print_worker.start()
1420
1421 # Initialize the failed queue to empty.
1422 self._retry_queue = []
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001423 self._failed_count = dict()
David Jamesfcb70ef2011-02-02 16:02:30 -08001424
David Jamesfcb70ef2011-02-02 16:02:30 -08001425 # Setup an exit handler so that we print nice messages if we are
1426 # terminated.
1427 self._SetupExitHandler()
1428
1429 # Schedule our jobs.
Brian Harring0be85c62012-03-17 19:52:12 -07001430 self._state_map.update(
1431 (pkg, TargetState(pkg, data)) for pkg, data in deps_map.iteritems())
1432 self._fetch_ready.multi_put(self._state_map.itervalues())
David Jamesfcb70ef2011-02-02 16:02:30 -08001433
David Jamesaaf49e42014-04-24 09:40:05 -07001434 def _SetupSession(self):
1435 """Set up a session so we can easily terminate all children."""
1436 # When we call os.setsid(), this sets up a session / process group for this
1437 # process and all children. These session groups are needed so that we can
1438 # easily kill all children (including processes launched by emerge) before
1439 # we exit.
1440 #
1441 # One unfortunate side effect of os.setsid() is that it blocks CTRL-C from
1442 # being received. To work around this, we only call os.setsid() in a forked
1443 # process, so that the parent can still watch for CTRL-C. The parent will
1444 # just sit around, watching for signals and propagating them to the child,
1445 # until the child exits.
1446 #
1447 # TODO(davidjames): It would be nice if we could replace this with cgroups.
1448 pid = os.fork()
1449 if pid == 0:
1450 os.setsid()
1451 else:
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001452 proctitle.settitle('SessionManager')
1453
David Jamesaaf49e42014-04-24 09:40:05 -07001454 def PropagateToChildren(signum, _frame):
1455 # Just propagate the signals down to the child. We'll exit when the
1456 # child does.
1457 try:
1458 os.kill(pid, signum)
1459 except OSError as ex:
1460 if ex.errno != errno.ESRCH:
1461 raise
1462 signal.signal(signal.SIGINT, PropagateToChildren)
1463 signal.signal(signal.SIGTERM, PropagateToChildren)
1464
1465 def StopGroup(_signum, _frame):
1466 # When we get stopped, stop the children.
1467 try:
1468 os.killpg(pid, signal.SIGSTOP)
1469 os.kill(0, signal.SIGSTOP)
1470 except OSError as ex:
1471 if ex.errno != errno.ESRCH:
1472 raise
1473 signal.signal(signal.SIGTSTP, StopGroup)
1474
1475 def ContinueGroup(_signum, _frame):
1476 # Launch the children again after being stopped.
1477 try:
1478 os.killpg(pid, signal.SIGCONT)
1479 except OSError as ex:
1480 if ex.errno != errno.ESRCH:
1481 raise
1482 signal.signal(signal.SIGCONT, ContinueGroup)
1483
1484 # Loop until the children exit. We exit with os._exit to be sure we
1485 # don't run any finalizers (those will be run by the child process.)
Mike Frysinger27e21b72018-07-12 14:20:21 -04001486 # pylint: disable=protected-access
David Jamesaaf49e42014-04-24 09:40:05 -07001487 while True:
1488 try:
1489 # Wait for the process to exit. When it does, exit with the return
1490 # value of the subprocess.
Mike Frysingere2d8f0d2014-11-01 13:09:26 -04001491 os._exit(process_util.GetExitStatus(os.waitpid(pid, 0)[1]))
David Jamesaaf49e42014-04-24 09:40:05 -07001492 except OSError as ex:
1493 if ex.errno == errno.EINTR:
1494 continue
1495 traceback.print_exc()
1496 os._exit(1)
1497 except BaseException:
1498 traceback.print_exc()
1499 os._exit(1)
1500
David Jamesfcb70ef2011-02-02 16:02:30 -08001501 def _SetupExitHandler(self):
1502
David James321490a2012-12-17 12:05:56 -08001503 def ExitHandler(signum, _frame):
David James7358d032011-05-19 10:40:03 -07001504 # Set KILLED flag.
1505 KILLED.set()
David Jamesfcb70ef2011-02-02 16:02:30 -08001506
1507 # Kill our signal handlers so we don't get called recursively
David James7358d032011-05-19 10:40:03 -07001508 signal.signal(signal.SIGINT, KillHandler)
1509 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -08001510
1511 # Print our current job status
Brian Harring0be85c62012-03-17 19:52:12 -07001512 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001513 if job:
1514 self._print_queue.put(JobPrinter(job, unlink=True))
1515
1516 # Notify the user that we are exiting
1517 self._Print("Exiting on signal %s" % signum)
David James7358d032011-05-19 10:40:03 -07001518 self._print_queue.put(None)
1519 self._print_worker.join()
David Jamesfcb70ef2011-02-02 16:02:30 -08001520
1521 # Kill child threads, then exit.
David James7358d032011-05-19 10:40:03 -07001522 os.killpg(0, signal.SIGKILL)
David Jamesfcb70ef2011-02-02 16:02:30 -08001523 sys.exit(1)
1524
1525 # Print out job status when we are killed
1526 signal.signal(signal.SIGINT, ExitHandler)
1527 signal.signal(signal.SIGTERM, ExitHandler)
1528
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001529 def _ScheduleUnpack(self, pkg_state):
1530 self._unpack_jobs[pkg_state.target] = None
1531 self._unpack_queue.put(pkg_state)
1532
Brian Harring0be85c62012-03-17 19:52:12 -07001533 def _Schedule(self, pkg_state):
David Jamesfcb70ef2011-02-02 16:02:30 -08001534    # We maintain a tree of all deps; if this package doesn't need
David James8c7e5e32011-06-28 11:26:03 -07001535    # to be installed, just free up its children and continue.
David Jamesfcb70ef2011-02-02 16:02:30 -08001536 # It is possible to reinstall deps of deps, without reinstalling
1537 # first level deps, like so:
Mike Frysingerfd969312014-04-02 22:16:42 -04001538 # virtual/target-os (merge) -> eselect (nomerge) -> python (merge)
Brian Harring0be85c62012-03-17 19:52:12 -07001539 this_pkg = pkg_state.info
1540 target = pkg_state.target
1541 if pkg_state.info is not None:
1542 if this_pkg["action"] == "nomerge":
1543 self._Finish(target)
1544 elif target not in self._build_jobs:
1545 # Kick off the build if it's marked to be built.
1546 self._build_jobs[target] = None
1547 self._build_queue.put(pkg_state)
1548 return True
David Jamesfcb70ef2011-02-02 16:02:30 -08001549
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001550 def _ScheduleLoop(self, unpack_only=False):
1551 if unpack_only:
1552 ready_queue = self._unpack_ready
1553 jobs_queue = self._unpack_jobs
1554 procs = self._unpack_procs
1555 else:
1556 ready_queue = self._build_ready
1557 jobs_queue = self._build_jobs
1558 procs = self._build_procs
1559
David James8c7e5e32011-06-28 11:26:03 -07001560 # If the current load exceeds our desired load average, don't schedule
1561 # more than one job.
1562 if self._load_avg and os.getloadavg()[0] > self._load_avg:
1563 needed_jobs = 1
1564 else:
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001565 needed_jobs = procs
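    # For example (illustrative numbers): with --load-average=8.0 and a
    # current 1-minute load of 9.3, needed_jobs is 1, so at most one job is
    # kept running; once the load drops back below 8.0 we resume filling all
    # available worker slots.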
David James8c7e5e32011-06-28 11:26:03 -07001566
1567 # Schedule more jobs.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001568 while ready_queue and len(jobs_queue) < needed_jobs:
1569 state = ready_queue.get()
1570 if unpack_only:
1571 self._ScheduleUnpack(state)
1572 else:
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001573 if state.target not in self._failed_count:
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001574 self._Schedule(state)
David Jamesfcb70ef2011-02-02 16:02:30 -08001575
1576 def _Print(self, line):
1577 """Print a single line."""
1578 self._print_queue.put(LinePrinter(line))
1579
1580 def _Status(self):
1581 """Print status."""
1582 current_time = time.time()
Aviv Keshet3b381682015-11-12 13:15:06 -08001583 current_time_struct = time.localtime(current_time)
David Jamesfcb70ef2011-02-02 16:02:30 -08001584 no_output = True
1585
1586 # Print interim output every minute if --show-output is used. Otherwise,
1587 # print notifications about running packages every 2 minutes, and print
1588 # full output for jobs that have been running for 60 minutes or more.
1589 if self._show_output:
1590 interval = 60
1591 notify_interval = 0
1592 else:
1593 interval = 60 * 60
1594 notify_interval = 60 * 2
David James321490a2012-12-17 12:05:56 -08001595 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001596 if job:
1597 last_timestamp = max(job.start_timestamp, job.last_output_timestamp)
1598 if last_timestamp + interval < current_time:
1599 self._print_queue.put(JobPrinter(job))
1600 job.last_output_timestamp = current_time
1601 no_output = False
1602 elif (notify_interval and
1603 job.last_notify_timestamp + notify_interval < current_time):
1604 job_seconds = current_time - job.start_timestamp
1605 args = (job.pkgname, job_seconds / 60, job_seconds % 60, job.filename)
1606 info = "Still building %s (%dm%.1fs). Logs in %s" % args
1607 job.last_notify_timestamp = current_time
1608 self._Print(info)
1609 no_output = False
1610
1611 # If we haven't printed any messages yet, print a general status message
1612 # here.
1613 if no_output:
1614 seconds = current_time - GLOBAL_START
Brian Harring0be85c62012-03-17 19:52:12 -07001615 fjobs, fready = len(self._fetch_jobs), len(self._fetch_ready)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001616 ujobs, uready = len(self._unpack_jobs), len(self._unpack_ready)
Brian Harring0be85c62012-03-17 19:52:12 -07001617 bjobs, bready = len(self._build_jobs), len(self._build_ready)
1618 retries = len(self._retry_queue)
1619 pending = max(0, len(self._deps_map) - fjobs - bjobs)
1620 line = "Pending %s/%s, " % (pending, self._total_jobs)
1621 if fjobs or fready:
1622 line += "Fetching %s/%s, " % (fjobs, fready + fjobs)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001623 if ujobs or uready:
1624 line += "Unpacking %s/%s, " % (ujobs, uready + ujobs)
Brian Harring0be85c62012-03-17 19:52:12 -07001625 if bjobs or bready or retries:
1626 line += "Building %s/%s, " % (bjobs, bready + bjobs)
1627 if retries:
1628 line += "Retrying %s, " % (retries,)
Mike Frysingerd6e2df02014-11-26 02:55:04 -05001629 load = " ".join(str(x) for x in os.getloadavg())
Aviv Keshet3b381682015-11-12 13:15:06 -08001630 line += ("[Time %s | Elapsed %dm%.1fs | Load %s]" % (
1631 time.strftime('%H:%M:%S', current_time_struct), seconds / 60,
1632 seconds % 60, load))
Brian Harring0be85c62012-03-17 19:52:12 -07001633 self._Print(line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001634
1635 def _Finish(self, target):
David James8c7e5e32011-06-28 11:26:03 -07001636 """Mark a target as completed and unblock dependencies."""
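    # Each deps_map entry consulted below looks roughly like this (a sketch of
    # just the fields used here, not the full schema):
    #
    #   {
    #     "action": "merge",      # or "nomerge"
    #     "nodeps": False,
    #     "needs": {...},         # targets this package still waits on
    #     "provides": [...],      # targets that wait on this package
    #   }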
1637 this_pkg = self._deps_map[target]
1638 if this_pkg["needs"] and this_pkg["nodeps"]:
1639 # We got installed, but our deps have not been installed yet. Dependent
1640 # packages should only be installed when our needs have been fully met.
1641 this_pkg["action"] = "nomerge"
1642 else:
David James8c7e5e32011-06-28 11:26:03 -07001643 for dep in this_pkg["provides"]:
1644 dep_pkg = self._deps_map[dep]
Brian Harring0be85c62012-03-17 19:52:12 -07001645 state = self._state_map[dep]
David James8c7e5e32011-06-28 11:26:03 -07001646 del dep_pkg["needs"][target]
Brian Harring0be85c62012-03-17 19:52:12 -07001647 state.update_score()
1648 if not state.prefetched:
1649 if dep in self._fetch_ready:
1650 # If it's not currently being fetched, update the prioritization
1651 self._fetch_ready.sort()
1652 elif not dep_pkg["needs"]:
David James8c7e5e32011-06-28 11:26:03 -07001653 if dep_pkg["nodeps"] and dep_pkg["action"] == "nomerge":
1654 self._Finish(dep)
1655 else:
Brian Harring0be85c62012-03-17 19:52:12 -07001656 self._build_ready.put(self._state_map[dep])
David James8c7e5e32011-06-28 11:26:03 -07001657 self._deps_map.pop(target)
David Jamesfcb70ef2011-02-02 16:02:30 -08001658
1659 def _Retry(self):
David James8c7e5e32011-06-28 11:26:03 -07001660 while self._retry_queue:
Brian Harring0be85c62012-03-17 19:52:12 -07001661 state = self._retry_queue.pop(0)
1662 if self._Schedule(state):
1663 self._Print("Retrying emerge of %s." % state.target)
David James8c7e5e32011-06-28 11:26:03 -07001664 break
David Jamesfcb70ef2011-02-02 16:02:30 -08001665
Brian Harringa43f5952012-04-12 01:19:34 -07001666 def _Shutdown(self):
David Jamesfcb70ef2011-02-02 16:02:30 -08001667 # Tell emerge workers to exit. They all exit when 'None' is pushed
1668 # to the queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001669
Brian Harringa43f5952012-04-12 01:19:34 -07001670 # Shutdown the workers first; then jobs (which is how they feed things back)
1671 # then finally the print queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001672
Brian Harringa43f5952012-04-12 01:19:34 -07001673 def _stop(queue, pool):
1674 if pool is None:
1675 return
1676 try:
1677 queue.put(None)
1678 pool.close()
1679 pool.join()
1680 finally:
1681 pool.terminate()
Brian Harring0be85c62012-03-17 19:52:12 -07001682
Brian Harringa43f5952012-04-12 01:19:34 -07001683 _stop(self._fetch_queue, self._fetch_pool)
1684 self._fetch_queue = self._fetch_pool = None
Brian Harring0be85c62012-03-17 19:52:12 -07001685
Brian Harringa43f5952012-04-12 01:19:34 -07001686 _stop(self._build_queue, self._build_pool)
1687 self._build_queue = self._build_pool = None
1688
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001689 if self._unpack_only:
1690 _stop(self._unpack_queue, self._unpack_pool)
1691 self._unpack_queue = self._unpack_pool = None
1692
Brian Harringa43f5952012-04-12 01:19:34 -07001693 if self._job_queue is not None:
1694 self._job_queue.close()
1695 self._job_queue = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001696
1697 # Now that our workers are finished, we can kill the print queue.
Brian Harringa43f5952012-04-12 01:19:34 -07001698 if self._print_worker is not None:
1699 try:
1700 self._print_queue.put(None)
1701 self._print_queue.close()
1702 self._print_worker.join()
1703 finally:
1704 self._print_worker.terminate()
1705 self._print_queue = self._print_worker = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001706
1707 def Run(self):
1708 """Run through the scheduled ebuilds.
1709
1710 Keep running so long as we have uninstalled packages in the
1711 dependency graph to merge.
1712 """
Brian Harringa43f5952012-04-12 01:19:34 -07001713 if not self._deps_map:
1714 return
1715
Brian Harring0be85c62012-03-17 19:52:12 -07001716 # Start the fetchers.
1717 for _ in xrange(min(self._fetch_procs, len(self._fetch_ready))):
1718 state = self._fetch_ready.get()
1719 self._fetch_jobs[state.target] = None
1720 self._fetch_queue.put(state)
1721
1722 # Print an update, then get going.
1723 self._Status()
1724
David Jamesfcb70ef2011-02-02 16:02:30 -08001725 while self._deps_map:
1726 # Check here that we are actually waiting for something.
Brian Harring0be85c62012-03-17 19:52:12 -07001727 if (self._build_queue.empty() and
David Jamesfcb70ef2011-02-02 16:02:30 -08001728 self._job_queue.empty() and
Brian Harring0be85c62012-03-17 19:52:12 -07001729 not self._fetch_jobs and
1730 not self._fetch_ready and
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001731 not self._unpack_jobs and
1732 not self._unpack_ready and
Brian Harring0be85c62012-03-17 19:52:12 -07001733 not self._build_jobs and
1734 not self._build_ready and
David Jamesfcb70ef2011-02-02 16:02:30 -08001735 self._deps_map):
1736 # If we have failed on a package, retry it now.
1737 if self._retry_queue:
1738 self._Retry()
1739 else:
David Jamesfcb70ef2011-02-02 16:02:30 -08001740 # Tell the user why we're exiting.
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001741 if self._failed_count:
1742 print('Packages failed:\n\t%s' %
1743 '\n\t'.join(self._failed_count.iterkeys()))
David James0eae23e2012-07-03 15:04:25 -07001744 status_file = os.environ.get("PARALLEL_EMERGE_STATUS_FILE")
1745 if status_file:
David James321490a2012-12-17 12:05:56 -08001746 failed_pkgs = set(portage.versions.cpv_getkey(x)
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001747 for x in self._failed_count.iterkeys())
David James0eae23e2012-07-03 15:04:25 -07001748 with open(status_file, "a") as f:
1749 f.write("%s\n" % " ".join(failed_pkgs))
David Jamesfcb70ef2011-02-02 16:02:30 -08001750 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001751 print("Deadlock! Circular dependencies!")
David Jamesfcb70ef2011-02-02 16:02:30 -08001752 sys.exit(1)
1753
David James321490a2012-12-17 12:05:56 -08001754 for _ in xrange(12):
David Jamesa74289a2011-08-12 10:41:24 -07001755 try:
1756 job = self._job_queue.get(timeout=5)
1757 break
1758 except Queue.Empty:
1759 # Check if any more jobs can be scheduled.
1760 self._ScheduleLoop()
1761 else:
Brian Harring706747c2012-03-16 03:04:31 -07001762 # Print an update every 60 seconds.
David Jamesfcb70ef2011-02-02 16:02:30 -08001763 self._Status()
1764 continue
1765
1766 target = job.target
1767
Brian Harring0be85c62012-03-17 19:52:12 -07001768 if job.fetch_only:
1769 if not job.done:
1770 self._fetch_jobs[job.target] = job
1771 else:
1772 state = self._state_map[job.target]
1773 state.prefetched = True
1774 state.fetched_successfully = (job.retcode == 0)
1775 del self._fetch_jobs[job.target]
1776 self._Print("Fetched %s in %2.2fs"
1777 % (target, time.time() - job.start_timestamp))
1778
1779 if self._show_output or job.retcode != 0:
1780 self._print_queue.put(JobPrinter(job, unlink=True))
1781 else:
1782 os.unlink(job.filename)
1783 # Failure or not, let build work with it next.
1784 if not self._deps_map[job.target]["needs"]:
1785 self._build_ready.put(state)
1786 self._ScheduleLoop()
1787
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001788 if self._unpack_only and job.retcode == 0:
1789 self._unpack_ready.put(state)
1790 self._ScheduleLoop(unpack_only=True)
1791
Brian Harring0be85c62012-03-17 19:52:12 -07001792 if self._fetch_ready:
1793 state = self._fetch_ready.get()
1794 self._fetch_queue.put(state)
1795 self._fetch_jobs[state.target] = None
1796 else:
1797 # Minor optimization; shut down fetchers early since we know
1798 # the queue is empty.
1799 self._fetch_queue.put(None)
1800 continue
1801
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001802 if job.unpack_only:
1803 if not job.done:
1804 self._unpack_jobs[target] = job
1805 else:
1806 del self._unpack_jobs[target]
1807 self._Print("Unpacked %s in %2.2fs"
1808 % (target, time.time() - job.start_timestamp))
1809 if self._show_output or job.retcode != 0:
1810 self._print_queue.put(JobPrinter(job, unlink=True))
1811 else:
1812 os.unlink(job.filename)
1813 if self._unpack_ready:
1814 state = self._unpack_ready.get()
1815 self._unpack_queue.put(state)
1816 self._unpack_jobs[state.target] = None
1817 continue
1818
David Jamesfcb70ef2011-02-02 16:02:30 -08001819 if not job.done:
Brian Harring0be85c62012-03-17 19:52:12 -07001820 self._build_jobs[target] = job
David Jamesfcb70ef2011-02-02 16:02:30 -08001821 self._Print("Started %s (logged in %s)" % (target, job.filename))
1822 continue
1823
1824 # Print output of job
1825 if self._show_output or job.retcode != 0:
1826 self._print_queue.put(JobPrinter(job, unlink=True))
1827 else:
1828 os.unlink(job.filename)
Brian Harring0be85c62012-03-17 19:52:12 -07001829 del self._build_jobs[target]
David Jamesfcb70ef2011-02-02 16:02:30 -08001830
1831 seconds = time.time() - job.start_timestamp
1832 details = "%s (in %dm%.1fs)" % (target, seconds / 60, seconds % 60)
1833
1834 # Complain if necessary.
1835 if job.retcode != 0:
1836 # Handle job failure.
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001837 failed_count = self._failed_count.get(target, 0)
1838 if failed_count >= self._max_retries:
1839 # If this job has failed and can't be retried, give up.
David Jamesfcb70ef2011-02-02 16:02:30 -08001840 self._Print("Failed %s. Your build has failed." % details)
1841 else:
1842 # Queue up this build to try again after a long while.
Brian Harring0be85c62012-03-17 19:52:12 -07001843 self._retry_queue.append(self._state_map[target])
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001844 self._failed_count[target] = failed_count + 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001845 self._Print("Failed %s, retrying later." % details)
1846 else:
David James32420cc2011-08-25 21:32:46 -07001847 self._Print("Completed %s" % details)
1848
1849 # Mark as completed and unblock waiting ebuilds.
1850 self._Finish(target)
1851
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001852 if target in self._failed_count and self._retry_queue:
David Jamesfcb70ef2011-02-02 16:02:30 -08001853 # If we have successfully retried a failed package, and there
1854 # are more failed packages, try the next one. We will only have
1855 # one retrying package actively running at a time.
1856 self._Retry()
1857
David Jamesfcb70ef2011-02-02 16:02:30 -08001858
David James8c7e5e32011-06-28 11:26:03 -07001859 # Schedule pending jobs and print an update.
1860 self._ScheduleLoop()
1861 self._Status()
David Jamesfcb70ef2011-02-02 16:02:30 -08001862
David Jamese703d0f2012-01-12 16:27:45 -08001863 # If packages were retried, output a warning.
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001864 if self._failed_count:
David Jamese703d0f2012-01-12 16:27:45 -08001865 self._Print("")
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001866 self._Print("WARNING: The following packages failed once or more,")
David Jamese703d0f2012-01-12 16:27:45 -08001867 self._Print("but succeeded upon retry. This might indicate incorrect")
1868 self._Print("dependencies.")
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001869 for pkg in self._failed_count.iterkeys():
David Jamese703d0f2012-01-12 16:27:45 -08001870 self._Print(" %s" % pkg)
1871 self._Print("@@@STEP_WARNINGS@@@")
1872 self._Print("")
1873
David Jamesfcb70ef2011-02-02 16:02:30 -08001874 # Tell child threads to exit.
1875 self._Print("Merge complete")
David Jamesfcb70ef2011-02-02 16:02:30 -08001876
1877
Brian Harring30675052012-02-29 12:18:22 -08001878def main(argv):
Brian Harring8294d652012-05-23 02:20:52 -07001879 try:
1880 return real_main(argv)
1881 finally:
1882    # Work around multiprocessing not cleaning up after itself:
1883    # http://bugs.python.org/issue4106
1884    # Step one: ensure GC is run *prior* to the VM starting shutdown.
1885    gc.collect()
1886    # Step two: go looking for those threads and try to manually reap
1887    # them if we can.
1888 for x in threading.enumerate():
1889 # Filter on the name, and ident; if ident is None, the thread
1890 # wasn't started.
1891 if x.name == 'QueueFeederThread' and x.ident is not None:
1892 x.join(1)
David Jamesfcb70ef2011-02-02 16:02:30 -08001893
Brian Harring8294d652012-05-23 02:20:52 -07001894
1895def real_main(argv):
Brian Harring30675052012-02-29 12:18:22 -08001896 parallel_emerge_args = argv[:]
David Jamesfcb70ef2011-02-02 16:02:30 -08001897 deps = DepGraphGenerator()
Brian Harring30675052012-02-29 12:18:22 -08001898 deps.Initialize(parallel_emerge_args)
David Jamesfcb70ef2011-02-02 16:02:30 -08001899 emerge = deps.emerge
1900
1901 if emerge.action is not None:
Brian Harring30675052012-02-29 12:18:22 -08001902 argv = deps.ParseParallelEmergeArgs(argv)
Brian Harring8294d652012-05-23 02:20:52 -07001903 return emerge_main(argv)
David Jamesfcb70ef2011-02-02 16:02:30 -08001904 elif not emerge.cmdline_packages:
1905 Usage()
Brian Harring8294d652012-05-23 02:20:52 -07001906 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001907
1908 # Unless we're in pretend mode, there's not much point running without
1909 # root access. We need to be able to install packages.
1910 #
1911 # NOTE: Even if you're running --pretend, it's a good idea to run
1912 # parallel_emerge with root access so that portage can write to the
1913 # dependency cache. This is important for performance.
David James321490a2012-12-17 12:05:56 -08001914 if "--pretend" not in emerge.opts and portage.data.secpass < 2:
Mike Frysinger383367e2014-09-16 15:06:17 -04001915 print("parallel_emerge: superuser access is required.")
Brian Harring8294d652012-05-23 02:20:52 -07001916 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001917
1918 if "--quiet" not in emerge.opts:
1919 cmdline_packages = " ".join(emerge.cmdline_packages)
Mike Frysinger383367e2014-09-16 15:06:17 -04001920 print("Starting fast-emerge.")
1921 print(" Building package %s on %s" % (cmdline_packages,
Gilad Arnold05f94b02015-05-22 10:41:05 -07001922 deps.sysroot or "root"))
David Jamesfcb70ef2011-02-02 16:02:30 -08001923
David James386ccd12011-05-04 20:17:42 -07001924 deps_tree, deps_info = deps.GenDependencyTree()
David Jamesfcb70ef2011-02-02 16:02:30 -08001925
1926 # You want me to be verbose? I'll give you two trees! Twice as much value.
1927 if "--tree" in emerge.opts and "--verbose" in emerge.opts:
1928 deps.PrintTree(deps_tree)
1929
David James386ccd12011-05-04 20:17:42 -07001930 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001931
1932 # OK, time to print out our progress so far.
1933 deps.PrintInstallPlan(deps_graph)
1934 if "--tree" in emerge.opts:
1935 PrintDepsMap(deps_graph)
1936
1937 # Are we upgrading portage? If so, and there are more packages to merge,
1938 # schedule a restart of parallel_emerge to merge the rest. This ensures that
1939 # we pick up all updates to portage settings before merging any more
1940 # packages.
1941 portage_upgrade = False
1942 root = emerge.settings["ROOT"]
Mike Frysinger27e21b72018-07-12 14:20:21 -04001943 # pylint: disable=protected-access
David Jamesfcb70ef2011-02-02 16:02:30 -08001944 if root == "/":
Gilad Arnoldcead28a2015-05-22 10:45:02 -07001945 final_db = emerge.depgraph._dynamic_config._filtered_trees[root]['graph_db']
Mike Frysinger3fb56ef2018-01-05 19:00:04 -05001946 for db_pkg in final_db.cp_list("sys-apps/portage"):
David Jamesfcb70ef2011-02-02 16:02:30 -08001947 portage_pkg = deps_graph.get(db_pkg.cpv)
David James0ff16f22012-11-02 14:18:07 -07001948 if portage_pkg:
David Jamesfcb70ef2011-02-02 16:02:30 -08001949 portage_upgrade = True
1950 if "--quiet" not in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001951 print("Upgrading portage first, then restarting...")
David Jamesfcb70ef2011-02-02 16:02:30 -08001952
David James0ff16f22012-11-02 14:18:07 -07001953 # Upgrade Portage first, then the rest of the packages.
1954 #
1955 # In order to grant the child permission to run setsid, we need to run sudo
1956 # again. We preserve SUDO_USER here in case an ebuild depends on it.
1957 if portage_upgrade:
1958 # Calculate what arguments to use when re-invoking.
1959 args = ["sudo", "-E", "SUDO_USER=%s" % os.environ.get("SUDO_USER", "")]
1960 args += [os.path.abspath(sys.argv[0])] + parallel_emerge_args
1961 args += ["--exclude=sys-apps/portage"]
1962
1963 # First upgrade Portage.
1964 passthrough_args = ("--quiet", "--pretend", "--verbose")
1965 emerge_args = [k for k in emerge.opts if k in passthrough_args]
1966 ret = emerge_main(emerge_args + ["portage"])
1967 if ret != 0:
1968 return ret
1969
1970 # Now upgrade the rest.
1971 os.execvp(args[0], args)
1972
Bertrand SIMONNETc03c8ee2014-12-10 17:02:55 -08001973 # Attempt to solve crbug.com/433482
1974 # The file descriptor error appears only when getting userpriv_groups
1975 # (lazily generated). Loading userpriv_groups here will reduce the number of
1976  # calls from a few hundred to one.
1977 portage.data._get_global('userpriv_groups')
1978
David Jamesfcb70ef2011-02-02 16:02:30 -08001979 # Run the queued emerges.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001980 scheduler = EmergeQueue(deps_graph, emerge, deps.package_db, deps.show_output,
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001981 deps.unpack_only, deps.max_retries)
Brian Harringa43f5952012-04-12 01:19:34 -07001982 try:
1983 scheduler.Run()
1984 finally:
Mike Frysinger27e21b72018-07-12 14:20:21 -04001985 # pylint: disable=protected-access
Brian Harringa43f5952012-04-12 01:19:34 -07001986 scheduler._Shutdown()
David James97ce8902011-08-16 09:51:05 -07001987 scheduler = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001988
Mike Frysingerd20a6e22012-10-04 19:01:10 -04001989 clean_logs(emerge.settings)
1990
Mike Frysinger383367e2014-09-16 15:06:17 -04001991 print("Done")
Brian Harring8294d652012-05-23 02:20:52 -07001992 return 0