Mike Frysingere58c0e22017-10-04 15:43:30 -04001# -*- coding: utf-8 -*-
Mike Frysinger0a647fc2012-08-06 14:36:05 -04002# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
David Jamesfcb70ef2011-02-02 16:02:30 -08003# Use of this source code is governed by a BSD-style license that can be
4# found in the LICENSE file.
5
6"""Program to run emerge in parallel, for significant speedup.
7
8Usage:
David James386ccd12011-05-04 20:17:42 -07009 ./parallel_emerge [--board=BOARD] [--workon=PKGS]
David Jamesfcb70ef2011-02-02 16:02:30 -080010 [--force-remote-binary=PKGS] [emerge args] package
11
David James78b6cd92012-04-02 21:36:12 -070012This script runs multiple emerge processes in parallel, using appropriate
13Portage APIs. It is faster than standard emerge because it has a
14multiprocess model instead of an asynchronous model.
David Jamesfcb70ef2011-02-02 16:02:30 -080015"""
16
Mike Frysinger383367e2014-09-16 15:06:17 -040017from __future__ import print_function
18
David Jamesfcb70ef2011-02-02 16:02:30 -080019import codecs
20import copy
21import errno
Brian Harring8294d652012-05-23 02:20:52 -070022import gc
David James8c7e5e32011-06-28 11:26:03 -070023import heapq
David Jamesfcb70ef2011-02-02 16:02:30 -080024import multiprocessing
25import os
Mike Frysinger1ae28092013-10-17 17:17:22 -040026try:
27 import Queue
28except ImportError:
29 # Python 3 renamed it to "queue". We still use Queue to avoid collisions
30 # with naming variables as "queue". Maybe we'll transition at some point.
31 # pylint: disable=F0401
32 import queue as Queue
David Jamesfcb70ef2011-02-02 16:02:30 -080033import signal
Bertrand SIMONNET19d789e2014-12-09 13:36:31 -080034import subprocess
David Jamesfcb70ef2011-02-02 16:02:30 -080035import sys
36import tempfile
Brian Harring8294d652012-05-23 02:20:52 -070037import threading
David Jamesfcb70ef2011-02-02 16:02:30 -080038import time
39import traceback
David Jamesfcb70ef2011-02-02 16:02:30 -080040
Thiago Goncalesf4acc422013-07-17 10:26:35 -070041from chromite.lib import cros_build_lib
Chris Ching5fcbd622016-11-28 09:22:15 -070042from chromite.lib import cros_event
Chris Chingb8eba812017-06-22 09:54:48 -060043from chromite.lib import portage_util
Mike Frysingere2d8f0d2014-11-01 13:09:26 -040044from chromite.lib import process_util
Mike Frysingerd74fe4a2014-04-24 11:43:38 -040045from chromite.lib import proctitle
Thiago Goncalesf4acc422013-07-17 10:26:35 -070046
David Jamesfcb70ef2011-02-02 16:02:30 -080047# If PORTAGE_USERNAME isn't specified, scrape it from the $HOME variable. On
48# Chromium OS, the default "portage" user doesn't have the necessary
49# permissions. It'd be easier if we could default to $USERNAME, but $USERNAME
50# is "root" here because we get called through sudo.
51#
52# We need to set this before importing any portage modules, because portage
53# looks up "PORTAGE_USERNAME" at import time.
54#
55# NOTE: .bashrc sets PORTAGE_USERNAME = $USERNAME, so most people won't
56# encounter this case unless they have an old chroot or blow away the
57# environment by running sudo without the -E specifier.
58if "PORTAGE_USERNAME" not in os.environ:
59 homedir = os.environ.get("HOME")
60 if homedir:
61 os.environ["PORTAGE_USERNAME"] = os.path.basename(homedir)
62
Bertrand SIMONNET19d789e2014-12-09 13:36:31 -080063# Wrap Popen with a lock to ensure no two Popen calls are executed
64# simultaneously in the same process.
65# Two simultaneous Popen calls may be the cause of crbug.com/433482.
66_popen_lock = threading.Lock()
67_old_popen = subprocess.Popen
68
69def _LockedPopen(*args, **kwargs):
70 with _popen_lock:
71 return _old_popen(*args, **kwargs)
72
73subprocess.Popen = _LockedPopen
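# From this point on, every subprocess.Popen() call made in this process
# (including the calls portage makes internally) is serialized through
# _popen_lock.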
74
David Jamesfcb70ef2011-02-02 16:02:30 -080075# Portage doesn't expose dependency trees in its public API, so we have to
76# make use of some private APIs here. These modules are found under
77# /usr/lib/portage/pym/.
78#
79# TODO(davidjames): Update Portage to expose public APIs for these features.
Don Garrett25f309a2014-03-19 14:02:12 -070080# pylint: disable=F0401
David Jamesfcb70ef2011-02-02 16:02:30 -080081from _emerge.actions import adjust_configs
82from _emerge.actions import load_emerge_config
83from _emerge.create_depgraph_params import create_depgraph_params
David James386ccd12011-05-04 20:17:42 -070084from _emerge.depgraph import backtrack_depgraph
David Jamesfcb70ef2011-02-02 16:02:30 -080085from _emerge.main import emerge_main
86from _emerge.main import parse_opts
87from _emerge.Package import Package
Bertrand SIMONNETa15b5072014-10-23 15:27:52 -070088from _emerge.post_emerge import clean_logs
David Jamesfcb70ef2011-02-02 16:02:30 -080089from _emerge.Scheduler import Scheduler
David Jamesfcb70ef2011-02-02 16:02:30 -080090from _emerge.stdout_spinner import stdout_spinner
David James386ccd12011-05-04 20:17:42 -070091from portage._global_updates import _global_updates
David Jamesfcb70ef2011-02-02 16:02:30 -080092import portage
93import portage.debug
Don Garrettf8bf7842014-03-20 17:03:42 -070094# pylint: enable=F0401
Mike Frysinger91d7da92013-02-19 15:53:46 -050095
David Jamesfcb70ef2011-02-02 16:02:30 -080096
David Jamesfcb70ef2011-02-02 16:02:30 -080097def Usage():
98 """Print usage."""
Mike Frysinger383367e2014-09-16 15:06:17 -040099 print("Usage:")
Chris Ching5fcbd622016-11-28 09:22:15 -0700100 print(" ./parallel_emerge [--board=BOARD] [--workon=PKGS] [--rebuild]")
101 print(" [--eventlogfile=FILE] [emerge args] package")
Mike Frysinger383367e2014-09-16 15:06:17 -0400102 print()
103 print("Packages specified as workon packages are always built from source.")
104 print()
105 print("The --workon argument is mainly useful when you want to build and")
106 print("install packages that you are working on unconditionally, but do not")
107 print("to have to rev the package to indicate you want to build it from")
108 print("source. The build_packages script will automatically supply the")
109 print("workon argument to emerge, ensuring that packages selected using")
110 print("cros-workon are rebuilt.")
111 print()
112 print("The --rebuild option rebuilds packages whenever their dependencies")
113 print("are changed. This ensures that your build is correct.")
Chris Ching5fcbd622016-11-28 09:22:15 -0700114 print()
115 print("The --eventlogfile writes events to the given file. File is")
116 print("is overwritten if it exists.")
David Jamesfcb70ef2011-02-02 16:02:30 -0800117
118
David Jamesfcb70ef2011-02-02 16:02:30 -0800119# Global start time
120GLOBAL_START = time.time()
121
David James7358d032011-05-19 10:40:03 -0700122# Whether process has been killed by a signal.
123KILLED = multiprocessing.Event()
124
David Jamesfcb70ef2011-02-02 16:02:30 -0800125
126class EmergeData(object):
127 """This simple struct holds various emerge variables.
128
129 This struct helps us easily pass emerge variables around as a unit.
130 These variables are used for calculating dependencies and installing
131 packages.
132 """
133
David Jamesbf1e3442011-05-28 07:44:20 -0700134 __slots__ = ["action", "cmdline_packages", "depgraph", "favorites",
135 "mtimedb", "opts", "root_config", "scheduler_graph",
136 "settings", "spinner", "trees"]
David Jamesfcb70ef2011-02-02 16:02:30 -0800137
138 def __init__(self):
139 # The action the user requested. If the user is installing packages, this
140 # is None. If the user is doing anything other than installing packages,
141 # this will contain the action name, which will map exactly to the
142 # long-form name of the associated emerge option.
143 #
144 # Example: If you call parallel_emerge --unmerge package, the action name
145 # will be "unmerge"
146 self.action = None
147
148 # The list of packages the user passed on the command-line.
149 self.cmdline_packages = None
150
151 # The emerge dependency graph. It'll contain all the packages involved in
152 # this merge, along with their versions.
153 self.depgraph = None
154
David Jamesbf1e3442011-05-28 07:44:20 -0700155 # The list of candidates to add to the world file.
156 self.favorites = None
157
David Jamesfcb70ef2011-02-02 16:02:30 -0800158 # A dict of the options passed to emerge. This dict has been cleaned up
159 # a bit by parse_opts, so that it's a bit easier for the emerge code to
160 # look at the options.
161 #
162 # Emerge takes a few shortcuts in its cleanup process to make parsing of
163 # the options dict easier. For example, if you pass in "--usepkg=n", the
164 # "--usepkg" flag is just left out of the dictionary altogether. Because
165 # --usepkg=n is the default, this makes parsing easier, because emerge
166 # can just assume that if "--usepkg" is in the dictionary, it's enabled.
167 #
168 # These cleanup processes aren't applied to all options. For example, the
169 # --with-bdeps flag is passed in as-is. For a full list of the cleanups
170 # applied by emerge, see the parse_opts function in the _emerge.main
171 # package.
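    #
    # For example (illustrative): passing "--usepkg" ends up as a "--usepkg"
    # key in this dict, while passing "--usepkg=n" leaves the key out
    # entirely.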
172 self.opts = None
173
174 # A dictionary used by portage to maintain global state. This state is
175 # loaded from disk when portage starts up, and saved to disk whenever we
176 # call mtimedb.commit().
177 #
178 # This database contains information about global updates (i.e., what
179 # version of portage we have) and what we're currently doing. Portage
180 # saves what it is currently doing in this database so that it can be
181 # resumed when you call it with the --resume option.
182 #
183 # parallel_emerge does not save what it is currently doing in the mtimedb,
184 # so we do not support the --resume option.
185 self.mtimedb = None
186
187 # The portage configuration for our current root. This contains the portage
188 # settings (see below) and the three portage trees for our current root.
189 # (The three portage trees are explained below, in the documentation for
190 # the "trees" member.)
191 self.root_config = None
192
193 # The scheduler graph is used by emerge to calculate what packages to
194 # install. We don't actually install any deps, so this isn't really used,
195 # but we pass it in to the Scheduler object anyway.
196 self.scheduler_graph = None
197
198 # Portage settings for our current session. Most of these settings are set
199 # in make.conf inside our current install root.
200 self.settings = None
201
202 # The spinner, which spews stuff to stdout to indicate that portage is
203 # doing something. We maintain our own spinner, so we set the portage
204 # spinner to "silent" mode.
205 self.spinner = None
206
207 # The portage trees. There are separate portage trees for each root. To get
208 # the portage tree for the current root, you can look in self.trees[root],
209 # where root = self.settings["ROOT"].
210 #
211 # In each root, there are three trees: vartree, porttree, and bintree.
212 # - vartree: A database of the currently-installed packages.
213 # - porttree: A database of ebuilds, that can be used to build packages.
214 # - bintree: A database of binary packages.
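    #
    # For example (illustrative): self.trees[root]["vartree"].dbapi.cpv_all()
    # would list every package currently installed in that root.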
215 self.trees = None
216
217
218class DepGraphGenerator(object):
219 """Grab dependency information about packages from portage.
220
221 Typical usage:
222 deps = DepGraphGenerator()
223 deps.Initialize(sys.argv[1:])
224 deps_tree, deps_info = deps.GenDependencyTree()
225 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
226 deps.PrintTree(deps_tree)
227 PrintDepsMap(deps_graph)
228 """
229
Bertrand SIMONNET0625e1a2015-04-07 11:41:16 -0700230 __slots__ = ["board", "emerge", "package_db", "show_output", "sysroot",
Gregory Meinkebec9b442018-04-17 12:01:19 -0600231 "unpack_only", "max_retries", "install_plan_filename"]
David Jamesfcb70ef2011-02-02 16:02:30 -0800232
233 def __init__(self):
234 self.board = None
235 self.emerge = EmergeData()
David Jamesfcb70ef2011-02-02 16:02:30 -0800236 self.package_db = {}
David Jamesfcb70ef2011-02-02 16:02:30 -0800237 self.show_output = False
Bertrand SIMONNET0625e1a2015-04-07 11:41:16 -0700238 self.sysroot = None
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700239 self.unpack_only = False
Bertrand SIMONNET411945d2015-05-20 17:23:28 -0700240 self.max_retries = 1
Gregory Meinkebec9b442018-04-17 12:01:19 -0600241 self.install_plan_filename = None
David Jamesfcb70ef2011-02-02 16:02:30 -0800242
243 def ParseParallelEmergeArgs(self, argv):
244 """Read the parallel emerge arguments from the command-line.
245
246 We need to be compatible with emerge arg format. We scrape arguments that
247 are specific to parallel_emerge, and pass through the rest directly to
248 emerge.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500249
David Jamesfcb70ef2011-02-02 16:02:30 -0800250 Args:
251 argv: arguments list
Mike Frysinger1a736a82013-12-12 01:50:59 -0500252
David Jamesfcb70ef2011-02-02 16:02:30 -0800253 Returns:
254 Arguments that don't belong to parallel_emerge
255 """
256 emerge_args = []
257 for arg in argv:
258 # Specifically match arguments that are specific to parallel_emerge, and
259 # pass through the rest.
260 if arg.startswith("--board="):
261 self.board = arg.replace("--board=", "")
Bertrand SIMONNET0625e1a2015-04-07 11:41:16 -0700262 elif arg.startswith("--sysroot="):
263 self.sysroot = arg.replace("--sysroot=", "")
David Jamesfcb70ef2011-02-02 16:02:30 -0800264 elif arg.startswith("--workon="):
265 workon_str = arg.replace("--workon=", "")
David James7a1ea4b2011-10-13 15:06:41 -0700266 emerge_args.append("--reinstall-atoms=%s" % workon_str)
267 emerge_args.append("--usepkg-exclude=%s" % workon_str)
David Jamesfcb70ef2011-02-02 16:02:30 -0800268 elif arg.startswith("--force-remote-binary="):
269 force_remote_binary = arg.replace("--force-remote-binary=", "")
David James7a1ea4b2011-10-13 15:06:41 -0700270 emerge_args.append("--useoldpkg-atoms=%s" % force_remote_binary)
Bertrand SIMONNET411945d2015-05-20 17:23:28 -0700271 elif arg.startswith("--retries="):
272 self.max_retries = int(arg.replace("--retries=", ""))
David Jamesfcb70ef2011-02-02 16:02:30 -0800273 elif arg == "--show-output":
274 self.show_output = True
David James386ccd12011-05-04 20:17:42 -0700275 elif arg == "--rebuild":
David James7a1ea4b2011-10-13 15:06:41 -0700276 emerge_args.append("--rebuild-if-unbuilt")
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700277 elif arg == "--unpackonly":
278 emerge_args.append("--fetchonly")
279 self.unpack_only = True
Chris Ching5fcbd622016-11-28 09:22:15 -0700280 elif arg.startswith("--eventlogfile="):
281 log_file_name = arg.replace("--eventlogfile=", "")
Chris Ching4a2ebd62017-04-26 16:30:05 -0600282 event_logger = cros_event.getEventFileLogger(log_file_name)
283 event_logger.setKind('ParallelEmerge')
284 cros_event.setEventLogger(event_logger)
Gregory Meinkebec9b442018-04-17 12:01:19 -0600285 elif arg.startswith("--install-plan-filename"):
286 # No emerge equivalent; used to record the list of packages that changed,
287 # for which we will later need to calculate reverse dependencies.
288 self.install_plan_filename = arg.replace("--install-plan-filename=", "")
David Jamesfcb70ef2011-02-02 16:02:30 -0800289 else:
290 # Not one of our options, so pass through to emerge.
291 emerge_args.append(arg)
292
David James386ccd12011-05-04 20:17:42 -0700293 # These packages take a really long time to build, so, for expediency, we
294 # are blacklisting them from the automatic rebuilds that would otherwise be
295 # triggered when one of their dependencies is recompiled.
Mike Frysinger5c2a9052014-04-15 15:52:04 -0400296 for pkg in ("chromeos-base/chromeos-chrome",):
David James7a1ea4b2011-10-13 15:06:41 -0700297 emerge_args.append("--rebuild-exclude=%s" % pkg)
David Jamesfcb70ef2011-02-02 16:02:30 -0800298
299 return emerge_args
300
301 def Initialize(self, args):
302 """Initializer. Parses arguments and sets up portage state."""
303
304 # Parse and strip out args that are just intended for parallel_emerge.
305 emerge_args = self.ParseParallelEmergeArgs(args)
306
Bertrand SIMONNET0625e1a2015-04-07 11:41:16 -0700307 if self.sysroot and self.board:
308 cros_build_lib.Die("--sysroot and --board are incompatible.")
309
David Jamesfcb70ef2011-02-02 16:02:30 -0800310 # Setup various environment variables based on our current board. These
311 # variables are normally setup inside emerge-${BOARD}, but since we don't
312 # call that script, we have to set it up here. These variables serve to
313 # point our tools at /build/BOARD and to setup cross compiles to the
314 # appropriate board as configured in toolchain.conf.
315 if self.board:
Bertrand SIMONNET0625e1a2015-04-07 11:41:16 -0700316 self.sysroot = os.environ.get('SYSROOT',
317 cros_build_lib.GetSysroot(self.board))
318
319 if self.sysroot:
320 os.environ["PORTAGE_CONFIGROOT"] = self.sysroot
321 os.environ["SYSROOT"] = self.sysroot
David Jamesfcb70ef2011-02-02 16:02:30 -0800322
David Jamesfcb70ef2011-02-02 16:02:30 -0800323 # Turn off interactive delays
324 os.environ["EBEEP_IGNORE"] = "1"
325 os.environ["EPAUSE_IGNORE"] = "1"
Mike Frysinger0a647fc2012-08-06 14:36:05 -0400326 os.environ["CLEAN_DELAY"] = "0"
David Jamesfcb70ef2011-02-02 16:02:30 -0800327
328 # Parse the emerge options.
David Jamesea3ca332011-05-26 11:48:29 -0700329 action, opts, cmdline_packages = parse_opts(emerge_args, silent=True)
David Jamesfcb70ef2011-02-02 16:02:30 -0800330
331 # Set environment variables based on options. Portage normally sets these
332 # environment variables in emerge_main, but we can't use that function,
333 # because it also does a bunch of other stuff that we don't want.
334 # TODO(davidjames): Patch portage to move this logic into a function we can
335 # reuse here.
336 if "--debug" in opts:
337 os.environ["PORTAGE_DEBUG"] = "1"
338 if "--config-root" in opts:
339 os.environ["PORTAGE_CONFIGROOT"] = opts["--config-root"]
340 if "--root" in opts:
341 os.environ["ROOT"] = opts["--root"]
342 if "--accept-properties" in opts:
343 os.environ["ACCEPT_PROPERTIES"] = opts["--accept-properties"]
344
David James88d780c2014-02-05 13:03:29 -0800345 # If we're installing packages to the board, we can disable vardb locks.
346 # This is safe because we only run up to one instance of parallel_emerge in
347 # parallel.
348 # TODO(davidjames): Enable this for the host too.
Bertrand SIMONNET0625e1a2015-04-07 11:41:16 -0700349 if self.sysroot:
David Jamesfcb70ef2011-02-02 16:02:30 -0800350 os.environ.setdefault("PORTAGE_LOCKS", "false")
David Jamesfcb70ef2011-02-02 16:02:30 -0800351
352 # Now that we've setup the necessary environment variables, we can load the
353 # emerge config from disk.
Gilad Arnold94758762015-05-22 12:23:23 -0700354 # pylint: disable=unpacking-non-sequence
David Jamesfcb70ef2011-02-02 16:02:30 -0800355 settings, trees, mtimedb = load_emerge_config()
356
David Jamesea3ca332011-05-26 11:48:29 -0700357 # Add in EMERGE_DEFAULT_OPTS, if specified.
358 tmpcmdline = []
359 if "--ignore-default-opts" not in opts:
360 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
361 tmpcmdline.extend(emerge_args)
362 action, opts, cmdline_packages = parse_opts(tmpcmdline)
363
364 # If we're installing to the board, we want the --root-deps option so that
365 # portage will install the build dependencies to that location as well.
Bertrand SIMONNET0625e1a2015-04-07 11:41:16 -0700366 if self.sysroot:
David Jamesea3ca332011-05-26 11:48:29 -0700367 opts.setdefault("--root-deps", True)
368
David Jamesfcb70ef2011-02-02 16:02:30 -0800369 # Check whether our portage tree is out of date. Typically, this happens
370 # when you're setting up a new portage tree, such as in setup_board and
371 # make_chroot. In that case, portage applies a bunch of global updates
372 # here. Once the updates are finished, we need to commit any changes
373 # that the global update made to our mtimedb, and reload the config.
374 #
375 # Portage normally handles this logic in emerge_main, but again, we can't
376 # use that function here.
377 if _global_updates(trees, mtimedb["updates"]):
378 mtimedb.commit()
Gilad Arnold94758762015-05-22 12:23:23 -0700379 # pylint: disable=unpacking-non-sequence
David Jamesfcb70ef2011-02-02 16:02:30 -0800380 settings, trees, mtimedb = load_emerge_config(trees=trees)
381
382 # Setup implied options. Portage normally handles this logic in
383 # emerge_main.
384 if "--buildpkgonly" in opts or "buildpkg" in settings.features:
385 opts.setdefault("--buildpkg", True)
386 if "--getbinpkgonly" in opts:
387 opts.setdefault("--usepkgonly", True)
388 opts.setdefault("--getbinpkg", True)
389 if "getbinpkg" in settings.features:
390 # Per emerge_main, FEATURES=getbinpkg overrides --getbinpkg=n
391 opts["--getbinpkg"] = True
392 if "--getbinpkg" in opts or "--usepkgonly" in opts:
393 opts.setdefault("--usepkg", True)
394 if "--fetch-all-uri" in opts:
395 opts.setdefault("--fetchonly", True)
396 if "--skipfirst" in opts:
397 opts.setdefault("--resume", True)
398 if "--buildpkgonly" in opts:
399 # --buildpkgonly will not merge anything, so it overrides all binary
400 # package options.
401 for opt in ("--getbinpkg", "--getbinpkgonly",
402 "--usepkg", "--usepkgonly"):
403 opts.pop(opt, None)
404 if (settings.get("PORTAGE_DEBUG", "") == "1" and
405 "python-trace" in settings.features):
406 portage.debug.set_trace(True)
407
408 # Complain about unsupported options
David James386ccd12011-05-04 20:17:42 -0700409 for opt in ("--ask", "--ask-enter-invalid", "--resume", "--skipfirst"):
David Jamesfcb70ef2011-02-02 16:02:30 -0800410 if opt in opts:
Mike Frysinger383367e2014-09-16 15:06:17 -0400411 print("%s is not supported by parallel_emerge" % opt)
David Jamesfcb70ef2011-02-02 16:02:30 -0800412 sys.exit(1)
413
414 # Make emerge specific adjustments to the config (e.g. colors!)
415 adjust_configs(opts, trees)
416
417 # Save our configuration so far in the emerge object
418 emerge = self.emerge
419 emerge.action, emerge.opts = action, opts
420 emerge.settings, emerge.trees, emerge.mtimedb = settings, trees, mtimedb
421 emerge.cmdline_packages = cmdline_packages
422 root = settings["ROOT"]
423 emerge.root_config = trees[root]["root_config"]
424
David James386ccd12011-05-04 20:17:42 -0700425 if "--usepkg" in opts:
David Jamesfcb70ef2011-02-02 16:02:30 -0800426 emerge.trees[root]["bintree"].populate("--getbinpkg" in opts)
427
David Jamesfcb70ef2011-02-02 16:02:30 -0800428 def CreateDepgraph(self, emerge, packages):
429 """Create an emerge depgraph object."""
430 # Setup emerge options.
431 emerge_opts = emerge.opts.copy()
432
David James386ccd12011-05-04 20:17:42 -0700433 # Ask portage to build a dependency graph with the options we specified
434 # above.
David Jamesfcb70ef2011-02-02 16:02:30 -0800435 params = create_depgraph_params(emerge_opts, emerge.action)
David Jamesbf1e3442011-05-28 07:44:20 -0700436 success, depgraph, favorites = backtrack_depgraph(
David James386ccd12011-05-04 20:17:42 -0700437 emerge.settings, emerge.trees, emerge_opts, params, emerge.action,
438 packages, emerge.spinner)
439 emerge.depgraph = depgraph
David Jamesfcb70ef2011-02-02 16:02:30 -0800440
David James386ccd12011-05-04 20:17:42 -0700441 # Is it impossible to honor the user's request? Bail!
442 if not success:
443 depgraph.display_problems()
444 sys.exit(1)
David Jamesfcb70ef2011-02-02 16:02:30 -0800445
446 emerge.depgraph = depgraph
David Jamesbf1e3442011-05-28 07:44:20 -0700447 emerge.favorites = favorites
David Jamesfcb70ef2011-02-02 16:02:30 -0800448
David Jamesdeebd692011-05-09 17:02:52 -0700449 # Prime and flush emerge caches.
450 root = emerge.settings["ROOT"]
451 vardb = emerge.trees[root]["vartree"].dbapi
David James0bdc5de2011-05-12 16:22:26 -0700452 if "--pretend" not in emerge.opts:
453 vardb.counter_tick()
David Jamesdeebd692011-05-09 17:02:52 -0700454 vardb.flush_cache()
455
David James386ccd12011-05-04 20:17:42 -0700456 def GenDependencyTree(self):
David Jamesfcb70ef2011-02-02 16:02:30 -0800457 """Get dependency tree info from emerge.
458
David Jamesfcb70ef2011-02-02 16:02:30 -0800459 Returns:
460 Dependency tree
461 """
462 start = time.time()
463
464 emerge = self.emerge
465
466 # Create a list of packages to merge
467 packages = set(emerge.cmdline_packages[:])
David Jamesfcb70ef2011-02-02 16:02:30 -0800468
469 # Tell emerge to be quiet. We print plenty of info ourselves so we don't
470 # need any extra output from portage.
471 portage.util.noiselimit = -1
472
473 # My favorite feature: The silent spinner. It doesn't spin. Ever.
474 # I'd disable the colors by default too, but they look kind of cool.
475 emerge.spinner = stdout_spinner()
476 emerge.spinner.update = emerge.spinner.update_quiet
477
478 if "--quiet" not in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -0400479 print("Calculating deps...")
David Jamesfcb70ef2011-02-02 16:02:30 -0800480
Chris Ching4a2ebd62017-04-26 16:30:05 -0600481 with cros_event.newEvent(task_name="GenerateDepTree"):
Chris Ching5fcbd622016-11-28 09:22:15 -0700482 self.CreateDepgraph(emerge, packages)
483 depgraph = emerge.depgraph
David Jamesfcb70ef2011-02-02 16:02:30 -0800484
485 # Build our own tree from the emerge digraph.
486 deps_tree = {}
Don Garrett25f309a2014-03-19 14:02:12 -0700487 # pylint: disable=W0212
David Jamesfcb70ef2011-02-02 16:02:30 -0800488 digraph = depgraph._dynamic_config.digraph
David James3f778802011-08-25 19:31:45 -0700489 root = emerge.settings["ROOT"]
Bertrand SIMONNETa15b5072014-10-23 15:27:52 -0700490 final_db = depgraph._dynamic_config._filtered_trees[root]['graph_db']
David Jamesfcb70ef2011-02-02 16:02:30 -0800491 for node, node_deps in digraph.nodes.items():
492 # Calculate dependency packages that need to be installed first. Each
493 # child on the digraph is a dependency. The "operation" field specifies
494 # what we're doing (e.g. merge, uninstall, etc.). The "priorities" array
495 # contains the type of dependency (e.g. build, runtime, runtime_post,
496 # etc.)
497 #
David Jamesfcb70ef2011-02-02 16:02:30 -0800498 # Portage refers to the identifiers for packages as a CPV. This acronym
499      # stands for Category/Package/Version.
500 #
501 # Here's an example CPV: chromeos-base/power_manager-0.0.1-r1
502 # Split up, this CPV would be:
503      # C -- Category: chromeos-base
504      # P -- Package: power_manager
505 # V -- Version: 0.0.1-r1
506 #
507 # We just refer to CPVs as packages here because it's easier.
508 deps = {}
509 for child, priorities in node_deps[0].items():
David James3f778802011-08-25 19:31:45 -0700510 if isinstance(child, Package) and child.root == root:
511 cpv = str(child.cpv)
512 action = str(child.operation)
513
514 # If we're uninstalling a package, check whether Portage is
515 # installing a replacement. If so, just depend on the installation
516 # of the new package, because the old package will automatically
517 # be uninstalled at that time.
518 if action == "uninstall":
519 for pkg in final_db.match_pkgs(child.slot_atom):
520 cpv = str(pkg.cpv)
521 action = "merge"
522 break
523
524 deps[cpv] = dict(action=action,
525 deptypes=[str(x) for x in priorities],
526 deps={})
David Jamesfcb70ef2011-02-02 16:02:30 -0800527
528 # We've built our list of deps, so we can add our package to the tree.
David James3f778802011-08-25 19:31:45 -0700529 if isinstance(node, Package) and node.root == root:
David Jamesfcb70ef2011-02-02 16:02:30 -0800530 deps_tree[str(node.cpv)] = dict(action=str(node.operation),
531 deps=deps)
532
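    # For illustration, a deps_tree entry built above might look like this
    # (hypothetical CPVs):
    #   {"chromeos-base/power_manager-0.0.1-r1": {
    #       "action": "merge",
    #       "deps": {"chromeos-base/libchrome-0.0.1-r1": {
    #           "action": "merge", "deptypes": ["buildtime"], "deps": {}}}}}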
David Jamesfcb70ef2011-02-02 16:02:30 -0800533 # Ask portage for its install plan, so that we can only throw out
David James386ccd12011-05-04 20:17:42 -0700534 # dependencies that portage throws out.
David Jamesfcb70ef2011-02-02 16:02:30 -0800535 deps_info = {}
536 for pkg in depgraph.altlist():
537 if isinstance(pkg, Package):
David James3f778802011-08-25 19:31:45 -0700538 assert pkg.root == root
David Jamesfcb70ef2011-02-02 16:02:30 -0800539 self.package_db[pkg.cpv] = pkg
540
David Jamesfcb70ef2011-02-02 16:02:30 -0800541 # Save off info about the package
David James386ccd12011-05-04 20:17:42 -0700542 deps_info[str(pkg.cpv)] = {"idx": len(deps_info)}
David Jamesfcb70ef2011-02-02 16:02:30 -0800543
544 seconds = time.time() - start
545 if "--quiet" not in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -0400546 print("Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60))
David Jamesfcb70ef2011-02-02 16:02:30 -0800547
Gregory Meinkebec9b442018-04-17 12:01:19 -0600548 # Calculate the install plan packages and append them to the plan file. They
549 # will be used to calculate the reverse dependencies of these changed packages.
550 if self.install_plan_filename:
551      install_plan_pkgs = list(deps_info)
554      # Always write the file even if there is nothing to do; scripts expect it to exist.
555 with open(self.install_plan_filename, "a") as f:
556 f.write("%s " % " ".join(install_plan_pkgs))
557
David Jamesfcb70ef2011-02-02 16:02:30 -0800558 return deps_tree, deps_info
559
560 def PrintTree(self, deps, depth=""):
561 """Print the deps we have seen in the emerge output.
562
563 Args:
Mike Frysinger6f3c48e2015-05-06 02:38:51 -0400564 deps: Dependency tree structure.
565 depth: Allows printing the tree recursively, with indentation.
David Jamesfcb70ef2011-02-02 16:02:30 -0800566 """
567 for entry in sorted(deps):
568 action = deps[entry]["action"]
Mike Frysinger383367e2014-09-16 15:06:17 -0400569 print("%s %s (%s)" % (depth, entry, action))
David Jamesfcb70ef2011-02-02 16:02:30 -0800570 self.PrintTree(deps[entry]["deps"], depth=depth + " ")
571
David James386ccd12011-05-04 20:17:42 -0700572 def GenDependencyGraph(self, deps_tree, deps_info):
David Jamesfcb70ef2011-02-02 16:02:30 -0800573 """Generate a doubly linked dependency graph.
574
575 Args:
576 deps_tree: Dependency tree structure.
577 deps_info: More details on the dependencies.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500578
David Jamesfcb70ef2011-02-02 16:02:30 -0800579 Returns:
580 Deps graph in the form of a dict of packages, with each package
581 specifying a "needs" list and "provides" list.
582 """
583 emerge = self.emerge
David Jamesfcb70ef2011-02-02 16:02:30 -0800584
David Jamesfcb70ef2011-02-02 16:02:30 -0800585 # deps_map is the actual dependency graph.
586 #
587 # Each package specifies a "needs" list and a "provides" list. The "needs"
588 # list indicates which packages we depend on. The "provides" list
589 # indicates the reverse dependencies -- what packages need us.
590 #
591 # We also provide some other information in the dependency graph:
592 # - action: What we're planning on doing with this package. Generally,
593 # "merge", "nomerge", or "uninstall"
David Jamesfcb70ef2011-02-02 16:02:30 -0800594 deps_map = {}
595
596 def ReverseTree(packages):
597 """Convert tree to digraph.
598
599 Take the tree of package -> requirements and reverse it to a digraph of
600 buildable packages -> packages they unblock.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500601
David Jamesfcb70ef2011-02-02 16:02:30 -0800602 Args:
603 packages: Tree(s) of dependencies.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500604
David Jamesfcb70ef2011-02-02 16:02:30 -0800605      Side effects:
606        Fills in deps_map (in the enclosing scope) with an unsanitized digraph.
607 """
David James8c7e5e32011-06-28 11:26:03 -0700608 binpkg_phases = set(["setup", "preinst", "postinst"])
David Jamese5e1c0a2014-09-29 17:19:41 -0700609 needed_dep_types = set(["blocker", "buildtime", "buildtime_slot_op",
610 "runtime", "runtime_slot_op"])
Benjamin Gordon670b6972017-08-29 13:43:56 -0600611 ignored_dep_types = set(["ignored", "runtime_post", "soft"])
612
613      # There's a bug in the Portage library where it always returns 'optional'
614      # and never 'buildtime' for the digraph while --usepkg is enabled, even
615      # when the package is being rebuilt. To work around this, we treat
616      # 'optional' as needed when we are using --usepkg. See crbug.com/756240.
617 if "--usepkg" in self.emerge.opts:
618 needed_dep_types.add("optional")
619 else:
620 ignored_dep_types.add("optional")
621
David Jamese5e1c0a2014-09-29 17:19:41 -0700622 all_dep_types = ignored_dep_types | needed_dep_types
David Jamesfcb70ef2011-02-02 16:02:30 -0800623 for pkg in packages:
624
625 # Create an entry for the package
626 action = packages[pkg]["action"]
David James8c7e5e32011-06-28 11:26:03 -0700627 default_pkg = {"needs": {}, "provides": set(), "action": action,
628 "nodeps": False, "binary": False}
David Jamesfcb70ef2011-02-02 16:02:30 -0800629 this_pkg = deps_map.setdefault(pkg, default_pkg)
630
David James8c7e5e32011-06-28 11:26:03 -0700631 if pkg in deps_info:
632 this_pkg["idx"] = deps_info[pkg]["idx"]
633
634 # If a package doesn't have any defined phases that might use the
635 # dependent packages (i.e. pkg_setup, pkg_preinst, or pkg_postinst),
636 # we can install this package before its deps are ready.
637 emerge_pkg = self.package_db.get(pkg)
638 if emerge_pkg and emerge_pkg.type_name == "binary":
639 this_pkg["binary"] = True
Mike Frysinger66652ec2014-04-24 11:42:25 -0400640 defined_phases = emerge_pkg.defined_phases
David James8c7e5e32011-06-28 11:26:03 -0700641 defined_binpkg_phases = binpkg_phases.intersection(defined_phases)
642 if not defined_binpkg_phases:
643 this_pkg["nodeps"] = True
644
David Jamesfcb70ef2011-02-02 16:02:30 -0800645 # Create entries for dependencies of this package first.
646 ReverseTree(packages[pkg]["deps"])
647
648 # Add dependencies to this package.
649 for dep, dep_item in packages[pkg]["deps"].iteritems():
David James8c7e5e32011-06-28 11:26:03 -0700650 # We only need to enforce strict ordering of dependencies if the
David James3f778802011-08-25 19:31:45 -0700651 # dependency is a blocker, or is a buildtime or runtime dependency.
652 # (I.e., ignored, optional, and runtime_post dependencies don't
653 # depend on ordering.)
David James8c7e5e32011-06-28 11:26:03 -0700654 dep_types = dep_item["deptypes"]
655 if needed_dep_types.intersection(dep_types):
656 deps_map[dep]["provides"].add(pkg)
657 this_pkg["needs"][dep] = "/".join(dep_types)
David Jamesfcb70ef2011-02-02 16:02:30 -0800658
David Jamese5e1c0a2014-09-29 17:19:41 -0700659 # Verify we processed all appropriate dependency types.
660 unknown_dep_types = set(dep_types) - all_dep_types
661 if unknown_dep_types:
662 print("Unknown dependency types found:")
663 print(" %s -> %s (%s)" % (pkg, dep, "/".join(unknown_dep_types)))
664 sys.exit(1)
665
David James3f778802011-08-25 19:31:45 -0700666 # If there's a blocker, Portage may need to move files from one
667 # package to another, which requires editing the CONTENTS files of
668 # both packages. To avoid race conditions while editing this file,
669 # the two packages must not be installed in parallel, so we can't
670 # safely ignore dependencies. See http://crosbug.com/19328
671 if "blocker" in dep_types:
672 this_pkg["nodeps"] = False
673
David Jamesfcb70ef2011-02-02 16:02:30 -0800674 def FindCycles():
675 """Find cycles in the dependency tree.
676
677 Returns:
678 A dict mapping cyclic packages to a dict of the deps that cause
679 cycles. For each dep that causes cycles, it returns an example
680 traversal of the graph that shows the cycle.
681 """
682
683 def FindCyclesAtNode(pkg, cycles, unresolved, resolved):
684 """Find cycles in cyclic dependencies starting at specified package.
685
686 Args:
687 pkg: Package identifier.
688 cycles: A dict mapping cyclic packages to a dict of the deps that
689 cause cycles. For each dep that causes cycles, it returns an
690 example traversal of the graph that shows the cycle.
691 unresolved: Nodes that have been visited but are not fully processed.
692 resolved: Nodes that have been visited and are fully processed.
693 """
694 pkg_cycles = cycles.get(pkg)
695 if pkg in resolved and not pkg_cycles:
696 # If we already looked at this package, and found no cyclic
697 # dependencies, we can stop now.
698 return
699 unresolved.append(pkg)
700 for dep in deps_map[pkg]["needs"]:
701 if dep in unresolved:
702 idx = unresolved.index(dep)
703 mycycle = unresolved[idx:] + [dep]
David James321490a2012-12-17 12:05:56 -0800704 for i in xrange(len(mycycle) - 1):
David Jamesfcb70ef2011-02-02 16:02:30 -0800705 pkg1, pkg2 = mycycle[i], mycycle[i+1]
706 cycles.setdefault(pkg1, {}).setdefault(pkg2, mycycle)
707 elif not pkg_cycles or dep not in pkg_cycles:
708 # Looks like we haven't seen this edge before.
709 FindCyclesAtNode(dep, cycles, unresolved, resolved)
710 unresolved.pop()
711 resolved.add(pkg)
712
713 cycles, unresolved, resolved = {}, [], set()
714 for pkg in deps_map:
715 FindCyclesAtNode(pkg, cycles, unresolved, resolved)
716 return cycles
717
David James386ccd12011-05-04 20:17:42 -0700718 def RemoveUnusedPackages():
David Jamesfcb70ef2011-02-02 16:02:30 -0800719 """Remove installed packages, propagating dependencies."""
David Jamesfcb70ef2011-02-02 16:02:30 -0800720 # Schedule packages that aren't on the install list for removal
721 rm_pkgs = set(deps_map.keys()) - set(deps_info.keys())
722
David Jamesfcb70ef2011-02-02 16:02:30 -0800723 # Remove the packages we don't want, simplifying the graph and making
724 # it easier for us to crack cycles.
725 for pkg in sorted(rm_pkgs):
726 this_pkg = deps_map[pkg]
727 needs = this_pkg["needs"]
728 provides = this_pkg["provides"]
729 for dep in needs:
730 dep_provides = deps_map[dep]["provides"]
731 dep_provides.update(provides)
732 dep_provides.discard(pkg)
733 dep_provides.discard(dep)
734 for target in provides:
735 target_needs = deps_map[target]["needs"]
736 target_needs.update(needs)
737 target_needs.pop(pkg, None)
738 target_needs.pop(target, None)
739 del deps_map[pkg]
740
741 def PrintCycleBreak(basedep, dep, mycycle):
742 """Print details about a cycle that we are planning on breaking.
743
Mike Frysinger02e1e072013-11-10 22:11:34 -0500744 We are breaking a cycle where dep needs basedep. mycycle is an
745 example cycle which contains dep -> basedep.
746 """
David Jamesfcb70ef2011-02-02 16:02:30 -0800747
David Jamesfcb70ef2011-02-02 16:02:30 -0800748 needs = deps_map[dep]["needs"]
749 depinfo = needs.get(basedep, "deleted")
David Jamesfcb70ef2011-02-02 16:02:30 -0800750
David James3f778802011-08-25 19:31:45 -0700751 # It's OK to swap install order for blockers, as long as the two
752 # packages aren't installed in parallel. If there is a cycle, then
753 # we know the packages depend on each other already, so we can drop the
754 # blocker safely without printing a warning.
755 if depinfo == "blocker":
756 return
757
David Jamesfcb70ef2011-02-02 16:02:30 -0800758 # Notify the user that we're breaking a cycle.
Mike Frysinger383367e2014-09-16 15:06:17 -0400759 print("Breaking %s -> %s (%s)" % (dep, basedep, depinfo))
David Jamesfcb70ef2011-02-02 16:02:30 -0800760
761 # Show cycle.
David James321490a2012-12-17 12:05:56 -0800762 for i in xrange(len(mycycle) - 1):
David Jamesfcb70ef2011-02-02 16:02:30 -0800763 pkg1, pkg2 = mycycle[i], mycycle[i+1]
764 needs = deps_map[pkg1]["needs"]
765 depinfo = needs.get(pkg2, "deleted")
766 if pkg1 == dep and pkg2 == basedep:
767 depinfo = depinfo + ", deleting"
Mike Frysinger383367e2014-09-16 15:06:17 -0400768 print(" %s -> %s (%s)" % (pkg1, pkg2, depinfo))
David Jamesfcb70ef2011-02-02 16:02:30 -0800769
770 def SanitizeTree():
771 """Remove circular dependencies.
772
773 We prune all dependencies involved in cycles that go against the emerge
774 ordering. This has a nice property: we're guaranteed to merge
775 dependencies in the same order that portage does.
776
777 Because we don't treat any dependencies as "soft" unless they're killed
778 by a cycle, we pay attention to a larger number of dependencies when
779 merging. This hurts performance a bit, but helps reliability.
780 """
781 start = time.time()
782 cycles = FindCycles()
783 while cycles:
784 for dep, mycycles in cycles.iteritems():
785 for basedep, mycycle in mycycles.iteritems():
786 if deps_info[basedep]["idx"] >= deps_info[dep]["idx"]:
Matt Tennant08797302011-10-17 16:18:45 -0700787 if "--quiet" not in emerge.opts:
788 PrintCycleBreak(basedep, dep, mycycle)
David Jamesfcb70ef2011-02-02 16:02:30 -0800789 del deps_map[dep]["needs"][basedep]
790 deps_map[basedep]["provides"].remove(dep)
791 cycles = FindCycles()
792 seconds = time.time() - start
793 if "--quiet" not in emerge.opts and seconds >= 0.1:
Mike Frysinger383367e2014-09-16 15:06:17 -0400794 print("Tree sanitized in %dm%.1fs" % (seconds / 60, seconds % 60))
David Jamesfcb70ef2011-02-02 16:02:30 -0800795
David James8c7e5e32011-06-28 11:26:03 -0700796 def FindRecursiveProvides(pkg, seen):
797 """Find all nodes that require a particular package.
798
799 Assumes that graph is acyclic.
800
801 Args:
802 pkg: Package identifier.
803 seen: Nodes that have been visited so far.
804 """
805 if pkg in seen:
806 return
807 seen.add(pkg)
808 info = deps_map[pkg]
809 info["tprovides"] = info["provides"].copy()
810 for dep in info["provides"]:
811 FindRecursiveProvides(dep, seen)
812 info["tprovides"].update(deps_map[dep]["tprovides"])
813
David Jamesa22906f2011-05-04 19:53:26 -0700814 ReverseTree(deps_tree)
David Jamesa22906f2011-05-04 19:53:26 -0700815
David James386ccd12011-05-04 20:17:42 -0700816 # We need to remove unused packages so that we can use the dependency
817 # ordering of the install process to show us what cycles to crack.
818 RemoveUnusedPackages()
David Jamesfcb70ef2011-02-02 16:02:30 -0800819 SanitizeTree()
David James8c7e5e32011-06-28 11:26:03 -0700820 seen = set()
821 for pkg in deps_map:
822 FindRecursiveProvides(pkg, seen)
David Jamesfcb70ef2011-02-02 16:02:30 -0800823 return deps_map
824
825 def PrintInstallPlan(self, deps_map):
826 """Print an emerge-style install plan.
827
828 The install plan lists what packages we're installing, in order.
829 It's useful for understanding what parallel_emerge is doing.
830
831 Args:
832 deps_map: The dependency graph.
833 """
834
835 def InstallPlanAtNode(target, deps_map):
836 nodes = []
837 nodes.append(target)
838 for dep in deps_map[target]["provides"]:
839 del deps_map[dep]["needs"][target]
840 if not deps_map[dep]["needs"]:
841 nodes.extend(InstallPlanAtNode(dep, deps_map))
842 return nodes
843
844 deps_map = copy.deepcopy(deps_map)
845 install_plan = []
846 plan = set()
847 for target, info in deps_map.iteritems():
848 if not info["needs"] and target not in plan:
849 for item in InstallPlanAtNode(target, deps_map):
850 plan.add(item)
851 install_plan.append(self.package_db[item])
852
853 for pkg in plan:
854 del deps_map[pkg]
855
856 if deps_map:
Mike Frysinger383367e2014-09-16 15:06:17 -0400857 print("Cyclic dependencies:", " ".join(deps_map))
David Jamesfcb70ef2011-02-02 16:02:30 -0800858 PrintDepsMap(deps_map)
859 sys.exit(1)
860
861 self.emerge.depgraph.display(install_plan)
862
863
864def PrintDepsMap(deps_map):
865 """Print dependency graph, for each package list it's prerequisites."""
866 for i in sorted(deps_map):
Mike Frysinger383367e2014-09-16 15:06:17 -0400867 print("%s: (%s) needs" % (i, deps_map[i]["action"]))
David Jamesfcb70ef2011-02-02 16:02:30 -0800868 needs = deps_map[i]["needs"]
869 for j in sorted(needs):
Mike Frysinger383367e2014-09-16 15:06:17 -0400870 print(" %s" % (j))
David Jamesfcb70ef2011-02-02 16:02:30 -0800871 if not needs:
Mike Frysinger383367e2014-09-16 15:06:17 -0400872 print(" no dependencies")
David Jamesfcb70ef2011-02-02 16:02:30 -0800873
874
875class EmergeJobState(object):
Don Garrett25f309a2014-03-19 14:02:12 -0700876 """Structure describing the EmergeJobState."""
877
David Jamesfcb70ef2011-02-02 16:02:30 -0800878 __slots__ = ["done", "filename", "last_notify_timestamp", "last_output_seek",
879 "last_output_timestamp", "pkgname", "retcode", "start_timestamp",
Chris Ching73486ab2017-04-26 18:02:37 -0600880 "target", "try_count", "fetch_only", "unpack_only"]
David Jamesfcb70ef2011-02-02 16:02:30 -0800881
882 def __init__(self, target, pkgname, done, filename, start_timestamp,
Chris Ching73486ab2017-04-26 18:02:37 -0600883 retcode=None, fetch_only=False, try_count=0, unpack_only=False):
David Jamesfcb70ef2011-02-02 16:02:30 -0800884
885 # The full name of the target we're building (e.g.
Mike Frysingerfd969312014-04-02 22:16:42 -0400886 # virtual/target-os-1-r60)
David Jamesfcb70ef2011-02-02 16:02:30 -0800887 self.target = target
888
Mike Frysingerfd969312014-04-02 22:16:42 -0400889 # The short name of the target we're building (e.g. target-os-1-r60)
David Jamesfcb70ef2011-02-02 16:02:30 -0800890 self.pkgname = pkgname
891
892 # Whether the job is done. (True if the job is done; false otherwise.)
893 self.done = done
894
895 # The filename where output is currently stored.
896 self.filename = filename
897
898 # The timestamp of the last time we printed the name of the log file. We
899 # print this at the beginning of the job, so this starts at
900 # start_timestamp.
901 self.last_notify_timestamp = start_timestamp
902
903 # The location (in bytes) of the end of the last complete line we printed.
904 # This starts off at zero. We use this to jump to the right place when we
905 # print output from the same ebuild multiple times.
906 self.last_output_seek = 0
907
908 # The timestamp of the last time we printed output. Since we haven't
909 # printed output yet, this starts at zero.
910 self.last_output_timestamp = 0
911
912 # The return code of our job, if the job is actually finished.
913 self.retcode = retcode
914
Chris Ching73486ab2017-04-26 18:02:37 -0600915 # Number of tries for this job
916 self.try_count = try_count
917
Brian Harring0be85c62012-03-17 19:52:12 -0700918 # Was this just a fetch job?
919 self.fetch_only = fetch_only
920
David Jamesfcb70ef2011-02-02 16:02:30 -0800921 # The timestamp when our job started.
922 self.start_timestamp = start_timestamp
923
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700924 # No emerge, only unpack packages.
925 self.unpack_only = unpack_only
926
David Jamesfcb70ef2011-02-02 16:02:30 -0800927
David James321490a2012-12-17 12:05:56 -0800928def KillHandler(_signum, _frame):
David James7358d032011-05-19 10:40:03 -0700929 # Kill self and all subprocesses.
930 os.killpg(0, signal.SIGKILL)
931
Mike Frysingercc838832014-05-24 13:10:30 -0400932
David Jamesfcb70ef2011-02-02 16:02:30 -0800933def SetupWorkerSignals():
David James321490a2012-12-17 12:05:56 -0800934 def ExitHandler(_signum, _frame):
David James7358d032011-05-19 10:40:03 -0700935 # Set KILLED flag.
936 KILLED.set()
David James13cead42011-05-18 16:22:01 -0700937
David James7358d032011-05-19 10:40:03 -0700938 # Remove our signal handlers so we don't get called recursively.
939 signal.signal(signal.SIGINT, KillHandler)
940 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -0800941
942 # Ensure that we exit quietly and cleanly, if possible, when we receive
943 # SIGTERM or SIGINT signals. By default, when the user hits CTRL-C, all
944 # of the child processes will print details about KeyboardInterrupt
945 # exceptions, which isn't very helpful.
946 signal.signal(signal.SIGINT, ExitHandler)
947 signal.signal(signal.SIGTERM, ExitHandler)
948
Mike Frysingerd74fe4a2014-04-24 11:43:38 -0400949
Chris Ching73486ab2017-04-26 18:02:37 -0600950def EmergeProcess(output, job_state, *args, **kwargs):
David James1ed3e252011-10-05 20:26:15 -0700951 """Merge a package in a subprocess.
952
953 Args:
David James1ed3e252011-10-05 20:26:15 -0700954 output: Temporary file to write output.
Chris Ching73486ab2017-04-26 18:02:37 -0600955 job_state: Stored state of package
David James6b29d052012-11-02 10:27:27 -0700956 *args: Arguments to pass to Scheduler constructor.
957 **kwargs: Keyword arguments to pass to Scheduler constructor.
David James1ed3e252011-10-05 20:26:15 -0700958
959 Returns:
960 The exit code returned by the subprocess.
961 """
Chris Chingb8eba812017-06-22 09:54:48 -0600962
Chris Ching73486ab2017-04-26 18:02:37 -0600963 target = job_state.target
964
965 job_state.try_count += 1
966
Chris Chingb8eba812017-06-22 09:54:48 -0600967 cpv = portage_util.SplitCPV(target)
Chris Ching73486ab2017-04-26 18:02:37 -0600968
Chris Ching4a2ebd62017-04-26 16:30:05 -0600969 event = cros_event.newEvent(task_name="EmergePackage",
Chris Chingb8eba812017-06-22 09:54:48 -0600970 name=cpv.package,
971 category=cpv.category,
Chris Ching73486ab2017-04-26 18:02:37 -0600972 version=cpv.version,
973 try_count=job_state.try_count)
David James1ed3e252011-10-05 20:26:15 -0700974 pid = os.fork()
975 if pid == 0:
976 try:
Mike Frysingerd74fe4a2014-04-24 11:43:38 -0400977 proctitle.settitle('EmergeProcess', target)
978
David James1ed3e252011-10-05 20:26:15 -0700979 # Sanity checks.
Mike Frysingerf02736e2013-11-08 15:27:00 -0500980 if sys.stdout.fileno() != 1:
981 raise Exception("sys.stdout.fileno() != 1")
982 if sys.stderr.fileno() != 2:
983 raise Exception("sys.stderr.fileno() != 2")
David James1ed3e252011-10-05 20:26:15 -0700984
985 # - Redirect 1 (stdout) and 2 (stderr) at our temporary file.
986 # - Redirect 0 to point at sys.stdin. In this case, sys.stdin
987 # points at a file reading os.devnull, because multiprocessing mucks
988 # with sys.stdin.
989 # - Leave the sys.stdin and output filehandles alone.
990 fd_pipes = {0: sys.stdin.fileno(),
991 1: output.fileno(),
992 2: output.fileno(),
993 sys.stdin.fileno(): sys.stdin.fileno(),
994 output.fileno(): output.fileno()}
Mike Frysinger66652ec2014-04-24 11:42:25 -0400995 # pylint: disable=W0212
996 portage.process._setup_pipes(fd_pipes, close_fds=False)
David James1ed3e252011-10-05 20:26:15 -0700997
998 # Portage doesn't like when sys.stdin.fileno() != 0, so point sys.stdin
999 # at the filehandle we just created in _setup_pipes.
1000 if sys.stdin.fileno() != 0:
David James6b29d052012-11-02 10:27:27 -07001001 sys.__stdin__ = sys.stdin = os.fdopen(0, "r")
1002
1003 scheduler = Scheduler(*args, **kwargs)
1004
1005 # Enable blocker handling even though we're in --nodeps mode. This
1006 # allows us to unmerge the blocker after we've merged the replacement.
1007 scheduler._opts_ignore_blockers = frozenset()
David James1ed3e252011-10-05 20:26:15 -07001008
1009 # Actually do the merge.
Chris Ching5fcbd622016-11-28 09:22:15 -07001010 with event:
Chris Ching73486ab2017-04-26 18:02:37 -06001011 job_state.retcode = scheduler.merge()
1012 if job_state.retcode != 0:
Chris Ching5fcbd622016-11-28 09:22:15 -07001013 event.fail(message="non-zero value returned")
David James1ed3e252011-10-05 20:26:15 -07001014
1015 # We catch all exceptions here (including SystemExit, KeyboardInterrupt,
1016 # etc) so as to ensure that we don't confuse the multiprocessing module,
1017 # which expects that all forked children exit with os._exit().
David James321490a2012-12-17 12:05:56 -08001018 # pylint: disable=W0702
David James1ed3e252011-10-05 20:26:15 -07001019 except:
1020 traceback.print_exc(file=output)
Chris Ching73486ab2017-04-26 18:02:37 -06001021 job_state.retcode = 1
David James1ed3e252011-10-05 20:26:15 -07001022 sys.stdout.flush()
1023 sys.stderr.flush()
1024 output.flush()
Don Garrett25f309a2014-03-19 14:02:12 -07001025 # pylint: disable=W0212
Chris Ching73486ab2017-04-26 18:02:37 -06001026 os._exit(job_state.retcode)
David James1ed3e252011-10-05 20:26:15 -07001027 else:
1028 # Return the exit code of the subprocess.
1029 return os.waitpid(pid, 0)[1]
David Jamesfcb70ef2011-02-02 16:02:30 -08001030
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001031
1032def UnpackPackage(pkg_state):
1033 """Unpacks package described by pkg_state.
1034
1035 Args:
1036 pkg_state: EmergeJobState object describing target.
1037
1038 Returns:
1039 Exit code returned by subprocess.
1040 """
1041 pkgdir = os.environ.get("PKGDIR",
1042 os.path.join(os.environ["SYSROOT"], "packages"))
1043 root = os.environ.get("ROOT", os.environ["SYSROOT"])
1044 path = os.path.join(pkgdir, pkg_state.target + ".tbz2")
1045 comp = cros_build_lib.FindCompressor(cros_build_lib.COMP_BZIP2)
1046 cmd = [comp, "-dc"]
1047 if comp.endswith("pbzip2"):
1048 cmd.append("--ignore-trailing-garbage=1")
1049 cmd.append(path)
1050
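  # The two commands below behave roughly like this shell pipeline
  # (illustrative, assuming pbzip2 is the compressor found):
  #   pbzip2 -dc --ignore-trailing-garbage=1 "$PKGDIR/<cpv>.tbz2" | sudo tar -xf - -C "$ROOT"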
Chris Ching4a2ebd62017-04-26 16:30:05 -06001051 with cros_event.newEvent(task_name="UnpackPackage", **pkg_state) as event:
Chris Ching5fcbd622016-11-28 09:22:15 -07001052 result = cros_build_lib.RunCommand(cmd, cwd=root, stdout_to_pipe=True,
1053 print_cmd=False, error_code_ok=True)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001054
Chris Ching5fcbd622016-11-28 09:22:15 -07001055    # If decompression failed, return now and don't attempt to untar.
1056    if result.returncode != 0:
1057      event.fail("error decompressing: returned {}".format(result.returncode))
1058 return result.returncode
1059
1060 cmd = ["sudo", "tar", "-xf", "-", "-C", root]
1061
1062 result = cros_build_lib.RunCommand(cmd, cwd=root, input=result.output,
1063 print_cmd=False, error_code_ok=True)
1064 if result.returncode != 0:
1065 event.fail("error extracting:returned {}".format(result.returncode))
1066
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001067 return result.returncode
1068
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001069
1070def EmergeWorker(task_queue, job_queue, emerge, package_db, fetch_only=False,
1071 unpack_only=False):
David Jamesfcb70ef2011-02-02 16:02:30 -08001072 """This worker emerges any packages given to it on the task_queue.
1073
1074 Args:
1075 task_queue: The queue of tasks for this worker to do.
1076 job_queue: The queue of results from the worker.
1077 emerge: An EmergeData() object.
1078 package_db: A dict, mapping package ids to portage Package objects.
Brian Harring0be85c62012-03-17 19:52:12 -07001079 fetch_only: A bool, indicating if we should just fetch the target.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001080 unpack_only: A bool, indicating if we should just unpack the target.
David Jamesfcb70ef2011-02-02 16:02:30 -08001081
1082 It expects package identifiers to be passed to it via task_queue. When
1083  a task is started, the worker creates a temporary file to hold the task's
1084  output. When a merge starts or finishes, we push EmergeJobState objects
1085  (which include that filename) to the job_queue.
1086 """
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001087 if fetch_only:
1088 mode = 'fetch'
1089 elif unpack_only:
1090 mode = 'unpack'
1091 else:
1092 mode = 'emerge'
1093 proctitle.settitle('EmergeWorker', mode, '[idle]')
David Jamesfcb70ef2011-02-02 16:02:30 -08001094
1095 SetupWorkerSignals()
1096 settings, trees, mtimedb = emerge.settings, emerge.trees, emerge.mtimedb
David Jamesdeebd692011-05-09 17:02:52 -07001097
1098 # Disable flushing of caches to save on I/O.
David James7a1ea4b2011-10-13 15:06:41 -07001099 root = emerge.settings["ROOT"]
1100 vardb = emerge.trees[root]["vartree"].dbapi
Mike Frysingere56debd2014-11-19 01:54:36 -05001101 vardb._flush_cache_enabled = False # pylint: disable=protected-access
Brian Harring0be85c62012-03-17 19:52:12 -07001102 bindb = emerge.trees[root]["bintree"].dbapi
1103 # Might be a set, might be a list, might be None; no clue, just use shallow
1104 # copy to ensure we can roll it back.
Don Garrett25f309a2014-03-19 14:02:12 -07001105 # pylint: disable=W0212
Brian Harring0be85c62012-03-17 19:52:12 -07001106 original_remotepkgs = copy.copy(bindb.bintree._remotepkgs)
David Jamesdeebd692011-05-09 17:02:52 -07001107
David Jamesfcb70ef2011-02-02 16:02:30 -08001108 opts, spinner = emerge.opts, emerge.spinner
1109 opts["--nodeps"] = True
Brian Harring0be85c62012-03-17 19:52:12 -07001110 if fetch_only:
1111 opts["--fetchonly"] = True
1112
David Jamesfcb70ef2011-02-02 16:02:30 -08001113 while True:
1114 # Wait for a new item to show up on the queue. This is a blocking wait,
1115 # so if there's nothing to do, we just sit here.
Brian Harring0be85c62012-03-17 19:52:12 -07001116 pkg_state = task_queue.get()
1117 if pkg_state is None:
David Jamesfcb70ef2011-02-02 16:02:30 -08001118 # If target is None, this means that the main thread wants us to quit.
1119 # The other workers need to exit too, so we'll push the message back on
1120 # to the queue so they'll get it too.
Brian Harring0be85c62012-03-17 19:52:12 -07001121 task_queue.put(None)
David Jamesfcb70ef2011-02-02 16:02:30 -08001122 return
David James7358d032011-05-19 10:40:03 -07001123 if KILLED.is_set():
1124 return
1125
Brian Harring0be85c62012-03-17 19:52:12 -07001126 target = pkg_state.target
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001127 proctitle.settitle('EmergeWorker', mode, target)
Brian Harring0be85c62012-03-17 19:52:12 -07001128
David Jamesfcb70ef2011-02-02 16:02:30 -08001129 db_pkg = package_db[target]
Brian Harring0be85c62012-03-17 19:52:12 -07001130
1131 if db_pkg.type_name == "binary":
1132 if not fetch_only and pkg_state.fetched_successfully:
1133        # Ensure portage doesn't think our pkg is remote, else it'll force
1134 # a redownload of it (even if the on-disk file is fine). In-memory
1135 # caching basically, implemented dumbly.
1136 bindb.bintree._remotepkgs = None
1137 else:
1138        bindb.bintree._remotepkgs = original_remotepkgs
1139
David Jamesfcb70ef2011-02-02 16:02:30 -08001140 db_pkg.root_config = emerge.root_config
1141 install_list = [db_pkg]
1142 pkgname = db_pkg.pf
1143 output = tempfile.NamedTemporaryFile(prefix=pkgname + "-", delete=False)
David James01b1e0f2012-06-07 17:18:05 -07001144    os.chmod(output.name, 0o644)
David Jamesfcb70ef2011-02-02 16:02:30 -08001145 start_timestamp = time.time()
Brian Harring0be85c62012-03-17 19:52:12 -07001146 job = EmergeJobState(target, pkgname, False, output.name, start_timestamp,
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001147 fetch_only=fetch_only, unpack_only=unpack_only)
David Jamesfcb70ef2011-02-02 16:02:30 -08001148 job_queue.put(job)
1149 if "--pretend" in opts:
Chris Ching73486ab2017-04-26 18:02:37 -06001150 job.retcode = 0
David Jamesfcb70ef2011-02-02 16:02:30 -08001151 else:
David Jamesfcb70ef2011-02-02 16:02:30 -08001152 try:
David James386ccd12011-05-04 20:17:42 -07001153 emerge.scheduler_graph.mergelist = install_list
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001154 if unpack_only:
Chris Ching73486ab2017-04-26 18:02:37 -06001155 job.retcode = UnpackPackage(pkg_state)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001156 else:
Chris Ching73486ab2017-04-26 18:02:37 -06001157 job.retcode = EmergeProcess(output, job, settings, trees, mtimedb,
1158 opts, spinner,
1159 favorites=emerge.favorites,
1160 graph_config=emerge.scheduler_graph)
David Jamesfcb70ef2011-02-02 16:02:30 -08001161 except Exception:
1162 traceback.print_exc(file=output)
Chris Ching73486ab2017-04-26 18:02:37 -06001163 job.retcode = 1
David James1ed3e252011-10-05 20:26:15 -07001164 output.close()
David Jamesfcb70ef2011-02-02 16:02:30 -08001165
David James7358d032011-05-19 10:40:03 -07001166 if KILLED.is_set():
1167 return
1168
David Jamesfcb70ef2011-02-02 16:02:30 -08001169 job = EmergeJobState(target, pkgname, True, output.name, start_timestamp,
Chris Ching73486ab2017-04-26 18:02:37 -06001170 job.retcode, fetch_only=fetch_only,
1171 try_count=job.try_count, unpack_only=unpack_only)
David Jamesfcb70ef2011-02-02 16:02:30 -08001172 job_queue.put(job)
1173
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001174 # Set the title back to idle as the multiprocess pool won't destroy us;
1175 # when another job comes up, it'll re-use this process.
1176 proctitle.settitle('EmergeWorker', mode, '[idle]')
1177
David Jamesfcb70ef2011-02-02 16:02:30 -08001178
1179class LinePrinter(object):
1180 """Helper object to print a single line."""
1181
1182 def __init__(self, line):
1183 self.line = line
1184
David James321490a2012-12-17 12:05:56 -08001185 def Print(self, _seek_locations):
Mike Frysinger383367e2014-09-16 15:06:17 -04001186 print(self.line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001187
1188
1189class JobPrinter(object):
1190 """Helper object to print output of a job."""
1191
1192 def __init__(self, job, unlink=False):
1193 """Print output of job.
1194
Mike Frysinger02e1e072013-11-10 22:11:34 -05001195 If unlink is True, unlink the job output file when done.
1196 """
David Jamesfcb70ef2011-02-02 16:02:30 -08001197 self.current_time = time.time()
1198 self.job = job
1199 self.unlink = unlink
1200
1201 def Print(self, seek_locations):
1202
1203 job = self.job
1204
1205 # Calculate how long the job has been running.
1206 seconds = self.current_time - job.start_timestamp
1207
1208 # Note that we've printed out the job so far.
1209 job.last_output_timestamp = self.current_time
1210
1211 # Note that we're starting the job
1212 info = "job %s (%dm%.1fs)" % (job.pkgname, seconds / 60, seconds % 60)
1213 last_output_seek = seek_locations.get(job.filename, 0)
1214 if last_output_seek:
Mike Frysinger383367e2014-09-16 15:06:17 -04001215 print("=== Continue output for %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001216 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001217 print("=== Start output for %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001218
1219 # Print actual output from job
1220 f = codecs.open(job.filename, encoding='utf-8', errors='replace')
1221 f.seek(last_output_seek)
1222 prefix = job.pkgname + ":"
1223 for line in f:
1224
1225 # Save off our position in the file
1226 if line and line[-1] == "\n":
1227 last_output_seek = f.tell()
1228 line = line[:-1]
1229
1230 # Print our line
Mike Frysinger383367e2014-09-16 15:06:17 -04001231 print(prefix, line.encode('utf-8', 'replace'))
David Jamesfcb70ef2011-02-02 16:02:30 -08001232 f.close()
1233
1234 # Save our last spot in the file so that we don't print out the same
1235 # location twice.
1236 seek_locations[job.filename] = last_output_seek
1237
1238 # Note end of output section
1239 if job.done:
Mike Frysinger383367e2014-09-16 15:06:17 -04001240 print("=== Complete: %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001241 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001242 print("=== Still running: %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001243
1244 if self.unlink:
1245 os.unlink(job.filename)
1246
1247
1248def PrintWorker(queue):
1249 """A worker that prints stuff to the screen as requested."""
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001250 proctitle.settitle('PrintWorker')
David Jamesfcb70ef2011-02-02 16:02:30 -08001251
David James321490a2012-12-17 12:05:56 -08001252 def ExitHandler(_signum, _frame):
David James7358d032011-05-19 10:40:03 -07001253 # Set KILLED flag.
1254 KILLED.set()
1255
David Jamesfcb70ef2011-02-02 16:02:30 -08001256 # Switch to default signal handlers so that we'll die after two signals.
David James7358d032011-05-19 10:40:03 -07001257 signal.signal(signal.SIGINT, KillHandler)
1258 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -08001259
1260 # Don't exit on the first SIGINT / SIGTERM, because the parent worker will
1261 # handle it and tell us when we need to exit.
1262 signal.signal(signal.SIGINT, ExitHandler)
1263 signal.signal(signal.SIGTERM, ExitHandler)
1264
1265 # seek_locations is a map indicating the position we are at in each file.
1266 # It starts off empty, but is set by the various Print jobs as we go along
1267 # to indicate where we left off in each file.
1268 seek_locations = {}
1269 while True:
1270 try:
1271 job = queue.get()
1272 if job:
1273 job.Print(seek_locations)
David Jamesbccf8eb2011-07-27 14:06:06 -07001274 sys.stdout.flush()
David Jamesfcb70ef2011-02-02 16:02:30 -08001275 else:
1276 break
1277 except IOError as ex:
1278 if ex.errno == errno.EINTR:
1279 # Looks like we received a signal. Keep printing.
1280 continue
1281 raise
1282
Brian Harring867e2362012-03-17 04:05:17 -07001283
Brian Harring0be85c62012-03-17 19:52:12 -07001284class TargetState(object):
Chris Ching5fcbd622016-11-28 09:22:15 -07001285 """Tracks the scheduling state and priority score of a single target."""
Brian Harring867e2362012-03-17 04:05:17 -07001286
Brian Harring0be85c62012-03-17 19:52:12 -07001287 __slots__ = ("target", "info", "score", "prefetched", "fetched_successfully")
Brian Harring867e2362012-03-17 04:05:17 -07001288
David James321490a2012-12-17 12:05:56 -08001289 def __init__(self, target, info):
Brian Harring867e2362012-03-17 04:05:17 -07001290 self.target, self.info = target, info
Brian Harring0be85c62012-03-17 19:52:12 -07001291 self.fetched_successfully = False
1292 self.prefetched = False
David James321490a2012-12-17 12:05:56 -08001293 self.score = None
Brian Harring867e2362012-03-17 04:05:17 -07001294 self.update_score()
1295
1296 def __cmp__(self, other):
1297 return cmp(self.score, other.score)
1298
1299 def update_score(self):
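# This tuple is the sort key used by ScoredHeap (a min-heap), so smaller
# tuples are scheduled first: targets with more "tprovides" entries, fewer
# unmet "needs", binary packages before source builds, and more "provides"
# entries come first; "idx" and the target name act as stable tie-breakers.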
1300 self.score = (
1301 -len(self.info["tprovides"]),
Brian Harring0be85c62012-03-17 19:52:12 -07001302 len(self.info["needs"]),
Brian Harring11c5eeb2012-03-18 11:02:39 -07001303 not self.info["binary"],
Brian Harring867e2362012-03-17 04:05:17 -07001304 -len(self.info["provides"]),
1305 self.info["idx"],
1306 self.target,
1307 )
1308
1309
1310class ScoredHeap(object):
Don Garrett25f309a2014-03-19 14:02:12 -07001311 """Implementation of a general purpose scored heap."""
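# Thin wrapper around heapq: "heap" holds TargetState objects ordered by
# their score, while "_heap_set" mirrors the queued target names so that
# membership tests (__contains__) stay O(1). A hypothetical usage sketch
# (names mirror this module):
#   heap = ScoredHeap(states)     # states: iterable of TargetState
#   while heap:
#     state = heap.get()          # highest-priority target first
#     ...                         # schedule it, or heap.put(state) later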
Brian Harring867e2362012-03-17 04:05:17 -07001312
Brian Harring0be85c62012-03-17 19:52:12 -07001313 __slots__ = ("heap", "_heap_set")
1314
Brian Harring867e2362012-03-17 04:05:17 -07001315 def __init__(self, initial=()):
Brian Harring0be85c62012-03-17 19:52:12 -07001316 self.heap = list()
1317 self._heap_set = set()
1318 if initial:
1319 self.multi_put(initial)
Brian Harring867e2362012-03-17 04:05:17 -07001320
1321 def get(self):
Brian Harring0be85c62012-03-17 19:52:12 -07001322 item = heapq.heappop(self.heap)
1323 self._heap_set.remove(item.target)
1324 return item
Brian Harring867e2362012-03-17 04:05:17 -07001325
Brian Harring0be85c62012-03-17 19:52:12 -07001326 def put(self, item):
1327 if not isinstance(item, TargetState):
1328 raise ValueError("Item %r isn't a TargetState" % (item,))
1329 heapq.heappush(self.heap, item)
1330 self._heap_set.add(item.target)
Brian Harring867e2362012-03-17 04:05:17 -07001331
Brian Harring0be85c62012-03-17 19:52:12 -07001332 def multi_put(self, sequence):
1333 sequence = list(sequence)
1334 self.heap.extend(sequence)
1335 self._heap_set.update(x.target for x in sequence)
Brian Harring867e2362012-03-17 04:05:17 -07001336 self.sort()
1337
David James5c9996d2012-03-24 10:50:46 -07001338 def sort(self):
1339 heapq.heapify(self.heap)
1340
Brian Harring0be85c62012-03-17 19:52:12 -07001341 def __contains__(self, target):
1342 return target in self._heap_set
1343
1344 def __nonzero__(self):
1345 return bool(self.heap)
1346
Brian Harring867e2362012-03-17 04:05:17 -07001347 def __len__(self):
1348 return len(self.heap)
1349
1350
David Jamesfcb70ef2011-02-02 16:02:30 -08001351class EmergeQueue(object):
1352 """Class to schedule emerge jobs according to a dependency graph."""
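# Work flows through up to three stages, each backed by its own pool of
# EmergeWorker processes: a fetch pool that prefetches packages, an
# optional unpack pool (only when unpack_only is set), and a build pool
# that runs the actual merges. Workers report EmergeJobState progress back
# on _job_queue, and a single PrintWorker serializes all console output.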
1353
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001354 def __init__(self, deps_map, emerge, package_db, show_output, unpack_only,
1355 max_retries):
David Jamesfcb70ef2011-02-02 16:02:30 -08001356 # Store the dependency graph.
1357 self._deps_map = deps_map
Brian Harring0be85c62012-03-17 19:52:12 -07001358 self._state_map = {}
David Jamesfcb70ef2011-02-02 16:02:30 -08001359 # Initialize the running queue to empty
Brian Harring0be85c62012-03-17 19:52:12 -07001360 self._build_jobs = {}
1361 self._build_ready = ScoredHeap()
1362 self._fetch_jobs = {}
1363 self._fetch_ready = ScoredHeap()
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001364 self._unpack_jobs = {}
1365 self._unpack_ready = ScoredHeap()
David Jamesfcb70ef2011-02-02 16:02:30 -08001366 # List of total package installs represented in deps_map.
1367 install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"]
1368 self._total_jobs = len(install_jobs)
1369 self._show_output = show_output
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001370 self._unpack_only = unpack_only
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001371 self._max_retries = max_retries
David Jamesfcb70ef2011-02-02 16:02:30 -08001372
1373 if "--pretend" in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001374 print("Skipping merge because of --pretend mode.")
David Jamesfcb70ef2011-02-02 16:02:30 -08001375 sys.exit(0)
1376
David Jamesaaf49e42014-04-24 09:40:05 -07001377 # Set up a session so we can easily terminate all children.
1378 self._SetupSession()
David James7358d032011-05-19 10:40:03 -07001379
David Jamesfcb70ef2011-02-02 16:02:30 -08001380 # Setup scheduler graph object. This is used by the child processes
1381 # to help schedule jobs.
1382 emerge.scheduler_graph = emerge.depgraph.schedulerGraph()
1383
1384 # Calculate how many jobs we can run in parallel. We don't want to pass
1385 # the --jobs flag over to emerge itself, because that'll tell emerge to
1386 # hide its output, and said output is quite useful for debugging hung
1387 # jobs.
1388 procs = min(self._total_jobs,
1389 emerge.opts.pop("--jobs", multiprocessing.cpu_count()))
Nam T. Nguyenf7098b32015-05-08 11:04:48 -07001390 self._build_procs = self._unpack_procs = max(1, procs)
1391 # Fetch is IO bound, we can use more processes.
1392 self._fetch_procs = max(4, procs)
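# For example, building 10 packages on a 32-core machine with no explicit
# --jobs gives procs == 10, so the build and unpack pools get 10 workers
# each and the fetch pool gets max(4, 10) == 10.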
David James8c7e5e32011-06-28 11:26:03 -07001393 self._load_avg = emerge.opts.pop("--load-average", None)
David Jamesfcb70ef2011-02-02 16:02:30 -08001394 self._job_queue = multiprocessing.Queue()
1395 self._print_queue = multiprocessing.Queue()
Brian Harring0be85c62012-03-17 19:52:12 -07001396
1397 self._fetch_queue = multiprocessing.Queue()
1398 args = (self._fetch_queue, self._job_queue, emerge, package_db, True)
1399 self._fetch_pool = multiprocessing.Pool(self._fetch_procs, EmergeWorker,
1400 args)
1401
1402 self._build_queue = multiprocessing.Queue()
1403 args = (self._build_queue, self._job_queue, emerge, package_db)
1404 self._build_pool = multiprocessing.Pool(self._build_procs, EmergeWorker,
1405 args)
1406
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001407 if self._unpack_only:
1408 # Unpack pool only required on unpack_only jobs.
1409 self._unpack_queue = multiprocessing.Queue()
1410 args = (self._unpack_queue, self._job_queue, emerge, package_db, False,
1411 True)
1412 self._unpack_pool = multiprocessing.Pool(self._unpack_procs, EmergeWorker,
1413 args)
1414
David Jamesfcb70ef2011-02-02 16:02:30 -08001415 self._print_worker = multiprocessing.Process(target=PrintWorker,
1416 args=[self._print_queue])
1417 self._print_worker.start()
1418
1419 # Initialize the failed queue to empty.
1420 self._retry_queue = []
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001421 self._failed_count = dict()
David Jamesfcb70ef2011-02-02 16:02:30 -08001422
David Jamesfcb70ef2011-02-02 16:02:30 -08001423 # Setup an exit handler so that we print nice messages if we are
1424 # terminated.
1425 self._SetupExitHandler()
1426
1427 # Schedule our jobs.
Brian Harring0be85c62012-03-17 19:52:12 -07001428 self._state_map.update(
1429 (pkg, TargetState(pkg, data)) for pkg, data in deps_map.iteritems())
1430 self._fetch_ready.multi_put(self._state_map.itervalues())
David Jamesfcb70ef2011-02-02 16:02:30 -08001431
David Jamesaaf49e42014-04-24 09:40:05 -07001432 def _SetupSession(self):
1433 """Set up a session so we can easily terminate all children."""
1434 # When we call os.setsid(), this sets up a session / process group for this
1435 # process and all children. These session groups are needed so that we can
1436 # easily kill all children (including processes launched by emerge) before
1437 # we exit.
1438 #
1439 # One unfortunate side effect of os.setsid() is that it blocks CTRL-C from
1440 # being received. To work around this, we only call os.setsid() in a forked
1441 # process, so that the parent can still watch for CTRL-C. The parent will
1442 # just sit around, watching for signals and propagating them to the child,
1443 # until the child exits.
1444 #
1445 # TODO(davidjames): It would be nice if we could replace this with cgroups.
1446 pid = os.fork()
1447 if pid == 0:
1448 os.setsid()
1449 else:
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001450 proctitle.settitle('SessionManager')
1451
David Jamesaaf49e42014-04-24 09:40:05 -07001452 def PropagateToChildren(signum, _frame):
1453 # Just propagate the signals down to the child. We'll exit when the
1454 # child does.
1455 try:
1456 os.kill(pid, signum)
1457 except OSError as ex:
1458 if ex.errno != errno.ESRCH:
1459 raise
1460 signal.signal(signal.SIGINT, PropagateToChildren)
1461 signal.signal(signal.SIGTERM, PropagateToChildren)
1462
1463 def StopGroup(_signum, _frame):
1464 # When we get stopped, stop the children.
1465 try:
1466 os.killpg(pid, signal.SIGSTOP)
1467 os.kill(0, signal.SIGSTOP)
1468 except OSError as ex:
1469 if ex.errno != errno.ESRCH:
1470 raise
1471 signal.signal(signal.SIGTSTP, StopGroup)
1472
1473 def ContinueGroup(_signum, _frame):
1474 # Launch the children again after being stopped.
1475 try:
1476 os.killpg(pid, signal.SIGCONT)
1477 except OSError as ex:
1478 if ex.errno != errno.ESRCH:
1479 raise
1480 signal.signal(signal.SIGCONT, ContinueGroup)
1481
1482 # Loop until the children exit. We exit with os._exit to be sure we
1483 # don't run any finalizers (those will be run by the child process.)
1484 # pylint: disable=W0212
1485 while True:
1486 try:
1487 # Wait for the process to exit. When it does, exit with the return
1488 # value of the subprocess.
Mike Frysingere2d8f0d2014-11-01 13:09:26 -04001489 os._exit(process_util.GetExitStatus(os.waitpid(pid, 0)[1]))
David Jamesaaf49e42014-04-24 09:40:05 -07001490 except OSError as ex:
1491 if ex.errno == errno.EINTR:
1492 continue
1493 traceback.print_exc()
1494 os._exit(1)
1495 except BaseException:
1496 traceback.print_exc()
1497 os._exit(1)
1498
David Jamesfcb70ef2011-02-02 16:02:30 -08001499 def _SetupExitHandler(self):
1500
David James321490a2012-12-17 12:05:56 -08001501 def ExitHandler(signum, _frame):
David James7358d032011-05-19 10:40:03 -07001502 # Set KILLED flag.
1503 KILLED.set()
David Jamesfcb70ef2011-02-02 16:02:30 -08001504
1505 # Kill our signal handlers so we don't get called recursively
David James7358d032011-05-19 10:40:03 -07001506 signal.signal(signal.SIGINT, KillHandler)
1507 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -08001508
1509 # Print our current job status
Brian Harring0be85c62012-03-17 19:52:12 -07001510 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001511 if job:
1512 self._print_queue.put(JobPrinter(job, unlink=True))
1513
1514 # Notify the user that we are exiting
1515 self._Print("Exiting on signal %s" % signum)
David James7358d032011-05-19 10:40:03 -07001516 self._print_queue.put(None)
1517 self._print_worker.join()
David Jamesfcb70ef2011-02-02 16:02:30 -08001518
1519 # Kill child threads, then exit.
David James7358d032011-05-19 10:40:03 -07001520 os.killpg(0, signal.SIGKILL)
David Jamesfcb70ef2011-02-02 16:02:30 -08001521 sys.exit(1)
1522
1523 # Print out job status when we are killed
1524 signal.signal(signal.SIGINT, ExitHandler)
1525 signal.signal(signal.SIGTERM, ExitHandler)
1526
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001527 def _ScheduleUnpack(self, pkg_state):
1528 self._unpack_jobs[pkg_state.target] = None
1529 self._unpack_queue.put(pkg_state)
1530
Brian Harring0be85c62012-03-17 19:52:12 -07001531 def _Schedule(self, pkg_state):
David Jamesfcb70ef2011-02-02 16:02:30 -08001532 # We maintain a tree of all deps; if this doesn't need
David James8c7e5e32011-06-28 11:26:03 -07001533 # to be installed, just free up its children and continue.
David Jamesfcb70ef2011-02-02 16:02:30 -08001534 # It is possible to reinstall deps of deps, without reinstalling
1535 # first level deps, like so:
Mike Frysingerfd969312014-04-02 22:16:42 -04001536 # virtual/target-os (merge) -> eselect (nomerge) -> python (merge)
Brian Harring0be85c62012-03-17 19:52:12 -07001537 this_pkg = pkg_state.info
1538 target = pkg_state.target
1539 if pkg_state.info is not None:
1540 if this_pkg["action"] == "nomerge":
1541 self._Finish(target)
1542 elif target not in self._build_jobs:
1543 # Kick off the build if it's marked to be built.
1544 self._build_jobs[target] = None
1545 self._build_queue.put(pkg_state)
1546 return True
David Jamesfcb70ef2011-02-02 16:02:30 -08001547
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001548 def _ScheduleLoop(self, unpack_only=False):
1549 if unpack_only:
1550 ready_queue = self._unpack_ready
1551 jobs_queue = self._unpack_jobs
1552 procs = self._unpack_procs
1553 else:
1554 ready_queue = self._build_ready
1555 jobs_queue = self._build_jobs
1556 procs = self._build_procs
1557
David James8c7e5e32011-06-28 11:26:03 -07001558 # If the current load exceeds our desired load average, don't schedule
1559 # more than one job.
1560 if self._load_avg and os.getloadavg()[0] > self._load_avg:
1561 needed_jobs = 1
1562 else:
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001563 needed_jobs = procs
David James8c7e5e32011-06-28 11:26:03 -07001564
1565 # Schedule more jobs.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001566 while ready_queue and len(jobs_queue) < needed_jobs:
1567 state = ready_queue.get()
1568 if unpack_only:
1569 self._ScheduleUnpack(state)
1570 else:
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001571 if state.target not in self._failed_count:
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001572 self._Schedule(state)
David Jamesfcb70ef2011-02-02 16:02:30 -08001573
1574 def _Print(self, line):
1575 """Print a single line."""
1576 self._print_queue.put(LinePrinter(line))
1577
1578 def _Status(self):
1579 """Print status."""
1580 current_time = time.time()
Aviv Keshet3b381682015-11-12 13:15:06 -08001581 current_time_struct = time.localtime(current_time)
David Jamesfcb70ef2011-02-02 16:02:30 -08001582 no_output = True
1583
1584 # Print interim output every minute if --show-output is used. Otherwise,
1585 # print notifications about running packages every 2 minutes, and print
1586 # full output for jobs that have been running for 60 minutes or more.
1587 if self._show_output:
1588 interval = 60
1589 notify_interval = 0
1590 else:
1591 interval = 60 * 60
1592 notify_interval = 60 * 2
David James321490a2012-12-17 12:05:56 -08001593 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001594 if job:
1595 last_timestamp = max(job.start_timestamp, job.last_output_timestamp)
1596 if last_timestamp + interval < current_time:
1597 self._print_queue.put(JobPrinter(job))
1598 job.last_output_timestamp = current_time
1599 no_output = False
1600 elif (notify_interval and
1601 job.last_notify_timestamp + notify_interval < current_time):
1602 job_seconds = current_time - job.start_timestamp
1603 args = (job.pkgname, job_seconds / 60, job_seconds % 60, job.filename)
1604 info = "Still building %s (%dm%.1fs). Logs in %s" % args
1605 job.last_notify_timestamp = current_time
1606 self._Print(info)
1607 no_output = False
1608
1609 # If we haven't printed any messages yet, print a general status message
1610 # here.
1611 if no_output:
1612 seconds = current_time - GLOBAL_START
Brian Harring0be85c62012-03-17 19:52:12 -07001613 fjobs, fready = len(self._fetch_jobs), len(self._fetch_ready)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001614 ujobs, uready = len(self._unpack_jobs), len(self._unpack_ready)
Brian Harring0be85c62012-03-17 19:52:12 -07001615 bjobs, bready = len(self._build_jobs), len(self._build_ready)
1616 retries = len(self._retry_queue)
1617 pending = max(0, len(self._deps_map) - fjobs - bjobs)
1618 line = "Pending %s/%s, " % (pending, self._total_jobs)
1619 if fjobs or fready:
1620 line += "Fetching %s/%s, " % (fjobs, fready + fjobs)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001621 if ujobs or uready:
1622 line += "Unpacking %s/%s, " % (ujobs, uready + ujobs)
Brian Harring0be85c62012-03-17 19:52:12 -07001623 if bjobs or bready or retries:
1624 line += "Building %s/%s, " % (bjobs, bready + bjobs)
1625 if retries:
1626 line += "Retrying %s, " % (retries,)
Mike Frysingerd6e2df02014-11-26 02:55:04 -05001627 load = " ".join(str(x) for x in os.getloadavg())
Aviv Keshet3b381682015-11-12 13:15:06 -08001628 line += ("[Time %s | Elapsed %dm%.1fs | Load %s]" % (
1629 time.strftime('%H:%M:%S', current_time_struct), seconds / 60,
1630 seconds % 60, load))
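# Illustrative status line (values made up; shown wrapped for readability):
#   Pending 12/50, Fetching 3/9, Building 4/10, Retrying 1,
#   [Time 14:02:11 | Elapsed 5m23.4s | Load 2.1 1.9 1.6]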
Brian Harring0be85c62012-03-17 19:52:12 -07001631 self._Print(line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001632
1633 def _Finish(self, target):
David James8c7e5e32011-06-28 11:26:03 -07001634 """Mark a target as completed and unblock dependencies."""
1635 this_pkg = self._deps_map[target]
1636 if this_pkg["needs"] and this_pkg["nodeps"]:
1637 # We got installed, but our deps have not been installed yet. Dependent
1638 # packages should only be installed when our needs have been fully met.
1639 this_pkg["action"] = "nomerge"
1640 else:
David James8c7e5e32011-06-28 11:26:03 -07001641 for dep in this_pkg["provides"]:
1642 dep_pkg = self._deps_map[dep]
Brian Harring0be85c62012-03-17 19:52:12 -07001643 state = self._state_map[dep]
David James8c7e5e32011-06-28 11:26:03 -07001644 del dep_pkg["needs"][target]
Brian Harring0be85c62012-03-17 19:52:12 -07001645 state.update_score()
1646 if not state.prefetched:
1647 if dep in self._fetch_ready:
1648 # If it's not currently being fetched, update the prioritization
1649 self._fetch_ready.sort()
1650 elif not dep_pkg["needs"]:
David James8c7e5e32011-06-28 11:26:03 -07001651 if dep_pkg["nodeps"] and dep_pkg["action"] == "nomerge":
1652 self._Finish(dep)
1653 else:
Brian Harring0be85c62012-03-17 19:52:12 -07001654 self._build_ready.put(self._state_map[dep])
David James8c7e5e32011-06-28 11:26:03 -07001655 self._deps_map.pop(target)
David Jamesfcb70ef2011-02-02 16:02:30 -08001656
1657 def _Retry(self):
David James8c7e5e32011-06-28 11:26:03 -07001658 while self._retry_queue:
Brian Harring0be85c62012-03-17 19:52:12 -07001659 state = self._retry_queue.pop(0)
1660 if self._Schedule(state):
1661 self._Print("Retrying emerge of %s." % state.target)
David James8c7e5e32011-06-28 11:26:03 -07001662 break
David Jamesfcb70ef2011-02-02 16:02:30 -08001663
Brian Harringa43f5952012-04-12 01:19:34 -07001664 def _Shutdown(self):
David Jamesfcb70ef2011-02-02 16:02:30 -08001665 # Tell emerge workers to exit. They all exit when 'None' is pushed
1666 # to the queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001667
Brian Harringa43f5952012-04-12 01:19:34 -07001668 # Shut down the workers first, then the job queue (which is how they feed
1669 # things back), and finally the print queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001670
Brian Harringa43f5952012-04-12 01:19:34 -07001671 def _stop(queue, pool):
1672 if pool is None:
1673 return
1674 try:
1675 queue.put(None)
1676 pool.close()
1677 pool.join()
1678 finally:
1679 pool.terminate()
Brian Harring0be85c62012-03-17 19:52:12 -07001680
Brian Harringa43f5952012-04-12 01:19:34 -07001681 _stop(self._fetch_queue, self._fetch_pool)
1682 self._fetch_queue = self._fetch_pool = None
Brian Harring0be85c62012-03-17 19:52:12 -07001683
Brian Harringa43f5952012-04-12 01:19:34 -07001684 _stop(self._build_queue, self._build_pool)
1685 self._build_queue = self._build_pool = None
1686
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001687 if self._unpack_only:
1688 _stop(self._unpack_queue, self._unpack_pool)
1689 self._unpack_queue = self._unpack_pool = None
1690
Brian Harringa43f5952012-04-12 01:19:34 -07001691 if self._job_queue is not None:
1692 self._job_queue.close()
1693 self._job_queue = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001694
1695 # Now that our workers are finished, we can kill the print queue.
Brian Harringa43f5952012-04-12 01:19:34 -07001696 if self._print_worker is not None:
1697 try:
1698 self._print_queue.put(None)
1699 self._print_queue.close()
1700 self._print_worker.join()
1701 finally:
1702 self._print_worker.terminate()
1703 self._print_queue = self._print_worker = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001704
1705 def Run(self):
1706 """Run through the scheduled ebuilds.
1707
1708 Keep running so long as we have uninstalled packages in the
1709 dependency graph to merge.
1710 """
Brian Harringa43f5952012-04-12 01:19:34 -07001711 if not self._deps_map:
1712 return
1713
Brian Harring0be85c62012-03-17 19:52:12 -07001714 # Start the fetchers.
1715 for _ in xrange(min(self._fetch_procs, len(self._fetch_ready))):
1716 state = self._fetch_ready.get()
1717 self._fetch_jobs[state.target] = None
1718 self._fetch_queue.put(state)
1719
1720 # Print an update, then get going.
1721 self._Status()
1722
David Jamesfcb70ef2011-02-02 16:02:30 -08001723 while self._deps_map:
1724 # Check here that we are actually waiting for something.
Brian Harring0be85c62012-03-17 19:52:12 -07001725 if (self._build_queue.empty() and
David Jamesfcb70ef2011-02-02 16:02:30 -08001726 self._job_queue.empty() and
Brian Harring0be85c62012-03-17 19:52:12 -07001727 not self._fetch_jobs and
1728 not self._fetch_ready and
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001729 not self._unpack_jobs and
1730 not self._unpack_ready and
Brian Harring0be85c62012-03-17 19:52:12 -07001731 not self._build_jobs and
1732 not self._build_ready and
David Jamesfcb70ef2011-02-02 16:02:30 -08001733 self._deps_map):
1734 # If we have failed on a package, retry it now.
1735 if self._retry_queue:
1736 self._Retry()
1737 else:
David Jamesfcb70ef2011-02-02 16:02:30 -08001738 # Tell the user why we're exiting.
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001739 if self._failed_count:
1740 print('Packages failed:\n\t%s' %
1741 '\n\t'.join(self._failed_count.iterkeys()))
David James0eae23e2012-07-03 15:04:25 -07001742 status_file = os.environ.get("PARALLEL_EMERGE_STATUS_FILE")
1743 if status_file:
David James321490a2012-12-17 12:05:56 -08001744 failed_pkgs = set(portage.versions.cpv_getkey(x)
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001745 for x in self._failed_count.iterkeys())
David James0eae23e2012-07-03 15:04:25 -07001746 with open(status_file, "a") as f:
1747 f.write("%s\n" % " ".join(failed_pkgs))
David Jamesfcb70ef2011-02-02 16:02:30 -08001748 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001749 print("Deadlock! Circular dependencies!")
David Jamesfcb70ef2011-02-02 16:02:30 -08001750 sys.exit(1)
1751
David James321490a2012-12-17 12:05:56 -08001752 for _ in xrange(12):
David Jamesa74289a2011-08-12 10:41:24 -07001753 try:
1754 job = self._job_queue.get(timeout=5)
1755 break
1756 except Queue.Empty:
1757 # Check if any more jobs can be scheduled.
1758 self._ScheduleLoop()
1759 else:
Brian Harring706747c2012-03-16 03:04:31 -07001760 # Print an update every 60 seconds.
David Jamesfcb70ef2011-02-02 16:02:30 -08001761 self._Status()
1762 continue
1763
1764 target = job.target
1765
Brian Harring0be85c62012-03-17 19:52:12 -07001766 if job.fetch_only:
1767 if not job.done:
1768 self._fetch_jobs[job.target] = job
1769 else:
1770 state = self._state_map[job.target]
1771 state.prefetched = True
1772 state.fetched_successfully = (job.retcode == 0)
1773 del self._fetch_jobs[job.target]
1774 self._Print("Fetched %s in %2.2fs"
1775 % (target, time.time() - job.start_timestamp))
1776
1777 if self._show_output or job.retcode != 0:
1778 self._print_queue.put(JobPrinter(job, unlink=True))
1779 else:
1780 os.unlink(job.filename)
1781 # Failure or not, let build work with it next.
1782 if not self._deps_map[job.target]["needs"]:
1783 self._build_ready.put(state)
1784 self._ScheduleLoop()
1785
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001786 if self._unpack_only and job.retcode == 0:
1787 self._unpack_ready.put(state)
1788 self._ScheduleLoop(unpack_only=True)
1789
Brian Harring0be85c62012-03-17 19:52:12 -07001790 if self._fetch_ready:
1791 state = self._fetch_ready.get()
1792 self._fetch_queue.put(state)
1793 self._fetch_jobs[state.target] = None
1794 else:
1795 # Minor optimization; shut down fetchers early since we know
1796 # the queue is empty.
1797 self._fetch_queue.put(None)
1798 continue
1799
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001800 if job.unpack_only:
1801 if not job.done:
1802 self._unpack_jobs[target] = job
1803 else:
1804 del self._unpack_jobs[target]
1805 self._Print("Unpacked %s in %2.2fs"
1806 % (target, time.time() - job.start_timestamp))
1807 if self._show_output or job.retcode != 0:
1808 self._print_queue.put(JobPrinter(job, unlink=True))
1809 else:
1810 os.unlink(job.filename)
1811 if self._unpack_ready:
1812 state = self._unpack_ready.get()
1813 self._unpack_queue.put(state)
1814 self._unpack_jobs[state.target] = None
1815 continue
1816
David Jamesfcb70ef2011-02-02 16:02:30 -08001817 if not job.done:
Brian Harring0be85c62012-03-17 19:52:12 -07001818 self._build_jobs[target] = job
David Jamesfcb70ef2011-02-02 16:02:30 -08001819 self._Print("Started %s (logged in %s)" % (target, job.filename))
1820 continue
1821
1822 # Print output of job
1823 if self._show_output or job.retcode != 0:
1824 self._print_queue.put(JobPrinter(job, unlink=True))
1825 else:
1826 os.unlink(job.filename)
Brian Harring0be85c62012-03-17 19:52:12 -07001827 del self._build_jobs[target]
David Jamesfcb70ef2011-02-02 16:02:30 -08001828
1829 seconds = time.time() - job.start_timestamp
1830 details = "%s (in %dm%.1fs)" % (target, seconds / 60, seconds % 60)
1831
1832 # Complain if necessary.
1833 if job.retcode != 0:
1834 # Handle job failure.
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001835 failed_count = self._failed_count.get(target, 0)
1836 if failed_count >= self._max_retries:
1837 # If this job has failed and can't be retried, give up.
David Jamesfcb70ef2011-02-02 16:02:30 -08001838 self._Print("Failed %s. Your build has failed." % details)
1839 else:
1840 # Queue up this build to try again after a long while.
Brian Harring0be85c62012-03-17 19:52:12 -07001841 self._retry_queue.append(self._state_map[target])
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001842 self._failed_count[target] = failed_count + 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001843 self._Print("Failed %s, retrying later." % details)
1844 else:
David James32420cc2011-08-25 21:32:46 -07001845 self._Print("Completed %s" % details)
1846
1847 # Mark as completed and unblock waiting ebuilds.
1848 self._Finish(target)
1849
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001850 if target in self._failed_count and self._retry_queue:
David Jamesfcb70ef2011-02-02 16:02:30 -08001851 # If we have successfully retried a failed package, and there
1852 # are more failed packages, try the next one. We will only have
1853 # one retrying package actively running at a time.
1854 self._Retry()
1855
David Jamesfcb70ef2011-02-02 16:02:30 -08001856
David James8c7e5e32011-06-28 11:26:03 -07001857 # Schedule pending jobs and print an update.
1858 self._ScheduleLoop()
1859 self._Status()
David Jamesfcb70ef2011-02-02 16:02:30 -08001860
David Jamese703d0f2012-01-12 16:27:45 -08001861 # If packages were retried, output a warning.
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001862 if self._failed_count:
David Jamese703d0f2012-01-12 16:27:45 -08001863 self._Print("")
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001864 self._Print("WARNING: The following packages failed once or more,")
David Jamese703d0f2012-01-12 16:27:45 -08001865 self._Print("but succeeded upon retry. This might indicate incorrect")
1866 self._Print("dependencies.")
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001867 for pkg in self._failed_count.iterkeys():
David Jamese703d0f2012-01-12 16:27:45 -08001868 self._Print(" %s" % pkg)
1869 self._Print("@@@STEP_WARNINGS@@@")
1870 self._Print("")
1871
David Jamesfcb70ef2011-02-02 16:02:30 -08001872 # Tell child threads to exit.
1873 self._Print("Merge complete")
David Jamesfcb70ef2011-02-02 16:02:30 -08001874
1875
Brian Harring30675052012-02-29 12:18:22 -08001876def main(argv):
Brian Harring8294d652012-05-23 02:20:52 -07001877 try:
1878 return real_main(argv)
1879 finally:
1880 # Work around multiprocessing sucking and not cleaning up after itself.
1881 # http://bugs.python.org/issue4106
1882 # Step one: ensure GC is run *prior* to the VM starting shutdown.
1883 gc.collect()
1884 # Step two: go looking for those threads and try to manually reap
1885 # them if we can.
1886 for x in threading.enumerate():
1887 # Filter on the name, and ident; if ident is None, the thread
1888 # wasn't started.
1889 if x.name == 'QueueFeederThread' and x.ident is not None:
1890 x.join(1)
David Jamesfcb70ef2011-02-02 16:02:30 -08001891
Brian Harring8294d652012-05-23 02:20:52 -07001892
1893def real_main(argv):
Brian Harring30675052012-02-29 12:18:22 -08001894 parallel_emerge_args = argv[:]
David Jamesfcb70ef2011-02-02 16:02:30 -08001895 deps = DepGraphGenerator()
Brian Harring30675052012-02-29 12:18:22 -08001896 deps.Initialize(parallel_emerge_args)
David Jamesfcb70ef2011-02-02 16:02:30 -08001897 emerge = deps.emerge
1898
1899 if emerge.action is not None:
Brian Harring30675052012-02-29 12:18:22 -08001900 argv = deps.ParseParallelEmergeArgs(argv)
Brian Harring8294d652012-05-23 02:20:52 -07001901 return emerge_main(argv)
David Jamesfcb70ef2011-02-02 16:02:30 -08001902 elif not emerge.cmdline_packages:
1903 Usage()
Brian Harring8294d652012-05-23 02:20:52 -07001904 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001905
1906 # Unless we're in pretend mode, there's not much point running without
1907 # root access. We need to be able to install packages.
1908 #
1909 # NOTE: Even if you're running --pretend, it's a good idea to run
1910 # parallel_emerge with root access so that portage can write to the
1911 # dependency cache. This is important for performance.
David James321490a2012-12-17 12:05:56 -08001912 if "--pretend" not in emerge.opts and portage.data.secpass < 2:
Mike Frysinger383367e2014-09-16 15:06:17 -04001913 print("parallel_emerge: superuser access is required.")
Brian Harring8294d652012-05-23 02:20:52 -07001914 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001915
1916 if "--quiet" not in emerge.opts:
1917 cmdline_packages = " ".join(emerge.cmdline_packages)
Mike Frysinger383367e2014-09-16 15:06:17 -04001918 print("Starting fast-emerge.")
1919 print(" Building package %s on %s" % (cmdline_packages,
Gilad Arnold05f94b02015-05-22 10:41:05 -07001920 deps.sysroot or "root"))
David Jamesfcb70ef2011-02-02 16:02:30 -08001921
David James386ccd12011-05-04 20:17:42 -07001922 deps_tree, deps_info = deps.GenDependencyTree()
David Jamesfcb70ef2011-02-02 16:02:30 -08001923
1924 # You want me to be verbose? I'll give you two trees! Twice as much value.
1925 if "--tree" in emerge.opts and "--verbose" in emerge.opts:
1926 deps.PrintTree(deps_tree)
1927
David James386ccd12011-05-04 20:17:42 -07001928 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001929
1930 # OK, time to print out our progress so far.
1931 deps.PrintInstallPlan(deps_graph)
1932 if "--tree" in emerge.opts:
1933 PrintDepsMap(deps_graph)
1934
1935 # Are we upgrading portage? If so, and there are more packages to merge,
1936 # schedule a restart of parallel_emerge to merge the rest. This ensures that
1937 # we pick up all updates to portage settings before merging any more
1938 # packages.
1939 portage_upgrade = False
1940 root = emerge.settings["ROOT"]
Don Garrett25f309a2014-03-19 14:02:12 -07001941 # pylint: disable=W0212
David Jamesfcb70ef2011-02-02 16:02:30 -08001942 if root == "/":
Gilad Arnoldcead28a2015-05-22 10:45:02 -07001943 final_db = emerge.depgraph._dynamic_config._filtered_trees[root]['graph_db']
Mike Frysinger3fb56ef2018-01-05 19:00:04 -05001944 for db_pkg in final_db.cp_list("sys-apps/portage"):
David Jamesfcb70ef2011-02-02 16:02:30 -08001945 portage_pkg = deps_graph.get(db_pkg.cpv)
David James0ff16f22012-11-02 14:18:07 -07001946 if portage_pkg:
David Jamesfcb70ef2011-02-02 16:02:30 -08001947 portage_upgrade = True
1948 if "--quiet" not in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001949 print("Upgrading portage first, then restarting...")
David Jamesfcb70ef2011-02-02 16:02:30 -08001950
David James0ff16f22012-11-02 14:18:07 -07001951 # Upgrade Portage first, then the rest of the packages.
1952 #
1953 # In order to grant the child permission to run setsid, we need to run sudo
1954 # again. We preserve SUDO_USER here in case an ebuild depends on it.
1955 if portage_upgrade:
1956 # Calculate what arguments to use when re-invoking.
1957 args = ["sudo", "-E", "SUDO_USER=%s" % os.environ.get("SUDO_USER", "")]
1958 args += [os.path.abspath(sys.argv[0])] + parallel_emerge_args
1959 args += ["--exclude=sys-apps/portage"]
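# Illustrative re-invocation (user, path, and packages are hypothetical):
#   sudo -E SUDO_USER=builder /abs/path/parallel_emerge --board=amd64-generic \
#       virtual/target-os --exclude=sys-apps/portage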
1960
1961 # First upgrade Portage.
1962 passthrough_args = ("--quiet", "--pretend", "--verbose")
1963 emerge_args = [k for k in emerge.opts if k in passthrough_args]
1964 ret = emerge_main(emerge_args + ["portage"])
1965 if ret != 0:
1966 return ret
1967
1968 # Now upgrade the rest.
1969 os.execvp(args[0], args)
1970
Bertrand SIMONNETc03c8ee2014-12-10 17:02:55 -08001971 # Attempt to solve crbug.com/433482
1972 # The file descriptor error appears only when getting userpriv_groups
1973 # (lazily generated). Loading userpriv_groups here will reduce the number of
1974 # calls from few hundreds to one.
1975 portage.data._get_global('userpriv_groups')
1976
David Jamesfcb70ef2011-02-02 16:02:30 -08001977 # Run the queued emerges.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001978 scheduler = EmergeQueue(deps_graph, emerge, deps.package_db, deps.show_output,
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001979 deps.unpack_only, deps.max_retries)
Brian Harringa43f5952012-04-12 01:19:34 -07001980 try:
1981 scheduler.Run()
1982 finally:
Don Garrett25f309a2014-03-19 14:02:12 -07001983 # pylint: disable=W0212
Brian Harringa43f5952012-04-12 01:19:34 -07001984 scheduler._Shutdown()
David James97ce8902011-08-16 09:51:05 -07001985 scheduler = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001986
Mike Frysingerd20a6e22012-10-04 19:01:10 -04001987 clean_logs(emerge.settings)
1988
Mike Frysinger383367e2014-09-16 15:06:17 -04001989 print("Done")
Brian Harring8294d652012-05-23 02:20:52 -07001990 return 0