# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Program to run emerge in parallel, for significant speedup.

Usage:
 ./parallel_emerge [--board=BOARD] [--workon=PKGS]
                   [--force-remote-binary=PKGS] [emerge args] package

This script runs multiple emerge processes in parallel, using appropriate
Portage APIs. It is faster than standard emerge because it has a
multiprocess model instead of an asynchronous model.
"""

from __future__ import print_function

import codecs
import copy
import errno
import gc
import heapq
import multiprocessing
import os
try:
  import Queue
except ImportError:
  # Python-3 renamed to "queue". We still use Queue to avoid collisions
  # with naming variables as "queue". Maybe we'll transition at some point.
  # pylint: disable=F0401
  import queue as Queue
import signal
import subprocess
import sys
import tempfile
import threading
import time
import traceback

from chromite.lib import cros_build_lib
from chromite.lib import cros_event
from chromite.lib import portage_util
from chromite.lib import process_util
from chromite.lib import proctitle

# If PORTAGE_USERNAME isn't specified, scrape it from the $HOME variable. On
# Chromium OS, the default "portage" user doesn't have the necessary
# permissions. It'd be easier if we could default to $USERNAME, but $USERNAME
# is "root" here because we get called through sudo.
#
# We need to set this before importing any portage modules, because portage
# looks up "PORTAGE_USERNAME" at import time.
#
# NOTE: .bashrc sets PORTAGE_USERNAME = $USERNAME, so most people won't
# encounter this case unless they have an old chroot or blow away the
# environment by running sudo without the -E specifier.
if "PORTAGE_USERNAME" not in os.environ:
  homedir = os.environ.get("HOME")
  if homedir:
    os.environ["PORTAGE_USERNAME"] = os.path.basename(homedir)

# Wrap Popen with a lock to ensure no two Popen are executed simultaneously in
# the same process.
# Two Popen calls at the same time might be the cause of crbug.com/433482.
_popen_lock = threading.Lock()
_old_popen = subprocess.Popen

def _LockedPopen(*args, **kwargs):
  with _popen_lock:
    return _old_popen(*args, **kwargs)

subprocess.Popen = _LockedPopen

# Portage doesn't expose dependency trees in its public API, so we have to
# make use of some private APIs here. These modules are found under
# /usr/lib/portage/pym/.
#
# TODO(davidjames): Update Portage to expose public APIs for these features.
# pylint: disable=F0401
from _emerge.actions import adjust_configs
from _emerge.actions import load_emerge_config
from _emerge.create_depgraph_params import create_depgraph_params
from _emerge.depgraph import backtrack_depgraph
from _emerge.main import emerge_main
from _emerge.main import parse_opts
from _emerge.Package import Package
from _emerge.post_emerge import clean_logs
from _emerge.Scheduler import Scheduler
from _emerge.stdout_spinner import stdout_spinner
from portage._global_updates import _global_updates
import portage
import portage.debug
# pylint: enable=F0401


def Usage():
  """Print usage."""
  print("Usage:")
  print(" ./parallel_emerge [--board=BOARD] [--workon=PKGS] [--rebuild]")
  print("                   [--eventlogfile=FILE] [emerge args] package")
  print()
  print("Packages specified as workon packages are always built from source.")
  print()
  print("The --workon argument is mainly useful when you want to build and")
  print("install packages that you are working on unconditionally, but do not")
  print("want to have to rev the package to indicate you want to build it")
  print("from source. The build_packages script will automatically supply the")
  print("workon argument to emerge, ensuring that packages selected using")
  print("cros-workon are rebuilt.")
  print()
  print("The --rebuild option rebuilds packages whenever their dependencies")
  print("are changed. This ensures that your build is correct.")
  print()
  print("The --eventlogfile option writes events to the given file. The file")
  print("is overwritten if it exists.")


# Global start time
GLOBAL_START = time.time()

# Whether process has been killed by a signal.
KILLED = multiprocessing.Event()


class EmergeData(object):
  """This simple struct holds various emerge variables.

  This struct helps us easily pass emerge variables around as a unit.
  These variables are used for calculating dependencies and installing
  packages.
  """

  __slots__ = ["action", "cmdline_packages", "depgraph", "favorites",
               "mtimedb", "opts", "root_config", "scheduler_graph",
               "settings", "spinner", "trees"]

  def __init__(self):
    # The action the user requested. If the user is installing packages, this
    # is None. If the user is doing anything other than installing packages,
    # this will contain the action name, which will map exactly to the
    # long-form name of the associated emerge option.
    #
    # Example: If you call parallel_emerge --unmerge package, the action name
    # will be "unmerge"
    self.action = None

    # The list of packages the user passed on the command-line.
    self.cmdline_packages = None

    # The emerge dependency graph. It'll contain all the packages involved in
    # this merge, along with their versions.
    self.depgraph = None

    # The list of candidates to add to the world file.
    self.favorites = None

    # A dict of the options passed to emerge. This dict has been cleaned up
    # a bit by parse_opts, so that it's a bit easier for the emerge code to
    # look at the options.
    #
    # Emerge takes a few shortcuts in its cleanup process to make parsing of
    # the options dict easier. For example, if you pass in "--usepkg=n", the
    # "--usepkg" flag is just left out of the dictionary altogether. Because
    # --usepkg=n is the default, this makes parsing easier, because emerge
    # can just assume that if "--usepkg" is in the dictionary, it's enabled.
    #
    # These cleanup processes aren't applied to all options. For example, the
    # --with-bdeps flag is passed in as-is. For a full list of the cleanups
    # applied by emerge, see the parse_opts function in the _emerge.main
    # package.
    self.opts = None

    # A dictionary used by portage to maintain global state. This state is
    # loaded from disk when portage starts up, and saved to disk whenever we
    # call mtimedb.commit().
    #
    # This database contains information about global updates (i.e., what
    # version of portage we have) and what we're currently doing. Portage
    # saves what it is currently doing in this database so that it can be
    # resumed when you call it with the --resume option.
    #
    # parallel_emerge does not save what it is currently doing in the mtimedb,
    # so we do not support the --resume option.
    self.mtimedb = None

    # The portage configuration for our current root. This contains the portage
    # settings (see below) and the three portage trees for our current root.
    # (The three portage trees are explained below, in the documentation for
    # the "trees" member.)
    self.root_config = None

    # The scheduler graph is used by emerge to calculate what packages to
    # install. We don't actually install any deps, so this isn't really used,
    # but we pass it in to the Scheduler object anyway.
    self.scheduler_graph = None

    # Portage settings for our current session. Most of these settings are set
    # in make.conf inside our current install root.
    self.settings = None

    # The spinner, which spews stuff to stdout to indicate that portage is
    # doing something. We maintain our own spinner, so we set the portage
    # spinner to "silent" mode.
    self.spinner = None

    # The portage trees. There are separate portage trees for each root. To get
    # the portage tree for the current root, you can look in self.trees[root],
    # where root = self.settings["ROOT"].
    #
    # In each root, there are three trees: vartree, porttree, and bintree.
    #  - vartree: A database of the currently-installed packages.
    #  - porttree: A database of ebuilds, that can be used to build packages.
    #  - bintree: A database of binary packages.
    self.trees = None


class DepGraphGenerator(object):
  """Grab dependency information about packages from portage.

  Typical usage:
    deps = DepGraphGenerator()
    deps.Initialize(sys.argv[1:])
    deps_tree, deps_info = deps.GenDependencyTree()
    deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
    deps.PrintTree(deps_tree)
    PrintDepsMap(deps_graph)
  """

  __slots__ = ["board", "emerge", "package_db", "show_output", "sysroot",
               "unpack_only", "max_retries"]

  def __init__(self):
    self.board = None
    self.emerge = EmergeData()
    self.package_db = {}
    self.show_output = False
    self.sysroot = None
    self.unpack_only = False
    self.max_retries = 1

  def ParseParallelEmergeArgs(self, argv):
    """Read the parallel emerge arguments from the command-line.

    We need to be compatible with emerge arg format. We scrape arguments that
    are specific to parallel_emerge, and pass through the rest directly to
    emerge.

    Args:
      argv: arguments list

    Returns:
      Arguments that don't belong to parallel_emerge
    """
    emerge_args = []
    for arg in argv:
      # Specifically match arguments that are specific to parallel_emerge, and
      # pass through the rest.
      if arg.startswith("--board="):
        self.board = arg.replace("--board=", "")
      elif arg.startswith("--sysroot="):
        self.sysroot = arg.replace("--sysroot=", "")
      elif arg.startswith("--workon="):
        workon_str = arg.replace("--workon=", "")
        emerge_args.append("--reinstall-atoms=%s" % workon_str)
        emerge_args.append("--usepkg-exclude=%s" % workon_str)
      elif arg.startswith("--force-remote-binary="):
        force_remote_binary = arg.replace("--force-remote-binary=", "")
        emerge_args.append("--useoldpkg-atoms=%s" % force_remote_binary)
      elif arg.startswith("--retries="):
        self.max_retries = int(arg.replace("--retries=", ""))
      elif arg == "--show-output":
        self.show_output = True
      elif arg == "--rebuild":
        emerge_args.append("--rebuild-if-unbuilt")
      elif arg == "--unpackonly":
        emerge_args.append("--fetchonly")
        self.unpack_only = True
      elif arg.startswith("--eventlogfile="):
        log_file_name = arg.replace("--eventlogfile=", "")
        event_logger = cros_event.getEventFileLogger(log_file_name)
        event_logger.setKind('ParallelEmerge')
        cros_event.setEventLogger(event_logger)
      else:
        # Not one of our options, so pass through to emerge.
        emerge_args.append(arg)

    # These packages take a really long time to build, so, for expediency, we
    # are blacklisting them from the automatic rebuilds that are triggered
    # when one of their dependencies is recompiled.
    for pkg in ("chromeos-base/chromeos-chrome",):
      emerge_args.append("--rebuild-exclude=%s" % pkg)

    return emerge_args

  def Initialize(self, args):
    """Initializer. Parses arguments and sets up portage state."""

    # Parse and strip out args that are just intended for parallel_emerge.
    emerge_args = self.ParseParallelEmergeArgs(args)

    if self.sysroot and self.board:
      cros_build_lib.Die("--sysroot and --board are incompatible.")

    # Setup various environment variables based on our current board. These
    # variables are normally setup inside emerge-${BOARD}, but since we don't
    # call that script, we have to set it up here. These variables serve to
    # point our tools at /build/BOARD and to setup cross compiles to the
    # appropriate board as configured in toolchain.conf.
    if self.board:
      self.sysroot = os.environ.get('SYSROOT',
                                    cros_build_lib.GetSysroot(self.board))

    if self.sysroot:
      os.environ["PORTAGE_CONFIGROOT"] = self.sysroot
      os.environ["SYSROOT"] = self.sysroot

    # Turn off interactive delays
    os.environ["EBEEP_IGNORE"] = "1"
    os.environ["EPAUSE_IGNORE"] = "1"
    os.environ["CLEAN_DELAY"] = "0"

    # Parse the emerge options.
    action, opts, cmdline_packages = parse_opts(emerge_args, silent=True)

    # Set environment variables based on options. Portage normally sets these
    # environment variables in emerge_main, but we can't use that function,
    # because it also does a bunch of other stuff that we don't want.
    # TODO(davidjames): Patch portage to move this logic into a function we can
    # reuse here.
    if "--debug" in opts:
      os.environ["PORTAGE_DEBUG"] = "1"
    if "--config-root" in opts:
      os.environ["PORTAGE_CONFIGROOT"] = opts["--config-root"]
    if "--root" in opts:
      os.environ["ROOT"] = opts["--root"]
    if "--accept-properties" in opts:
      os.environ["ACCEPT_PROPERTIES"] = opts["--accept-properties"]

    # If we're installing packages to the board, we can disable vardb locks.
    # This is safe because we only run up to one instance of parallel_emerge in
    # parallel.
    # TODO(davidjames): Enable this for the host too.
    if self.sysroot:
      os.environ.setdefault("PORTAGE_LOCKS", "false")

    # Now that we've setup the necessary environment variables, we can load the
    # emerge config from disk.
    # pylint: disable=unpacking-non-sequence
    settings, trees, mtimedb = load_emerge_config()

    # Add in EMERGE_DEFAULT_OPTS, if specified.
    tmpcmdline = []
    if "--ignore-default-opts" not in opts:
      tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
    tmpcmdline.extend(emerge_args)
    action, opts, cmdline_packages = parse_opts(tmpcmdline)

    # If we're installing to the board, we want the --root-deps option so that
    # portage will install the build dependencies to that location as well.
    if self.sysroot:
      opts.setdefault("--root-deps", True)

    # Check whether our portage tree is out of date. Typically, this happens
    # when you're setting up a new portage tree, such as in setup_board and
    # make_chroot. In that case, portage applies a bunch of global updates
    # here. Once the updates are finished, we need to commit any changes
    # that the global update made to our mtimedb, and reload the config.
    #
    # Portage normally handles this logic in emerge_main, but again, we can't
    # use that function here.
    if _global_updates(trees, mtimedb["updates"]):
      mtimedb.commit()
      # pylint: disable=unpacking-non-sequence
      settings, trees, mtimedb = load_emerge_config(trees=trees)

    # Setup implied options. Portage normally handles this logic in
    # emerge_main.
    if "--buildpkgonly" in opts or "buildpkg" in settings.features:
      opts.setdefault("--buildpkg", True)
    if "--getbinpkgonly" in opts:
      opts.setdefault("--usepkgonly", True)
      opts.setdefault("--getbinpkg", True)
    if "getbinpkg" in settings.features:
      # Per emerge_main, FEATURES=getbinpkg overrides --getbinpkg=n
      opts["--getbinpkg"] = True
    if "--getbinpkg" in opts or "--usepkgonly" in opts:
      opts.setdefault("--usepkg", True)
    if "--fetch-all-uri" in opts:
      opts.setdefault("--fetchonly", True)
    if "--skipfirst" in opts:
      opts.setdefault("--resume", True)
    if "--buildpkgonly" in opts:
      # --buildpkgonly will not merge anything, so it overrides all binary
      # package options.
      for opt in ("--getbinpkg", "--getbinpkgonly",
                  "--usepkg", "--usepkgonly"):
        opts.pop(opt, None)
    if (settings.get("PORTAGE_DEBUG", "") == "1" and
        "python-trace" in settings.features):
      portage.debug.set_trace(True)

    # Complain about unsupported options
    for opt in ("--ask", "--ask-enter-invalid", "--resume", "--skipfirst"):
      if opt in opts:
        print("%s is not supported by parallel_emerge" % opt)
        sys.exit(1)

    # Make emerge specific adjustments to the config (e.g. colors!)
    adjust_configs(opts, trees)

    # Save our configuration so far in the emerge object
    emerge = self.emerge
    emerge.action, emerge.opts = action, opts
    emerge.settings, emerge.trees, emerge.mtimedb = settings, trees, mtimedb
    emerge.cmdline_packages = cmdline_packages
    root = settings["ROOT"]
    emerge.root_config = trees[root]["root_config"]

    if "--usepkg" in opts:
      emerge.trees[root]["bintree"].populate("--getbinpkg" in opts)

  def CreateDepgraph(self, emerge, packages):
    """Create an emerge depgraph object."""
    # Setup emerge options.
    emerge_opts = emerge.opts.copy()

    # Ask portage to build a dependency graph with the options we specified
    # above.
    params = create_depgraph_params(emerge_opts, emerge.action)
    success, depgraph, favorites = backtrack_depgraph(
        emerge.settings, emerge.trees, emerge_opts, params, emerge.action,
        packages, emerge.spinner)
    emerge.depgraph = depgraph

    # Is it impossible to honor the user's request? Bail!
    if not success:
      depgraph.display_problems()
      sys.exit(1)

    emerge.depgraph = depgraph
    emerge.favorites = favorites

    # Prime and flush emerge caches.
    root = emerge.settings["ROOT"]
    vardb = emerge.trees[root]["vartree"].dbapi
    if "--pretend" not in emerge.opts:
      vardb.counter_tick()
    vardb.flush_cache()

  def GenDependencyTree(self):
    """Get dependency tree info from emerge.

    Returns:
      Dependency tree
    """
    start = time.time()

    emerge = self.emerge

    # Create a list of packages to merge
    packages = set(emerge.cmdline_packages[:])

    # Tell emerge to be quiet. We print plenty of info ourselves so we don't
    # need any extra output from portage.
    portage.util.noiselimit = -1

    # My favorite feature: The silent spinner. It doesn't spin. Ever.
    # I'd disable the colors by default too, but they look kind of cool.
    emerge.spinner = stdout_spinner()
    emerge.spinner.update = emerge.spinner.update_quiet

    if "--quiet" not in emerge.opts:
      print("Calculating deps...")

    with cros_event.newEvent(task_name="GenerateDepTree"):
      self.CreateDepgraph(emerge, packages)
      depgraph = emerge.depgraph

    # Build our own tree from the emerge digraph.
    deps_tree = {}
    # pylint: disable=W0212
    digraph = depgraph._dynamic_config.digraph
    root = emerge.settings["ROOT"]
    final_db = depgraph._dynamic_config._filtered_trees[root]['graph_db']
    for node, node_deps in digraph.nodes.items():
      # Calculate dependency packages that need to be installed first. Each
      # child on the digraph is a dependency. The "operation" field specifies
      # what we're doing (e.g. merge, uninstall, etc.). The "priorities" array
      # contains the type of dependency (e.g. build, runtime, runtime_post,
      # etc.)
      #
      # Portage refers to the identifiers for packages as a CPV. This acronym
      # stands for Category/Package-Version.
      #
      # Here's an example CPV: chromeos-base/power_manager-0.0.1-r1
      # Split up, this CPV would be:
      #   C -- Category: chromeos-base
      #   P -- Package: power_manager
      #   V -- Version: 0.0.1-r1
      #
      # We just refer to CPVs as packages here because it's easier.
      deps = {}
      for child, priorities in node_deps[0].items():
        if isinstance(child, Package) and child.root == root:
          cpv = str(child.cpv)
          action = str(child.operation)

          # If we're uninstalling a package, check whether Portage is
          # installing a replacement. If so, just depend on the installation
          # of the new package, because the old package will automatically
          # be uninstalled at that time.
          if action == "uninstall":
            for pkg in final_db.match_pkgs(child.slot_atom):
              cpv = str(pkg.cpv)
              action = "merge"
              break

          deps[cpv] = dict(action=action,
                           deptypes=[str(x) for x in priorities],
                           deps={})

      # We've built our list of deps, so we can add our package to the tree.
      if isinstance(node, Package) and node.root == root:
        deps_tree[str(node.cpv)] = dict(action=str(node.operation),
                                        deps=deps)

    # Ask portage for its install plan, so that we can only throw out
    # dependencies that portage throws out.
    deps_info = {}
    for pkg in depgraph.altlist():
      if isinstance(pkg, Package):
        assert pkg.root == root
        self.package_db[pkg.cpv] = pkg

        # Save off info about the package
        deps_info[str(pkg.cpv)] = {"idx": len(deps_info)}

    seconds = time.time() - start
    if "--quiet" not in emerge.opts:
      print("Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60))

    return deps_tree, deps_info

  def PrintTree(self, deps, depth=""):
    """Print the deps we have seen in the emerge output.

    Args:
      deps: Dependency tree structure.
      depth: Allows printing the tree recursively, with indentation.
    """
    for entry in sorted(deps):
      action = deps[entry]["action"]
      print("%s %s (%s)" % (depth, entry, action))
      self.PrintTree(deps[entry]["deps"], depth=depth + " ")

  def GenDependencyGraph(self, deps_tree, deps_info):
    """Generate a doubly linked dependency graph.

    Args:
      deps_tree: Dependency tree structure.
      deps_info: More details on the dependencies.

    Returns:
      Deps graph in the form of a dict of packages, with each package
      specifying a "needs" list and "provides" list.
    """
    emerge = self.emerge

    # deps_map is the actual dependency graph.
    #
    # Each package specifies a "needs" list and a "provides" list. The "needs"
    # list indicates which packages we depend on. The "provides" list
    # indicates the reverse dependencies -- what packages need us.
    #
    # We also provide some other information in the dependency graph:
    #  - action: What we're planning on doing with this package. Generally,
    #    "merge", "nomerge", or "uninstall"
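    #
    # For illustration only (hypothetical package names), a finished entry
    # looks roughly like:
    #   deps_map["sys-apps/foo-1.0"] = {
    #       "needs": {"sys-libs/bar-2.1-r1": "buildtime/runtime"},
    #       "provides": set(["app-misc/baz-0.2"]),
    #       "action": "merge", "nodeps": False, "binary": False}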
David Jamesfcb70ef2011-02-02 16:02:30 -0800578 deps_map = {}
579
580 def ReverseTree(packages):
581 """Convert tree to digraph.
582
583 Take the tree of package -> requirements and reverse it to a digraph of
584 buildable packages -> packages they unblock.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500585
David Jamesfcb70ef2011-02-02 16:02:30 -0800586 Args:
587 packages: Tree(s) of dependencies.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500588
David Jamesfcb70ef2011-02-02 16:02:30 -0800589 Returns:
590 Unsanitized digraph.
591 """
David James8c7e5e32011-06-28 11:26:03 -0700592 binpkg_phases = set(["setup", "preinst", "postinst"])
David Jamese5e1c0a2014-09-29 17:19:41 -0700593 needed_dep_types = set(["blocker", "buildtime", "buildtime_slot_op",
594 "runtime", "runtime_slot_op"])
595 ignored_dep_types = set(["ignored", "optional", "runtime_post", "soft"])
596 all_dep_types = ignored_dep_types | needed_dep_types
David Jamesfcb70ef2011-02-02 16:02:30 -0800597 for pkg in packages:
598
599 # Create an entry for the package
600 action = packages[pkg]["action"]
David James8c7e5e32011-06-28 11:26:03 -0700601 default_pkg = {"needs": {}, "provides": set(), "action": action,
602 "nodeps": False, "binary": False}
David Jamesfcb70ef2011-02-02 16:02:30 -0800603 this_pkg = deps_map.setdefault(pkg, default_pkg)
604
David James8c7e5e32011-06-28 11:26:03 -0700605 if pkg in deps_info:
606 this_pkg["idx"] = deps_info[pkg]["idx"]
607
608 # If a package doesn't have any defined phases that might use the
609 # dependent packages (i.e. pkg_setup, pkg_preinst, or pkg_postinst),
610 # we can install this package before its deps are ready.
611 emerge_pkg = self.package_db.get(pkg)
612 if emerge_pkg and emerge_pkg.type_name == "binary":
613 this_pkg["binary"] = True
Mike Frysinger66652ec2014-04-24 11:42:25 -0400614 defined_phases = emerge_pkg.defined_phases
David James8c7e5e32011-06-28 11:26:03 -0700615 defined_binpkg_phases = binpkg_phases.intersection(defined_phases)
616 if not defined_binpkg_phases:
617 this_pkg["nodeps"] = True
618
David Jamesfcb70ef2011-02-02 16:02:30 -0800619 # Create entries for dependencies of this package first.
620 ReverseTree(packages[pkg]["deps"])
621
622 # Add dependencies to this package.
623 for dep, dep_item in packages[pkg]["deps"].iteritems():
David James8c7e5e32011-06-28 11:26:03 -0700624 # We only need to enforce strict ordering of dependencies if the
David James3f778802011-08-25 19:31:45 -0700625 # dependency is a blocker, or is a buildtime or runtime dependency.
626 # (I.e., ignored, optional, and runtime_post dependencies don't
627 # depend on ordering.)
David James8c7e5e32011-06-28 11:26:03 -0700628 dep_types = dep_item["deptypes"]
629 if needed_dep_types.intersection(dep_types):
630 deps_map[dep]["provides"].add(pkg)
631 this_pkg["needs"][dep] = "/".join(dep_types)
David Jamesfcb70ef2011-02-02 16:02:30 -0800632
David Jamese5e1c0a2014-09-29 17:19:41 -0700633 # Verify we processed all appropriate dependency types.
634 unknown_dep_types = set(dep_types) - all_dep_types
635 if unknown_dep_types:
636 print("Unknown dependency types found:")
637 print(" %s -> %s (%s)" % (pkg, dep, "/".join(unknown_dep_types)))
638 sys.exit(1)
639
David James3f778802011-08-25 19:31:45 -0700640 # If there's a blocker, Portage may need to move files from one
641 # package to another, which requires editing the CONTENTS files of
642 # both packages. To avoid race conditions while editing this file,
643 # the two packages must not be installed in parallel, so we can't
644 # safely ignore dependencies. See http://crosbug.com/19328
645 if "blocker" in dep_types:
646 this_pkg["nodeps"] = False
647
David Jamesfcb70ef2011-02-02 16:02:30 -0800648 def FindCycles():
649 """Find cycles in the dependency tree.
650
651 Returns:
652 A dict mapping cyclic packages to a dict of the deps that cause
653 cycles. For each dep that causes cycles, it returns an example
654 traversal of the graph that shows the cycle.
655 """
656
657 def FindCyclesAtNode(pkg, cycles, unresolved, resolved):
658 """Find cycles in cyclic dependencies starting at specified package.
659
660 Args:
661 pkg: Package identifier.
662 cycles: A dict mapping cyclic packages to a dict of the deps that
663 cause cycles. For each dep that causes cycles, it returns an
664 example traversal of the graph that shows the cycle.
665 unresolved: Nodes that have been visited but are not fully processed.
666 resolved: Nodes that have been visited and are fully processed.
667 """
668 pkg_cycles = cycles.get(pkg)
669 if pkg in resolved and not pkg_cycles:
670 # If we already looked at this package, and found no cyclic
671 # dependencies, we can stop now.
672 return
673 unresolved.append(pkg)
674 for dep in deps_map[pkg]["needs"]:
675 if dep in unresolved:
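            # dep is already on the current DFS path (unresolved), so this
            # edge closes a cycle; record the cycle for each adjacent pair
            # of packages along it.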
            idx = unresolved.index(dep)
            mycycle = unresolved[idx:] + [dep]
            for i in xrange(len(mycycle) - 1):
              pkg1, pkg2 = mycycle[i], mycycle[i+1]
              cycles.setdefault(pkg1, {}).setdefault(pkg2, mycycle)
          elif not pkg_cycles or dep not in pkg_cycles:
            # Looks like we haven't seen this edge before.
            FindCyclesAtNode(dep, cycles, unresolved, resolved)
        unresolved.pop()
        resolved.add(pkg)

      cycles, unresolved, resolved = {}, [], set()
      for pkg in deps_map:
        FindCyclesAtNode(pkg, cycles, unresolved, resolved)
      return cycles

    def RemoveUnusedPackages():
      """Remove installed packages, propagating dependencies."""
      # Schedule packages that aren't on the install list for removal
      rm_pkgs = set(deps_map.keys()) - set(deps_info.keys())

      # Remove the packages we don't want, simplifying the graph and making
      # it easier for us to crack cycles.
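      # Splice each removed package out of the graph: everything it needed
      # becomes a direct provider of everything it provided, so the ordering
      # constraints implied by the removed node are preserved.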
      for pkg in sorted(rm_pkgs):
        this_pkg = deps_map[pkg]
        needs = this_pkg["needs"]
        provides = this_pkg["provides"]
        for dep in needs:
          dep_provides = deps_map[dep]["provides"]
          dep_provides.update(provides)
          dep_provides.discard(pkg)
          dep_provides.discard(dep)
        for target in provides:
          target_needs = deps_map[target]["needs"]
          target_needs.update(needs)
          target_needs.pop(pkg, None)
          target_needs.pop(target, None)
        del deps_map[pkg]

    def PrintCycleBreak(basedep, dep, mycycle):
      """Print details about a cycle that we are planning on breaking.

      We are breaking a cycle where dep needs basedep. mycycle is an
      example cycle which contains dep -> basedep.
      """

      needs = deps_map[dep]["needs"]
      depinfo = needs.get(basedep, "deleted")

      # It's OK to swap install order for blockers, as long as the two
      # packages aren't installed in parallel. If there is a cycle, then
      # we know the packages depend on each other already, so we can drop the
      # blocker safely without printing a warning.
      if depinfo == "blocker":
        return

      # Notify the user that we're breaking a cycle.
      print("Breaking %s -> %s (%s)" % (dep, basedep, depinfo))

      # Show cycle.
      for i in xrange(len(mycycle) - 1):
        pkg1, pkg2 = mycycle[i], mycycle[i+1]
        needs = deps_map[pkg1]["needs"]
        depinfo = needs.get(pkg2, "deleted")
        if pkg1 == dep and pkg2 == basedep:
          depinfo = depinfo + ", deleting"
        print("  %s -> %s (%s)" % (pkg1, pkg2, depinfo))

    def SanitizeTree():
      """Remove circular dependencies.

      We prune all dependencies involved in cycles that go against the emerge
      ordering. This has a nice property: we're guaranteed to merge
      dependencies in the same order that portage does.

      Because we don't treat any dependencies as "soft" unless they're killed
      by a cycle, we pay attention to a larger number of dependencies when
      merging. This hurts performance a bit, but helps reliability.
      """
      start = time.time()
      cycles = FindCycles()
      while cycles:
        for dep, mycycles in cycles.iteritems():
          for basedep, mycycle in mycycles.iteritems():
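            # "idx" is the package's position in portage's own install plan.
            # Only drop the edge that points against that order, so the merge
            # order we end up with matches what portage would do.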
            if deps_info[basedep]["idx"] >= deps_info[dep]["idx"]:
              if "--quiet" not in emerge.opts:
                PrintCycleBreak(basedep, dep, mycycle)
              del deps_map[dep]["needs"][basedep]
              deps_map[basedep]["provides"].remove(dep)
        cycles = FindCycles()
      seconds = time.time() - start
      if "--quiet" not in emerge.opts and seconds >= 0.1:
        print("Tree sanitized in %dm%.1fs" % (seconds / 60, seconds % 60))

    def FindRecursiveProvides(pkg, seen):
      """Find all nodes that require a particular package.

      Assumes that graph is acyclic.

      Args:
        pkg: Package identifier.
        seen: Nodes that have been visited so far.
      """
      if pkg in seen:
        return
      seen.add(pkg)
      info = deps_map[pkg]
      info["tprovides"] = info["provides"].copy()
      for dep in info["provides"]:
        FindRecursiveProvides(dep, seen)
        info["tprovides"].update(deps_map[dep]["tprovides"])

    ReverseTree(deps_tree)

    # We need to remove unused packages so that we can use the dependency
    # ordering of the install process to show us what cycles to crack.
    RemoveUnusedPackages()
    SanitizeTree()
    seen = set()
    for pkg in deps_map:
      FindRecursiveProvides(pkg, seen)
    return deps_map

  def PrintInstallPlan(self, deps_map):
    """Print an emerge-style install plan.

    The install plan lists what packages we're installing, in order.
    It's useful for understanding what parallel_emerge is doing.

    Args:
      deps_map: The dependency graph.
    """

    def InstallPlanAtNode(target, deps_map):
      nodes = []
      nodes.append(target)
      for dep in deps_map[target]["provides"]:
        del deps_map[dep]["needs"][target]
        if not deps_map[dep]["needs"]:
          nodes.extend(InstallPlanAtNode(dep, deps_map))
      return nodes

    deps_map = copy.deepcopy(deps_map)
    install_plan = []
    plan = set()
    for target, info in deps_map.iteritems():
      if not info["needs"] and target not in plan:
        for item in InstallPlanAtNode(target, deps_map):
          plan.add(item)
          install_plan.append(self.package_db[item])

    for pkg in plan:
      del deps_map[pkg]

    if deps_map:
      print("Cyclic dependencies:", " ".join(deps_map))
      PrintDepsMap(deps_map)
      sys.exit(1)

    self.emerge.depgraph.display(install_plan)


def PrintDepsMap(deps_map):
  """Print dependency graph, for each package list its prerequisites."""
  for i in sorted(deps_map):
    print("%s: (%s) needs" % (i, deps_map[i]["action"]))
    needs = deps_map[i]["needs"]
    for j in sorted(needs):
      print("  %s" % (j))
    if not needs:
      print("   no dependencies")


class EmergeJobState(object):
  """Structure describing the EmergeJobState."""

  __slots__ = ["done", "filename", "last_notify_timestamp", "last_output_seek",
               "last_output_timestamp", "pkgname", "retcode", "start_timestamp",
               "target", "try_count", "fetch_only", "unpack_only"]

  def __init__(self, target, pkgname, done, filename, start_timestamp,
               retcode=None, fetch_only=False, try_count=0, unpack_only=False):

    # The full name of the target we're building (e.g.
    # virtual/target-os-1-r60)
    self.target = target

    # The short name of the target we're building (e.g. target-os-1-r60)
    self.pkgname = pkgname

    # Whether the job is done. (True if the job is done; false otherwise.)
    self.done = done

    # The filename where output is currently stored.
    self.filename = filename

    # The timestamp of the last time we printed the name of the log file. We
    # print this at the beginning of the job, so this starts at
    # start_timestamp.
    self.last_notify_timestamp = start_timestamp

    # The location (in bytes) of the end of the last complete line we printed.
    # This starts off at zero. We use this to jump to the right place when we
    # print output from the same ebuild multiple times.
    self.last_output_seek = 0

    # The timestamp of the last time we printed output. Since we haven't
    # printed output yet, this starts at zero.
    self.last_output_timestamp = 0

    # The return code of our job, if the job is actually finished.
    self.retcode = retcode

    # Number of tries for this job
    self.try_count = try_count

    # Was this just a fetch job?
    self.fetch_only = fetch_only

    # The timestamp when our job started.
    self.start_timestamp = start_timestamp

    # No emerge, only unpack packages.
    self.unpack_only = unpack_only


def KillHandler(_signum, _frame):
  # Kill self and all subprocesses.
  os.killpg(0, signal.SIGKILL)


def SetupWorkerSignals():
  def ExitHandler(_signum, _frame):
    # Set KILLED flag.
    KILLED.set()

    # Remove our signal handlers so we don't get called recursively.
    signal.signal(signal.SIGINT, KillHandler)
    signal.signal(signal.SIGTERM, KillHandler)

  # Ensure that we exit quietly and cleanly, if possible, when we receive
  # SIGTERM or SIGINT signals. By default, when the user hits CTRL-C, all
  # of the child processes will print details about KeyboardInterrupt
  # exceptions, which isn't very helpful.
  signal.signal(signal.SIGINT, ExitHandler)
  signal.signal(signal.SIGTERM, ExitHandler)


def EmergeProcess(output, job_state, *args, **kwargs):
  """Merge a package in a subprocess.

  Args:
    output: Temporary file to write output.
    job_state: Stored state of package
    *args: Arguments to pass to Scheduler constructor.
    **kwargs: Keyword arguments to pass to Scheduler constructor.

  Returns:
    The exit code returned by the subprocess.
  """

  target = job_state.target

  job_state.try_count += 1

  cpv = portage_util.SplitCPV(target)

  event = cros_event.newEvent(task_name="EmergePackage",
                              name=cpv.package,
                              category=cpv.category,
                              version=cpv.version,
                              try_count=job_state.try_count)
  pid = os.fork()
  if pid == 0:
    try:
      proctitle.settitle('EmergeProcess', target)

      # Sanity checks.
      if sys.stdout.fileno() != 1:
        raise Exception("sys.stdout.fileno() != 1")
      if sys.stderr.fileno() != 2:
        raise Exception("sys.stderr.fileno() != 2")

      # - Redirect 1 (stdout) and 2 (stderr) at our temporary file.
      # - Redirect 0 to point at sys.stdin. In this case, sys.stdin
      #   points at a file reading os.devnull, because multiprocessing mucks
      #   with sys.stdin.
      # - Leave the sys.stdin and output filehandles alone.
      fd_pipes = {0: sys.stdin.fileno(),
                  1: output.fileno(),
                  2: output.fileno(),
                  sys.stdin.fileno(): sys.stdin.fileno(),
                  output.fileno(): output.fileno()}
      # pylint: disable=W0212
      portage.process._setup_pipes(fd_pipes, close_fds=False)

      # Portage doesn't like when sys.stdin.fileno() != 0, so point sys.stdin
      # at the filehandle we just created in _setup_pipes.
      if sys.stdin.fileno() != 0:
        sys.__stdin__ = sys.stdin = os.fdopen(0, "r")

      scheduler = Scheduler(*args, **kwargs)

      # Enable blocker handling even though we're in --nodeps mode. This
      # allows us to unmerge the blocker after we've merged the replacement.
      scheduler._opts_ignore_blockers = frozenset()

      # Actually do the merge.
      with event:
        job_state.retcode = scheduler.merge()
        if job_state.retcode != 0:
          event.fail(message="non-zero value returned")

    # We catch all exceptions here (including SystemExit, KeyboardInterrupt,
    # etc) so as to ensure that we don't confuse the multiprocessing module,
    # which expects that all forked children exit with os._exit().
    # pylint: disable=W0702
    except:
      traceback.print_exc(file=output)
      job_state.retcode = 1
    sys.stdout.flush()
    sys.stderr.flush()
    output.flush()
    # pylint: disable=W0212
    os._exit(job_state.retcode)
  else:
    # Return the exit code of the subprocess.
    return os.waitpid(pid, 0)[1]


def UnpackPackage(pkg_state):
  """Unpacks package described by pkg_state.

  Args:
    pkg_state: EmergeJobState object describing target.

  Returns:
    Exit code returned by subprocess.
  """
  pkgdir = os.environ.get("PKGDIR",
                          os.path.join(os.environ["SYSROOT"], "packages"))
  root = os.environ.get("ROOT", os.environ["SYSROOT"])
  path = os.path.join(pkgdir, pkg_state.target + ".tbz2")
  comp = cros_build_lib.FindCompressor(cros_build_lib.COMP_BZIP2)
  cmd = [comp, "-dc"]
  if comp.endswith("pbzip2"):
    cmd.append("--ignore-trailing-garbage=1")
  cmd.append(path)
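  # Stage 1: decompress the binary package (.tbz2) to stdout with bzip2 or
  # pbzip2; the "sudo tar" stage below consumes that stream and extracts it
  # under root.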

  with cros_event.newEvent(task_name="UnpackPackage", **pkg_state) as event:
    result = cros_build_lib.RunCommand(cmd, cwd=root, stdout_to_pipe=True,
                                       print_cmd=False, error_code_ok=True)

    # If we were not successful, return now and don't attempt untar.
    if result.returncode != 0:
      event.fail("error decompressing: returned {}".format(result.returncode))
      return result.returncode

    cmd = ["sudo", "tar", "-xf", "-", "-C", root]

    result = cros_build_lib.RunCommand(cmd, cwd=root, input=result.output,
                                       print_cmd=False, error_code_ok=True)
    if result.returncode != 0:
      event.fail("error extracting: returned {}".format(result.returncode))

  return result.returncode


def EmergeWorker(task_queue, job_queue, emerge, package_db, fetch_only=False,
                 unpack_only=False):
  """This worker emerges any packages given to it on the task_queue.

  Args:
    task_queue: The queue of tasks for this worker to do.
    job_queue: The queue of results from the worker.
    emerge: An EmergeData() object.
    package_db: A dict, mapping package ids to portage Package objects.
    fetch_only: A bool, indicating if we should just fetch the target.
    unpack_only: A bool, indicating if we should just unpack the target.

  It expects package identifiers to be passed to it via task_queue. When
  a task is started, it pushes the (target, filename) to the started_queue.
  The output is stored in filename. When a merge starts or finishes, we push
  EmergeJobState objects to the job_queue.
  """
  if fetch_only:
    mode = 'fetch'
  elif unpack_only:
    mode = 'unpack'
  else:
    mode = 'emerge'
  proctitle.settitle('EmergeWorker', mode, '[idle]')

  SetupWorkerSignals()
  settings, trees, mtimedb = emerge.settings, emerge.trees, emerge.mtimedb

  # Disable flushing of caches to save on I/O.
  root = emerge.settings["ROOT"]
  vardb = emerge.trees[root]["vartree"].dbapi
  vardb._flush_cache_enabled = False  # pylint: disable=protected-access
  bindb = emerge.trees[root]["bintree"].dbapi
  # Might be a set, might be a list, might be None; no clue, just use shallow
  # copy to ensure we can roll it back.
  # pylint: disable=W0212
  original_remotepkgs = copy.copy(bindb.bintree._remotepkgs)

  opts, spinner = emerge.opts, emerge.spinner
  opts["--nodeps"] = True
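  # Each worker merges exactly one package per task with --nodeps; dependency
  # ordering is handled by the main process that feeds the task_queue.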
  if fetch_only:
    opts["--fetchonly"] = True

  while True:
    # Wait for a new item to show up on the queue. This is a blocking wait,
    # so if there's nothing to do, we just sit here.
    pkg_state = task_queue.get()
    if pkg_state is None:
      # If target is None, this means that the main thread wants us to quit.
      # The other workers need to exit too, so we'll push the message back on
      # to the queue so they'll get it too.
      task_queue.put(None)
      return
    if KILLED.is_set():
      return

    target = pkg_state.target
    proctitle.settitle('EmergeWorker', mode, target)

    db_pkg = package_db[target]

    if db_pkg.type_name == "binary":
      if not fetch_only and pkg_state.fetched_successfully:
        # Ensure portage doesn't think our pkg is remote- else it'll force
        # a redownload of it (even if the on-disk file is fine). In-memory
        # caching basically, implemented dumbly.
        bindb.bintree._remotepkgs = None
      else:
        bindb.bintree._remotepkgs = original_remotepkgs

    db_pkg.root_config = emerge.root_config
    install_list = [db_pkg]
    pkgname = db_pkg.pf
    output = tempfile.NamedTemporaryFile(prefix=pkgname + "-", delete=False)
    os.chmod(output.name, 0o644)
David Jamesfcb70ef2011-02-02 16:02:30 -08001119 start_timestamp = time.time()
Brian Harring0be85c62012-03-17 19:52:12 -07001120 job = EmergeJobState(target, pkgname, False, output.name, start_timestamp,
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001121 fetch_only=fetch_only, unpack_only=unpack_only)
David Jamesfcb70ef2011-02-02 16:02:30 -08001122 job_queue.put(job)
1123 if "--pretend" in opts:
Chris Ching73486ab2017-04-26 18:02:37 -06001124 job.retcode = 0
David Jamesfcb70ef2011-02-02 16:02:30 -08001125 else:
David Jamesfcb70ef2011-02-02 16:02:30 -08001126 try:
David James386ccd12011-05-04 20:17:42 -07001127 emerge.scheduler_graph.mergelist = install_list
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001128 if unpack_only:
Chris Ching73486ab2017-04-26 18:02:37 -06001129 job.retcode = UnpackPackage(pkg_state)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001130 else:
Chris Ching73486ab2017-04-26 18:02:37 -06001131 job.retcode = EmergeProcess(output, job, settings, trees, mtimedb,
1132 opts, spinner,
1133 favorites=emerge.favorites,
1134 graph_config=emerge.scheduler_graph)
David Jamesfcb70ef2011-02-02 16:02:30 -08001135 except Exception:
1136 traceback.print_exc(file=output)
Chris Ching73486ab2017-04-26 18:02:37 -06001137 job.retcode = 1
David James1ed3e252011-10-05 20:26:15 -07001138 output.close()
David Jamesfcb70ef2011-02-02 16:02:30 -08001139
David James7358d032011-05-19 10:40:03 -07001140 if KILLED.is_set():
1141 return
1142
David Jamesfcb70ef2011-02-02 16:02:30 -08001143 job = EmergeJobState(target, pkgname, True, output.name, start_timestamp,
Chris Ching73486ab2017-04-26 18:02:37 -06001144 job.retcode, fetch_only=fetch_only,
1145 try_count=job.try_count, unpack_only=unpack_only)
David Jamesfcb70ef2011-02-02 16:02:30 -08001146 job_queue.put(job)
1147
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001148 # Set the title back to idle as the multiprocess pool won't destroy us;
1149 # when another job comes up, it'll re-use this process.
1150 proctitle.settitle('EmergeWorker', mode, '[idle]')
1151
David Jamesfcb70ef2011-02-02 16:02:30 -08001152
1153class LinePrinter(object):
1154 """Helper object to print a single line."""
1155
1156 def __init__(self, line):
1157 self.line = line
1158
David James321490a2012-12-17 12:05:56 -08001159 def Print(self, _seek_locations):
Mike Frysinger383367e2014-09-16 15:06:17 -04001160 print(self.line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001161
1162
1163class JobPrinter(object):
1164 """Helper object to print output of a job."""
1165
1166 def __init__(self, job, unlink=False):
1167 """Print output of job.
1168
Mike Frysinger02e1e072013-11-10 22:11:34 -05001169 If unlink is True, unlink the job output file when done.
1170 """
David Jamesfcb70ef2011-02-02 16:02:30 -08001171 self.current_time = time.time()
1172 self.job = job
1173 self.unlink = unlink
1174
1175 def Print(self, seek_locations):
1176
1177 job = self.job
1178
1179 # Calculate how long the job has been running.
1180 seconds = self.current_time - job.start_timestamp
1181
1182 # Note that we've printed out the job so far.
1183 job.last_output_timestamp = self.current_time
1184
1185    # Build a short description of this job for the output headers below.
1186 info = "job %s (%dm%.1fs)" % (job.pkgname, seconds / 60, seconds % 60)
1187 last_output_seek = seek_locations.get(job.filename, 0)
1188 if last_output_seek:
Mike Frysinger383367e2014-09-16 15:06:17 -04001189 print("=== Continue output for %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001190 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001191 print("=== Start output for %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001192
1193 # Print actual output from job
1194 f = codecs.open(job.filename, encoding='utf-8', errors='replace')
1195 f.seek(last_output_seek)
1196 prefix = job.pkgname + ":"
1197 for line in f:
1198
1199 # Save off our position in the file
1200 if line and line[-1] == "\n":
1201 last_output_seek = f.tell()
1202 line = line[:-1]
1203
1204 # Print our line
Mike Frysinger383367e2014-09-16 15:06:17 -04001205 print(prefix, line.encode('utf-8', 'replace'))
David Jamesfcb70ef2011-02-02 16:02:30 -08001206 f.close()
1207
1208 # Save our last spot in the file so that we don't print out the same
1209 # location twice.
1210 seek_locations[job.filename] = last_output_seek
1211
1212 # Note end of output section
1213 if job.done:
Mike Frysinger383367e2014-09-16 15:06:17 -04001214 print("=== Complete: %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001215 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001216 print("=== Still running: %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001217
1218 if self.unlink:
1219 os.unlink(job.filename)
1220
1221
1222def PrintWorker(queue):
1223 """A worker that prints stuff to the screen as requested."""
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001224 proctitle.settitle('PrintWorker')
David Jamesfcb70ef2011-02-02 16:02:30 -08001225
David James321490a2012-12-17 12:05:56 -08001226 def ExitHandler(_signum, _frame):
David James7358d032011-05-19 10:40:03 -07001227 # Set KILLED flag.
1228 KILLED.set()
1229
David Jamesfcb70ef2011-02-02 16:02:30 -08001230 # Switch to default signal handlers so that we'll die after two signals.
David James7358d032011-05-19 10:40:03 -07001231 signal.signal(signal.SIGINT, KillHandler)
1232 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -08001233
1234 # Don't exit on the first SIGINT / SIGTERM, because the parent worker will
1235 # handle it and tell us when we need to exit.
1236 signal.signal(signal.SIGINT, ExitHandler)
1237 signal.signal(signal.SIGTERM, ExitHandler)
1238
1239 # seek_locations is a map indicating the position we are at in each file.
1240 # It starts off empty, but is set by the various Print jobs as we go along
1241 # to indicate where we left off in each file.
1242 seek_locations = {}
1243 while True:
1244 try:
1245 job = queue.get()
1246 if job:
1247 job.Print(seek_locations)
David Jamesbccf8eb2011-07-27 14:06:06 -07001248 sys.stdout.flush()
David Jamesfcb70ef2011-02-02 16:02:30 -08001249 else:
1250 break
1251 except IOError as ex:
1252 if ex.errno == errno.EINTR:
1253 # Looks like we received a signal. Keep printing.
1254 continue
1255 raise
1256
Brian Harring867e2362012-03-17 04:05:17 -07001257
Brian Harring0be85c62012-03-17 19:52:12 -07001258class TargetState(object):
Chris Ching5fcbd622016-11-28 09:22:15 -07001259  """Per-target state: dep info, priority score, and fetch status."""
Brian Harring867e2362012-03-17 04:05:17 -07001260
Brian Harring0be85c62012-03-17 19:52:12 -07001261 __slots__ = ("target", "info", "score", "prefetched", "fetched_successfully")
Brian Harring867e2362012-03-17 04:05:17 -07001262
David James321490a2012-12-17 12:05:56 -08001263 def __init__(self, target, info):
Brian Harring867e2362012-03-17 04:05:17 -07001264 self.target, self.info = target, info
Brian Harring0be85c62012-03-17 19:52:12 -07001265 self.fetched_successfully = False
1266 self.prefetched = False
David James321490a2012-12-17 12:05:56 -08001267 self.score = None
Brian Harring867e2362012-03-17 04:05:17 -07001268 self.update_score()
1269
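  # TargetStates compare by score, so heapq (a min-heap) always yields the
  # highest-priority target first; Python 2 routes those comparisons through
  # __cmp__ below.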
1270 def __cmp__(self, other):
1271 return cmp(self.score, other.score)
1272
1273 def update_score(self):
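    # heapq pops the smallest tuple first, so targets that unblock more
    # packages ("tprovides"), have fewer unmet needs, and are binary rather
    # than source builds sort sooner; the remaining fields are tie-breakers.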
1274 self.score = (
1275 -len(self.info["tprovides"]),
Brian Harring0be85c62012-03-17 19:52:12 -07001276 len(self.info["needs"]),
Brian Harring11c5eeb2012-03-18 11:02:39 -07001277 not self.info["binary"],
Brian Harring867e2362012-03-17 04:05:17 -07001278 -len(self.info["provides"]),
1279 self.info["idx"],
1280 self.target,
1281 )
1282
1283
1284class ScoredHeap(object):
Don Garrett25f309a2014-03-19 14:02:12 -07001285  """Min-heap of TargetStates ordered by score, with fast membership tests."""
Brian Harring867e2362012-03-17 04:05:17 -07001286
Brian Harring0be85c62012-03-17 19:52:12 -07001287 __slots__ = ("heap", "_heap_set")
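  # `heap` holds the TargetState objects in heapq order; `_heap_set` mirrors
  # their target names so `in` checks don't have to scan the whole heap.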
1288
Brian Harring867e2362012-03-17 04:05:17 -07001289 def __init__(self, initial=()):
Brian Harring0be85c62012-03-17 19:52:12 -07001290 self.heap = list()
1291 self._heap_set = set()
1292 if initial:
1293 self.multi_put(initial)
Brian Harring867e2362012-03-17 04:05:17 -07001294
1295 def get(self):
Brian Harring0be85c62012-03-17 19:52:12 -07001296 item = heapq.heappop(self.heap)
1297 self._heap_set.remove(item.target)
1298 return item
Brian Harring867e2362012-03-17 04:05:17 -07001299
Brian Harring0be85c62012-03-17 19:52:12 -07001300 def put(self, item):
1301 if not isinstance(item, TargetState):
1302 raise ValueError("Item %r isn't a TargetState" % (item,))
1303 heapq.heappush(self.heap, item)
1304 self._heap_set.add(item.target)
Brian Harring867e2362012-03-17 04:05:17 -07001305
Brian Harring0be85c62012-03-17 19:52:12 -07001306 def multi_put(self, sequence):
1307 sequence = list(sequence)
1308 self.heap.extend(sequence)
1309 self._heap_set.update(x.target for x in sequence)
Brian Harring867e2362012-03-17 04:05:17 -07001310 self.sort()
1311
David James5c9996d2012-03-24 10:50:46 -07001312 def sort(self):
1313 heapq.heapify(self.heap)
1314
Brian Harring0be85c62012-03-17 19:52:12 -07001315 def __contains__(self, target):
1316 return target in self._heap_set
1317
1318 def __nonzero__(self):
1319 return bool(self.heap)
1320
Brian Harring867e2362012-03-17 04:05:17 -07001321 def __len__(self):
1322 return len(self.heap)
1323
1324
David Jamesfcb70ef2011-02-02 16:02:30 -08001325class EmergeQueue(object):
1326 """Class to schedule emerge jobs according to a dependency graph."""
1327
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001328 def __init__(self, deps_map, emerge, package_db, show_output, unpack_only,
1329 max_retries):
David Jamesfcb70ef2011-02-02 16:02:30 -08001330 # Store the dependency graph.
1331 self._deps_map = deps_map
Brian Harring0be85c62012-03-17 19:52:12 -07001332 self._state_map = {}
David Jamesfcb70ef2011-02-02 16:02:30 -08001333 # Initialize the running queue to empty
Brian Harring0be85c62012-03-17 19:52:12 -07001334 self._build_jobs = {}
1335 self._build_ready = ScoredHeap()
1336 self._fetch_jobs = {}
1337 self._fetch_ready = ScoredHeap()
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001338 self._unpack_jobs = {}
1339 self._unpack_ready = ScoredHeap()
David Jamesfcb70ef2011-02-02 16:02:30 -08001340 # List of total package installs represented in deps_map.
1341 install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"]
1342 self._total_jobs = len(install_jobs)
1343 self._show_output = show_output
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001344 self._unpack_only = unpack_only
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001345 self._max_retries = max_retries
David Jamesfcb70ef2011-02-02 16:02:30 -08001346
1347 if "--pretend" in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001348 print("Skipping merge because of --pretend mode.")
David Jamesfcb70ef2011-02-02 16:02:30 -08001349 sys.exit(0)
1350
David Jamesaaf49e42014-04-24 09:40:05 -07001351 # Set up a session so we can easily terminate all children.
1352 self._SetupSession()
David James7358d032011-05-19 10:40:03 -07001353
David Jamesfcb70ef2011-02-02 16:02:30 -08001354 # Setup scheduler graph object. This is used by the child processes
1355 # to help schedule jobs.
1356 emerge.scheduler_graph = emerge.depgraph.schedulerGraph()
1357
1358 # Calculate how many jobs we can run in parallel. We don't want to pass
1359 # the --jobs flag over to emerge itself, because that'll tell emerge to
1360 # hide its output, and said output is quite useful for debugging hung
1361 # jobs.
1362 procs = min(self._total_jobs,
1363 emerge.opts.pop("--jobs", multiprocessing.cpu_count()))
Nam T. Nguyenf7098b32015-05-08 11:04:48 -07001364 self._build_procs = self._unpack_procs = max(1, procs)
1365 # Fetch is IO bound, we can use more processes.
1366 self._fetch_procs = max(4, procs)
David James8c7e5e32011-06-28 11:26:03 -07001367 self._load_avg = emerge.opts.pop("--load-average", None)
David Jamesfcb70ef2011-02-02 16:02:30 -08001368 self._job_queue = multiprocessing.Queue()
1369 self._print_queue = multiprocessing.Queue()
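    # Workers report EmergeJobState progress on _job_queue; all console output
    # is funneled through _print_queue to the single PrintWorker process.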
Brian Harring0be85c62012-03-17 19:52:12 -07001370
1371 self._fetch_queue = multiprocessing.Queue()
1372 args = (self._fetch_queue, self._job_queue, emerge, package_db, True)
1373 self._fetch_pool = multiprocessing.Pool(self._fetch_procs, EmergeWorker,
1374 args)
1375
1376 self._build_queue = multiprocessing.Queue()
1377 args = (self._build_queue, self._job_queue, emerge, package_db)
1378 self._build_pool = multiprocessing.Pool(self._build_procs, EmergeWorker,
1379 args)
1380
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001381 if self._unpack_only:
1382 # Unpack pool only required on unpack_only jobs.
1383 self._unpack_queue = multiprocessing.Queue()
1384 args = (self._unpack_queue, self._job_queue, emerge, package_db, False,
1385 True)
1386 self._unpack_pool = multiprocessing.Pool(self._unpack_procs, EmergeWorker,
1387 args)
1388
David Jamesfcb70ef2011-02-02 16:02:30 -08001389 self._print_worker = multiprocessing.Process(target=PrintWorker,
1390 args=[self._print_queue])
1391 self._print_worker.start()
1392
1393 # Initialize the failed queue to empty.
1394 self._retry_queue = []
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001395 self._failed_count = dict()
David Jamesfcb70ef2011-02-02 16:02:30 -08001396
David Jamesfcb70ef2011-02-02 16:02:30 -08001397 # Setup an exit handler so that we print nice messages if we are
1398 # terminated.
1399 self._SetupExitHandler()
1400
1401 # Schedule our jobs.
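    # Every target starts out in the fetch-ready heap; build (and unpack) jobs
    # are queued later, once a package is fetched and its deps are satisfied.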
Brian Harring0be85c62012-03-17 19:52:12 -07001402 self._state_map.update(
1403 (pkg, TargetState(pkg, data)) for pkg, data in deps_map.iteritems())
1404 self._fetch_ready.multi_put(self._state_map.itervalues())
David Jamesfcb70ef2011-02-02 16:02:30 -08001405
David Jamesaaf49e42014-04-24 09:40:05 -07001406 def _SetupSession(self):
1407 """Set up a session so we can easily terminate all children."""
1408 # When we call os.setsid(), this sets up a session / process group for this
1409 # process and all children. These session groups are needed so that we can
1410 # easily kill all children (including processes launched by emerge) before
1411 # we exit.
1412 #
1413 # One unfortunate side effect of os.setsid() is that it blocks CTRL-C from
1414 # being received. To work around this, we only call os.setsid() in a forked
1415 # process, so that the parent can still watch for CTRL-C. The parent will
1416 # just sit around, watching for signals and propagating them to the child,
1417 # until the child exits.
1418 #
1419 # TODO(davidjames): It would be nice if we could replace this with cgroups.
1420 pid = os.fork()
1421 if pid == 0:
1422 os.setsid()
1423 else:
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001424 proctitle.settitle('SessionManager')
1425
David Jamesaaf49e42014-04-24 09:40:05 -07001426 def PropagateToChildren(signum, _frame):
1427 # Just propagate the signals down to the child. We'll exit when the
1428 # child does.
1429 try:
1430 os.kill(pid, signum)
1431 except OSError as ex:
1432 if ex.errno != errno.ESRCH:
1433 raise
1434 signal.signal(signal.SIGINT, PropagateToChildren)
1435 signal.signal(signal.SIGTERM, PropagateToChildren)
1436
1437 def StopGroup(_signum, _frame):
1438 # When we get stopped, stop the children.
1439 try:
1440 os.killpg(pid, signal.SIGSTOP)
1441 os.kill(0, signal.SIGSTOP)
1442 except OSError as ex:
1443 if ex.errno != errno.ESRCH:
1444 raise
1445 signal.signal(signal.SIGTSTP, StopGroup)
1446
1447 def ContinueGroup(_signum, _frame):
1448 # Launch the children again after being stopped.
1449 try:
1450 os.killpg(pid, signal.SIGCONT)
1451 except OSError as ex:
1452 if ex.errno != errno.ESRCH:
1453 raise
1454 signal.signal(signal.SIGCONT, ContinueGroup)
1455
1456 # Loop until the children exit. We exit with os._exit to be sure we
1457 # don't run any finalizers (those will be run by the child process.)
1458 # pylint: disable=W0212
1459 while True:
1460 try:
1461 # Wait for the process to exit. When it does, exit with the return
1462 # value of the subprocess.
Mike Frysingere2d8f0d2014-11-01 13:09:26 -04001463 os._exit(process_util.GetExitStatus(os.waitpid(pid, 0)[1]))
David Jamesaaf49e42014-04-24 09:40:05 -07001464 except OSError as ex:
1465 if ex.errno == errno.EINTR:
1466 continue
1467 traceback.print_exc()
1468 os._exit(1)
1469 except BaseException:
1470 traceback.print_exc()
1471 os._exit(1)
1472
David Jamesfcb70ef2011-02-02 16:02:30 -08001473 def _SetupExitHandler(self):
1474
David James321490a2012-12-17 12:05:56 -08001475 def ExitHandler(signum, _frame):
David James7358d032011-05-19 10:40:03 -07001476 # Set KILLED flag.
1477 KILLED.set()
David Jamesfcb70ef2011-02-02 16:02:30 -08001478
1479 # Kill our signal handlers so we don't get called recursively
David James7358d032011-05-19 10:40:03 -07001480 signal.signal(signal.SIGINT, KillHandler)
1481 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -08001482
1483 # Print our current job status
Brian Harring0be85c62012-03-17 19:52:12 -07001484 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001485 if job:
1486 self._print_queue.put(JobPrinter(job, unlink=True))
1487
1488 # Notify the user that we are exiting
1489 self._Print("Exiting on signal %s" % signum)
David James7358d032011-05-19 10:40:03 -07001490 self._print_queue.put(None)
1491 self._print_worker.join()
David Jamesfcb70ef2011-02-02 16:02:30 -08001492
1493 # Kill child threads, then exit.
David James7358d032011-05-19 10:40:03 -07001494 os.killpg(0, signal.SIGKILL)
David Jamesfcb70ef2011-02-02 16:02:30 -08001495 sys.exit(1)
1496
1497 # Print out job status when we are killed
1498 signal.signal(signal.SIGINT, ExitHandler)
1499 signal.signal(signal.SIGTERM, ExitHandler)
1500
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001501 def _ScheduleUnpack(self, pkg_state):
1502 self._unpack_jobs[pkg_state.target] = None
1503 self._unpack_queue.put(pkg_state)
1504
Brian Harring0be85c62012-03-17 19:52:12 -07001505 def _Schedule(self, pkg_state):
David Jamesfcb70ef2011-02-02 16:02:30 -08001506    # We maintain a tree of all deps; if this one doesn't need
David James8c7e5e32011-06-28 11:26:03 -07001507    # to be installed, just free up its children and continue.
David Jamesfcb70ef2011-02-02 16:02:30 -08001508 # It is possible to reinstall deps of deps, without reinstalling
1509 # first level deps, like so:
Mike Frysingerfd969312014-04-02 22:16:42 -04001510 # virtual/target-os (merge) -> eselect (nomerge) -> python (merge)
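    # Note: this returns True only when a build job is actually queued;
    # _Retry relies on that to know whether a retry was scheduled.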
Brian Harring0be85c62012-03-17 19:52:12 -07001511 this_pkg = pkg_state.info
1512 target = pkg_state.target
1513 if pkg_state.info is not None:
1514 if this_pkg["action"] == "nomerge":
1515 self._Finish(target)
1516 elif target not in self._build_jobs:
1517 # Kick off the build if it's marked to be built.
1518 self._build_jobs[target] = None
1519 self._build_queue.put(pkg_state)
1520 return True
David Jamesfcb70ef2011-02-02 16:02:30 -08001521
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001522 def _ScheduleLoop(self, unpack_only=False):
1523 if unpack_only:
1524 ready_queue = self._unpack_ready
1525 jobs_queue = self._unpack_jobs
1526 procs = self._unpack_procs
1527 else:
1528 ready_queue = self._build_ready
1529 jobs_queue = self._build_jobs
1530 procs = self._build_procs
1531
David James8c7e5e32011-06-28 11:26:03 -07001532 # If the current load exceeds our desired load average, don't schedule
1533 # more than one job.
1534 if self._load_avg and os.getloadavg()[0] > self._load_avg:
1535 needed_jobs = 1
1536 else:
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001537 needed_jobs = procs
David James8c7e5e32011-06-28 11:26:03 -07001538
1539 # Schedule more jobs.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001540 while ready_queue and len(jobs_queue) < needed_jobs:
1541 state = ready_queue.get()
1542 if unpack_only:
1543 self._ScheduleUnpack(state)
1544 else:
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001545 if state.target not in self._failed_count:
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001546 self._Schedule(state)
David Jamesfcb70ef2011-02-02 16:02:30 -08001547
1548 def _Print(self, line):
1549 """Print a single line."""
1550 self._print_queue.put(LinePrinter(line))
1551
1552 def _Status(self):
1553 """Print status."""
1554 current_time = time.time()
Aviv Keshet3b381682015-11-12 13:15:06 -08001555 current_time_struct = time.localtime(current_time)
David Jamesfcb70ef2011-02-02 16:02:30 -08001556 no_output = True
1557
1558 # Print interim output every minute if --show-output is used. Otherwise,
1559 # print notifications about running packages every 2 minutes, and print
1560 # full output for jobs that have been running for 60 minutes or more.
1561 if self._show_output:
1562 interval = 60
1563 notify_interval = 0
1564 else:
1565 interval = 60 * 60
1566 notify_interval = 60 * 2
David James321490a2012-12-17 12:05:56 -08001567 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001568 if job:
1569 last_timestamp = max(job.start_timestamp, job.last_output_timestamp)
1570 if last_timestamp + interval < current_time:
1571 self._print_queue.put(JobPrinter(job))
1572 job.last_output_timestamp = current_time
1573 no_output = False
1574 elif (notify_interval and
1575 job.last_notify_timestamp + notify_interval < current_time):
1576 job_seconds = current_time - job.start_timestamp
1577 args = (job.pkgname, job_seconds / 60, job_seconds % 60, job.filename)
1578 info = "Still building %s (%dm%.1fs). Logs in %s" % args
1579 job.last_notify_timestamp = current_time
1580 self._Print(info)
1581 no_output = False
1582
1583 # If we haven't printed any messages yet, print a general status message
1584 # here.
1585 if no_output:
1586 seconds = current_time - GLOBAL_START
Brian Harring0be85c62012-03-17 19:52:12 -07001587 fjobs, fready = len(self._fetch_jobs), len(self._fetch_ready)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001588 ujobs, uready = len(self._unpack_jobs), len(self._unpack_ready)
Brian Harring0be85c62012-03-17 19:52:12 -07001589 bjobs, bready = len(self._build_jobs), len(self._build_ready)
1590 retries = len(self._retry_queue)
1591 pending = max(0, len(self._deps_map) - fjobs - bjobs)
1592 line = "Pending %s/%s, " % (pending, self._total_jobs)
1593 if fjobs or fready:
1594 line += "Fetching %s/%s, " % (fjobs, fready + fjobs)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001595 if ujobs or uready:
1596 line += "Unpacking %s/%s, " % (ujobs, uready + ujobs)
Brian Harring0be85c62012-03-17 19:52:12 -07001597 if bjobs or bready or retries:
1598 line += "Building %s/%s, " % (bjobs, bready + bjobs)
1599 if retries:
1600 line += "Retrying %s, " % (retries,)
Mike Frysingerd6e2df02014-11-26 02:55:04 -05001601 load = " ".join(str(x) for x in os.getloadavg())
Aviv Keshet3b381682015-11-12 13:15:06 -08001602 line += ("[Time %s | Elapsed %dm%.1fs | Load %s]" % (
1603 time.strftime('%H:%M:%S', current_time_struct), seconds / 60,
1604 seconds % 60, load))
Brian Harring0be85c62012-03-17 19:52:12 -07001605 self._Print(line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001606
1607 def _Finish(self, target):
David James8c7e5e32011-06-28 11:26:03 -07001608 """Mark a target as completed and unblock dependencies."""
1609 this_pkg = self._deps_map[target]
1610 if this_pkg["needs"] and this_pkg["nodeps"]:
1611 # We got installed, but our deps have not been installed yet. Dependent
1612 # packages should only be installed when our needs have been fully met.
1613 this_pkg["action"] = "nomerge"
1614 else:
David James8c7e5e32011-06-28 11:26:03 -07001615 for dep in this_pkg["provides"]:
1616 dep_pkg = self._deps_map[dep]
Brian Harring0be85c62012-03-17 19:52:12 -07001617 state = self._state_map[dep]
David James8c7e5e32011-06-28 11:26:03 -07001618 del dep_pkg["needs"][target]
Brian Harring0be85c62012-03-17 19:52:12 -07001619 state.update_score()
1620 if not state.prefetched:
1621 if dep in self._fetch_ready:
1622 # If it's not currently being fetched, update the prioritization
1623 self._fetch_ready.sort()
1624 elif not dep_pkg["needs"]:
David James8c7e5e32011-06-28 11:26:03 -07001625 if dep_pkg["nodeps"] and dep_pkg["action"] == "nomerge":
1626 self._Finish(dep)
1627 else:
Brian Harring0be85c62012-03-17 19:52:12 -07001628 self._build_ready.put(self._state_map[dep])
David James8c7e5e32011-06-28 11:26:03 -07001629 self._deps_map.pop(target)
David Jamesfcb70ef2011-02-02 16:02:30 -08001630
1631 def _Retry(self):
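    # Pop retry candidates until one is actually scheduled; only one retried
    # package is kept in flight at a time.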
David James8c7e5e32011-06-28 11:26:03 -07001632 while self._retry_queue:
Brian Harring0be85c62012-03-17 19:52:12 -07001633 state = self._retry_queue.pop(0)
1634 if self._Schedule(state):
1635 self._Print("Retrying emerge of %s." % state.target)
David James8c7e5e32011-06-28 11:26:03 -07001636 break
David Jamesfcb70ef2011-02-02 16:02:30 -08001637
Brian Harringa43f5952012-04-12 01:19:34 -07001638 def _Shutdown(self):
David Jamesfcb70ef2011-02-02 16:02:30 -08001639 # Tell emerge workers to exit. They all exit when 'None' is pushed
1640 # to the queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001641
Brian Harringa43f5952012-04-12 01:19:34 -07001642    # Shut down the workers first, then the job queue (which is how they feed
1643    # things back), and finally the print queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001644
Brian Harringa43f5952012-04-12 01:19:34 -07001645 def _stop(queue, pool):
1646 if pool is None:
1647 return
1648 try:
1649 queue.put(None)
1650 pool.close()
1651 pool.join()
1652 finally:
1653 pool.terminate()
Brian Harring0be85c62012-03-17 19:52:12 -07001654
Brian Harringa43f5952012-04-12 01:19:34 -07001655 _stop(self._fetch_queue, self._fetch_pool)
1656 self._fetch_queue = self._fetch_pool = None
Brian Harring0be85c62012-03-17 19:52:12 -07001657
Brian Harringa43f5952012-04-12 01:19:34 -07001658 _stop(self._build_queue, self._build_pool)
1659 self._build_queue = self._build_pool = None
1660
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001661 if self._unpack_only:
1662 _stop(self._unpack_queue, self._unpack_pool)
1663 self._unpack_queue = self._unpack_pool = None
1664
Brian Harringa43f5952012-04-12 01:19:34 -07001665 if self._job_queue is not None:
1666 self._job_queue.close()
1667 self._job_queue = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001668
1669 # Now that our workers are finished, we can kill the print queue.
Brian Harringa43f5952012-04-12 01:19:34 -07001670 if self._print_worker is not None:
1671 try:
1672 self._print_queue.put(None)
1673 self._print_queue.close()
1674 self._print_worker.join()
1675 finally:
1676 self._print_worker.terminate()
1677 self._print_queue = self._print_worker = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001678
1679 def Run(self):
1680 """Run through the scheduled ebuilds.
1681
1682 Keep running so long as we have uninstalled packages in the
1683 dependency graph to merge.
1684 """
Brian Harringa43f5952012-04-12 01:19:34 -07001685 if not self._deps_map:
1686 return
1687
Brian Harring0be85c62012-03-17 19:52:12 -07001688 # Start the fetchers.
1689 for _ in xrange(min(self._fetch_procs, len(self._fetch_ready))):
1690 state = self._fetch_ready.get()
1691 self._fetch_jobs[state.target] = None
1692 self._fetch_queue.put(state)
1693
1694 # Print an update, then get going.
1695 self._Status()
1696
David Jamesfcb70ef2011-02-02 16:02:30 -08001697 while self._deps_map:
1698 # Check here that we are actually waiting for something.
Brian Harring0be85c62012-03-17 19:52:12 -07001699 if (self._build_queue.empty() and
David Jamesfcb70ef2011-02-02 16:02:30 -08001700 self._job_queue.empty() and
Brian Harring0be85c62012-03-17 19:52:12 -07001701 not self._fetch_jobs and
1702 not self._fetch_ready and
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001703 not self._unpack_jobs and
1704 not self._unpack_ready and
Brian Harring0be85c62012-03-17 19:52:12 -07001705 not self._build_jobs and
1706 not self._build_ready and
David Jamesfcb70ef2011-02-02 16:02:30 -08001707 self._deps_map):
1708 # If we have failed on a package, retry it now.
1709 if self._retry_queue:
1710 self._Retry()
1711 else:
David Jamesfcb70ef2011-02-02 16:02:30 -08001712 # Tell the user why we're exiting.
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001713 if self._failed_count:
1714 print('Packages failed:\n\t%s' %
1715 '\n\t'.join(self._failed_count.iterkeys()))
David James0eae23e2012-07-03 15:04:25 -07001716 status_file = os.environ.get("PARALLEL_EMERGE_STATUS_FILE")
1717 if status_file:
David James321490a2012-12-17 12:05:56 -08001718 failed_pkgs = set(portage.versions.cpv_getkey(x)
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001719 for x in self._failed_count.iterkeys())
David James0eae23e2012-07-03 15:04:25 -07001720 with open(status_file, "a") as f:
1721 f.write("%s\n" % " ".join(failed_pkgs))
David Jamesfcb70ef2011-02-02 16:02:30 -08001722 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001723 print("Deadlock! Circular dependencies!")
David Jamesfcb70ef2011-02-02 16:02:30 -08001724 sys.exit(1)
1725
David James321490a2012-12-17 12:05:56 -08001726 for _ in xrange(12):
David Jamesa74289a2011-08-12 10:41:24 -07001727 try:
1728 job = self._job_queue.get(timeout=5)
1729 break
1730 except Queue.Empty:
1731 # Check if any more jobs can be scheduled.
1732 self._ScheduleLoop()
1733 else:
Brian Harring706747c2012-03-16 03:04:31 -07001734 # Print an update every 60 seconds.
David Jamesfcb70ef2011-02-02 16:02:30 -08001735 self._Status()
1736 continue
1737
1738 target = job.target
1739
Brian Harring0be85c62012-03-17 19:52:12 -07001740 if job.fetch_only:
1741 if not job.done:
1742 self._fetch_jobs[job.target] = job
1743 else:
1744 state = self._state_map[job.target]
1745 state.prefetched = True
1746 state.fetched_successfully = (job.retcode == 0)
1747 del self._fetch_jobs[job.target]
1748 self._Print("Fetched %s in %2.2fs"
1749 % (target, time.time() - job.start_timestamp))
1750
1751 if self._show_output or job.retcode != 0:
1752 self._print_queue.put(JobPrinter(job, unlink=True))
1753 else:
1754 os.unlink(job.filename)
1755 # Failure or not, let build work with it next.
1756 if not self._deps_map[job.target]["needs"]:
1757 self._build_ready.put(state)
1758 self._ScheduleLoop()
1759
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001760 if self._unpack_only and job.retcode == 0:
1761 self._unpack_ready.put(state)
1762 self._ScheduleLoop(unpack_only=True)
1763
Brian Harring0be85c62012-03-17 19:52:12 -07001764 if self._fetch_ready:
1765 state = self._fetch_ready.get()
1766 self._fetch_queue.put(state)
1767 self._fetch_jobs[state.target] = None
1768 else:
1769 # Minor optimization; shut down fetchers early since we know
1770 # the queue is empty.
1771 self._fetch_queue.put(None)
1772 continue
1773
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001774 if job.unpack_only:
1775 if not job.done:
1776 self._unpack_jobs[target] = job
1777 else:
1778 del self._unpack_jobs[target]
1779 self._Print("Unpacked %s in %2.2fs"
1780 % (target, time.time() - job.start_timestamp))
1781 if self._show_output or job.retcode != 0:
1782 self._print_queue.put(JobPrinter(job, unlink=True))
1783 else:
1784 os.unlink(job.filename)
1785 if self._unpack_ready:
1786 state = self._unpack_ready.get()
1787 self._unpack_queue.put(state)
1788 self._unpack_jobs[state.target] = None
1789 continue
1790
David Jamesfcb70ef2011-02-02 16:02:30 -08001791 if not job.done:
Brian Harring0be85c62012-03-17 19:52:12 -07001792 self._build_jobs[target] = job
David Jamesfcb70ef2011-02-02 16:02:30 -08001793 self._Print("Started %s (logged in %s)" % (target, job.filename))
1794 continue
1795
1796 # Print output of job
1797 if self._show_output or job.retcode != 0:
1798 self._print_queue.put(JobPrinter(job, unlink=True))
1799 else:
1800 os.unlink(job.filename)
Brian Harring0be85c62012-03-17 19:52:12 -07001801 del self._build_jobs[target]
David Jamesfcb70ef2011-02-02 16:02:30 -08001802
1803 seconds = time.time() - job.start_timestamp
1804 details = "%s (in %dm%.1fs)" % (target, seconds / 60, seconds % 60)
1805
1806 # Complain if necessary.
1807 if job.retcode != 0:
1808 # Handle job failure.
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001809 failed_count = self._failed_count.get(target, 0)
1810 if failed_count >= self._max_retries:
1811 # If this job has failed and can't be retried, give up.
David Jamesfcb70ef2011-02-02 16:02:30 -08001812 self._Print("Failed %s. Your build has failed." % details)
1813 else:
1814 # Queue up this build to try again after a long while.
Brian Harring0be85c62012-03-17 19:52:12 -07001815 self._retry_queue.append(self._state_map[target])
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001816 self._failed_count[target] = failed_count + 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001817 self._Print("Failed %s, retrying later." % details)
1818 else:
David James32420cc2011-08-25 21:32:46 -07001819 self._Print("Completed %s" % details)
1820
1821 # Mark as completed and unblock waiting ebuilds.
1822 self._Finish(target)
1823
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001824 if target in self._failed_count and self._retry_queue:
David Jamesfcb70ef2011-02-02 16:02:30 -08001825 # If we have successfully retried a failed package, and there
1826 # are more failed packages, try the next one. We will only have
1827 # one retrying package actively running at a time.
1828 self._Retry()
1829
David Jamesfcb70ef2011-02-02 16:02:30 -08001830
David James8c7e5e32011-06-28 11:26:03 -07001831 # Schedule pending jobs and print an update.
1832 self._ScheduleLoop()
1833 self._Status()
David Jamesfcb70ef2011-02-02 16:02:30 -08001834
David Jamese703d0f2012-01-12 16:27:45 -08001835 # If packages were retried, output a warning.
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001836 if self._failed_count:
David Jamese703d0f2012-01-12 16:27:45 -08001837 self._Print("")
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001838 self._Print("WARNING: The following packages failed once or more,")
David Jamese703d0f2012-01-12 16:27:45 -08001839 self._Print("but succeeded upon retry. This might indicate incorrect")
1840 self._Print("dependencies.")
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001841 for pkg in self._failed_count.iterkeys():
David Jamese703d0f2012-01-12 16:27:45 -08001842 self._Print(" %s" % pkg)
1843 self._Print("@@@STEP_WARNINGS@@@")
1844 self._Print("")
1845
David Jamesfcb70ef2011-02-02 16:02:30 -08001846 # Tell child threads to exit.
1847 self._Print("Merge complete")
David Jamesfcb70ef2011-02-02 16:02:30 -08001848
1849
Brian Harring30675052012-02-29 12:18:22 -08001850def main(argv):
Brian Harring8294d652012-05-23 02:20:52 -07001851 try:
1852 return real_main(argv)
1853 finally:
1854 # Work around multiprocessing sucking and not cleaning up after itself.
1855    # http://bugs.python.org/issue4106
1856    # Step one: ensure GC is run *prior* to the VM starting shutdown.
1857 gc.collect()
1858    # Step two: go looking for those threads and try to manually reap
1859 # them if we can.
1860 for x in threading.enumerate():
1861 # Filter on the name, and ident; if ident is None, the thread
1862 # wasn't started.
1863 if x.name == 'QueueFeederThread' and x.ident is not None:
1864 x.join(1)
David Jamesfcb70ef2011-02-02 16:02:30 -08001865
Brian Harring8294d652012-05-23 02:20:52 -07001866
1867def real_main(argv):
Brian Harring30675052012-02-29 12:18:22 -08001868 parallel_emerge_args = argv[:]
David Jamesfcb70ef2011-02-02 16:02:30 -08001869 deps = DepGraphGenerator()
Brian Harring30675052012-02-29 12:18:22 -08001870 deps.Initialize(parallel_emerge_args)
David Jamesfcb70ef2011-02-02 16:02:30 -08001871 emerge = deps.emerge
1872
1873 if emerge.action is not None:
Brian Harring30675052012-02-29 12:18:22 -08001874 argv = deps.ParseParallelEmergeArgs(argv)
Brian Harring8294d652012-05-23 02:20:52 -07001875 return emerge_main(argv)
David Jamesfcb70ef2011-02-02 16:02:30 -08001876 elif not emerge.cmdline_packages:
1877 Usage()
Brian Harring8294d652012-05-23 02:20:52 -07001878 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001879
1880 # Unless we're in pretend mode, there's not much point running without
1881 # root access. We need to be able to install packages.
1882 #
1883 # NOTE: Even if you're running --pretend, it's a good idea to run
1884 # parallel_emerge with root access so that portage can write to the
1885 # dependency cache. This is important for performance.
David James321490a2012-12-17 12:05:56 -08001886 if "--pretend" not in emerge.opts and portage.data.secpass < 2:
Mike Frysinger383367e2014-09-16 15:06:17 -04001887 print("parallel_emerge: superuser access is required.")
Brian Harring8294d652012-05-23 02:20:52 -07001888 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001889
1890 if "--quiet" not in emerge.opts:
1891 cmdline_packages = " ".join(emerge.cmdline_packages)
Mike Frysinger383367e2014-09-16 15:06:17 -04001892 print("Starting fast-emerge.")
1893 print(" Building package %s on %s" % (cmdline_packages,
Gilad Arnold05f94b02015-05-22 10:41:05 -07001894 deps.sysroot or "root"))
David Jamesfcb70ef2011-02-02 16:02:30 -08001895
David James386ccd12011-05-04 20:17:42 -07001896 deps_tree, deps_info = deps.GenDependencyTree()
David Jamesfcb70ef2011-02-02 16:02:30 -08001897
1898 # You want me to be verbose? I'll give you two trees! Twice as much value.
1899 if "--tree" in emerge.opts and "--verbose" in emerge.opts:
1900 deps.PrintTree(deps_tree)
1901
David James386ccd12011-05-04 20:17:42 -07001902 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001903
1904 # OK, time to print out our progress so far.
1905 deps.PrintInstallPlan(deps_graph)
1906 if "--tree" in emerge.opts:
1907 PrintDepsMap(deps_graph)
1908
1909 # Are we upgrading portage? If so, and there are more packages to merge,
1910 # schedule a restart of parallel_emerge to merge the rest. This ensures that
1911 # we pick up all updates to portage settings before merging any more
1912 # packages.
1913 portage_upgrade = False
1914 root = emerge.settings["ROOT"]
Don Garrett25f309a2014-03-19 14:02:12 -07001915 # pylint: disable=W0212
David Jamesfcb70ef2011-02-02 16:02:30 -08001916 if root == "/":
Gilad Arnoldcead28a2015-05-22 10:45:02 -07001917 final_db = emerge.depgraph._dynamic_config._filtered_trees[root]['graph_db']
David Jamesfcb70ef2011-02-02 16:02:30 -08001918 for db_pkg in final_db.match_pkgs("sys-apps/portage"):
1919 portage_pkg = deps_graph.get(db_pkg.cpv)
David James0ff16f22012-11-02 14:18:07 -07001920 if portage_pkg:
David Jamesfcb70ef2011-02-02 16:02:30 -08001921 portage_upgrade = True
1922 if "--quiet" not in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001923 print("Upgrading portage first, then restarting...")
David Jamesfcb70ef2011-02-02 16:02:30 -08001924
David James0ff16f22012-11-02 14:18:07 -07001925 # Upgrade Portage first, then the rest of the packages.
1926 #
1927 # In order to grant the child permission to run setsid, we need to run sudo
1928 # again. We preserve SUDO_USER here in case an ebuild depends on it.
1929 if portage_upgrade:
1930 # Calculate what arguments to use when re-invoking.
1931 args = ["sudo", "-E", "SUDO_USER=%s" % os.environ.get("SUDO_USER", "")]
1932 args += [os.path.abspath(sys.argv[0])] + parallel_emerge_args
1933 args += ["--exclude=sys-apps/portage"]
1934
1935 # First upgrade Portage.
1936 passthrough_args = ("--quiet", "--pretend", "--verbose")
1937 emerge_args = [k for k in emerge.opts if k in passthrough_args]
1938 ret = emerge_main(emerge_args + ["portage"])
1939 if ret != 0:
1940 return ret
1941
1942 # Now upgrade the rest.
1943 os.execvp(args[0], args)
1944
Bertrand SIMONNETc03c8ee2014-12-10 17:02:55 -08001945 # Attempt to solve crbug.com/433482
1946 # The file descriptor error appears only when getting userpriv_groups
1947 # (lazily generated). Loading userpriv_groups here will reduce the number of
1948  # calls from a few hundred to one.
1949 portage.data._get_global('userpriv_groups')
1950
David Jamesfcb70ef2011-02-02 16:02:30 -08001951 # Run the queued emerges.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001952 scheduler = EmergeQueue(deps_graph, emerge, deps.package_db, deps.show_output,
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001953 deps.unpack_only, deps.max_retries)
Brian Harringa43f5952012-04-12 01:19:34 -07001954 try:
1955 scheduler.Run()
1956 finally:
Don Garrett25f309a2014-03-19 14:02:12 -07001957 # pylint: disable=W0212
Brian Harringa43f5952012-04-12 01:19:34 -07001958 scheduler._Shutdown()
David James97ce8902011-08-16 09:51:05 -07001959 scheduler = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001960
Mike Frysingerd20a6e22012-10-04 19:01:10 -04001961 clean_logs(emerge.settings)
1962
Mike Frysinger383367e2014-09-16 15:06:17 -04001963 print("Done")
Brian Harring8294d652012-05-23 02:20:52 -07001964 return 0