# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Program to run emerge in parallel, for significant speedup.

Usage:
 ./parallel_emerge [--board=BOARD] [--workon=PKGS]
                   [--force-remote-binary=PKGS] [emerge args] package

This script runs multiple emerge processes in parallel, using appropriate
Portage APIs. It is faster than standard emerge because it has a
multiprocess model instead of an asynchronous model.
"""

from __future__ import print_function

import codecs
import copy
import errno
import gc
import heapq
import multiprocessing
import os
try:
  import Queue
except ImportError:
  # Python-3 renamed to "queue". We still use Queue to avoid collisions
  # with naming variables as "queue". Maybe we'll transition at some point.
  # pylint: disable=F0401
  import queue as Queue
import signal
import subprocess
import sys
import tempfile
import threading
import time
import traceback

from chromite.lib import cros_build_lib
from chromite.lib import cros_event
from chromite.lib import portage_util
from chromite.lib import process_util
from chromite.lib import proctitle

# If PORTAGE_USERNAME isn't specified, scrape it from the $HOME variable. On
# Chromium OS, the default "portage" user doesn't have the necessary
# permissions. It'd be easier if we could default to $USERNAME, but $USERNAME
# is "root" here because we get called through sudo.
#
# We need to set this before importing any portage modules, because portage
# looks up "PORTAGE_USERNAME" at import time.
#
# NOTE: .bashrc sets PORTAGE_USERNAME = $USERNAME, so most people won't
# encounter this case unless they have an old chroot or blow away the
# environment by running sudo without the -E specifier.
if "PORTAGE_USERNAME" not in os.environ:
  homedir = os.environ.get("HOME")
  if homedir:
    os.environ["PORTAGE_USERNAME"] = os.path.basename(homedir)

# Wrap Popen with a lock to ensure no two Popen calls are executed
# simultaneously in the same process.
# Two Popen calls at the same time might be the cause for crbug.com/433482.
_popen_lock = threading.Lock()
_old_popen = subprocess.Popen

def _LockedPopen(*args, **kwargs):
  with _popen_lock:
    return _old_popen(*args, **kwargs)

subprocess.Popen = _LockedPopen

# Portage doesn't expose dependency trees in its public API, so we have to
# make use of some private APIs here. These modules are found under
# /usr/lib/portage/pym/.
#
# TODO(davidjames): Update Portage to expose public APIs for these features.
# pylint: disable=F0401
from _emerge.actions import adjust_configs
from _emerge.actions import load_emerge_config
from _emerge.create_depgraph_params import create_depgraph_params
from _emerge.depgraph import backtrack_depgraph
from _emerge.main import emerge_main
from _emerge.main import parse_opts
from _emerge.Package import Package
from _emerge.post_emerge import clean_logs
from _emerge.Scheduler import Scheduler
from _emerge.stdout_spinner import stdout_spinner
from portage._global_updates import _global_updates
import portage
import portage.debug
# pylint: enable=F0401


def Usage():
  """Print usage."""
  print("Usage:")
  print(" ./parallel_emerge [--board=BOARD] [--workon=PKGS] [--rebuild]")
  print("                   [--eventlogfile=FILE] [emerge args] package")
  print()
  print("Packages specified as workon packages are always built from source.")
  print()
  print("The --workon argument is mainly useful when you want to build and")
  print("install packages that you are working on unconditionally, but do not")
  print("want to have to rev the package to indicate you want to build it from")
  print("source. The build_packages script will automatically supply the")
  print("workon argument to emerge, ensuring that packages selected using")
  print("cros-workon are rebuilt.")
  print()
  print("The --rebuild option rebuilds packages whenever their dependencies")
  print("are changed. This ensures that your build is correct.")
  print()
  print("The --eventlogfile option writes events to the given file. The file")
  print("is overwritten if it exists.")


# Global start time
GLOBAL_START = time.time()

# Whether process has been killed by a signal.
KILLED = multiprocessing.Event()


class EmergeData(object):
  """This simple struct holds various emerge variables.

  This struct helps us easily pass emerge variables around as a unit.
  These variables are used for calculating dependencies and installing
  packages.
  """

  __slots__ = ["action", "cmdline_packages", "depgraph", "favorites",
               "mtimedb", "opts", "root_config", "scheduler_graph",
               "settings", "spinner", "trees"]

  def __init__(self):
    # The action the user requested. If the user is installing packages, this
    # is None. If the user is doing anything other than installing packages,
    # this will contain the action name, which will map exactly to the
    # long-form name of the associated emerge option.
    #
    # Example: If you call parallel_emerge --unmerge package, the action name
    # will be "unmerge"
    self.action = None

    # The list of packages the user passed on the command-line.
    self.cmdline_packages = None

    # The emerge dependency graph. It'll contain all the packages involved in
    # this merge, along with their versions.
    self.depgraph = None

    # The list of candidates to add to the world file.
    self.favorites = None

    # A dict of the options passed to emerge. This dict has been cleaned up
    # a bit by parse_opts, so that it's a bit easier for the emerge code to
    # look at the options.
    #
    # Emerge takes a few shortcuts in its cleanup process to make parsing of
    # the options dict easier. For example, if you pass in "--usepkg=n", the
    # "--usepkg" flag is just left out of the dictionary altogether. Because
    # --usepkg=n is the default, this makes parsing easier, because emerge
    # can just assume that if "--usepkg" is in the dictionary, it's enabled.
    #
    # These cleanup processes aren't applied to all options. For example, the
    # --with-bdeps flag is passed in as-is. For a full list of the cleanups
    # applied by emerge, see the parse_opts function in the _emerge.main
    # package.
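    # Rough illustration (values are assumptions, not taken from a real run):
    # parse_opts(["--jobs=4", "--usepkg=n"]) would leave "--usepkg" out of the
    # dict entirely, so the result looks roughly like {"--jobs": 4}. See
    # parse_opts in _emerge.main for the authoritative behavior.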
    self.opts = None

    # A dictionary used by portage to maintain global state. This state is
    # loaded from disk when portage starts up, and saved to disk whenever we
    # call mtimedb.commit().
    #
    # This database contains information about global updates (i.e., what
    # version of portage we have) and what we're currently doing. Portage
    # saves what it is currently doing in this database so that it can be
    # resumed when you call it with the --resume option.
    #
    # parallel_emerge does not save what it is currently doing in the mtimedb,
    # so we do not support the --resume option.
    self.mtimedb = None

    # The portage configuration for our current root. This contains the portage
    # settings (see below) and the three portage trees for our current root.
    # (The three portage trees are explained below, in the documentation for
    # the "trees" member.)
    self.root_config = None

    # The scheduler graph is used by emerge to calculate what packages to
    # install. We don't actually install any deps, so this isn't really used,
    # but we pass it in to the Scheduler object anyway.
    self.scheduler_graph = None

    # Portage settings for our current session. Most of these settings are set
    # in make.conf inside our current install root.
    self.settings = None

    # The spinner, which spews stuff to stdout to indicate that portage is
    # doing something. We maintain our own spinner, so we set the portage
    # spinner to "silent" mode.
    self.spinner = None

    # The portage trees. There are separate portage trees for each root. To get
    # the portage tree for the current root, you can look in self.trees[root],
    # where root = self.settings["ROOT"].
    #
    # In each root, there are three trees: vartree, porttree, and bintree.
    #  - vartree: A database of the currently-installed packages.
    #  - porttree: A database of ebuilds, that can be used to build packages.
    #  - bintree: A database of binary packages.
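    # For reference, later code in this script reads the installed-package
    # database for a root through the vartree, e.g.:
    #   vardb = emerge.trees[root]["vartree"].dbapi
    # (see CreateDepgraph and EmergeWorker below).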
    self.trees = None


class DepGraphGenerator(object):
  """Grab dependency information about packages from portage.

  Typical usage:
    deps = DepGraphGenerator()
    deps.Initialize(sys.argv[1:])
    deps_tree, deps_info = deps.GenDependencyTree()
    deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
    deps.PrintTree(deps_tree)
    PrintDepsMap(deps_graph)
  """

  __slots__ = ["board", "emerge", "package_db", "show_output", "sysroot",
               "unpack_only", "max_retries"]

  def __init__(self):
    self.board = None
    self.emerge = EmergeData()
    self.package_db = {}
    self.show_output = False
    self.sysroot = None
    self.unpack_only = False
    self.max_retries = 1

  def ParseParallelEmergeArgs(self, argv):
    """Read the parallel emerge arguments from the command-line.

    We need to be compatible with emerge arg format. We scrape arguments that
    are specific to parallel_emerge, and pass through the rest directly to
    emerge.

    Args:
      argv: arguments list

    Returns:
      Arguments that don't belong to parallel_emerge
    """
    emerge_args = []
    for arg in argv:
      # Specifically match arguments that are specific to parallel_emerge, and
      # pass through the rest.
      if arg.startswith("--board="):
        self.board = arg.replace("--board=", "")
      elif arg.startswith("--sysroot="):
        self.sysroot = arg.replace("--sysroot=", "")
      elif arg.startswith("--workon="):
        workon_str = arg.replace("--workon=", "")
        emerge_args.append("--reinstall-atoms=%s" % workon_str)
        emerge_args.append("--usepkg-exclude=%s" % workon_str)
      elif arg.startswith("--force-remote-binary="):
        force_remote_binary = arg.replace("--force-remote-binary=", "")
        emerge_args.append("--useoldpkg-atoms=%s" % force_remote_binary)
      elif arg.startswith("--retries="):
        self.max_retries = int(arg.replace("--retries=", ""))
      elif arg == "--show-output":
        self.show_output = True
      elif arg == "--rebuild":
        emerge_args.append("--rebuild-if-unbuilt")
      elif arg == "--unpackonly":
        emerge_args.append("--fetchonly")
        self.unpack_only = True
      elif arg.startswith("--eventlogfile="):
        log_file_name = arg.replace("--eventlogfile=", "")
        event_logger = cros_event.getEventFileLogger(log_file_name)
        event_logger.setKind('ParallelEmerge')
        cros_event.setEventLogger(event_logger)
      else:
        # Not one of our options, so pass through to emerge.
        emerge_args.append(arg)

    # These packages take a really long time to build, so, for expediency, we
    # are blacklisting them from the automatic rebuilds that are triggered
    # when one of their dependencies needs to be recompiled.
    for pkg in ("chromeos-base/chromeos-chrome",):
      emerge_args.append("--rebuild-exclude=%s" % pkg)

    return emerge_args

  def Initialize(self, args):
    """Initializer. Parses arguments and sets up portage state."""

    # Parse and strip out args that are just intended for parallel_emerge.
    emerge_args = self.ParseParallelEmergeArgs(args)

    if self.sysroot and self.board:
      cros_build_lib.Die("--sysroot and --board are incompatible.")

    # Setup various environment variables based on our current board. These
    # variables are normally setup inside emerge-${BOARD}, but since we don't
    # call that script, we have to set it up here. These variables serve to
    # point our tools at /build/BOARD and to setup cross compiles to the
    # appropriate board as configured in toolchain.conf.
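    # For instance (board name purely illustrative), "--board=amd64-generic"
    # typically resolves to a sysroot of /build/amd64-generic via
    # cros_build_lib.GetSysroot() below.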
    if self.board:
      self.sysroot = os.environ.get('SYSROOT',
                                    cros_build_lib.GetSysroot(self.board))

    if self.sysroot:
      os.environ["PORTAGE_CONFIGROOT"] = self.sysroot
      os.environ["SYSROOT"] = self.sysroot

    # Turn off interactive delays
    os.environ["EBEEP_IGNORE"] = "1"
    os.environ["EPAUSE_IGNORE"] = "1"
    os.environ["CLEAN_DELAY"] = "0"

    # Parse the emerge options.
    action, opts, cmdline_packages = parse_opts(emerge_args, silent=True)

    # Set environment variables based on options. Portage normally sets these
    # environment variables in emerge_main, but we can't use that function,
    # because it also does a bunch of other stuff that we don't want.
    # TODO(davidjames): Patch portage to move this logic into a function we can
    # reuse here.
    if "--debug" in opts:
      os.environ["PORTAGE_DEBUG"] = "1"
    if "--config-root" in opts:
      os.environ["PORTAGE_CONFIGROOT"] = opts["--config-root"]
    if "--root" in opts:
      os.environ["ROOT"] = opts["--root"]
    if "--accept-properties" in opts:
      os.environ["ACCEPT_PROPERTIES"] = opts["--accept-properties"]

    # If we're installing packages to the board, we can disable vardb locks.
    # This is safe because we only run up to one instance of parallel_emerge in
    # parallel.
    # TODO(davidjames): Enable this for the host too.
    if self.sysroot:
      os.environ.setdefault("PORTAGE_LOCKS", "false")

    # Now that we've setup the necessary environment variables, we can load the
    # emerge config from disk.
    # pylint: disable=unpacking-non-sequence
    settings, trees, mtimedb = load_emerge_config()

    # Add in EMERGE_DEFAULT_OPTS, if specified.
    tmpcmdline = []
    if "--ignore-default-opts" not in opts:
      tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
    tmpcmdline.extend(emerge_args)
    action, opts, cmdline_packages = parse_opts(tmpcmdline)

    # If we're installing to the board, we want the --root-deps option so that
    # portage will install the build dependencies to that location as well.
    if self.sysroot:
      opts.setdefault("--root-deps", True)

    # Check whether our portage tree is out of date. Typically, this happens
    # when you're setting up a new portage tree, such as in setup_board and
    # make_chroot. In that case, portage applies a bunch of global updates
    # here. Once the updates are finished, we need to commit any changes
    # that the global update made to our mtimedb, and reload the config.
    #
    # Portage normally handles this logic in emerge_main, but again, we can't
    # use that function here.
    if _global_updates(trees, mtimedb["updates"]):
      mtimedb.commit()
      # pylint: disable=unpacking-non-sequence
      settings, trees, mtimedb = load_emerge_config(trees=trees)

    # Setup implied options. Portage normally handles this logic in
    # emerge_main.
    if "--buildpkgonly" in opts or "buildpkg" in settings.features:
      opts.setdefault("--buildpkg", True)
    if "--getbinpkgonly" in opts:
      opts.setdefault("--usepkgonly", True)
      opts.setdefault("--getbinpkg", True)
    if "getbinpkg" in settings.features:
      # Per emerge_main, FEATURES=getbinpkg overrides --getbinpkg=n
      opts["--getbinpkg"] = True
    if "--getbinpkg" in opts or "--usepkgonly" in opts:
      opts.setdefault("--usepkg", True)
    if "--fetch-all-uri" in opts:
      opts.setdefault("--fetchonly", True)
    if "--skipfirst" in opts:
      opts.setdefault("--resume", True)
    if "--buildpkgonly" in opts:
      # --buildpkgonly will not merge anything, so it overrides all binary
      # package options.
      for opt in ("--getbinpkg", "--getbinpkgonly",
                  "--usepkg", "--usepkgonly"):
        opts.pop(opt, None)
    if (settings.get("PORTAGE_DEBUG", "") == "1" and
        "python-trace" in settings.features):
      portage.debug.set_trace(True)

    # Complain about unsupported options
    for opt in ("--ask", "--ask-enter-invalid", "--resume", "--skipfirst"):
      if opt in opts:
        print("%s is not supported by parallel_emerge" % opt)
        sys.exit(1)

    # Make emerge specific adjustments to the config (e.g. colors!)
    adjust_configs(opts, trees)

    # Save our configuration so far in the emerge object
    emerge = self.emerge
    emerge.action, emerge.opts = action, opts
    emerge.settings, emerge.trees, emerge.mtimedb = settings, trees, mtimedb
    emerge.cmdline_packages = cmdline_packages
    root = settings["ROOT"]
    emerge.root_config = trees[root]["root_config"]

    if "--usepkg" in opts:
      emerge.trees[root]["bintree"].populate("--getbinpkg" in opts)

  def CreateDepgraph(self, emerge, packages):
    """Create an emerge depgraph object."""
    # Setup emerge options.
    emerge_opts = emerge.opts.copy()

    # Ask portage to build a dependency graph with the options we specified
    # above.
    params = create_depgraph_params(emerge_opts, emerge.action)
    success, depgraph, favorites = backtrack_depgraph(
        emerge.settings, emerge.trees, emerge_opts, params, emerge.action,
        packages, emerge.spinner)
    emerge.depgraph = depgraph

    # Is it impossible to honor the user's request? Bail!
    if not success:
      depgraph.display_problems()
      sys.exit(1)

    emerge.depgraph = depgraph
    emerge.favorites = favorites

    # Prime and flush emerge caches.
    root = emerge.settings["ROOT"]
    vardb = emerge.trees[root]["vartree"].dbapi
    if "--pretend" not in emerge.opts:
      vardb.counter_tick()
    vardb.flush_cache()

  def GenDependencyTree(self):
    """Get dependency tree info from emerge.

    Returns:
      Dependency tree
    """
    start = time.time()

    emerge = self.emerge

    # Create a list of packages to merge
    packages = set(emerge.cmdline_packages[:])

    # Tell emerge to be quiet. We print plenty of info ourselves so we don't
    # need any extra output from portage.
    portage.util.noiselimit = -1

    # My favorite feature: The silent spinner. It doesn't spin. Ever.
    # I'd disable the colors by default too, but they look kind of cool.
    emerge.spinner = stdout_spinner()
    emerge.spinner.update = emerge.spinner.update_quiet

    if "--quiet" not in emerge.opts:
      print("Calculating deps...")

    with cros_event.newEvent(task_name="GenerateDepTree"):
      self.CreateDepgraph(emerge, packages)
      depgraph = emerge.depgraph

    # Build our own tree from the emerge digraph.
    deps_tree = {}
    # pylint: disable=W0212
    digraph = depgraph._dynamic_config.digraph
    root = emerge.settings["ROOT"]
    final_db = depgraph._dynamic_config._filtered_trees[root]['graph_db']
    for node, node_deps in digraph.nodes.items():
      # Calculate dependency packages that need to be installed first. Each
      # child on the digraph is a dependency. The "operation" field specifies
      # what we're doing (e.g. merge, uninstall, etc.). The "priorities" array
      # contains the type of dependency (e.g. build, runtime, runtime_post,
      # etc.)
      #
      # Portage refers to the identifiers for packages as a CPV. This acronym
      # stands for Category/Package/Version.
      #
      # Here's an example CPV: chromeos-base/power_manager-0.0.1-r1
      # Split up, this CPV would be:
      #   C -- Category: chromeos-base
      #   P -- Package: power_manager
      #   V -- Version: 0.0.1-r1
      #
      # We just refer to CPVs as packages here because it's easier.
      deps = {}
      for child, priorities in node_deps[0].items():
        if isinstance(child, Package) and child.root == root:
          cpv = str(child.cpv)
          action = str(child.operation)

          # If we're uninstalling a package, check whether Portage is
          # installing a replacement. If so, just depend on the installation
          # of the new package, because the old package will automatically
          # be uninstalled at that time.
          if action == "uninstall":
            for pkg in final_db.match_pkgs(child.slot_atom):
              cpv = str(pkg.cpv)
              action = "merge"
              break

          deps[cpv] = dict(action=action,
                           deptypes=[str(x) for x in priorities],
                           deps={})

      # We've built our list of deps, so we can add our package to the tree.
      if isinstance(node, Package) and node.root == root:
        deps_tree[str(node.cpv)] = dict(action=str(node.operation),
                                        deps=deps)
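        # Sketch of the resulting shape (package names illustrative only):
        #   deps_tree["chromeos-base/power_manager-0.0.1-r1"] == {
        #       "action": "merge",
        #       "deps": {"sys-apps/dbus-1.4.12": {
        #           "action": "merge",
        #           "deptypes": ["buildtime", "runtime"],
        #           "deps": {}}}}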

    # Ask portage for its install plan, so that we can only throw out
    # dependencies that portage throws out.
    deps_info = {}
    for pkg in depgraph.altlist():
      if isinstance(pkg, Package):
        assert pkg.root == root
        self.package_db[pkg.cpv] = pkg

        # Save off info about the package
        deps_info[str(pkg.cpv)] = {"idx": len(deps_info)}
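        # "idx" records portage's own install order for this package;
        # SanitizeTree (below) later uses it to decide which edge of a
        # dependency cycle to break so that our merge order matches portage's.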

    seconds = time.time() - start
    if "--quiet" not in emerge.opts:
      print("Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60))

    return deps_tree, deps_info

  def PrintTree(self, deps, depth=""):
    """Print the deps we have seen in the emerge output.

    Args:
      deps: Dependency tree structure.
      depth: Allows printing the tree recursively, with indentation.
    """
    for entry in sorted(deps):
      action = deps[entry]["action"]
      print("%s %s (%s)" % (depth, entry, action))
      self.PrintTree(deps[entry]["deps"], depth=depth + " ")

  def GenDependencyGraph(self, deps_tree, deps_info):
    """Generate a doubly linked dependency graph.

    Args:
      deps_tree: Dependency tree structure.
      deps_info: More details on the dependencies.

    Returns:
      Deps graph in the form of a dict of packages, with each package
      specifying a "needs" list and "provides" list.
    """
    emerge = self.emerge

    # deps_map is the actual dependency graph.
    #
    # Each package specifies a "needs" list and a "provides" list. The "needs"
    # list indicates which packages we depend on. The "provides" list
    # indicates the reverse dependencies -- what packages need us.
    #
    # We also provide some other information in the dependency graph:
    #  - action: What we're planning on doing with this package. Generally,
    #    "merge", "nomerge", or "uninstall"
    deps_map = {}
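    # Illustrative entry (package names invented for the example; entries may
    # also pick up "idx" and "tprovides" keys below):
    #   deps_map["sys-apps/dbus-1.4.12"] == {
    #       "action": "merge",
    #       "needs": {"dev-libs/expat-2.0.1": "buildtime/runtime"},
    #       "provides": set(["chromeos-base/power_manager-0.0.1-r1"]),
    #       "nodeps": False, "binary": False}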

    def ReverseTree(packages):
      """Convert tree to digraph.

      Take the tree of package -> requirements and reverse it to a digraph of
      buildable packages -> packages they unblock.

      Args:
        packages: Tree(s) of dependencies.

      Returns:
        Unsanitized digraph.
      """
      binpkg_phases = set(["setup", "preinst", "postinst"])
      needed_dep_types = set(["blocker", "buildtime", "buildtime_slot_op",
                              "runtime", "runtime_slot_op"])
      ignored_dep_types = set(["ignored", "optional", "runtime_post", "soft"])
      all_dep_types = ignored_dep_types | needed_dep_types
      for pkg in packages:

        # Create an entry for the package
        action = packages[pkg]["action"]
        default_pkg = {"needs": {}, "provides": set(), "action": action,
                       "nodeps": False, "binary": False}
        this_pkg = deps_map.setdefault(pkg, default_pkg)

        if pkg in deps_info:
          this_pkg["idx"] = deps_info[pkg]["idx"]

        # If a package doesn't have any defined phases that might use the
        # dependent packages (i.e. pkg_setup, pkg_preinst, or pkg_postinst),
        # we can install this package before its deps are ready.
        emerge_pkg = self.package_db.get(pkg)
        if emerge_pkg and emerge_pkg.type_name == "binary":
          this_pkg["binary"] = True
          defined_phases = emerge_pkg.defined_phases
          defined_binpkg_phases = binpkg_phases.intersection(defined_phases)
          if not defined_binpkg_phases:
            this_pkg["nodeps"] = True

        # Create entries for dependencies of this package first.
        ReverseTree(packages[pkg]["deps"])

        # Add dependencies to this package.
        for dep, dep_item in packages[pkg]["deps"].iteritems():
          # We only need to enforce strict ordering of dependencies if the
          # dependency is a blocker, or is a buildtime or runtime dependency.
          # (I.e., ignored, optional, and runtime_post dependencies don't
          # depend on ordering.)
          dep_types = dep_item["deptypes"]
          if needed_dep_types.intersection(dep_types):
            deps_map[dep]["provides"].add(pkg)
            this_pkg["needs"][dep] = "/".join(dep_types)

          # Verify we processed all appropriate dependency types.
          unknown_dep_types = set(dep_types) - all_dep_types
          if unknown_dep_types:
            print("Unknown dependency types found:")
            print("  %s -> %s (%s)" % (pkg, dep, "/".join(unknown_dep_types)))
            sys.exit(1)

          # If there's a blocker, Portage may need to move files from one
          # package to another, which requires editing the CONTENTS files of
          # both packages. To avoid race conditions while editing this file,
          # the two packages must not be installed in parallel, so we can't
          # safely ignore dependencies. See http://crosbug.com/19328
          if "blocker" in dep_types:
            this_pkg["nodeps"] = False

    def FindCycles():
      """Find cycles in the dependency tree.

      Returns:
        A dict mapping cyclic packages to a dict of the deps that cause
        cycles. For each dep that causes cycles, it returns an example
        traversal of the graph that shows the cycle.
      """

      def FindCyclesAtNode(pkg, cycles, unresolved, resolved):
        """Find cycles in cyclic dependencies starting at specified package.

        Args:
          pkg: Package identifier.
          cycles: A dict mapping cyclic packages to a dict of the deps that
            cause cycles. For each dep that causes cycles, it returns an
            example traversal of the graph that shows the cycle.
          unresolved: Nodes that have been visited but are not fully processed.
          resolved: Nodes that have been visited and are fully processed.
        """
        pkg_cycles = cycles.get(pkg)
        if pkg in resolved and not pkg_cycles:
          # If we already looked at this package, and found no cyclic
          # dependencies, we can stop now.
          return
        unresolved.append(pkg)
        for dep in deps_map[pkg]["needs"]:
          if dep in unresolved:
            idx = unresolved.index(dep)
            mycycle = unresolved[idx:] + [dep]
            for i in xrange(len(mycycle) - 1):
              pkg1, pkg2 = mycycle[i], mycycle[i+1]
              cycles.setdefault(pkg1, {}).setdefault(pkg2, mycycle)
          elif not pkg_cycles or dep not in pkg_cycles:
            # Looks like we haven't seen this edge before.
            FindCyclesAtNode(dep, cycles, unresolved, resolved)
        unresolved.pop()
        resolved.add(pkg)

      cycles, unresolved, resolved = {}, [], set()
      for pkg in deps_map:
        FindCyclesAtNode(pkg, cycles, unresolved, resolved)
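      # Illustrative result for two packages that depend on each other
      # (names invented):
      #   {"a/b-1": {"c/d-2": ["a/b-1", "c/d-2", "a/b-1"]},
      #    "c/d-2": {"a/b-1": ["c/d-2", "a/b-1", "c/d-2"]}}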
      return cycles

    def RemoveUnusedPackages():
      """Remove installed packages, propagating dependencies."""
      # Schedule packages that aren't on the install list for removal
      rm_pkgs = set(deps_map.keys()) - set(deps_info.keys())

      # Remove the packages we don't want, simplifying the graph and making
      # it easier for us to crack cycles.
      for pkg in sorted(rm_pkgs):
        this_pkg = deps_map[pkg]
        needs = this_pkg["needs"]
        provides = this_pkg["provides"]
        for dep in needs:
          dep_provides = deps_map[dep]["provides"]
          dep_provides.update(provides)
          dep_provides.discard(pkg)
          dep_provides.discard(dep)
        for target in provides:
          target_needs = deps_map[target]["needs"]
          target_needs.update(needs)
          target_needs.pop(pkg, None)
          target_needs.pop(target, None)
        del deps_map[pkg]

    def PrintCycleBreak(basedep, dep, mycycle):
      """Print details about a cycle that we are planning on breaking.

      We are breaking a cycle where dep needs basedep. mycycle is an
      example cycle which contains dep -> basedep.
      """

      needs = deps_map[dep]["needs"]
      depinfo = needs.get(basedep, "deleted")

      # It's OK to swap install order for blockers, as long as the two
      # packages aren't installed in parallel. If there is a cycle, then
      # we know the packages depend on each other already, so we can drop the
      # blocker safely without printing a warning.
      if depinfo == "blocker":
        return

      # Notify the user that we're breaking a cycle.
      print("Breaking %s -> %s (%s)" % (dep, basedep, depinfo))

      # Show cycle.
      for i in xrange(len(mycycle) - 1):
        pkg1, pkg2 = mycycle[i], mycycle[i+1]
        needs = deps_map[pkg1]["needs"]
        depinfo = needs.get(pkg2, "deleted")
        if pkg1 == dep and pkg2 == basedep:
          depinfo = depinfo + ", deleting"
        print("  %s -> %s (%s)" % (pkg1, pkg2, depinfo))

    def SanitizeTree():
      """Remove circular dependencies.

      We prune all dependencies involved in cycles that go against the emerge
      ordering. This has a nice property: we're guaranteed to merge
      dependencies in the same order that portage does.

      Because we don't treat any dependencies as "soft" unless they're killed
      by a cycle, we pay attention to a larger number of dependencies when
      merging. This hurts performance a bit, but helps reliability.
      """
      start = time.time()
      cycles = FindCycles()
      while cycles:
        for dep, mycycles in cycles.iteritems():
          for basedep, mycycle in mycycles.iteritems():
            if deps_info[basedep]["idx"] >= deps_info[dep]["idx"]:
              if "--quiet" not in emerge.opts:
                PrintCycleBreak(basedep, dep, mycycle)
              del deps_map[dep]["needs"][basedep]
              deps_map[basedep]["provides"].remove(dep)
        cycles = FindCycles()
      seconds = time.time() - start
      if "--quiet" not in emerge.opts and seconds >= 0.1:
        print("Tree sanitized in %dm%.1fs" % (seconds / 60, seconds % 60))

    def FindRecursiveProvides(pkg, seen):
      """Find all nodes that require a particular package.

      Assumes that graph is acyclic.

      Args:
        pkg: Package identifier.
        seen: Nodes that have been visited so far.
      """
      if pkg in seen:
        return
      seen.add(pkg)
      info = deps_map[pkg]
      info["tprovides"] = info["provides"].copy()
      for dep in info["provides"]:
        FindRecursiveProvides(dep, seen)
        info["tprovides"].update(deps_map[dep]["tprovides"])

    ReverseTree(deps_tree)

    # We need to remove unused packages so that we can use the dependency
    # ordering of the install process to show us what cycles to crack.
    RemoveUnusedPackages()
    SanitizeTree()
    seen = set()
    for pkg in deps_map:
      FindRecursiveProvides(pkg, seen)
    return deps_map

  def PrintInstallPlan(self, deps_map):
    """Print an emerge-style install plan.

    The install plan lists what packages we're installing, in order.
    It's useful for understanding what parallel_emerge is doing.

    Args:
      deps_map: The dependency graph.
    """

    def InstallPlanAtNode(target, deps_map):
      nodes = []
      nodes.append(target)
      for dep in deps_map[target]["provides"]:
        del deps_map[dep]["needs"][target]
        if not deps_map[dep]["needs"]:
          nodes.extend(InstallPlanAtNode(dep, deps_map))
      return nodes

    deps_map = copy.deepcopy(deps_map)
    install_plan = []
    plan = set()
    for target, info in deps_map.iteritems():
      if not info["needs"] and target not in plan:
        for item in InstallPlanAtNode(target, deps_map):
          plan.add(item)
          install_plan.append(self.package_db[item])

    for pkg in plan:
      del deps_map[pkg]

    if deps_map:
      print("Cyclic dependencies:", " ".join(deps_map))
      PrintDepsMap(deps_map)
      sys.exit(1)

    self.emerge.depgraph.display(install_plan)


def PrintDepsMap(deps_map):
839 """Print dependency graph, for each package list it's prerequisites."""
  for i in sorted(deps_map):
    print("%s: (%s) needs" % (i, deps_map[i]["action"]))
    needs = deps_map[i]["needs"]
    for j in sorted(needs):
      print("  %s" % (j))
    if not needs:
      print("   no dependencies")


class EmergeJobState(object):
  """Structure describing the EmergeJobState."""

  __slots__ = ["done", "filename", "last_notify_timestamp", "last_output_seek",
               "last_output_timestamp", "pkgname", "retcode", "start_timestamp",
               "target", "fetch_only", "unpack_only"]

  def __init__(self, target, pkgname, done, filename, start_timestamp,
               retcode=None, fetch_only=False, unpack_only=False):

    # The full name of the target we're building (e.g.
    # virtual/target-os-1-r60)
    self.target = target

    # The short name of the target we're building (e.g. target-os-1-r60)
    self.pkgname = pkgname

    # Whether the job is done. (True if the job is done; false otherwise.)
    self.done = done

    # The filename where output is currently stored.
    self.filename = filename

    # The timestamp of the last time we printed the name of the log file. We
    # print this at the beginning of the job, so this starts at
    # start_timestamp.
    self.last_notify_timestamp = start_timestamp

    # The location (in bytes) of the end of the last complete line we printed.
    # This starts off at zero. We use this to jump to the right place when we
    # print output from the same ebuild multiple times.
    self.last_output_seek = 0

    # The timestamp of the last time we printed output. Since we haven't
    # printed output yet, this starts at zero.
    self.last_output_timestamp = 0

    # The return code of our job, if the job is actually finished.
    self.retcode = retcode

    # Was this just a fetch job?
    self.fetch_only = fetch_only

    # The timestamp when our job started.
    self.start_timestamp = start_timestamp

    # No emerge, only unpack packages.
    self.unpack_only = unpack_only


def KillHandler(_signum, _frame):
  # Kill self and all subprocesses.
  os.killpg(0, signal.SIGKILL)


def SetupWorkerSignals():
  def ExitHandler(_signum, _frame):
    # Set KILLED flag.
    KILLED.set()

    # Remove our signal handlers so we don't get called recursively.
    signal.signal(signal.SIGINT, KillHandler)
    signal.signal(signal.SIGTERM, KillHandler)

  # Ensure that we exit quietly and cleanly, if possible, when we receive
  # SIGTERM or SIGINT signals. By default, when the user hits CTRL-C, all
  # of the child processes will print details about KeyboardInterrupt
  # exceptions, which isn't very helpful.
  signal.signal(signal.SIGINT, ExitHandler)
  signal.signal(signal.SIGTERM, ExitHandler)


def EmergeProcess(output, target, *args, **kwargs):
  """Merge a package in a subprocess.

  Args:
    output: Temporary file to write output.
    target: The package we'll be processing (for display purposes).
    *args: Arguments to pass to Scheduler constructor.
    **kwargs: Keyword arguments to pass to Scheduler constructor.

  Returns:
    The exit code returned by the subprocess.
  """

  cpv = portage_util.SplitCPV(target)
  event = cros_event.newEvent(task_name="EmergePackage",
                              name=cpv.package,
                              category=cpv.category,
                              version=cpv.version)
  pid = os.fork()
  if pid == 0:
    try:
      proctitle.settitle('EmergeProcess', target)

      # Sanity checks.
      if sys.stdout.fileno() != 1:
        raise Exception("sys.stdout.fileno() != 1")
      if sys.stderr.fileno() != 2:
        raise Exception("sys.stderr.fileno() != 2")

      # - Redirect 1 (stdout) and 2 (stderr) at our temporary file.
      # - Redirect 0 to point at sys.stdin. In this case, sys.stdin
      #   points at a file reading os.devnull, because multiprocessing mucks
      #   with sys.stdin.
      # - Leave the sys.stdin and output filehandles alone.
      fd_pipes = {0: sys.stdin.fileno(),
                  1: output.fileno(),
                  2: output.fileno(),
                  sys.stdin.fileno(): sys.stdin.fileno(),
                  output.fileno(): output.fileno()}
      # pylint: disable=W0212
      portage.process._setup_pipes(fd_pipes, close_fds=False)

      # Portage doesn't like when sys.stdin.fileno() != 0, so point sys.stdin
      # at the filehandle we just created in _setup_pipes.
      if sys.stdin.fileno() != 0:
        sys.__stdin__ = sys.stdin = os.fdopen(0, "r")

      scheduler = Scheduler(*args, **kwargs)

      # Enable blocker handling even though we're in --nodeps mode. This
      # allows us to unmerge the blocker after we've merged the replacement.
      scheduler._opts_ignore_blockers = frozenset()

      # Actually do the merge.
      with event:
        retval = scheduler.merge()
        if retval != 0:
          event.fail(message="non-zero value returned")

    # We catch all exceptions here (including SystemExit, KeyboardInterrupt,
    # etc) so as to ensure that we don't confuse the multiprocessing module,
    # which expects that all forked children exit with os._exit().
    # pylint: disable=W0702
    except:
      traceback.print_exc(file=output)
      retval = 1
    sys.stdout.flush()
    sys.stderr.flush()
    output.flush()
    # pylint: disable=W0212
    os._exit(retval)
  else:
    # Return the exit code of the subprocess.
    return os.waitpid(pid, 0)[1]


def UnpackPackage(pkg_state):
  """Unpacks package described by pkg_state.

  Args:
    pkg_state: EmergeJobState object describing target.

  Returns:
    Exit code returned by subprocess.
  """
  pkgdir = os.environ.get("PKGDIR",
                          os.path.join(os.environ["SYSROOT"], "packages"))
  root = os.environ.get("ROOT", os.environ["SYSROOT"])
  path = os.path.join(pkgdir, pkg_state.target + ".tbz2")
  comp = cros_build_lib.FindCompressor(cros_build_lib.COMP_BZIP2)
  cmd = [comp, "-dc"]
  if comp.endswith("pbzip2"):
    cmd.append("--ignore-trailing-garbage=1")
  cmd.append(path)

  with cros_event.newEvent(task_name="UnpackPackage", **pkg_state) as event:
    result = cros_build_lib.RunCommand(cmd, cwd=root, stdout_to_pipe=True,
                                       print_cmd=False, error_code_ok=True)

    # If we were not successful, return now and don't attempt untar.
    if result.returncode != 0:
      event.fail("error decompressing: returned {}".format(result.returncode))
      return result.returncode

    cmd = ["sudo", "tar", "-xf", "-", "-C", root]

    result = cros_build_lib.RunCommand(cmd, cwd=root, input=result.output,
                                       print_cmd=False, error_code_ok=True)
    if result.returncode != 0:
      event.fail("error extracting: returned {}".format(result.returncode))

  return result.returncode


def EmergeWorker(task_queue, job_queue, emerge, package_db, fetch_only=False,
                 unpack_only=False):
  """This worker emerges any packages given to it on the task_queue.

  Args:
    task_queue: The queue of tasks for this worker to do.
    job_queue: The queue of results from the worker.
    emerge: An EmergeData() object.
    package_db: A dict, mapping package ids to portage Package objects.
    fetch_only: A bool, indicating if we should just fetch the target.
    unpack_only: A bool, indicating if we should just unpack the target.

  It expects package identifiers to be passed to it via task_queue. When
  a task is started, it pushes the (target, filename) to the started_queue.
  The output is stored in filename. When a merge starts or finishes, we push
  EmergeJobState objects to the job_queue.
  """
  if fetch_only:
    mode = 'fetch'
  elif unpack_only:
    mode = 'unpack'
  else:
    mode = 'emerge'
  proctitle.settitle('EmergeWorker', mode, '[idle]')

  SetupWorkerSignals()
  settings, trees, mtimedb = emerge.settings, emerge.trees, emerge.mtimedb

  # Disable flushing of caches to save on I/O.
  root = emerge.settings["ROOT"]
  vardb = emerge.trees[root]["vartree"].dbapi
  vardb._flush_cache_enabled = False  # pylint: disable=protected-access
  bindb = emerge.trees[root]["bintree"].dbapi
  # Might be a set, might be a list, might be None; no clue, just use shallow
  # copy to ensure we can roll it back.
  # pylint: disable=W0212
  original_remotepkgs = copy.copy(bindb.bintree._remotepkgs)

  opts, spinner = emerge.opts, emerge.spinner
  opts["--nodeps"] = True
  if fetch_only:
    opts["--fetchonly"] = True

  while True:
    # Wait for a new item to show up on the queue. This is a blocking wait,
    # so if there's nothing to do, we just sit here.
    pkg_state = task_queue.get()
    if pkg_state is None:
      # If target is None, this means that the main thread wants us to quit.
      # The other workers need to exit too, so we'll push the message back on
      # to the queue so they'll get it too.
      task_queue.put(None)
      return
    if KILLED.is_set():
      return

    target = pkg_state.target
    proctitle.settitle('EmergeWorker', mode, target)

    db_pkg = package_db[target]

    if db_pkg.type_name == "binary":
      if not fetch_only and pkg_state.fetched_successfully:
        # Ensure portage doesn't think our pkg is remote- else it'll force
        # a redownload of it (even if the on-disk file is fine). In-memory
        # caching basically, implemented dumbly.
        bindb.bintree._remotepkgs = None
      else:
        bindb.bintree._remotepkgs = original_remotepkgs

    db_pkg.root_config = emerge.root_config
    install_list = [db_pkg]
    pkgname = db_pkg.pf
    output = tempfile.NamedTemporaryFile(prefix=pkgname + "-", delete=False)
    os.chmod(output.name, 0o644)
David Jamesfcb70ef2011-02-02 16:02:30 -08001110 start_timestamp = time.time()
Brian Harring0be85c62012-03-17 19:52:12 -07001111 job = EmergeJobState(target, pkgname, False, output.name, start_timestamp,
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001112 fetch_only=fetch_only, unpack_only=unpack_only)
David Jamesfcb70ef2011-02-02 16:02:30 -08001113 job_queue.put(job)
1114 if "--pretend" in opts:
1115 retcode = 0
1116 else:
David Jamesfcb70ef2011-02-02 16:02:30 -08001117 try:
David James386ccd12011-05-04 20:17:42 -07001118 emerge.scheduler_graph.mergelist = install_list
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001119 if unpack_only:
1120 retcode = UnpackPackage(pkg_state)
1121 else:
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001122 retcode = EmergeProcess(output, target, settings, trees, mtimedb,
1123 opts, spinner, favorites=emerge.favorites,
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001124 graph_config=emerge.scheduler_graph)
David Jamesfcb70ef2011-02-02 16:02:30 -08001125 except Exception:
1126 traceback.print_exc(file=output)
1127 retcode = 1
David James1ed3e252011-10-05 20:26:15 -07001128 output.close()
David Jamesfcb70ef2011-02-02 16:02:30 -08001129
David James7358d032011-05-19 10:40:03 -07001130 if KILLED.is_set():
1131 return
1132
David Jamesfcb70ef2011-02-02 16:02:30 -08001133 job = EmergeJobState(target, pkgname, True, output.name, start_timestamp,
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001134 retcode, fetch_only=fetch_only,
1135 unpack_only=unpack_only)
David Jamesfcb70ef2011-02-02 16:02:30 -08001136 job_queue.put(job)
1137
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001138 # Set the title back to idle as the multiprocess pool won't destroy us;
1139 # when another job comes up, it'll re-use this process.
1140 proctitle.settitle('EmergeWorker', mode, '[idle]')
1141
David Jamesfcb70ef2011-02-02 16:02:30 -08001142
1143class LinePrinter(object):
1144 """Helper object to print a single line."""
1145
1146 def __init__(self, line):
1147 self.line = line
1148
David James321490a2012-12-17 12:05:56 -08001149 def Print(self, _seek_locations):
Mike Frysinger383367e2014-09-16 15:06:17 -04001150 print(self.line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001151
1152
1153class JobPrinter(object):
1154 """Helper object to print output of a job."""
1155
1156 def __init__(self, job, unlink=False):
1157 """Print output of job.
1158
Mike Frysinger02e1e072013-11-10 22:11:34 -05001159 If unlink is True, unlink the job output file when done.
1160 """
David Jamesfcb70ef2011-02-02 16:02:30 -08001161 self.current_time = time.time()
1162 self.job = job
1163 self.unlink = unlink
1164
1165 def Print(self, seek_locations):
1166
1167 job = self.job
1168
1169 # Calculate how long the job has been running.
1170 seconds = self.current_time - job.start_timestamp
1171
1172 # Note that we've printed out the job so far.
1173 job.last_output_timestamp = self.current_time
1174
1175    # Describe the job and how long it has been running.
1176 info = "job %s (%dm%.1fs)" % (job.pkgname, seconds / 60, seconds % 60)
1177 last_output_seek = seek_locations.get(job.filename, 0)
1178 if last_output_seek:
Mike Frysinger383367e2014-09-16 15:06:17 -04001179 print("=== Continue output for %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001180 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001181 print("=== Start output for %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001182
1183 # Print actual output from job
1184 f = codecs.open(job.filename, encoding='utf-8', errors='replace')
1185 f.seek(last_output_seek)
1186 prefix = job.pkgname + ":"
1187 for line in f:
1188
1189 # Save off our position in the file
1190 if line and line[-1] == "\n":
1191 last_output_seek = f.tell()
1192 line = line[:-1]
1193
1194 # Print our line
Mike Frysinger383367e2014-09-16 15:06:17 -04001195 print(prefix, line.encode('utf-8', 'replace'))
David Jamesfcb70ef2011-02-02 16:02:30 -08001196 f.close()
1197
1198 # Save our last spot in the file so that we don't print out the same
1199 # location twice.
1200 seek_locations[job.filename] = last_output_seek
1201
1202 # Note end of output section
1203 if job.done:
Mike Frysinger383367e2014-09-16 15:06:17 -04001204 print("=== Complete: %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001205 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001206 print("=== Still running: %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001207
1208 if self.unlink:
1209 os.unlink(job.filename)
1210
1211
1212def PrintWorker(queue):
1213 """A worker that prints stuff to the screen as requested."""
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001214 proctitle.settitle('PrintWorker')
David Jamesfcb70ef2011-02-02 16:02:30 -08001215
David James321490a2012-12-17 12:05:56 -08001216 def ExitHandler(_signum, _frame):
David James7358d032011-05-19 10:40:03 -07001217 # Set KILLED flag.
1218 KILLED.set()
1219
David Jamesfcb70ef2011-02-02 16:02:30 -08001220 # Switch to default signal handlers so that we'll die after two signals.
David James7358d032011-05-19 10:40:03 -07001221 signal.signal(signal.SIGINT, KillHandler)
1222 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -08001223
1224 # Don't exit on the first SIGINT / SIGTERM, because the parent worker will
1225 # handle it and tell us when we need to exit.
1226 signal.signal(signal.SIGINT, ExitHandler)
1227 signal.signal(signal.SIGTERM, ExitHandler)
1228
1229 # seek_locations is a map indicating the position we are at in each file.
1230 # It starts off empty, but is set by the various Print jobs as we go along
1231 # to indicate where we left off in each file.
1232 seek_locations = {}
1233 while True:
1234 try:
1235 job = queue.get()
1236 if job:
1237 job.Print(seek_locations)
David Jamesbccf8eb2011-07-27 14:06:06 -07001238 sys.stdout.flush()
David Jamesfcb70ef2011-02-02 16:02:30 -08001239 else:
1240 break
1241 except IOError as ex:
1242 if ex.errno == errno.EINTR:
1243 # Looks like we received a signal. Keep printing.
1244 continue
1245 raise
1246
Brian Harring867e2362012-03-17 04:05:17 -07001247
Brian Harring0be85c62012-03-17 19:52:12 -07001248class TargetState(object):
Chris Ching5fcbd622016-11-28 09:22:15 -07001249  """Tracks the fetch/build scheduling state and priority of one target."""
Brian Harring867e2362012-03-17 04:05:17 -07001250
Brian Harring0be85c62012-03-17 19:52:12 -07001251 __slots__ = ("target", "info", "score", "prefetched", "fetched_successfully")
Brian Harring867e2362012-03-17 04:05:17 -07001252
David James321490a2012-12-17 12:05:56 -08001253 def __init__(self, target, info):
Brian Harring867e2362012-03-17 04:05:17 -07001254 self.target, self.info = target, info
Brian Harring0be85c62012-03-17 19:52:12 -07001255 self.fetched_successfully = False
1256 self.prefetched = False
David James321490a2012-12-17 12:05:56 -08001257 self.score = None
Brian Harring867e2362012-03-17 04:05:17 -07001258 self.update_score()
1259
1260 def __cmp__(self, other):
1261 return cmp(self.score, other.score)
1262
1263 def update_score(self):
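    # Lower tuples sort first in the ScoredHeap, so this prefers targets that
    # unblock the most other packages (tprovides/provides), have the fewest
    # unmet deps (needs), and already have a binary package, using the
    # original index and target name as stable tie-breakers.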
1264 self.score = (
1265 -len(self.info["tprovides"]),
Brian Harring0be85c62012-03-17 19:52:12 -07001266 len(self.info["needs"]),
Brian Harring11c5eeb2012-03-18 11:02:39 -07001267 not self.info["binary"],
Brian Harring867e2362012-03-17 04:05:17 -07001268 -len(self.info["provides"]),
1269 self.info["idx"],
1270 self.target,
1271 )
1272
1273
1274class ScoredHeap(object):
Don Garrett25f309a2014-03-19 14:02:12 -07001275 """Implementation of a general purpose scored heap."""
Brian Harring867e2362012-03-17 04:05:17 -07001276
Brian Harring0be85c62012-03-17 19:52:12 -07001277 __slots__ = ("heap", "_heap_set")
1278
Brian Harring867e2362012-03-17 04:05:17 -07001279 def __init__(self, initial=()):
Brian Harring0be85c62012-03-17 19:52:12 -07001280 self.heap = list()
1281 self._heap_set = set()
1282 if initial:
1283 self.multi_put(initial)
Brian Harring867e2362012-03-17 04:05:17 -07001284
1285 def get(self):
Brian Harring0be85c62012-03-17 19:52:12 -07001286 item = heapq.heappop(self.heap)
1287 self._heap_set.remove(item.target)
1288 return item
Brian Harring867e2362012-03-17 04:05:17 -07001289
Brian Harring0be85c62012-03-17 19:52:12 -07001290 def put(self, item):
1291 if not isinstance(item, TargetState):
1292 raise ValueError("Item %r isn't a TargetState" % (item,))
1293 heapq.heappush(self.heap, item)
1294 self._heap_set.add(item.target)
Brian Harring867e2362012-03-17 04:05:17 -07001295
Brian Harring0be85c62012-03-17 19:52:12 -07001296 def multi_put(self, sequence):
1297 sequence = list(sequence)
1298 self.heap.extend(sequence)
1299 self._heap_set.update(x.target for x in sequence)
Brian Harring867e2362012-03-17 04:05:17 -07001300 self.sort()
1301
David James5c9996d2012-03-24 10:50:46 -07001302 def sort(self):
1303 heapq.heapify(self.heap)
1304
Brian Harring0be85c62012-03-17 19:52:12 -07001305 def __contains__(self, target):
1306 return target in self._heap_set
1307
1308 def __nonzero__(self):
1309 return bool(self.heap)
1310
Brian Harring867e2362012-03-17 04:05:17 -07001311 def __len__(self):
1312 return len(self.heap)
1313
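# Illustrative sketch only (not executed): EmergeQueue below keeps one
# ScoredHeap per stage (fetch/unpack/build) and always pops the best-scored
# TargetState next, roughly:
#
#   ready = ScoredHeap(TargetState(pkg, info) for pkg, info in deps.iteritems())
#   while ready:
#     state = ready.get()   # lowest score, i.e. highest priority
#     ...hand state.target to one of the worker queues...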
1314
David Jamesfcb70ef2011-02-02 16:02:30 -08001315class EmergeQueue(object):
1316 """Class to schedule emerge jobs according to a dependency graph."""
1317
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001318 def __init__(self, deps_map, emerge, package_db, show_output, unpack_only,
1319 max_retries):
David Jamesfcb70ef2011-02-02 16:02:30 -08001320 # Store the dependency graph.
1321 self._deps_map = deps_map
Brian Harring0be85c62012-03-17 19:52:12 -07001322 self._state_map = {}
David Jamesfcb70ef2011-02-02 16:02:30 -08001323 # Initialize the running queue to empty
Brian Harring0be85c62012-03-17 19:52:12 -07001324 self._build_jobs = {}
1325 self._build_ready = ScoredHeap()
1326 self._fetch_jobs = {}
1327 self._fetch_ready = ScoredHeap()
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001328 self._unpack_jobs = {}
1329 self._unpack_ready = ScoredHeap()
David Jamesfcb70ef2011-02-02 16:02:30 -08001330 # List of total package installs represented in deps_map.
1331 install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"]
1332 self._total_jobs = len(install_jobs)
1333 self._show_output = show_output
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001334 self._unpack_only = unpack_only
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001335 self._max_retries = max_retries
David Jamesfcb70ef2011-02-02 16:02:30 -08001336
1337 if "--pretend" in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001338 print("Skipping merge because of --pretend mode.")
David Jamesfcb70ef2011-02-02 16:02:30 -08001339 sys.exit(0)
1340
David Jamesaaf49e42014-04-24 09:40:05 -07001341 # Set up a session so we can easily terminate all children.
1342 self._SetupSession()
David James7358d032011-05-19 10:40:03 -07001343
David Jamesfcb70ef2011-02-02 16:02:30 -08001344 # Setup scheduler graph object. This is used by the child processes
1345 # to help schedule jobs.
1346 emerge.scheduler_graph = emerge.depgraph.schedulerGraph()
1347
1348 # Calculate how many jobs we can run in parallel. We don't want to pass
1349 # the --jobs flag over to emerge itself, because that'll tell emerge to
1350 # hide its output, and said output is quite useful for debugging hung
1351 # jobs.
1352 procs = min(self._total_jobs,
1353 emerge.opts.pop("--jobs", multiprocessing.cpu_count()))
Nam T. Nguyenf7098b32015-05-08 11:04:48 -07001354 self._build_procs = self._unpack_procs = max(1, procs)
1355 # Fetch is IO bound, we can use more processes.
1356 self._fetch_procs = max(4, procs)
David James8c7e5e32011-06-28 11:26:03 -07001357 self._load_avg = emerge.opts.pop("--load-average", None)
David Jamesfcb70ef2011-02-02 16:02:30 -08001358 self._job_queue = multiprocessing.Queue()
1359 self._print_queue = multiprocessing.Queue()
Brian Harring0be85c62012-03-17 19:52:12 -07001360
1361 self._fetch_queue = multiprocessing.Queue()
1362 args = (self._fetch_queue, self._job_queue, emerge, package_db, True)
1363 self._fetch_pool = multiprocessing.Pool(self._fetch_procs, EmergeWorker,
1364 args)
1365
1366 self._build_queue = multiprocessing.Queue()
1367 args = (self._build_queue, self._job_queue, emerge, package_db)
1368 self._build_pool = multiprocessing.Pool(self._build_procs, EmergeWorker,
1369 args)
1370
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001371 if self._unpack_only:
1372 # Unpack pool only required on unpack_only jobs.
1373 self._unpack_queue = multiprocessing.Queue()
1374 args = (self._unpack_queue, self._job_queue, emerge, package_db, False,
1375 True)
1376 self._unpack_pool = multiprocessing.Pool(self._unpack_procs, EmergeWorker,
1377 args)
1378
David Jamesfcb70ef2011-02-02 16:02:30 -08001379 self._print_worker = multiprocessing.Process(target=PrintWorker,
1380 args=[self._print_queue])
1381 self._print_worker.start()
1382
1383 # Initialize the failed queue to empty.
1384 self._retry_queue = []
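    # Map of target -> number of failures so far. A failed target is retried
    # via self._retry_queue until it has failed self._max_retries times.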
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001385 self._failed_count = dict()
David Jamesfcb70ef2011-02-02 16:02:30 -08001386
David Jamesfcb70ef2011-02-02 16:02:30 -08001387 # Setup an exit handler so that we print nice messages if we are
1388 # terminated.
1389 self._SetupExitHandler()
1390
1391 # Schedule our jobs.
Brian Harring0be85c62012-03-17 19:52:12 -07001392 self._state_map.update(
1393 (pkg, TargetState(pkg, data)) for pkg, data in deps_map.iteritems())
1394 self._fetch_ready.multi_put(self._state_map.itervalues())
David Jamesfcb70ef2011-02-02 16:02:30 -08001395
David Jamesaaf49e42014-04-24 09:40:05 -07001396 def _SetupSession(self):
1397 """Set up a session so we can easily terminate all children."""
1398 # When we call os.setsid(), this sets up a session / process group for this
1399 # process and all children. These session groups are needed so that we can
1400 # easily kill all children (including processes launched by emerge) before
1401 # we exit.
1402 #
1403 # One unfortunate side effect of os.setsid() is that it blocks CTRL-C from
1404 # being received. To work around this, we only call os.setsid() in a forked
1405 # process, so that the parent can still watch for CTRL-C. The parent will
1406 # just sit around, watching for signals and propagating them to the child,
1407 # until the child exits.
1408 #
1409 # TODO(davidjames): It would be nice if we could replace this with cgroups.
1410 pid = os.fork()
1411 if pid == 0:
1412 os.setsid()
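      # The child just returns from _SetupSession and carries on as the real
      # EmergeQueue process inside the new session; the parent below only
      # forwards signals and never returns normally.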
1413 else:
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001414 proctitle.settitle('SessionManager')
1415
David Jamesaaf49e42014-04-24 09:40:05 -07001416 def PropagateToChildren(signum, _frame):
1417 # Just propagate the signals down to the child. We'll exit when the
1418 # child does.
1419 try:
1420 os.kill(pid, signum)
1421 except OSError as ex:
1422 if ex.errno != errno.ESRCH:
1423 raise
1424 signal.signal(signal.SIGINT, PropagateToChildren)
1425 signal.signal(signal.SIGTERM, PropagateToChildren)
1426
1427 def StopGroup(_signum, _frame):
1428 # When we get stopped, stop the children.
1429 try:
1430 os.killpg(pid, signal.SIGSTOP)
1431 os.kill(0, signal.SIGSTOP)
1432 except OSError as ex:
1433 if ex.errno != errno.ESRCH:
1434 raise
1435 signal.signal(signal.SIGTSTP, StopGroup)
1436
1437 def ContinueGroup(_signum, _frame):
1438 # Launch the children again after being stopped.
1439 try:
1440 os.killpg(pid, signal.SIGCONT)
1441 except OSError as ex:
1442 if ex.errno != errno.ESRCH:
1443 raise
1444 signal.signal(signal.SIGCONT, ContinueGroup)
1445
1446 # Loop until the children exit. We exit with os._exit to be sure we
1447 # don't run any finalizers (those will be run by the child process.)
1448 # pylint: disable=W0212
1449 while True:
1450 try:
1451 # Wait for the process to exit. When it does, exit with the return
1452 # value of the subprocess.
Mike Frysingere2d8f0d2014-11-01 13:09:26 -04001453 os._exit(process_util.GetExitStatus(os.waitpid(pid, 0)[1]))
David Jamesaaf49e42014-04-24 09:40:05 -07001454 except OSError as ex:
1455 if ex.errno == errno.EINTR:
1456 continue
1457 traceback.print_exc()
1458 os._exit(1)
1459 except BaseException:
1460 traceback.print_exc()
1461 os._exit(1)
1462
David Jamesfcb70ef2011-02-02 16:02:30 -08001463 def _SetupExitHandler(self):
1464
David James321490a2012-12-17 12:05:56 -08001465 def ExitHandler(signum, _frame):
David James7358d032011-05-19 10:40:03 -07001466 # Set KILLED flag.
1467 KILLED.set()
David Jamesfcb70ef2011-02-02 16:02:30 -08001468
1469 # Kill our signal handlers so we don't get called recursively
David James7358d032011-05-19 10:40:03 -07001470 signal.signal(signal.SIGINT, KillHandler)
1471 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -08001472
1473 # Print our current job status
Brian Harring0be85c62012-03-17 19:52:12 -07001474 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001475 if job:
1476 self._print_queue.put(JobPrinter(job, unlink=True))
1477
1478 # Notify the user that we are exiting
1479 self._Print("Exiting on signal %s" % signum)
David James7358d032011-05-19 10:40:03 -07001480 self._print_queue.put(None)
1481 self._print_worker.join()
David Jamesfcb70ef2011-02-02 16:02:30 -08001482
1483 # Kill child threads, then exit.
David James7358d032011-05-19 10:40:03 -07001484 os.killpg(0, signal.SIGKILL)
David Jamesfcb70ef2011-02-02 16:02:30 -08001485 sys.exit(1)
1486
1487 # Print out job status when we are killed
1488 signal.signal(signal.SIGINT, ExitHandler)
1489 signal.signal(signal.SIGTERM, ExitHandler)
1490
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001491 def _ScheduleUnpack(self, pkg_state):
1492 self._unpack_jobs[pkg_state.target] = None
1493 self._unpack_queue.put(pkg_state)
1494
Brian Harring0be85c62012-03-17 19:52:12 -07001495 def _Schedule(self, pkg_state):
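    # Returns True only when a build job was actually queued; _Retry() relies
    # on that return value to know whether the retried package is now running.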
David Jamesfcb70ef2011-02-02 16:02:30 -08001496    # We maintain a tree of all deps; if this package doesn't need
David James8c7e5e32011-06-28 11:26:03 -07001497    # to be installed, just free up its children and continue.
David Jamesfcb70ef2011-02-02 16:02:30 -08001498    # It is possible to reinstall deps of deps without reinstalling
1499    # first-level deps, like so:
Mike Frysingerfd969312014-04-02 22:16:42 -04001500 # virtual/target-os (merge) -> eselect (nomerge) -> python (merge)
Brian Harring0be85c62012-03-17 19:52:12 -07001501 this_pkg = pkg_state.info
1502 target = pkg_state.target
1503 if pkg_state.info is not None:
1504 if this_pkg["action"] == "nomerge":
1505 self._Finish(target)
1506 elif target not in self._build_jobs:
1507 # Kick off the build if it's marked to be built.
1508 self._build_jobs[target] = None
1509 self._build_queue.put(pkg_state)
1510 return True
David Jamesfcb70ef2011-02-02 16:02:30 -08001511
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001512 def _ScheduleLoop(self, unpack_only=False):
1513 if unpack_only:
1514 ready_queue = self._unpack_ready
1515 jobs_queue = self._unpack_jobs
1516 procs = self._unpack_procs
1517 else:
1518 ready_queue = self._build_ready
1519 jobs_queue = self._build_jobs
1520 procs = self._build_procs
1521
David James8c7e5e32011-06-28 11:26:03 -07001522 # If the current load exceeds our desired load average, don't schedule
1523 # more than one job.
1524 if self._load_avg and os.getloadavg()[0] > self._load_avg:
1525 needed_jobs = 1
1526 else:
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001527 needed_jobs = procs
David James8c7e5e32011-06-28 11:26:03 -07001528
1529 # Schedule more jobs.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001530 while ready_queue and len(jobs_queue) < needed_jobs:
1531 state = ready_queue.get()
1532 if unpack_only:
1533 self._ScheduleUnpack(state)
1534 else:
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001535 if state.target not in self._failed_count:
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001536 self._Schedule(state)
David Jamesfcb70ef2011-02-02 16:02:30 -08001537
1538 def _Print(self, line):
1539 """Print a single line."""
1540 self._print_queue.put(LinePrinter(line))
1541
1542 def _Status(self):
1543 """Print status."""
1544 current_time = time.time()
Aviv Keshet3b381682015-11-12 13:15:06 -08001545 current_time_struct = time.localtime(current_time)
David Jamesfcb70ef2011-02-02 16:02:30 -08001546 no_output = True
1547
1548 # Print interim output every minute if --show-output is used. Otherwise,
1549 # print notifications about running packages every 2 minutes, and print
1550 # full output for jobs that have been running for 60 minutes or more.
1551 if self._show_output:
1552 interval = 60
1553 notify_interval = 0
1554 else:
1555 interval = 60 * 60
1556 notify_interval = 60 * 2
David James321490a2012-12-17 12:05:56 -08001557 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001558 if job:
1559 last_timestamp = max(job.start_timestamp, job.last_output_timestamp)
1560 if last_timestamp + interval < current_time:
1561 self._print_queue.put(JobPrinter(job))
1562 job.last_output_timestamp = current_time
1563 no_output = False
1564 elif (notify_interval and
1565 job.last_notify_timestamp + notify_interval < current_time):
1566 job_seconds = current_time - job.start_timestamp
1567 args = (job.pkgname, job_seconds / 60, job_seconds % 60, job.filename)
1568 info = "Still building %s (%dm%.1fs). Logs in %s" % args
1569 job.last_notify_timestamp = current_time
1570 self._Print(info)
1571 no_output = False
1572
1573 # If we haven't printed any messages yet, print a general status message
1574 # here.
1575 if no_output:
1576 seconds = current_time - GLOBAL_START
Brian Harring0be85c62012-03-17 19:52:12 -07001577 fjobs, fready = len(self._fetch_jobs), len(self._fetch_ready)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001578 ujobs, uready = len(self._unpack_jobs), len(self._unpack_ready)
Brian Harring0be85c62012-03-17 19:52:12 -07001579 bjobs, bready = len(self._build_jobs), len(self._build_ready)
1580 retries = len(self._retry_queue)
1581 pending = max(0, len(self._deps_map) - fjobs - bjobs)
1582 line = "Pending %s/%s, " % (pending, self._total_jobs)
1583 if fjobs or fready:
1584 line += "Fetching %s/%s, " % (fjobs, fready + fjobs)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001585 if ujobs or uready:
1586 line += "Unpacking %s/%s, " % (ujobs, uready + ujobs)
Brian Harring0be85c62012-03-17 19:52:12 -07001587 if bjobs or bready or retries:
1588 line += "Building %s/%s, " % (bjobs, bready + bjobs)
1589 if retries:
1590 line += "Retrying %s, " % (retries,)
Mike Frysingerd6e2df02014-11-26 02:55:04 -05001591 load = " ".join(str(x) for x in os.getloadavg())
Aviv Keshet3b381682015-11-12 13:15:06 -08001592 line += ("[Time %s | Elapsed %dm%.1fs | Load %s]" % (
1593 time.strftime('%H:%M:%S', current_time_struct), seconds / 60,
1594 seconds % 60, load))
Brian Harring0be85c62012-03-17 19:52:12 -07001595 self._Print(line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001596
1597 def _Finish(self, target):
David James8c7e5e32011-06-28 11:26:03 -07001598 """Mark a target as completed and unblock dependencies."""
1599 this_pkg = self._deps_map[target]
1600 if this_pkg["needs"] and this_pkg["nodeps"]:
1601 # We got installed, but our deps have not been installed yet. Dependent
1602 # packages should only be installed when our needs have been fully met.
1603 this_pkg["action"] = "nomerge"
1604 else:
David James8c7e5e32011-06-28 11:26:03 -07001605 for dep in this_pkg["provides"]:
1606 dep_pkg = self._deps_map[dep]
Brian Harring0be85c62012-03-17 19:52:12 -07001607 state = self._state_map[dep]
David James8c7e5e32011-06-28 11:26:03 -07001608 del dep_pkg["needs"][target]
Brian Harring0be85c62012-03-17 19:52:12 -07001609 state.update_score()
1610 if not state.prefetched:
1611 if dep in self._fetch_ready:
1612 # If it's not currently being fetched, update the prioritization
1613 self._fetch_ready.sort()
1614 elif not dep_pkg["needs"]:
David James8c7e5e32011-06-28 11:26:03 -07001615 if dep_pkg["nodeps"] and dep_pkg["action"] == "nomerge":
1616 self._Finish(dep)
1617 else:
Brian Harring0be85c62012-03-17 19:52:12 -07001618 self._build_ready.put(self._state_map[dep])
David James8c7e5e32011-06-28 11:26:03 -07001619 self._deps_map.pop(target)
David Jamesfcb70ef2011-02-02 16:02:30 -08001620
1621 def _Retry(self):
David James8c7e5e32011-06-28 11:26:03 -07001622 while self._retry_queue:
Brian Harring0be85c62012-03-17 19:52:12 -07001623 state = self._retry_queue.pop(0)
1624 if self._Schedule(state):
1625 self._Print("Retrying emerge of %s." % state.target)
David James8c7e5e32011-06-28 11:26:03 -07001626 break
David Jamesfcb70ef2011-02-02 16:02:30 -08001627
Brian Harringa43f5952012-04-12 01:19:34 -07001628 def _Shutdown(self):
David Jamesfcb70ef2011-02-02 16:02:30 -08001629 # Tell emerge workers to exit. They all exit when 'None' is pushed
1630 # to the queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001631
Brian Harringa43f5952012-04-12 01:19:34 -07001632 # Shutdown the workers first; then jobs (which is how they feed things back)
1633 # then finally the print queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001634
Brian Harringa43f5952012-04-12 01:19:34 -07001635 def _stop(queue, pool):
1636 if pool is None:
1637 return
1638 try:
1639 queue.put(None)
1640 pool.close()
1641 pool.join()
1642 finally:
1643 pool.terminate()
Brian Harring0be85c62012-03-17 19:52:12 -07001644
Brian Harringa43f5952012-04-12 01:19:34 -07001645 _stop(self._fetch_queue, self._fetch_pool)
1646 self._fetch_queue = self._fetch_pool = None
Brian Harring0be85c62012-03-17 19:52:12 -07001647
Brian Harringa43f5952012-04-12 01:19:34 -07001648 _stop(self._build_queue, self._build_pool)
1649 self._build_queue = self._build_pool = None
1650
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001651 if self._unpack_only:
1652 _stop(self._unpack_queue, self._unpack_pool)
1653 self._unpack_queue = self._unpack_pool = None
1654
Brian Harringa43f5952012-04-12 01:19:34 -07001655 if self._job_queue is not None:
1656 self._job_queue.close()
1657 self._job_queue = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001658
1659 # Now that our workers are finished, we can kill the print queue.
Brian Harringa43f5952012-04-12 01:19:34 -07001660 if self._print_worker is not None:
1661 try:
1662 self._print_queue.put(None)
1663 self._print_queue.close()
1664 self._print_worker.join()
1665 finally:
1666 self._print_worker.terminate()
1667 self._print_queue = self._print_worker = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001668
1669 def Run(self):
1670 """Run through the scheduled ebuilds.
1671
1672 Keep running so long as we have uninstalled packages in the
1673 dependency graph to merge.
1674 """
Brian Harringa43f5952012-04-12 01:19:34 -07001675 if not self._deps_map:
1676 return
1677
Brian Harring0be85c62012-03-17 19:52:12 -07001678 # Start the fetchers.
1679 for _ in xrange(min(self._fetch_procs, len(self._fetch_ready))):
1680 state = self._fetch_ready.get()
1681 self._fetch_jobs[state.target] = None
1682 self._fetch_queue.put(state)
1683
1684 # Print an update, then get going.
1685 self._Status()
1686
David Jamesfcb70ef2011-02-02 16:02:30 -08001687 while self._deps_map:
1688 # Check here that we are actually waiting for something.
Brian Harring0be85c62012-03-17 19:52:12 -07001689 if (self._build_queue.empty() and
David Jamesfcb70ef2011-02-02 16:02:30 -08001690 self._job_queue.empty() and
Brian Harring0be85c62012-03-17 19:52:12 -07001691 not self._fetch_jobs and
1692 not self._fetch_ready and
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001693 not self._unpack_jobs and
1694 not self._unpack_ready and
Brian Harring0be85c62012-03-17 19:52:12 -07001695 not self._build_jobs and
1696 not self._build_ready and
David Jamesfcb70ef2011-02-02 16:02:30 -08001697 self._deps_map):
1698 # If we have failed on a package, retry it now.
1699 if self._retry_queue:
1700 self._Retry()
1701 else:
David Jamesfcb70ef2011-02-02 16:02:30 -08001702 # Tell the user why we're exiting.
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001703 if self._failed_count:
1704 print('Packages failed:\n\t%s' %
1705 '\n\t'.join(self._failed_count.iterkeys()))
David James0eae23e2012-07-03 15:04:25 -07001706 status_file = os.environ.get("PARALLEL_EMERGE_STATUS_FILE")
1707 if status_file:
David James321490a2012-12-17 12:05:56 -08001708 failed_pkgs = set(portage.versions.cpv_getkey(x)
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001709 for x in self._failed_count.iterkeys())
David James0eae23e2012-07-03 15:04:25 -07001710 with open(status_file, "a") as f:
1711 f.write("%s\n" % " ".join(failed_pkgs))
David Jamesfcb70ef2011-02-02 16:02:30 -08001712 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001713 print("Deadlock! Circular dependencies!")
David Jamesfcb70ef2011-02-02 16:02:30 -08001714 sys.exit(1)
1715
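      # Poll for finished jobs for up to a minute (12 tries x 5s timeout),
      # topping up the schedule between attempts; the for/else below prints a
      # status update if nothing arrived in that time.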
David James321490a2012-12-17 12:05:56 -08001716 for _ in xrange(12):
David Jamesa74289a2011-08-12 10:41:24 -07001717 try:
1718 job = self._job_queue.get(timeout=5)
1719 break
1720 except Queue.Empty:
1721 # Check if any more jobs can be scheduled.
1722 self._ScheduleLoop()
1723 else:
Brian Harring706747c2012-03-16 03:04:31 -07001724 # Print an update every 60 seconds.
David Jamesfcb70ef2011-02-02 16:02:30 -08001725 self._Status()
1726 continue
1727
1728 target = job.target
1729
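      # A message on the job queue can come from any worker pool: handle
      # fetch results first, then unpack results, then build results.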
Brian Harring0be85c62012-03-17 19:52:12 -07001730 if job.fetch_only:
1731 if not job.done:
1732 self._fetch_jobs[job.target] = job
1733 else:
1734 state = self._state_map[job.target]
1735 state.prefetched = True
1736 state.fetched_successfully = (job.retcode == 0)
1737 del self._fetch_jobs[job.target]
1738 self._Print("Fetched %s in %2.2fs"
1739 % (target, time.time() - job.start_timestamp))
1740
1741 if self._show_output or job.retcode != 0:
1742 self._print_queue.put(JobPrinter(job, unlink=True))
1743 else:
1744 os.unlink(job.filename)
1745 # Failure or not, let build work with it next.
1746 if not self._deps_map[job.target]["needs"]:
1747 self._build_ready.put(state)
1748 self._ScheduleLoop()
1749
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001750 if self._unpack_only and job.retcode == 0:
1751 self._unpack_ready.put(state)
1752 self._ScheduleLoop(unpack_only=True)
1753
Brian Harring0be85c62012-03-17 19:52:12 -07001754 if self._fetch_ready:
1755 state = self._fetch_ready.get()
1756 self._fetch_queue.put(state)
1757 self._fetch_jobs[state.target] = None
1758 else:
1759 # Minor optimization; shut down fetchers early since we know
1760 # the queue is empty.
1761 self._fetch_queue.put(None)
1762 continue
1763
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001764 if job.unpack_only:
1765 if not job.done:
1766 self._unpack_jobs[target] = job
1767 else:
1768 del self._unpack_jobs[target]
1769 self._Print("Unpacked %s in %2.2fs"
1770 % (target, time.time() - job.start_timestamp))
1771 if self._show_output or job.retcode != 0:
1772 self._print_queue.put(JobPrinter(job, unlink=True))
1773 else:
1774 os.unlink(job.filename)
1775 if self._unpack_ready:
1776 state = self._unpack_ready.get()
1777 self._unpack_queue.put(state)
1778 self._unpack_jobs[state.target] = None
1779 continue
1780
David Jamesfcb70ef2011-02-02 16:02:30 -08001781 if not job.done:
Brian Harring0be85c62012-03-17 19:52:12 -07001782 self._build_jobs[target] = job
David Jamesfcb70ef2011-02-02 16:02:30 -08001783 self._Print("Started %s (logged in %s)" % (target, job.filename))
1784 continue
1785
1786 # Print output of job
1787 if self._show_output or job.retcode != 0:
1788 self._print_queue.put(JobPrinter(job, unlink=True))
1789 else:
1790 os.unlink(job.filename)
Brian Harring0be85c62012-03-17 19:52:12 -07001791 del self._build_jobs[target]
David Jamesfcb70ef2011-02-02 16:02:30 -08001792
1793 seconds = time.time() - job.start_timestamp
1794 details = "%s (in %dm%.1fs)" % (target, seconds / 60, seconds % 60)
1795
1796 # Complain if necessary.
1797 if job.retcode != 0:
1798 # Handle job failure.
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001799 failed_count = self._failed_count.get(target, 0)
1800 if failed_count >= self._max_retries:
1801 # If this job has failed and can't be retried, give up.
David Jamesfcb70ef2011-02-02 16:02:30 -08001802 self._Print("Failed %s. Your build has failed." % details)
1803 else:
1804 # Queue up this build to try again after a long while.
Brian Harring0be85c62012-03-17 19:52:12 -07001805 self._retry_queue.append(self._state_map[target])
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001806 self._failed_count[target] = failed_count + 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001807 self._Print("Failed %s, retrying later." % details)
1808 else:
David James32420cc2011-08-25 21:32:46 -07001809 self._Print("Completed %s" % details)
1810
1811 # Mark as completed and unblock waiting ebuilds.
1812 self._Finish(target)
1813
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001814 if target in self._failed_count and self._retry_queue:
David Jamesfcb70ef2011-02-02 16:02:30 -08001815 # If we have successfully retried a failed package, and there
1816 # are more failed packages, try the next one. We will only have
1817 # one retrying package actively running at a time.
1818 self._Retry()
1819
David Jamesfcb70ef2011-02-02 16:02:30 -08001820
David James8c7e5e32011-06-28 11:26:03 -07001821 # Schedule pending jobs and print an update.
1822 self._ScheduleLoop()
1823 self._Status()
David Jamesfcb70ef2011-02-02 16:02:30 -08001824
David Jamese703d0f2012-01-12 16:27:45 -08001825 # If packages were retried, output a warning.
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001826 if self._failed_count:
David Jamese703d0f2012-01-12 16:27:45 -08001827 self._Print("")
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001828      self._Print("WARNING: The following packages failed one or more times,")
David Jamese703d0f2012-01-12 16:27:45 -08001829 self._Print("but succeeded upon retry. This might indicate incorrect")
1830 self._Print("dependencies.")
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001831 for pkg in self._failed_count.iterkeys():
David Jamese703d0f2012-01-12 16:27:45 -08001832 self._Print(" %s" % pkg)
1833 self._Print("@@@STEP_WARNINGS@@@")
1834 self._Print("")
1835
David Jamesfcb70ef2011-02-02 16:02:30 -08001836 # Tell child threads to exit.
1837 self._Print("Merge complete")
David Jamesfcb70ef2011-02-02 16:02:30 -08001838
1839
Brian Harring30675052012-02-29 12:18:22 -08001840def main(argv):
Brian Harring8294d652012-05-23 02:20:52 -07001841 try:
1842 return real_main(argv)
1843 finally:
1844 # Work around multiprocessing sucking and not cleaning up after itself.
1845    # http://bugs.python.org/issue4106
1846    # Step one: ensure GC is run *prior* to the VM starting shutdown.
1847 gc.collect()
1848    # Step two: go looking for those threads and try to manually reap
1849 # them if we can.
1850 for x in threading.enumerate():
1851 # Filter on the name, and ident; if ident is None, the thread
1852 # wasn't started.
1853 if x.name == 'QueueFeederThread' and x.ident is not None:
1854 x.join(1)
David Jamesfcb70ef2011-02-02 16:02:30 -08001855
Brian Harring8294d652012-05-23 02:20:52 -07001856
1857def real_main(argv):
Brian Harring30675052012-02-29 12:18:22 -08001858 parallel_emerge_args = argv[:]
David Jamesfcb70ef2011-02-02 16:02:30 -08001859 deps = DepGraphGenerator()
Brian Harring30675052012-02-29 12:18:22 -08001860 deps.Initialize(parallel_emerge_args)
David Jamesfcb70ef2011-02-02 16:02:30 -08001861 emerge = deps.emerge
1862
1863 if emerge.action is not None:
Brian Harring30675052012-02-29 12:18:22 -08001864 argv = deps.ParseParallelEmergeArgs(argv)
Brian Harring8294d652012-05-23 02:20:52 -07001865 return emerge_main(argv)
David Jamesfcb70ef2011-02-02 16:02:30 -08001866 elif not emerge.cmdline_packages:
1867 Usage()
Brian Harring8294d652012-05-23 02:20:52 -07001868 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001869
1870 # Unless we're in pretend mode, there's not much point running without
1871 # root access. We need to be able to install packages.
1872 #
1873 # NOTE: Even if you're running --pretend, it's a good idea to run
1874 # parallel_emerge with root access so that portage can write to the
1875 # dependency cache. This is important for performance.
David James321490a2012-12-17 12:05:56 -08001876 if "--pretend" not in emerge.opts and portage.data.secpass < 2:
Mike Frysinger383367e2014-09-16 15:06:17 -04001877 print("parallel_emerge: superuser access is required.")
Brian Harring8294d652012-05-23 02:20:52 -07001878 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001879
1880 if "--quiet" not in emerge.opts:
1881 cmdline_packages = " ".join(emerge.cmdline_packages)
Mike Frysinger383367e2014-09-16 15:06:17 -04001882 print("Starting fast-emerge.")
1883 print(" Building package %s on %s" % (cmdline_packages,
Gilad Arnold05f94b02015-05-22 10:41:05 -07001884 deps.sysroot or "root"))
David Jamesfcb70ef2011-02-02 16:02:30 -08001885
David James386ccd12011-05-04 20:17:42 -07001886 deps_tree, deps_info = deps.GenDependencyTree()
David Jamesfcb70ef2011-02-02 16:02:30 -08001887
1888 # You want me to be verbose? I'll give you two trees! Twice as much value.
1889 if "--tree" in emerge.opts and "--verbose" in emerge.opts:
1890 deps.PrintTree(deps_tree)
1891
David James386ccd12011-05-04 20:17:42 -07001892 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001893
1894 # OK, time to print out our progress so far.
1895 deps.PrintInstallPlan(deps_graph)
1896 if "--tree" in emerge.opts:
1897 PrintDepsMap(deps_graph)
1898
1899 # Are we upgrading portage? If so, and there are more packages to merge,
1900 # schedule a restart of parallel_emerge to merge the rest. This ensures that
1901 # we pick up all updates to portage settings before merging any more
1902 # packages.
1903 portage_upgrade = False
1904 root = emerge.settings["ROOT"]
Don Garrett25f309a2014-03-19 14:02:12 -07001905 # pylint: disable=W0212
David Jamesfcb70ef2011-02-02 16:02:30 -08001906 if root == "/":
Gilad Arnoldcead28a2015-05-22 10:45:02 -07001907 final_db = emerge.depgraph._dynamic_config._filtered_trees[root]['graph_db']
David Jamesfcb70ef2011-02-02 16:02:30 -08001908 for db_pkg in final_db.match_pkgs("sys-apps/portage"):
1909 portage_pkg = deps_graph.get(db_pkg.cpv)
David James0ff16f22012-11-02 14:18:07 -07001910 if portage_pkg:
David Jamesfcb70ef2011-02-02 16:02:30 -08001911 portage_upgrade = True
1912 if "--quiet" not in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001913 print("Upgrading portage first, then restarting...")
David Jamesfcb70ef2011-02-02 16:02:30 -08001914
David James0ff16f22012-11-02 14:18:07 -07001915 # Upgrade Portage first, then the rest of the packages.
1916 #
1917 # In order to grant the child permission to run setsid, we need to run sudo
1918 # again. We preserve SUDO_USER here in case an ebuild depends on it.
1919 if portage_upgrade:
1920 # Calculate what arguments to use when re-invoking.
1921 args = ["sudo", "-E", "SUDO_USER=%s" % os.environ.get("SUDO_USER", "")]
1922 args += [os.path.abspath(sys.argv[0])] + parallel_emerge_args
1923 args += ["--exclude=sys-apps/portage"]
1924
1925 # First upgrade Portage.
1926 passthrough_args = ("--quiet", "--pretend", "--verbose")
1927 emerge_args = [k for k in emerge.opts if k in passthrough_args]
1928 ret = emerge_main(emerge_args + ["portage"])
1929 if ret != 0:
1930 return ret
1931
1932 # Now upgrade the rest.
1933 os.execvp(args[0], args)
1934
Bertrand SIMONNETc03c8ee2014-12-10 17:02:55 -08001935 # Attempt to solve crbug.com/433482
1936 # The file descriptor error appears only when getting userpriv_groups
1937 # (lazily generated). Loading userpriv_groups here will reduce the number of
1938  # calls from a few hundred to one.
1939 portage.data._get_global('userpriv_groups')
1940
David Jamesfcb70ef2011-02-02 16:02:30 -08001941 # Run the queued emerges.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001942 scheduler = EmergeQueue(deps_graph, emerge, deps.package_db, deps.show_output,
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001943 deps.unpack_only, deps.max_retries)
Brian Harringa43f5952012-04-12 01:19:34 -07001944 try:
1945 scheduler.Run()
1946 finally:
Don Garrett25f309a2014-03-19 14:02:12 -07001947 # pylint: disable=W0212
Brian Harringa43f5952012-04-12 01:19:34 -07001948 scheduler._Shutdown()
David James97ce8902011-08-16 09:51:05 -07001949 scheduler = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001950
Mike Frysingerd20a6e22012-10-04 19:01:10 -04001951 clean_logs(emerge.settings)
1952
Mike Frysinger383367e2014-09-16 15:06:17 -04001953 print("Done")
Brian Harring8294d652012-05-23 02:20:52 -07001954 return 0