Mike Frysinger0a647fc2012-08-06 14:36:05 -04001# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
David Jamesfcb70ef2011-02-02 16:02:30 -08002# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5"""Program to run emerge in parallel, for significant speedup.
6
7Usage:
David James386ccd12011-05-04 20:17:42 -07008 ./parallel_emerge [--board=BOARD] [--workon=PKGS]
David Jamesfcb70ef2011-02-02 16:02:30 -08009 [--force-remote-binary=PKGS] [emerge args] package
10
David James78b6cd92012-04-02 21:36:12 -070011This script runs multiple emerge processes in parallel, using appropriate
12Portage APIs. It is faster than standard emerge because it has a
13multiprocess model instead of an asynchronous model.
David Jamesfcb70ef2011-02-02 16:02:30 -080014"""
15
Mike Frysinger383367e2014-09-16 15:06:17 -040016from __future__ import print_function
17
David Jamesfcb70ef2011-02-02 16:02:30 -080018import codecs
19import copy
20import errno
Brian Harring8294d652012-05-23 02:20:52 -070021import gc
David James8c7e5e32011-06-28 11:26:03 -070022import heapq
David Jamesfcb70ef2011-02-02 16:02:30 -080023import multiprocessing
24import os
Mike Frysinger1ae28092013-10-17 17:17:22 -040025try:
26 import Queue
27except ImportError:
  # Python 3 renamed the module to "queue". We still import it as Queue to
  # avoid collisions with variables named "queue". Maybe we'll transition at
  # some point.
30 # pylint: disable=F0401
31 import queue as Queue
David Jamesfcb70ef2011-02-02 16:02:30 -080032import signal
Bertrand SIMONNET19d789e2014-12-09 13:36:31 -080033import subprocess
David Jamesfcb70ef2011-02-02 16:02:30 -080034import sys
35import tempfile
Brian Harring8294d652012-05-23 02:20:52 -070036import threading
David Jamesfcb70ef2011-02-02 16:02:30 -080037import time
38import traceback
David Jamesfcb70ef2011-02-02 16:02:30 -080039
Thiago Goncalesf4acc422013-07-17 10:26:35 -070040from chromite.lib import cros_build_lib
Chris Ching5fcbd622016-11-28 09:22:15 -070041from chromite.lib import cros_event
Chris Chingb8eba812017-06-22 09:54:48 -060042from chromite.lib import portage_util
Mike Frysingere2d8f0d2014-11-01 13:09:26 -040043from chromite.lib import process_util
Mike Frysingerd74fe4a2014-04-24 11:43:38 -040044from chromite.lib import proctitle
Thiago Goncalesf4acc422013-07-17 10:26:35 -070045
David Jamesfcb70ef2011-02-02 16:02:30 -080046# If PORTAGE_USERNAME isn't specified, scrape it from the $HOME variable. On
47# Chromium OS, the default "portage" user doesn't have the necessary
48# permissions. It'd be easier if we could default to $USERNAME, but $USERNAME
49# is "root" here because we get called through sudo.
50#
51# We need to set this before importing any portage modules, because portage
52# looks up "PORTAGE_USERNAME" at import time.
53#
54# NOTE: .bashrc sets PORTAGE_USERNAME = $USERNAME, so most people won't
55# encounter this case unless they have an old chroot or blow away the
56# environment by running sudo without the -E specifier.
57if "PORTAGE_USERNAME" not in os.environ:
58 homedir = os.environ.get("HOME")
59 if homedir:
60 os.environ["PORTAGE_USERNAME"] = os.path.basename(homedir)
61
# Wrap Popen with a lock to ensure no two Popen calls are executed
# simultaneously in the same process.
# Two Popen calls at the same time might be the cause of crbug.com/433482.
65_popen_lock = threading.Lock()
66_old_popen = subprocess.Popen
67
68def _LockedPopen(*args, **kwargs):
69 with _popen_lock:
70 return _old_popen(*args, **kwargs)
71
72subprocess.Popen = _LockedPopen
73
David Jamesfcb70ef2011-02-02 16:02:30 -080074# Portage doesn't expose dependency trees in its public API, so we have to
75# make use of some private APIs here. These modules are found under
76# /usr/lib/portage/pym/.
77#
78# TODO(davidjames): Update Portage to expose public APIs for these features.
Don Garrett25f309a2014-03-19 14:02:12 -070079# pylint: disable=F0401
David Jamesfcb70ef2011-02-02 16:02:30 -080080from _emerge.actions import adjust_configs
81from _emerge.actions import load_emerge_config
82from _emerge.create_depgraph_params import create_depgraph_params
David James386ccd12011-05-04 20:17:42 -070083from _emerge.depgraph import backtrack_depgraph
David Jamesfcb70ef2011-02-02 16:02:30 -080084from _emerge.main import emerge_main
85from _emerge.main import parse_opts
86from _emerge.Package import Package
Bertrand SIMONNETa15b5072014-10-23 15:27:52 -070087from _emerge.post_emerge import clean_logs
David Jamesfcb70ef2011-02-02 16:02:30 -080088from _emerge.Scheduler import Scheduler
David Jamesfcb70ef2011-02-02 16:02:30 -080089from _emerge.stdout_spinner import stdout_spinner
David James386ccd12011-05-04 20:17:42 -070090from portage._global_updates import _global_updates
David Jamesfcb70ef2011-02-02 16:02:30 -080091import portage
92import portage.debug
Don Garrettf8bf7842014-03-20 17:03:42 -070093# pylint: enable=F0401
Mike Frysinger91d7da92013-02-19 15:53:46 -050094
David Jamesfcb70ef2011-02-02 16:02:30 -080095
David Jamesfcb70ef2011-02-02 16:02:30 -080096def Usage():
97 """Print usage."""
Mike Frysinger383367e2014-09-16 15:06:17 -040098 print("Usage:")
Chris Ching5fcbd622016-11-28 09:22:15 -070099 print(" ./parallel_emerge [--board=BOARD] [--workon=PKGS] [--rebuild]")
100 print(" [--eventlogfile=FILE] [emerge args] package")
Mike Frysinger383367e2014-09-16 15:06:17 -0400101 print()
102 print("Packages specified as workon packages are always built from source.")
103 print()
104 print("The --workon argument is mainly useful when you want to build and")
105 print("install packages that you are working on unconditionally, but do not")
106 print("to have to rev the package to indicate you want to build it from")
107 print("source. The build_packages script will automatically supply the")
108 print("workon argument to emerge, ensuring that packages selected using")
109 print("cros-workon are rebuilt.")
110 print()
111 print("The --rebuild option rebuilds packages whenever their dependencies")
112 print("are changed. This ensures that your build is correct.")
Chris Ching5fcbd622016-11-28 09:22:15 -0700113 print()
114 print("The --eventlogfile writes events to the given file. File is")
115 print("is overwritten if it exists.")
David Jamesfcb70ef2011-02-02 16:02:30 -0800116
117
David Jamesfcb70ef2011-02-02 16:02:30 -0800118# Global start time
119GLOBAL_START = time.time()
120
David James7358d032011-05-19 10:40:03 -0700121# Whether process has been killed by a signal.
122KILLED = multiprocessing.Event()
123
David Jamesfcb70ef2011-02-02 16:02:30 -0800124
125class EmergeData(object):
126 """This simple struct holds various emerge variables.
127
128 This struct helps us easily pass emerge variables around as a unit.
129 These variables are used for calculating dependencies and installing
130 packages.
131 """
132
David Jamesbf1e3442011-05-28 07:44:20 -0700133 __slots__ = ["action", "cmdline_packages", "depgraph", "favorites",
134 "mtimedb", "opts", "root_config", "scheduler_graph",
135 "settings", "spinner", "trees"]
David Jamesfcb70ef2011-02-02 16:02:30 -0800136
137 def __init__(self):
138 # The action the user requested. If the user is installing packages, this
139 # is None. If the user is doing anything other than installing packages,
140 # this will contain the action name, which will map exactly to the
141 # long-form name of the associated emerge option.
142 #
143 # Example: If you call parallel_emerge --unmerge package, the action name
144 # will be "unmerge"
145 self.action = None
146
147 # The list of packages the user passed on the command-line.
148 self.cmdline_packages = None
149
150 # The emerge dependency graph. It'll contain all the packages involved in
151 # this merge, along with their versions.
152 self.depgraph = None
153
David Jamesbf1e3442011-05-28 07:44:20 -0700154 # The list of candidates to add to the world file.
155 self.favorites = None
156
David Jamesfcb70ef2011-02-02 16:02:30 -0800157 # A dict of the options passed to emerge. This dict has been cleaned up
158 # a bit by parse_opts, so that it's a bit easier for the emerge code to
159 # look at the options.
160 #
161 # Emerge takes a few shortcuts in its cleanup process to make parsing of
162 # the options dict easier. For example, if you pass in "--usepkg=n", the
163 # "--usepkg" flag is just left out of the dictionary altogether. Because
164 # --usepkg=n is the default, this makes parsing easier, because emerge
165 # can just assume that if "--usepkg" is in the dictionary, it's enabled.
166 #
167 # These cleanup processes aren't applied to all options. For example, the
168 # --with-bdeps flag is passed in as-is. For a full list of the cleanups
169 # applied by emerge, see the parse_opts function in the _emerge.main
170 # package.
171 self.opts = None
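    # Illustrative sketch only: after the cleanup described above, opts might
    # look roughly like {"--usepkg": True, "--with-bdeps": "y"}.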
172
173 # A dictionary used by portage to maintain global state. This state is
174 # loaded from disk when portage starts up, and saved to disk whenever we
175 # call mtimedb.commit().
176 #
177 # This database contains information about global updates (i.e., what
178 # version of portage we have) and what we're currently doing. Portage
179 # saves what it is currently doing in this database so that it can be
180 # resumed when you call it with the --resume option.
181 #
182 # parallel_emerge does not save what it is currently doing in the mtimedb,
183 # so we do not support the --resume option.
184 self.mtimedb = None
185
186 # The portage configuration for our current root. This contains the portage
187 # settings (see below) and the three portage trees for our current root.
188 # (The three portage trees are explained below, in the documentation for
189 # the "trees" member.)
190 self.root_config = None
191
192 # The scheduler graph is used by emerge to calculate what packages to
193 # install. We don't actually install any deps, so this isn't really used,
194 # but we pass it in to the Scheduler object anyway.
195 self.scheduler_graph = None
196
197 # Portage settings for our current session. Most of these settings are set
198 # in make.conf inside our current install root.
199 self.settings = None
200
201 # The spinner, which spews stuff to stdout to indicate that portage is
202 # doing something. We maintain our own spinner, so we set the portage
203 # spinner to "silent" mode.
204 self.spinner = None
205
206 # The portage trees. There are separate portage trees for each root. To get
207 # the portage tree for the current root, you can look in self.trees[root],
208 # where root = self.settings["ROOT"].
209 #
210 # In each root, there are three trees: vartree, porttree, and bintree.
211 # - vartree: A database of the currently-installed packages.
212 # - porttree: A database of ebuilds, that can be used to build packages.
213 # - bintree: A database of binary packages.
214 self.trees = None
215
216
217class DepGraphGenerator(object):
218 """Grab dependency information about packages from portage.
219
220 Typical usage:
221 deps = DepGraphGenerator()
222 deps.Initialize(sys.argv[1:])
223 deps_tree, deps_info = deps.GenDependencyTree()
224 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
225 deps.PrintTree(deps_tree)
226 PrintDepsMap(deps_graph)
227 """
228
Bertrand SIMONNET0625e1a2015-04-07 11:41:16 -0700229 __slots__ = ["board", "emerge", "package_db", "show_output", "sysroot",
Bertrand SIMONNET411945d2015-05-20 17:23:28 -0700230 "unpack_only", "max_retries"]
David Jamesfcb70ef2011-02-02 16:02:30 -0800231
232 def __init__(self):
233 self.board = None
234 self.emerge = EmergeData()
David Jamesfcb70ef2011-02-02 16:02:30 -0800235 self.package_db = {}
David Jamesfcb70ef2011-02-02 16:02:30 -0800236 self.show_output = False
Bertrand SIMONNET0625e1a2015-04-07 11:41:16 -0700237 self.sysroot = None
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700238 self.unpack_only = False
Bertrand SIMONNET411945d2015-05-20 17:23:28 -0700239 self.max_retries = 1
David Jamesfcb70ef2011-02-02 16:02:30 -0800240
241 def ParseParallelEmergeArgs(self, argv):
242 """Read the parallel emerge arguments from the command-line.
243
244 We need to be compatible with emerge arg format. We scrape arguments that
245 are specific to parallel_emerge, and pass through the rest directly to
246 emerge.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500247
David Jamesfcb70ef2011-02-02 16:02:30 -0800248 Args:
249 argv: arguments list
Mike Frysinger1a736a82013-12-12 01:50:59 -0500250
David Jamesfcb70ef2011-02-02 16:02:30 -0800251 Returns:
252 Arguments that don't belong to parallel_emerge
253 """
254 emerge_args = []
255 for arg in argv:
256 # Specifically match arguments that are specific to parallel_emerge, and
257 # pass through the rest.
258 if arg.startswith("--board="):
259 self.board = arg.replace("--board=", "")
Bertrand SIMONNET0625e1a2015-04-07 11:41:16 -0700260 elif arg.startswith("--sysroot="):
261 self.sysroot = arg.replace("--sysroot=", "")
David Jamesfcb70ef2011-02-02 16:02:30 -0800262 elif arg.startswith("--workon="):
263 workon_str = arg.replace("--workon=", "")
David James7a1ea4b2011-10-13 15:06:41 -0700264 emerge_args.append("--reinstall-atoms=%s" % workon_str)
265 emerge_args.append("--usepkg-exclude=%s" % workon_str)
David Jamesfcb70ef2011-02-02 16:02:30 -0800266 elif arg.startswith("--force-remote-binary="):
267 force_remote_binary = arg.replace("--force-remote-binary=", "")
David James7a1ea4b2011-10-13 15:06:41 -0700268 emerge_args.append("--useoldpkg-atoms=%s" % force_remote_binary)
Bertrand SIMONNET411945d2015-05-20 17:23:28 -0700269 elif arg.startswith("--retries="):
270 self.max_retries = int(arg.replace("--retries=", ""))
David Jamesfcb70ef2011-02-02 16:02:30 -0800271 elif arg == "--show-output":
272 self.show_output = True
David James386ccd12011-05-04 20:17:42 -0700273 elif arg == "--rebuild":
David James7a1ea4b2011-10-13 15:06:41 -0700274 emerge_args.append("--rebuild-if-unbuilt")
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700275 elif arg == "--unpackonly":
276 emerge_args.append("--fetchonly")
277 self.unpack_only = True
Chris Ching5fcbd622016-11-28 09:22:15 -0700278 elif arg.startswith("--eventlogfile="):
279 log_file_name = arg.replace("--eventlogfile=", "")
Chris Ching4a2ebd62017-04-26 16:30:05 -0600280 event_logger = cros_event.getEventFileLogger(log_file_name)
281 event_logger.setKind('ParallelEmerge')
282 cros_event.setEventLogger(event_logger)
David Jamesfcb70ef2011-02-02 16:02:30 -0800283 else:
284 # Not one of our options, so pass through to emerge.
285 emerge_args.append(arg)
286
    # These packages take a really long time to build, so, for expediency, we
    # exclude them from the automatic rebuilds that would otherwise be
    # triggered when one of their dependencies is recompiled.
Mike Frysinger5c2a9052014-04-15 15:52:04 -0400290 for pkg in ("chromeos-base/chromeos-chrome",):
David James7a1ea4b2011-10-13 15:06:41 -0700291 emerge_args.append("--rebuild-exclude=%s" % pkg)
David Jamesfcb70ef2011-02-02 16:02:30 -0800292
293 return emerge_args
294
295 def Initialize(self, args):
296 """Initializer. Parses arguments and sets up portage state."""
297
298 # Parse and strip out args that are just intended for parallel_emerge.
299 emerge_args = self.ParseParallelEmergeArgs(args)
300
Bertrand SIMONNET0625e1a2015-04-07 11:41:16 -0700301 if self.sysroot and self.board:
302 cros_build_lib.Die("--sysroot and --board are incompatible.")
303
    # Set up various environment variables based on our current board. These
    # variables are normally set up inside emerge-${BOARD}, but since we don't
    # call that script, we have to set them up here. These variables serve to
    # point our tools at /build/BOARD and to set up cross-compiles to the
    # appropriate board as configured in toolchain.conf.
309 if self.board:
Bertrand SIMONNET0625e1a2015-04-07 11:41:16 -0700310 self.sysroot = os.environ.get('SYSROOT',
311 cros_build_lib.GetSysroot(self.board))
312
313 if self.sysroot:
314 os.environ["PORTAGE_CONFIGROOT"] = self.sysroot
315 os.environ["SYSROOT"] = self.sysroot
David Jamesfcb70ef2011-02-02 16:02:30 -0800316
David Jamesfcb70ef2011-02-02 16:02:30 -0800317 # Turn off interactive delays
318 os.environ["EBEEP_IGNORE"] = "1"
319 os.environ["EPAUSE_IGNORE"] = "1"
Mike Frysinger0a647fc2012-08-06 14:36:05 -0400320 os.environ["CLEAN_DELAY"] = "0"
David Jamesfcb70ef2011-02-02 16:02:30 -0800321
322 # Parse the emerge options.
David Jamesea3ca332011-05-26 11:48:29 -0700323 action, opts, cmdline_packages = parse_opts(emerge_args, silent=True)
David Jamesfcb70ef2011-02-02 16:02:30 -0800324
325 # Set environment variables based on options. Portage normally sets these
326 # environment variables in emerge_main, but we can't use that function,
327 # because it also does a bunch of other stuff that we don't want.
328 # TODO(davidjames): Patch portage to move this logic into a function we can
329 # reuse here.
330 if "--debug" in opts:
331 os.environ["PORTAGE_DEBUG"] = "1"
332 if "--config-root" in opts:
333 os.environ["PORTAGE_CONFIGROOT"] = opts["--config-root"]
334 if "--root" in opts:
335 os.environ["ROOT"] = opts["--root"]
336 if "--accept-properties" in opts:
337 os.environ["ACCEPT_PROPERTIES"] = opts["--accept-properties"]
338
David James88d780c2014-02-05 13:03:29 -0800339 # If we're installing packages to the board, we can disable vardb locks.
340 # This is safe because we only run up to one instance of parallel_emerge in
341 # parallel.
342 # TODO(davidjames): Enable this for the host too.
Bertrand SIMONNET0625e1a2015-04-07 11:41:16 -0700343 if self.sysroot:
David Jamesfcb70ef2011-02-02 16:02:30 -0800344 os.environ.setdefault("PORTAGE_LOCKS", "false")
David Jamesfcb70ef2011-02-02 16:02:30 -0800345
346 # Now that we've setup the necessary environment variables, we can load the
347 # emerge config from disk.
Gilad Arnold94758762015-05-22 12:23:23 -0700348 # pylint: disable=unpacking-non-sequence
David Jamesfcb70ef2011-02-02 16:02:30 -0800349 settings, trees, mtimedb = load_emerge_config()
350
David Jamesea3ca332011-05-26 11:48:29 -0700351 # Add in EMERGE_DEFAULT_OPTS, if specified.
352 tmpcmdline = []
353 if "--ignore-default-opts" not in opts:
354 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
355 tmpcmdline.extend(emerge_args)
356 action, opts, cmdline_packages = parse_opts(tmpcmdline)
357
358 # If we're installing to the board, we want the --root-deps option so that
359 # portage will install the build dependencies to that location as well.
Bertrand SIMONNET0625e1a2015-04-07 11:41:16 -0700360 if self.sysroot:
David Jamesea3ca332011-05-26 11:48:29 -0700361 opts.setdefault("--root-deps", True)
362
David Jamesfcb70ef2011-02-02 16:02:30 -0800363 # Check whether our portage tree is out of date. Typically, this happens
364 # when you're setting up a new portage tree, such as in setup_board and
365 # make_chroot. In that case, portage applies a bunch of global updates
366 # here. Once the updates are finished, we need to commit any changes
367 # that the global update made to our mtimedb, and reload the config.
368 #
369 # Portage normally handles this logic in emerge_main, but again, we can't
370 # use that function here.
371 if _global_updates(trees, mtimedb["updates"]):
372 mtimedb.commit()
Gilad Arnold94758762015-05-22 12:23:23 -0700373 # pylint: disable=unpacking-non-sequence
David Jamesfcb70ef2011-02-02 16:02:30 -0800374 settings, trees, mtimedb = load_emerge_config(trees=trees)
375
376 # Setup implied options. Portage normally handles this logic in
377 # emerge_main.
378 if "--buildpkgonly" in opts or "buildpkg" in settings.features:
379 opts.setdefault("--buildpkg", True)
380 if "--getbinpkgonly" in opts:
381 opts.setdefault("--usepkgonly", True)
382 opts.setdefault("--getbinpkg", True)
383 if "getbinpkg" in settings.features:
384 # Per emerge_main, FEATURES=getbinpkg overrides --getbinpkg=n
385 opts["--getbinpkg"] = True
386 if "--getbinpkg" in opts or "--usepkgonly" in opts:
387 opts.setdefault("--usepkg", True)
388 if "--fetch-all-uri" in opts:
389 opts.setdefault("--fetchonly", True)
390 if "--skipfirst" in opts:
391 opts.setdefault("--resume", True)
392 if "--buildpkgonly" in opts:
393 # --buildpkgonly will not merge anything, so it overrides all binary
394 # package options.
395 for opt in ("--getbinpkg", "--getbinpkgonly",
396 "--usepkg", "--usepkgonly"):
397 opts.pop(opt, None)
398 if (settings.get("PORTAGE_DEBUG", "") == "1" and
399 "python-trace" in settings.features):
400 portage.debug.set_trace(True)
401
402 # Complain about unsupported options
David James386ccd12011-05-04 20:17:42 -0700403 for opt in ("--ask", "--ask-enter-invalid", "--resume", "--skipfirst"):
David Jamesfcb70ef2011-02-02 16:02:30 -0800404 if opt in opts:
Mike Frysinger383367e2014-09-16 15:06:17 -0400405 print("%s is not supported by parallel_emerge" % opt)
David Jamesfcb70ef2011-02-02 16:02:30 -0800406 sys.exit(1)
407
408 # Make emerge specific adjustments to the config (e.g. colors!)
409 adjust_configs(opts, trees)
410
411 # Save our configuration so far in the emerge object
412 emerge = self.emerge
413 emerge.action, emerge.opts = action, opts
414 emerge.settings, emerge.trees, emerge.mtimedb = settings, trees, mtimedb
415 emerge.cmdline_packages = cmdline_packages
416 root = settings["ROOT"]
417 emerge.root_config = trees[root]["root_config"]
418
David James386ccd12011-05-04 20:17:42 -0700419 if "--usepkg" in opts:
David Jamesfcb70ef2011-02-02 16:02:30 -0800420 emerge.trees[root]["bintree"].populate("--getbinpkg" in opts)
421
David Jamesfcb70ef2011-02-02 16:02:30 -0800422 def CreateDepgraph(self, emerge, packages):
423 """Create an emerge depgraph object."""
424 # Setup emerge options.
425 emerge_opts = emerge.opts.copy()
426
    # Ask portage to build a dependency graph with the options we specified
    # above.
David Jamesfcb70ef2011-02-02 16:02:30 -0800429 params = create_depgraph_params(emerge_opts, emerge.action)
David Jamesbf1e3442011-05-28 07:44:20 -0700430 success, depgraph, favorites = backtrack_depgraph(
David James386ccd12011-05-04 20:17:42 -0700431 emerge.settings, emerge.trees, emerge_opts, params, emerge.action,
432 packages, emerge.spinner)
433 emerge.depgraph = depgraph
David Jamesfcb70ef2011-02-02 16:02:30 -0800434
David James386ccd12011-05-04 20:17:42 -0700435 # Is it impossible to honor the user's request? Bail!
436 if not success:
437 depgraph.display_problems()
438 sys.exit(1)
David Jamesfcb70ef2011-02-02 16:02:30 -0800439
440 emerge.depgraph = depgraph
David Jamesbf1e3442011-05-28 07:44:20 -0700441 emerge.favorites = favorites
David Jamesfcb70ef2011-02-02 16:02:30 -0800442
David Jamesdeebd692011-05-09 17:02:52 -0700443 # Prime and flush emerge caches.
444 root = emerge.settings["ROOT"]
445 vardb = emerge.trees[root]["vartree"].dbapi
David James0bdc5de2011-05-12 16:22:26 -0700446 if "--pretend" not in emerge.opts:
447 vardb.counter_tick()
David Jamesdeebd692011-05-09 17:02:52 -0700448 vardb.flush_cache()
449
David James386ccd12011-05-04 20:17:42 -0700450 def GenDependencyTree(self):
David Jamesfcb70ef2011-02-02 16:02:30 -0800451 """Get dependency tree info from emerge.
452
David Jamesfcb70ef2011-02-02 16:02:30 -0800453 Returns:
454 Dependency tree
455 """
456 start = time.time()
457
458 emerge = self.emerge
459
460 # Create a list of packages to merge
461 packages = set(emerge.cmdline_packages[:])
David Jamesfcb70ef2011-02-02 16:02:30 -0800462
463 # Tell emerge to be quiet. We print plenty of info ourselves so we don't
464 # need any extra output from portage.
465 portage.util.noiselimit = -1
466
467 # My favorite feature: The silent spinner. It doesn't spin. Ever.
468 # I'd disable the colors by default too, but they look kind of cool.
469 emerge.spinner = stdout_spinner()
470 emerge.spinner.update = emerge.spinner.update_quiet
471
472 if "--quiet" not in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -0400473 print("Calculating deps...")
David Jamesfcb70ef2011-02-02 16:02:30 -0800474
Chris Ching4a2ebd62017-04-26 16:30:05 -0600475 with cros_event.newEvent(task_name="GenerateDepTree"):
Chris Ching5fcbd622016-11-28 09:22:15 -0700476 self.CreateDepgraph(emerge, packages)
477 depgraph = emerge.depgraph
David Jamesfcb70ef2011-02-02 16:02:30 -0800478
479 # Build our own tree from the emerge digraph.
480 deps_tree = {}
Don Garrett25f309a2014-03-19 14:02:12 -0700481 # pylint: disable=W0212
David Jamesfcb70ef2011-02-02 16:02:30 -0800482 digraph = depgraph._dynamic_config.digraph
David James3f778802011-08-25 19:31:45 -0700483 root = emerge.settings["ROOT"]
Bertrand SIMONNETa15b5072014-10-23 15:27:52 -0700484 final_db = depgraph._dynamic_config._filtered_trees[root]['graph_db']
David Jamesfcb70ef2011-02-02 16:02:30 -0800485 for node, node_deps in digraph.nodes.items():
486 # Calculate dependency packages that need to be installed first. Each
487 # child on the digraph is a dependency. The "operation" field specifies
488 # what we're doing (e.g. merge, uninstall, etc.). The "priorities" array
489 # contains the type of dependency (e.g. build, runtime, runtime_post,
490 # etc.)
491 #
      # Portage refers to the identifiers for packages as a CPV. This acronym
      # stands for Category/Package/Version.
      #
      # Here's an example CPV: chromeos-base/power_manager-0.0.1-r1
      # Split up, this CPV would be:
      #   C -- Category: chromeos-base
      #   P -- Package: power_manager
      #   V -- Version: 0.0.1-r1
500 #
501 # We just refer to CPVs as packages here because it's easier.
502 deps = {}
503 for child, priorities in node_deps[0].items():
David James3f778802011-08-25 19:31:45 -0700504 if isinstance(child, Package) and child.root == root:
505 cpv = str(child.cpv)
506 action = str(child.operation)
507
508 # If we're uninstalling a package, check whether Portage is
509 # installing a replacement. If so, just depend on the installation
510 # of the new package, because the old package will automatically
511 # be uninstalled at that time.
512 if action == "uninstall":
513 for pkg in final_db.match_pkgs(child.slot_atom):
514 cpv = str(pkg.cpv)
515 action = "merge"
516 break
517
518 deps[cpv] = dict(action=action,
519 deptypes=[str(x) for x in priorities],
520 deps={})
David Jamesfcb70ef2011-02-02 16:02:30 -0800521
522 # We've built our list of deps, so we can add our package to the tree.
David James3f778802011-08-25 19:31:45 -0700523 if isinstance(node, Package) and node.root == root:
David Jamesfcb70ef2011-02-02 16:02:30 -0800524 deps_tree[str(node.cpv)] = dict(action=str(node.operation),
525 deps=deps)
526
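    # Illustrative sketch only (hypothetical package names): an entry built by
    # the loop above might look roughly like this:
    #   deps_tree = {
    #     "chromeos-base/power_manager-0.0.1-r1": {
    #       "action": "merge",
    #       "deps": {
    #         "chromeos-base/libchrome-0.0.1-r1": {
    #           "action": "merge",
    #           "deptypes": ["buildtime", "runtime"],
    #           "deps": {},
    #         },
    #       },
    #     },
    #   }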
David Jamesfcb70ef2011-02-02 16:02:30 -0800527 # Ask portage for its install plan, so that we can only throw out
David James386ccd12011-05-04 20:17:42 -0700528 # dependencies that portage throws out.
David Jamesfcb70ef2011-02-02 16:02:30 -0800529 deps_info = {}
530 for pkg in depgraph.altlist():
531 if isinstance(pkg, Package):
David James3f778802011-08-25 19:31:45 -0700532 assert pkg.root == root
David Jamesfcb70ef2011-02-02 16:02:30 -0800533 self.package_db[pkg.cpv] = pkg
534
David Jamesfcb70ef2011-02-02 16:02:30 -0800535 # Save off info about the package
David James386ccd12011-05-04 20:17:42 -0700536 deps_info[str(pkg.cpv)] = {"idx": len(deps_info)}
David Jamesfcb70ef2011-02-02 16:02:30 -0800537
538 seconds = time.time() - start
539 if "--quiet" not in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -0400540 print("Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60))
David Jamesfcb70ef2011-02-02 16:02:30 -0800541
542 return deps_tree, deps_info
543
544 def PrintTree(self, deps, depth=""):
545 """Print the deps we have seen in the emerge output.
546
547 Args:
Mike Frysinger6f3c48e2015-05-06 02:38:51 -0400548 deps: Dependency tree structure.
549 depth: Allows printing the tree recursively, with indentation.
David Jamesfcb70ef2011-02-02 16:02:30 -0800550 """
551 for entry in sorted(deps):
552 action = deps[entry]["action"]
Mike Frysinger383367e2014-09-16 15:06:17 -0400553 print("%s %s (%s)" % (depth, entry, action))
David Jamesfcb70ef2011-02-02 16:02:30 -0800554 self.PrintTree(deps[entry]["deps"], depth=depth + " ")
555
David James386ccd12011-05-04 20:17:42 -0700556 def GenDependencyGraph(self, deps_tree, deps_info):
David Jamesfcb70ef2011-02-02 16:02:30 -0800557 """Generate a doubly linked dependency graph.
558
559 Args:
560 deps_tree: Dependency tree structure.
561 deps_info: More details on the dependencies.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500562
David Jamesfcb70ef2011-02-02 16:02:30 -0800563 Returns:
564 Deps graph in the form of a dict of packages, with each package
565 specifying a "needs" list and "provides" list.
566 """
567 emerge = self.emerge
David Jamesfcb70ef2011-02-02 16:02:30 -0800568
David Jamesfcb70ef2011-02-02 16:02:30 -0800569 # deps_map is the actual dependency graph.
570 #
571 # Each package specifies a "needs" list and a "provides" list. The "needs"
572 # list indicates which packages we depend on. The "provides" list
573 # indicates the reverse dependencies -- what packages need us.
574 #
575 # We also provide some other information in the dependency graph:
576 # - action: What we're planning on doing with this package. Generally,
577 # "merge", "nomerge", or "uninstall"
David Jamesfcb70ef2011-02-02 16:02:30 -0800578 deps_map = {}
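    # Illustrative sketch only (hypothetical package names): once ReverseTree()
    # below has run, a deps_map entry might look roughly like this:
    #   deps_map["chromeos-base/libchrome-0.0.1-r1"] = {
    #     "action": "merge",
    #     "needs": {},
    #     "provides": set(["chromeos-base/power_manager-0.0.1-r1"]),
    #     "nodeps": False,
    #     "binary": False,
    #   }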
579
580 def ReverseTree(packages):
581 """Convert tree to digraph.
582
583 Take the tree of package -> requirements and reverse it to a digraph of
584 buildable packages -> packages they unblock.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500585
David Jamesfcb70ef2011-02-02 16:02:30 -0800586 Args:
587 packages: Tree(s) of dependencies.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500588
David Jamesfcb70ef2011-02-02 16:02:30 -0800589 Returns:
590 Unsanitized digraph.
591 """
David James8c7e5e32011-06-28 11:26:03 -0700592 binpkg_phases = set(["setup", "preinst", "postinst"])
David Jamese5e1c0a2014-09-29 17:19:41 -0700593 needed_dep_types = set(["blocker", "buildtime", "buildtime_slot_op",
594 "runtime", "runtime_slot_op"])
Benjamin Gordon670b6972017-08-29 13:43:56 -0600595 ignored_dep_types = set(["ignored", "runtime_post", "soft"])
596
597 # There's a bug in the Portage library where it always returns 'optional'
598 # and never 'buildtime' for the digraph while --usepkg is enabled; even
599 # when the package is being rebuilt. To work around this, we treat
600 # 'optional' as needed when we are using --usepkg. See crbug.com/756240 .
601 if "--usepkg" in self.emerge.opts:
602 needed_dep_types.add("optional")
603 else:
604 ignored_dep_types.add("optional")
605
David Jamese5e1c0a2014-09-29 17:19:41 -0700606 all_dep_types = ignored_dep_types | needed_dep_types
David Jamesfcb70ef2011-02-02 16:02:30 -0800607 for pkg in packages:
608
609 # Create an entry for the package
610 action = packages[pkg]["action"]
David James8c7e5e32011-06-28 11:26:03 -0700611 default_pkg = {"needs": {}, "provides": set(), "action": action,
612 "nodeps": False, "binary": False}
David Jamesfcb70ef2011-02-02 16:02:30 -0800613 this_pkg = deps_map.setdefault(pkg, default_pkg)
614
David James8c7e5e32011-06-28 11:26:03 -0700615 if pkg in deps_info:
616 this_pkg["idx"] = deps_info[pkg]["idx"]
617
618 # If a package doesn't have any defined phases that might use the
619 # dependent packages (i.e. pkg_setup, pkg_preinst, or pkg_postinst),
620 # we can install this package before its deps are ready.
621 emerge_pkg = self.package_db.get(pkg)
622 if emerge_pkg and emerge_pkg.type_name == "binary":
623 this_pkg["binary"] = True
Mike Frysinger66652ec2014-04-24 11:42:25 -0400624 defined_phases = emerge_pkg.defined_phases
David James8c7e5e32011-06-28 11:26:03 -0700625 defined_binpkg_phases = binpkg_phases.intersection(defined_phases)
626 if not defined_binpkg_phases:
627 this_pkg["nodeps"] = True
628
David Jamesfcb70ef2011-02-02 16:02:30 -0800629 # Create entries for dependencies of this package first.
630 ReverseTree(packages[pkg]["deps"])
631
632 # Add dependencies to this package.
633 for dep, dep_item in packages[pkg]["deps"].iteritems():
David James8c7e5e32011-06-28 11:26:03 -0700634 # We only need to enforce strict ordering of dependencies if the
David James3f778802011-08-25 19:31:45 -0700635 # dependency is a blocker, or is a buildtime or runtime dependency.
636 # (I.e., ignored, optional, and runtime_post dependencies don't
637 # depend on ordering.)
David James8c7e5e32011-06-28 11:26:03 -0700638 dep_types = dep_item["deptypes"]
639 if needed_dep_types.intersection(dep_types):
640 deps_map[dep]["provides"].add(pkg)
641 this_pkg["needs"][dep] = "/".join(dep_types)
David Jamesfcb70ef2011-02-02 16:02:30 -0800642
David Jamese5e1c0a2014-09-29 17:19:41 -0700643 # Verify we processed all appropriate dependency types.
644 unknown_dep_types = set(dep_types) - all_dep_types
645 if unknown_dep_types:
646 print("Unknown dependency types found:")
647 print(" %s -> %s (%s)" % (pkg, dep, "/".join(unknown_dep_types)))
648 sys.exit(1)
649
David James3f778802011-08-25 19:31:45 -0700650 # If there's a blocker, Portage may need to move files from one
651 # package to another, which requires editing the CONTENTS files of
652 # both packages. To avoid race conditions while editing this file,
653 # the two packages must not be installed in parallel, so we can't
654 # safely ignore dependencies. See http://crosbug.com/19328
655 if "blocker" in dep_types:
656 this_pkg["nodeps"] = False
657
David Jamesfcb70ef2011-02-02 16:02:30 -0800658 def FindCycles():
659 """Find cycles in the dependency tree.
660
661 Returns:
662 A dict mapping cyclic packages to a dict of the deps that cause
663 cycles. For each dep that causes cycles, it returns an example
664 traversal of the graph that shows the cycle.
665 """
666
667 def FindCyclesAtNode(pkg, cycles, unresolved, resolved):
668 """Find cycles in cyclic dependencies starting at specified package.
669
670 Args:
671 pkg: Package identifier.
672 cycles: A dict mapping cyclic packages to a dict of the deps that
673 cause cycles. For each dep that causes cycles, it returns an
674 example traversal of the graph that shows the cycle.
675 unresolved: Nodes that have been visited but are not fully processed.
676 resolved: Nodes that have been visited and are fully processed.
677 """
678 pkg_cycles = cycles.get(pkg)
679 if pkg in resolved and not pkg_cycles:
680 # If we already looked at this package, and found no cyclic
681 # dependencies, we can stop now.
682 return
683 unresolved.append(pkg)
684 for dep in deps_map[pkg]["needs"]:
685 if dep in unresolved:
686 idx = unresolved.index(dep)
687 mycycle = unresolved[idx:] + [dep]
David James321490a2012-12-17 12:05:56 -0800688 for i in xrange(len(mycycle) - 1):
David Jamesfcb70ef2011-02-02 16:02:30 -0800689 pkg1, pkg2 = mycycle[i], mycycle[i+1]
690 cycles.setdefault(pkg1, {}).setdefault(pkg2, mycycle)
691 elif not pkg_cycles or dep not in pkg_cycles:
692 # Looks like we haven't seen this edge before.
693 FindCyclesAtNode(dep, cycles, unresolved, resolved)
694 unresolved.pop()
695 resolved.add(pkg)
696
697 cycles, unresolved, resolved = {}, [], set()
698 for pkg in deps_map:
699 FindCyclesAtNode(pkg, cycles, unresolved, resolved)
700 return cycles
701
David James386ccd12011-05-04 20:17:42 -0700702 def RemoveUnusedPackages():
David Jamesfcb70ef2011-02-02 16:02:30 -0800703 """Remove installed packages, propagating dependencies."""
David Jamesfcb70ef2011-02-02 16:02:30 -0800704 # Schedule packages that aren't on the install list for removal
705 rm_pkgs = set(deps_map.keys()) - set(deps_info.keys())
706
David Jamesfcb70ef2011-02-02 16:02:30 -0800707 # Remove the packages we don't want, simplifying the graph and making
708 # it easier for us to crack cycles.
709 for pkg in sorted(rm_pkgs):
710 this_pkg = deps_map[pkg]
711 needs = this_pkg["needs"]
712 provides = this_pkg["provides"]
713 for dep in needs:
714 dep_provides = deps_map[dep]["provides"]
715 dep_provides.update(provides)
716 dep_provides.discard(pkg)
717 dep_provides.discard(dep)
718 for target in provides:
719 target_needs = deps_map[target]["needs"]
720 target_needs.update(needs)
721 target_needs.pop(pkg, None)
722 target_needs.pop(target, None)
723 del deps_map[pkg]
724
725 def PrintCycleBreak(basedep, dep, mycycle):
726 """Print details about a cycle that we are planning on breaking.
727
Mike Frysinger02e1e072013-11-10 22:11:34 -0500728 We are breaking a cycle where dep needs basedep. mycycle is an
729 example cycle which contains dep -> basedep.
730 """
David Jamesfcb70ef2011-02-02 16:02:30 -0800731
David Jamesfcb70ef2011-02-02 16:02:30 -0800732 needs = deps_map[dep]["needs"]
733 depinfo = needs.get(basedep, "deleted")
David Jamesfcb70ef2011-02-02 16:02:30 -0800734
David James3f778802011-08-25 19:31:45 -0700735 # It's OK to swap install order for blockers, as long as the two
736 # packages aren't installed in parallel. If there is a cycle, then
737 # we know the packages depend on each other already, so we can drop the
738 # blocker safely without printing a warning.
739 if depinfo == "blocker":
740 return
741
David Jamesfcb70ef2011-02-02 16:02:30 -0800742 # Notify the user that we're breaking a cycle.
Mike Frysinger383367e2014-09-16 15:06:17 -0400743 print("Breaking %s -> %s (%s)" % (dep, basedep, depinfo))
David Jamesfcb70ef2011-02-02 16:02:30 -0800744
745 # Show cycle.
David James321490a2012-12-17 12:05:56 -0800746 for i in xrange(len(mycycle) - 1):
David Jamesfcb70ef2011-02-02 16:02:30 -0800747 pkg1, pkg2 = mycycle[i], mycycle[i+1]
748 needs = deps_map[pkg1]["needs"]
749 depinfo = needs.get(pkg2, "deleted")
750 if pkg1 == dep and pkg2 == basedep:
751 depinfo = depinfo + ", deleting"
Mike Frysinger383367e2014-09-16 15:06:17 -0400752 print(" %s -> %s (%s)" % (pkg1, pkg2, depinfo))
David Jamesfcb70ef2011-02-02 16:02:30 -0800753
754 def SanitizeTree():
755 """Remove circular dependencies.
756
757 We prune all dependencies involved in cycles that go against the emerge
758 ordering. This has a nice property: we're guaranteed to merge
759 dependencies in the same order that portage does.
760
761 Because we don't treat any dependencies as "soft" unless they're killed
762 by a cycle, we pay attention to a larger number of dependencies when
763 merging. This hurts performance a bit, but helps reliability.
764 """
765 start = time.time()
766 cycles = FindCycles()
767 while cycles:
768 for dep, mycycles in cycles.iteritems():
769 for basedep, mycycle in mycycles.iteritems():
770 if deps_info[basedep]["idx"] >= deps_info[dep]["idx"]:
Matt Tennant08797302011-10-17 16:18:45 -0700771 if "--quiet" not in emerge.opts:
772 PrintCycleBreak(basedep, dep, mycycle)
David Jamesfcb70ef2011-02-02 16:02:30 -0800773 del deps_map[dep]["needs"][basedep]
774 deps_map[basedep]["provides"].remove(dep)
775 cycles = FindCycles()
776 seconds = time.time() - start
777 if "--quiet" not in emerge.opts and seconds >= 0.1:
Mike Frysinger383367e2014-09-16 15:06:17 -0400778 print("Tree sanitized in %dm%.1fs" % (seconds / 60, seconds % 60))
David Jamesfcb70ef2011-02-02 16:02:30 -0800779
David James8c7e5e32011-06-28 11:26:03 -0700780 def FindRecursiveProvides(pkg, seen):
781 """Find all nodes that require a particular package.
782
783 Assumes that graph is acyclic.
784
785 Args:
786 pkg: Package identifier.
787 seen: Nodes that have been visited so far.
788 """
789 if pkg in seen:
790 return
791 seen.add(pkg)
792 info = deps_map[pkg]
793 info["tprovides"] = info["provides"].copy()
794 for dep in info["provides"]:
795 FindRecursiveProvides(dep, seen)
796 info["tprovides"].update(deps_map[dep]["tprovides"])
797
David Jamesa22906f2011-05-04 19:53:26 -0700798 ReverseTree(deps_tree)
David Jamesa22906f2011-05-04 19:53:26 -0700799
David James386ccd12011-05-04 20:17:42 -0700800 # We need to remove unused packages so that we can use the dependency
801 # ordering of the install process to show us what cycles to crack.
802 RemoveUnusedPackages()
David Jamesfcb70ef2011-02-02 16:02:30 -0800803 SanitizeTree()
David James8c7e5e32011-06-28 11:26:03 -0700804 seen = set()
805 for pkg in deps_map:
806 FindRecursiveProvides(pkg, seen)
David Jamesfcb70ef2011-02-02 16:02:30 -0800807 return deps_map
808
809 def PrintInstallPlan(self, deps_map):
810 """Print an emerge-style install plan.
811
812 The install plan lists what packages we're installing, in order.
813 It's useful for understanding what parallel_emerge is doing.
814
815 Args:
816 deps_map: The dependency graph.
817 """
818
819 def InstallPlanAtNode(target, deps_map):
820 nodes = []
821 nodes.append(target)
822 for dep in deps_map[target]["provides"]:
823 del deps_map[dep]["needs"][target]
824 if not deps_map[dep]["needs"]:
825 nodes.extend(InstallPlanAtNode(dep, deps_map))
826 return nodes
827
828 deps_map = copy.deepcopy(deps_map)
829 install_plan = []
830 plan = set()
831 for target, info in deps_map.iteritems():
832 if not info["needs"] and target not in plan:
833 for item in InstallPlanAtNode(target, deps_map):
834 plan.add(item)
835 install_plan.append(self.package_db[item])
836
837 for pkg in plan:
838 del deps_map[pkg]
839
840 if deps_map:
Mike Frysinger383367e2014-09-16 15:06:17 -0400841 print("Cyclic dependencies:", " ".join(deps_map))
David Jamesfcb70ef2011-02-02 16:02:30 -0800842 PrintDepsMap(deps_map)
843 sys.exit(1)
844
845 self.emerge.depgraph.display(install_plan)
846
847
848def PrintDepsMap(deps_map):
849 """Print dependency graph, for each package list it's prerequisites."""
850 for i in sorted(deps_map):
Mike Frysinger383367e2014-09-16 15:06:17 -0400851 print("%s: (%s) needs" % (i, deps_map[i]["action"]))
David Jamesfcb70ef2011-02-02 16:02:30 -0800852 needs = deps_map[i]["needs"]
853 for j in sorted(needs):
Mike Frysinger383367e2014-09-16 15:06:17 -0400854 print(" %s" % (j))
David Jamesfcb70ef2011-02-02 16:02:30 -0800855 if not needs:
Mike Frysinger383367e2014-09-16 15:06:17 -0400856 print(" no dependencies")
David Jamesfcb70ef2011-02-02 16:02:30 -0800857
858
859class EmergeJobState(object):
Don Garrett25f309a2014-03-19 14:02:12 -0700860 """Structure describing the EmergeJobState."""
861
David Jamesfcb70ef2011-02-02 16:02:30 -0800862 __slots__ = ["done", "filename", "last_notify_timestamp", "last_output_seek",
863 "last_output_timestamp", "pkgname", "retcode", "start_timestamp",
Chris Ching73486ab2017-04-26 18:02:37 -0600864 "target", "try_count", "fetch_only", "unpack_only"]
David Jamesfcb70ef2011-02-02 16:02:30 -0800865
866 def __init__(self, target, pkgname, done, filename, start_timestamp,
Chris Ching73486ab2017-04-26 18:02:37 -0600867 retcode=None, fetch_only=False, try_count=0, unpack_only=False):
David Jamesfcb70ef2011-02-02 16:02:30 -0800868
869 # The full name of the target we're building (e.g.
Mike Frysingerfd969312014-04-02 22:16:42 -0400870 # virtual/target-os-1-r60)
David Jamesfcb70ef2011-02-02 16:02:30 -0800871 self.target = target
872
Mike Frysingerfd969312014-04-02 22:16:42 -0400873 # The short name of the target we're building (e.g. target-os-1-r60)
David Jamesfcb70ef2011-02-02 16:02:30 -0800874 self.pkgname = pkgname
875
876 # Whether the job is done. (True if the job is done; false otherwise.)
877 self.done = done
878
879 # The filename where output is currently stored.
880 self.filename = filename
881
882 # The timestamp of the last time we printed the name of the log file. We
883 # print this at the beginning of the job, so this starts at
884 # start_timestamp.
885 self.last_notify_timestamp = start_timestamp
886
887 # The location (in bytes) of the end of the last complete line we printed.
888 # This starts off at zero. We use this to jump to the right place when we
889 # print output from the same ebuild multiple times.
890 self.last_output_seek = 0
891
892 # The timestamp of the last time we printed output. Since we haven't
893 # printed output yet, this starts at zero.
894 self.last_output_timestamp = 0
895
896 # The return code of our job, if the job is actually finished.
897 self.retcode = retcode
898
Chris Ching73486ab2017-04-26 18:02:37 -0600899 # Number of tries for this job
900 self.try_count = try_count
901
Brian Harring0be85c62012-03-17 19:52:12 -0700902 # Was this just a fetch job?
903 self.fetch_only = fetch_only
904
David Jamesfcb70ef2011-02-02 16:02:30 -0800905 # The timestamp when our job started.
906 self.start_timestamp = start_timestamp
907
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700908 # No emerge, only unpack packages.
909 self.unpack_only = unpack_only
910
David Jamesfcb70ef2011-02-02 16:02:30 -0800911
David James321490a2012-12-17 12:05:56 -0800912def KillHandler(_signum, _frame):
David James7358d032011-05-19 10:40:03 -0700913 # Kill self and all subprocesses.
914 os.killpg(0, signal.SIGKILL)
915
Mike Frysingercc838832014-05-24 13:10:30 -0400916
David Jamesfcb70ef2011-02-02 16:02:30 -0800917def SetupWorkerSignals():
David James321490a2012-12-17 12:05:56 -0800918 def ExitHandler(_signum, _frame):
David James7358d032011-05-19 10:40:03 -0700919 # Set KILLED flag.
920 KILLED.set()
David James13cead42011-05-18 16:22:01 -0700921
David James7358d032011-05-19 10:40:03 -0700922 # Remove our signal handlers so we don't get called recursively.
923 signal.signal(signal.SIGINT, KillHandler)
924 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -0800925
926 # Ensure that we exit quietly and cleanly, if possible, when we receive
927 # SIGTERM or SIGINT signals. By default, when the user hits CTRL-C, all
928 # of the child processes will print details about KeyboardInterrupt
929 # exceptions, which isn't very helpful.
930 signal.signal(signal.SIGINT, ExitHandler)
931 signal.signal(signal.SIGTERM, ExitHandler)
932
Mike Frysingerd74fe4a2014-04-24 11:43:38 -0400933
Chris Ching73486ab2017-04-26 18:02:37 -0600934def EmergeProcess(output, job_state, *args, **kwargs):
David James1ed3e252011-10-05 20:26:15 -0700935 """Merge a package in a subprocess.
936
937 Args:
David James1ed3e252011-10-05 20:26:15 -0700938 output: Temporary file to write output.
Chris Ching73486ab2017-04-26 18:02:37 -0600939 job_state: Stored state of package
David James6b29d052012-11-02 10:27:27 -0700940 *args: Arguments to pass to Scheduler constructor.
941 **kwargs: Keyword arguments to pass to Scheduler constructor.
David James1ed3e252011-10-05 20:26:15 -0700942
943 Returns:
944 The exit code returned by the subprocess.
945 """
Chris Chingb8eba812017-06-22 09:54:48 -0600946
Chris Ching73486ab2017-04-26 18:02:37 -0600947 target = job_state.target
948
949 job_state.try_count += 1
950
Chris Chingb8eba812017-06-22 09:54:48 -0600951 cpv = portage_util.SplitCPV(target)
Chris Ching73486ab2017-04-26 18:02:37 -0600952
Chris Ching4a2ebd62017-04-26 16:30:05 -0600953 event = cros_event.newEvent(task_name="EmergePackage",
Chris Chingb8eba812017-06-22 09:54:48 -0600954 name=cpv.package,
955 category=cpv.category,
Chris Ching73486ab2017-04-26 18:02:37 -0600956 version=cpv.version,
957 try_count=job_state.try_count)
David James1ed3e252011-10-05 20:26:15 -0700958 pid = os.fork()
959 if pid == 0:
960 try:
Mike Frysingerd74fe4a2014-04-24 11:43:38 -0400961 proctitle.settitle('EmergeProcess', target)
962
David James1ed3e252011-10-05 20:26:15 -0700963 # Sanity checks.
Mike Frysingerf02736e2013-11-08 15:27:00 -0500964 if sys.stdout.fileno() != 1:
965 raise Exception("sys.stdout.fileno() != 1")
966 if sys.stderr.fileno() != 2:
967 raise Exception("sys.stderr.fileno() != 2")
David James1ed3e252011-10-05 20:26:15 -0700968
969 # - Redirect 1 (stdout) and 2 (stderr) at our temporary file.
970 # - Redirect 0 to point at sys.stdin. In this case, sys.stdin
971 # points at a file reading os.devnull, because multiprocessing mucks
972 # with sys.stdin.
973 # - Leave the sys.stdin and output filehandles alone.
974 fd_pipes = {0: sys.stdin.fileno(),
975 1: output.fileno(),
976 2: output.fileno(),
977 sys.stdin.fileno(): sys.stdin.fileno(),
978 output.fileno(): output.fileno()}
Mike Frysinger66652ec2014-04-24 11:42:25 -0400979 # pylint: disable=W0212
980 portage.process._setup_pipes(fd_pipes, close_fds=False)
David James1ed3e252011-10-05 20:26:15 -0700981
982 # Portage doesn't like when sys.stdin.fileno() != 0, so point sys.stdin
983 # at the filehandle we just created in _setup_pipes.
984 if sys.stdin.fileno() != 0:
David James6b29d052012-11-02 10:27:27 -0700985 sys.__stdin__ = sys.stdin = os.fdopen(0, "r")
986
987 scheduler = Scheduler(*args, **kwargs)
988
989 # Enable blocker handling even though we're in --nodeps mode. This
990 # allows us to unmerge the blocker after we've merged the replacement.
991 scheduler._opts_ignore_blockers = frozenset()
David James1ed3e252011-10-05 20:26:15 -0700992
993 # Actually do the merge.
Chris Ching5fcbd622016-11-28 09:22:15 -0700994 with event:
Chris Ching73486ab2017-04-26 18:02:37 -0600995 job_state.retcode = scheduler.merge()
996 if job_state.retcode != 0:
Chris Ching5fcbd622016-11-28 09:22:15 -0700997 event.fail(message="non-zero value returned")
David James1ed3e252011-10-05 20:26:15 -0700998
999 # We catch all exceptions here (including SystemExit, KeyboardInterrupt,
1000 # etc) so as to ensure that we don't confuse the multiprocessing module,
1001 # which expects that all forked children exit with os._exit().
David James321490a2012-12-17 12:05:56 -08001002 # pylint: disable=W0702
David James1ed3e252011-10-05 20:26:15 -07001003 except:
1004 traceback.print_exc(file=output)
Chris Ching73486ab2017-04-26 18:02:37 -06001005 job_state.retcode = 1
David James1ed3e252011-10-05 20:26:15 -07001006 sys.stdout.flush()
1007 sys.stderr.flush()
1008 output.flush()
Don Garrett25f309a2014-03-19 14:02:12 -07001009 # pylint: disable=W0212
Chris Ching73486ab2017-04-26 18:02:37 -06001010 os._exit(job_state.retcode)
David James1ed3e252011-10-05 20:26:15 -07001011 else:
1012 # Return the exit code of the subprocess.
1013 return os.waitpid(pid, 0)[1]
David Jamesfcb70ef2011-02-02 16:02:30 -08001014
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001015
1016def UnpackPackage(pkg_state):
1017 """Unpacks package described by pkg_state.
1018
1019 Args:
1020 pkg_state: EmergeJobState object describing target.
1021
1022 Returns:
1023 Exit code returned by subprocess.
1024 """
1025 pkgdir = os.environ.get("PKGDIR",
1026 os.path.join(os.environ["SYSROOT"], "packages"))
1027 root = os.environ.get("ROOT", os.environ["SYSROOT"])
1028 path = os.path.join(pkgdir, pkg_state.target + ".tbz2")
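  # For example (hypothetical board and package), path might end up as
  # /build/eve/packages/chromeos-base/power_manager-0.0.1-r1.tbz2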
1029 comp = cros_build_lib.FindCompressor(cros_build_lib.COMP_BZIP2)
1030 cmd = [comp, "-dc"]
1031 if comp.endswith("pbzip2"):
1032 cmd.append("--ignore-trailing-garbage=1")
1033 cmd.append(path)
1034
Chris Ching4a2ebd62017-04-26 16:30:05 -06001035 with cros_event.newEvent(task_name="UnpackPackage", **pkg_state) as event:
Chris Ching5fcbd622016-11-28 09:22:15 -07001036 result = cros_build_lib.RunCommand(cmd, cwd=root, stdout_to_pipe=True,
1037 print_cmd=False, error_code_ok=True)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001038
Chris Ching5fcbd622016-11-28 09:22:15 -07001039 # If we were not successful, return now and don't attempt untar.
1040 if result.returncode != 0:
1041 event.fail("error compressing: returned {}".format(result.returncode))
1042 return result.returncode
1043
1044 cmd = ["sudo", "tar", "-xf", "-", "-C", root]
1045
1046 result = cros_build_lib.RunCommand(cmd, cwd=root, input=result.output,
1047 print_cmd=False, error_code_ok=True)
1048 if result.returncode != 0:
1049 event.fail("error extracting:returned {}".format(result.returncode))
1050
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001051 return result.returncode
1052
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001053
1054def EmergeWorker(task_queue, job_queue, emerge, package_db, fetch_only=False,
1055 unpack_only=False):
David Jamesfcb70ef2011-02-02 16:02:30 -08001056 """This worker emerges any packages given to it on the task_queue.
1057
1058 Args:
1059 task_queue: The queue of tasks for this worker to do.
1060 job_queue: The queue of results from the worker.
1061 emerge: An EmergeData() object.
1062 package_db: A dict, mapping package ids to portage Package objects.
Brian Harring0be85c62012-03-17 19:52:12 -07001063 fetch_only: A bool, indicating if we should just fetch the target.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001064 unpack_only: A bool, indicating if we should just unpack the target.
David Jamesfcb70ef2011-02-02 16:02:30 -08001065
  It expects package identifiers to be passed to it via task_queue. When a
  task starts or finishes, we push an EmergeJobState object onto the
  job_queue; that object records the target and the filename where the job's
  output is stored.
1070 """
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001071 if fetch_only:
1072 mode = 'fetch'
1073 elif unpack_only:
1074 mode = 'unpack'
1075 else:
1076 mode = 'emerge'
1077 proctitle.settitle('EmergeWorker', mode, '[idle]')
David Jamesfcb70ef2011-02-02 16:02:30 -08001078
1079 SetupWorkerSignals()
1080 settings, trees, mtimedb = emerge.settings, emerge.trees, emerge.mtimedb
David Jamesdeebd692011-05-09 17:02:52 -07001081
1082 # Disable flushing of caches to save on I/O.
David James7a1ea4b2011-10-13 15:06:41 -07001083 root = emerge.settings["ROOT"]
1084 vardb = emerge.trees[root]["vartree"].dbapi
Mike Frysingere56debd2014-11-19 01:54:36 -05001085 vardb._flush_cache_enabled = False # pylint: disable=protected-access
Brian Harring0be85c62012-03-17 19:52:12 -07001086 bindb = emerge.trees[root]["bintree"].dbapi
1087 # Might be a set, might be a list, might be None; no clue, just use shallow
1088 # copy to ensure we can roll it back.
Don Garrett25f309a2014-03-19 14:02:12 -07001089 # pylint: disable=W0212
Brian Harring0be85c62012-03-17 19:52:12 -07001090 original_remotepkgs = copy.copy(bindb.bintree._remotepkgs)
David Jamesdeebd692011-05-09 17:02:52 -07001091
David Jamesfcb70ef2011-02-02 16:02:30 -08001092 opts, spinner = emerge.opts, emerge.spinner
1093 opts["--nodeps"] = True
Brian Harring0be85c62012-03-17 19:52:12 -07001094 if fetch_only:
1095 opts["--fetchonly"] = True
1096
David Jamesfcb70ef2011-02-02 16:02:30 -08001097 while True:
1098 # Wait for a new item to show up on the queue. This is a blocking wait,
1099 # so if there's nothing to do, we just sit here.
Brian Harring0be85c62012-03-17 19:52:12 -07001100 pkg_state = task_queue.get()
1101 if pkg_state is None:
David Jamesfcb70ef2011-02-02 16:02:30 -08001102 # If target is None, this means that the main thread wants us to quit.
1103 # The other workers need to exit too, so we'll push the message back on
1104 # to the queue so they'll get it too.
Brian Harring0be85c62012-03-17 19:52:12 -07001105 task_queue.put(None)
David Jamesfcb70ef2011-02-02 16:02:30 -08001106 return
David James7358d032011-05-19 10:40:03 -07001107 if KILLED.is_set():
1108 return
1109
Brian Harring0be85c62012-03-17 19:52:12 -07001110 target = pkg_state.target
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001111 proctitle.settitle('EmergeWorker', mode, target)
Brian Harring0be85c62012-03-17 19:52:12 -07001112
David Jamesfcb70ef2011-02-02 16:02:30 -08001113 db_pkg = package_db[target]
Brian Harring0be85c62012-03-17 19:52:12 -07001114
1115 if db_pkg.type_name == "binary":
1116 if not fetch_only and pkg_state.fetched_successfully:
1117 # Ensure portage doesn't think our pkg is remote- else it'll force
1118 # a redownload of it (even if the on-disk file is fine). In-memory
1119 # caching basically, implemented dumbly.
1120 bindb.bintree._remotepkgs = None
1121 else:
        bindb.bintree._remotepkgs = original_remotepkgs
1123
David Jamesfcb70ef2011-02-02 16:02:30 -08001124 db_pkg.root_config = emerge.root_config
1125 install_list = [db_pkg]
1126 pkgname = db_pkg.pf
1127 output = tempfile.NamedTemporaryFile(prefix=pkgname + "-", delete=False)
    os.chmod(output.name, 0o644)
David Jamesfcb70ef2011-02-02 16:02:30 -08001129 start_timestamp = time.time()
Brian Harring0be85c62012-03-17 19:52:12 -07001130 job = EmergeJobState(target, pkgname, False, output.name, start_timestamp,
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001131 fetch_only=fetch_only, unpack_only=unpack_only)
David Jamesfcb70ef2011-02-02 16:02:30 -08001132 job_queue.put(job)
1133 if "--pretend" in opts:
Chris Ching73486ab2017-04-26 18:02:37 -06001134 job.retcode = 0
David Jamesfcb70ef2011-02-02 16:02:30 -08001135 else:
David Jamesfcb70ef2011-02-02 16:02:30 -08001136 try:
David James386ccd12011-05-04 20:17:42 -07001137 emerge.scheduler_graph.mergelist = install_list
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001138 if unpack_only:
Chris Ching73486ab2017-04-26 18:02:37 -06001139 job.retcode = UnpackPackage(pkg_state)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001140 else:
Chris Ching73486ab2017-04-26 18:02:37 -06001141 job.retcode = EmergeProcess(output, job, settings, trees, mtimedb,
1142 opts, spinner,
1143 favorites=emerge.favorites,
1144 graph_config=emerge.scheduler_graph)
David Jamesfcb70ef2011-02-02 16:02:30 -08001145 except Exception:
1146 traceback.print_exc(file=output)
Chris Ching73486ab2017-04-26 18:02:37 -06001147 job.retcode = 1
David James1ed3e252011-10-05 20:26:15 -07001148 output.close()
David Jamesfcb70ef2011-02-02 16:02:30 -08001149
David James7358d032011-05-19 10:40:03 -07001150 if KILLED.is_set():
1151 return
1152
David Jamesfcb70ef2011-02-02 16:02:30 -08001153 job = EmergeJobState(target, pkgname, True, output.name, start_timestamp,
Chris Ching73486ab2017-04-26 18:02:37 -06001154 job.retcode, fetch_only=fetch_only,
1155 try_count=job.try_count, unpack_only=unpack_only)
David Jamesfcb70ef2011-02-02 16:02:30 -08001156 job_queue.put(job)
1157
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001158 # Set the title back to idle as the multiprocess pool won't destroy us;
1159 # when another job comes up, it'll re-use this process.
1160 proctitle.settitle('EmergeWorker', mode, '[idle]')
1161
David Jamesfcb70ef2011-02-02 16:02:30 -08001162
1163class LinePrinter(object):
1164 """Helper object to print a single line."""
1165
1166 def __init__(self, line):
1167 self.line = line
1168
David James321490a2012-12-17 12:05:56 -08001169 def Print(self, _seek_locations):
Mike Frysinger383367e2014-09-16 15:06:17 -04001170 print(self.line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001171
1172
1173class JobPrinter(object):
1174 """Helper object to print output of a job."""
1175
1176 def __init__(self, job, unlink=False):
1177 """Print output of job.
1178
Mike Frysinger02e1e072013-11-10 22:11:34 -05001179 If unlink is True, unlink the job output file when done.
1180 """
David Jamesfcb70ef2011-02-02 16:02:30 -08001181 self.current_time = time.time()
1182 self.job = job
1183 self.unlink = unlink
1184
1185 def Print(self, seek_locations):
1186
1187 job = self.job
1188
1189 # Calculate how long the job has been running.
1190 seconds = self.current_time - job.start_timestamp
1191
1192 # Note that we've printed out the job so far.
1193 job.last_output_timestamp = self.current_time
1194
1195    # Build a header describing the job and how long it has been running.
1196 info = "job %s (%dm%.1fs)" % (job.pkgname, seconds / 60, seconds % 60)
1197 last_output_seek = seek_locations.get(job.filename, 0)
1198 if last_output_seek:
Mike Frysinger383367e2014-09-16 15:06:17 -04001199 print("=== Continue output for %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001200 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001201 print("=== Start output for %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001202
1203 # Print actual output from job
1204 f = codecs.open(job.filename, encoding='utf-8', errors='replace')
1205 f.seek(last_output_seek)
1206 prefix = job.pkgname + ":"
1207 for line in f:
1208
1209 # Save off our position in the file
1210 if line and line[-1] == "\n":
1211 last_output_seek = f.tell()
1212 line = line[:-1]
1213
1214 # Print our line
Mike Frysinger383367e2014-09-16 15:06:17 -04001215 print(prefix, line.encode('utf-8', 'replace'))
David Jamesfcb70ef2011-02-02 16:02:30 -08001216 f.close()
1217
1218 # Save our last spot in the file so that we don't print out the same
1219 # location twice.
1220 seek_locations[job.filename] = last_output_seek
1221
1222 # Note end of output section
1223 if job.done:
Mike Frysinger383367e2014-09-16 15:06:17 -04001224 print("=== Complete: %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001225 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001226 print("=== Still running: %s ===" % info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001227
1228 if self.unlink:
1229 os.unlink(job.filename)
1230
1231
1232def PrintWorker(queue):
1233 """A worker that prints stuff to the screen as requested."""
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001234 proctitle.settitle('PrintWorker')
David Jamesfcb70ef2011-02-02 16:02:30 -08001235
David James321490a2012-12-17 12:05:56 -08001236 def ExitHandler(_signum, _frame):
David James7358d032011-05-19 10:40:03 -07001237 # Set KILLED flag.
1238 KILLED.set()
1239
David Jamesfcb70ef2011-02-02 16:02:30 -08001240 # Switch to default signal handlers so that we'll die after two signals.
David James7358d032011-05-19 10:40:03 -07001241 signal.signal(signal.SIGINT, KillHandler)
1242 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -08001243
1244 # Don't exit on the first SIGINT / SIGTERM, because the parent worker will
1245 # handle it and tell us when we need to exit.
1246 signal.signal(signal.SIGINT, ExitHandler)
1247 signal.signal(signal.SIGTERM, ExitHandler)
1248
1249 # seek_locations is a map indicating the position we are at in each file.
1250 # It starts off empty, but is set by the various Print jobs as we go along
1251 # to indicate where we left off in each file.
1252 seek_locations = {}
1253 while True:
1254 try:
1255 job = queue.get()
1256 if job:
1257 job.Print(seek_locations)
David Jamesbccf8eb2011-07-27 14:06:06 -07001258 sys.stdout.flush()
David Jamesfcb70ef2011-02-02 16:02:30 -08001259 else:
1260 break
1261 except IOError as ex:
1262 if ex.errno == errno.EINTR:
1263 # Looks like we received a signal. Keep printing.
1264 continue
1265 raise
1266
Brian Harring867e2362012-03-17 04:05:17 -07001267
Brian Harring0be85c62012-03-17 19:52:12 -07001268class TargetState(object):
Chris Ching5fcbd622016-11-28 09:22:15 -07001269  """State tracked for a single emerge target as it is fetched and built."""
Brian Harring867e2362012-03-17 04:05:17 -07001270
Brian Harring0be85c62012-03-17 19:52:12 -07001271 __slots__ = ("target", "info", "score", "prefetched", "fetched_successfully")
Brian Harring867e2362012-03-17 04:05:17 -07001272
David James321490a2012-12-17 12:05:56 -08001273 def __init__(self, target, info):
Brian Harring867e2362012-03-17 04:05:17 -07001274 self.target, self.info = target, info
Brian Harring0be85c62012-03-17 19:52:12 -07001275 self.fetched_successfully = False
1276 self.prefetched = False
David James321490a2012-12-17 12:05:56 -08001277 self.score = None
Brian Harring867e2362012-03-17 04:05:17 -07001278 self.update_score()
1279
1280 def __cmp__(self, other):
1281 return cmp(self.score, other.score)
1282
1283 def update_score(self):
1284 self.score = (
1285 -len(self.info["tprovides"]),
Brian Harring0be85c62012-03-17 19:52:12 -07001286 len(self.info["needs"]),
Brian Harring11c5eeb2012-03-18 11:02:39 -07001287 not self.info["binary"],
Brian Harring867e2362012-03-17 04:05:17 -07001288 -len(self.info["provides"]),
1289 self.info["idx"],
1290 self.target,
1291 )
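    # heapq orders ascending, so the "smallest" score wins: targets that
    # more packages depend on (larger tprovides, hence a more negative
    # first element), that need fewer deps, and that are binary sort ahead.
    # Illustrative values only: a binary that ten packages depend on scores
    # roughly (-10, 0, False, ...) and is picked before a leaf source build
    # scoring (-1, 3, True, ...).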
1292
1293
1294class ScoredHeap(object):
Don Garrett25f309a2014-03-19 14:02:12 -07001295 """Implementation of a general purpose scored heap."""
Brian Harring867e2362012-03-17 04:05:17 -07001296
Brian Harring0be85c62012-03-17 19:52:12 -07001297 __slots__ = ("heap", "_heap_set")
1298
Brian Harring867e2362012-03-17 04:05:17 -07001299 def __init__(self, initial=()):
Brian Harring0be85c62012-03-17 19:52:12 -07001300 self.heap = list()
1301 self._heap_set = set()
1302 if initial:
1303 self.multi_put(initial)
Brian Harring867e2362012-03-17 04:05:17 -07001304
1305 def get(self):
Brian Harring0be85c62012-03-17 19:52:12 -07001306 item = heapq.heappop(self.heap)
1307 self._heap_set.remove(item.target)
1308 return item
Brian Harring867e2362012-03-17 04:05:17 -07001309
Brian Harring0be85c62012-03-17 19:52:12 -07001310 def put(self, item):
1311 if not isinstance(item, TargetState):
1312 raise ValueError("Item %r isn't a TargetState" % (item,))
1313 heapq.heappush(self.heap, item)
1314 self._heap_set.add(item.target)
Brian Harring867e2362012-03-17 04:05:17 -07001315
Brian Harring0be85c62012-03-17 19:52:12 -07001316 def multi_put(self, sequence):
1317 sequence = list(sequence)
1318 self.heap.extend(sequence)
1319 self._heap_set.update(x.target for x in sequence)
Brian Harring867e2362012-03-17 04:05:17 -07001320 self.sort()
1321
David James5c9996d2012-03-24 10:50:46 -07001322 def sort(self):
1323 heapq.heapify(self.heap)
1324
Brian Harring0be85c62012-03-17 19:52:12 -07001325 def __contains__(self, target):
1326 return target in self._heap_set
1327
1328 def __nonzero__(self):
1329 return bool(self.heap)
1330
Brian Harring867e2362012-03-17 04:05:17 -07001331 def __len__(self):
1332 return len(self.heap)
1333
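# Rough usage sketch of ScoredHeap (names below are hypothetical):
#   heap = ScoredHeap(state_map.itervalues())  # heapified up front
#   while heap:                                # __nonzero__ / __len__
#     state = heap.get()                       # best-scored TargetState first
#     ...                                      # "pkg in heap" checks _heap_set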
1334
David Jamesfcb70ef2011-02-02 16:02:30 -08001335class EmergeQueue(object):
1336 """Class to schedule emerge jobs according to a dependency graph."""
1337
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001338 def __init__(self, deps_map, emerge, package_db, show_output, unpack_only,
1339 max_retries):
David Jamesfcb70ef2011-02-02 16:02:30 -08001340 # Store the dependency graph.
1341 self._deps_map = deps_map
Brian Harring0be85c62012-03-17 19:52:12 -07001342 self._state_map = {}
David Jamesfcb70ef2011-02-02 16:02:30 -08001343 # Initialize the running queue to empty
Brian Harring0be85c62012-03-17 19:52:12 -07001344 self._build_jobs = {}
1345 self._build_ready = ScoredHeap()
1346 self._fetch_jobs = {}
1347 self._fetch_ready = ScoredHeap()
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001348 self._unpack_jobs = {}
1349 self._unpack_ready = ScoredHeap()
David Jamesfcb70ef2011-02-02 16:02:30 -08001350 # List of total package installs represented in deps_map.
1351 install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"]
1352 self._total_jobs = len(install_jobs)
1353 self._show_output = show_output
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001354 self._unpack_only = unpack_only
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001355 self._max_retries = max_retries
David Jamesfcb70ef2011-02-02 16:02:30 -08001356
1357 if "--pretend" in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001358 print("Skipping merge because of --pretend mode.")
David Jamesfcb70ef2011-02-02 16:02:30 -08001359 sys.exit(0)
1360
David Jamesaaf49e42014-04-24 09:40:05 -07001361 # Set up a session so we can easily terminate all children.
1362 self._SetupSession()
David James7358d032011-05-19 10:40:03 -07001363
David Jamesfcb70ef2011-02-02 16:02:30 -08001364 # Setup scheduler graph object. This is used by the child processes
1365 # to help schedule jobs.
1366 emerge.scheduler_graph = emerge.depgraph.schedulerGraph()
1367
1368 # Calculate how many jobs we can run in parallel. We don't want to pass
1369 # the --jobs flag over to emerge itself, because that'll tell emerge to
1370 # hide its output, and said output is quite useful for debugging hung
1371 # jobs.
1372 procs = min(self._total_jobs,
1373 emerge.opts.pop("--jobs", multiprocessing.cpu_count()))
Nam T. Nguyenf7098b32015-05-08 11:04:48 -07001374 self._build_procs = self._unpack_procs = max(1, procs)
1375 # Fetch is IO bound, we can use more processes.
1376 self._fetch_procs = max(4, procs)
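    # Illustrative example: with 3 packages to merge on an 8-core machine,
    # procs is 3, so we run 3 build/unpack workers but still 4 fetch workers.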
David James8c7e5e32011-06-28 11:26:03 -07001377 self._load_avg = emerge.opts.pop("--load-average", None)
David Jamesfcb70ef2011-02-02 16:02:30 -08001378 self._job_queue = multiprocessing.Queue()
1379 self._print_queue = multiprocessing.Queue()
Brian Harring0be85c62012-03-17 19:52:12 -07001380
1381 self._fetch_queue = multiprocessing.Queue()
1382 args = (self._fetch_queue, self._job_queue, emerge, package_db, True)
1383 self._fetch_pool = multiprocessing.Pool(self._fetch_procs, EmergeWorker,
1384 args)
1385
1386 self._build_queue = multiprocessing.Queue()
1387 args = (self._build_queue, self._job_queue, emerge, package_db)
1388 self._build_pool = multiprocessing.Pool(self._build_procs, EmergeWorker,
1389 args)
1390
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001391 if self._unpack_only:
1392 # Unpack pool only required on unpack_only jobs.
1393 self._unpack_queue = multiprocessing.Queue()
1394 args = (self._unpack_queue, self._job_queue, emerge, package_db, False,
1395 True)
1396 self._unpack_pool = multiprocessing.Pool(self._unpack_procs, EmergeWorker,
1397 args)
1398
David Jamesfcb70ef2011-02-02 16:02:30 -08001399 self._print_worker = multiprocessing.Process(target=PrintWorker,
1400 args=[self._print_queue])
1401 self._print_worker.start()
1402
1403 # Initialize the failed queue to empty.
1404 self._retry_queue = []
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001405 self._failed_count = dict()
David Jamesfcb70ef2011-02-02 16:02:30 -08001406
David Jamesfcb70ef2011-02-02 16:02:30 -08001407 # Setup an exit handler so that we print nice messages if we are
1408 # terminated.
1409 self._SetupExitHandler()
1410
1411 # Schedule our jobs.
Brian Harring0be85c62012-03-17 19:52:12 -07001412 self._state_map.update(
1413 (pkg, TargetState(pkg, data)) for pkg, data in deps_map.iteritems())
1414 self._fetch_ready.multi_put(self._state_map.itervalues())
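    # Every target starts out on the fetch heap; Run() moves it onto the
    # build heap once its fetch completes and none of its deps are pending.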
David Jamesfcb70ef2011-02-02 16:02:30 -08001415
David Jamesaaf49e42014-04-24 09:40:05 -07001416 def _SetupSession(self):
1417 """Set up a session so we can easily terminate all children."""
1418 # When we call os.setsid(), this sets up a session / process group for this
1419 # process and all children. These session groups are needed so that we can
1420 # easily kill all children (including processes launched by emerge) before
1421 # we exit.
1422 #
1423 # One unfortunate side effect of os.setsid() is that it blocks CTRL-C from
1424 # being received. To work around this, we only call os.setsid() in a forked
1425 # process, so that the parent can still watch for CTRL-C. The parent will
1426 # just sit around, watching for signals and propagating them to the child,
1427 # until the child exits.
1428 #
1429 # TODO(davidjames): It would be nice if we could replace this with cgroups.
1430 pid = os.fork()
1431 if pid == 0:
1432 os.setsid()
1433 else:
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001434 proctitle.settitle('SessionManager')
1435
David Jamesaaf49e42014-04-24 09:40:05 -07001436 def PropagateToChildren(signum, _frame):
1437 # Just propagate the signals down to the child. We'll exit when the
1438 # child does.
1439 try:
1440 os.kill(pid, signum)
1441 except OSError as ex:
1442 if ex.errno != errno.ESRCH:
1443 raise
1444 signal.signal(signal.SIGINT, PropagateToChildren)
1445 signal.signal(signal.SIGTERM, PropagateToChildren)
1446
1447 def StopGroup(_signum, _frame):
1448 # When we get stopped, stop the children.
1449 try:
1450 os.killpg(pid, signal.SIGSTOP)
1451 os.kill(0, signal.SIGSTOP)
1452 except OSError as ex:
1453 if ex.errno != errno.ESRCH:
1454 raise
1455 signal.signal(signal.SIGTSTP, StopGroup)
1456
1457 def ContinueGroup(_signum, _frame):
1458 # Launch the children again after being stopped.
1459 try:
1460 os.killpg(pid, signal.SIGCONT)
1461 except OSError as ex:
1462 if ex.errno != errno.ESRCH:
1463 raise
1464 signal.signal(signal.SIGCONT, ContinueGroup)
1465
1466 # Loop until the children exit. We exit with os._exit to be sure we
1467 # don't run any finalizers (those will be run by the child process.)
1468 # pylint: disable=W0212
1469 while True:
1470 try:
1471 # Wait for the process to exit. When it does, exit with the return
1472 # value of the subprocess.
Mike Frysingere2d8f0d2014-11-01 13:09:26 -04001473 os._exit(process_util.GetExitStatus(os.waitpid(pid, 0)[1]))
David Jamesaaf49e42014-04-24 09:40:05 -07001474 except OSError as ex:
1475 if ex.errno == errno.EINTR:
1476 continue
1477 traceback.print_exc()
1478 os._exit(1)
1479 except BaseException:
1480 traceback.print_exc()
1481 os._exit(1)
1482
David Jamesfcb70ef2011-02-02 16:02:30 -08001483 def _SetupExitHandler(self):
1484
David James321490a2012-12-17 12:05:56 -08001485 def ExitHandler(signum, _frame):
David James7358d032011-05-19 10:40:03 -07001486 # Set KILLED flag.
1487 KILLED.set()
David Jamesfcb70ef2011-02-02 16:02:30 -08001488
1489 # Kill our signal handlers so we don't get called recursively
David James7358d032011-05-19 10:40:03 -07001490 signal.signal(signal.SIGINT, KillHandler)
1491 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -08001492
1493 # Print our current job status
Brian Harring0be85c62012-03-17 19:52:12 -07001494 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001495 if job:
1496 self._print_queue.put(JobPrinter(job, unlink=True))
1497
1498 # Notify the user that we are exiting
1499 self._Print("Exiting on signal %s" % signum)
David James7358d032011-05-19 10:40:03 -07001500 self._print_queue.put(None)
1501 self._print_worker.join()
David Jamesfcb70ef2011-02-02 16:02:30 -08001502
1503 # Kill child threads, then exit.
David James7358d032011-05-19 10:40:03 -07001504 os.killpg(0, signal.SIGKILL)
David Jamesfcb70ef2011-02-02 16:02:30 -08001505 sys.exit(1)
1506
1507 # Print out job status when we are killed
1508 signal.signal(signal.SIGINT, ExitHandler)
1509 signal.signal(signal.SIGTERM, ExitHandler)
1510
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001511 def _ScheduleUnpack(self, pkg_state):
1512 self._unpack_jobs[pkg_state.target] = None
1513 self._unpack_queue.put(pkg_state)
1514
Brian Harring0be85c62012-03-17 19:52:12 -07001515 def _Schedule(self, pkg_state):
David Jamesfcb70ef2011-02-02 16:02:30 -08001516    # We maintain a tree of all deps; if this one doesn't need
David James8c7e5e32011-06-28 11:26:03 -07001517    # to be installed, just free up its children and continue.
David Jamesfcb70ef2011-02-02 16:02:30 -08001518 # It is possible to reinstall deps of deps, without reinstalling
1519 # first level deps, like so:
Mike Frysingerfd969312014-04-02 22:16:42 -04001520 # virtual/target-os (merge) -> eselect (nomerge) -> python (merge)
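    # Note: we return True only when a build job is actually queued;
    # _Retry() relies on this to stop after rescheduling a single package.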
Brian Harring0be85c62012-03-17 19:52:12 -07001521 this_pkg = pkg_state.info
1522 target = pkg_state.target
1523 if pkg_state.info is not None:
1524 if this_pkg["action"] == "nomerge":
1525 self._Finish(target)
1526 elif target not in self._build_jobs:
1527 # Kick off the build if it's marked to be built.
1528 self._build_jobs[target] = None
1529 self._build_queue.put(pkg_state)
1530 return True
David Jamesfcb70ef2011-02-02 16:02:30 -08001531
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001532 def _ScheduleLoop(self, unpack_only=False):
1533 if unpack_only:
1534 ready_queue = self._unpack_ready
1535 jobs_queue = self._unpack_jobs
1536 procs = self._unpack_procs
1537 else:
1538 ready_queue = self._build_ready
1539 jobs_queue = self._build_jobs
1540 procs = self._build_procs
1541
David James8c7e5e32011-06-28 11:26:03 -07001542 # If the current load exceeds our desired load average, don't schedule
1543 # more than one job.
1544 if self._load_avg and os.getloadavg()[0] > self._load_avg:
1545 needed_jobs = 1
1546 else:
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001547 needed_jobs = procs
David James8c7e5e32011-06-28 11:26:03 -07001548
1549 # Schedule more jobs.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001550 while ready_queue and len(jobs_queue) < needed_jobs:
1551 state = ready_queue.get()
1552 if unpack_only:
1553 self._ScheduleUnpack(state)
1554 else:
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001555 if state.target not in self._failed_count:
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001556 self._Schedule(state)
David Jamesfcb70ef2011-02-02 16:02:30 -08001557
1558 def _Print(self, line):
1559 """Print a single line."""
1560 self._print_queue.put(LinePrinter(line))
1561
1562 def _Status(self):
1563 """Print status."""
1564 current_time = time.time()
Aviv Keshet3b381682015-11-12 13:15:06 -08001565 current_time_struct = time.localtime(current_time)
David Jamesfcb70ef2011-02-02 16:02:30 -08001566 no_output = True
1567
1568 # Print interim output every minute if --show-output is used. Otherwise,
1569 # print notifications about running packages every 2 minutes, and print
1570 # full output for jobs that have been running for 60 minutes or more.
1571 if self._show_output:
1572 interval = 60
1573 notify_interval = 0
1574 else:
1575 interval = 60 * 60
1576 notify_interval = 60 * 2
David James321490a2012-12-17 12:05:56 -08001577 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001578 if job:
1579 last_timestamp = max(job.start_timestamp, job.last_output_timestamp)
1580 if last_timestamp + interval < current_time:
1581 self._print_queue.put(JobPrinter(job))
1582 job.last_output_timestamp = current_time
1583 no_output = False
1584 elif (notify_interval and
1585 job.last_notify_timestamp + notify_interval < current_time):
1586 job_seconds = current_time - job.start_timestamp
1587 args = (job.pkgname, job_seconds / 60, job_seconds % 60, job.filename)
1588 info = "Still building %s (%dm%.1fs). Logs in %s" % args
1589 job.last_notify_timestamp = current_time
1590 self._Print(info)
1591 no_output = False
1592
1593 # If we haven't printed any messages yet, print a general status message
1594 # here.
1595 if no_output:
1596 seconds = current_time - GLOBAL_START
Brian Harring0be85c62012-03-17 19:52:12 -07001597 fjobs, fready = len(self._fetch_jobs), len(self._fetch_ready)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001598 ujobs, uready = len(self._unpack_jobs), len(self._unpack_ready)
Brian Harring0be85c62012-03-17 19:52:12 -07001599 bjobs, bready = len(self._build_jobs), len(self._build_ready)
1600 retries = len(self._retry_queue)
1601 pending = max(0, len(self._deps_map) - fjobs - bjobs)
1602 line = "Pending %s/%s, " % (pending, self._total_jobs)
1603 if fjobs or fready:
1604 line += "Fetching %s/%s, " % (fjobs, fready + fjobs)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001605 if ujobs or uready:
1606 line += "Unpacking %s/%s, " % (ujobs, uready + ujobs)
Brian Harring0be85c62012-03-17 19:52:12 -07001607 if bjobs or bready or retries:
1608 line += "Building %s/%s, " % (bjobs, bready + bjobs)
1609 if retries:
1610 line += "Retrying %s, " % (retries,)
Mike Frysingerd6e2df02014-11-26 02:55:04 -05001611 load = " ".join(str(x) for x in os.getloadavg())
Aviv Keshet3b381682015-11-12 13:15:06 -08001612 line += ("[Time %s | Elapsed %dm%.1fs | Load %s]" % (
1613 time.strftime('%H:%M:%S', current_time_struct), seconds / 60,
1614 seconds % 60, load))
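      # A typical line looks roughly like (values illustrative):
      #   Pending 12/40, Fetching 3/7, Building 4/9, Retrying 1,
      #   [Time 14:03:22 | Elapsed 5m12.3s | Load 3.1 2.9 2.5]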
Brian Harring0be85c62012-03-17 19:52:12 -07001615 self._Print(line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001616
1617 def _Finish(self, target):
David James8c7e5e32011-06-28 11:26:03 -07001618 """Mark a target as completed and unblock dependencies."""
1619 this_pkg = self._deps_map[target]
1620 if this_pkg["needs"] and this_pkg["nodeps"]:
1621 # We got installed, but our deps have not been installed yet. Dependent
1622 # packages should only be installed when our needs have been fully met.
1623 this_pkg["action"] = "nomerge"
1624 else:
David James8c7e5e32011-06-28 11:26:03 -07001625 for dep in this_pkg["provides"]:
1626 dep_pkg = self._deps_map[dep]
Brian Harring0be85c62012-03-17 19:52:12 -07001627 state = self._state_map[dep]
David James8c7e5e32011-06-28 11:26:03 -07001628 del dep_pkg["needs"][target]
Brian Harring0be85c62012-03-17 19:52:12 -07001629 state.update_score()
1630 if not state.prefetched:
1631 if dep in self._fetch_ready:
1632 # If it's not currently being fetched, update the prioritization
1633 self._fetch_ready.sort()
1634 elif not dep_pkg["needs"]:
David James8c7e5e32011-06-28 11:26:03 -07001635 if dep_pkg["nodeps"] and dep_pkg["action"] == "nomerge":
1636 self._Finish(dep)
1637 else:
Brian Harring0be85c62012-03-17 19:52:12 -07001638 self._build_ready.put(self._state_map[dep])
David James8c7e5e32011-06-28 11:26:03 -07001639 self._deps_map.pop(target)
David Jamesfcb70ef2011-02-02 16:02:30 -08001640
1641 def _Retry(self):
David James8c7e5e32011-06-28 11:26:03 -07001642 while self._retry_queue:
Brian Harring0be85c62012-03-17 19:52:12 -07001643 state = self._retry_queue.pop(0)
1644 if self._Schedule(state):
1645 self._Print("Retrying emerge of %s." % state.target)
David James8c7e5e32011-06-28 11:26:03 -07001646 break
David Jamesfcb70ef2011-02-02 16:02:30 -08001647
Brian Harringa43f5952012-04-12 01:19:34 -07001648 def _Shutdown(self):
David Jamesfcb70ef2011-02-02 16:02:30 -08001649 # Tell emerge workers to exit. They all exit when 'None' is pushed
1650 # to the queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001651
Brian Harringa43f5952012-04-12 01:19:34 -07001652 # Shutdown the workers first; then jobs (which is how they feed things back)
1653 # then finally the print queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001654
Brian Harringa43f5952012-04-12 01:19:34 -07001655 def _stop(queue, pool):
1656 if pool is None:
1657 return
1658 try:
1659 queue.put(None)
1660 pool.close()
1661 pool.join()
1662 finally:
1663 pool.terminate()
Brian Harring0be85c62012-03-17 19:52:12 -07001664
Brian Harringa43f5952012-04-12 01:19:34 -07001665 _stop(self._fetch_queue, self._fetch_pool)
1666 self._fetch_queue = self._fetch_pool = None
Brian Harring0be85c62012-03-17 19:52:12 -07001667
Brian Harringa43f5952012-04-12 01:19:34 -07001668 _stop(self._build_queue, self._build_pool)
1669 self._build_queue = self._build_pool = None
1670
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001671 if self._unpack_only:
1672 _stop(self._unpack_queue, self._unpack_pool)
1673 self._unpack_queue = self._unpack_pool = None
1674
Brian Harringa43f5952012-04-12 01:19:34 -07001675 if self._job_queue is not None:
1676 self._job_queue.close()
1677 self._job_queue = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001678
1679 # Now that our workers are finished, we can kill the print queue.
Brian Harringa43f5952012-04-12 01:19:34 -07001680 if self._print_worker is not None:
1681 try:
1682 self._print_queue.put(None)
1683 self._print_queue.close()
1684 self._print_worker.join()
1685 finally:
1686 self._print_worker.terminate()
1687 self._print_queue = self._print_worker = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001688
1689 def Run(self):
1690 """Run through the scheduled ebuilds.
1691
1692 Keep running so long as we have uninstalled packages in the
1693 dependency graph to merge.
1694 """
Brian Harringa43f5952012-04-12 01:19:34 -07001695 if not self._deps_map:
1696 return
1697
Brian Harring0be85c62012-03-17 19:52:12 -07001698 # Start the fetchers.
1699 for _ in xrange(min(self._fetch_procs, len(self._fetch_ready))):
1700 state = self._fetch_ready.get()
1701 self._fetch_jobs[state.target] = None
1702 self._fetch_queue.put(state)
1703
1704 # Print an update, then get going.
1705 self._Status()
1706
David Jamesfcb70ef2011-02-02 16:02:30 -08001707 while self._deps_map:
1708 # Check here that we are actually waiting for something.
Brian Harring0be85c62012-03-17 19:52:12 -07001709 if (self._build_queue.empty() and
David Jamesfcb70ef2011-02-02 16:02:30 -08001710 self._job_queue.empty() and
Brian Harring0be85c62012-03-17 19:52:12 -07001711 not self._fetch_jobs and
1712 not self._fetch_ready and
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001713 not self._unpack_jobs and
1714 not self._unpack_ready and
Brian Harring0be85c62012-03-17 19:52:12 -07001715 not self._build_jobs and
1716 not self._build_ready and
David Jamesfcb70ef2011-02-02 16:02:30 -08001717 self._deps_map):
1718 # If we have failed on a package, retry it now.
1719 if self._retry_queue:
1720 self._Retry()
1721 else:
David Jamesfcb70ef2011-02-02 16:02:30 -08001722 # Tell the user why we're exiting.
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001723 if self._failed_count:
1724 print('Packages failed:\n\t%s' %
1725 '\n\t'.join(self._failed_count.iterkeys()))
David James0eae23e2012-07-03 15:04:25 -07001726 status_file = os.environ.get("PARALLEL_EMERGE_STATUS_FILE")
1727 if status_file:
David James321490a2012-12-17 12:05:56 -08001728 failed_pkgs = set(portage.versions.cpv_getkey(x)
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001729 for x in self._failed_count.iterkeys())
David James0eae23e2012-07-03 15:04:25 -07001730 with open(status_file, "a") as f:
1731 f.write("%s\n" % " ".join(failed_pkgs))
David Jamesfcb70ef2011-02-02 16:02:30 -08001732 else:
Mike Frysinger383367e2014-09-16 15:06:17 -04001733 print("Deadlock! Circular dependencies!")
David Jamesfcb70ef2011-02-02 16:02:30 -08001734 sys.exit(1)
1735
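      # Poll the job queue for up to a minute (12 tries x 5s timeout); after
      # each timeout, try to schedule more work so idle workers stay fed.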
David James321490a2012-12-17 12:05:56 -08001736 for _ in xrange(12):
David Jamesa74289a2011-08-12 10:41:24 -07001737 try:
1738 job = self._job_queue.get(timeout=5)
1739 break
1740 except Queue.Empty:
1741 # Check if any more jobs can be scheduled.
1742 self._ScheduleLoop()
1743 else:
Brian Harring706747c2012-03-16 03:04:31 -07001744 # Print an update every 60 seconds.
David Jamesfcb70ef2011-02-02 16:02:30 -08001745 self._Status()
1746 continue
1747
1748 target = job.target
1749
Brian Harring0be85c62012-03-17 19:52:12 -07001750 if job.fetch_only:
1751 if not job.done:
1752 self._fetch_jobs[job.target] = job
1753 else:
1754 state = self._state_map[job.target]
1755 state.prefetched = True
1756 state.fetched_successfully = (job.retcode == 0)
1757 del self._fetch_jobs[job.target]
1758 self._Print("Fetched %s in %2.2fs"
1759 % (target, time.time() - job.start_timestamp))
1760
1761 if self._show_output or job.retcode != 0:
1762 self._print_queue.put(JobPrinter(job, unlink=True))
1763 else:
1764 os.unlink(job.filename)
1765 # Failure or not, let build work with it next.
1766 if not self._deps_map[job.target]["needs"]:
1767 self._build_ready.put(state)
1768 self._ScheduleLoop()
1769
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001770 if self._unpack_only and job.retcode == 0:
1771 self._unpack_ready.put(state)
1772 self._ScheduleLoop(unpack_only=True)
1773
Brian Harring0be85c62012-03-17 19:52:12 -07001774 if self._fetch_ready:
1775 state = self._fetch_ready.get()
1776 self._fetch_queue.put(state)
1777 self._fetch_jobs[state.target] = None
1778 else:
1779 # Minor optimization; shut down fetchers early since we know
1780 # the queue is empty.
1781 self._fetch_queue.put(None)
1782 continue
1783
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001784 if job.unpack_only:
1785 if not job.done:
1786 self._unpack_jobs[target] = job
1787 else:
1788 del self._unpack_jobs[target]
1789 self._Print("Unpacked %s in %2.2fs"
1790 % (target, time.time() - job.start_timestamp))
1791 if self._show_output or job.retcode != 0:
1792 self._print_queue.put(JobPrinter(job, unlink=True))
1793 else:
1794 os.unlink(job.filename)
1795 if self._unpack_ready:
1796 state = self._unpack_ready.get()
1797 self._unpack_queue.put(state)
1798 self._unpack_jobs[state.target] = None
1799 continue
1800
David Jamesfcb70ef2011-02-02 16:02:30 -08001801 if not job.done:
Brian Harring0be85c62012-03-17 19:52:12 -07001802 self._build_jobs[target] = job
David Jamesfcb70ef2011-02-02 16:02:30 -08001803 self._Print("Started %s (logged in %s)" % (target, job.filename))
1804 continue
1805
1806 # Print output of job
1807 if self._show_output or job.retcode != 0:
1808 self._print_queue.put(JobPrinter(job, unlink=True))
1809 else:
1810 os.unlink(job.filename)
Brian Harring0be85c62012-03-17 19:52:12 -07001811 del self._build_jobs[target]
David Jamesfcb70ef2011-02-02 16:02:30 -08001812
1813 seconds = time.time() - job.start_timestamp
1814 details = "%s (in %dm%.1fs)" % (target, seconds / 60, seconds % 60)
1815
1816 # Complain if necessary.
1817 if job.retcode != 0:
1818 # Handle job failure.
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001819 failed_count = self._failed_count.get(target, 0)
1820 if failed_count >= self._max_retries:
1821 # If this job has failed and can't be retried, give up.
David Jamesfcb70ef2011-02-02 16:02:30 -08001822 self._Print("Failed %s. Your build has failed." % details)
1823 else:
1824 # Queue up this build to try again after a long while.
Brian Harring0be85c62012-03-17 19:52:12 -07001825 self._retry_queue.append(self._state_map[target])
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001826 self._failed_count[target] = failed_count + 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001827 self._Print("Failed %s, retrying later." % details)
1828 else:
David James32420cc2011-08-25 21:32:46 -07001829 self._Print("Completed %s" % details)
1830
1831 # Mark as completed and unblock waiting ebuilds.
1832 self._Finish(target)
1833
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001834 if target in self._failed_count and self._retry_queue:
David Jamesfcb70ef2011-02-02 16:02:30 -08001835 # If we have successfully retried a failed package, and there
1836 # are more failed packages, try the next one. We will only have
1837 # one retrying package actively running at a time.
1838 self._Retry()
1839
David Jamesfcb70ef2011-02-02 16:02:30 -08001840
David James8c7e5e32011-06-28 11:26:03 -07001841 # Schedule pending jobs and print an update.
1842 self._ScheduleLoop()
1843 self._Status()
David Jamesfcb70ef2011-02-02 16:02:30 -08001844
David Jamese703d0f2012-01-12 16:27:45 -08001845 # If packages were retried, output a warning.
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001846 if self._failed_count:
David Jamese703d0f2012-01-12 16:27:45 -08001847 self._Print("")
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001848 self._Print("WARNING: The following packages failed once or more,")
David Jamese703d0f2012-01-12 16:27:45 -08001849 self._Print("but succeeded upon retry. This might indicate incorrect")
1850 self._Print("dependencies.")
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001851 for pkg in self._failed_count.iterkeys():
David Jamese703d0f2012-01-12 16:27:45 -08001852 self._Print(" %s" % pkg)
1853 self._Print("@@@STEP_WARNINGS@@@")
1854 self._Print("")
1855
David Jamesfcb70ef2011-02-02 16:02:30 -08001856 # Tell child threads to exit.
1857 self._Print("Merge complete")
David Jamesfcb70ef2011-02-02 16:02:30 -08001858
1859
Brian Harring30675052012-02-29 12:18:22 -08001860def main(argv):
Brian Harring8294d652012-05-23 02:20:52 -07001861 try:
1862 return real_main(argv)
1863 finally:
1864 # Work around multiprocessing sucking and not cleaning up after itself.
1865    # http://bugs.python.org/issue4106
1866    # Step one: ensure GC is run *prior* to the VM starting shutdown.
1867 gc.collect()
1868    # Step two: go looking for those threads and try to manually reap
1869 # them if we can.
1870 for x in threading.enumerate():
1871 # Filter on the name, and ident; if ident is None, the thread
1872 # wasn't started.
1873 if x.name == 'QueueFeederThread' and x.ident is not None:
1874 x.join(1)
David Jamesfcb70ef2011-02-02 16:02:30 -08001875
Brian Harring8294d652012-05-23 02:20:52 -07001876
1877def real_main(argv):
Brian Harring30675052012-02-29 12:18:22 -08001878 parallel_emerge_args = argv[:]
David Jamesfcb70ef2011-02-02 16:02:30 -08001879 deps = DepGraphGenerator()
Brian Harring30675052012-02-29 12:18:22 -08001880 deps.Initialize(parallel_emerge_args)
David Jamesfcb70ef2011-02-02 16:02:30 -08001881 emerge = deps.emerge
1882
1883 if emerge.action is not None:
Brian Harring30675052012-02-29 12:18:22 -08001884 argv = deps.ParseParallelEmergeArgs(argv)
Brian Harring8294d652012-05-23 02:20:52 -07001885 return emerge_main(argv)
David Jamesfcb70ef2011-02-02 16:02:30 -08001886 elif not emerge.cmdline_packages:
1887 Usage()
Brian Harring8294d652012-05-23 02:20:52 -07001888 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001889
1890 # Unless we're in pretend mode, there's not much point running without
1891 # root access. We need to be able to install packages.
1892 #
1893 # NOTE: Even if you're running --pretend, it's a good idea to run
1894 # parallel_emerge with root access so that portage can write to the
1895 # dependency cache. This is important for performance.
David James321490a2012-12-17 12:05:56 -08001896 if "--pretend" not in emerge.opts and portage.data.secpass < 2:
Mike Frysinger383367e2014-09-16 15:06:17 -04001897 print("parallel_emerge: superuser access is required.")
Brian Harring8294d652012-05-23 02:20:52 -07001898 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001899
1900 if "--quiet" not in emerge.opts:
1901 cmdline_packages = " ".join(emerge.cmdline_packages)
Mike Frysinger383367e2014-09-16 15:06:17 -04001902 print("Starting fast-emerge.")
1903 print(" Building package %s on %s" % (cmdline_packages,
Gilad Arnold05f94b02015-05-22 10:41:05 -07001904 deps.sysroot or "root"))
David Jamesfcb70ef2011-02-02 16:02:30 -08001905
David James386ccd12011-05-04 20:17:42 -07001906 deps_tree, deps_info = deps.GenDependencyTree()
David Jamesfcb70ef2011-02-02 16:02:30 -08001907
1908 # You want me to be verbose? I'll give you two trees! Twice as much value.
1909 if "--tree" in emerge.opts and "--verbose" in emerge.opts:
1910 deps.PrintTree(deps_tree)
1911
David James386ccd12011-05-04 20:17:42 -07001912 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001913
1914 # OK, time to print out our progress so far.
1915 deps.PrintInstallPlan(deps_graph)
1916 if "--tree" in emerge.opts:
1917 PrintDepsMap(deps_graph)
1918
1919 # Are we upgrading portage? If so, and there are more packages to merge,
1920 # schedule a restart of parallel_emerge to merge the rest. This ensures that
1921 # we pick up all updates to portage settings before merging any more
1922 # packages.
1923 portage_upgrade = False
1924 root = emerge.settings["ROOT"]
Don Garrett25f309a2014-03-19 14:02:12 -07001925 # pylint: disable=W0212
David Jamesfcb70ef2011-02-02 16:02:30 -08001926 if root == "/":
Gilad Arnoldcead28a2015-05-22 10:45:02 -07001927 final_db = emerge.depgraph._dynamic_config._filtered_trees[root]['graph_db']
David Jamesfcb70ef2011-02-02 16:02:30 -08001928 for db_pkg in final_db.match_pkgs("sys-apps/portage"):
1929 portage_pkg = deps_graph.get(db_pkg.cpv)
David James0ff16f22012-11-02 14:18:07 -07001930 if portage_pkg:
David Jamesfcb70ef2011-02-02 16:02:30 -08001931 portage_upgrade = True
1932 if "--quiet" not in emerge.opts:
Mike Frysinger383367e2014-09-16 15:06:17 -04001933 print("Upgrading portage first, then restarting...")
David Jamesfcb70ef2011-02-02 16:02:30 -08001934
David James0ff16f22012-11-02 14:18:07 -07001935 # Upgrade Portage first, then the rest of the packages.
1936 #
1937 # In order to grant the child permission to run setsid, we need to run sudo
1938 # again. We preserve SUDO_USER here in case an ebuild depends on it.
1939 if portage_upgrade:
1940 # Calculate what arguments to use when re-invoking.
1941 args = ["sudo", "-E", "SUDO_USER=%s" % os.environ.get("SUDO_USER", "")]
1942 args += [os.path.abspath(sys.argv[0])] + parallel_emerge_args
1943 args += ["--exclude=sys-apps/portage"]
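    # The re-invocation ends up looking roughly like (paths illustrative):
    #   sudo -E SUDO_USER=someuser /path/to/parallel_emerge <original args> \
    #       --exclude=sys-apps/portage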
1944
1945 # First upgrade Portage.
1946 passthrough_args = ("--quiet", "--pretend", "--verbose")
1947 emerge_args = [k for k in emerge.opts if k in passthrough_args]
1948 ret = emerge_main(emerge_args + ["portage"])
1949 if ret != 0:
1950 return ret
1951
1952 # Now upgrade the rest.
1953 os.execvp(args[0], args)
1954
Bertrand SIMONNETc03c8ee2014-12-10 17:02:55 -08001955 # Attempt to solve crbug.com/433482
1956 # The file descriptor error appears only when getting userpriv_groups
1957 # (lazily generated). Loading userpriv_groups here will reduce the number of
1958 # calls from few hundreds to one.
1959 portage.data._get_global('userpriv_groups')
1960
David Jamesfcb70ef2011-02-02 16:02:30 -08001961 # Run the queued emerges.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001962 scheduler = EmergeQueue(deps_graph, emerge, deps.package_db, deps.show_output,
Bertrand SIMONNET411945d2015-05-20 17:23:28 -07001963 deps.unpack_only, deps.max_retries)
Brian Harringa43f5952012-04-12 01:19:34 -07001964 try:
1965 scheduler.Run()
1966 finally:
Don Garrett25f309a2014-03-19 14:02:12 -07001967 # pylint: disable=W0212
Brian Harringa43f5952012-04-12 01:19:34 -07001968 scheduler._Shutdown()
David James97ce8902011-08-16 09:51:05 -07001969 scheduler = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001970
Mike Frysingerd20a6e22012-10-04 19:01:10 -04001971 clean_logs(emerge.settings)
1972
Mike Frysinger383367e2014-09-16 15:06:17 -04001973 print("Done")
Brian Harring8294d652012-05-23 02:20:52 -07001974 return 0