blob: 23cfec90d202c5cca5158cf1b0d82be7fb3cd0f7 [file] [log] [blame]
Mike Frysinger0a647fc2012-08-06 14:36:05 -04001# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
David Jamesfcb70ef2011-02-02 16:02:30 -08002# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5"""Program to run emerge in parallel, for significant speedup.
6
7Usage:
David James386ccd12011-05-04 20:17:42 -07008 ./parallel_emerge [--board=BOARD] [--workon=PKGS]
David Jamesfcb70ef2011-02-02 16:02:30 -08009 [--force-remote-binary=PKGS] [emerge args] package
10
David James78b6cd92012-04-02 21:36:12 -070011This script runs multiple emerge processes in parallel, using appropriate
12Portage APIs. It is faster than standard emerge because it has a
13multiprocess model instead of an asynchronous model.
David Jamesfcb70ef2011-02-02 16:02:30 -080014"""
15
16import codecs
17import copy
18import errno
Brian Harring8294d652012-05-23 02:20:52 -070019import gc
David James8c7e5e32011-06-28 11:26:03 -070020import heapq
David Jamesfcb70ef2011-02-02 16:02:30 -080021import multiprocessing
22import os
Mike Frysinger1ae28092013-10-17 17:17:22 -040023try:
24 import Queue
25except ImportError:
26 # Python-3 renamed to "queue". We still use Queue to avoid collisions
27 # with naming variables as "queue". Maybe we'll transition at some point.
28 # pylint: disable=F0401
29 import queue as Queue
David Jamesfcb70ef2011-02-02 16:02:30 -080030import signal
31import sys
32import tempfile
Brian Harring8294d652012-05-23 02:20:52 -070033import threading
David Jamesfcb70ef2011-02-02 16:02:30 -080034import time
35import traceback
David Jamesfcb70ef2011-02-02 16:02:30 -080036
Thiago Goncalesf4acc422013-07-17 10:26:35 -070037from chromite.lib import cros_build_lib
David Jamesaaf49e42014-04-24 09:40:05 -070038from chromite.lib import osutils
Mike Frysingerd74fe4a2014-04-24 11:43:38 -040039from chromite.lib import proctitle
Thiago Goncalesf4acc422013-07-17 10:26:35 -070040
David Jamesfcb70ef2011-02-02 16:02:30 -080041# If PORTAGE_USERNAME isn't specified, scrape it from the $HOME variable. On
42# Chromium OS, the default "portage" user doesn't have the necessary
43# permissions. It'd be easier if we could default to $USERNAME, but $USERNAME
44# is "root" here because we get called through sudo.
45#
46# We need to set this before importing any portage modules, because portage
47# looks up "PORTAGE_USERNAME" at import time.
48#
49# NOTE: .bashrc sets PORTAGE_USERNAME = $USERNAME, so most people won't
50# encounter this case unless they have an old chroot or blow away the
51# environment by running sudo without the -E specifier.
if "PORTAGE_USERNAME" not in os.environ:
  homedir = os.environ.get("HOME")
  if homedir:
    # Use the basename of $HOME (e.g. /home/foo -> foo); under sudo, $USER
    # is "root" but $HOME still points at the invoking user's home directory.
    os.environ["PORTAGE_USERNAME"] = os.path.basename(homedir)
56
57# Portage doesn't expose dependency trees in its public API, so we have to
58# make use of some private APIs here. These modules are found under
59# /usr/lib/portage/pym/.
60#
61# TODO(davidjames): Update Portage to expose public APIs for these features.
Don Garrett25f309a2014-03-19 14:02:12 -070062# pylint: disable=F0401
David Jamesfcb70ef2011-02-02 16:02:30 -080063from _emerge.actions import adjust_configs
64from _emerge.actions import load_emerge_config
65from _emerge.create_depgraph_params import create_depgraph_params
David James386ccd12011-05-04 20:17:42 -070066from _emerge.depgraph import backtrack_depgraph
Mike Frysinger901eaad2012-10-10 18:18:03 -040067try:
68 from _emerge.main import clean_logs
69except ImportError:
70 # Older portage versions did not provide clean_logs, so stub it.
71 # We need this if running in an older chroot that hasn't yet upgraded
72 # the portage version.
73 clean_logs = lambda x: None
David Jamesfcb70ef2011-02-02 16:02:30 -080074from _emerge.main import emerge_main
75from _emerge.main import parse_opts
76from _emerge.Package import Package
77from _emerge.Scheduler import Scheduler
David Jamesfcb70ef2011-02-02 16:02:30 -080078from _emerge.stdout_spinner import stdout_spinner
David James386ccd12011-05-04 20:17:42 -070079from portage._global_updates import _global_updates
David Jamesfcb70ef2011-02-02 16:02:30 -080080import portage
81import portage.debug
Don Garrettf8bf7842014-03-20 17:03:42 -070082# pylint: enable=F0401
Mike Frysinger91d7da92013-02-19 15:53:46 -050083
David Jamesfcb70ef2011-02-02 16:02:30 -080084
David Jamesfcb70ef2011-02-02 16:02:30 -080085def Usage():
86 """Print usage."""
87 print "Usage:"
David James386ccd12011-05-04 20:17:42 -070088 print " ./parallel_emerge [--board=BOARD] [--workon=PKGS]"
David Jamesfcb70ef2011-02-02 16:02:30 -080089 print " [--rebuild] [emerge args] package"
90 print
91 print "Packages specified as workon packages are always built from source."
David Jamesfcb70ef2011-02-02 16:02:30 -080092 print
93 print "The --workon argument is mainly useful when you want to build and"
94 print "install packages that you are working on unconditionally, but do not"
95 print "to have to rev the package to indicate you want to build it from"
96 print "source. The build_packages script will automatically supply the"
97 print "workon argument to emerge, ensuring that packages selected using"
98 print "cros-workon are rebuilt."
99 print
100 print "The --rebuild option rebuilds packages whenever their dependencies"
101 print "are changed. This ensures that your build is correct."
David Jamesfcb70ef2011-02-02 16:02:30 -0800102
103
David Jamesfcb70ef2011-02-02 16:02:30 -0800104# Global start time
105GLOBAL_START = time.time()
106
David James7358d032011-05-19 10:40:03 -0700107# Whether process has been killed by a signal.
108KILLED = multiprocessing.Event()
109
David Jamesfcb70ef2011-02-02 16:02:30 -0800110
class EmergeData(object):
  """Plain struct bundling the emerge state we pass around as a unit.

  These fields are used for calculating dependencies and installing packages:

  - action: The requested emerge action (e.g. "unmerge"), or None when
    installing packages. Maps exactly to the long-form emerge option name.
  - cmdline_packages: The packages the user listed on the command line.
  - depgraph: The emerge dependency graph (all involved packages + versions).
  - favorites: Candidate packages to add to the world file.
  - mtimedb: Portage's persistent global-state database (global updates,
    resume state). parallel_emerge never records resume state in it, so
    --resume is unsupported.
  - opts: Options dict as cleaned up by parse_opts. Note emerge drops
    default-off flags (e.g. "--usepkg=n") from the dict entirely, though
    some options (e.g. --with-bdeps) are passed through as-is.
  - root_config: Portage configuration for the current root (settings plus
    the three trees for that root).
  - scheduler_graph: Install-plan graph handed to the Scheduler; we don't
    install deps through it, but the Scheduler requires it.
  - settings: Portage settings for this session (mostly from make.conf in
    the install root).
  - spinner: Portage's stdout spinner; we run it in silent mode and keep
    our own.
  - trees: Per-root portage trees: vartree (installed packages), porttree
    (ebuilds), bintree (binary packages). Index with
    trees[settings["ROOT"]].
  """

  __slots__ = ["action", "cmdline_packages", "depgraph", "favorites",
               "mtimedb", "opts", "root_config", "scheduler_graph",
               "settings", "spinner", "trees"]

  def __init__(self):
    # Every field starts out unset; they are filled in as the emerge
    # configuration is loaded and the depgraph is calculated.
    for field in self.__slots__:
      setattr(self, field, None)
201
202
class DepGraphGenerator(object):
  """Grab dependency information about packages from portage.

  Typical usage:
    deps = DepGraphGenerator()
    deps.Initialize(sys.argv[1:])
    deps_tree, deps_info = deps.GenDependencyTree()
    deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
    deps.PrintTree(deps_tree)
    PrintDepsMap(deps_graph)

  Attributes:
    board: Target board name scraped from --board, or None for the host.
    emerge: EmergeData struct holding the parsed emerge configuration.
    package_db: Dict mapping cpv identifiers to portage Package objects.
    show_output: True when --show-output was passed.
    unpack_only: True when --unpackonly was passed.
  """

  __slots__ = ["board", "emerge", "package_db", "show_output", "unpack_only"]
David Jamesfcb70ef2011-02-02 16:02:30 -0800216
217 def __init__(self):
218 self.board = None
219 self.emerge = EmergeData()
David Jamesfcb70ef2011-02-02 16:02:30 -0800220 self.package_db = {}
David Jamesfcb70ef2011-02-02 16:02:30 -0800221 self.show_output = False
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700222 self.unpack_only = False
David Jamesfcb70ef2011-02-02 16:02:30 -0800223
224 def ParseParallelEmergeArgs(self, argv):
225 """Read the parallel emerge arguments from the command-line.
226
227 We need to be compatible with emerge arg format. We scrape arguments that
228 are specific to parallel_emerge, and pass through the rest directly to
229 emerge.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500230
David Jamesfcb70ef2011-02-02 16:02:30 -0800231 Args:
232 argv: arguments list
Mike Frysinger1a736a82013-12-12 01:50:59 -0500233
David Jamesfcb70ef2011-02-02 16:02:30 -0800234 Returns:
235 Arguments that don't belong to parallel_emerge
236 """
237 emerge_args = []
238 for arg in argv:
239 # Specifically match arguments that are specific to parallel_emerge, and
240 # pass through the rest.
241 if arg.startswith("--board="):
242 self.board = arg.replace("--board=", "")
243 elif arg.startswith("--workon="):
244 workon_str = arg.replace("--workon=", "")
David James7a1ea4b2011-10-13 15:06:41 -0700245 emerge_args.append("--reinstall-atoms=%s" % workon_str)
246 emerge_args.append("--usepkg-exclude=%s" % workon_str)
David Jamesfcb70ef2011-02-02 16:02:30 -0800247 elif arg.startswith("--force-remote-binary="):
248 force_remote_binary = arg.replace("--force-remote-binary=", "")
David James7a1ea4b2011-10-13 15:06:41 -0700249 emerge_args.append("--useoldpkg-atoms=%s" % force_remote_binary)
David Jamesfcb70ef2011-02-02 16:02:30 -0800250 elif arg == "--show-output":
251 self.show_output = True
David James386ccd12011-05-04 20:17:42 -0700252 elif arg == "--rebuild":
David James7a1ea4b2011-10-13 15:06:41 -0700253 emerge_args.append("--rebuild-if-unbuilt")
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700254 elif arg == "--unpackonly":
255 emerge_args.append("--fetchonly")
256 self.unpack_only = True
David Jamesfcb70ef2011-02-02 16:02:30 -0800257 else:
258 # Not one of our options, so pass through to emerge.
259 emerge_args.append(arg)
260
David James386ccd12011-05-04 20:17:42 -0700261 # These packages take a really long time to build, so, for expediency, we
262 # are blacklisting them from automatic rebuilds because one of their
263 # dependencies needs to be recompiled.
Mike Frysinger5c2a9052014-04-15 15:52:04 -0400264 for pkg in ("chromeos-base/chromeos-chrome",):
David James7a1ea4b2011-10-13 15:06:41 -0700265 emerge_args.append("--rebuild-exclude=%s" % pkg)
David Jamesfcb70ef2011-02-02 16:02:30 -0800266
267 return emerge_args
268
  def Initialize(self, args):
    """Initializer. Parses arguments and sets up portage state.

    Note: statement order matters throughout -- environment variables must
    be set before portage config is loaded, and options are parsed twice
    (once to read flags that affect the environment, then again with
    EMERGE_DEFAULT_OPTS folded in).

    Args:
      args: Full command-line argument list (parallel_emerge + emerge args).
    """

    # Parse and strip out args that are just intended for parallel_emerge.
    emerge_args = self.ParseParallelEmergeArgs(args)

    # Setup various environment variables based on our current board. These
    # variables are normally setup inside emerge-${BOARD}, but since we don't
    # call that script, we have to set it up here. These variables serve to
    # point our tools at /build/BOARD and to setup cross compiles to the
    # appropriate board as configured in toolchain.conf.
    if self.board:
      sysroot = cros_build_lib.GetSysroot(board=self.board)
      os.environ["PORTAGE_CONFIGROOT"] = sysroot
      os.environ["PORTAGE_SYSROOT"] = sysroot
      os.environ["SYSROOT"] = sysroot

    # Although CHROMEOS_ROOT isn't specific to boards, it's normally setup
    # inside emerge-${BOARD}, so we set it up here for compatibility. It
    # will be going away soon as we migrate to CROS_WORKON_SRCROOT.
    os.environ.setdefault("CHROMEOS_ROOT", os.environ["HOME"] + "/trunk")

    # Turn off interactive delays (beeps, pauses, unmerge countdowns).
    os.environ["EBEEP_IGNORE"] = "1"
    os.environ["EPAUSE_IGNORE"] = "1"
    os.environ["CLEAN_DELAY"] = "0"

    # Parse the emerge options (first pass; silent so errors defer to the
    # second parse below).
    action, opts, cmdline_packages = parse_opts(emerge_args, silent=True)

    # Set environment variables based on options. Portage normally sets these
    # environment variables in emerge_main, but we can't use that function,
    # because it also does a bunch of other stuff that we don't want.
    # TODO(davidjames): Patch portage to move this logic into a function we can
    # reuse here.
    if "--debug" in opts:
      os.environ["PORTAGE_DEBUG"] = "1"
    if "--config-root" in opts:
      os.environ["PORTAGE_CONFIGROOT"] = opts["--config-root"]
    if "--root" in opts:
      os.environ["ROOT"] = opts["--root"]
    if "--accept-properties" in opts:
      os.environ["ACCEPT_PROPERTIES"] = opts["--accept-properties"]

    # If we're installing packages to the board, we can disable vardb locks.
    # This is safe because we only run up to one instance of parallel_emerge in
    # parallel.
    # TODO(davidjames): Enable this for the host too.
    if self.board:
      os.environ.setdefault("PORTAGE_LOCKS", "false")

    # Now that we've setup the necessary environment variables, we can load the
    # emerge config from disk.
    settings, trees, mtimedb = load_emerge_config()

    # Add in EMERGE_DEFAULT_OPTS, if specified, and re-parse. This second
    # parse replaces action/opts/cmdline_packages from the first pass.
    tmpcmdline = []
    if "--ignore-default-opts" not in opts:
      tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
    tmpcmdline.extend(emerge_args)
    action, opts, cmdline_packages = parse_opts(tmpcmdline)

    # If we're installing to the board, we want the --root-deps option so that
    # portage will install the build dependencies to that location as well.
    if self.board:
      opts.setdefault("--root-deps", True)

    # Check whether our portage tree is out of date. Typically, this happens
    # when you're setting up a new portage tree, such as in setup_board and
    # make_chroot. In that case, portage applies a bunch of global updates
    # here. Once the updates are finished, we need to commit any changes
    # that the global update made to our mtimedb, and reload the config.
    #
    # Portage normally handles this logic in emerge_main, but again, we can't
    # use that function here.
    if _global_updates(trees, mtimedb["updates"]):
      mtimedb.commit()
      settings, trees, mtimedb = load_emerge_config(trees=trees)

    # Setup implied options. Portage normally handles this logic in
    # emerge_main.
    if "--buildpkgonly" in opts or "buildpkg" in settings.features:
      opts.setdefault("--buildpkg", True)
    if "--getbinpkgonly" in opts:
      opts.setdefault("--usepkgonly", True)
      opts.setdefault("--getbinpkg", True)
    if "getbinpkg" in settings.features:
      # Per emerge_main, FEATURES=getbinpkg overrides --getbinpkg=n
      opts["--getbinpkg"] = True
    if "--getbinpkg" in opts or "--usepkgonly" in opts:
      opts.setdefault("--usepkg", True)
    if "--fetch-all-uri" in opts:
      opts.setdefault("--fetchonly", True)
    if "--skipfirst" in opts:
      opts.setdefault("--resume", True)
    if "--buildpkgonly" in opts:
      # --buildpkgonly will not merge anything, so it overrides all binary
      # package options.
      for opt in ("--getbinpkg", "--getbinpkgonly",
                  "--usepkg", "--usepkgonly"):
        opts.pop(opt, None)
    if (settings.get("PORTAGE_DEBUG", "") == "1" and
        "python-trace" in settings.features):
      portage.debug.set_trace(True)

    # Complain about unsupported options (parallel_emerge never saves resume
    # state in the mtimedb, so --resume/--skipfirst cannot work).
    for opt in ("--ask", "--ask-enter-invalid", "--resume", "--skipfirst"):
      if opt in opts:
        print "%s is not supported by parallel_emerge" % opt
        sys.exit(1)

    # Make emerge specific adjustments to the config (e.g. colors!)
    adjust_configs(opts, trees)

    # Save our configuration so far in the emerge object
    emerge = self.emerge
    emerge.action, emerge.opts = action, opts
    emerge.settings, emerge.trees, emerge.mtimedb = settings, trees, mtimedb
    emerge.cmdline_packages = cmdline_packages
    root = settings["ROOT"]
    emerge.root_config = trees[root]["root_config"]

    # Load the binary package database if binary packages may be used, so
    # that later lookups can see what binpkgs are available.
    if "--usepkg" in opts:
      emerge.trees[root]["bintree"].populate("--getbinpkg" in opts)
393
David Jamesfcb70ef2011-02-02 16:02:30 -0800394 def CreateDepgraph(self, emerge, packages):
395 """Create an emerge depgraph object."""
396 # Setup emerge options.
397 emerge_opts = emerge.opts.copy()
398
David James386ccd12011-05-04 20:17:42 -0700399 # Ask portage to build a dependency graph. with the options we specified
400 # above.
David Jamesfcb70ef2011-02-02 16:02:30 -0800401 params = create_depgraph_params(emerge_opts, emerge.action)
David Jamesbf1e3442011-05-28 07:44:20 -0700402 success, depgraph, favorites = backtrack_depgraph(
David James386ccd12011-05-04 20:17:42 -0700403 emerge.settings, emerge.trees, emerge_opts, params, emerge.action,
404 packages, emerge.spinner)
405 emerge.depgraph = depgraph
David Jamesfcb70ef2011-02-02 16:02:30 -0800406
David James386ccd12011-05-04 20:17:42 -0700407 # Is it impossible to honor the user's request? Bail!
408 if not success:
409 depgraph.display_problems()
410 sys.exit(1)
David Jamesfcb70ef2011-02-02 16:02:30 -0800411
412 emerge.depgraph = depgraph
David Jamesbf1e3442011-05-28 07:44:20 -0700413 emerge.favorites = favorites
David Jamesfcb70ef2011-02-02 16:02:30 -0800414
David Jamesdeebd692011-05-09 17:02:52 -0700415 # Prime and flush emerge caches.
416 root = emerge.settings["ROOT"]
417 vardb = emerge.trees[root]["vartree"].dbapi
David James0bdc5de2011-05-12 16:22:26 -0700418 if "--pretend" not in emerge.opts:
419 vardb.counter_tick()
David Jamesdeebd692011-05-09 17:02:52 -0700420 vardb.flush_cache()
421
David James386ccd12011-05-04 20:17:42 -0700422 def GenDependencyTree(self):
David Jamesfcb70ef2011-02-02 16:02:30 -0800423 """Get dependency tree info from emerge.
424
David Jamesfcb70ef2011-02-02 16:02:30 -0800425 Returns:
426 Dependency tree
427 """
428 start = time.time()
429
430 emerge = self.emerge
431
432 # Create a list of packages to merge
433 packages = set(emerge.cmdline_packages[:])
David Jamesfcb70ef2011-02-02 16:02:30 -0800434
435 # Tell emerge to be quiet. We print plenty of info ourselves so we don't
436 # need any extra output from portage.
437 portage.util.noiselimit = -1
438
439 # My favorite feature: The silent spinner. It doesn't spin. Ever.
440 # I'd disable the colors by default too, but they look kind of cool.
441 emerge.spinner = stdout_spinner()
442 emerge.spinner.update = emerge.spinner.update_quiet
443
444 if "--quiet" not in emerge.opts:
445 print "Calculating deps..."
446
447 self.CreateDepgraph(emerge, packages)
448 depgraph = emerge.depgraph
449
450 # Build our own tree from the emerge digraph.
451 deps_tree = {}
Don Garrett25f309a2014-03-19 14:02:12 -0700452 # pylint: disable=W0212
David Jamesfcb70ef2011-02-02 16:02:30 -0800453 digraph = depgraph._dynamic_config.digraph
David James3f778802011-08-25 19:31:45 -0700454 root = emerge.settings["ROOT"]
Bertrand SIMONNETb35e19e2014-07-28 16:29:58 -0700455 final_db = get_db(depgraph._dynamic_config, root)
David Jamesfcb70ef2011-02-02 16:02:30 -0800456 for node, node_deps in digraph.nodes.items():
457 # Calculate dependency packages that need to be installed first. Each
458 # child on the digraph is a dependency. The "operation" field specifies
459 # what we're doing (e.g. merge, uninstall, etc.). The "priorities" array
460 # contains the type of dependency (e.g. build, runtime, runtime_post,
461 # etc.)
462 #
David Jamesfcb70ef2011-02-02 16:02:30 -0800463 # Portage refers to the identifiers for packages as a CPV. This acronym
464 # stands for Component/Path/Version.
465 #
466 # Here's an example CPV: chromeos-base/power_manager-0.0.1-r1
467 # Split up, this CPV would be:
468 # C -- Component: chromeos-base
469 # P -- Path: power_manager
470 # V -- Version: 0.0.1-r1
471 #
472 # We just refer to CPVs as packages here because it's easier.
473 deps = {}
474 for child, priorities in node_deps[0].items():
David James3f778802011-08-25 19:31:45 -0700475 if isinstance(child, Package) and child.root == root:
476 cpv = str(child.cpv)
477 action = str(child.operation)
478
479 # If we're uninstalling a package, check whether Portage is
480 # installing a replacement. If so, just depend on the installation
481 # of the new package, because the old package will automatically
482 # be uninstalled at that time.
483 if action == "uninstall":
484 for pkg in final_db.match_pkgs(child.slot_atom):
485 cpv = str(pkg.cpv)
486 action = "merge"
487 break
488
489 deps[cpv] = dict(action=action,
490 deptypes=[str(x) for x in priorities],
491 deps={})
David Jamesfcb70ef2011-02-02 16:02:30 -0800492
493 # We've built our list of deps, so we can add our package to the tree.
David James3f778802011-08-25 19:31:45 -0700494 if isinstance(node, Package) and node.root == root:
David Jamesfcb70ef2011-02-02 16:02:30 -0800495 deps_tree[str(node.cpv)] = dict(action=str(node.operation),
496 deps=deps)
497
David Jamesfcb70ef2011-02-02 16:02:30 -0800498 # Ask portage for its install plan, so that we can only throw out
David James386ccd12011-05-04 20:17:42 -0700499 # dependencies that portage throws out.
David Jamesfcb70ef2011-02-02 16:02:30 -0800500 deps_info = {}
501 for pkg in depgraph.altlist():
502 if isinstance(pkg, Package):
David James3f778802011-08-25 19:31:45 -0700503 assert pkg.root == root
David Jamesfcb70ef2011-02-02 16:02:30 -0800504 self.package_db[pkg.cpv] = pkg
505
David Jamesfcb70ef2011-02-02 16:02:30 -0800506 # Save off info about the package
David James386ccd12011-05-04 20:17:42 -0700507 deps_info[str(pkg.cpv)] = {"idx": len(deps_info)}
David Jamesfcb70ef2011-02-02 16:02:30 -0800508
509 seconds = time.time() - start
510 if "--quiet" not in emerge.opts:
511 print "Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60)
512
513 return deps_tree, deps_info
514
515 def PrintTree(self, deps, depth=""):
516 """Print the deps we have seen in the emerge output.
517
518 Args:
519 deps: Dependency tree structure.
520 depth: Allows printing the tree recursively, with indentation.
521 """
522 for entry in sorted(deps):
523 action = deps[entry]["action"]
524 print "%s %s (%s)" % (depth, entry, action)
525 self.PrintTree(deps[entry]["deps"], depth=depth + " ")
526
David James386ccd12011-05-04 20:17:42 -0700527 def GenDependencyGraph(self, deps_tree, deps_info):
David Jamesfcb70ef2011-02-02 16:02:30 -0800528 """Generate a doubly linked dependency graph.
529
530 Args:
531 deps_tree: Dependency tree structure.
532 deps_info: More details on the dependencies.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500533
David Jamesfcb70ef2011-02-02 16:02:30 -0800534 Returns:
535 Deps graph in the form of a dict of packages, with each package
536 specifying a "needs" list and "provides" list.
537 """
538 emerge = self.emerge
David Jamesfcb70ef2011-02-02 16:02:30 -0800539
David Jamesfcb70ef2011-02-02 16:02:30 -0800540 # deps_map is the actual dependency graph.
541 #
542 # Each package specifies a "needs" list and a "provides" list. The "needs"
543 # list indicates which packages we depend on. The "provides" list
544 # indicates the reverse dependencies -- what packages need us.
545 #
546 # We also provide some other information in the dependency graph:
547 # - action: What we're planning on doing with this package. Generally,
548 # "merge", "nomerge", or "uninstall"
David Jamesfcb70ef2011-02-02 16:02:30 -0800549 deps_map = {}
550
    def ReverseTree(packages):
      """Convert tree to digraph (closure over deps_map/deps_info/self).

      Take the tree of package -> requirements and reverse it to a digraph of
      buildable packages -> packages they unblock. The result is accumulated
      in the enclosing deps_map (the unsanitized digraph) in place; nothing
      is returned.

      Args:
        packages: Tree(s) of dependencies.
      """
      # Binary packages only run these phases; if none are defined, the
      # package's deps aren't needed at install time.
      binpkg_phases = set(["setup", "preinst", "postinst"])
      # Only these dependency types impose install ordering.
      needed_dep_types = set(["blocker", "buildtime", "runtime"])
      for pkg in packages:

        # Create an entry for the package
        action = packages[pkg]["action"]
        default_pkg = {"needs": {}, "provides": set(), "action": action,
                       "nodeps": False, "binary": False}
        # setdefault: keep any entry created by an earlier visit of this pkg.
        this_pkg = deps_map.setdefault(pkg, default_pkg)

        if pkg in deps_info:
          this_pkg["idx"] = deps_info[pkg]["idx"]

        # If a package doesn't have any defined phases that might use the
        # dependent packages (i.e. pkg_setup, pkg_preinst, or pkg_postinst),
        # we can install this package before its deps are ready.
        emerge_pkg = self.package_db.get(pkg)
        if emerge_pkg and emerge_pkg.type_name == "binary":
          this_pkg["binary"] = True
          defined_phases = emerge_pkg.defined_phases
          defined_binpkg_phases = binpkg_phases.intersection(defined_phases)
          if not defined_binpkg_phases:
            this_pkg["nodeps"] = True

        # Create entries for dependencies of this package first.
        ReverseTree(packages[pkg]["deps"])

        # Add dependencies to this package.
        for dep, dep_item in packages[pkg]["deps"].iteritems():
          # We only need to enforce strict ordering of dependencies if the
          # dependency is a blocker, or is a buildtime or runtime dependency.
          # (I.e., ignored, optional, and runtime_post dependencies don't
          # depend on ordering.)
          dep_types = dep_item["deptypes"]
          if needed_dep_types.intersection(dep_types):
            deps_map[dep]["provides"].add(pkg)
            this_pkg["needs"][dep] = "/".join(dep_types)

          # If there's a blocker, Portage may need to move files from one
          # package to another, which requires editing the CONTENTS files of
          # both packages. To avoid race conditions while editing this file,
          # the two packages must not be installed in parallel, so we can't
          # safely ignore dependencies. See http://crosbug.com/19328
          if "blocker" in dep_types:
            this_pkg["nodeps"] = False
608
David Jamesfcb70ef2011-02-02 16:02:30 -0800609 def FindCycles():
610 """Find cycles in the dependency tree.
611
612 Returns:
613 A dict mapping cyclic packages to a dict of the deps that cause
614 cycles. For each dep that causes cycles, it returns an example
615 traversal of the graph that shows the cycle.
616 """
617
618 def FindCyclesAtNode(pkg, cycles, unresolved, resolved):
619 """Find cycles in cyclic dependencies starting at specified package.
620
621 Args:
622 pkg: Package identifier.
623 cycles: A dict mapping cyclic packages to a dict of the deps that
624 cause cycles. For each dep that causes cycles, it returns an
625 example traversal of the graph that shows the cycle.
626 unresolved: Nodes that have been visited but are not fully processed.
627 resolved: Nodes that have been visited and are fully processed.
628 """
629 pkg_cycles = cycles.get(pkg)
630 if pkg in resolved and not pkg_cycles:
631 # If we already looked at this package, and found no cyclic
632 # dependencies, we can stop now.
633 return
634 unresolved.append(pkg)
635 for dep in deps_map[pkg]["needs"]:
636 if dep in unresolved:
637 idx = unresolved.index(dep)
638 mycycle = unresolved[idx:] + [dep]
David James321490a2012-12-17 12:05:56 -0800639 for i in xrange(len(mycycle) - 1):
David Jamesfcb70ef2011-02-02 16:02:30 -0800640 pkg1, pkg2 = mycycle[i], mycycle[i+1]
641 cycles.setdefault(pkg1, {}).setdefault(pkg2, mycycle)
642 elif not pkg_cycles or dep not in pkg_cycles:
643 # Looks like we haven't seen this edge before.
644 FindCyclesAtNode(dep, cycles, unresolved, resolved)
645 unresolved.pop()
646 resolved.add(pkg)
647
648 cycles, unresolved, resolved = {}, [], set()
649 for pkg in deps_map:
650 FindCyclesAtNode(pkg, cycles, unresolved, resolved)
651 return cycles
652
David James386ccd12011-05-04 20:17:42 -0700653 def RemoveUnusedPackages():
David Jamesfcb70ef2011-02-02 16:02:30 -0800654 """Remove installed packages, propagating dependencies."""
David Jamesfcb70ef2011-02-02 16:02:30 -0800655 # Schedule packages that aren't on the install list for removal
656 rm_pkgs = set(deps_map.keys()) - set(deps_info.keys())
657
David Jamesfcb70ef2011-02-02 16:02:30 -0800658 # Remove the packages we don't want, simplifying the graph and making
659 # it easier for us to crack cycles.
660 for pkg in sorted(rm_pkgs):
661 this_pkg = deps_map[pkg]
662 needs = this_pkg["needs"]
663 provides = this_pkg["provides"]
664 for dep in needs:
665 dep_provides = deps_map[dep]["provides"]
666 dep_provides.update(provides)
667 dep_provides.discard(pkg)
668 dep_provides.discard(dep)
669 for target in provides:
670 target_needs = deps_map[target]["needs"]
671 target_needs.update(needs)
672 target_needs.pop(pkg, None)
673 target_needs.pop(target, None)
674 del deps_map[pkg]
675
    def PrintCycleBreak(basedep, dep, mycycle):
      """Print details about a cycle that we are planning on breaking.

      We are breaking a cycle where dep needs basedep. mycycle is an
      example cycle which contains dep -> basedep.

      Args:
        basedep: The dependency whose incoming edge is being removed.
        dep: The package that needs basedep.
        mycycle: An example cycle containing the dep -> basedep edge.
      """

      needs = deps_map[dep]["needs"]
      # The edge may already have been pruned by an earlier break; report it
      # as "deleted" in that case.
      depinfo = needs.get(basedep, "deleted")

      # It's OK to swap install order for blockers, as long as the two
      # packages aren't installed in parallel. If there is a cycle, then
      # we know the packages depend on each other already, so we can drop the
      # blocker safely without printing a warning.
      if depinfo == "blocker":
        return

      # Notify the user that we're breaking a cycle.
      print "Breaking %s -> %s (%s)" % (dep, basedep, depinfo)

      # Show cycle.
      for i in xrange(len(mycycle) - 1):
        pkg1, pkg2 = mycycle[i], mycycle[i+1]
        needs = deps_map[pkg1]["needs"]
        depinfo = needs.get(pkg2, "deleted")
        if pkg1 == dep and pkg2 == basedep:
          depinfo = depinfo + ", deleting"
        print "  %s -> %s (%s)" % (pkg1, pkg2, depinfo)
704
    def SanitizeTree():
      """Remove circular dependencies.

      We prune all dependencies involved in cycles that go against the emerge
      ordering. This has a nice property: we're guaranteed to merge
      dependencies in the same order that portage does.

      Because we don't treat any dependencies as "soft" unless they're killed
      by a cycle, we pay attention to a larger number of dependencies when
      merging. This hurts performance a bit, but helps reliability.
      """
      start = time.time()
      cycles = FindCycles()
      # Fixed-point loop: keep breaking edges and re-running cycle detection
      # until the graph is acyclic.
      while cycles:
        for dep, mycycles in cycles.iteritems():
          for basedep, mycycle in mycycles.iteritems():
            # Only break the edge that goes against the emerge ordering
            # (basedep is merged at or after dep).
            if deps_info[basedep]["idx"] >= deps_info[dep]["idx"]:
              if "--quiet" not in emerge.opts:
                PrintCycleBreak(basedep, dep, mycycle)
              # Drop the dep -> basedep edge from both directions.
              del deps_map[dep]["needs"][basedep]
              deps_map[basedep]["provides"].remove(dep)
        cycles = FindCycles()
      seconds = time.time() - start
      if "--quiet" not in emerge.opts and seconds >= 0.1:
        print "Tree sanitized in %dm%.1fs" % (seconds / 60, seconds % 60)
730
David James8c7e5e32011-06-28 11:26:03 -0700731 def FindRecursiveProvides(pkg, seen):
732 """Find all nodes that require a particular package.
733
734 Assumes that graph is acyclic.
735
736 Args:
737 pkg: Package identifier.
738 seen: Nodes that have been visited so far.
739 """
740 if pkg in seen:
741 return
742 seen.add(pkg)
743 info = deps_map[pkg]
744 info["tprovides"] = info["provides"].copy()
745 for dep in info["provides"]:
746 FindRecursiveProvides(dep, seen)
747 info["tprovides"].update(deps_map[dep]["tprovides"])
748
David Jamesa22906f2011-05-04 19:53:26 -0700749 ReverseTree(deps_tree)
David Jamesa22906f2011-05-04 19:53:26 -0700750
David James386ccd12011-05-04 20:17:42 -0700751 # We need to remove unused packages so that we can use the dependency
752 # ordering of the install process to show us what cycles to crack.
753 RemoveUnusedPackages()
David Jamesfcb70ef2011-02-02 16:02:30 -0800754 SanitizeTree()
David James8c7e5e32011-06-28 11:26:03 -0700755 seen = set()
756 for pkg in deps_map:
757 FindRecursiveProvides(pkg, seen)
David Jamesfcb70ef2011-02-02 16:02:30 -0800758 return deps_map
759
  def PrintInstallPlan(self, deps_map):
    """Print an emerge-style install plan.

    The install plan lists what packages we're installing, in order.
    It's useful for understanding what parallel_emerge is doing.

    Exits with status 1 (after dumping the remaining graph) if cyclic
    dependencies are still present in deps_map.

    Args:
      deps_map: The dependency graph.
    """

    def InstallPlanAtNode(target, deps_map):
      # Emit target, then recursively emit each reverse-dependency that has
      # no remaining needs once target is (virtually) installed. Mutates the
      # local deep copy of deps_map as it simulates the install.
      nodes = []
      nodes.append(target)
      for dep in deps_map[target]["provides"]:
        del deps_map[dep]["needs"][target]
        if not deps_map[dep]["needs"]:
          nodes.extend(InstallPlanAtNode(dep, deps_map))
      return nodes

    # Work on a copy: the simulation below destroys the graph.
    deps_map = copy.deepcopy(deps_map)
    install_plan = []
    plan = set()
    for target, info in deps_map.iteritems():
      if not info["needs"] and target not in plan:
        for item in InstallPlanAtNode(target, deps_map):
          plan.add(item)
          install_plan.append(self.package_db[item])

    for pkg in plan:
      del deps_map[pkg]

    # Anything left over was never reachable from a dependency-free root,
    # which means it is part of a cycle.
    if deps_map:
      print "Cyclic dependencies:", " ".join(deps_map)
      PrintDepsMap(deps_map)
      sys.exit(1)

    self.emerge.depgraph.display(install_plan)
797
798
799def PrintDepsMap(deps_map):
800 """Print dependency graph, for each package list it's prerequisites."""
801 for i in sorted(deps_map):
802 print "%s: (%s) needs" % (i, deps_map[i]["action"])
803 needs = deps_map[i]["needs"]
804 for j in sorted(needs):
805 print " %s" % (j)
806 if not needs:
807 print " no dependencies"
808
809
class EmergeJobState(object):
  """Tracks the state of a single emerge job (build, fetch, or unpack)."""

  __slots__ = ["done", "filename", "last_notify_timestamp", "last_output_seek",
               "last_output_timestamp", "pkgname", "retcode", "start_timestamp",
               "target", "fetch_only", "unpack_only"]

  def __init__(self, target, pkgname, done, filename, start_timestamp,
               retcode=None, fetch_only=False, unpack_only=False):
    # Full name of the target being built (e.g. virtual/target-os-1-r60).
    self.target = target
    # Short name of the target being built (e.g. target-os-1-r60).
    self.pkgname = pkgname
    # True once the job has finished.
    self.done = done
    # File where the job's output is currently stored.
    self.filename = filename
    # When the job started.
    self.start_timestamp = start_timestamp
    # Exit code of the job, if it has actually finished.
    self.retcode = retcode
    # True when this job only fetches the package.
    self.fetch_only = fetch_only
    # True when this job only unpacks the package (no emerge).
    self.unpack_only = unpack_only
    # Timestamp of the last time we printed the name of the log file. We
    # print it at the beginning of the job, so this starts at
    # start_timestamp.
    self.last_notify_timestamp = start_timestamp
    # Byte offset of the end of the last complete line we printed. Starts at
    # zero; used to jump to the right place when output from the same ebuild
    # is printed multiple times.
    self.last_output_seek = 0
    # Timestamp of the last time we printed output. Nothing has been printed
    # yet, so this starts at zero.
    self.last_output_timestamp = 0
858
David Jamesfcb70ef2011-02-02 16:02:30 -0800859
def KillHandler(_signum, _frame):
  """Signal handler of last resort: terminate the whole process group.

  Installed (by SetupWorkerSignals and the exit handlers) after a first
  SIGINT/SIGTERM has been seen, so a repeated signal kills everything
  immediately instead of being handled recursively.
  """
  # Kill self and all subprocesses.
  os.killpg(0, signal.SIGKILL)
863
Mike Frysingercc838832014-05-24 13:10:30 -0400864
def SetupWorkerSignals():
  """Install SIGINT/SIGTERM handlers that let workers exit quietly.

  By default, when the user hits CTRL-C, every child process prints details
  about KeyboardInterrupt exceptions, which isn't very helpful. Instead, the
  first signal just sets the KILLED flag and swaps in hard-kill handlers, so
  a second signal terminates the process group outright.
  """
  def OnFirstSignal(_signum, _frame):
    # Record that we were asked to shut down.
    KILLED.set()

    # Replace our handlers so a repeated signal isn't handled recursively.
    signal.signal(signal.SIGINT, KillHandler)
    signal.signal(signal.SIGTERM, KillHandler)

  for signum in (signal.SIGINT, signal.SIGTERM):
    signal.signal(signum, OnFirstSignal)
880
Mike Frysingerd74fe4a2014-04-24 11:43:38 -0400881
def EmergeProcess(output, target, *args, **kwargs):
  """Merge a package in a subprocess.

  Forks a child that redirects stdout/stderr into `output`, runs portage's
  Scheduler, and exits via os._exit; the parent blocks until the child is
  done.

  Args:
    output: Temporary file to write output.
    target: The package we'll be processing (for display purposes).
    *args: Arguments to pass to Scheduler constructor.
    **kwargs: Keyword arguments to pass to Scheduler constructor.

  Returns:
    The exit code returned by the subprocess. Note: this is the encoded
    16-bit status from os.waitpid, not a plain exit code.
  """
  pid = os.fork()
  if pid == 0:
    try:
      proctitle.settitle('EmergeProcess', target)

      # Sanity checks.
      if sys.stdout.fileno() != 1:
        raise Exception("sys.stdout.fileno() != 1")
      if sys.stderr.fileno() != 2:
        raise Exception("sys.stderr.fileno() != 2")

      # - Redirect 1 (stdout) and 2 (stderr) at our temporary file.
      # - Redirect 0 to point at sys.stdin. In this case, sys.stdin
      #   points at a file reading os.devnull, because multiprocessing mucks
      #   with sys.stdin.
      # - Leave the sys.stdin and output filehandles alone.
      fd_pipes = {0: sys.stdin.fileno(),
                  1: output.fileno(),
                  2: output.fileno(),
                  sys.stdin.fileno(): sys.stdin.fileno(),
                  output.fileno(): output.fileno()}
      # pylint: disable=W0212
      portage.process._setup_pipes(fd_pipes, close_fds=False)

      # Portage doesn't like when sys.stdin.fileno() != 0, so point sys.stdin
      # at the filehandle we just created in _setup_pipes.
      if sys.stdin.fileno() != 0:
        sys.__stdin__ = sys.stdin = os.fdopen(0, "r")

      scheduler = Scheduler(*args, **kwargs)

      # Enable blocker handling even though we're in --nodeps mode. This
      # allows us to unmerge the blocker after we've merged the replacement.
      scheduler._opts_ignore_blockers = frozenset()

      # Actually do the merge.
      retval = scheduler.merge()

    # We catch all exceptions here (including SystemExit, KeyboardInterrupt,
    # etc) so as to ensure that we don't confuse the multiprocessing module,
    # which expects that all forked children exit with os._exit().
    # pylint: disable=W0702
    except:
      traceback.print_exc(file=output)
      retval = 1
    # Flush everything before _exit: no atexit/finalizers run in the child.
    sys.stdout.flush()
    sys.stderr.flush()
    output.flush()
    # pylint: disable=W0212
    os._exit(retval)
  else:
    # Return the exit code of the subprocess.
    return os.waitpid(pid, 0)[1]
David Jamesfcb70ef2011-02-02 16:02:30 -0800947
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700948
def UnpackPackage(pkg_state):
  """Unpacks package described by pkg_state.

  Decompresses the binary package (.tbz2) and pipes the result into a sudo
  tar extraction rooted at $ROOT.

  Args:
    pkg_state: EmergeJobState object describing target.

  Returns:
    Exit code returned by subprocess.
  """
  sysroot = os.environ["SYSROOT"]
  pkgdir = os.environ.get("PKGDIR", os.path.join(sysroot, "packages"))
  root = os.environ.get("ROOT", sysroot)
  tbz2_path = os.path.join(pkgdir, pkg_state.target + ".tbz2")

  # Step 1: decompress the binary package to stdout.
  decompressor = cros_build_lib.FindCompressor(cros_build_lib.COMP_BZIP2)
  decompress_cmd = [decompressor, "-dc"]
  if decompressor.endswith("pbzip2"):
    decompress_cmd.append("--ignore-trailing-garbage=1")
  decompress_cmd.append(tbz2_path)
  result = cros_build_lib.RunCommand(decompress_cmd, cwd=root,
                                     stdout_to_pipe=True, print_cmd=False,
                                     error_code_ok=True)

  # If decompression failed, return now and don't attempt untar.
  if result.returncode:
    return result.returncode

  # Step 2: untar the decompressed stream into the target root.
  untar_cmd = ["sudo", "tar", "-xf", "-", "-C", root]
  result = cros_build_lib.RunCommand(untar_cmd, cwd=root, input=result.output,
                                     print_cmd=False, error_code_ok=True)

  return result.returncode
980
981
def EmergeWorker(task_queue, job_queue, emerge, package_db, fetch_only=False,
                 unpack_only=False):
  """This worker emerges any packages given to it on the task_queue.

  It expects package state objects (with a .target attribute) to be passed
  to it via task_queue, and a None sentinel to request shutdown. When a
  merge starts or finishes, we push EmergeJobState objects to the job_queue;
  per-job output is stored in a temporary file whose name is carried on the
  EmergeJobState.

  Args:
    task_queue: The queue of tasks for this worker to do.
    job_queue: The queue of results from the worker.
    emerge: An EmergeData() object.
    package_db: A dict, mapping package ids to portage Package objects.
    fetch_only: A bool, indicating if we should just fetch the target.
    unpack_only: A bool, indicating if we should just unpack the target.
  """
  if fetch_only:
    mode = 'fetch'
  elif unpack_only:
    mode = 'unpack'
  else:
    mode = 'emerge'
  proctitle.settitle('EmergeWorker', mode, '[idle]')

  SetupWorkerSignals()
  settings, trees, mtimedb = emerge.settings, emerge.trees, emerge.mtimedb

  # Disable flushing of caches to save on I/O.
  root = emerge.settings["ROOT"]
  vardb = emerge.trees[root]["vartree"].dbapi
  vardb._flush_cache_enabled = False
  bindb = emerge.trees[root]["bintree"].dbapi
  # Might be a set, might be a list, might be None; no clue, just use shallow
  # copy to ensure we can roll it back.
  # pylint: disable=W0212
  original_remotepkgs = copy.copy(bindb.bintree._remotepkgs)

  opts, spinner = emerge.opts, emerge.spinner
  opts["--nodeps"] = True
  if fetch_only:
    opts["--fetchonly"] = True

  while True:
    # Wait for a new item to show up on the queue. This is a blocking wait,
    # so if there's nothing to do, we just sit here.
    pkg_state = task_queue.get()
    if pkg_state is None:
      # If target is None, this means that the main thread wants us to quit.
      # The other workers need to exit too, so we'll push the message back on
      # to the queue so they'll get it too.
      task_queue.put(None)
      return
    if KILLED.is_set():
      return

    target = pkg_state.target
    proctitle.settitle('EmergeWorker', mode, target)

    db_pkg = package_db[target]

    if db_pkg.type_name == "binary":
      if not fetch_only and pkg_state.fetched_successfully:
        # Ensure portage doesn't think our pkg is remote- else it'll force
        # a redownload of it (even if the on-disk file is fine). In-memory
        # caching basically, implemented dumbly.
        bindb.bintree._remotepkgs = None
      else:
        # Roll back the override above, restoring the remote package list we
        # saved before the loop. (Fixed: this used to assign to the
        # nonexistent attribute "bintree_remotepkgs", so the override was
        # never undone.)
        bindb.bintree._remotepkgs = original_remotepkgs

    db_pkg.root_config = emerge.root_config
    install_list = [db_pkg]
    pkgname = db_pkg.pf
    output = tempfile.NamedTemporaryFile(prefix=pkgname + "-", delete=False)
    # Make the log world-readable. This must be an octal literal: the old
    # decimal 644 actually produced mode 01204.
    os.chmod(output.name, 0o644)
    start_timestamp = time.time()
    job = EmergeJobState(target, pkgname, False, output.name, start_timestamp,
                         fetch_only=fetch_only, unpack_only=unpack_only)
    job_queue.put(job)
    if "--pretend" in opts:
      retcode = 0
    else:
      try:
        emerge.scheduler_graph.mergelist = install_list
        if unpack_only:
          retcode = UnpackPackage(pkg_state)
        else:
          retcode = EmergeProcess(output, target, settings, trees, mtimedb,
                                  opts, spinner, favorites=emerge.favorites,
                                  graph_config=emerge.scheduler_graph)
      except Exception:
        traceback.print_exc(file=output)
        retcode = 1
    output.close()

    if KILLED.is_set():
      return

    # Report completion (with the exit code) back to the main thread.
    job = EmergeJobState(target, pkgname, True, output.name, start_timestamp,
                         retcode, fetch_only=fetch_only,
                         unpack_only=unpack_only)
    job_queue.put(job)

    # Set the title back to idle as the multiprocess pool won't destroy us;
    # when another job comes up, it'll re-use this process.
    proctitle.settitle('EmergeWorker', mode, '[idle]')
1088
David Jamesfcb70ef2011-02-02 16:02:30 -08001089
class LinePrinter(object):
  """Helper object to print a single line."""

  def __init__(self, line):
    # The text to print; emitted verbatim by Print().
    self.line = line

  def Print(self, _seek_locations):
    # Seek locations are irrelevant for free-standing lines; they are only
    # needed by printers that resume partially-printed job logs (JobPrinter).
    print self.line
1098
1099
class JobPrinter(object):
  """Helper object to print output of a job."""

  def __init__(self, job, unlink=False):
    """Print output of job.

    If unlink is True, unlink the job output file when done.
    """
    # Capture the time at creation, so the elapsed time reflects when this
    # print request was queued rather than when it was serviced.
    self.current_time = time.time()
    self.job = job
    self.unlink = unlink

  def Print(self, seek_locations):
    """Print any new output from the job's log file.

    Args:
      seek_locations: Dict mapping log filenames to the byte offset of the
        last complete line already printed. Updated in place so repeated
        calls for the same file only print fresh output.
    """

    job = self.job

    # Calculate how long the job has been running.
    seconds = self.current_time - job.start_timestamp

    # Note that we've printed out the job so far.
    job.last_output_timestamp = self.current_time

    # Note that we're starting the job
    info = "job %s (%dm%.1fs)" % (job.pkgname, seconds / 60, seconds % 60)
    last_output_seek = seek_locations.get(job.filename, 0)
    if last_output_seek:
      print "=== Continue output for %s ===" % info
    else:
      print "=== Start output for %s ===" % info

    # Print actual output from job
    f = codecs.open(job.filename, encoding='utf-8', errors='replace')
    f.seek(last_output_seek)
    prefix = job.pkgname + ":"
    for line in f:

      # Save off our position in the file, but only advance past lines that
      # were fully written (ending in a newline).
      if line and line[-1] == "\n":
        last_output_seek = f.tell()
        line = line[:-1]

      # Print our line
      print prefix, line.encode('utf-8', 'replace')
    f.close()

    # Save our last spot in the file so that we don't print out the same
    # location twice.
    seek_locations[job.filename] = last_output_seek

    # Note end of output section
    if job.done:
      print "=== Complete: %s ===" % info
    else:
      print "=== Still running: %s ===" % info

    if self.unlink:
      os.unlink(job.filename)
1157
1158
def PrintWorker(queue):
  """A worker that prints stuff to the screen as requested.

  Runs in its own process (started by EmergeQueue), consuming printer
  objects (e.g. LinePrinter, JobPrinter) from the queue and invoking their
  Print() method. A falsy item (None) tells the worker to exit.

  Args:
    queue: Queue of objects exposing Print(seek_locations), or None to
      request shutdown.
  """
  proctitle.settitle('PrintWorker')

  def ExitHandler(_signum, _frame):
    # Set KILLED flag.
    KILLED.set()

    # Switch to default signal handlers so that we'll die after two signals.
    signal.signal(signal.SIGINT, KillHandler)
    signal.signal(signal.SIGTERM, KillHandler)

  # Don't exit on the first SIGINT / SIGTERM, because the parent worker will
  # handle it and tell us when we need to exit.
  signal.signal(signal.SIGINT, ExitHandler)
  signal.signal(signal.SIGTERM, ExitHandler)

  # seek_locations is a map indicating the position we are at in each file.
  # It starts off empty, but is set by the various Print jobs as we go along
  # to indicate where we left off in each file.
  seek_locations = {}
  while True:
    try:
      job = queue.get()
      if job:
        job.Print(seek_locations)
        sys.stdout.flush()
      else:
        break
    except IOError as ex:
      if ex.errno == errno.EINTR:
        # Looks like we received a signal. Keep printing.
        continue
      raise
1193
Brian Harring867e2362012-03-17 04:05:17 -07001194
class TargetState(object):
  """Scheduling state for a single build target.

  Wraps a target name and its dependency-graph entry, plus fetch
  bookkeeping and a tuple "score" used to order targets in a ScoredHeap
  (lower scores are popped first).
  """

  __slots__ = ("target", "info", "score", "prefetched", "fetched_successfully")

  def __init__(self, target, info):
    self.target = target
    self.info = info
    # Fetch bookkeeping, updated as prefetch jobs run.
    self.fetched_successfully = False
    self.prefetched = False
    self.score = None
    self.update_score()

  def __cmp__(self, other):
    # Ordering is determined entirely by the score tuples.
    return cmp(self.score, other.score)

  def update_score(self):
    """Recompute the sort key from the current dependency info."""
    info = self.info
    self.score = (
        -len(info["tprovides"]),   # more transitive reverse-deps first
        len(info["needs"]),        # fewer outstanding deps first
        not info["binary"],        # binary packages first
        -len(info["provides"]),    # more direct reverse-deps first
        info["idx"],               # earlier graph index first
        self.target,               # deterministic tie-break
    )
1219
1220
class ScoredHeap(object):
  """Min-heap of TargetState items with O(1) membership tests.

  Items pop in score order (lowest first). A parallel set of target names
  backs __contains__ so membership checks don't scan the heap.
  """

  __slots__ = ("heap", "_heap_set")

  def __init__(self, initial=()):
    self.heap = []
    self._heap_set = set()
    if initial:
      self.multi_put(initial)

  def get(self):
    """Pop and return the lowest-scored item."""
    lowest = heapq.heappop(self.heap)
    self._heap_set.remove(lowest.target)
    return lowest

  def put(self, item):
    """Push a single TargetState onto the heap."""
    if not isinstance(item, TargetState):
      raise ValueError("Item %r isn't a TargetState" % (item,))
    heapq.heappush(self.heap, item)
    self._heap_set.add(item.target)

  def multi_put(self, sequence):
    """Push many items at once, then restore the heap invariant."""
    items = list(sequence)
    self.heap.extend(items)
    self._heap_set.update(entry.target for entry in items)
    self.sort()

  def sort(self):
    """Re-establish the heap invariant after bulk insertion."""
    heapq.heapify(self.heap)

  def __contains__(self, target):
    return target in self._heap_set

  def __nonzero__(self):
    return bool(self.heap)

  def __len__(self):
    return len(self.heap)
1260
1261
David Jamesfcb70ef2011-02-02 16:02:30 -08001262class EmergeQueue(object):
1263 """Class to schedule emerge jobs according to a dependency graph."""
1264
  def __init__(self, deps_map, emerge, package_db, show_output, unpack_only):
    """Set up worker pools, queues, and scheduling state.

    Args:
      deps_map: The dependency graph.
      emerge: An EmergeData() object.
      package_db: A dict, mapping package ids to portage Package objects.
      show_output: A bool, whether to display job output.
      unpack_only: A bool, whether jobs only unpack packages (no emerge).
    """
    # Store the dependency graph.
    self._deps_map = deps_map
    self._state_map = {}
    # Initialize the running queue to empty
    self._build_jobs = {}
    self._build_ready = ScoredHeap()
    self._fetch_jobs = {}
    self._fetch_ready = ScoredHeap()
    self._unpack_jobs = {}
    self._unpack_ready = ScoredHeap()
    # List of total package installs represented in deps_map.
    install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"]
    self._total_jobs = len(install_jobs)
    self._show_output = show_output
    self._unpack_only = unpack_only

    if "--pretend" in emerge.opts:
      print "Skipping merge because of --pretend mode."
      sys.exit(0)

    # Set up a session so we can easily terminate all children.
    self._SetupSession()

    # Setup scheduler graph object. This is used by the child processes
    # to help schedule jobs.
    emerge.scheduler_graph = emerge.depgraph.schedulerGraph()

    # Calculate how many jobs we can run in parallel. We don't want to pass
    # the --jobs flag over to emerge itself, because that'll tell emerge to
    # hide its output, and said output is quite useful for debugging hung
    # jobs.
    procs = min(self._total_jobs,
                emerge.opts.pop("--jobs", multiprocessing.cpu_count()))
    self._build_procs = self._unpack_procs = self._fetch_procs = max(1, procs)
    self._load_avg = emerge.opts.pop("--load-average", None)
    self._job_queue = multiprocessing.Queue()
    self._print_queue = multiprocessing.Queue()

    self._fetch_queue = multiprocessing.Queue()
    args = (self._fetch_queue, self._job_queue, emerge, package_db, True)
    self._fetch_pool = multiprocessing.Pool(self._fetch_procs, EmergeWorker,
                                            args)

    self._build_queue = multiprocessing.Queue()
    args = (self._build_queue, self._job_queue, emerge, package_db)
    self._build_pool = multiprocessing.Pool(self._build_procs, EmergeWorker,
                                            args)

    if self._unpack_only:
      # Unpack pool only required on unpack_only jobs.
      self._unpack_queue = multiprocessing.Queue()
      args = (self._unpack_queue, self._job_queue, emerge, package_db, False,
              True)
      self._unpack_pool = multiprocessing.Pool(self._unpack_procs, EmergeWorker,
                                               args)

    self._print_worker = multiprocessing.Process(target=PrintWorker,
                                                 args=[self._print_queue])
    self._print_worker.start()

    # Initialize the failed queue to empty.
    self._retry_queue = []
    self._failed = set()

    # Setup an exit handler so that we print nice messages if we are
    # terminated.
    self._SetupExitHandler()

    # Schedule our jobs. Every target starts out in the fetch-ready heap.
    self._state_map.update(
        (pkg, TargetState(pkg, data)) for pkg, data in deps_map.iteritems())
    self._fetch_ready.multi_put(self._state_map.itervalues())
David Jamesfcb70ef2011-02-02 16:02:30 -08001338
  def _SetupSession(self):
    """Set up a session so we can easily terminate all children."""
    # When we call os.setsid(), this sets up a session / process group for this
    # process and all children. These session groups are needed so that we can
    # easily kill all children (including processes launched by emerge) before
    # we exit.
    #
    # One unfortunate side effect of os.setsid() is that it blocks CTRL-C from
    # being received. To work around this, we only call os.setsid() in a forked
    # process, so that the parent can still watch for CTRL-C. The parent will
    # just sit around, watching for signals and propagating them to the child,
    # until the child exits.
    #
    # TODO(davidjames): It would be nice if we could replace this with cgroups.
    pid = os.fork()
    if pid == 0:
      # Child: become session leader and return to carry on with the real
      # work of this process.
      os.setsid()
    else:
      # Parent: babysit the child, forwarding signals until it exits.
      proctitle.settitle('SessionManager')

      def PropagateToChildren(signum, _frame):
        # Just propagate the signals down to the child. We'll exit when the
        # child does.
        try:
          os.kill(pid, signum)
        except OSError as ex:
          # ESRCH means the child is already gone; anything else is real.
          if ex.errno != errno.ESRCH:
            raise
      signal.signal(signal.SIGINT, PropagateToChildren)
      signal.signal(signal.SIGTERM, PropagateToChildren)

      def StopGroup(_signum, _frame):
        # When we get stopped, stop the children.
        try:
          os.killpg(pid, signal.SIGSTOP)
          os.kill(0, signal.SIGSTOP)
        except OSError as ex:
          if ex.errno != errno.ESRCH:
            raise
      signal.signal(signal.SIGTSTP, StopGroup)

      def ContinueGroup(_signum, _frame):
        # Launch the children again after being stopped.
        try:
          os.killpg(pid, signal.SIGCONT)
        except OSError as ex:
          if ex.errno != errno.ESRCH:
            raise
      signal.signal(signal.SIGCONT, ContinueGroup)

      # Loop until the children exit. We exit with os._exit to be sure we
      # don't run any finalizers (those will be run by the child process.)
      # pylint: disable=W0212
      while True:
        try:
          # Wait for the process to exit. When it does, exit with the return
          # value of the subprocess.
          os._exit(osutils.GetExitStatus(os.waitpid(pid, 0)[1]))
        except OSError as ex:
          if ex.errno == errno.EINTR:
            continue
          traceback.print_exc()
          os._exit(1)
        except BaseException:
          traceback.print_exc()
          os._exit(1)
1404 os._exit(1)
1405
David Jamesfcb70ef2011-02-02 16:02:30 -08001406 def _SetupExitHandler(self):
1407
David James321490a2012-12-17 12:05:56 -08001408 def ExitHandler(signum, _frame):
David James7358d032011-05-19 10:40:03 -07001409 # Set KILLED flag.
1410 KILLED.set()
David Jamesfcb70ef2011-02-02 16:02:30 -08001411
1412 # Kill our signal handlers so we don't get called recursively
David James7358d032011-05-19 10:40:03 -07001413 signal.signal(signal.SIGINT, KillHandler)
1414 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -08001415
1416 # Print our current job status
Brian Harring0be85c62012-03-17 19:52:12 -07001417 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001418 if job:
1419 self._print_queue.put(JobPrinter(job, unlink=True))
1420
1421 # Notify the user that we are exiting
1422 self._Print("Exiting on signal %s" % signum)
David James7358d032011-05-19 10:40:03 -07001423 self._print_queue.put(None)
1424 self._print_worker.join()
David Jamesfcb70ef2011-02-02 16:02:30 -08001425
1426 # Kill child threads, then exit.
David James7358d032011-05-19 10:40:03 -07001427 os.killpg(0, signal.SIGKILL)
David Jamesfcb70ef2011-02-02 16:02:30 -08001428 sys.exit(1)
1429
1430 # Print out job status when we are killed
1431 signal.signal(signal.SIGINT, ExitHandler)
1432 signal.signal(signal.SIGTERM, ExitHandler)
1433
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001434 def _ScheduleUnpack(self, pkg_state):
1435 self._unpack_jobs[pkg_state.target] = None
1436 self._unpack_queue.put(pkg_state)
1437
Brian Harring0be85c62012-03-17 19:52:12 -07001438 def _Schedule(self, pkg_state):
David Jamesfcb70ef2011-02-02 16:02:30 -08001439 # We maintain a tree of all deps, if this doesn't need
David James8c7e5e32011-06-28 11:26:03 -07001440 # to be installed just free up its children and continue.
David Jamesfcb70ef2011-02-02 16:02:30 -08001441 # It is possible to reinstall deps of deps, without reinstalling
1442 # first level deps, like so:
Mike Frysingerfd969312014-04-02 22:16:42 -04001443 # virtual/target-os (merge) -> eselect (nomerge) -> python (merge)
Brian Harring0be85c62012-03-17 19:52:12 -07001444 this_pkg = pkg_state.info
1445 target = pkg_state.target
1446 if pkg_state.info is not None:
1447 if this_pkg["action"] == "nomerge":
1448 self._Finish(target)
1449 elif target not in self._build_jobs:
1450 # Kick off the build if it's marked to be built.
1451 self._build_jobs[target] = None
1452 self._build_queue.put(pkg_state)
1453 return True
David Jamesfcb70ef2011-02-02 16:02:30 -08001454
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001455 def _ScheduleLoop(self, unpack_only=False):
1456 if unpack_only:
1457 ready_queue = self._unpack_ready
1458 jobs_queue = self._unpack_jobs
1459 procs = self._unpack_procs
1460 else:
1461 ready_queue = self._build_ready
1462 jobs_queue = self._build_jobs
1463 procs = self._build_procs
1464
David James8c7e5e32011-06-28 11:26:03 -07001465 # If the current load exceeds our desired load average, don't schedule
1466 # more than one job.
1467 if self._load_avg and os.getloadavg()[0] > self._load_avg:
1468 needed_jobs = 1
1469 else:
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001470 needed_jobs = procs
David James8c7e5e32011-06-28 11:26:03 -07001471
1472 # Schedule more jobs.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001473 while ready_queue and len(jobs_queue) < needed_jobs:
1474 state = ready_queue.get()
1475 if unpack_only:
1476 self._ScheduleUnpack(state)
1477 else:
1478 if state.target not in self._failed:
1479 self._Schedule(state)
David Jamesfcb70ef2011-02-02 16:02:30 -08001480
  def _Print(self, line):
    """Print a single line (asynchronously, via the print worker)."""
    self._print_queue.put(LinePrinter(line))
1484
1485 def _Status(self):
1486 """Print status."""
1487 current_time = time.time()
1488 no_output = True
1489
1490 # Print interim output every minute if --show-output is used. Otherwise,
1491 # print notifications about running packages every 2 minutes, and print
1492 # full output for jobs that have been running for 60 minutes or more.
1493 if self._show_output:
1494 interval = 60
1495 notify_interval = 0
1496 else:
1497 interval = 60 * 60
1498 notify_interval = 60 * 2
David James321490a2012-12-17 12:05:56 -08001499 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001500 if job:
1501 last_timestamp = max(job.start_timestamp, job.last_output_timestamp)
1502 if last_timestamp + interval < current_time:
1503 self._print_queue.put(JobPrinter(job))
1504 job.last_output_timestamp = current_time
1505 no_output = False
1506 elif (notify_interval and
1507 job.last_notify_timestamp + notify_interval < current_time):
1508 job_seconds = current_time - job.start_timestamp
1509 args = (job.pkgname, job_seconds / 60, job_seconds % 60, job.filename)
1510 info = "Still building %s (%dm%.1fs). Logs in %s" % args
1511 job.last_notify_timestamp = current_time
1512 self._Print(info)
1513 no_output = False
1514
1515 # If we haven't printed any messages yet, print a general status message
1516 # here.
1517 if no_output:
1518 seconds = current_time - GLOBAL_START
Brian Harring0be85c62012-03-17 19:52:12 -07001519 fjobs, fready = len(self._fetch_jobs), len(self._fetch_ready)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001520 ujobs, uready = len(self._unpack_jobs), len(self._unpack_ready)
Brian Harring0be85c62012-03-17 19:52:12 -07001521 bjobs, bready = len(self._build_jobs), len(self._build_ready)
1522 retries = len(self._retry_queue)
1523 pending = max(0, len(self._deps_map) - fjobs - bjobs)
1524 line = "Pending %s/%s, " % (pending, self._total_jobs)
1525 if fjobs or fready:
1526 line += "Fetching %s/%s, " % (fjobs, fready + fjobs)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001527 if ujobs or uready:
1528 line += "Unpacking %s/%s, " % (ujobs, uready + ujobs)
Brian Harring0be85c62012-03-17 19:52:12 -07001529 if bjobs or bready or retries:
1530 line += "Building %s/%s, " % (bjobs, bready + bjobs)
1531 if retries:
1532 line += "Retrying %s, " % (retries,)
David James8c7e5e32011-06-28 11:26:03 -07001533 load = " ".join(str(x) for x in os.getloadavg())
Brian Harring0be85c62012-03-17 19:52:12 -07001534 line += ("[Time %dm%.1fs Load %s]" % (seconds/60, seconds %60, load))
1535 self._Print(line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001536
1537 def _Finish(self, target):
David James8c7e5e32011-06-28 11:26:03 -07001538 """Mark a target as completed and unblock dependencies."""
1539 this_pkg = self._deps_map[target]
1540 if this_pkg["needs"] and this_pkg["nodeps"]:
1541 # We got installed, but our deps have not been installed yet. Dependent
1542 # packages should only be installed when our needs have been fully met.
1543 this_pkg["action"] = "nomerge"
1544 else:
David James8c7e5e32011-06-28 11:26:03 -07001545 for dep in this_pkg["provides"]:
1546 dep_pkg = self._deps_map[dep]
Brian Harring0be85c62012-03-17 19:52:12 -07001547 state = self._state_map[dep]
David James8c7e5e32011-06-28 11:26:03 -07001548 del dep_pkg["needs"][target]
Brian Harring0be85c62012-03-17 19:52:12 -07001549 state.update_score()
1550 if not state.prefetched:
1551 if dep in self._fetch_ready:
1552 # If it's not currently being fetched, update the prioritization
1553 self._fetch_ready.sort()
1554 elif not dep_pkg["needs"]:
David James8c7e5e32011-06-28 11:26:03 -07001555 if dep_pkg["nodeps"] and dep_pkg["action"] == "nomerge":
1556 self._Finish(dep)
1557 else:
Brian Harring0be85c62012-03-17 19:52:12 -07001558 self._build_ready.put(self._state_map[dep])
David James8c7e5e32011-06-28 11:26:03 -07001559 self._deps_map.pop(target)
David Jamesfcb70ef2011-02-02 16:02:30 -08001560
1561 def _Retry(self):
David James8c7e5e32011-06-28 11:26:03 -07001562 while self._retry_queue:
Brian Harring0be85c62012-03-17 19:52:12 -07001563 state = self._retry_queue.pop(0)
1564 if self._Schedule(state):
1565 self._Print("Retrying emerge of %s." % state.target)
David James8c7e5e32011-06-28 11:26:03 -07001566 break
David Jamesfcb70ef2011-02-02 16:02:30 -08001567
Brian Harringa43f5952012-04-12 01:19:34 -07001568 def _Shutdown(self):
David Jamesfcb70ef2011-02-02 16:02:30 -08001569 # Tell emerge workers to exit. They all exit when 'None' is pushed
1570 # to the queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001571
Brian Harringa43f5952012-04-12 01:19:34 -07001572 # Shutdown the workers first; then jobs (which is how they feed things back)
1573 # then finally the print queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001574
Brian Harringa43f5952012-04-12 01:19:34 -07001575 def _stop(queue, pool):
1576 if pool is None:
1577 return
1578 try:
1579 queue.put(None)
1580 pool.close()
1581 pool.join()
1582 finally:
1583 pool.terminate()
Brian Harring0be85c62012-03-17 19:52:12 -07001584
Brian Harringa43f5952012-04-12 01:19:34 -07001585 _stop(self._fetch_queue, self._fetch_pool)
1586 self._fetch_queue = self._fetch_pool = None
Brian Harring0be85c62012-03-17 19:52:12 -07001587
Brian Harringa43f5952012-04-12 01:19:34 -07001588 _stop(self._build_queue, self._build_pool)
1589 self._build_queue = self._build_pool = None
1590
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001591 if self._unpack_only:
1592 _stop(self._unpack_queue, self._unpack_pool)
1593 self._unpack_queue = self._unpack_pool = None
1594
Brian Harringa43f5952012-04-12 01:19:34 -07001595 if self._job_queue is not None:
1596 self._job_queue.close()
1597 self._job_queue = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001598
1599 # Now that our workers are finished, we can kill the print queue.
Brian Harringa43f5952012-04-12 01:19:34 -07001600 if self._print_worker is not None:
1601 try:
1602 self._print_queue.put(None)
1603 self._print_queue.close()
1604 self._print_worker.join()
1605 finally:
1606 self._print_worker.terminate()
1607 self._print_queue = self._print_worker = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001608
  def Run(self):
    """Run through the scheduled ebuilds.

    Keep running so long as we have uninstalled packages in the
    dependency graph to merge.
    """
    if not self._deps_map:
      return

    # Start the fetchers.
    for _ in xrange(min(self._fetch_procs, len(self._fetch_ready))):
      state = self._fetch_ready.get()
      self._fetch_jobs[state.target] = None
      self._fetch_queue.put(state)

    # Print an update, then get going.
    self._Status()

    retried = set()
    while self._deps_map:
      # Check here that we are actually waiting for something.
      if (self._build_queue.empty() and
          self._job_queue.empty() and
          not self._fetch_jobs and
          not self._fetch_ready and
          not self._unpack_jobs and
          not self._unpack_ready and
          not self._build_jobs and
          not self._build_ready and
          self._deps_map):
        # If we have failed on a package, retry it now.
        if self._retry_queue:
          self._Retry()
        else:
          # Nothing in flight and nothing schedulable: we cannot make
          # progress, so report why and bail out.
          # Tell the user why we're exiting.
          if self._failed:
            print 'Packages failed:\n\t%s' % '\n\t'.join(self._failed)
            status_file = os.environ.get("PARALLEL_EMERGE_STATUS_FILE")
            if status_file:
              failed_pkgs = set(portage.versions.cpv_getkey(x)
                                for x in self._failed)
              with open(status_file, "a") as f:
                f.write("%s\n" % " ".join(failed_pkgs))
          else:
            print "Deadlock! Circular dependencies!"
          sys.exit(1)

      # Poll the job queue for up to 60 seconds (12 x 5s), opportunistically
      # scheduling more work each time the poll comes up empty.
      for _ in xrange(12):
        try:
          job = self._job_queue.get(timeout=5)
          break
        except Queue.Empty:
          # Check if any more jobs can be scheduled.
          self._ScheduleLoop()
      else:
        # Print an update every 60 seconds.
        self._Status()
        continue

      target = job.target

      # Status report from a fetch worker.
      if job.fetch_only:
        if not job.done:
          self._fetch_jobs[job.target] = job
        else:
          state = self._state_map[job.target]
          state.prefetched = True
          state.fetched_successfully = (job.retcode == 0)
          del self._fetch_jobs[job.target]
          self._Print("Fetched %s in %2.2fs"
                      % (target, time.time() - job.start_timestamp))

          if self._show_output or job.retcode != 0:
            self._print_queue.put(JobPrinter(job, unlink=True))
          else:
            os.unlink(job.filename)
          # Failure or not, let build work with it next.
          if not self._deps_map[job.target]["needs"]:
            self._build_ready.put(state)
            self._ScheduleLoop()

          if self._unpack_only and job.retcode == 0:
            self._unpack_ready.put(state)
            self._ScheduleLoop(unpack_only=True)

          if self._fetch_ready:
            # Keep the fetch pipeline primed with the next ready package.
            state = self._fetch_ready.get()
            self._fetch_queue.put(state)
            self._fetch_jobs[state.target] = None
          else:
            # Minor optimization; shut down fetchers early since we know
            # the queue is empty.
            self._fetch_queue.put(None)
        continue

      # Status report from an unpack worker.
      if job.unpack_only:
        if not job.done:
          self._unpack_jobs[target] = job
        else:
          del self._unpack_jobs[target]
          self._Print("Unpacked %s in %2.2fs"
                      % (target, time.time() - job.start_timestamp))
          if self._show_output or job.retcode != 0:
            self._print_queue.put(JobPrinter(job, unlink=True))
          else:
            os.unlink(job.filename)
          if self._unpack_ready:
            state = self._unpack_ready.get()
            self._unpack_queue.put(state)
            self._unpack_jobs[state.target] = None
        continue

      # Status report from a build worker: either "started" or "finished".
      if not job.done:
        self._build_jobs[target] = job
        self._Print("Started %s (logged in %s)" % (target, job.filename))
        continue

      # Print output of job
      if self._show_output or job.retcode != 0:
        self._print_queue.put(JobPrinter(job, unlink=True))
      else:
        os.unlink(job.filename)
      del self._build_jobs[target]

      seconds = time.time() - job.start_timestamp
      details = "%s (in %dm%.1fs)" % (target, seconds / 60, seconds % 60)
      previously_failed = target in self._failed

      # Complain if necessary.
      if job.retcode != 0:
        # Handle job failure.
        if previously_failed:
          # If this job has failed previously, give up.
          self._Print("Failed %s. Your build has failed." % details)
        else:
          # Queue up this build to try again after a long while.
          retried.add(target)
          self._retry_queue.append(self._state_map[target])
          self._failed.add(target)
          self._Print("Failed %s, retrying later." % details)
      else:
        if previously_failed:
          # Remove target from list of failed packages.
          self._failed.remove(target)

        self._Print("Completed %s" % details)

        # Mark as completed and unblock waiting ebuilds.
        self._Finish(target)

        if previously_failed and self._retry_queue:
          # If we have successfully retried a failed package, and there
          # are more failed packages, try the next one. We will only have
          # one retrying package actively running at a time.
          self._Retry()


      # Schedule pending jobs and print an update.
      self._ScheduleLoop()
      self._Status()

    # If packages were retried, output a warning.
    if retried:
      self._Print("")
      self._Print("WARNING: The following packages failed the first time,")
      self._Print("but succeeded upon retry. This might indicate incorrect")
      self._Print("dependencies.")
      for pkg in retried:
        self._Print(" %s" % pkg)
      self._Print("@@@STEP_WARNINGS@@@")
      self._Print("")

    # Tell child threads to exit.
    self._Print("Merge complete")
David Jamesfcb70ef2011-02-02 16:02:30 -08001783
1784
Brian Harring30675052012-02-29 12:18:22 -08001785def main(argv):
Brian Harring8294d652012-05-23 02:20:52 -07001786 try:
1787 return real_main(argv)
1788 finally:
1789 # Work around multiprocessing sucking and not cleaning up after itself.
1790 # http://bugs.python.org/issue4106;
1791 # Step one; ensure GC is ran *prior* to the VM starting shutdown.
1792 gc.collect()
1793 # Step two; go looking for those threads and try to manually reap
1794 # them if we can.
1795 for x in threading.enumerate():
1796 # Filter on the name, and ident; if ident is None, the thread
1797 # wasn't started.
1798 if x.name == 'QueueFeederThread' and x.ident is not None:
1799 x.join(1)
David Jamesfcb70ef2011-02-02 16:02:30 -08001800
Brian Harring8294d652012-05-23 02:20:52 -07001801
Bertrand SIMONNETb35e19e2014-07-28 16:29:58 -07001802def get_db(config, root):
Mike Frysinger33fbccb2014-09-05 17:09:07 -04001803 """Return the dbapi.
Bertrand SIMONNETb35e19e2014-07-28 16:29:58 -07001804 Handles both portage 2.1.11 and 2.2.10 (where mydbapi has been removed).
1805
1806 TODO(bsimonnet): Remove this once portage has been uprevd.
1807 """
1808 try:
1809 return config.mydbapi[root]
1810 except AttributeError:
1811 # pylint: disable=W0212
1812 return config._filtered_trees[root]['graph_db']
1813
1814
Brian Harring8294d652012-05-23 02:20:52 -07001815def real_main(argv):
Brian Harring30675052012-02-29 12:18:22 -08001816 parallel_emerge_args = argv[:]
David Jamesfcb70ef2011-02-02 16:02:30 -08001817 deps = DepGraphGenerator()
Brian Harring30675052012-02-29 12:18:22 -08001818 deps.Initialize(parallel_emerge_args)
David Jamesfcb70ef2011-02-02 16:02:30 -08001819 emerge = deps.emerge
1820
1821 if emerge.action is not None:
Brian Harring30675052012-02-29 12:18:22 -08001822 argv = deps.ParseParallelEmergeArgs(argv)
Brian Harring8294d652012-05-23 02:20:52 -07001823 return emerge_main(argv)
David Jamesfcb70ef2011-02-02 16:02:30 -08001824 elif not emerge.cmdline_packages:
1825 Usage()
Brian Harring8294d652012-05-23 02:20:52 -07001826 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001827
1828 # Unless we're in pretend mode, there's not much point running without
1829 # root access. We need to be able to install packages.
1830 #
1831 # NOTE: Even if you're running --pretend, it's a good idea to run
1832 # parallel_emerge with root access so that portage can write to the
1833 # dependency cache. This is important for performance.
David James321490a2012-12-17 12:05:56 -08001834 if "--pretend" not in emerge.opts and portage.data.secpass < 2:
David Jamesfcb70ef2011-02-02 16:02:30 -08001835 print "parallel_emerge: superuser access is required."
Brian Harring8294d652012-05-23 02:20:52 -07001836 return 1
David Jamesfcb70ef2011-02-02 16:02:30 -08001837
1838 if "--quiet" not in emerge.opts:
1839 cmdline_packages = " ".join(emerge.cmdline_packages)
David Jamesfcb70ef2011-02-02 16:02:30 -08001840 print "Starting fast-emerge."
1841 print " Building package %s on %s" % (cmdline_packages,
1842 deps.board or "root")
David Jamesfcb70ef2011-02-02 16:02:30 -08001843
David James386ccd12011-05-04 20:17:42 -07001844 deps_tree, deps_info = deps.GenDependencyTree()
David Jamesfcb70ef2011-02-02 16:02:30 -08001845
1846 # You want me to be verbose? I'll give you two trees! Twice as much value.
1847 if "--tree" in emerge.opts and "--verbose" in emerge.opts:
1848 deps.PrintTree(deps_tree)
1849
David James386ccd12011-05-04 20:17:42 -07001850 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
David Jamesfcb70ef2011-02-02 16:02:30 -08001851
1852 # OK, time to print out our progress so far.
1853 deps.PrintInstallPlan(deps_graph)
1854 if "--tree" in emerge.opts:
1855 PrintDepsMap(deps_graph)
1856
1857 # Are we upgrading portage? If so, and there are more packages to merge,
1858 # schedule a restart of parallel_emerge to merge the rest. This ensures that
1859 # we pick up all updates to portage settings before merging any more
1860 # packages.
1861 portage_upgrade = False
1862 root = emerge.settings["ROOT"]
Don Garrett25f309a2014-03-19 14:02:12 -07001863 # pylint: disable=W0212
Bertrand SIMONNETb35e19e2014-07-28 16:29:58 -07001864 final_db = get_db(emerge.depgraph._dynamic_config, root)
David Jamesfcb70ef2011-02-02 16:02:30 -08001865 if root == "/":
1866 for db_pkg in final_db.match_pkgs("sys-apps/portage"):
1867 portage_pkg = deps_graph.get(db_pkg.cpv)
David James0ff16f22012-11-02 14:18:07 -07001868 if portage_pkg:
David Jamesfcb70ef2011-02-02 16:02:30 -08001869 portage_upgrade = True
1870 if "--quiet" not in emerge.opts:
1871 print "Upgrading portage first, then restarting..."
1872
David James0ff16f22012-11-02 14:18:07 -07001873 # Upgrade Portage first, then the rest of the packages.
1874 #
1875 # In order to grant the child permission to run setsid, we need to run sudo
1876 # again. We preserve SUDO_USER here in case an ebuild depends on it.
1877 if portage_upgrade:
1878 # Calculate what arguments to use when re-invoking.
1879 args = ["sudo", "-E", "SUDO_USER=%s" % os.environ.get("SUDO_USER", "")]
1880 args += [os.path.abspath(sys.argv[0])] + parallel_emerge_args
1881 args += ["--exclude=sys-apps/portage"]
1882
1883 # First upgrade Portage.
1884 passthrough_args = ("--quiet", "--pretend", "--verbose")
1885 emerge_args = [k for k in emerge.opts if k in passthrough_args]
1886 ret = emerge_main(emerge_args + ["portage"])
1887 if ret != 0:
1888 return ret
1889
1890 # Now upgrade the rest.
1891 os.execvp(args[0], args)
1892
David Jamesfcb70ef2011-02-02 16:02:30 -08001893 # Run the queued emerges.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001894 scheduler = EmergeQueue(deps_graph, emerge, deps.package_db, deps.show_output,
1895 deps.unpack_only)
Brian Harringa43f5952012-04-12 01:19:34 -07001896 try:
1897 scheduler.Run()
1898 finally:
Don Garrett25f309a2014-03-19 14:02:12 -07001899 # pylint: disable=W0212
Brian Harringa43f5952012-04-12 01:19:34 -07001900 scheduler._Shutdown()
David James97ce8902011-08-16 09:51:05 -07001901 scheduler = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001902
Mike Frysingerd20a6e22012-10-04 19:01:10 -04001903 clean_logs(emerge.settings)
1904
David Jamesfcb70ef2011-02-02 16:02:30 -08001905 print "Done"
Brian Harring8294d652012-05-23 02:20:52 -07001906 return 0