blob: e893c5d30f55d22d25eeaf33f4224ffb718222e1 [file] [log] [blame]
Mike Frysinger9f7e4ee2013-03-13 15:43:03 -04001#!/usr/bin/python
Mike Frysinger0a647fc2012-08-06 14:36:05 -04002# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
David Jamesfcb70ef2011-02-02 16:02:30 -08003# Use of this source code is governed by a BSD-style license that can be
4# found in the LICENSE file.
5
6"""Program to run emerge in parallel, for significant speedup.
7
8Usage:
David James386ccd12011-05-04 20:17:42 -07009 ./parallel_emerge [--board=BOARD] [--workon=PKGS]
David Jamesfcb70ef2011-02-02 16:02:30 -080010 [--force-remote-binary=PKGS] [emerge args] package
11
David James78b6cd92012-04-02 21:36:12 -070012This script runs multiple emerge processes in parallel, using appropriate
13Portage APIs. It is faster than standard emerge because it has a
14multiprocess model instead of an asynchronous model.
David Jamesfcb70ef2011-02-02 16:02:30 -080015"""
16
17import codecs
18import copy
19import errno
Brian Harring8294d652012-05-23 02:20:52 -070020import gc
David James8c7e5e32011-06-28 11:26:03 -070021import heapq
David Jamesfcb70ef2011-02-02 16:02:30 -080022import multiprocessing
23import os
Mike Frysinger1ae28092013-10-17 17:17:22 -040024try:
25 import Queue
26except ImportError:
27 # Python-3 renamed to "queue". We still use Queue to avoid collisions
28 # with naming variables as "queue". Maybe we'll transition at some point.
29 # pylint: disable=F0401
30 import queue as Queue
David Jamesfcb70ef2011-02-02 16:02:30 -080031import signal
32import sys
33import tempfile
Brian Harring8294d652012-05-23 02:20:52 -070034import threading
David Jamesfcb70ef2011-02-02 16:02:30 -080035import time
36import traceback
David Jamesfcb70ef2011-02-02 16:02:30 -080037
Thiago Goncalesf4acc422013-07-17 10:26:35 -070038from chromite.lib import cros_build_lib
David Jamesaaf49e42014-04-24 09:40:05 -070039from chromite.lib import osutils
Mike Frysingerd74fe4a2014-04-24 11:43:38 -040040from chromite.lib import proctitle
Thiago Goncalesf4acc422013-07-17 10:26:35 -070041
David Jamesfcb70ef2011-02-02 16:02:30 -080042# If PORTAGE_USERNAME isn't specified, scrape it from the $HOME variable. On
43# Chromium OS, the default "portage" user doesn't have the necessary
44# permissions. It'd be easier if we could default to $USERNAME, but $USERNAME
45# is "root" here because we get called through sudo.
46#
47# We need to set this before importing any portage modules, because portage
48# looks up "PORTAGE_USERNAME" at import time.
49#
50# NOTE: .bashrc sets PORTAGE_USERNAME = $USERNAME, so most people won't
51# encounter this case unless they have an old chroot or blow away the
52# environment by running sudo without the -E specifier.
53if "PORTAGE_USERNAME" not in os.environ:
54 homedir = os.environ.get("HOME")
55 if homedir:
56 os.environ["PORTAGE_USERNAME"] = os.path.basename(homedir)
57
58# Portage doesn't expose dependency trees in its public API, so we have to
59# make use of some private APIs here. These modules are found under
60# /usr/lib/portage/pym/.
61#
62# TODO(davidjames): Update Portage to expose public APIs for these features.
Don Garrett25f309a2014-03-19 14:02:12 -070063# pylint: disable=F0401
David Jamesfcb70ef2011-02-02 16:02:30 -080064from _emerge.actions import adjust_configs
65from _emerge.actions import load_emerge_config
66from _emerge.create_depgraph_params import create_depgraph_params
David James386ccd12011-05-04 20:17:42 -070067from _emerge.depgraph import backtrack_depgraph
Mike Frysinger901eaad2012-10-10 18:18:03 -040068try:
69 from _emerge.main import clean_logs
70except ImportError:
71 # Older portage versions did not provide clean_logs, so stub it.
72 # We need this if running in an older chroot that hasn't yet upgraded
73 # the portage version.
74 clean_logs = lambda x: None
David Jamesfcb70ef2011-02-02 16:02:30 -080075from _emerge.main import emerge_main
76from _emerge.main import parse_opts
77from _emerge.Package import Package
78from _emerge.Scheduler import Scheduler
David Jamesfcb70ef2011-02-02 16:02:30 -080079from _emerge.stdout_spinner import stdout_spinner
David James386ccd12011-05-04 20:17:42 -070080from portage._global_updates import _global_updates
David Jamesfcb70ef2011-02-02 16:02:30 -080081import portage
82import portage.debug
Don Garrettf8bf7842014-03-20 17:03:42 -070083# pylint: enable=F0401
Mike Frysinger91d7da92013-02-19 15:53:46 -050084
David Jamesfcb70ef2011-02-02 16:02:30 -080085
David Jamesfcb70ef2011-02-02 16:02:30 -080086def Usage():
87 """Print usage."""
88 print "Usage:"
David James386ccd12011-05-04 20:17:42 -070089 print " ./parallel_emerge [--board=BOARD] [--workon=PKGS]"
David Jamesfcb70ef2011-02-02 16:02:30 -080090 print " [--rebuild] [emerge args] package"
91 print
92 print "Packages specified as workon packages are always built from source."
David Jamesfcb70ef2011-02-02 16:02:30 -080093 print
94 print "The --workon argument is mainly useful when you want to build and"
95 print "install packages that you are working on unconditionally, but do not"
96 print "to have to rev the package to indicate you want to build it from"
97 print "source. The build_packages script will automatically supply the"
98 print "workon argument to emerge, ensuring that packages selected using"
99 print "cros-workon are rebuilt."
100 print
101 print "The --rebuild option rebuilds packages whenever their dependencies"
102 print "are changed. This ensures that your build is correct."
David Jamesfcb70ef2011-02-02 16:02:30 -0800103
104
# Global start time (wall-clock seconds since the epoch), recorded when the
# module is imported.
GLOBAL_START = time.time()

# Whether process has been killed by a signal. A multiprocessing.Event, so the
# flag is shared with any worker processes we spawn.
KILLED = multiprocessing.Event()
110
David Jamesfcb70ef2011-02-02 16:02:30 -0800111
class EmergeData(object):
  """Simple struct bundling the emerge variables we pass around as a unit.

  These variables are used for calculating dependencies and installing
  packages.

  Attributes:
    action: The action the user requested (the long-form emerge option
      name, e.g. "unmerge"), or None when the user is installing packages.
    cmdline_packages: The list of packages passed on the command line.
    depgraph: The emerge dependency graph, covering all packages involved
      in this merge along with their versions.
    favorites: The list of candidates to add to the world file.
    opts: Dict of emerge options, as cleaned up by parse_opts. Note that
      some defaulted-off flags (e.g. "--usepkg=n") are simply absent from
      the dict, while others (e.g. "--with-bdeps") are passed through
      as-is; see _emerge.main.parse_opts for the full cleanup rules.
    mtimedb: Portage's persistent global-state dict, loaded from disk at
      startup and saved via mtimedb.commit(). Portage records its resume
      state here; parallel_emerge does not, so --resume is unsupported.
    root_config: Portage configuration for the current root — the settings
      plus the three portage trees for that root (see `trees`).
    scheduler_graph: Install-plan graph handed to the Scheduler object. We
      don't actually install any deps through it ourselves.
    settings: Portage settings for the current session, mostly taken from
      make.conf inside the current install root.
    spinner: Portage's stdout spinner; we keep our own, so this one is set
      to silent mode.
    trees: Per-root portage trees; index with settings["ROOT"]. Each root
      holds a vartree (installed packages), a porttree (ebuilds), and a
      bintree (binary packages).
  """

  __slots__ = ["action", "cmdline_packages", "depgraph", "favorites",
               "mtimedb", "opts", "root_config", "scheduler_graph",
               "settings", "spinner", "trees"]

  def __init__(self):
    # Every field starts out unset; they are filled in later (e.g. by
    # DepGraphGenerator.Initialize).
    for field in self.__slots__:
      setattr(self, field, None)
202
203
class DepGraphGenerator(object):
  """Grab dependency information about packages from portage.

  Typical usage:
    deps = DepGraphGenerator()
    deps.Initialize(sys.argv[1:])
    deps_tree, deps_info = deps.GenDependencyTree()
    deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
    deps.PrintTree(deps_tree)
    PrintDepsMap(deps_graph)

  Attributes:
    board: Board name to build for, or None for the host (set from
      --board on the parallel_emerge command line).
    emerge: EmergeData instance holding shared emerge state.
    package_db: Dict mapping package CPVs to their portage Package objects.
    show_output: Whether --show-output was passed.
    unpack_only: Whether --unpackonly was passed.
  """

  __slots__ = ["board", "emerge", "package_db", "show_output", "unpack_only"]
David Jamesfcb70ef2011-02-02 16:02:30 -0800217
218 def __init__(self):
219 self.board = None
220 self.emerge = EmergeData()
David Jamesfcb70ef2011-02-02 16:02:30 -0800221 self.package_db = {}
David Jamesfcb70ef2011-02-02 16:02:30 -0800222 self.show_output = False
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700223 self.unpack_only = False
David Jamesfcb70ef2011-02-02 16:02:30 -0800224
225 def ParseParallelEmergeArgs(self, argv):
226 """Read the parallel emerge arguments from the command-line.
227
228 We need to be compatible with emerge arg format. We scrape arguments that
229 are specific to parallel_emerge, and pass through the rest directly to
230 emerge.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500231
David Jamesfcb70ef2011-02-02 16:02:30 -0800232 Args:
233 argv: arguments list
Mike Frysinger1a736a82013-12-12 01:50:59 -0500234
David Jamesfcb70ef2011-02-02 16:02:30 -0800235 Returns:
236 Arguments that don't belong to parallel_emerge
237 """
238 emerge_args = []
239 for arg in argv:
240 # Specifically match arguments that are specific to parallel_emerge, and
241 # pass through the rest.
242 if arg.startswith("--board="):
243 self.board = arg.replace("--board=", "")
244 elif arg.startswith("--workon="):
245 workon_str = arg.replace("--workon=", "")
David James7a1ea4b2011-10-13 15:06:41 -0700246 emerge_args.append("--reinstall-atoms=%s" % workon_str)
247 emerge_args.append("--usepkg-exclude=%s" % workon_str)
David Jamesfcb70ef2011-02-02 16:02:30 -0800248 elif arg.startswith("--force-remote-binary="):
249 force_remote_binary = arg.replace("--force-remote-binary=", "")
David James7a1ea4b2011-10-13 15:06:41 -0700250 emerge_args.append("--useoldpkg-atoms=%s" % force_remote_binary)
David Jamesfcb70ef2011-02-02 16:02:30 -0800251 elif arg == "--show-output":
252 self.show_output = True
David James386ccd12011-05-04 20:17:42 -0700253 elif arg == "--rebuild":
David James7a1ea4b2011-10-13 15:06:41 -0700254 emerge_args.append("--rebuild-if-unbuilt")
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700255 elif arg == "--unpackonly":
256 emerge_args.append("--fetchonly")
257 self.unpack_only = True
David Jamesfcb70ef2011-02-02 16:02:30 -0800258 else:
259 # Not one of our options, so pass through to emerge.
260 emerge_args.append(arg)
261
David James386ccd12011-05-04 20:17:42 -0700262 # These packages take a really long time to build, so, for expediency, we
263 # are blacklisting them from automatic rebuilds because one of their
264 # dependencies needs to be recompiled.
Mike Frysinger5c2a9052014-04-15 15:52:04 -0400265 for pkg in ("chromeos-base/chromeos-chrome",):
David James7a1ea4b2011-10-13 15:06:41 -0700266 emerge_args.append("--rebuild-exclude=%s" % pkg)
David Jamesfcb70ef2011-02-02 16:02:30 -0800267
268 return emerge_args
269
  def Initialize(self, args):
    """Initializer. Parses arguments and sets up portage state.

    Args:
      args: Full command line (parallel_emerge flags + emerge flags). The
        parallel_emerge-specific flags are stripped out here via
        ParseParallelEmergeArgs; the rest are handed to portage.

    NOTE(review): this method mutates os.environ and module-global portage
    state, and the statement order matters (env vars must be set before
    load_emerge_config is called).
    """

    # Parse and strip out args that are just intended for parallel_emerge.
    emerge_args = self.ParseParallelEmergeArgs(args)

    # Setup various environment variables based on our current board. These
    # variables are normally setup inside emerge-${BOARD}, but since we don't
    # call that script, we have to set it up here. These variables serve to
    # point our tools at /build/BOARD and to setup cross compiles to the
    # appropriate board as configured in toolchain.conf.
    if self.board:
      sysroot = cros_build_lib.GetSysroot(board=self.board)
      os.environ["PORTAGE_CONFIGROOT"] = sysroot
      os.environ["PORTAGE_SYSROOT"] = sysroot
      os.environ["SYSROOT"] = sysroot

    # Although CHROMEOS_ROOT isn't specific to boards, it's normally setup
    # inside emerge-${BOARD}, so we set it up here for compatibility. It
    # will be going away soon as we migrate to CROS_WORKON_SRCROOT.
    os.environ.setdefault("CHROMEOS_ROOT", os.environ["HOME"] + "/trunk")

    # Turn off interactive delays
    os.environ["EBEEP_IGNORE"] = "1"
    os.environ["EPAUSE_IGNORE"] = "1"
    os.environ["CLEAN_DELAY"] = "0"

    # Parse the emerge options. This first pass is only needed so we can
    # inspect flags (e.g. --debug, --config-root, --ignore-default-opts)
    # before loading the config; action/opts/cmdline_packages are re-parsed
    # below once EMERGE_DEFAULT_OPTS is known.
    action, opts, cmdline_packages = parse_opts(emerge_args, silent=True)

    # Set environment variables based on options. Portage normally sets these
    # environment variables in emerge_main, but we can't use that function,
    # because it also does a bunch of other stuff that we don't want.
    # TODO(davidjames): Patch portage to move this logic into a function we can
    # reuse here.
    if "--debug" in opts:
      os.environ["PORTAGE_DEBUG"] = "1"
    if "--config-root" in opts:
      os.environ["PORTAGE_CONFIGROOT"] = opts["--config-root"]
    if "--root" in opts:
      os.environ["ROOT"] = opts["--root"]
    if "--accept-properties" in opts:
      os.environ["ACCEPT_PROPERTIES"] = opts["--accept-properties"]

    # If we're installing packages to the board, we can disable vardb locks.
    # This is safe because we only run up to one instance of parallel_emerge in
    # parallel.
    # TODO(davidjames): Enable this for the host too.
    if self.board:
      os.environ.setdefault("PORTAGE_LOCKS", "false")

    # Now that we've setup the necessary environment variables, we can load the
    # emerge config from disk.
    settings, trees, mtimedb = load_emerge_config()

    # Add in EMERGE_DEFAULT_OPTS, if specified. The defaults go first so
    # explicit command-line flags can override them.
    tmpcmdline = []
    if "--ignore-default-opts" not in opts:
      tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
    tmpcmdline.extend(emerge_args)
    action, opts, cmdline_packages = parse_opts(tmpcmdline)

    # If we're installing to the board, we want the --root-deps option so that
    # portage will install the build dependencies to that location as well.
    if self.board:
      opts.setdefault("--root-deps", True)

    # Check whether our portage tree is out of date. Typically, this happens
    # when you're setting up a new portage tree, such as in setup_board and
    # make_chroot. In that case, portage applies a bunch of global updates
    # here. Once the updates are finished, we need to commit any changes
    # that the global update made to our mtimedb, and reload the config.
    #
    # Portage normally handles this logic in emerge_main, but again, we can't
    # use that function here.
    if _global_updates(trees, mtimedb["updates"]):
      mtimedb.commit()
      settings, trees, mtimedb = load_emerge_config(trees=trees)

    # Setup implied options. Portage normally handles this logic in
    # emerge_main.
    if "--buildpkgonly" in opts or "buildpkg" in settings.features:
      opts.setdefault("--buildpkg", True)
    if "--getbinpkgonly" in opts:
      opts.setdefault("--usepkgonly", True)
      opts.setdefault("--getbinpkg", True)
    if "getbinpkg" in settings.features:
      # Per emerge_main, FEATURES=getbinpkg overrides --getbinpkg=n
      opts["--getbinpkg"] = True
    if "--getbinpkg" in opts or "--usepkgonly" in opts:
      opts.setdefault("--usepkg", True)
    if "--fetch-all-uri" in opts:
      opts.setdefault("--fetchonly", True)
    if "--skipfirst" in opts:
      opts.setdefault("--resume", True)
    if "--buildpkgonly" in opts:
      # --buildpkgonly will not merge anything, so it overrides all binary
      # package options.
      for opt in ("--getbinpkg", "--getbinpkgonly",
                  "--usepkg", "--usepkgonly"):
        opts.pop(opt, None)
    if (settings.get("PORTAGE_DEBUG", "") == "1" and
        "python-trace" in settings.features):
      portage.debug.set_trace(True)

    # Complain about unsupported options
    for opt in ("--ask", "--ask-enter-invalid", "--resume", "--skipfirst"):
      if opt in opts:
        print "%s is not supported by parallel_emerge" % opt
        sys.exit(1)

    # Make emerge specific adjustments to the config (e.g. colors!)
    adjust_configs(opts, trees)

    # Save our configuration so far in the emerge object
    emerge = self.emerge
    emerge.action, emerge.opts = action, opts
    emerge.settings, emerge.trees, emerge.mtimedb = settings, trees, mtimedb
    emerge.cmdline_packages = cmdline_packages
    root = settings["ROOT"]
    emerge.root_config = trees[root]["root_config"]

    # Populate the binary-package tree index now so later depgraph work can
    # see available binpkgs (fetching remote indexes when --getbinpkg).
    if "--usepkg" in opts:
      emerge.trees[root]["bintree"].populate("--getbinpkg" in opts)
394
David Jamesfcb70ef2011-02-02 16:02:30 -0800395 def CreateDepgraph(self, emerge, packages):
396 """Create an emerge depgraph object."""
397 # Setup emerge options.
398 emerge_opts = emerge.opts.copy()
399
David James386ccd12011-05-04 20:17:42 -0700400 # Ask portage to build a dependency graph. with the options we specified
401 # above.
David Jamesfcb70ef2011-02-02 16:02:30 -0800402 params = create_depgraph_params(emerge_opts, emerge.action)
David Jamesbf1e3442011-05-28 07:44:20 -0700403 success, depgraph, favorites = backtrack_depgraph(
David James386ccd12011-05-04 20:17:42 -0700404 emerge.settings, emerge.trees, emerge_opts, params, emerge.action,
405 packages, emerge.spinner)
406 emerge.depgraph = depgraph
David Jamesfcb70ef2011-02-02 16:02:30 -0800407
David James386ccd12011-05-04 20:17:42 -0700408 # Is it impossible to honor the user's request? Bail!
409 if not success:
410 depgraph.display_problems()
411 sys.exit(1)
David Jamesfcb70ef2011-02-02 16:02:30 -0800412
413 emerge.depgraph = depgraph
David Jamesbf1e3442011-05-28 07:44:20 -0700414 emerge.favorites = favorites
David Jamesfcb70ef2011-02-02 16:02:30 -0800415
David Jamesdeebd692011-05-09 17:02:52 -0700416 # Prime and flush emerge caches.
417 root = emerge.settings["ROOT"]
418 vardb = emerge.trees[root]["vartree"].dbapi
David James0bdc5de2011-05-12 16:22:26 -0700419 if "--pretend" not in emerge.opts:
420 vardb.counter_tick()
David Jamesdeebd692011-05-09 17:02:52 -0700421 vardb.flush_cache()
422
  def GenDependencyTree(self):
    """Get dependency tree info from emerge.

    Also fills in self.package_db and emerge.spinner as side effects.

    Returns:
      A (deps_tree, deps_info) pair. deps_tree maps each package CPV to a
      dict with its "action" and its "deps" (each dep carrying action,
      deptypes, and a nested deps dict); deps_info maps each CPV to
      {"idx": N}, its position in portage's install order.
    """
    start = time.time()

    emerge = self.emerge

    # Create a list of packages to merge
    packages = set(emerge.cmdline_packages[:])

    # Tell emerge to be quiet. We print plenty of info ourselves so we don't
    # need any extra output from portage.
    portage.util.noiselimit = -1

    # My favorite feature: The silent spinner. It doesn't spin. Ever.
    # I'd disable the colors by default too, but they look kind of cool.
    emerge.spinner = stdout_spinner()
    emerge.spinner.update = emerge.spinner.update_quiet

    if "--quiet" not in emerge.opts:
      print "Calculating deps..."

    self.CreateDepgraph(emerge, packages)
    depgraph = emerge.depgraph

    # Build our own tree from the emerge digraph.
    deps_tree = {}
    # pylint: disable=W0212
    digraph = depgraph._dynamic_config.digraph
    root = emerge.settings["ROOT"]
    # NOTE(review): get_db is not among the imports visible in this chunk;
    # it is presumably imported/defined elsewhere in this module — confirm.
    final_db = get_db(depgraph._dynamic_config, root)
    for node, node_deps in digraph.nodes.items():
      # Calculate dependency packages that need to be installed first. Each
      # child on the digraph is a dependency. The "operation" field specifies
      # what we're doing (e.g. merge, uninstall, etc.). The "priorities" array
      # contains the type of dependency (e.g. build, runtime, runtime_post,
      # etc.)
      #
      # Portage refers to the identifiers for packages as a CPV. This acronym
      # stands for Component/Path/Version.
      #
      # Here's an example CPV: chromeos-base/power_manager-0.0.1-r1
      # Split up, this CPV would be:
      # C -- Component: chromeos-base
      # P -- Path: power_manager
      # V -- Version: 0.0.1-r1
      #
      # We just refer to CPVs as packages here because it's easier.
      deps = {}
      for child, priorities in node_deps[0].items():
        # Only track Package nodes that live in our root; other node kinds
        # (e.g. sets, blockers-only entries) and other roots are skipped.
        if isinstance(child, Package) and child.root == root:
          cpv = str(child.cpv)
          action = str(child.operation)

          # If we're uninstalling a package, check whether Portage is
          # installing a replacement. If so, just depend on the installation
          # of the new package, because the old package will automatically
          # be uninstalled at that time.
          if action == "uninstall":
            for pkg in final_db.match_pkgs(child.slot_atom):
              cpv = str(pkg.cpv)
              action = "merge"
              break

          deps[cpv] = dict(action=action,
                           deptypes=[str(x) for x in priorities],
                           deps={})

      # We've built our list of deps, so we can add our package to the tree.
      if isinstance(node, Package) and node.root == root:
        deps_tree[str(node.cpv)] = dict(action=str(node.operation),
                                        deps=deps)

    # Ask portage for its install plan, so that we can only throw out
    # dependencies that portage throws out.
    deps_info = {}
    for pkg in depgraph.altlist():
      if isinstance(pkg, Package):
        assert pkg.root == root
        self.package_db[pkg.cpv] = pkg

        # Save off info about the package: "idx" is its position in
        # portage's install order (len(deps_info) grows by one per package).
        deps_info[str(pkg.cpv)] = {"idx": len(deps_info)}

    seconds = time.time() - start
    if "--quiet" not in emerge.opts:
      print "Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60)

    return deps_tree, deps_info
515
  def PrintTree(self, deps, depth=""):
    """Print the deps we have seen in the emerge output.

    Args:
      deps: Dependency tree structure (as built by GenDependencyTree).
      depth: Allows printing the tree recursively, with indentation.
    """
    for entry in sorted(deps):
      action = deps[entry]["action"]
      print "%s %s (%s)" % (depth, entry, action)
      # Recurse so each child renders indented beneath its parent.
      self.PrintTree(deps[entry]["deps"], depth=depth + "  ")
527
David James386ccd12011-05-04 20:17:42 -0700528 def GenDependencyGraph(self, deps_tree, deps_info):
David Jamesfcb70ef2011-02-02 16:02:30 -0800529 """Generate a doubly linked dependency graph.
530
531 Args:
532 deps_tree: Dependency tree structure.
533 deps_info: More details on the dependencies.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500534
David Jamesfcb70ef2011-02-02 16:02:30 -0800535 Returns:
536 Deps graph in the form of a dict of packages, with each package
537 specifying a "needs" list and "provides" list.
538 """
539 emerge = self.emerge
David Jamesfcb70ef2011-02-02 16:02:30 -0800540
David Jamesfcb70ef2011-02-02 16:02:30 -0800541 # deps_map is the actual dependency graph.
542 #
543 # Each package specifies a "needs" list and a "provides" list. The "needs"
544 # list indicates which packages we depend on. The "provides" list
545 # indicates the reverse dependencies -- what packages need us.
546 #
547 # We also provide some other information in the dependency graph:
548 # - action: What we're planning on doing with this package. Generally,
549 # "merge", "nomerge", or "uninstall"
David Jamesfcb70ef2011-02-02 16:02:30 -0800550 deps_map = {}
551
    def ReverseTree(packages):
      """Convert tree to digraph.

      Take the tree of package -> requirements and reverse it to a digraph of
      buildable packages -> packages they unblock.

      Mutates deps_map (from the enclosing scope), adding a node per package
      with "needs"/"provides" edges plus "action", "nodeps", "binary", and
      (when known) "idx" fields.

      Args:
        packages: Tree(s) of dependencies.

      Returns:
        Unsanitized digraph.
      """
      # Binary packages only run these phases, so only these phases can
      # observe dependent packages at install time.
      binpkg_phases = set(["setup", "preinst", "postinst"])
      needed_dep_types = set(["blocker", "buildtime", "runtime"])
      for pkg in packages:

        # Create an entry for the package
        action = packages[pkg]["action"]
        default_pkg = {"needs": {}, "provides": set(), "action": action,
                       "nodeps": False, "binary": False}
        this_pkg = deps_map.setdefault(pkg, default_pkg)

        if pkg in deps_info:
          this_pkg["idx"] = deps_info[pkg]["idx"]

        # If a package doesn't have any defined phases that might use the
        # dependent packages (i.e. pkg_setup, pkg_preinst, or pkg_postinst),
        # we can install this package before its deps are ready.
        emerge_pkg = self.package_db.get(pkg)
        if emerge_pkg and emerge_pkg.type_name == "binary":
          this_pkg["binary"] = True
          defined_phases = emerge_pkg.defined_phases
          defined_binpkg_phases = binpkg_phases.intersection(defined_phases)
          if not defined_binpkg_phases:
            this_pkg["nodeps"] = True

        # Create entries for dependencies of this package first.
        ReverseTree(packages[pkg]["deps"])

        # Add dependencies to this package.
        for dep, dep_item in packages[pkg]["deps"].iteritems():
          # We only need to enforce strict ordering of dependencies if the
          # dependency is a blocker, or is a buildtime or runtime dependency.
          # (I.e., ignored, optional, and runtime_post dependencies don't
          # depend on ordering.)
          dep_types = dep_item["deptypes"]
          if needed_dep_types.intersection(dep_types):
            deps_map[dep]["provides"].add(pkg)
            this_pkg["needs"][dep] = "/".join(dep_types)

          # If there's a blocker, Portage may need to move files from one
          # package to another, which requires editing the CONTENTS files of
          # both packages. To avoid race conditions while editing this file,
          # the two packages must not be installed in parallel, so we can't
          # safely ignore dependencies. See http://crosbug.com/19328
          if "blocker" in dep_types:
            this_pkg["nodeps"] = False
609
    def FindCycles():
      """Find cycles in the dependency tree.

      Runs a depth-first search from every node of deps_map (closure).

      Returns:
        A dict mapping cyclic packages to a dict of the deps that cause
        cycles. For each dep that causes cycles, it returns an example
        traversal of the graph that shows the cycle.
      """

      def FindCyclesAtNode(pkg, cycles, unresolved, resolved):
        """Find cycles in cyclic dependencies starting at specified package.

        Recursive DFS helper; note recursion depth is bounded by the longest
        dependency chain in deps_map.

        Args:
          pkg: Package identifier.
          cycles: A dict mapping cyclic packages to a dict of the deps that
            cause cycles. For each dep that causes cycles, it returns an
            example traversal of the graph that shows the cycle.
          unresolved: Nodes that have been visited but are not fully processed.
          resolved: Nodes that have been visited and are fully processed.
        """
        pkg_cycles = cycles.get(pkg)
        if pkg in resolved and not pkg_cycles:
          # If we already looked at this package, and found no cyclic
          # dependencies, we can stop now.
          return
        unresolved.append(pkg)
        for dep in deps_map[pkg]["needs"]:
          if dep in unresolved:
            # dep is on the current DFS path: everything from its first
            # appearance to here forms a cycle.
            idx = unresolved.index(dep)
            mycycle = unresolved[idx:] + [dep]
            for i in xrange(len(mycycle) - 1):
              pkg1, pkg2 = mycycle[i], mycycle[i+1]
              cycles.setdefault(pkg1, {}).setdefault(pkg2, mycycle)
          elif not pkg_cycles or dep not in pkg_cycles:
            # Looks like we haven't seen this edge before.
            FindCyclesAtNode(dep, cycles, unresolved, resolved)
        unresolved.pop()
        resolved.add(pkg)

      cycles, unresolved, resolved = {}, [], set()
      for pkg in deps_map:
        FindCyclesAtNode(pkg, cycles, unresolved, resolved)
      return cycles
653
    def RemoveUnusedPackages():
      """Remove installed packages, propagating dependencies.

      Deletes from deps_map (closure) every package that is not in
      portage's install plan (deps_info), splicing the graph so that each
      removed node's providers and dependents are connected directly.
      """
      # Schedule packages that aren't on the install list for removal
      rm_pkgs = set(deps_map.keys()) - set(deps_info.keys())

      # Remove the packages we don't want, simplifying the graph and making
      # it easier for us to crack cycles.
      for pkg in sorted(rm_pkgs):
        this_pkg = deps_map[pkg]
        needs = this_pkg["needs"]
        provides = this_pkg["provides"]
        # Rewire edges around the removed node: everything pkg needed is now
        # provided to everything pkg provided, dropping self-edges.
        for dep in needs:
          dep_provides = deps_map[dep]["provides"]
          dep_provides.update(provides)
          dep_provides.discard(pkg)
          dep_provides.discard(dep)
        for target in provides:
          target_needs = deps_map[target]["needs"]
          target_needs.update(needs)
          target_needs.pop(pkg, None)
          target_needs.pop(target, None)
        del deps_map[pkg]
676
677 def PrintCycleBreak(basedep, dep, mycycle):
678 """Print details about a cycle that we are planning on breaking.
679
Mike Frysinger02e1e072013-11-10 22:11:34 -0500680 We are breaking a cycle where dep needs basedep. mycycle is an
681 example cycle which contains dep -> basedep.
682 """
David Jamesfcb70ef2011-02-02 16:02:30 -0800683
David Jamesfcb70ef2011-02-02 16:02:30 -0800684 needs = deps_map[dep]["needs"]
685 depinfo = needs.get(basedep, "deleted")
David Jamesfcb70ef2011-02-02 16:02:30 -0800686
David James3f778802011-08-25 19:31:45 -0700687 # It's OK to swap install order for blockers, as long as the two
688 # packages aren't installed in parallel. If there is a cycle, then
689 # we know the packages depend on each other already, so we can drop the
690 # blocker safely without printing a warning.
691 if depinfo == "blocker":
692 return
693
David Jamesfcb70ef2011-02-02 16:02:30 -0800694 # Notify the user that we're breaking a cycle.
695 print "Breaking %s -> %s (%s)" % (dep, basedep, depinfo)
696
697 # Show cycle.
David James321490a2012-12-17 12:05:56 -0800698 for i in xrange(len(mycycle) - 1):
David Jamesfcb70ef2011-02-02 16:02:30 -0800699 pkg1, pkg2 = mycycle[i], mycycle[i+1]
700 needs = deps_map[pkg1]["needs"]
701 depinfo = needs.get(pkg2, "deleted")
702 if pkg1 == dep and pkg2 == basedep:
703 depinfo = depinfo + ", deleting"
704 print " %s -> %s (%s)" % (pkg1, pkg2, depinfo)
705
    def SanitizeTree():
      """Remove circular dependencies.

      We prune all dependencies involved in cycles that go against the emerge
      ordering. This has a nice property: we're guaranteed to merge
      dependencies in the same order that portage does.

      Because we don't treat any dependencies as "soft" unless they're killed
      by a cycle, we pay attention to a larger number of dependencies when
      merging. This hurts performance a bit, but helps reliability.
      """
      start = time.time()
      cycles = FindCycles()
      while cycles:
        # For every cyclic edge dep -> basedep, drop the edge whenever
        # portage's own merge ordering (the "idx" field) says basedep would
        # be merged at or after dep anyway.
        for dep, mycycles in cycles.iteritems():
          for basedep, mycycle in mycycles.iteritems():
            if deps_info[basedep]["idx"] >= deps_info[dep]["idx"]:
              if "--quiet" not in emerge.opts:
                PrintCycleBreak(basedep, dep, mycycle)
              # Remove the edge in both directions: forward ("needs") and
              # reverse ("provides").
              del deps_map[dep]["needs"][basedep]
              deps_map[basedep]["provides"].remove(dep)
        # Breaking edges may fix some cycles and leave others, so recompute
        # from scratch until the graph comes back clean.
        cycles = FindCycles()
      seconds = time.time() - start
      if "--quiet" not in emerge.opts and seconds >= 0.1:
        print "Tree sanitized in %dm%.1fs" % (seconds / 60, seconds % 60)
731
David James8c7e5e32011-06-28 11:26:03 -0700732 def FindRecursiveProvides(pkg, seen):
733 """Find all nodes that require a particular package.
734
735 Assumes that graph is acyclic.
736
737 Args:
738 pkg: Package identifier.
739 seen: Nodes that have been visited so far.
740 """
741 if pkg in seen:
742 return
743 seen.add(pkg)
744 info = deps_map[pkg]
745 info["tprovides"] = info["provides"].copy()
746 for dep in info["provides"]:
747 FindRecursiveProvides(dep, seen)
748 info["tprovides"].update(deps_map[dep]["tprovides"])
749
David Jamesa22906f2011-05-04 19:53:26 -0700750 ReverseTree(deps_tree)
David Jamesa22906f2011-05-04 19:53:26 -0700751
David James386ccd12011-05-04 20:17:42 -0700752 # We need to remove unused packages so that we can use the dependency
753 # ordering of the install process to show us what cycles to crack.
754 RemoveUnusedPackages()
David Jamesfcb70ef2011-02-02 16:02:30 -0800755 SanitizeTree()
David James8c7e5e32011-06-28 11:26:03 -0700756 seen = set()
757 for pkg in deps_map:
758 FindRecursiveProvides(pkg, seen)
David Jamesfcb70ef2011-02-02 16:02:30 -0800759 return deps_map
760
761 def PrintInstallPlan(self, deps_map):
762 """Print an emerge-style install plan.
763
764 The install plan lists what packages we're installing, in order.
765 It's useful for understanding what parallel_emerge is doing.
766
767 Args:
768 deps_map: The dependency graph.
769 """
770
771 def InstallPlanAtNode(target, deps_map):
772 nodes = []
773 nodes.append(target)
774 for dep in deps_map[target]["provides"]:
775 del deps_map[dep]["needs"][target]
776 if not deps_map[dep]["needs"]:
777 nodes.extend(InstallPlanAtNode(dep, deps_map))
778 return nodes
779
780 deps_map = copy.deepcopy(deps_map)
781 install_plan = []
782 plan = set()
783 for target, info in deps_map.iteritems():
784 if not info["needs"] and target not in plan:
785 for item in InstallPlanAtNode(target, deps_map):
786 plan.add(item)
787 install_plan.append(self.package_db[item])
788
789 for pkg in plan:
790 del deps_map[pkg]
791
792 if deps_map:
793 print "Cyclic dependencies:", " ".join(deps_map)
794 PrintDepsMap(deps_map)
795 sys.exit(1)
796
797 self.emerge.depgraph.display(install_plan)
798
799
800def PrintDepsMap(deps_map):
801 """Print dependency graph, for each package list it's prerequisites."""
802 for i in sorted(deps_map):
803 print "%s: (%s) needs" % (i, deps_map[i]["action"])
804 needs = deps_map[i]["needs"]
805 for j in sorted(needs):
806 print " %s" % (j)
807 if not needs:
808 print " no dependencies"
809
810
class EmergeJobState(object):
  """Tracks the state of a single emerge/fetch/unpack job."""

  __slots__ = ["done", "filename", "last_notify_timestamp", "last_output_seek",
               "last_output_timestamp", "pkgname", "retcode", "start_timestamp",
               "target", "fetch_only", "unpack_only"]

  def __init__(self, target, pkgname, done, filename, start_timestamp,
               retcode=None, fetch_only=False, unpack_only=False):
    # Identity of the job:
    #   target: full name of what we're building (e.g.
    #     virtual/target-os-1-r60).
    #   pkgname: short name of the target (e.g. target-os-1-r60).
    self.target = target
    self.pkgname = pkgname

    # Job mode flags: fetch-only just downloads; unpack-only just extracts.
    self.fetch_only = fetch_only
    self.unpack_only = unpack_only

    # Lifecycle: when the job started, whether it has finished, and the
    # return code (None until the job is actually finished).
    self.start_timestamp = start_timestamp
    self.done = done
    self.retcode = retcode

    # Output bookkeeping. The job's output accumulates in `filename`. We
    # track the byte offset of the last complete line already echoed
    # (last_output_seek, initially zero) so repeated prints of the same
    # ebuild resume at the right spot, and when we last printed anything
    # (last_output_timestamp, initially zero). The log file's name is
    # announced at job start, so last_notify_timestamp begins at
    # start_timestamp.
    self.filename = filename
    self.last_notify_timestamp = start_timestamp
    self.last_output_seek = 0
    self.last_output_timestamp = 0
859
David Jamesfcb70ef2011-02-02 16:02:30 -0800860
David James321490a2012-12-17 12:05:56 -0800861def KillHandler(_signum, _frame):
David James7358d032011-05-19 10:40:03 -0700862 # Kill self and all subprocesses.
863 os.killpg(0, signal.SIGKILL)
864
Mike Frysingercc838832014-05-24 13:10:30 -0400865
David Jamesfcb70ef2011-02-02 16:02:30 -0800866def SetupWorkerSignals():
David James321490a2012-12-17 12:05:56 -0800867 def ExitHandler(_signum, _frame):
David James7358d032011-05-19 10:40:03 -0700868 # Set KILLED flag.
869 KILLED.set()
David James13cead42011-05-18 16:22:01 -0700870
David James7358d032011-05-19 10:40:03 -0700871 # Remove our signal handlers so we don't get called recursively.
872 signal.signal(signal.SIGINT, KillHandler)
873 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -0800874
875 # Ensure that we exit quietly and cleanly, if possible, when we receive
876 # SIGTERM or SIGINT signals. By default, when the user hits CTRL-C, all
877 # of the child processes will print details about KeyboardInterrupt
878 # exceptions, which isn't very helpful.
879 signal.signal(signal.SIGINT, ExitHandler)
880 signal.signal(signal.SIGTERM, ExitHandler)
881
Mike Frysingerd74fe4a2014-04-24 11:43:38 -0400882
def EmergeProcess(output, target, *args, **kwargs):
  """Merge a package in a subprocess.

  Forks a child that redirects its stdout/stderr into `output`, builds a
  portage Scheduler, and runs the merge; the parent just waits for it.

  Args:
    output: Temporary file to write output.
    target: The package we'll be processing (for display purposes).
    *args: Arguments to pass to Scheduler constructor.
    **kwargs: Keyword arguments to pass to Scheduler constructor.

  Returns:
    The exit code returned by the subprocess (the raw status word from
    os.waitpid, not a plain 0-255 exit code).
  """
  pid = os.fork()
  if pid == 0:
    # Child: run the merge and _exit without returning.
    try:
      proctitle.settitle('EmergeProcess', target)

      # Sanity checks.
      if sys.stdout.fileno() != 1:
        raise Exception("sys.stdout.fileno() != 1")
      if sys.stderr.fileno() != 2:
        raise Exception("sys.stderr.fileno() != 2")

      # - Redirect 1 (stdout) and 2 (stderr) at our temporary file.
      # - Redirect 0 to point at sys.stdin. In this case, sys.stdin
      #   points at a file reading os.devnull, because multiprocessing mucks
      #   with sys.stdin.
      # - Leave the sys.stdin and output filehandles alone.
      fd_pipes = {0: sys.stdin.fileno(),
                  1: output.fileno(),
                  2: output.fileno(),
                  sys.stdin.fileno(): sys.stdin.fileno(),
                  output.fileno(): output.fileno()}
      # pylint: disable=W0212
      portage.process._setup_pipes(fd_pipes, close_fds=False)

      # Portage doesn't like when sys.stdin.fileno() != 0, so point sys.stdin
      # at the filehandle we just created in _setup_pipes.
      if sys.stdin.fileno() != 0:
        sys.__stdin__ = sys.stdin = os.fdopen(0, "r")

      scheduler = Scheduler(*args, **kwargs)

      # Enable blocker handling even though we're in --nodeps mode. This
      # allows us to unmerge the blocker after we've merged the replacement.
      scheduler._opts_ignore_blockers = frozenset()

      # Actually do the merge.
      retval = scheduler.merge()

    # We catch all exceptions here (including SystemExit, KeyboardInterrupt,
    # etc) so as to ensure that we don't confuse the multiprocessing module,
    # which expects that all forked children exit with os._exit().
    # pylint: disable=W0702
    except:
      traceback.print_exc(file=output)
      retval = 1
    # Flush everything so the log file is complete before we exit.
    sys.stdout.flush()
    sys.stderr.flush()
    output.flush()
    # pylint: disable=W0212
    os._exit(retval)
  else:
    # Parent: return the exit code of the subprocess.
    return os.waitpid(pid, 0)[1]
David Jamesfcb70ef2011-02-02 16:02:30 -0800948
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700949
def UnpackPackage(pkg_state):
  """Unpacks package described by pkg_state.

  Decompresses the binary package tarball for pkg_state.target and pipes
  the result through a sudo'd tar into the root filesystem.

  Args:
    pkg_state: EmergeJobState object describing target.

  Returns:
    Exit code returned by subprocess.
  """
  sysroot = os.environ["SYSROOT"]
  pkgdir = os.environ.get("PKGDIR", os.path.join(sysroot, "packages"))
  root = os.environ.get("ROOT", sysroot)
  tarball = os.path.join(pkgdir, pkg_state.target + ".tbz2")

  decompressor = cros_build_lib.FindCompressor(cros_build_lib.COMP_BZIP2)
  decompress_cmd = [decompressor, "-dc"]
  if decompressor.endswith("pbzip2"):
    # Tell pbzip2 not to choke on trailing (non-bzip2) data in the file.
    decompress_cmd.append("--ignore-trailing-garbage=1")
  decompress_cmd.append(tarball)

  result = cros_build_lib.RunCommand(decompress_cmd, cwd=root,
                                     stdout_to_pipe=True, print_cmd=False,
                                     error_code_ok=True)

  # If we were not successful, return now and don't attempt untar.
  if result.returncode:
    return result.returncode

  untar_cmd = ["sudo", "tar", "-xf", "-", "-C", root]
  result = cros_build_lib.RunCommand(untar_cmd, cwd=root, input=result.output,
                                     print_cmd=False, error_code_ok=True)

  return result.returncode
981
982
def EmergeWorker(task_queue, job_queue, emerge, package_db, fetch_only=False,
                 unpack_only=False):
  """This worker emerges any packages given to it on the task_queue.

  It expects TargetState objects to be passed to it via task_queue. The
  output of each job is stored in a temporary file. When a merge starts or
  finishes, we push EmergeJobState objects to the job_queue. A None on the
  task_queue is the shutdown sentinel; it is re-queued so sibling workers
  see it too.

  Args:
    task_queue: The queue of tasks for this worker to do.
    job_queue: The queue of results from the worker.
    emerge: An EmergeData() object.
    package_db: A dict, mapping package ids to portage Package objects.
    fetch_only: A bool, indicating if we should just fetch the target.
    unpack_only: A bool, indicating if we should just unpack the target.
  """
  if fetch_only:
    mode = 'fetch'
  elif unpack_only:
    mode = 'unpack'
  else:
    mode = 'emerge'
  proctitle.settitle('EmergeWorker', mode, '[idle]')

  SetupWorkerSignals()
  settings, trees, mtimedb = emerge.settings, emerge.trees, emerge.mtimedb

  # Disable flushing of caches to save on I/O.
  root = emerge.settings["ROOT"]
  vardb = emerge.trees[root]["vartree"].dbapi
  vardb._flush_cache_enabled = False
  bindb = emerge.trees[root]["bintree"].dbapi
  # Might be a set, might be a list, might be None; no clue, just use shallow
  # copy to ensure we can roll it back.
  # pylint: disable=W0212
  original_remotepkgs = copy.copy(bindb.bintree._remotepkgs)

  opts, spinner = emerge.opts, emerge.spinner
  opts["--nodeps"] = True
  if fetch_only:
    opts["--fetchonly"] = True

  while True:
    # Wait for a new item to show up on the queue. This is a blocking wait,
    # so if there's nothing to do, we just sit here.
    pkg_state = task_queue.get()
    if pkg_state is None:
      # If target is None, this means that the main thread wants us to quit.
      # The other workers need to exit too, so we'll push the message back on
      # to the queue so they'll get it too.
      task_queue.put(None)
      return
    if KILLED.is_set():
      return

    target = pkg_state.target
    proctitle.settitle('EmergeWorker', mode, target)

    db_pkg = package_db[target]

    if db_pkg.type_name == "binary":
      if not fetch_only and pkg_state.fetched_successfully:
        # Ensure portage doesn't think our pkg is remote- else it'll force
        # a redownload of it (even if the on-disk file is fine). In-memory
        # caching basically, implemented dumbly.
        bindb.bintree._remotepkgs = None
      else:
        # Roll the remote package state back to the original so portage can
        # still fetch the package if needed. (Fixed: this used to assign to
        # a typo'd "bintree_remotepkgs" attribute, which silently never
        # restored the state.)
        bindb.bintree._remotepkgs = original_remotepkgs

    db_pkg.root_config = emerge.root_config
    install_list = [db_pkg]
    pkgname = db_pkg.pf
    output = tempfile.NamedTemporaryFile(prefix=pkgname + "-", delete=False)
    # Make the log world-readable. This must be an octal constant; the old
    # decimal 644 set a nonsense mode (0o1204).
    os.chmod(output.name, 0o644)
    start_timestamp = time.time()
    job = EmergeJobState(target, pkgname, False, output.name, start_timestamp,
                         fetch_only=fetch_only, unpack_only=unpack_only)
    job_queue.put(job)
    if "--pretend" in opts:
      retcode = 0
    else:
      try:
        emerge.scheduler_graph.mergelist = install_list
        if unpack_only:
          retcode = UnpackPackage(pkg_state)
        else:
          retcode = EmergeProcess(output, target, settings, trees, mtimedb,
                                  opts, spinner, favorites=emerge.favorites,
                                  graph_config=emerge.scheduler_graph)
      except Exception:
        traceback.print_exc(file=output)
        retcode = 1
    output.close()

    if KILLED.is_set():
      return

    job = EmergeJobState(target, pkgname, True, output.name, start_timestamp,
                         retcode, fetch_only=fetch_only,
                         unpack_only=unpack_only)
    job_queue.put(job)

    # Set the title back to idle as the multiprocess pool won't destroy us;
    # when another job comes up, it'll re-use this process.
    proctitle.settitle('EmergeWorker', mode, '[idle]')
1089
David Jamesfcb70ef2011-02-02 16:02:30 -08001090
class LinePrinter(object):
  """Helper object to print a single line.

  Implements the same Print(seek_locations) interface as JobPrinter so the
  PrintWorker queue can treat both kinds of work items uniformly.
  """

  def __init__(self, line):
    # The text to print verbatim.
    self.line = line

  def Print(self, _seek_locations):
    # seek_locations is unused here; it only matters for JobPrinter.
    print self.line
1099
1100
class JobPrinter(object):
  """Helper object to print output of a job."""

  def __init__(self, job, unlink=False):
    """Print output of job.

    If unlink is True, unlink the job output file when done.
    """
    # Snapshot the time now, so elapsed time reflects when the print request
    # was created rather than when PrintWorker services it.
    self.current_time = time.time()
    self.job = job
    self.unlink = unlink

  def Print(self, seek_locations):
    """Print the job's new output since the last recorded position.

    Args:
      seek_locations: Dict mapping log filenames to the byte offset of the
        last complete line already printed; updated in place.
    """

    job = self.job

    # Calculate how long the job has been running.
    seconds = self.current_time - job.start_timestamp

    # Note that we've printed out the job so far.
    job.last_output_timestamp = self.current_time

    # Note that we're starting the job
    info = "job %s (%dm%.1fs)" % (job.pkgname, seconds / 60, seconds % 60)
    last_output_seek = seek_locations.get(job.filename, 0)
    if last_output_seek:
      print "=== Continue output for %s ===" % info
    else:
      print "=== Start output for %s ===" % info

    # Print actual output from job
    f = codecs.open(job.filename, encoding='utf-8', errors='replace')
    f.seek(last_output_seek)
    prefix = job.pkgname + ":"
    for line in f:

      # Save off our position in the file. Only advance past complete
      # lines, so a partial trailing line is re-printed in full next time.
      if line and line[-1] == "\n":
        last_output_seek = f.tell()
        line = line[:-1]

      # Print our line
      print prefix, line.encode('utf-8', 'replace')
    f.close()

    # Save our last spot in the file so that we don't print out the same
    # location twice.
    seek_locations[job.filename] = last_output_seek

    # Note end of output section
    if job.done:
      print "=== Complete: %s ===" % info
    else:
      print "=== Still running: %s ===" % info

    if self.unlink:
      os.unlink(job.filename)
1158
1159
def PrintWorker(queue):
  """A worker that prints stuff to the screen as requested."""
  proctitle.settitle('PrintWorker')

  def OnShutdownSignal(_signum, _frame):
    # Flag shutdown for anyone watching.
    KILLED.set()

    # Re-arm with the hard-kill handler so a second signal ends us for good.
    for signum in (signal.SIGINT, signal.SIGTERM):
      signal.signal(signum, KillHandler)

  # Don't exit on the first SIGINT / SIGTERM, because the parent worker will
  # handle it and tell us when we need to exit.
  for signum in (signal.SIGINT, signal.SIGTERM):
    signal.signal(signum, OnShutdownSignal)

  # Tracks how far into each log file we've already echoed output; the
  # individual Print jobs update it as they go.
  seek_locations = {}
  while True:
    try:
      work_item = queue.get()
      if not work_item:
        # A falsy item is the sentinel telling us to shut down.
        break
      work_item.Print(seek_locations)
      sys.stdout.flush()
    except IOError as ex:
      # EINTR just means a signal interrupted the call; keep printing.
      if ex.errno != errno.EINTR:
        raise
1194
Brian Harring867e2362012-03-17 04:05:17 -07001195
Brian Harring0be85c62012-03-17 19:52:12 -07001196class TargetState(object):
Don Garrett25f309a2014-03-19 14:02:12 -07001197 """Structure descriting the TargetState."""
Brian Harring867e2362012-03-17 04:05:17 -07001198
Brian Harring0be85c62012-03-17 19:52:12 -07001199 __slots__ = ("target", "info", "score", "prefetched", "fetched_successfully")
Brian Harring867e2362012-03-17 04:05:17 -07001200
David James321490a2012-12-17 12:05:56 -08001201 def __init__(self, target, info):
Brian Harring867e2362012-03-17 04:05:17 -07001202 self.target, self.info = target, info
Brian Harring0be85c62012-03-17 19:52:12 -07001203 self.fetched_successfully = False
1204 self.prefetched = False
David James321490a2012-12-17 12:05:56 -08001205 self.score = None
Brian Harring867e2362012-03-17 04:05:17 -07001206 self.update_score()
1207
1208 def __cmp__(self, other):
1209 return cmp(self.score, other.score)
1210
1211 def update_score(self):
1212 self.score = (
1213 -len(self.info["tprovides"]),
Brian Harring0be85c62012-03-17 19:52:12 -07001214 len(self.info["needs"]),
Brian Harring11c5eeb2012-03-18 11:02:39 -07001215 not self.info["binary"],
Brian Harring867e2362012-03-17 04:05:17 -07001216 -len(self.info["provides"]),
1217 self.info["idx"],
1218 self.target,
1219 )
1220
1221
class ScoredHeap(object):
  """Min-heap of TargetState objects with O(1) membership tests by target."""

  __slots__ = ("heap", "_heap_set")

  def __init__(self, initial=()):
    self.heap, self._heap_set = [], set()
    if initial:
      self.multi_put(initial)

  def get(self):
    """Pop and return the lowest-scored (most urgent) item."""
    best = heapq.heappop(self.heap)
    self._heap_set.remove(best.target)
    return best

  def put(self, item):
    """Push a single TargetState onto the heap."""
    if isinstance(item, TargetState):
      heapq.heappush(self.heap, item)
      self._heap_set.add(item.target)
    else:
      raise ValueError("Item %r isn't a TargetState" % (item,))

  def multi_put(self, sequence):
    """Add many items at once, then re-establish the heap invariant."""
    items = list(sequence)
    self.heap.extend(items)
    self._heap_set.update(entry.target for entry in items)
    self.sort()

  def sort(self):
    heapq.heapify(self.heap)

  def __contains__(self, target):
    return target in self._heap_set

  def __nonzero__(self):
    # Python 2 truthiness hook: non-empty heap is truthy.
    return len(self.heap) > 0

  def __len__(self):
    return len(self.heap)
1261
1262
David Jamesfcb70ef2011-02-02 16:02:30 -08001263class EmergeQueue(object):
1264 """Class to schedule emerge jobs according to a dependency graph."""
1265
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001266 def __init__(self, deps_map, emerge, package_db, show_output, unpack_only):
David Jamesfcb70ef2011-02-02 16:02:30 -08001267 # Store the dependency graph.
1268 self._deps_map = deps_map
Brian Harring0be85c62012-03-17 19:52:12 -07001269 self._state_map = {}
David Jamesfcb70ef2011-02-02 16:02:30 -08001270 # Initialize the running queue to empty
Brian Harring0be85c62012-03-17 19:52:12 -07001271 self._build_jobs = {}
1272 self._build_ready = ScoredHeap()
1273 self._fetch_jobs = {}
1274 self._fetch_ready = ScoredHeap()
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001275 self._unpack_jobs = {}
1276 self._unpack_ready = ScoredHeap()
David Jamesfcb70ef2011-02-02 16:02:30 -08001277 # List of total package installs represented in deps_map.
1278 install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"]
1279 self._total_jobs = len(install_jobs)
1280 self._show_output = show_output
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001281 self._unpack_only = unpack_only
David Jamesfcb70ef2011-02-02 16:02:30 -08001282
1283 if "--pretend" in emerge.opts:
1284 print "Skipping merge because of --pretend mode."
1285 sys.exit(0)
1286
David Jamesaaf49e42014-04-24 09:40:05 -07001287 # Set up a session so we can easily terminate all children.
1288 self._SetupSession()
David James7358d032011-05-19 10:40:03 -07001289
David Jamesfcb70ef2011-02-02 16:02:30 -08001290 # Setup scheduler graph object. This is used by the child processes
1291 # to help schedule jobs.
1292 emerge.scheduler_graph = emerge.depgraph.schedulerGraph()
1293
1294 # Calculate how many jobs we can run in parallel. We don't want to pass
1295 # the --jobs flag over to emerge itself, because that'll tell emerge to
1296 # hide its output, and said output is quite useful for debugging hung
1297 # jobs.
1298 procs = min(self._total_jobs,
1299 emerge.opts.pop("--jobs", multiprocessing.cpu_count()))
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001300 self._build_procs = self._unpack_procs = self._fetch_procs = max(1, procs)
David James8c7e5e32011-06-28 11:26:03 -07001301 self._load_avg = emerge.opts.pop("--load-average", None)
David Jamesfcb70ef2011-02-02 16:02:30 -08001302 self._job_queue = multiprocessing.Queue()
1303 self._print_queue = multiprocessing.Queue()
Brian Harring0be85c62012-03-17 19:52:12 -07001304
1305 self._fetch_queue = multiprocessing.Queue()
1306 args = (self._fetch_queue, self._job_queue, emerge, package_db, True)
1307 self._fetch_pool = multiprocessing.Pool(self._fetch_procs, EmergeWorker,
1308 args)
1309
1310 self._build_queue = multiprocessing.Queue()
1311 args = (self._build_queue, self._job_queue, emerge, package_db)
1312 self._build_pool = multiprocessing.Pool(self._build_procs, EmergeWorker,
1313 args)
1314
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001315 if self._unpack_only:
1316 # Unpack pool only required on unpack_only jobs.
1317 self._unpack_queue = multiprocessing.Queue()
1318 args = (self._unpack_queue, self._job_queue, emerge, package_db, False,
1319 True)
1320 self._unpack_pool = multiprocessing.Pool(self._unpack_procs, EmergeWorker,
1321 args)
1322
David Jamesfcb70ef2011-02-02 16:02:30 -08001323 self._print_worker = multiprocessing.Process(target=PrintWorker,
1324 args=[self._print_queue])
1325 self._print_worker.start()
1326
1327 # Initialize the failed queue to empty.
1328 self._retry_queue = []
1329 self._failed = set()
1330
David Jamesfcb70ef2011-02-02 16:02:30 -08001331 # Setup an exit handler so that we print nice messages if we are
1332 # terminated.
1333 self._SetupExitHandler()
1334
1335 # Schedule our jobs.
Brian Harring0be85c62012-03-17 19:52:12 -07001336 self._state_map.update(
1337 (pkg, TargetState(pkg, data)) for pkg, data in deps_map.iteritems())
1338 self._fetch_ready.multi_put(self._state_map.itervalues())
David Jamesfcb70ef2011-02-02 16:02:30 -08001339
David Jamesaaf49e42014-04-24 09:40:05 -07001340 def _SetupSession(self):
1341 """Set up a session so we can easily terminate all children."""
1342 # When we call os.setsid(), this sets up a session / process group for this
1343 # process and all children. These session groups are needed so that we can
1344 # easily kill all children (including processes launched by emerge) before
1345 # we exit.
1346 #
1347 # One unfortunate side effect of os.setsid() is that it blocks CTRL-C from
1348 # being received. To work around this, we only call os.setsid() in a forked
1349 # process, so that the parent can still watch for CTRL-C. The parent will
1350 # just sit around, watching for signals and propagating them to the child,
1351 # until the child exits.
1352 #
1353 # TODO(davidjames): It would be nice if we could replace this with cgroups.
1354 pid = os.fork()
1355 if pid == 0:
1356 os.setsid()
1357 else:
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001358 proctitle.settitle('SessionManager')
1359
David Jamesaaf49e42014-04-24 09:40:05 -07001360 def PropagateToChildren(signum, _frame):
1361 # Just propagate the signals down to the child. We'll exit when the
1362 # child does.
1363 try:
1364 os.kill(pid, signum)
1365 except OSError as ex:
1366 if ex.errno != errno.ESRCH:
1367 raise
1368 signal.signal(signal.SIGINT, PropagateToChildren)
1369 signal.signal(signal.SIGTERM, PropagateToChildren)
1370
1371 def StopGroup(_signum, _frame):
1372 # When we get stopped, stop the children.
1373 try:
1374 os.killpg(pid, signal.SIGSTOP)
1375 os.kill(0, signal.SIGSTOP)
1376 except OSError as ex:
1377 if ex.errno != errno.ESRCH:
1378 raise
1379 signal.signal(signal.SIGTSTP, StopGroup)
1380
1381 def ContinueGroup(_signum, _frame):
1382 # Launch the children again after being stopped.
1383 try:
1384 os.killpg(pid, signal.SIGCONT)
1385 except OSError as ex:
1386 if ex.errno != errno.ESRCH:
1387 raise
1388 signal.signal(signal.SIGCONT, ContinueGroup)
1389
1390 # Loop until the children exit. We exit with os._exit to be sure we
1391 # don't run any finalizers (those will be run by the child process.)
1392 # pylint: disable=W0212
1393 while True:
1394 try:
1395 # Wait for the process to exit. When it does, exit with the return
1396 # value of the subprocess.
1397 os._exit(osutils.GetExitStatus(os.waitpid(pid, 0)[1]))
1398 except OSError as ex:
1399 if ex.errno == errno.EINTR:
1400 continue
1401 traceback.print_exc()
1402 os._exit(1)
1403 except BaseException:
1404 traceback.print_exc()
1405 os._exit(1)
1406
  def _SetupExitHandler(self):
    """Install SIGINT/SIGTERM handlers that report status before exiting.

    The handler dumps the logs of every in-flight build job, flushes the
    printer thread, then kills the whole process group (including emerge
    children) and exits with status 1.
    """

    def ExitHandler(signum, _frame):
      # Set KILLED flag.
      KILLED.set()

      # Kill our signal handlers so we don't get called recursively
      signal.signal(signal.SIGINT, KillHandler)
      signal.signal(signal.SIGTERM, KillHandler)

      # Print our current job status
      for job in self._build_jobs.itervalues():
        if job:
          self._print_queue.put(JobPrinter(job, unlink=True))

      # Notify the user that we are exiting
      self._Print("Exiting on signal %s" % signum)
      # The None sentinel tells the print worker to drain and exit; join it
      # so the messages above actually reach the terminal.
      self._print_queue.put(None)
      self._print_worker.join()

      # Kill child threads, then exit.
      os.killpg(0, signal.SIGKILL)
      sys.exit(1)

    # Print out job status when we are killed
    signal.signal(signal.SIGINT, ExitHandler)
    signal.signal(signal.SIGTERM, ExitHandler)
1434
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001435 def _ScheduleUnpack(self, pkg_state):
1436 self._unpack_jobs[pkg_state.target] = None
1437 self._unpack_queue.put(pkg_state)
1438
Brian Harring0be85c62012-03-17 19:52:12 -07001439 def _Schedule(self, pkg_state):
David Jamesfcb70ef2011-02-02 16:02:30 -08001440 # We maintain a tree of all deps, if this doesn't need
David James8c7e5e32011-06-28 11:26:03 -07001441 # to be installed just free up its children and continue.
David Jamesfcb70ef2011-02-02 16:02:30 -08001442 # It is possible to reinstall deps of deps, without reinstalling
1443 # first level deps, like so:
Mike Frysingerfd969312014-04-02 22:16:42 -04001444 # virtual/target-os (merge) -> eselect (nomerge) -> python (merge)
Brian Harring0be85c62012-03-17 19:52:12 -07001445 this_pkg = pkg_state.info
1446 target = pkg_state.target
1447 if pkg_state.info is not None:
1448 if this_pkg["action"] == "nomerge":
1449 self._Finish(target)
1450 elif target not in self._build_jobs:
1451 # Kick off the build if it's marked to be built.
1452 self._build_jobs[target] = None
1453 self._build_queue.put(pkg_state)
1454 return True
David Jamesfcb70ef2011-02-02 16:02:30 -08001455
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001456 def _ScheduleLoop(self, unpack_only=False):
1457 if unpack_only:
1458 ready_queue = self._unpack_ready
1459 jobs_queue = self._unpack_jobs
1460 procs = self._unpack_procs
1461 else:
1462 ready_queue = self._build_ready
1463 jobs_queue = self._build_jobs
1464 procs = self._build_procs
1465
David James8c7e5e32011-06-28 11:26:03 -07001466 # If the current load exceeds our desired load average, don't schedule
1467 # more than one job.
1468 if self._load_avg and os.getloadavg()[0] > self._load_avg:
1469 needed_jobs = 1
1470 else:
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001471 needed_jobs = procs
David James8c7e5e32011-06-28 11:26:03 -07001472
1473 # Schedule more jobs.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001474 while ready_queue and len(jobs_queue) < needed_jobs:
1475 state = ready_queue.get()
1476 if unpack_only:
1477 self._ScheduleUnpack(state)
1478 else:
1479 if state.target not in self._failed:
1480 self._Schedule(state)
David Jamesfcb70ef2011-02-02 16:02:30 -08001481
  def _Print(self, line):
    """Queue a single line of output for the printer worker to display."""
    self._print_queue.put(LinePrinter(line))
1485
1486 def _Status(self):
1487 """Print status."""
1488 current_time = time.time()
1489 no_output = True
1490
1491 # Print interim output every minute if --show-output is used. Otherwise,
1492 # print notifications about running packages every 2 minutes, and print
1493 # full output for jobs that have been running for 60 minutes or more.
1494 if self._show_output:
1495 interval = 60
1496 notify_interval = 0
1497 else:
1498 interval = 60 * 60
1499 notify_interval = 60 * 2
David James321490a2012-12-17 12:05:56 -08001500 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001501 if job:
1502 last_timestamp = max(job.start_timestamp, job.last_output_timestamp)
1503 if last_timestamp + interval < current_time:
1504 self._print_queue.put(JobPrinter(job))
1505 job.last_output_timestamp = current_time
1506 no_output = False
1507 elif (notify_interval and
1508 job.last_notify_timestamp + notify_interval < current_time):
1509 job_seconds = current_time - job.start_timestamp
1510 args = (job.pkgname, job_seconds / 60, job_seconds % 60, job.filename)
1511 info = "Still building %s (%dm%.1fs). Logs in %s" % args
1512 job.last_notify_timestamp = current_time
1513 self._Print(info)
1514 no_output = False
1515
1516 # If we haven't printed any messages yet, print a general status message
1517 # here.
1518 if no_output:
1519 seconds = current_time - GLOBAL_START
Brian Harring0be85c62012-03-17 19:52:12 -07001520 fjobs, fready = len(self._fetch_jobs), len(self._fetch_ready)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001521 ujobs, uready = len(self._unpack_jobs), len(self._unpack_ready)
Brian Harring0be85c62012-03-17 19:52:12 -07001522 bjobs, bready = len(self._build_jobs), len(self._build_ready)
1523 retries = len(self._retry_queue)
1524 pending = max(0, len(self._deps_map) - fjobs - bjobs)
1525 line = "Pending %s/%s, " % (pending, self._total_jobs)
1526 if fjobs or fready:
1527 line += "Fetching %s/%s, " % (fjobs, fready + fjobs)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001528 if ujobs or uready:
1529 line += "Unpacking %s/%s, " % (ujobs, uready + ujobs)
Brian Harring0be85c62012-03-17 19:52:12 -07001530 if bjobs or bready or retries:
1531 line += "Building %s/%s, " % (bjobs, bready + bjobs)
1532 if retries:
1533 line += "Retrying %s, " % (retries,)
David James8c7e5e32011-06-28 11:26:03 -07001534 load = " ".join(str(x) for x in os.getloadavg())
Brian Harring0be85c62012-03-17 19:52:12 -07001535 line += ("[Time %dm%.1fs Load %s]" % (seconds/60, seconds %60, load))
1536 self._Print(line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001537
1538 def _Finish(self, target):
David James8c7e5e32011-06-28 11:26:03 -07001539 """Mark a target as completed and unblock dependencies."""
1540 this_pkg = self._deps_map[target]
1541 if this_pkg["needs"] and this_pkg["nodeps"]:
1542 # We got installed, but our deps have not been installed yet. Dependent
1543 # packages should only be installed when our needs have been fully met.
1544 this_pkg["action"] = "nomerge"
1545 else:
David James8c7e5e32011-06-28 11:26:03 -07001546 for dep in this_pkg["provides"]:
1547 dep_pkg = self._deps_map[dep]
Brian Harring0be85c62012-03-17 19:52:12 -07001548 state = self._state_map[dep]
David James8c7e5e32011-06-28 11:26:03 -07001549 del dep_pkg["needs"][target]
Brian Harring0be85c62012-03-17 19:52:12 -07001550 state.update_score()
1551 if not state.prefetched:
1552 if dep in self._fetch_ready:
1553 # If it's not currently being fetched, update the prioritization
1554 self._fetch_ready.sort()
1555 elif not dep_pkg["needs"]:
David James8c7e5e32011-06-28 11:26:03 -07001556 if dep_pkg["nodeps"] and dep_pkg["action"] == "nomerge":
1557 self._Finish(dep)
1558 else:
Brian Harring0be85c62012-03-17 19:52:12 -07001559 self._build_ready.put(self._state_map[dep])
David James8c7e5e32011-06-28 11:26:03 -07001560 self._deps_map.pop(target)
David Jamesfcb70ef2011-02-02 16:02:30 -08001561
1562 def _Retry(self):
David James8c7e5e32011-06-28 11:26:03 -07001563 while self._retry_queue:
Brian Harring0be85c62012-03-17 19:52:12 -07001564 state = self._retry_queue.pop(0)
1565 if self._Schedule(state):
1566 self._Print("Retrying emerge of %s." % state.target)
David James8c7e5e32011-06-28 11:26:03 -07001567 break
David Jamesfcb70ef2011-02-02 16:02:30 -08001568
Brian Harringa43f5952012-04-12 01:19:34 -07001569 def _Shutdown(self):
David Jamesfcb70ef2011-02-02 16:02:30 -08001570 # Tell emerge workers to exit. They all exit when 'None' is pushed
1571 # to the queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001572
Brian Harringa43f5952012-04-12 01:19:34 -07001573 # Shutdown the workers first; then jobs (which is how they feed things back)
1574 # then finally the print queue.
Brian Harring0be85c62012-03-17 19:52:12 -07001575
Brian Harringa43f5952012-04-12 01:19:34 -07001576 def _stop(queue, pool):
1577 if pool is None:
1578 return
1579 try:
1580 queue.put(None)
1581 pool.close()
1582 pool.join()
1583 finally:
1584 pool.terminate()
Brian Harring0be85c62012-03-17 19:52:12 -07001585
Brian Harringa43f5952012-04-12 01:19:34 -07001586 _stop(self._fetch_queue, self._fetch_pool)
1587 self._fetch_queue = self._fetch_pool = None
Brian Harring0be85c62012-03-17 19:52:12 -07001588
Brian Harringa43f5952012-04-12 01:19:34 -07001589 _stop(self._build_queue, self._build_pool)
1590 self._build_queue = self._build_pool = None
1591
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001592 if self._unpack_only:
1593 _stop(self._unpack_queue, self._unpack_pool)
1594 self._unpack_queue = self._unpack_pool = None
1595
Brian Harringa43f5952012-04-12 01:19:34 -07001596 if self._job_queue is not None:
1597 self._job_queue.close()
1598 self._job_queue = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001599
1600 # Now that our workers are finished, we can kill the print queue.
Brian Harringa43f5952012-04-12 01:19:34 -07001601 if self._print_worker is not None:
1602 try:
1603 self._print_queue.put(None)
1604 self._print_queue.close()
1605 self._print_worker.join()
1606 finally:
1607 self._print_worker.terminate()
1608 self._print_queue = self._print_worker = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001609
  def Run(self):
    """Run through the scheduled ebuilds.

    Keep running so long as we have uninstalled packages in the
    dependency graph to merge.
    """
    if not self._deps_map:
      return

    # Start the fetchers.
    for _ in xrange(min(self._fetch_procs, len(self._fetch_ready))):
      state = self._fetch_ready.get()
      self._fetch_jobs[state.target] = None
      self._fetch_queue.put(state)

    # Print an update, then get going.
    self._Status()

    retried = set()
    while self._deps_map:
      # Check here that we are actually waiting for something.
      if (self._build_queue.empty() and
          self._job_queue.empty() and
          not self._fetch_jobs and
          not self._fetch_ready and
          not self._unpack_jobs and
          not self._unpack_ready and
          not self._build_jobs and
          not self._build_ready and
          self._deps_map):
        # If we have failed on a package, retry it now.
        if self._retry_queue:
          self._Retry()
        else:
          # Nothing queued, nothing running, but deps remain: we are stuck.
          # Tell the user why we're exiting.
          if self._failed:
            print 'Packages failed:\n\t%s' % '\n\t'.join(self._failed)
            status_file = os.environ.get("PARALLEL_EMERGE_STATUS_FILE")
            if status_file:
              failed_pkgs = set(portage.versions.cpv_getkey(x)
                                for x in self._failed)
              with open(status_file, "a") as f:
                f.write("%s\n" % " ".join(failed_pkgs))
          else:
            print "Deadlock! Circular dependencies!"
          sys.exit(1)

      # Poll the job queue in 5-second slices; after 12 empty slices
      # (~60 seconds) the for/else kicks in and we print a status update.
      for _ in xrange(12):
        try:
          job = self._job_queue.get(timeout=5)
          break
        except Queue.Empty:
          # Check if any more jobs can be scheduled.
          self._ScheduleLoop()
      else:
        # Print an update every 60 seconds.
        self._Status()
        continue

      target = job.target

      # Progress report from a fetcher process. job.done is False for the
      # initial "started" notification, True once the fetch has finished.
      if job.fetch_only:
        if not job.done:
          self._fetch_jobs[job.target] = job
        else:
          state = self._state_map[job.target]
          state.prefetched = True
          state.fetched_successfully = (job.retcode == 0)
          del self._fetch_jobs[job.target]
          self._Print("Fetched %s in %2.2fs"
                      % (target, time.time() - job.start_timestamp))

          if self._show_output or job.retcode != 0:
            self._print_queue.put(JobPrinter(job, unlink=True))
          else:
            os.unlink(job.filename)
          # Failure or not, let build work with it next.
          if not self._deps_map[job.target]["needs"]:
            self._build_ready.put(state)
            self._ScheduleLoop()

          if self._unpack_only and job.retcode == 0:
            self._unpack_ready.put(state)
            self._ScheduleLoop(unpack_only=True)

        # Keep the fetcher pool busy with the next download, if any.
        if self._fetch_ready:
          state = self._fetch_ready.get()
          self._fetch_queue.put(state)
          self._fetch_jobs[state.target] = None
        else:
          # Minor optimization; shut down fetchers early since we know
          # the queue is empty.
          self._fetch_queue.put(None)
        continue

      # Progress report from an unpack worker.
      if job.unpack_only:
        if not job.done:
          self._unpack_jobs[target] = job
        else:
          del self._unpack_jobs[target]
          self._Print("Unpacked %s in %2.2fs"
                      % (target, time.time() - job.start_timestamp))
          if self._show_output or job.retcode != 0:
            self._print_queue.put(JobPrinter(job, unlink=True))
          else:
            os.unlink(job.filename)
          # Feed the next package to the unpack pool, if one is ready.
          if self._unpack_ready:
            state = self._unpack_ready.get()
            self._unpack_queue.put(state)
            self._unpack_jobs[state.target] = None
        continue

      # A build job's "started" notification; remember the job object so
      # _Status can report on long-running builds.
      if not job.done:
        self._build_jobs[target] = job
        self._Print("Started %s (logged in %s)" % (target, job.filename))
        continue

      # Print output of job
      if self._show_output or job.retcode != 0:
        self._print_queue.put(JobPrinter(job, unlink=True))
      else:
        os.unlink(job.filename)
      del self._build_jobs[target]

      seconds = time.time() - job.start_timestamp
      details = "%s (in %dm%.1fs)" % (target, seconds / 60, seconds % 60)
      previously_failed = target in self._failed

      # Complain if necessary.
      if job.retcode != 0:
        # Handle job failure.
        if previously_failed:
          # If this job has failed previously, give up.
          self._Print("Failed %s. Your build has failed." % details)
        else:
          # Queue up this build to try again after a long while.
          retried.add(target)
          self._retry_queue.append(self._state_map[target])
          self._failed.add(target)
          self._Print("Failed %s, retrying later." % details)
      else:
        if previously_failed:
          # Remove target from list of failed packages.
          self._failed.remove(target)

        self._Print("Completed %s" % details)

        # Mark as completed and unblock waiting ebuilds.
        self._Finish(target)

        if previously_failed and self._retry_queue:
          # If we have successfully retried a failed package, and there
          # are more failed packages, try the next one. We will only have
          # one retrying package actively running at a time.
          self._Retry()


      # Schedule pending jobs and print an update.
      self._ScheduleLoop()
      self._Status()

    # If packages were retried, output a warning.
    if retried:
      self._Print("")
      self._Print("WARNING: The following packages failed the first time,")
      self._Print("but succeeded upon retry. This might indicate incorrect")
      self._Print("dependencies.")
      for pkg in retried:
        self._Print("  %s" % pkg)
      self._Print("@@@STEP_WARNINGS@@@")
      self._Print("")

    # Tell child threads to exit.
    self._Print("Merge complete")
David Jamesfcb70ef2011-02-02 16:02:30 -08001784
1785
def main(argv):
  """Entry point: run real_main, then reap multiprocessing leftovers.

  multiprocessing does not always clean up after itself
  (http://bugs.python.org/issue4106), so after the real work finishes we
  force a GC pass and manually join any queue feeder threads.
  """
  try:
    return real_main(argv)
  finally:
    # Run the GC before interpreter shutdown starts, so feeder threads get
    # their cue to exit.
    gc.collect()
    # Join any feeder threads that were actually started (ident is None
    # for threads that never ran).
    for thread in threading.enumerate():
      if thread.ident is not None and thread.name == 'QueueFeederThread':
        thread.join(1)
David Jamesfcb70ef2011-02-02 16:02:30 -08001801
Brian Harring8294d652012-05-23 02:20:52 -07001802
def get_db(config, root):
  """Return the package dbapi for the given root.

  Compatible with both portage 2.1.11 (which exposes ``mydbapi``) and
  portage 2.2.10, where ``mydbapi`` was removed and the data lives in the
  private ``_filtered_trees`` mapping.

  TODO(bsimonnet): Remove this once portage has been uprevd.
  """
  try:
    dbs = config.mydbapi
  except AttributeError:
    # Newer portage: reach into the private filtered-trees structure.
    # pylint: disable=W0212
    return config._filtered_trees[root]['graph_db']
  return dbs[root]
1814
1815
def real_main(argv):
  """Parse arguments, build the dependency graph, and run the merges.

  Args:
    argv: Command-line arguments (without the program name).

  Returns:
    Process exit status (0 on success). May not return at all: on a
    portage self-upgrade this process is replaced via os.execvp.
  """
  parallel_emerge_args = argv[:]
  deps = DepGraphGenerator()
  deps.Initialize(parallel_emerge_args)
  emerge = deps.emerge

  # An explicit emerge action (e.g. --unmerge) is delegated straight to
  # regular emerge; parallel_emerge only accelerates plain merges.
  if emerge.action is not None:
    argv = deps.ParseParallelEmergeArgs(argv)
    return emerge_main(argv)
  elif not emerge.cmdline_packages:
    Usage()
    return 1

  # Unless we're in pretend mode, there's not much point running without
  # root access. We need to be able to install packages.
  #
  # NOTE: Even if you're running --pretend, it's a good idea to run
  # parallel_emerge with root access so that portage can write to the
  # dependency cache. This is important for performance.
  if "--pretend" not in emerge.opts and portage.data.secpass < 2:
    print "parallel_emerge: superuser access is required."
    return 1

  if "--quiet" not in emerge.opts:
    cmdline_packages = " ".join(emerge.cmdline_packages)
    print "Starting fast-emerge."
    print " Building package %s on %s" % (cmdline_packages,
                                          deps.board or "root")

  deps_tree, deps_info = deps.GenDependencyTree()

  # You want me to be verbose? I'll give you two trees! Twice as much value.
  if "--tree" in emerge.opts and "--verbose" in emerge.opts:
    deps.PrintTree(deps_tree)

  deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)

  # OK, time to print out our progress so far.
  deps.PrintInstallPlan(deps_graph)
  if "--tree" in emerge.opts:
    PrintDepsMap(deps_graph)

  # Are we upgrading portage? If so, and there are more packages to merge,
  # schedule a restart of parallel_emerge to merge the rest. This ensures that
  # we pick up all updates to portage settings before merging any more
  # packages.
  portage_upgrade = False
  root = emerge.settings["ROOT"]
  # pylint: disable=W0212
  final_db = get_db(emerge.depgraph._dynamic_config, root)
  if root == "/":
    for db_pkg in final_db.match_pkgs("sys-apps/portage"):
      portage_pkg = deps_graph.get(db_pkg.cpv)
      if portage_pkg:
        portage_upgrade = True
        if "--quiet" not in emerge.opts:
          print "Upgrading portage first, then restarting..."

  # Upgrade Portage first, then the rest of the packages.
  #
  # In order to grant the child permission to run setsid, we need to run sudo
  # again. We preserve SUDO_USER here in case an ebuild depends on it.
  if portage_upgrade:
    # Calculate what arguments to use when re-invoking.
    args = ["sudo", "-E", "SUDO_USER=%s" % os.environ.get("SUDO_USER", "")]
    args += [os.path.abspath(sys.argv[0])] + parallel_emerge_args
    args += ["--exclude=sys-apps/portage"]

    # First upgrade Portage.
    passthrough_args = ("--quiet", "--pretend", "--verbose")
    emerge_args = [k for k in emerge.opts if k in passthrough_args]
    ret = emerge_main(emerge_args + ["portage"])
    if ret != 0:
      return ret

    # Now upgrade the rest.
    os.execvp(args[0], args)

  # Run the queued emerges.
  scheduler = EmergeQueue(deps_graph, emerge, deps.package_db, deps.show_output,
                          deps.unpack_only)
  try:
    scheduler.Run()
  finally:
    # pylint: disable=W0212
    scheduler._Shutdown()
  # Release the scheduler reference now that everything is shut down.
  scheduler = None

  clean_logs(emerge.settings)

  print "Done"
  return 0