blob: 31b902ecd4edb22ddc0053f074220f613c6fe4f9 [file] [log] [blame]
Mike Frysinger9f7e4ee2013-03-13 15:43:03 -04001#!/usr/bin/python
Mike Frysinger0a647fc2012-08-06 14:36:05 -04002# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
David Jamesfcb70ef2011-02-02 16:02:30 -08003# Use of this source code is governed by a BSD-style license that can be
4# found in the LICENSE file.
5
6"""Program to run emerge in parallel, for significant speedup.
7
8Usage:
David James386ccd12011-05-04 20:17:42 -07009 ./parallel_emerge [--board=BOARD] [--workon=PKGS]
David Jamesfcb70ef2011-02-02 16:02:30 -080010 [--force-remote-binary=PKGS] [emerge args] package
11
David James78b6cd92012-04-02 21:36:12 -070012This script runs multiple emerge processes in parallel, using appropriate
13Portage APIs. It is faster than standard emerge because it has a
14multiprocess model instead of an asynchronous model.
David Jamesfcb70ef2011-02-02 16:02:30 -080015"""
16
17import codecs
18import copy
19import errno
Brian Harring8294d652012-05-23 02:20:52 -070020import gc
David James8c7e5e32011-06-28 11:26:03 -070021import heapq
David Jamesfcb70ef2011-02-02 16:02:30 -080022import multiprocessing
23import os
Mike Frysinger1ae28092013-10-17 17:17:22 -040024try:
25 import Queue
26except ImportError:
27 # Python-3 renamed to "queue". We still use Queue to avoid collisions
28 # with naming variables as "queue". Maybe we'll transition at some point.
29 # pylint: disable=F0401
30 import queue as Queue
David Jamesfcb70ef2011-02-02 16:02:30 -080031import signal
32import sys
33import tempfile
Brian Harring8294d652012-05-23 02:20:52 -070034import threading
David Jamesfcb70ef2011-02-02 16:02:30 -080035import time
36import traceback
David Jamesfcb70ef2011-02-02 16:02:30 -080037
Thiago Goncalesf4acc422013-07-17 10:26:35 -070038from chromite.lib import cros_build_lib
David Jamesaaf49e42014-04-24 09:40:05 -070039from chromite.lib import osutils
Mike Frysingerd74fe4a2014-04-24 11:43:38 -040040from chromite.lib import proctitle
Thiago Goncalesf4acc422013-07-17 10:26:35 -070041
David Jamesfcb70ef2011-02-02 16:02:30 -080042# If PORTAGE_USERNAME isn't specified, scrape it from the $HOME variable. On
43# Chromium OS, the default "portage" user doesn't have the necessary
44# permissions. It'd be easier if we could default to $USERNAME, but $USERNAME
45# is "root" here because we get called through sudo.
46#
47# We need to set this before importing any portage modules, because portage
48# looks up "PORTAGE_USERNAME" at import time.
49#
50# NOTE: .bashrc sets PORTAGE_USERNAME = $USERNAME, so most people won't
51# encounter this case unless they have an old chroot or blow away the
52# environment by running sudo without the -E specifier.
if "PORTAGE_USERNAME" not in os.environ:
  homedir = os.environ.get("HOME")
  if homedir:
    # basename($HOME) names the original user even when running under sudo
    # (where $USERNAME is "root"); see the block comment above.
    os.environ["PORTAGE_USERNAME"] = os.path.basename(homedir)
57
58# Portage doesn't expose dependency trees in its public API, so we have to
59# make use of some private APIs here. These modules are found under
60# /usr/lib/portage/pym/.
61#
62# TODO(davidjames): Update Portage to expose public APIs for these features.
Don Garrett25f309a2014-03-19 14:02:12 -070063# pylint: disable=F0401
David Jamesfcb70ef2011-02-02 16:02:30 -080064from _emerge.actions import adjust_configs
65from _emerge.actions import load_emerge_config
66from _emerge.create_depgraph_params import create_depgraph_params
David James386ccd12011-05-04 20:17:42 -070067from _emerge.depgraph import backtrack_depgraph
Mike Frysinger901eaad2012-10-10 18:18:03 -040068try:
69 from _emerge.main import clean_logs
70except ImportError:
71 # Older portage versions did not provide clean_logs, so stub it.
72 # We need this if running in an older chroot that hasn't yet upgraded
73 # the portage version.
74 clean_logs = lambda x: None
David Jamesfcb70ef2011-02-02 16:02:30 -080075from _emerge.main import emerge_main
76from _emerge.main import parse_opts
77from _emerge.Package import Package
78from _emerge.Scheduler import Scheduler
David Jamesfcb70ef2011-02-02 16:02:30 -080079from _emerge.stdout_spinner import stdout_spinner
David James386ccd12011-05-04 20:17:42 -070080from portage._global_updates import _global_updates
David Jamesfcb70ef2011-02-02 16:02:30 -080081import portage
82import portage.debug
David James9becf572014-04-25 14:39:05 +000083from portage.versions import vercmp
Don Garrettf8bf7842014-03-20 17:03:42 -070084# pylint: enable=F0401
Mike Frysinger91d7da92013-02-19 15:53:46 -050085
David Jamesfcb70ef2011-02-02 16:02:30 -080086
David Jamesfcb70ef2011-02-02 16:02:30 -080087def Usage():
88 """Print usage."""
89 print "Usage:"
David James386ccd12011-05-04 20:17:42 -070090 print " ./parallel_emerge [--board=BOARD] [--workon=PKGS]"
David Jamesfcb70ef2011-02-02 16:02:30 -080091 print " [--rebuild] [emerge args] package"
92 print
93 print "Packages specified as workon packages are always built from source."
David Jamesfcb70ef2011-02-02 16:02:30 -080094 print
95 print "The --workon argument is mainly useful when you want to build and"
96 print "install packages that you are working on unconditionally, but do not"
97 print "to have to rev the package to indicate you want to build it from"
98 print "source. The build_packages script will automatically supply the"
99 print "workon argument to emerge, ensuring that packages selected using"
100 print "cros-workon are rebuilt."
101 print
102 print "The --rebuild option rebuilds packages whenever their dependencies"
103 print "are changed. This ensures that your build is correct."
David Jamesfcb70ef2011-02-02 16:02:30 -0800104
105
David Jamesfcb70ef2011-02-02 16:02:30 -0800106# Global start time
107GLOBAL_START = time.time()
108
David James7358d032011-05-19 10:40:03 -0700109# Whether process has been killed by a signal.
110KILLED = multiprocessing.Event()
111
David Jamesfcb70ef2011-02-02 16:02:30 -0800112
class EmergeData(object):
  """Plain container bundling the emerge state we pass around as a unit.

  These fields are used when calculating dependencies and installing
  packages. All of them start out as None and are filled in by callers.

  Fields:
    action: The action the user requested. None when installing packages;
      otherwise the long-form name of the emerge action option (e.g.
      calling `parallel_emerge --unmerge package` yields "unmerge").
    cmdline_packages: The list of packages passed on the command line.
    depgraph: The emerge dependency graph, containing every package
      involved in this merge along with its version.
    favorites: The list of candidates to add to the world file.
    opts: A dict of the options passed to emerge, as cleaned up by
      parse_opts. Note that emerge drops defaulted flags entirely (e.g.
      "--usepkg=n" simply never appears in the dict), so presence of a key
      generally means the flag is enabled; a few options such as
      --with-bdeps are passed through as-is. See parse_opts in
      _emerge.main for the full set of cleanups.
    mtimedb: Portage's persistent global-state database, loaded from disk
      at startup and written back via mtimedb.commit(). Portage records
      its in-progress work here to support --resume; parallel_emerge does
      not record its own work, so --resume is unsupported.
    root_config: The portage configuration for the current root,
      containing the settings and the three portage trees for that root.
    scheduler_graph: The graph emerge's Scheduler uses to decide what to
      install. We do not install deps ourselves, but the Scheduler object
      still wants it.
    settings: Portage settings for this session, mostly drawn from
      make.conf inside the current install root.
    spinner: Portage's stdout progress spinner. We keep our own, so this
      one is put in silent mode.
    trees: Per-root portage trees; self.trees[root] (with
      root = self.settings["ROOT"]) holds:
        - vartree: database of currently-installed packages.
        - porttree: database of ebuilds, used to build packages.
        - bintree: database of binary packages.
  """

  __slots__ = ["action", "cmdline_packages", "depgraph", "favorites",
               "mtimedb", "opts", "root_config", "scheduler_graph",
               "settings", "spinner", "trees"]

  def __init__(self):
    # Every slot begins unset; see the class docstring for field meanings.
    for attr in self.__slots__:
      setattr(self, attr, None)
203
204
205class DepGraphGenerator(object):
206 """Grab dependency information about packages from portage.
207
208 Typical usage:
209 deps = DepGraphGenerator()
210 deps.Initialize(sys.argv[1:])
211 deps_tree, deps_info = deps.GenDependencyTree()
212 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
213 deps.PrintTree(deps_tree)
214 PrintDepsMap(deps_graph)
215 """
216
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700217 __slots__ = ["board", "emerge", "package_db", "show_output", "unpack_only"]
David Jamesfcb70ef2011-02-02 16:02:30 -0800218
219 def __init__(self):
220 self.board = None
221 self.emerge = EmergeData()
David Jamesfcb70ef2011-02-02 16:02:30 -0800222 self.package_db = {}
David Jamesfcb70ef2011-02-02 16:02:30 -0800223 self.show_output = False
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700224 self.unpack_only = False
David Jamesfcb70ef2011-02-02 16:02:30 -0800225
226 def ParseParallelEmergeArgs(self, argv):
227 """Read the parallel emerge arguments from the command-line.
228
229 We need to be compatible with emerge arg format. We scrape arguments that
230 are specific to parallel_emerge, and pass through the rest directly to
231 emerge.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500232
David Jamesfcb70ef2011-02-02 16:02:30 -0800233 Args:
234 argv: arguments list
Mike Frysinger1a736a82013-12-12 01:50:59 -0500235
David Jamesfcb70ef2011-02-02 16:02:30 -0800236 Returns:
237 Arguments that don't belong to parallel_emerge
238 """
239 emerge_args = []
240 for arg in argv:
241 # Specifically match arguments that are specific to parallel_emerge, and
242 # pass through the rest.
243 if arg.startswith("--board="):
244 self.board = arg.replace("--board=", "")
245 elif arg.startswith("--workon="):
246 workon_str = arg.replace("--workon=", "")
David James7a1ea4b2011-10-13 15:06:41 -0700247 emerge_args.append("--reinstall-atoms=%s" % workon_str)
248 emerge_args.append("--usepkg-exclude=%s" % workon_str)
David Jamesfcb70ef2011-02-02 16:02:30 -0800249 elif arg.startswith("--force-remote-binary="):
250 force_remote_binary = arg.replace("--force-remote-binary=", "")
David James7a1ea4b2011-10-13 15:06:41 -0700251 emerge_args.append("--useoldpkg-atoms=%s" % force_remote_binary)
David Jamesfcb70ef2011-02-02 16:02:30 -0800252 elif arg == "--show-output":
253 self.show_output = True
David James386ccd12011-05-04 20:17:42 -0700254 elif arg == "--rebuild":
David James7a1ea4b2011-10-13 15:06:41 -0700255 emerge_args.append("--rebuild-if-unbuilt")
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700256 elif arg == "--unpackonly":
257 emerge_args.append("--fetchonly")
258 self.unpack_only = True
David Jamesfcb70ef2011-02-02 16:02:30 -0800259 else:
260 # Not one of our options, so pass through to emerge.
261 emerge_args.append(arg)
262
David James386ccd12011-05-04 20:17:42 -0700263 # These packages take a really long time to build, so, for expediency, we
264 # are blacklisting them from automatic rebuilds because one of their
265 # dependencies needs to be recompiled.
Mike Frysinger5c2a9052014-04-15 15:52:04 -0400266 for pkg in ("chromeos-base/chromeos-chrome",):
David James7a1ea4b2011-10-13 15:06:41 -0700267 emerge_args.append("--rebuild-exclude=%s" % pkg)
David Jamesfcb70ef2011-02-02 16:02:30 -0800268
269 return emerge_args
270
  def Initialize(self, args):
    """Initializer. Parses arguments and sets up portage state.

    Ordering in this method matters: several environment variables must be
    exported before the portage config is loaded, because portage reads
    them at load time.

    Args:
      args: Full command-line argument list, including both
        parallel_emerge-specific flags and flags destined for emerge.
    """

    # Parse and strip out args that are just intended for parallel_emerge.
    emerge_args = self.ParseParallelEmergeArgs(args)

    # Setup various environment variables based on our current board. These
    # variables are normally setup inside emerge-${BOARD}, but since we don't
    # call that script, we have to set it up here. These variables serve to
    # point our tools at /build/BOARD and to setup cross compiles to the
    # appropriate board as configured in toolchain.conf.
    if self.board:
      sysroot = cros_build_lib.GetSysroot(board=self.board)
      os.environ["PORTAGE_CONFIGROOT"] = sysroot
      os.environ["PORTAGE_SYSROOT"] = sysroot
      os.environ["SYSROOT"] = sysroot

    # Although CHROMEOS_ROOT isn't specific to boards, it's normally setup
    # inside emerge-${BOARD}, so we set it up here for compatibility. It
    # will be going away soon as we migrate to CROS_WORKON_SRCROOT.
    os.environ.setdefault("CHROMEOS_ROOT", os.environ["HOME"] + "/trunk")

    # Turn off interactive delays.
    os.environ["EBEEP_IGNORE"] = "1"
    os.environ["EPAUSE_IGNORE"] = "1"
    os.environ["CLEAN_DELAY"] = "0"

    # Parse the emerge options. This first (silent) pass exists only so we
    # can consult opts for --ignore-default-opts below; action, opts and
    # cmdline_packages are recomputed by the second parse_opts call.
    action, opts, cmdline_packages = parse_opts(emerge_args, silent=True)

    # Set environment variables based on options. Portage normally sets these
    # environment variables in emerge_main, but we can't use that function,
    # because it also does a bunch of other stuff that we don't want.
    # TODO(davidjames): Patch portage to move this logic into a function we
    # can reuse here.
    if "--debug" in opts:
      os.environ["PORTAGE_DEBUG"] = "1"
    if "--config-root" in opts:
      os.environ["PORTAGE_CONFIGROOT"] = opts["--config-root"]
    if "--root" in opts:
      os.environ["ROOT"] = opts["--root"]
    if "--accept-properties" in opts:
      os.environ["ACCEPT_PROPERTIES"] = opts["--accept-properties"]

    # If we're installing packages to the board, we can disable vardb locks.
    # This is safe because we only run up to one instance of parallel_emerge
    # in parallel.
    # TODO(davidjames): Enable this for the host too.
    if self.board:
      os.environ.setdefault("PORTAGE_LOCKS", "false")

    # Now that we've setup the necessary environment variables, we can load
    # the emerge config from disk.
    settings, trees, mtimedb = load_emerge_config()

    # Add in EMERGE_DEFAULT_OPTS, if specified, then re-parse everything so
    # the defaults and the explicit args are merged consistently.
    tmpcmdline = []
    if "--ignore-default-opts" not in opts:
      tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
    tmpcmdline.extend(emerge_args)
    action, opts, cmdline_packages = parse_opts(tmpcmdline)

    # If we're installing to the board, we want the --root-deps option so
    # that portage will install the build dependencies to that location as
    # well.
    if self.board:
      opts.setdefault("--root-deps", True)

    # Check whether our portage tree is out of date. Typically, this happens
    # when you're setting up a new portage tree, such as in setup_board and
    # make_chroot. In that case, portage applies a bunch of global updates
    # here. Once the updates are finished, we need to commit any changes
    # that the global update made to our mtimedb, and reload the config.
    #
    # Portage normally handles this logic in emerge_main, but again, we
    # can't use that function here.
    if _global_updates(trees, mtimedb["updates"]):
      mtimedb.commit()
      settings, trees, mtimedb = load_emerge_config(trees=trees)

    # Setup implied options. Portage normally handles this logic in
    # emerge_main.
    if "--buildpkgonly" in opts or "buildpkg" in settings.features:
      opts.setdefault("--buildpkg", True)
    if "--getbinpkgonly" in opts:
      opts.setdefault("--usepkgonly", True)
      opts.setdefault("--getbinpkg", True)
    if "getbinpkg" in settings.features:
      # Per emerge_main, FEATURES=getbinpkg overrides --getbinpkg=n.
      opts["--getbinpkg"] = True
    if "--getbinpkg" in opts or "--usepkgonly" in opts:
      opts.setdefault("--usepkg", True)
    if "--fetch-all-uri" in opts:
      opts.setdefault("--fetchonly", True)
    if "--skipfirst" in opts:
      opts.setdefault("--resume", True)
    if "--buildpkgonly" in opts:
      # --buildpkgonly will not merge anything, so it overrides all binary
      # package options.
      for opt in ("--getbinpkg", "--getbinpkgonly",
                  "--usepkg", "--usepkgonly"):
        opts.pop(opt, None)
    if (settings.get("PORTAGE_DEBUG", "") == "1" and
        "python-trace" in settings.features):
      portage.debug.set_trace(True)

    # Complain about unsupported options. (--resume/--skipfirst can't work
    # because we don't record our progress in the mtimedb; see EmergeData.)
    for opt in ("--ask", "--ask-enter-invalid", "--resume", "--skipfirst"):
      if opt in opts:
        print "%s is not supported by parallel_emerge" % opt
        sys.exit(1)

    # Make emerge specific adjustments to the config (e.g. colors!).
    adjust_configs(opts, trees)

    # Save our configuration so far in the emerge object.
    emerge = self.emerge
    emerge.action, emerge.opts = action, opts
    emerge.settings, emerge.trees, emerge.mtimedb = settings, trees, mtimedb
    emerge.cmdline_packages = cmdline_packages
    root = settings["ROOT"]
    emerge.root_config = trees[root]["root_config"]

    # Load the binary package database up front if binary packages may be
    # used, passing along whether remote binpkgs should be fetched.
    if "--usepkg" in opts:
      emerge.trees[root]["bintree"].populate("--getbinpkg" in opts)
395
David Jamesfcb70ef2011-02-02 16:02:30 -0800396 def CreateDepgraph(self, emerge, packages):
397 """Create an emerge depgraph object."""
398 # Setup emerge options.
399 emerge_opts = emerge.opts.copy()
400
David James386ccd12011-05-04 20:17:42 -0700401 # Ask portage to build a dependency graph. with the options we specified
402 # above.
David Jamesfcb70ef2011-02-02 16:02:30 -0800403 params = create_depgraph_params(emerge_opts, emerge.action)
David Jamesbf1e3442011-05-28 07:44:20 -0700404 success, depgraph, favorites = backtrack_depgraph(
David James386ccd12011-05-04 20:17:42 -0700405 emerge.settings, emerge.trees, emerge_opts, params, emerge.action,
406 packages, emerge.spinner)
407 emerge.depgraph = depgraph
David Jamesfcb70ef2011-02-02 16:02:30 -0800408
David James386ccd12011-05-04 20:17:42 -0700409 # Is it impossible to honor the user's request? Bail!
410 if not success:
411 depgraph.display_problems()
412 sys.exit(1)
David Jamesfcb70ef2011-02-02 16:02:30 -0800413
414 emerge.depgraph = depgraph
David Jamesbf1e3442011-05-28 07:44:20 -0700415 emerge.favorites = favorites
David Jamesfcb70ef2011-02-02 16:02:30 -0800416
David Jamesdeebd692011-05-09 17:02:52 -0700417 # Prime and flush emerge caches.
418 root = emerge.settings["ROOT"]
419 vardb = emerge.trees[root]["vartree"].dbapi
David James0bdc5de2011-05-12 16:22:26 -0700420 if "--pretend" not in emerge.opts:
421 vardb.counter_tick()
David Jamesdeebd692011-05-09 17:02:52 -0700422 vardb.flush_cache()
423
David James386ccd12011-05-04 20:17:42 -0700424 def GenDependencyTree(self):
David Jamesfcb70ef2011-02-02 16:02:30 -0800425 """Get dependency tree info from emerge.
426
David Jamesfcb70ef2011-02-02 16:02:30 -0800427 Returns:
428 Dependency tree
429 """
430 start = time.time()
431
432 emerge = self.emerge
433
434 # Create a list of packages to merge
435 packages = set(emerge.cmdline_packages[:])
David Jamesfcb70ef2011-02-02 16:02:30 -0800436
437 # Tell emerge to be quiet. We print plenty of info ourselves so we don't
438 # need any extra output from portage.
439 portage.util.noiselimit = -1
440
441 # My favorite feature: The silent spinner. It doesn't spin. Ever.
442 # I'd disable the colors by default too, but they look kind of cool.
443 emerge.spinner = stdout_spinner()
444 emerge.spinner.update = emerge.spinner.update_quiet
445
446 if "--quiet" not in emerge.opts:
447 print "Calculating deps..."
448
449 self.CreateDepgraph(emerge, packages)
450 depgraph = emerge.depgraph
451
452 # Build our own tree from the emerge digraph.
453 deps_tree = {}
Don Garrett25f309a2014-03-19 14:02:12 -0700454 # pylint: disable=W0212
David Jamesfcb70ef2011-02-02 16:02:30 -0800455 digraph = depgraph._dynamic_config.digraph
David James3f778802011-08-25 19:31:45 -0700456 root = emerge.settings["ROOT"]
457 final_db = depgraph._dynamic_config.mydbapi[root]
David Jamesfcb70ef2011-02-02 16:02:30 -0800458 for node, node_deps in digraph.nodes.items():
459 # Calculate dependency packages that need to be installed first. Each
460 # child on the digraph is a dependency. The "operation" field specifies
461 # what we're doing (e.g. merge, uninstall, etc.). The "priorities" array
462 # contains the type of dependency (e.g. build, runtime, runtime_post,
463 # etc.)
464 #
David Jamesfcb70ef2011-02-02 16:02:30 -0800465 # Portage refers to the identifiers for packages as a CPV. This acronym
466 # stands for Component/Path/Version.
467 #
468 # Here's an example CPV: chromeos-base/power_manager-0.0.1-r1
469 # Split up, this CPV would be:
470 # C -- Component: chromeos-base
471 # P -- Path: power_manager
472 # V -- Version: 0.0.1-r1
473 #
474 # We just refer to CPVs as packages here because it's easier.
475 deps = {}
476 for child, priorities in node_deps[0].items():
David James3f778802011-08-25 19:31:45 -0700477 if isinstance(child, Package) and child.root == root:
478 cpv = str(child.cpv)
479 action = str(child.operation)
480
481 # If we're uninstalling a package, check whether Portage is
482 # installing a replacement. If so, just depend on the installation
483 # of the new package, because the old package will automatically
484 # be uninstalled at that time.
485 if action == "uninstall":
486 for pkg in final_db.match_pkgs(child.slot_atom):
487 cpv = str(pkg.cpv)
488 action = "merge"
489 break
490
491 deps[cpv] = dict(action=action,
492 deptypes=[str(x) for x in priorities],
493 deps={})
David Jamesfcb70ef2011-02-02 16:02:30 -0800494
495 # We've built our list of deps, so we can add our package to the tree.
David James3f778802011-08-25 19:31:45 -0700496 if isinstance(node, Package) and node.root == root:
David Jamesfcb70ef2011-02-02 16:02:30 -0800497 deps_tree[str(node.cpv)] = dict(action=str(node.operation),
498 deps=deps)
499
David Jamesfcb70ef2011-02-02 16:02:30 -0800500 # Ask portage for its install plan, so that we can only throw out
David James386ccd12011-05-04 20:17:42 -0700501 # dependencies that portage throws out.
David Jamesfcb70ef2011-02-02 16:02:30 -0800502 deps_info = {}
503 for pkg in depgraph.altlist():
504 if isinstance(pkg, Package):
David James3f778802011-08-25 19:31:45 -0700505 assert pkg.root == root
David Jamesfcb70ef2011-02-02 16:02:30 -0800506 self.package_db[pkg.cpv] = pkg
507
David Jamesfcb70ef2011-02-02 16:02:30 -0800508 # Save off info about the package
David James386ccd12011-05-04 20:17:42 -0700509 deps_info[str(pkg.cpv)] = {"idx": len(deps_info)}
David Jamesfcb70ef2011-02-02 16:02:30 -0800510
511 seconds = time.time() - start
512 if "--quiet" not in emerge.opts:
513 print "Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60)
514
515 return deps_tree, deps_info
516
517 def PrintTree(self, deps, depth=""):
518 """Print the deps we have seen in the emerge output.
519
520 Args:
521 deps: Dependency tree structure.
522 depth: Allows printing the tree recursively, with indentation.
523 """
524 for entry in sorted(deps):
525 action = deps[entry]["action"]
526 print "%s %s (%s)" % (depth, entry, action)
527 self.PrintTree(deps[entry]["deps"], depth=depth + " ")
528
David James386ccd12011-05-04 20:17:42 -0700529 def GenDependencyGraph(self, deps_tree, deps_info):
David Jamesfcb70ef2011-02-02 16:02:30 -0800530 """Generate a doubly linked dependency graph.
531
532 Args:
533 deps_tree: Dependency tree structure.
534 deps_info: More details on the dependencies.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500535
David Jamesfcb70ef2011-02-02 16:02:30 -0800536 Returns:
537 Deps graph in the form of a dict of packages, with each package
538 specifying a "needs" list and "provides" list.
539 """
540 emerge = self.emerge
David Jamesfcb70ef2011-02-02 16:02:30 -0800541
David Jamesfcb70ef2011-02-02 16:02:30 -0800542 # deps_map is the actual dependency graph.
543 #
544 # Each package specifies a "needs" list and a "provides" list. The "needs"
545 # list indicates which packages we depend on. The "provides" list
546 # indicates the reverse dependencies -- what packages need us.
547 #
548 # We also provide some other information in the dependency graph:
549 # - action: What we're planning on doing with this package. Generally,
550 # "merge", "nomerge", or "uninstall"
David Jamesfcb70ef2011-02-02 16:02:30 -0800551 deps_map = {}
552
553 def ReverseTree(packages):
554 """Convert tree to digraph.
555
556 Take the tree of package -> requirements and reverse it to a digraph of
557 buildable packages -> packages they unblock.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500558
David Jamesfcb70ef2011-02-02 16:02:30 -0800559 Args:
560 packages: Tree(s) of dependencies.
Mike Frysinger1a736a82013-12-12 01:50:59 -0500561
David Jamesfcb70ef2011-02-02 16:02:30 -0800562 Returns:
563 Unsanitized digraph.
564 """
David James8c7e5e32011-06-28 11:26:03 -0700565 binpkg_phases = set(["setup", "preinst", "postinst"])
David James3f778802011-08-25 19:31:45 -0700566 needed_dep_types = set(["blocker", "buildtime", "runtime"])
David Jamesfcb70ef2011-02-02 16:02:30 -0800567 for pkg in packages:
568
569 # Create an entry for the package
570 action = packages[pkg]["action"]
David James8c7e5e32011-06-28 11:26:03 -0700571 default_pkg = {"needs": {}, "provides": set(), "action": action,
572 "nodeps": False, "binary": False}
David Jamesfcb70ef2011-02-02 16:02:30 -0800573 this_pkg = deps_map.setdefault(pkg, default_pkg)
574
David James8c7e5e32011-06-28 11:26:03 -0700575 if pkg in deps_info:
576 this_pkg["idx"] = deps_info[pkg]["idx"]
577
578 # If a package doesn't have any defined phases that might use the
579 # dependent packages (i.e. pkg_setup, pkg_preinst, or pkg_postinst),
580 # we can install this package before its deps are ready.
581 emerge_pkg = self.package_db.get(pkg)
582 if emerge_pkg and emerge_pkg.type_name == "binary":
583 this_pkg["binary"] = True
David James9becf572014-04-25 14:39:05 +0000584 if 0 <= vercmp(portage.VERSION, "2.1.11.50"):
585 defined_phases = emerge_pkg.defined_phases
586 else:
587 defined_phases = emerge_pkg.metadata.defined_phases
David James8c7e5e32011-06-28 11:26:03 -0700588 defined_binpkg_phases = binpkg_phases.intersection(defined_phases)
589 if not defined_binpkg_phases:
590 this_pkg["nodeps"] = True
591
David Jamesfcb70ef2011-02-02 16:02:30 -0800592 # Create entries for dependencies of this package first.
593 ReverseTree(packages[pkg]["deps"])
594
595 # Add dependencies to this package.
596 for dep, dep_item in packages[pkg]["deps"].iteritems():
David James8c7e5e32011-06-28 11:26:03 -0700597 # We only need to enforce strict ordering of dependencies if the
David James3f778802011-08-25 19:31:45 -0700598 # dependency is a blocker, or is a buildtime or runtime dependency.
599 # (I.e., ignored, optional, and runtime_post dependencies don't
600 # depend on ordering.)
David James8c7e5e32011-06-28 11:26:03 -0700601 dep_types = dep_item["deptypes"]
602 if needed_dep_types.intersection(dep_types):
603 deps_map[dep]["provides"].add(pkg)
604 this_pkg["needs"][dep] = "/".join(dep_types)
David Jamesfcb70ef2011-02-02 16:02:30 -0800605
David James3f778802011-08-25 19:31:45 -0700606 # If there's a blocker, Portage may need to move files from one
607 # package to another, which requires editing the CONTENTS files of
608 # both packages. To avoid race conditions while editing this file,
609 # the two packages must not be installed in parallel, so we can't
610 # safely ignore dependencies. See http://crosbug.com/19328
611 if "blocker" in dep_types:
612 this_pkg["nodeps"] = False
613
David Jamesfcb70ef2011-02-02 16:02:30 -0800614 def FindCycles():
615 """Find cycles in the dependency tree.
616
617 Returns:
618 A dict mapping cyclic packages to a dict of the deps that cause
619 cycles. For each dep that causes cycles, it returns an example
620 traversal of the graph that shows the cycle.
621 """
622
623 def FindCyclesAtNode(pkg, cycles, unresolved, resolved):
624 """Find cycles in cyclic dependencies starting at specified package.
625
626 Args:
627 pkg: Package identifier.
628 cycles: A dict mapping cyclic packages to a dict of the deps that
629 cause cycles. For each dep that causes cycles, it returns an
630 example traversal of the graph that shows the cycle.
631 unresolved: Nodes that have been visited but are not fully processed.
632 resolved: Nodes that have been visited and are fully processed.
633 """
634 pkg_cycles = cycles.get(pkg)
635 if pkg in resolved and not pkg_cycles:
636 # If we already looked at this package, and found no cyclic
637 # dependencies, we can stop now.
638 return
639 unresolved.append(pkg)
640 for dep in deps_map[pkg]["needs"]:
641 if dep in unresolved:
642 idx = unresolved.index(dep)
643 mycycle = unresolved[idx:] + [dep]
David James321490a2012-12-17 12:05:56 -0800644 for i in xrange(len(mycycle) - 1):
David Jamesfcb70ef2011-02-02 16:02:30 -0800645 pkg1, pkg2 = mycycle[i], mycycle[i+1]
646 cycles.setdefault(pkg1, {}).setdefault(pkg2, mycycle)
647 elif not pkg_cycles or dep not in pkg_cycles:
648 # Looks like we haven't seen this edge before.
649 FindCyclesAtNode(dep, cycles, unresolved, resolved)
650 unresolved.pop()
651 resolved.add(pkg)
652
653 cycles, unresolved, resolved = {}, [], set()
654 for pkg in deps_map:
655 FindCyclesAtNode(pkg, cycles, unresolved, resolved)
656 return cycles
657
David James386ccd12011-05-04 20:17:42 -0700658 def RemoveUnusedPackages():
David Jamesfcb70ef2011-02-02 16:02:30 -0800659 """Remove installed packages, propagating dependencies."""
David Jamesfcb70ef2011-02-02 16:02:30 -0800660 # Schedule packages that aren't on the install list for removal
661 rm_pkgs = set(deps_map.keys()) - set(deps_info.keys())
662
David Jamesfcb70ef2011-02-02 16:02:30 -0800663 # Remove the packages we don't want, simplifying the graph and making
664 # it easier for us to crack cycles.
665 for pkg in sorted(rm_pkgs):
666 this_pkg = deps_map[pkg]
667 needs = this_pkg["needs"]
668 provides = this_pkg["provides"]
669 for dep in needs:
670 dep_provides = deps_map[dep]["provides"]
671 dep_provides.update(provides)
672 dep_provides.discard(pkg)
673 dep_provides.discard(dep)
674 for target in provides:
675 target_needs = deps_map[target]["needs"]
676 target_needs.update(needs)
677 target_needs.pop(pkg, None)
678 target_needs.pop(target, None)
679 del deps_map[pkg]
680
681 def PrintCycleBreak(basedep, dep, mycycle):
682 """Print details about a cycle that we are planning on breaking.
683
Mike Frysinger02e1e072013-11-10 22:11:34 -0500684 We are breaking a cycle where dep needs basedep. mycycle is an
685 example cycle which contains dep -> basedep.
686 """
David Jamesfcb70ef2011-02-02 16:02:30 -0800687
David Jamesfcb70ef2011-02-02 16:02:30 -0800688 needs = deps_map[dep]["needs"]
689 depinfo = needs.get(basedep, "deleted")
David Jamesfcb70ef2011-02-02 16:02:30 -0800690
David James3f778802011-08-25 19:31:45 -0700691 # It's OK to swap install order for blockers, as long as the two
692 # packages aren't installed in parallel. If there is a cycle, then
693 # we know the packages depend on each other already, so we can drop the
694 # blocker safely without printing a warning.
695 if depinfo == "blocker":
696 return
697
David Jamesfcb70ef2011-02-02 16:02:30 -0800698 # Notify the user that we're breaking a cycle.
699 print "Breaking %s -> %s (%s)" % (dep, basedep, depinfo)
700
701 # Show cycle.
David James321490a2012-12-17 12:05:56 -0800702 for i in xrange(len(mycycle) - 1):
David Jamesfcb70ef2011-02-02 16:02:30 -0800703 pkg1, pkg2 = mycycle[i], mycycle[i+1]
704 needs = deps_map[pkg1]["needs"]
705 depinfo = needs.get(pkg2, "deleted")
706 if pkg1 == dep and pkg2 == basedep:
707 depinfo = depinfo + ", deleting"
708 print " %s -> %s (%s)" % (pkg1, pkg2, depinfo)
709
710 def SanitizeTree():
711 """Remove circular dependencies.
712
713 We prune all dependencies involved in cycles that go against the emerge
714 ordering. This has a nice property: we're guaranteed to merge
715 dependencies in the same order that portage does.
716
717 Because we don't treat any dependencies as "soft" unless they're killed
718 by a cycle, we pay attention to a larger number of dependencies when
719 merging. This hurts performance a bit, but helps reliability.
720 """
721 start = time.time()
722 cycles = FindCycles()
723 while cycles:
724 for dep, mycycles in cycles.iteritems():
725 for basedep, mycycle in mycycles.iteritems():
726 if deps_info[basedep]["idx"] >= deps_info[dep]["idx"]:
Matt Tennant08797302011-10-17 16:18:45 -0700727 if "--quiet" not in emerge.opts:
728 PrintCycleBreak(basedep, dep, mycycle)
David Jamesfcb70ef2011-02-02 16:02:30 -0800729 del deps_map[dep]["needs"][basedep]
730 deps_map[basedep]["provides"].remove(dep)
731 cycles = FindCycles()
732 seconds = time.time() - start
733 if "--quiet" not in emerge.opts and seconds >= 0.1:
734 print "Tree sanitized in %dm%.1fs" % (seconds / 60, seconds % 60)
735
David James8c7e5e32011-06-28 11:26:03 -0700736 def FindRecursiveProvides(pkg, seen):
737 """Find all nodes that require a particular package.
738
739 Assumes that graph is acyclic.
740
741 Args:
742 pkg: Package identifier.
743 seen: Nodes that have been visited so far.
744 """
745 if pkg in seen:
746 return
747 seen.add(pkg)
748 info = deps_map[pkg]
749 info["tprovides"] = info["provides"].copy()
750 for dep in info["provides"]:
751 FindRecursiveProvides(dep, seen)
752 info["tprovides"].update(deps_map[dep]["tprovides"])
753
David Jamesa22906f2011-05-04 19:53:26 -0700754 ReverseTree(deps_tree)
David Jamesa22906f2011-05-04 19:53:26 -0700755
David James386ccd12011-05-04 20:17:42 -0700756 # We need to remove unused packages so that we can use the dependency
757 # ordering of the install process to show us what cycles to crack.
758 RemoveUnusedPackages()
David Jamesfcb70ef2011-02-02 16:02:30 -0800759 SanitizeTree()
David James8c7e5e32011-06-28 11:26:03 -0700760 seen = set()
761 for pkg in deps_map:
762 FindRecursiveProvides(pkg, seen)
David Jamesfcb70ef2011-02-02 16:02:30 -0800763 return deps_map
764
765 def PrintInstallPlan(self, deps_map):
766 """Print an emerge-style install plan.
767
768 The install plan lists what packages we're installing, in order.
769 It's useful for understanding what parallel_emerge is doing.
770
771 Args:
772 deps_map: The dependency graph.
773 """
774
775 def InstallPlanAtNode(target, deps_map):
776 nodes = []
777 nodes.append(target)
778 for dep in deps_map[target]["provides"]:
779 del deps_map[dep]["needs"][target]
780 if not deps_map[dep]["needs"]:
781 nodes.extend(InstallPlanAtNode(dep, deps_map))
782 return nodes
783
784 deps_map = copy.deepcopy(deps_map)
785 install_plan = []
786 plan = set()
787 for target, info in deps_map.iteritems():
788 if not info["needs"] and target not in plan:
789 for item in InstallPlanAtNode(target, deps_map):
790 plan.add(item)
791 install_plan.append(self.package_db[item])
792
793 for pkg in plan:
794 del deps_map[pkg]
795
796 if deps_map:
797 print "Cyclic dependencies:", " ".join(deps_map)
798 PrintDepsMap(deps_map)
799 sys.exit(1)
800
801 self.emerge.depgraph.display(install_plan)
802
803
804def PrintDepsMap(deps_map):
805 """Print dependency graph, for each package list it's prerequisites."""
806 for i in sorted(deps_map):
807 print "%s: (%s) needs" % (i, deps_map[i]["action"])
808 needs = deps_map[i]["needs"]
809 for j in sorted(needs):
810 print " %s" % (j)
811 if not needs:
812 print " no dependencies"
813
814
class EmergeJobState(object):
  """Snapshot of a single emerge/fetch/unpack job's progress."""

  __slots__ = ["done", "filename", "last_notify_timestamp", "last_output_seek",
               "last_output_timestamp", "pkgname", "retcode", "start_timestamp",
               "target", "fetch_only", "unpack_only"]

  def __init__(self, target, pkgname, done, filename, start_timestamp,
               retcode=None, fetch_only=False, unpack_only=False):
    # Full name of the target we're building (e.g.
    # virtual/target-os-1-r60).
    self.target = target

    # Short name of the target we're building (e.g. target-os-1-r60).
    self.pkgname = pkgname

    # True once the job has finished.
    self.done = done

    # File where the job's output is currently being collected.
    self.filename = filename

    # When the job started.
    self.start_timestamp = start_timestamp

    # Last time we printed the name of the log file. We print this at the
    # start of the job, so it begins at start_timestamp.
    self.last_notify_timestamp = start_timestamp

    # Byte offset of the end of the last complete line we printed. Starts
    # at zero; used to resume at the right place when we print output from
    # the same ebuild multiple times.
    self.last_output_seek = 0

    # Last time we printed output; zero until we print something.
    self.last_output_timestamp = 0

    # The job's return code, if it has actually finished.
    self.retcode = retcode

    # Was this just a fetch job?
    self.fetch_only = fetch_only

    # No emerge, only unpack packages.
    self.unpack_only = unpack_only
863
David Jamesfcb70ef2011-02-02 16:02:30 -0800864
David James321490a2012-12-17 12:05:56 -0800865def KillHandler(_signum, _frame):
David James7358d032011-05-19 10:40:03 -0700866 # Kill self and all subprocesses.
867 os.killpg(0, signal.SIGKILL)
868
David Jamesfcb70ef2011-02-02 16:02:30 -0800869def SetupWorkerSignals():
David James321490a2012-12-17 12:05:56 -0800870 def ExitHandler(_signum, _frame):
David James7358d032011-05-19 10:40:03 -0700871 # Set KILLED flag.
872 KILLED.set()
David James13cead42011-05-18 16:22:01 -0700873
David James7358d032011-05-19 10:40:03 -0700874 # Remove our signal handlers so we don't get called recursively.
875 signal.signal(signal.SIGINT, KillHandler)
876 signal.signal(signal.SIGTERM, KillHandler)
David Jamesfcb70ef2011-02-02 16:02:30 -0800877
878 # Ensure that we exit quietly and cleanly, if possible, when we receive
879 # SIGTERM or SIGINT signals. By default, when the user hits CTRL-C, all
880 # of the child processes will print details about KeyboardInterrupt
881 # exceptions, which isn't very helpful.
882 signal.signal(signal.SIGINT, ExitHandler)
883 signal.signal(signal.SIGTERM, ExitHandler)
884
Mike Frysingerd74fe4a2014-04-24 11:43:38 -0400885
def EmergeProcess(output, target, *args, **kwargs):
  """Merge a package in a subprocess.

  Forks. The child redirects its stdout/stderr into |output|, runs a
  portage Scheduler over the given arguments, and always exits via
  os._exit(). The parent just waits for the child.

  Args:
    output: Temporary file to write output.
    target: The package we'll be processing (for display purposes).
    *args: Arguments to pass to Scheduler constructor.
    **kwargs: Keyword arguments to pass to Scheduler constructor.

  Returns:
    The exit status returned by os.waitpid for the subprocess (the raw
    encoded status word, not a plain exit code).
  """
  pid = os.fork()
  if pid == 0:
    try:
      proctitle.settitle('EmergeProcess', target)

      # Sanity checks.
      if sys.stdout.fileno() != 1:
        raise Exception("sys.stdout.fileno() != 1")
      if sys.stderr.fileno() != 2:
        raise Exception("sys.stderr.fileno() != 2")

      # - Redirect 1 (stdout) and 2 (stderr) at our temporary file.
      # - Redirect 0 to point at sys.stdin. In this case, sys.stdin
      #   points at a file reading os.devnull, because multiprocessing mucks
      #   with sys.stdin.
      # - Leave the sys.stdin and output filehandles alone.
      fd_pipes = {0: sys.stdin.fileno(),
                  1: output.fileno(),
                  2: output.fileno(),
                  sys.stdin.fileno(): sys.stdin.fileno(),
                  output.fileno(): output.fileno()}
      # portage >= 2.1.11.50 accepts close_fds; pass False so the fds we
      # listed above survive into the scheduler.
      if 0 <= vercmp(portage.VERSION, "2.1.11.50"):
        # pylint: disable=W0212
        portage.process._setup_pipes(fd_pipes, close_fds=False)
      else:
        # pylint: disable=W0212
        portage.process._setup_pipes(fd_pipes)

      # Portage doesn't like when sys.stdin.fileno() != 0, so point sys.stdin
      # at the filehandle we just created in _setup_pipes.
      if sys.stdin.fileno() != 0:
        sys.__stdin__ = sys.stdin = os.fdopen(0, "r")

      scheduler = Scheduler(*args, **kwargs)

      # Enable blocker handling even though we're in --nodeps mode. This
      # allows us to unmerge the blocker after we've merged the replacement.
      scheduler._opts_ignore_blockers = frozenset()

      # Actually do the merge.
      retval = scheduler.merge()

    # We catch all exceptions here (including SystemExit, KeyboardInterrupt,
    # etc) so as to ensure that we don't confuse the multiprocessing module,
    # which expects that all forked children exit with os._exit().
    # pylint: disable=W0702
    except:
      traceback.print_exc(file=output)
      retval = 1
    sys.stdout.flush()
    sys.stderr.flush()
    output.flush()
    # pylint: disable=W0212
    os._exit(retval)
  else:
    # Return the exit code of the subprocess.
    return os.waitpid(pid, 0)[1]
David Jamesfcb70ef2011-02-02 16:02:30 -0800955
Thiago Goncalesf4acc422013-07-17 10:26:35 -0700956
def UnpackPackage(pkg_state):
  """Unpacks package described by pkg_state.

  Args:
    pkg_state: EmergeJobState object describing target.

  Returns:
    Exit code returned by subprocess.
  """
  sysroot = os.environ["SYSROOT"]
  pkgdir = os.environ.get("PKGDIR", os.path.join(sysroot, "packages"))
  root = os.environ.get("ROOT", sysroot)
  tarball = os.path.join(pkgdir, pkg_state.target + ".tbz2")

  # Decompress the binary package to stdout.
  comp = cros_build_lib.FindCompressor(cros_build_lib.COMP_BZIP2)
  cmd = [comp, "-dc"]
  if comp.endswith("pbzip2"):
    # NOTE(review): the .tbz2 appears to carry trailing data after the
    # bzip2 stream; tell pbzip2 not to treat it as an error.
    cmd.append("--ignore-trailing-garbage=1")
  cmd.append(tarball)
  result = cros_build_lib.RunCommand(cmd, cwd=root, stdout_to_pipe=True,
                                     print_cmd=False, error_code_ok=True)

  # If we were not successful, return now and don't attempt untar.
  if result.returncode:
    return result.returncode

  # Extract the decompressed tarball into the root filesystem.
  untar = ["sudo", "tar", "-xf", "-", "-C", root]
  result = cros_build_lib.RunCommand(untar, cwd=root, input=result.output,
                                     print_cmd=False, error_code_ok=True)
  return result.returncode
988
989
def EmergeWorker(task_queue, job_queue, emerge, package_db, fetch_only=False,
                 unpack_only=False):
  """This worker emerges any packages given to it on the task_queue.

  It expects package identifiers to be passed to it via task_queue. When
  a task is started, it pushes the (target, filename) to the started_queue.
  The output is stored in filename. When a merge starts or finishes, we push
  EmergeJobState objects to the job_queue.

  Args:
    task_queue: The queue of tasks for this worker to do.
    job_queue: The queue of results from the worker.
    emerge: An EmergeData() object.
    package_db: A dict, mapping package ids to portage Package objects.
    fetch_only: A bool, indicating if we should just fetch the target.
    unpack_only: A bool, indicating if we should just unpack the target.
  """
  if fetch_only:
    mode = 'fetch'
  elif unpack_only:
    mode = 'unpack'
  else:
    mode = 'emerge'
  proctitle.settitle('EmergeWorker', mode, '[idle]')

  SetupWorkerSignals()
  settings, trees, mtimedb = emerge.settings, emerge.trees, emerge.mtimedb

  # Disable flushing of caches to save on I/O.
  root = emerge.settings["ROOT"]
  vardb = emerge.trees[root]["vartree"].dbapi
  vardb._flush_cache_enabled = False
  bindb = emerge.trees[root]["bintree"].dbapi
  # Might be a set, might be a list, might be None; no clue, just use shallow
  # copy to ensure we can roll it back.
  # pylint: disable=W0212
  original_remotepkgs = copy.copy(bindb.bintree._remotepkgs)

  opts, spinner = emerge.opts, emerge.spinner
  opts["--nodeps"] = True
  if fetch_only:
    opts["--fetchonly"] = True

  while True:
    # Wait for a new item to show up on the queue. This is a blocking wait,
    # so if there's nothing to do, we just sit here.
    pkg_state = task_queue.get()
    if pkg_state is None:
      # If target is None, this means that the main thread wants us to quit.
      # The other workers need to exit too, so we'll push the message back on
      # to the queue so they'll get it too.
      task_queue.put(None)
      return
    if KILLED.is_set():
      return

    target = pkg_state.target
    proctitle.settitle('EmergeWorker', mode, target)

    db_pkg = package_db[target]

    if db_pkg.type_name == "binary":
      if not fetch_only and pkg_state.fetched_successfully:
        # Ensure portage doesn't think our pkg is remote- else it'll force
        # a redownload of it (even if the on-disk file is fine). In-memory
        # caching basically, implemented dumbly.
        bindb.bintree._remotepkgs = None
      else:
        # Roll the remote package list back to the original. (This used to
        # assign to a misspelled "bintree_remotepkgs" attribute, so the
        # rollback never actually happened.)
        bindb.bintree._remotepkgs = original_remotepkgs

    db_pkg.root_config = emerge.root_config
    install_list = [db_pkg]
    pkgname = db_pkg.pf
    output = tempfile.NamedTemporaryFile(prefix=pkgname + "-", delete=False)
    # Make the log world-readable. The previous code passed decimal 644
    # (i.e. mode 0o1204) by mistake; the intended permission is octal 0644.
    os.chmod(output.name, 0o644)
    start_timestamp = time.time()
    job = EmergeJobState(target, pkgname, False, output.name, start_timestamp,
                         fetch_only=fetch_only, unpack_only=unpack_only)
    job_queue.put(job)
    if "--pretend" in opts:
      retcode = 0
    else:
      try:
        emerge.scheduler_graph.mergelist = install_list
        if unpack_only:
          retcode = UnpackPackage(pkg_state)
        else:
          retcode = EmergeProcess(output, target, settings, trees, mtimedb,
                                  opts, spinner, favorites=emerge.favorites,
                                  graph_config=emerge.scheduler_graph)
      except Exception:
        traceback.print_exc(file=output)
        retcode = 1
      output.close()

    if KILLED.is_set():
      return

    job = EmergeJobState(target, pkgname, True, output.name, start_timestamp,
                         retcode, fetch_only=fetch_only,
                         unpack_only=unpack_only)
    job_queue.put(job)

    # Set the title back to idle as the multiprocess pool won't destroy us;
    # when another job comes up, it'll re-use this process.
    proctitle.settitle('EmergeWorker', mode, '[idle]')
1096
David Jamesfcb70ef2011-02-02 16:02:30 -08001097
1098class LinePrinter(object):
1099 """Helper object to print a single line."""
1100
1101 def __init__(self, line):
1102 self.line = line
1103
David James321490a2012-12-17 12:05:56 -08001104 def Print(self, _seek_locations):
David Jamesfcb70ef2011-02-02 16:02:30 -08001105 print self.line
1106
1107
class JobPrinter(object):
  """Helper object to print output of a job."""

  def __init__(self, job, unlink=False):
    """Print output of job.

    If unlink is True, unlink the job output file when done.
    """
    # Timestamp used both for the runtime display and for recording when
    # this job's output was last shown.
    self.current_time = time.time()
    self.job = job
    self.unlink = unlink

  def Print(self, seek_locations):
    """Print the job's output accumulated since the previous call.

    Args:
      seek_locations: Dict mapping log filenames to the byte offset of the
        last complete line already printed; updated in place so repeated
        calls resume where the previous one stopped.
    """
    job = self.job

    # Calculate how long the job has been running.
    seconds = self.current_time - job.start_timestamp

    # Note that we've printed out the job so far.
    job.last_output_timestamp = self.current_time

    # Note that we're starting the job
    info = "job %s (%dm%.1fs)" % (job.pkgname, seconds / 60, seconds % 60)
    last_output_seek = seek_locations.get(job.filename, 0)
    if last_output_seek:
      print "=== Continue output for %s ===" % info
    else:
      print "=== Start output for %s ===" % info

    # Print actual output from job
    f = codecs.open(job.filename, encoding='utf-8', errors='replace')
    f.seek(last_output_seek)
    prefix = job.pkgname + ":"
    for line in f:

      # Save off our position in the file. We only advance past complete
      # lines, so a partially written final line gets re-printed next time.
      if line and line[-1] == "\n":
        last_output_seek = f.tell()
        line = line[:-1]

      # Print our line
      print prefix, line.encode('utf-8', 'replace')
    f.close()

    # Save our last spot in the file so that we don't print out the same
    # location twice.
    seek_locations[job.filename] = last_output_seek

    # Note end of output section
    if job.done:
      print "=== Complete: %s ===" % info
    else:
      print "=== Still running: %s ===" % info

    if self.unlink:
      os.unlink(job.filename)
1165
1166
def PrintWorker(queue):
  """A worker that prints stuff to the screen as requested."""
  proctitle.settitle('PrintWorker')

  def ExitHandler(_signum, _frame):
    # Record the kill request for the rest of the program.
    KILLED.set()
    # From here on, another SIGINT/SIGTERM kills us outright.
    signal.signal(signal.SIGINT, KillHandler)
    signal.signal(signal.SIGTERM, KillHandler)

  # Don't exit on the first SIGINT / SIGTERM, because the parent worker will
  # handle it and tell us when we need to exit.
  for sig in (signal.SIGINT, signal.SIGTERM):
    signal.signal(sig, ExitHandler)

  # Tracks how far we've printed into each job's log file, so repeated
  # prints of the same file resume where the last one stopped.
  seek_locations = {}
  while True:
    try:
      item = queue.get()
      if not item:
        # A None sentinel means it's time to shut down.
        break
      item.Print(seek_locations)
      sys.stdout.flush()
    except IOError as ex:
      if ex.errno != errno.EINTR:
        raise
      # We were interrupted by a signal; keep printing.
1201
Brian Harring867e2362012-03-17 04:05:17 -07001202
Brian Harring0be85c62012-03-17 19:52:12 -07001203class TargetState(object):
Don Garrett25f309a2014-03-19 14:02:12 -07001204 """Structure descriting the TargetState."""
Brian Harring867e2362012-03-17 04:05:17 -07001205
Brian Harring0be85c62012-03-17 19:52:12 -07001206 __slots__ = ("target", "info", "score", "prefetched", "fetched_successfully")
Brian Harring867e2362012-03-17 04:05:17 -07001207
David James321490a2012-12-17 12:05:56 -08001208 def __init__(self, target, info):
Brian Harring867e2362012-03-17 04:05:17 -07001209 self.target, self.info = target, info
Brian Harring0be85c62012-03-17 19:52:12 -07001210 self.fetched_successfully = False
1211 self.prefetched = False
David James321490a2012-12-17 12:05:56 -08001212 self.score = None
Brian Harring867e2362012-03-17 04:05:17 -07001213 self.update_score()
1214
1215 def __cmp__(self, other):
1216 return cmp(self.score, other.score)
1217
1218 def update_score(self):
1219 self.score = (
1220 -len(self.info["tprovides"]),
Brian Harring0be85c62012-03-17 19:52:12 -07001221 len(self.info["needs"]),
Brian Harring11c5eeb2012-03-18 11:02:39 -07001222 not self.info["binary"],
Brian Harring867e2362012-03-17 04:05:17 -07001223 -len(self.info["provides"]),
1224 self.info["idx"],
1225 self.target,
1226 )
1227
1228
class ScoredHeap(object):
  """Min-heap of TargetState objects with O(1) membership testing."""

  __slots__ = ("heap", "_heap_set")

  def __init__(self, initial=()):
    # heap holds the items themselves; _heap_set mirrors their .target
    # names so __contains__ is a set lookup rather than a heap scan.
    self.heap = []
    self._heap_set = set()
    if initial:
      self.multi_put(initial)

  def get(self):
    """Pop and return the lowest-scored item."""
    item = heapq.heappop(self.heap)
    self._heap_set.remove(item.target)
    return item

  def put(self, item):
    """Push a single TargetState onto the heap."""
    if not isinstance(item, TargetState):
      raise ValueError("Item %r isn't a TargetState" % (item,))
    heapq.heappush(self.heap, item)
    self._heap_set.add(item.target)

  def multi_put(self, sequence):
    """Add a batch of items at once, then restore the heap invariant."""
    sequence = list(sequence)
    self.heap.extend(sequence)
    self._heap_set.update(x.target for x in sequence)
    self.sort()

  def sort(self):
    heapq.heapify(self.heap)

  def __contains__(self, target):
    return target in self._heap_set

  def __nonzero__(self):
    return bool(self.heap)

  def __len__(self):
    return len(self.heap)
1268
1269
David Jamesfcb70ef2011-02-02 16:02:30 -08001270class EmergeQueue(object):
1271 """Class to schedule emerge jobs according to a dependency graph."""
1272
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001273 def __init__(self, deps_map, emerge, package_db, show_output, unpack_only):
David Jamesfcb70ef2011-02-02 16:02:30 -08001274 # Store the dependency graph.
1275 self._deps_map = deps_map
Brian Harring0be85c62012-03-17 19:52:12 -07001276 self._state_map = {}
David Jamesfcb70ef2011-02-02 16:02:30 -08001277 # Initialize the running queue to empty
Brian Harring0be85c62012-03-17 19:52:12 -07001278 self._build_jobs = {}
1279 self._build_ready = ScoredHeap()
1280 self._fetch_jobs = {}
1281 self._fetch_ready = ScoredHeap()
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001282 self._unpack_jobs = {}
1283 self._unpack_ready = ScoredHeap()
David Jamesfcb70ef2011-02-02 16:02:30 -08001284 # List of total package installs represented in deps_map.
1285 install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"]
1286 self._total_jobs = len(install_jobs)
1287 self._show_output = show_output
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001288 self._unpack_only = unpack_only
David Jamesfcb70ef2011-02-02 16:02:30 -08001289
1290 if "--pretend" in emerge.opts:
1291 print "Skipping merge because of --pretend mode."
1292 sys.exit(0)
1293
David Jamesaaf49e42014-04-24 09:40:05 -07001294 # Set up a session so we can easily terminate all children.
1295 self._SetupSession()
David James7358d032011-05-19 10:40:03 -07001296
David Jamesfcb70ef2011-02-02 16:02:30 -08001297 # Setup scheduler graph object. This is used by the child processes
1298 # to help schedule jobs.
1299 emerge.scheduler_graph = emerge.depgraph.schedulerGraph()
1300
1301 # Calculate how many jobs we can run in parallel. We don't want to pass
1302 # the --jobs flag over to emerge itself, because that'll tell emerge to
1303 # hide its output, and said output is quite useful for debugging hung
1304 # jobs.
1305 procs = min(self._total_jobs,
1306 emerge.opts.pop("--jobs", multiprocessing.cpu_count()))
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001307 self._build_procs = self._unpack_procs = self._fetch_procs = max(1, procs)
David James8c7e5e32011-06-28 11:26:03 -07001308 self._load_avg = emerge.opts.pop("--load-average", None)
David Jamesfcb70ef2011-02-02 16:02:30 -08001309 self._job_queue = multiprocessing.Queue()
1310 self._print_queue = multiprocessing.Queue()
Brian Harring0be85c62012-03-17 19:52:12 -07001311
1312 self._fetch_queue = multiprocessing.Queue()
1313 args = (self._fetch_queue, self._job_queue, emerge, package_db, True)
1314 self._fetch_pool = multiprocessing.Pool(self._fetch_procs, EmergeWorker,
1315 args)
1316
1317 self._build_queue = multiprocessing.Queue()
1318 args = (self._build_queue, self._job_queue, emerge, package_db)
1319 self._build_pool = multiprocessing.Pool(self._build_procs, EmergeWorker,
1320 args)
1321
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001322 if self._unpack_only:
1323 # Unpack pool only required on unpack_only jobs.
1324 self._unpack_queue = multiprocessing.Queue()
1325 args = (self._unpack_queue, self._job_queue, emerge, package_db, False,
1326 True)
1327 self._unpack_pool = multiprocessing.Pool(self._unpack_procs, EmergeWorker,
1328 args)
1329
David Jamesfcb70ef2011-02-02 16:02:30 -08001330 self._print_worker = multiprocessing.Process(target=PrintWorker,
1331 args=[self._print_queue])
1332 self._print_worker.start()
1333
1334 # Initialize the failed queue to empty.
1335 self._retry_queue = []
1336 self._failed = set()
1337
David Jamesfcb70ef2011-02-02 16:02:30 -08001338 # Setup an exit handler so that we print nice messages if we are
1339 # terminated.
1340 self._SetupExitHandler()
1341
1342 # Schedule our jobs.
Brian Harring0be85c62012-03-17 19:52:12 -07001343 self._state_map.update(
1344 (pkg, TargetState(pkg, data)) for pkg, data in deps_map.iteritems())
1345 self._fetch_ready.multi_put(self._state_map.itervalues())
David Jamesfcb70ef2011-02-02 16:02:30 -08001346
David Jamesaaf49e42014-04-24 09:40:05 -07001347 def _SetupSession(self):
1348 """Set up a session so we can easily terminate all children."""
1349 # When we call os.setsid(), this sets up a session / process group for this
1350 # process and all children. These session groups are needed so that we can
1351 # easily kill all children (including processes launched by emerge) before
1352 # we exit.
1353 #
1354 # One unfortunate side effect of os.setsid() is that it blocks CTRL-C from
1355 # being received. To work around this, we only call os.setsid() in a forked
1356 # process, so that the parent can still watch for CTRL-C. The parent will
1357 # just sit around, watching for signals and propagating them to the child,
1358 # until the child exits.
1359 #
1360 # TODO(davidjames): It would be nice if we could replace this with cgroups.
1361 pid = os.fork()
1362 if pid == 0:
1363 os.setsid()
1364 else:
Mike Frysingerd74fe4a2014-04-24 11:43:38 -04001365 proctitle.settitle('SessionManager')
1366
David Jamesaaf49e42014-04-24 09:40:05 -07001367 def PropagateToChildren(signum, _frame):
1368 # Just propagate the signals down to the child. We'll exit when the
1369 # child does.
1370 try:
1371 os.kill(pid, signum)
1372 except OSError as ex:
1373 if ex.errno != errno.ESRCH:
1374 raise
1375 signal.signal(signal.SIGINT, PropagateToChildren)
1376 signal.signal(signal.SIGTERM, PropagateToChildren)
1377
1378 def StopGroup(_signum, _frame):
1379 # When we get stopped, stop the children.
1380 try:
1381 os.killpg(pid, signal.SIGSTOP)
1382 os.kill(0, signal.SIGSTOP)
1383 except OSError as ex:
1384 if ex.errno != errno.ESRCH:
1385 raise
1386 signal.signal(signal.SIGTSTP, StopGroup)
1387
1388 def ContinueGroup(_signum, _frame):
1389 # Launch the children again after being stopped.
1390 try:
1391 os.killpg(pid, signal.SIGCONT)
1392 except OSError as ex:
1393 if ex.errno != errno.ESRCH:
1394 raise
1395 signal.signal(signal.SIGCONT, ContinueGroup)
1396
1397 # Loop until the children exit. We exit with os._exit to be sure we
1398 # don't run any finalizers (those will be run by the child process.)
1399 # pylint: disable=W0212
1400 while True:
1401 try:
1402 # Wait for the process to exit. When it does, exit with the return
1403 # value of the subprocess.
1404 os._exit(osutils.GetExitStatus(os.waitpid(pid, 0)[1]))
1405 except OSError as ex:
1406 if ex.errno == errno.EINTR:
1407 continue
1408 traceback.print_exc()
1409 os._exit(1)
1410 except BaseException:
1411 traceback.print_exc()
1412 os._exit(1)
1413
  def _SetupExitHandler(self):
    """Install SIGINT/SIGTERM handlers that dump job status and kill children."""

    def ExitHandler(signum, _frame):
      # Set KILLED flag so other threads/workers know we are shutting down.
      KILLED.set()

      # Kill our signal handlers so we don't get called recursively
      signal.signal(signal.SIGINT, KillHandler)
      signal.signal(signal.SIGTERM, KillHandler)

      # Print our current job status
      for job in self._build_jobs.itervalues():
        if job:
          # unlink=True: the printer removes the job's log file after dumping.
          self._print_queue.put(JobPrinter(job, unlink=True))

      # Notify the user that we are exiting
      self._Print("Exiting on signal %s" % signum)
      # A None sentinel tells the print worker to drain and stop; join so the
      # output actually reaches the terminal before we kill everything.
      self._print_queue.put(None)
      self._print_worker.join()

      # Kill child threads, then exit. killpg(0, ...) signals our entire
      # process group (set up in _SetupSession).
      os.killpg(0, signal.SIGKILL)
      sys.exit(1)

    # Print out job status when we are killed
    signal.signal(signal.SIGINT, ExitHandler)
    signal.signal(signal.SIGTERM, ExitHandler)
1441
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001442 def _ScheduleUnpack(self, pkg_state):
1443 self._unpack_jobs[pkg_state.target] = None
1444 self._unpack_queue.put(pkg_state)
1445
Brian Harring0be85c62012-03-17 19:52:12 -07001446 def _Schedule(self, pkg_state):
David Jamesfcb70ef2011-02-02 16:02:30 -08001447 # We maintain a tree of all deps, if this doesn't need
David James8c7e5e32011-06-28 11:26:03 -07001448 # to be installed just free up its children and continue.
David Jamesfcb70ef2011-02-02 16:02:30 -08001449 # It is possible to reinstall deps of deps, without reinstalling
1450 # first level deps, like so:
Mike Frysingerfd969312014-04-02 22:16:42 -04001451 # virtual/target-os (merge) -> eselect (nomerge) -> python (merge)
Brian Harring0be85c62012-03-17 19:52:12 -07001452 this_pkg = pkg_state.info
1453 target = pkg_state.target
1454 if pkg_state.info is not None:
1455 if this_pkg["action"] == "nomerge":
1456 self._Finish(target)
1457 elif target not in self._build_jobs:
1458 # Kick off the build if it's marked to be built.
1459 self._build_jobs[target] = None
1460 self._build_queue.put(pkg_state)
1461 return True
David Jamesfcb70ef2011-02-02 16:02:30 -08001462
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001463 def _ScheduleLoop(self, unpack_only=False):
1464 if unpack_only:
1465 ready_queue = self._unpack_ready
1466 jobs_queue = self._unpack_jobs
1467 procs = self._unpack_procs
1468 else:
1469 ready_queue = self._build_ready
1470 jobs_queue = self._build_jobs
1471 procs = self._build_procs
1472
David James8c7e5e32011-06-28 11:26:03 -07001473 # If the current load exceeds our desired load average, don't schedule
1474 # more than one job.
1475 if self._load_avg and os.getloadavg()[0] > self._load_avg:
1476 needed_jobs = 1
1477 else:
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001478 needed_jobs = procs
David James8c7e5e32011-06-28 11:26:03 -07001479
1480 # Schedule more jobs.
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001481 while ready_queue and len(jobs_queue) < needed_jobs:
1482 state = ready_queue.get()
1483 if unpack_only:
1484 self._ScheduleUnpack(state)
1485 else:
1486 if state.target not in self._failed:
1487 self._Schedule(state)
David Jamesfcb70ef2011-02-02 16:02:30 -08001488
1489 def _Print(self, line):
1490 """Print a single line."""
1491 self._print_queue.put(LinePrinter(line))
1492
1493 def _Status(self):
1494 """Print status."""
1495 current_time = time.time()
1496 no_output = True
1497
1498 # Print interim output every minute if --show-output is used. Otherwise,
1499 # print notifications about running packages every 2 minutes, and print
1500 # full output for jobs that have been running for 60 minutes or more.
1501 if self._show_output:
1502 interval = 60
1503 notify_interval = 0
1504 else:
1505 interval = 60 * 60
1506 notify_interval = 60 * 2
David James321490a2012-12-17 12:05:56 -08001507 for job in self._build_jobs.itervalues():
David Jamesfcb70ef2011-02-02 16:02:30 -08001508 if job:
1509 last_timestamp = max(job.start_timestamp, job.last_output_timestamp)
1510 if last_timestamp + interval < current_time:
1511 self._print_queue.put(JobPrinter(job))
1512 job.last_output_timestamp = current_time
1513 no_output = False
1514 elif (notify_interval and
1515 job.last_notify_timestamp + notify_interval < current_time):
1516 job_seconds = current_time - job.start_timestamp
1517 args = (job.pkgname, job_seconds / 60, job_seconds % 60, job.filename)
1518 info = "Still building %s (%dm%.1fs). Logs in %s" % args
1519 job.last_notify_timestamp = current_time
1520 self._Print(info)
1521 no_output = False
1522
1523 # If we haven't printed any messages yet, print a general status message
1524 # here.
1525 if no_output:
1526 seconds = current_time - GLOBAL_START
Brian Harring0be85c62012-03-17 19:52:12 -07001527 fjobs, fready = len(self._fetch_jobs), len(self._fetch_ready)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001528 ujobs, uready = len(self._unpack_jobs), len(self._unpack_ready)
Brian Harring0be85c62012-03-17 19:52:12 -07001529 bjobs, bready = len(self._build_jobs), len(self._build_ready)
1530 retries = len(self._retry_queue)
1531 pending = max(0, len(self._deps_map) - fjobs - bjobs)
1532 line = "Pending %s/%s, " % (pending, self._total_jobs)
1533 if fjobs or fready:
1534 line += "Fetching %s/%s, " % (fjobs, fready + fjobs)
Thiago Goncalesf4acc422013-07-17 10:26:35 -07001535 if ujobs or uready:
1536 line += "Unpacking %s/%s, " % (ujobs, uready + ujobs)
Brian Harring0be85c62012-03-17 19:52:12 -07001537 if bjobs or bready or retries:
1538 line += "Building %s/%s, " % (bjobs, bready + bjobs)
1539 if retries:
1540 line += "Retrying %s, " % (retries,)
David James8c7e5e32011-06-28 11:26:03 -07001541 load = " ".join(str(x) for x in os.getloadavg())
Brian Harring0be85c62012-03-17 19:52:12 -07001542 line += ("[Time %dm%.1fs Load %s]" % (seconds/60, seconds %60, load))
1543 self._Print(line)
David Jamesfcb70ef2011-02-02 16:02:30 -08001544
  def _Finish(self, target):
    """Mark a target as completed and unblock dependencies.

    If the target was merged with --nodeps before its own deps finished, it
    is demoted to a "nomerge" placeholder instead, so dependents wait until
    the full need-set is satisfied. Otherwise each dependent has this target
    removed from its needs and, once nothing is left, is either recursively
    finished (nomerge placeholders) or queued for building.
    """
    this_pkg = self._deps_map[target]
    if this_pkg["needs"] and this_pkg["nodeps"]:
      # We got installed, but our deps have not been installed yet. Dependent
      # packages should only be installed when our needs have been fully met.
      this_pkg["action"] = "nomerge"
    else:
      for dep in this_pkg["provides"]:
        dep_pkg = self._deps_map[dep]
        state = self._state_map[dep]
        del dep_pkg["needs"][target]
        # Fewer outstanding needs changes the package's priority score.
        state.update_score()
        if not state.prefetched:
          if dep in self._fetch_ready:
            # If it's not currently being fetched, update the prioritization
            self._fetch_ready.sort()
        elif not dep_pkg["needs"]:
          if dep_pkg["nodeps"] and dep_pkg["action"] == "nomerge":
            # Placeholder created above once its binary was merged early;
            # finishing it cascades to its own dependents.
            self._Finish(dep)
          else:
            self._build_ready.put(self._state_map[dep])
      # Target is fully done; drop it from the dependency graph.
      self._deps_map.pop(target)
David Jamesfcb70ef2011-02-02 16:02:30 -08001568
1569 def _Retry(self):
David James8c7e5e32011-06-28 11:26:03 -07001570 while self._retry_queue:
Brian Harring0be85c62012-03-17 19:52:12 -07001571 state = self._retry_queue.pop(0)
1572 if self._Schedule(state):
1573 self._Print("Retrying emerge of %s." % state.target)
David James8c7e5e32011-06-28 11:26:03 -07001574 break
David Jamesfcb70ef2011-02-02 16:02:30 -08001575
  def _Shutdown(self):
    """Shut down all worker pools and the print worker.

    Ordering matters: workers are stopped first, then the job queue they
    feed results back through, and finally the print queue so any last
    output still gets flushed.
    """
    # Tell emerge workers to exit. They all exit when 'None' is pushed
    # to the queue.

    # Shutdown the workers first; then jobs (which is how they feed things back)
    # then finally the print queue.

    def _stop(queue, pool):
      # Stop one pool: send the None sentinel, then close/join; terminate in
      # the finally so workers die even if close/join raises.
      if pool is None:
        return
      try:
        queue.put(None)
        pool.close()
        pool.join()
      finally:
        pool.terminate()

    _stop(self._fetch_queue, self._fetch_pool)
    self._fetch_queue = self._fetch_pool = None

    _stop(self._build_queue, self._build_pool)
    self._build_queue = self._build_pool = None

    if self._unpack_only:
      _stop(self._unpack_queue, self._unpack_pool)
      self._unpack_queue = self._unpack_pool = None

    if self._job_queue is not None:
      self._job_queue.close()
      self._job_queue = None

    # Now that our workers are finished, we can kill the print queue.
    if self._print_worker is not None:
      try:
        self._print_queue.put(None)
        self._print_queue.close()
        self._print_worker.join()
      finally:
        self._print_worker.terminate()
    self._print_queue = self._print_worker = None
David Jamesfcb70ef2011-02-02 16:02:30 -08001616
  def Run(self):
    """Run through the scheduled ebuilds.

    Keep running so long as we have uninstalled packages in the
    dependency graph to merge.
    """
    if not self._deps_map:
      return

    # Start the fetchers.
    for _ in xrange(min(self._fetch_procs, len(self._fetch_ready))):
      state = self._fetch_ready.get()
      self._fetch_jobs[state.target] = None
      self._fetch_queue.put(state)

    # Print an update, then get going.
    self._Status()

    # Targets that failed once and were queued for a retry; reported at end.
    retried = set()
    while self._deps_map:
      # Check here that we are actually waiting for something.
      if (self._build_queue.empty() and
          self._job_queue.empty() and
          not self._fetch_jobs and
          not self._fetch_ready and
          not self._unpack_jobs and
          not self._unpack_ready and
          not self._build_jobs and
          not self._build_ready and
          self._deps_map):
        # If we have failed on a package, retry it now.
        if self._retry_queue:
          self._Retry()
        else:
          # Nothing queued, nothing running, yet deps remain: we're stuck.
          # Tell the user why we're exiting.
          if self._failed:
            print 'Packages failed:\n\t%s' % '\n\t'.join(self._failed)
            status_file = os.environ.get("PARALLEL_EMERGE_STATUS_FILE")
            if status_file:
              failed_pkgs = set(portage.versions.cpv_getkey(x)
                                for x in self._failed)
              with open(status_file, "a") as f:
                f.write("%s\n" % " ".join(failed_pkgs))
          else:
            print "Deadlock! Circular dependencies!"
          sys.exit(1)

      # Poll for a finished/started job, rescheduling between 5s timeouts.
      # 12 x 5s = one status update per minute when idle.
      for _ in xrange(12):
        try:
          job = self._job_queue.get(timeout=5)
          break
        except Queue.Empty:
          # Check if any more jobs can be scheduled.
          self._ScheduleLoop()
      else:
        # Print an update every 60 seconds.
        self._Status()
        continue

      target = job.target

      if job.fetch_only:
        if not job.done:
          # Fetch just started; remember the live Job for status printing.
          self._fetch_jobs[job.target] = job
        else:
          state = self._state_map[job.target]
          state.prefetched = True
          state.fetched_successfully = (job.retcode == 0)
          del self._fetch_jobs[job.target]
          self._Print("Fetched %s in %2.2fs"
                      % (target, time.time() - job.start_timestamp))

          if self._show_output or job.retcode != 0:
            self._print_queue.put(JobPrinter(job, unlink=True))
          else:
            os.unlink(job.filename)
          # Failure or not, let build work with it next.
          if not self._deps_map[job.target]["needs"]:
            self._build_ready.put(state)
            self._ScheduleLoop()

          if self._unpack_only and job.retcode == 0:
            self._unpack_ready.put(state)
            self._ScheduleLoop(unpack_only=True)

          # Refill the freed fetcher slot.
          if self._fetch_ready:
            state = self._fetch_ready.get()
            self._fetch_queue.put(state)
            self._fetch_jobs[state.target] = None
          else:
            # Minor optimization; shut down fetchers early since we know
            # the queue is empty.
            self._fetch_queue.put(None)
        continue

      if job.unpack_only:
        if not job.done:
          self._unpack_jobs[target] = job
        else:
          del self._unpack_jobs[target]
          self._Print("Unpacked %s in %2.2fs"
                      % (target, time.time() - job.start_timestamp))
          if self._show_output or job.retcode != 0:
            self._print_queue.put(JobPrinter(job, unlink=True))
          else:
            os.unlink(job.filename)
          # Refill the freed unpacker slot.
          if self._unpack_ready:
            state = self._unpack_ready.get()
            self._unpack_queue.put(state)
            self._unpack_jobs[state.target] = None
        continue

      # From here on the job is a build job.
      if not job.done:
        self._build_jobs[target] = job
        self._Print("Started %s (logged in %s)" % (target, job.filename))
        continue

      # Print output of job
      if self._show_output or job.retcode != 0:
        self._print_queue.put(JobPrinter(job, unlink=True))
      else:
        os.unlink(job.filename)
      del self._build_jobs[target]

      seconds = time.time() - job.start_timestamp
      details = "%s (in %dm%.1fs)" % (target, seconds / 60, seconds % 60)
      previously_failed = target in self._failed

      # Complain if necessary.
      if job.retcode != 0:
        # Handle job failure.
        if previously_failed:
          # If this job has failed previously, give up.
          self._Print("Failed %s. Your build has failed." % details)
        else:
          # Queue up this build to try again after a long while.
          retried.add(target)
          self._retry_queue.append(self._state_map[target])
          self._failed.add(target)
          self._Print("Failed %s, retrying later." % details)
      else:
        if previously_failed:
          # Remove target from list of failed packages.
          self._failed.remove(target)

        self._Print("Completed %s" % details)

        # Mark as completed and unblock waiting ebuilds.
        self._Finish(target)

        if previously_failed and self._retry_queue:
          # If we have successfully retried a failed package, and there
          # are more failed packages, try the next one. We will only have
          # one retrying package actively running at a time.
          self._Retry()

      # Schedule pending jobs and print an update.
      self._ScheduleLoop()
      self._Status()

    # If packages were retried, output a warning.
    if retried:
      self._Print("")
      self._Print("WARNING: The following packages failed the first time,")
      self._Print("but succeeded upon retry. This might indicate incorrect")
      self._Print("dependencies.")
      for pkg in retried:
        self._Print("  %s" % pkg)
      self._Print("@@@STEP_WARNINGS@@@")
      self._Print("")

    # Tell child threads to exit.
    self._Print("Merge complete")
David Jamesfcb70ef2011-02-02 16:02:30 -08001791
1792
def main(argv):
  """Entry point: run real_main, then reap multiprocessing helper threads.

  Works around multiprocessing not cleaning up after itself
  (http://bugs.python.org/issue4106): force a GC pass before interpreter
  shutdown begins, then manually join any queue feeder threads that were
  actually started.
  """
  try:
    return real_main(argv)
  finally:
    # Step one: ensure GC is run *prior* to the VM starting shutdown.
    gc.collect()
    # Step two: go looking for multiprocessing's feeder threads and try to
    # reap them manually. A thread whose ident is None was never started.
    feeders = [thread for thread in threading.enumerate()
               if thread.name == 'QueueFeederThread' and
               thread.ident is not None]
    for thread in feeders:
      thread.join(1)
David Jamesfcb70ef2011-02-02 16:02:30 -08001808
Brian Harring8294d652012-05-23 02:20:52 -07001809
def real_main(argv):
  """Parse arguments, build the dependency graph, and run the merges.

  Args:
    argv: Command-line arguments (parallel_emerge flags plus emerge args).

  Returns:
    Process exit code (0 on success). May not return at all: this function
    re-execs itself via os.execvp after upgrading portage, and delegates to
    emerge_main for plain emerge actions.
  """
  parallel_emerge_args = argv[:]
  deps = DepGraphGenerator()
  deps.Initialize(parallel_emerge_args)
  emerge = deps.emerge

  if emerge.action is not None:
    # An explicit emerge action (e.g. --sync) was requested; hand the whole
    # command line straight to emerge.
    argv = deps.ParseParallelEmergeArgs(argv)
    return emerge_main(argv)
  elif not emerge.cmdline_packages:
    Usage()
    return 1

  # Unless we're in pretend mode, there's not much point running without
  # root access. We need to be able to install packages.
  #
  # NOTE: Even if you're running --pretend, it's a good idea to run
  # parallel_emerge with root access so that portage can write to the
  # dependency cache. This is important for performance.
  if "--pretend" not in emerge.opts and portage.data.secpass < 2:
    print "parallel_emerge: superuser access is required."
    return 1

  if "--quiet" not in emerge.opts:
    cmdline_packages = " ".join(emerge.cmdline_packages)
    print "Starting fast-emerge."
    print " Building package %s on %s" % (cmdline_packages,
                                          deps.board or "root")

  deps_tree, deps_info = deps.GenDependencyTree()

  # You want me to be verbose? I'll give you two trees! Twice as much value.
  if "--tree" in emerge.opts and "--verbose" in emerge.opts:
    deps.PrintTree(deps_tree)

  deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)

  # OK, time to print out our progress so far.
  deps.PrintInstallPlan(deps_graph)
  if "--tree" in emerge.opts:
    PrintDepsMap(deps_graph)

  # Are we upgrading portage? If so, and there are more packages to merge,
  # schedule a restart of parallel_emerge to merge the rest. This ensures that
  # we pick up all updates to portage settings before merging any more
  # packages.
  portage_upgrade = False
  root = emerge.settings["ROOT"]
  # pylint: disable=W0212
  final_db = emerge.depgraph._dynamic_config.mydbapi[root]
  if root == "/":
    for db_pkg in final_db.match_pkgs("sys-apps/portage"):
      portage_pkg = deps_graph.get(db_pkg.cpv)
      if portage_pkg:
        portage_upgrade = True
        if "--quiet" not in emerge.opts:
          print "Upgrading portage first, then restarting..."

  # Upgrade Portage first, then the rest of the packages.
  #
  # In order to grant the child permission to run setsid, we need to run sudo
  # again. We preserve SUDO_USER here in case an ebuild depends on it.
  if portage_upgrade:
    # Calculate what arguments to use when re-invoking.
    args = ["sudo", "-E", "SUDO_USER=%s" % os.environ.get("SUDO_USER", "")]
    args += [os.path.abspath(sys.argv[0])] + parallel_emerge_args
    args += ["--exclude=sys-apps/portage"]

    # First upgrade Portage.
    passthrough_args = ("--quiet", "--pretend", "--verbose")
    emerge_args = [k for k in emerge.opts if k in passthrough_args]
    ret = emerge_main(emerge_args + ["portage"])
    if ret != 0:
      return ret

    # Now upgrade the rest. execvp replaces this process; no return.
    os.execvp(args[0], args)

  # Run the queued emerges.
  scheduler = EmergeQueue(deps_graph, emerge, deps.package_db, deps.show_output,
                          deps.unpack_only)
  try:
    scheduler.Run()
  finally:
    # pylint: disable=W0212
    scheduler._Shutdown()
  # Drop the reference so the queue's resources can be collected.
  scheduler = None

  clean_logs(emerge.settings)

  print "Done"
  return 0