blob: 5591a6e418b45e9b7b90c8758674949b0d4dcca9 [file] [log] [blame]
David Jamesfcb70ef2011-02-02 16:02:30 -08001#!/usr/bin/python2.6
2# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
3# Use of this source code is governed by a BSD-style license that can be
4# found in the LICENSE file.
5
6"""Program to run emerge in parallel, for significant speedup.
7
8Usage:
9 ./parallel_emerge [--board=BOARD] [--workon=PKGS] [--no-workon-deps]
10 [--force-remote-binary=PKGS] [emerge args] package
11
12Basic operation:
13 Runs 'emerge -p --debug' to display dependencies, and stores a
14 dependency graph. All non-blocked packages are launched in parallel,
15 as 'emerge --nodeps package' with any blocked packages being emerged
16 immediately upon deps being met.
17
18 For this to work effectively, /usr/lib/portage/pym/portage/locks.py
19 must be stubbed out, preventing portage from slowing itself with
  unnecessary locking, as this script ensures that emerge is run in such
21 a way that common resources are never in conflict. This is controlled
22 by an environment variable PORTAGE_LOCKS set in parallel emerge
23 subprocesses.
24
25 Parallel Emerge unlocks two things during operation, here's what you
26 must do to keep this safe:
27 * Storage dir containing binary packages. - Don't emerge new
28 packages while installing the existing ones.
29 * Portage database - You must not examine deps while modifying the
30 database. Therefore you may only parallelize "-p" read only access,
31 or "--nodeps" write only access.
32 Caveats:
33 * Some ebuild packages have incorrectly specified deps, and running
34 them in parallel is more likely to bring out these failures.
35 * Some ebuilds (especially the build part) have complex dependencies
36 that are not captured well by this script (it may be necessary to
37 install an old package to build, but then install a newer version
38 of the same package for a runtime dep).
39"""
40
41import codecs
42import copy
43import errno
44import multiprocessing
45import os
46import Queue
47import shlex
48import signal
49import sys
50import tempfile
51import time
52import traceback
53import urllib2
54
55# If PORTAGE_USERNAME isn't specified, scrape it from the $HOME variable. On
56# Chromium OS, the default "portage" user doesn't have the necessary
57# permissions. It'd be easier if we could default to $USERNAME, but $USERNAME
58# is "root" here because we get called through sudo.
59#
60# We need to set this before importing any portage modules, because portage
61# looks up "PORTAGE_USERNAME" at import time.
62#
63# NOTE: .bashrc sets PORTAGE_USERNAME = $USERNAME, so most people won't
64# encounter this case unless they have an old chroot or blow away the
65# environment by running sudo without the -E specifier.
if "PORTAGE_USERNAME" not in os.environ:
  # Fall back to the basename of $HOME (e.g. /home/foo -> foo). If $HOME is
  # unset or empty we leave PORTAGE_USERNAME alone and let portage decide.
  home = os.environ.get("HOME", "")
  if home:
    os.environ["PORTAGE_USERNAME"] = os.path.basename(home)
70
71# Portage doesn't expose dependency trees in its public API, so we have to
72# make use of some private APIs here. These modules are found under
73# /usr/lib/portage/pym/.
74#
75# TODO(davidjames): Update Portage to expose public APIs for these features.
76from _emerge.actions import adjust_configs
77from _emerge.actions import load_emerge_config
78from _emerge.create_depgraph_params import create_depgraph_params
79from _emerge.depgraph import depgraph as emerge_depgraph
80from _emerge.depgraph import _frozen_depgraph_config
81from _emerge.main import emerge_main
82from _emerge.main import parse_opts
83from _emerge.Package import Package
84from _emerge.Scheduler import Scheduler
85from _emerge.SetArg import SetArg
86from _emerge.stdout_spinner import stdout_spinner
87import portage
88import portage.debug
89import portage.versions
90
# Detect whether we are running against a portage newer than the 2.1.7.x
# series. The private _global_updates helper lives in a different module on
# newer versions, and other private APIs we use differ as well (see the
# new_portage checks for bintree.populate and backtracking parameters below).
new_portage = not portage.VERSION.startswith("2.1.7.")
if new_portage:
  from portage._global_updates import _global_updates
else:
  from portage import _global_updates
96
97def Usage():
98 """Print usage."""
99 print "Usage:"
100 print " ./parallel_emerge [--board=BOARD] [--workon=PKGS] [--no-workon-deps]"
101 print " [--rebuild] [emerge args] package"
102 print
103 print "Packages specified as workon packages are always built from source."
104 print "Unless --no-workon-deps is specified, packages that depend on these"
105 print "packages are also built from source."
106 print
107 print "The --workon argument is mainly useful when you want to build and"
108 print "install packages that you are working on unconditionally, but do not"
109 print "to have to rev the package to indicate you want to build it from"
110 print "source. The build_packages script will automatically supply the"
111 print "workon argument to emerge, ensuring that packages selected using"
112 print "cros-workon are rebuilt."
113 print
114 print "The --rebuild option rebuilds packages whenever their dependencies"
115 print "are changed. This ensures that your build is correct."
116 sys.exit(1)
117
118
# These are dependencies that are not specified in the package,
# but will prevent the package from installing.
# NOTE(review): no entries are defined in this part of the file -- confirm
# whether anything still populates or reads this before relying on it.
secret_deps = {}

# Global start time (seconds since the epoch), captured at import.
GLOBAL_START = time.time()
125
126
class EmergeData(object):
  """Simple struct bundling the emerge state we pass around as a unit.

  All attributes start out as None and are filled in later (see
  DepGraphGenerator.Initialize):

    action: Long-form name of the emerge action the user requested (e.g.
      "unmerge"), or None when the user is just installing packages.
    cmdline_packages: List of packages passed on the command line.
    depgraph: The emerge dependency graph, covering every package involved
      in this merge along with its version.
    mtimedb: Portage's persistent global-state database, loaded from disk at
      startup and written back via mtimedb.commit(). Portage records
      in-progress work here (the basis of --resume); parallel_emerge never
      records its own state, so --resume is unsupported.
    opts: Options passed to emerge, pre-cleaned by parse_opts. Beware that
      some defaulted flags (e.g. --usepkg=n) are dropped from the dict
      entirely, while others (e.g. --with-bdeps) are passed through as-is;
      see _emerge.main.parse_opts for the full set of cleanups.
    root_config: Portage configuration for the current root, bundling the
      settings and the three per-root portage trees.
    scheduler_graph: Graph handed to the Scheduler object. We do not install
      deps through it, but the Scheduler requires one anyway.
    settings: Portage settings for this session, mostly from make.conf in
      the current install root.
    spinner: Portage's stdout spinner. We keep our own progress output, so
      this one is switched to silent mode.
    trees: Per-root portage trees, keyed by root (settings["ROOT"]). Each
      root holds a vartree (installed packages), a porttree (ebuilds that
      can be built) and a bintree (binary packages).
  """

  __slots__ = ["action", "cmdline_packages", "depgraph", "mtimedb", "opts",
               "root_config", "scheduler_graph", "settings", "spinner",
               "trees"]

  def __init__(self):
    # Every slot starts unset; callers populate them from the loaded emerge
    # configuration.
    for attr in self.__slots__:
      setattr(self, attr, None)
214
215
216class DepGraphGenerator(object):
217 """Grab dependency information about packages from portage.
218
219 Typical usage:
220 deps = DepGraphGenerator()
221 deps.Initialize(sys.argv[1:])
222 deps_tree, deps_info = deps.GenDependencyTree()
223 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info)
224 deps.PrintTree(deps_tree)
225 PrintDepsMap(deps_graph)
226 """
227
228 __slots__ = ["board", "emerge", "mandatory_source", "no_workon_deps",
229 "nomerge", "package_db", "rebuild", "show_output",
230 "force_remote_binary", "forced_remote_binary_packages"]
231
232 def __init__(self):
233 self.board = None
234 self.emerge = EmergeData()
235 self.mandatory_source = set()
236 self.no_workon_deps = False
237 self.nomerge = set()
238 self.package_db = {}
239 self.rebuild = False
240 self.show_output = False
241 self.force_remote_binary = set()
242 self.forced_remote_binary_packages = set()
243
244 def ParseParallelEmergeArgs(self, argv):
245 """Read the parallel emerge arguments from the command-line.
246
247 We need to be compatible with emerge arg format. We scrape arguments that
248 are specific to parallel_emerge, and pass through the rest directly to
249 emerge.
250 Args:
251 argv: arguments list
252 Returns:
253 Arguments that don't belong to parallel_emerge
254 """
255 emerge_args = []
256 for arg in argv:
257 # Specifically match arguments that are specific to parallel_emerge, and
258 # pass through the rest.
259 if arg.startswith("--board="):
260 self.board = arg.replace("--board=", "")
261 elif arg.startswith("--workon="):
262 workon_str = arg.replace("--workon=", "")
263 package_list = shlex.split(" ".join(shlex.split(workon_str)))
264 self.mandatory_source.update(package_list)
265 elif arg.startswith("--force-remote-binary="):
266 force_remote_binary = arg.replace("--force-remote-binary=", "")
267 force_remote_binary = \
268 shlex.split(" ".join(shlex.split(force_remote_binary)))
269 self.force_remote_binary.update(force_remote_binary)
270 elif arg.startswith("--nomerge="):
271 nomerge_str = arg.replace("--nomerge=", "")
272 package_list = shlex.split(" ".join(shlex.split(nomerge_str)))
273 self.nomerge.update(package_list)
274 elif arg == "--no-workon-deps":
275 self.no_workon_deps = True
276 elif arg == "--rebuild":
277 self.rebuild = True
278 elif arg == "--show-output":
279 self.show_output = True
280 else:
281 # Not one of our options, so pass through to emerge.
282 emerge_args.append(arg)
283
284 if self.rebuild:
285 if self.no_workon_deps:
286 print "--rebuild is not compatible with --no-workon-deps"
287 sys.exit(1)
288
289 return emerge_args
290
  def Initialize(self, args):
    """Initializer. Parses arguments and sets up portage state.

    Args:
      args: Full command-line argument list (parallel_emerge options plus
        pass-through emerge options).

    Side effects: mutates os.environ extensively (board setup, FEATURES,
    portage knobs) and fills in self.emerge. Exits the process when
    toolchain.conf cannot be found or an unsupported option is given.
    """

    # Parse and strip out args that are just intended for parallel_emerge.
    emerge_args = self.ParseParallelEmergeArgs(args)

    # Setup various environment variables based on our current board. These
    # variables are normally setup inside emerge-${BOARD}, but since we don't
    # call that script, we have to set it up here. These variables serve to
    # point our tools at /build/BOARD and to setup cross compiles to the
    # appropriate board as configured in toolchain.conf.
    if self.board:
      os.environ["PORTAGE_CONFIGROOT"] = "/build/" + self.board
      os.environ["PORTAGE_SYSROOT"] = "/build/" + self.board
      os.environ["SYSROOT"] = "/build/" + self.board
      srcroot = "%s/../../src" % os.path.dirname(os.path.realpath(__file__))
      # Strip the variant out of the board name to look for the toolchain. This
      # is similar to what setup_board does.
      board_no_variant = self.board.split('_')[0]
      public_toolchain_path = ("%s/overlays/overlay-%s/toolchain.conf" %
                               (srcroot, board_no_variant))
      private_toolchain_path = (
          "%s/private-overlays/overlay-%s-private/toolchain.conf" %
          (srcroot, board_no_variant))
      # Prefer the public overlay; fall back to the private one.
      if os.path.isfile(public_toolchain_path):
        toolchain_path = public_toolchain_path
      elif os.path.isfile(private_toolchain_path):
        toolchain_path = private_toolchain_path
      else:
        print "Not able to locate toolchain.conf in board overlays"
        sys.exit(1)

      # CHOST is the first line of toolchain.conf.
      f = open(toolchain_path)
      os.environ["CHOST"] = f.readline().strip()
      f.close()

      # Although CHROMEOS_ROOT isn't specific to boards, it's normally setup
      # inside emerge-${BOARD}, so we set it up here for compatibility. It
      # will be going away soon as we migrate to CROS_WORKON_SRCROOT.
      os.environ.setdefault("CHROMEOS_ROOT", os.environ["HOME"] + "/trunk")

    # Turn off interactive delays
    os.environ["EBEEP_IGNORE"] = "1"
    os.environ["EPAUSE_IGNORE"] = "1"
    os.environ["UNMERGE_DELAY"] = "0"

    # Parse the emerge options.
    action, opts, cmdline_packages = parse_opts(emerge_args)

    # If we're installing to the board, we want the --root-deps option so that
    # portage will install the build dependencies to that location as well.
    if self.board:
      opts.setdefault("--root-deps", True)

    # Set environment variables based on options. Portage normally sets these
    # environment variables in emerge_main, but we can't use that function,
    # because it also does a bunch of other stuff that we don't want.
    # TODO(davidjames): Patch portage to move this logic into a function we can
    # reuse here.
    if "--debug" in opts:
      os.environ["PORTAGE_DEBUG"] = "1"
    if "--config-root" in opts:
      os.environ["PORTAGE_CONFIGROOT"] = opts["--config-root"]
    if "--root" in opts:
      os.environ["ROOT"] = opts["--root"]
    if "--accept-properties" in opts:
      os.environ["ACCEPT_PROPERTIES"] = opts["--accept-properties"]

    # Portage has two flags for doing collision protection: collision-protect
    # and protect-owned. The protect-owned feature is enabled by default and
    # is quite useful: it checks to make sure that we don't have multiple
    # packages that own the same file. The collision-protect feature is more
    # strict, and less useful: it fails if it finds a conflicting file, even
    # if that file was created by an earlier ebuild that failed to install.
    #
    # We want to disable collision-protect here because we don't handle
    # failures during the merge step very well. Sometimes we leave old files
    # lying around and they cause problems, so for now we disable the flag.
    # TODO(davidjames): Look for a better solution.
    features = os.environ.get("FEATURES", "") + " -collision-protect"

    # If we're installing packages to the board, and we're not using the
    # official flag, we can enable the following optimizations:
    # 1) Don't lock during install step. This allows multiple packages to be
    #    installed at once. This is safe because our board packages do not
    #    muck with each other during the post-install step.
    # 2) Don't update the environment until the end of the build. This is
    #    safe because board packages don't need to run during the build --
    #    they're cross-compiled, so our CPU architecture doesn't support them
    #    anyway.
    if self.board and os.environ.get("CHROMEOS_OFFICIAL") != "1":
      os.environ.setdefault("PORTAGE_LOCKS", "false")
      features = features + " no-env-update"

    os.environ["FEATURES"] = features

    # Now that we've setup the necessary environment variables, we can load the
    # emerge config from disk.
    settings, trees, mtimedb = load_emerge_config()

    # Check whether our portage tree is out of date. Typically, this happens
    # when you're setting up a new portage tree, such as in setup_board and
    # make_chroot. In that case, portage applies a bunch of global updates
    # here. Once the updates are finished, we need to commit any changes
    # that the global update made to our mtimedb, and reload the config.
    #
    # Portage normally handles this logic in emerge_main, but again, we can't
    # use that function here.
    if _global_updates(trees, mtimedb["updates"]):
      mtimedb.commit()
      settings, trees, mtimedb = load_emerge_config(trees=trees)

    # Setup implied options. Portage normally handles this logic in
    # emerge_main.
    if "--buildpkgonly" in opts or "buildpkg" in settings.features:
      opts.setdefault("--buildpkg", True)
    if "--getbinpkgonly" in opts:
      opts.setdefault("--usepkgonly", True)
      opts.setdefault("--getbinpkg", True)
    if "getbinpkg" in settings.features:
      # Per emerge_main, FEATURES=getbinpkg overrides --getbinpkg=n
      opts["--getbinpkg"] = True
    if "--getbinpkg" in opts or "--usepkgonly" in opts:
      opts.setdefault("--usepkg", True)
    if "--fetch-all-uri" in opts:
      opts.setdefault("--fetchonly", True)
    if "--skipfirst" in opts:
      opts.setdefault("--resume", True)
    if "--buildpkgonly" in opts:
      # --buildpkgonly will not merge anything, so it overrides all binary
      # package options.
      for opt in ("--getbinpkg", "--getbinpkgonly",
                  "--usepkg", "--usepkgonly"):
        opts.pop(opt, None)
    if (settings.get("PORTAGE_DEBUG", "") == "1" and
        "python-trace" in settings.features):
      portage.debug.set_trace(True)

    # Complain about unsupported options
    for opt in ("--ask", "--ask-enter-invalid", "--complete-graph",
                "--resume", "--skipfirst"):
      if opt in opts:
        print "%s is not supported by parallel_emerge" % opt
        sys.exit(1)

    # Make emerge specific adjustments to the config (e.g. colors!)
    adjust_configs(opts, trees)

    # Save our configuration so far in the emerge object
    emerge = self.emerge
    emerge.action, emerge.opts = action, opts
    emerge.settings, emerge.trees, emerge.mtimedb = settings, trees, mtimedb
    emerge.cmdline_packages = cmdline_packages
    root = settings["ROOT"]
    emerge.root_config = trees[root]["root_config"]

    # On newer portage the bintree must be populated explicitly before binary
    # packages can be looked up; the argument says whether remote binaries
    # should be fetched as well.
    if new_portage and "--usepkg" in opts:
      emerge.trees[root]["bintree"].populate("--getbinpkg" in opts)
449
450 def CheckUseFlags(self, pkgsettings, cur_pkg, new_pkg):
451 """Are the use flags in cur_pkg up to date?
452
453 Return True if use flags are up to date; return false otherwise."""
454
455 # cur_use: The set of flags that were enabled when the package was
456 # first installed.
457 # cur_iuse: The set of flags that affected the specified package
458 # when it was first installed.
459 #
460 # The intersection of cur_use and cur_iuse provides the set of
461 # flags that were enabled and affected the specified package.
462 cur_use = cur_pkg.use.enabled
463 cur_iuse = cur_pkg.iuse.all
464
465 # Check whether this package is already installed with the right use
466 # flags.
467 #
468 # now_use: The set of flags (special and non-special) that are now
469 # enabled for the specified package.
470 # now_iuse: The set of non-special flags that affect the specified
471 # package.
472 now_use = new_pkg.use.enabled
473 now_iuse = new_pkg.iuse.all
474
475 # Tell portage we want to lookup the flags for the specified package
476 # in package.use.{mask,force}
477 pkgsettings.setcpv(new_pkg.cpv)
478
479 # Grab the set of flags that are requested for the given package.
480 # This includes flags that don't affect the package, and includes
481 # all sources of flags (e.g. USE environment variable, make.conf,
482 # make.defaults, package.use.{mask,force}, etc.).
483 #
484 # This is used by portage in the _reinstall_for_flags function below.
485 forced_flags = set(pkgsettings.useforce).union(pkgsettings.usemask)
486
487 depgraph = self.emerge.depgraph
488 flags = depgraph._reinstall_for_flags(forced_flags, cur_use,
489 cur_iuse, now_use, now_iuse)
490 return not flags
491
  def CreateDepgraph(self, emerge, packages):
    """Create an emerge depgraph object.

    On success the new depgraph is stored in emerge.depgraph. If no valid
    install plan can be found even after backtracking, prints the problems
    and exits the process.

    Args:
      emerge: EmergeData struct holding the options, settings, trees and
        spinner used to build the graph.
      packages: Iterable of package atoms to select.
    """
    # Setup emerge options.
    emerge_opts = emerge.opts.copy()

    # Enable --emptytree so that we get the full tree, which we need for
    # dependency analysis. By default, with this option, emerge optimizes
    # the graph by removing uninstall instructions from the graph. By
    # specifying --tree as well, we tell emerge that it's not safe to remove
    # uninstall instructions because we're planning on analyzing the output.
    emerge_opts["--tree"] = True
    emerge_opts["--emptytree"] = True

    # Set up parameters.
    params = create_depgraph_params(emerge_opts, emerge.action)
    frozen_config = _frozen_depgraph_config(emerge.settings, emerge.trees,
                                            emerge_opts, emerge.spinner)
    backtrack_max = emerge_opts.get('--backtrack', 5)
    backtrack_parameters = {}
    allow_backtracking = backtrack_max > 0

    # Try up to backtrack_max times to create a working depgraph. Each time we
    # run into a conflict, mask the offending package and try again.
    # TODO(davidjames): When Portage supports --force-remote-binary directly,
    # switch back to using the backtrack_depgraph function.
    # (The +2 allows for one final no-backtracking pass after the budget is
    # exhausted.)
    for i in range(backtrack_max + 2):
      # Create a depgraph object.
      depgraph = emerge_depgraph(emerge.settings, emerge.trees, emerge_opts,
                                 params, emerge.spinner,
                                 frozen_config=frozen_config,
                                 allow_backtracking=allow_backtracking,
                                 **backtrack_parameters)

      if i == 0:
        for cpv in self.forced_remote_binary_packages:
          # If --force-remote-binary was specified, we want to use this package
          # regardless of its use flags. Unfortunately, Portage doesn't support
          # ignoring use flags for just one package. To convince Portage to
          # install the package, we trick Portage into thinking the package has
          # the right use flags.
          # TODO(davidjames): Update Portage to support --force-remote-binary
          # directly, so that this hack isn't necessary.
          pkg = depgraph._pkg(cpv, "binary", emerge.root_config)
          pkgsettings = frozen_config.pkgsettings[pkg.root]
          pkgsettings.setcpv(pkg)
          pkg.use.enabled = pkgsettings["PORTAGE_USE"].split()

      # Select the packages we want.
      success, favorites = depgraph.select_files(packages)
      if success:
        break
      elif depgraph.need_restart() and i < backtrack_max:
        # Looks like we found some packages that can't be installed due to
        # conflicts. Try again, masking out the conflicting packages.
        if new_portage:
          backtrack_parameters = depgraph.get_backtrack_parameters()
        else:
          backtrack_parameters = {
            'runtime_pkg_mask': depgraph.get_runtime_pkg_mask()
          }
      elif allow_backtracking and i > 0:
        # Looks like we can't solve the graph. Stop backtracking and report an
        # error message.
        backtrack_parameters.pop('runtime_pkg_mask', None)
        allow_backtracking = False
      else:
        break

    # Delete the --tree option, because we don't really want to display a
    # tree. We just wanted to get emerge to leave uninstall instructions on
    # the graph. Later, when we display the graph, we'll want standard-looking
    # output, so removing the --tree option is important.
    frozen_config.myopts.pop("--tree", None)

    emerge.depgraph = depgraph

    # Is it impossible to honor the user's request? Bail!
    if not success:
      depgraph.display_problems()
      sys.exit(1)
571
  def GenDependencyTree(self, remote_pkgs):
    """Get dependency tree info from emerge.

    TODO(): Update cros_extract_deps to also use this code.

    Args:
      remote_pkgs: Remote (prebuilt) package database, as returned by
        RemotePackageDatabase(); only the package CPV keys are consulted.
    Returns:
      A (deps_tree, deps_info) tuple. deps_tree maps each package CPV to
      its action and per-dependency sub-dicts; deps_info maps each CPV to
      its install-plan index ("idx") and whether the merge is "optional".
    """
    start = time.time()

    emerge = self.emerge

    # Create a list of packages to merge
    packages = set(emerge.cmdline_packages[:])
    if self.mandatory_source:
      packages.update(self.mandatory_source)
    if self.force_remote_binary:
      # Group remote packages matching --force-remote-binary (by short name
      # or by category/name) under their full package name.
      forced_pkgs = {}
      for pkg in remote_pkgs:
        category, pkgname, _, _ = portage.catpkgsplit(pkg)
        full_pkgname = "%s/%s" % (category, pkgname)
        if (pkgname in self.force_remote_binary or
            full_pkgname in self.force_remote_binary):
          forced_pkgs.setdefault(full_pkgname, []).append(pkg)

      # Add forced binary packages to the dependency list. This is necessary
      # to ensure that the install plan contains the right package.
      #
      # Putting the forced binary package at the beginning of the list is an
      # optimization that helps avoid unnecessary backtracking (e.g., if
      # Portage first selects the wrong version, and then backtracks later, it
      # takes a bit longer and uses up an unnecessary backtrack iteration.)
      packages = list(packages)
      for pkgs in forced_pkgs.values():
        forced_package = portage.versions.best(pkgs)
        packages.insert(0, "=%s" % forced_package)
        self.forced_remote_binary_packages.add(forced_package)

    # Tell emerge to be quiet. We print plenty of info ourselves so we don't
    # need any extra output from portage.
    portage.util.noiselimit = -1

    # My favorite feature: The silent spinner. It doesn't spin. Ever.
    # I'd disable the colors by default too, but they look kind of cool.
    emerge.spinner = stdout_spinner()
    emerge.spinner.update = emerge.spinner.update_quiet

    if "--quiet" not in emerge.opts:
      print "Calculating deps..."

    self.CreateDepgraph(emerge, packages)
    depgraph = emerge.depgraph

    # Build our own tree from the emerge digraph.
    deps_tree = {}
    digraph = depgraph._dynamic_config.digraph
    for node, node_deps in digraph.nodes.items():
      # Calculate dependency packages that need to be installed first. Each
      # child on the digraph is a dependency. The "operation" field specifies
      # what we're doing (e.g. merge, uninstall, etc.). The "priorities" array
      # contains the type of dependency (e.g. build, runtime, runtime_post,
      # etc.)
      #
      # Emerge itself actually treats some dependencies as "soft" dependencies
      # and sometimes ignores them. We don't do that -- we honor all
      # dependencies unless we're forced to prune them because they're cyclic.
      #
      # Portage refers to the identifiers for packages as a CPV. This acronym
      # stands for Component/Path/Version.
      #
      # Here's an example CPV: chromeos-base/power_manager-0.0.1-r1
      # Split up, this CPV would be:
      # C -- Component: chromeos-base
      # P -- Path: power_manager
      # V -- Version: 0.0.1-r1
      #
      # We just refer to CPVs as packages here because it's easier.
      deps = {}
      for child, priorities in node_deps[0].items():
        if isinstance(child, SetArg): continue
        deps[str(child.cpv)] = dict(action=str(child.operation),
                                    deptype=str(priorities[-1]),
                                    deps={})

      # We've built our list of deps, so we can add our package to the tree.
      if isinstance(node, Package):
        deps_tree[str(node.cpv)] = dict(action=str(node.operation),
                                        deps=deps)

    emptytree = "--emptytree" in emerge.opts

    # Ask portage for its install plan, so that we can only throw out
    # dependencies that portage throws out. Also, keep track of the old
    # versions of packages that we're either upgrading or replacing.
    #
    # The "vardb" is the database of installed packages.
    root = emerge.settings["ROOT"]
    frozen_config = depgraph._frozen_config
    vardb = frozen_config.trees[root]["vartree"].dbapi
    pkgsettings = frozen_config.pkgsettings[root]

    deps_info = {}
    for pkg in depgraph.altlist():
      if isinstance(pkg, Package):
        # If we're not using --force-remote-binary, check what flags are being
        # used by the real package.
        if "--usepkgonly" not in emerge.opts:
          try:
            pkg = emerge.depgraph._pkg(pkg.cpv, "ebuild", emerge.root_config)
          except portage.exception.PackageNotFound:
            # This is a --force-remote-binary package.
            pass
        self.package_db[pkg.cpv] = pkg

        # If we're not in emptytree mode, and we're going to replace a package
        # that is already installed, then this operation is possibly optional.
        # ("--selective" mode is handled later, in RemoveInstalledPackages())
        optional = False
        if not emptytree:
          for vardb_pkg in vardb.match_pkgs(pkg.cpv):
            if self.CheckUseFlags(pkgsettings, vardb_pkg, pkg):
              optional = True
              break

        # Save off info about the package
        deps_info[str(pkg.cpv)] = {"idx": len(deps_info),
                                   "optional": optional}

    seconds = time.time() - start
    if "--quiet" not in emerge.opts:
      print "Deps calculated in %dm%.1fs" % (seconds / 60, seconds % 60)

    return deps_tree, deps_info
704
705 def PrintTree(self, deps, depth=""):
706 """Print the deps we have seen in the emerge output.
707
708 Args:
709 deps: Dependency tree structure.
710 depth: Allows printing the tree recursively, with indentation.
711 """
712 for entry in sorted(deps):
713 action = deps[entry]["action"]
714 print "%s %s (%s)" % (depth, entry, action)
715 self.PrintTree(deps[entry]["deps"], depth=depth + " ")
716
717 def RemotePackageDatabase(self, binhost_url):
718 """Grab the latest binary package database from the prebuilt server.
719
720 We need to know the modification times of the prebuilt packages so that we
721 know when it is OK to use these packages and when we should rebuild them
722 instead.
723
724 Args:
725 binhost_url: Base URL of remote packages (PORTAGE_BINHOST).
726
727 Returns:
728 A dict mapping package identifiers to modification times.
729 """
730
731 if not binhost_url:
732 return {}
733
734 def retry_urlopen(url, tries=3):
735 """Open the specified url, retrying if we run into temporary errors.
736
737 We retry for both network errors and 5xx Server Errors. We do not retry
738 for HTTP errors with a non-5xx code.
739
740 Args:
741 url: The specified url.
742 tries: The number of times to try.
743
744 Returns:
745 The result of urllib2.urlopen(url).
746 """
747 for i in range(tries):
748 try:
749 return urllib2.urlopen(url)
750 except urllib2.HTTPError as e:
751 print "Cannot GET %s: %s" % (url, str(e))
752 if i + 1 >= tries or e.code < 500:
753 raise
754 except urllib2.URLError as e:
755 print "Cannot GET %s: %s" % (url, str(e))
756 if i + 1 >= tries:
757 raise
758 print "Sleeping for 10 seconds before retrying..."
759 time.sleep(10)
760
761 url = os.path.join(binhost_url, "Packages")
762 try:
763 f = retry_urlopen(url)
764 except urllib2.HTTPError as e:
765 if e.code == 404:
766 return {}
767 else:
768 raise
769 prebuilt_pkgs = {}
770 for line in f:
771 if line.startswith("CPV: "):
772 pkg = line.replace("CPV: ", "").rstrip()
773 elif line.startswith("MTIME: "):
774 prebuilt_pkgs[pkg] = int(line[:-1].replace("MTIME: ", ""))
775 f.close()
776
777 return prebuilt_pkgs
778
  def GenDependencyGraph(self, deps_tree, deps_info, remote_pkgs):
    """Generate a doubly linked dependency graph.

    Args:
      deps_tree: Dependency tree structure.
      deps_info: More details on the dependencies.
      remote_pkgs: A dict mapping remote (prebuilt) package identifiers to
        modification times, used when deciding what to rebuild.
    Returns:
      Deps graph in the form of a dict of packages, with each package
      specifying a "needs" list and "provides" list.
    """
    emerge = self.emerge
    root = emerge.settings["ROOT"]

    # It's useful to know what packages will actually end up on the
    # system at some point. Packages in final_db are either already
    # installed, or will be installed by the time we're done.
    final_db = emerge.depgraph._dynamic_config.mydbapi[root]

    # final_pkgs is a set of the packages we found in the final_db. These
    # packages are either already installed, or will be installed by the time
    # we're done. It's populated in BuildFinalPackageSet()
    final_pkgs = set()

    # These packages take a really long time to build, so, for expediency, we
    # are blacklisting them from automatic rebuilds because one of their
    # dependencies needs to be recompiled.
    rebuild_blacklist = set()
    for pkg in ("chromeos-base/chromeos-chrome", "media-plugins/o3d",
                "dev-java/icedtea"):
      for match in final_db.match_pkgs(pkg):
        rebuild_blacklist.add(str(match.cpv))

    # deps_map is the actual dependency graph.
    #
    # Each package specifies a "needs" list and a "provides" list. The "needs"
    # list indicates which packages we depend on. The "provides" list
    # indicates the reverse dependencies -- what packages need us.
    #
    # We also provide some other information in the dependency graph:
    #  - action: What we're planning on doing with this package. Generally,
    #    "merge", "nomerge", or "uninstall"
    #  - mandatory_source:
    #    If true, indicates that this package must be compiled from source.
    #    We set this for "workon" packages, and for packages where the
    #    binaries are known to be out of date.
    #  - mandatory:
    #    If true, indicates that this package must be installed. We don't care
    #    whether it's binary or source, unless the mandatory_source flag is
    #    also set.
    #  - force_remote_binary:
    #    If true, indicates that we want to update to the latest remote prebuilt
    #    of this package. Packages that depend on this package should be built
    #    from source.
    #
    deps_map = {}

    def ReverseTree(packages):
      """Convert tree to digraph.

      Take the tree of package -> requirements and reverse it to a digraph of
      buildable packages -> packages they unblock.
      Args:
        packages: Tree(s) of dependencies.
      Returns:
        Unsanitized digraph.
      """
      for pkg in packages:

        # Create an entry for the package
        action = packages[pkg]["action"]
        default_pkg = {"needs": {}, "provides": set(), "action": action,
                       "mandatory_source": False, "mandatory": False,
                       "force_remote_binary": False}
        this_pkg = deps_map.setdefault(pkg, default_pkg)

        # Create entries for dependencies of this package first.
        ReverseTree(packages[pkg]["deps"])

        # Add dependencies to this package.
        for dep, dep_item in packages[pkg]["deps"].iteritems():
          dep_pkg = deps_map[dep]
          dep_type = dep_item["deptype"]
          # "runtime_post" deps may be satisfied after the package itself,
          # so they don't gate scheduling (no reverse edge is added).
          if dep_type != "runtime_post":
            dep_pkg["provides"].add(pkg)
          this_pkg["needs"][dep] = dep_type

    def BuildFinalPackageSet():
      # If this package is installed, or will get installed, add it to
      # final_pkgs
      for pkg in deps_map:
        for match in final_db.match_pkgs(pkg):
          final_pkgs.add(str(match.cpv))

    def FindCycles():
      """Find cycles in the dependency tree.

      Returns:
        A dict mapping cyclic packages to a dict of the deps that cause
        cycles. For each dep that causes cycles, it returns an example
        traversal of the graph that shows the cycle.
      """

      def FindCyclesAtNode(pkg, cycles, unresolved, resolved):
        """Find cycles in cyclic dependencies starting at specified package.

        Args:
          pkg: Package identifier.
          cycles: A dict mapping cyclic packages to a dict of the deps that
            cause cycles. For each dep that causes cycles, it returns an
            example traversal of the graph that shows the cycle.
          unresolved: Nodes that have been visited but are not fully processed.
          resolved: Nodes that have been visited and are fully processed.
        """
        pkg_cycles = cycles.get(pkg)
        if pkg in resolved and not pkg_cycles:
          # If we already looked at this package, and found no cyclic
          # dependencies, we can stop now.
          return
        unresolved.append(pkg)
        for dep in deps_map[pkg]["needs"]:
          if dep in unresolved:
            # dep is already on the current DFS path, so everything from its
            # first occurrence to here forms a cycle.
            idx = unresolved.index(dep)
            mycycle = unresolved[idx:] + [dep]
            for i in range(len(mycycle) - 1):
              pkg1, pkg2 = mycycle[i], mycycle[i+1]
              cycles.setdefault(pkg1, {}).setdefault(pkg2, mycycle)
          elif not pkg_cycles or dep not in pkg_cycles:
            # Looks like we haven't seen this edge before.
            FindCyclesAtNode(dep, cycles, unresolved, resolved)
        unresolved.pop()
        resolved.add(pkg)

      cycles, unresolved, resolved = {}, [], set()
      for pkg in deps_map:
        FindCyclesAtNode(pkg, cycles, unresolved, resolved)
      return cycles

    def RemoveInstalledPackages():
      """Remove installed packages, propagating dependencies."""

      # If we're in non-selective mode, the packages specified on the command
      # line are generally mandatory.
      #
      # There are a few exceptions to this rule:
      # 1. If the package isn't getting installed because it's in
      #    package.provided, it's not mandatory.
      # 2. If the package isn't getting installed because we're in --onlydeps
      #    mode, it's not mandatory either.
      if "--selective" in emerge.opts:
        selective = emerge.opts["--selective"] != "n"
      else:
        selective = ("--noreplace" in emerge.opts or
                     "--update" in emerge.opts or
                     "--newuse" in emerge.opts or
                     "--reinstall" in emerge.opts)
      onlydeps = "--onlydeps" in emerge.opts
      if not selective:
        for pkg in emerge.cmdline_packages:
          # If the package specified on the command-line is in our install
          # list, mark it as non-optional.
          found = False
          for db_pkg in final_db.match_pkgs(pkg):
            this_pkg = deps_info.get(db_pkg.cpv)
            if this_pkg:
              found = True
              this_pkg["optional"] = False

          # We didn't find the package in our final db. If we're not in
          # --onlydeps mode, this likely means that the package was specified
          # in package.provided.
          if not found and not onlydeps and "--verbose" in emerge.opts:
            print "Skipping %s (is it in package.provided?)" % pkg

      # Schedule packages that aren't on the install list for removal
      rm_pkgs = set(deps_map.keys()) - set(deps_info.keys())

      # Schedule optional packages for removal
      for pkg, info in deps_info.items():
        if info["optional"]:
          rm_pkgs.add(pkg)

      # Schedule nomerge packages for removal
      for pkg in self.nomerge:
        for db_pkg in final_db.match_pkgs(pkg):
          if db_pkg.cpv in deps_map:
            rm_pkgs.add(str(db_pkg.cpv))

      # Remove the packages we don't want, simplifying the graph and making
      # it easier for us to crack cycles.
      for pkg in sorted(rm_pkgs):
        this_pkg = deps_map[pkg]
        needs = this_pkg["needs"]
        provides = this_pkg["provides"]
        # Splice the node out of the graph: connect each of its deps
        # directly to each of its dependents, avoiding self-edges.
        for dep in needs:
          dep_provides = deps_map[dep]["provides"]
          dep_provides.update(provides)
          dep_provides.discard(pkg)
          dep_provides.discard(dep)
        for target in provides:
          target_needs = deps_map[target]["needs"]
          target_needs.update(needs)
          target_needs.pop(pkg, None)
          target_needs.pop(target, None)
        del deps_map[pkg]

    def PrintCycleBreak(basedep, dep, mycycle):
      """Print details about a cycle that we are planning on breaking.

      We are breaking a cycle where dep needs basedep. mycycle is an
      example cycle which contains dep -> basedep."""

      # If it's an optional dependency, there's no need to spam the user with
      # warning messages.
      needs = deps_map[dep]["needs"]
      depinfo = needs.get(basedep, "deleted")
      if depinfo == "optional":
        return

      # Notify the user that we're breaking a cycle.
      print "Breaking %s -> %s (%s)" % (dep, basedep, depinfo)

      # Show cycle.
      for i in range(len(mycycle) - 1):
        pkg1, pkg2 = mycycle[i], mycycle[i+1]
        needs = deps_map[pkg1]["needs"]
        depinfo = needs.get(pkg2, "deleted")
        if pkg1 == dep and pkg2 == basedep:
          depinfo = depinfo + ", deleting"
        print "  %s -> %s (%s)" % (pkg1, pkg2, depinfo)

    def SanitizeTree():
      """Remove circular dependencies.

      We prune all dependencies involved in cycles that go against the emerge
      ordering. This has a nice property: we're guaranteed to merge
      dependencies in the same order that portage does.

      Because we don't treat any dependencies as "soft" unless they're killed
      by a cycle, we pay attention to a larger number of dependencies when
      merging. This hurts performance a bit, but helps reliability.
      """
      start = time.time()
      cycles = FindCycles()
      # Keep breaking edges and re-scanning until no cycles remain.
      while cycles:
        for dep, mycycles in cycles.iteritems():
          for basedep, mycycle in mycycles.iteritems():
            # Only break edges that go against portage's merge order.
            if deps_info[basedep]["idx"] >= deps_info[dep]["idx"]:
              PrintCycleBreak(basedep, dep, mycycle)
              del deps_map[dep]["needs"][basedep]
              deps_map[basedep]["provides"].remove(dep)
        cycles = FindCycles()
      seconds = time.time() - start
      if "--quiet" not in emerge.opts and seconds >= 0.1:
        print "Tree sanitized in %dm%.1fs" % (seconds / 60, seconds % 60)

    def AddSecretDeps():
      """Find these tagged packages and add extra dependencies.

      For debugging dependency problems.
      """
      for bad in secret_deps:
        needed = secret_deps[bad]
        bad_pkg = None
        needed_pkg = None
        for dep in deps_map:
          if dep.find(bad) != -1:
            bad_pkg = dep
          if dep.find(needed) != -1:
            needed_pkg = dep
        if bad_pkg and needed_pkg:
          deps_map[needed_pkg]["provides"].add(bad_pkg)
          deps_map[bad_pkg]["needs"][needed_pkg] = "secret"

    def MergeChildren(pkg, merge_type):
      """Merge this package and all packages it provides.

      Recursively sets the merge_type flag ("mandatory" or
      "mandatory_source") on pkg and everything that depends on it, except
      for blacklisted packages. Exits the program if pkg is missing from
      deps_info, which indicates an unsatisfiable dependency graph.
      """

      this_pkg = deps_map[pkg]
      # Stop if already flagged, or if the package won't end up installed.
      if (this_pkg[merge_type] or pkg not in final_pkgs):
        return

      if pkg not in deps_info:
        # Build a reproduction command line for the user before bailing out.
        emerge_cmd = "emerge"
        if self.board:
          emerge_cmd = "emerge-%s" % self.board
        emerge_cmd += " -pe =%s %s" % (pkg, " ".join(emerge.cmdline_packages))
        use_str = os.environ.get("USE")
        if use_str:
          emerge_cmd = 'USE="%s" %s' % (use_str, emerge_cmd)
        print "ERROR: emerge has refused to update %s" % pkg
        print "Are there impossible-to-satisfy constraints in the dependency"
        print "graph? To debug the issue, try the following command:"
        print "  %s" % emerge_cmd
        sys.exit(1)

      # Mark this package as non-optional
      deps_info[pkg]["optional"] = False
      this_pkg[merge_type] = True
      for w in this_pkg["provides"].difference(rebuild_blacklist):
        MergeChildren(w, merge_type)

      if this_pkg["action"] == "nomerge":
        this_pkg["action"] = "merge"

    def LocalPackageDatabase():
      """Get the modification times of the packages in the local database.

      We need to know the modification times of the local packages so that we
      know when they need to be rebuilt.

      Returns:
        A dict mapping package identifiers to modification times.
      """
      if self.board:
        path = "/build/%s/packages/Packages" % self.board
      else:
        path = "/var/lib/portage/pkgs/Packages"
      local_pkgs = {}
      for line in file(path):
        if line.startswith("CPV: "):
          pkg = line.replace("CPV: ", "").rstrip()
        elif line.startswith("MTIME: "):
          local_pkgs[pkg] = int(line[:-1].replace("MTIME: ", ""))

      return local_pkgs

    def AutoRebuildDeps(local_pkgs, remote_pkgs, cycles):
      """Recursively rebuild packages when necessary using modification times.

      If you've modified a package, it's a good idea to rebuild all the packages
      that depend on it from source. This function looks for any packages which
      depend on packages that have been modified and ensures that they get
      rebuilt.

      Args:
        local_pkgs: Modification times from the local database.
        remote_pkgs: Modification times from the prebuilt server.
        cycles: Dictionary returned from FindCycles()

      Returns:
        None. Packages are marked for rebuild by setting the "mandatory" or
        "mandatory_source" flags in deps_map via MergeChildren().
      """

      def PrebuiltsReady(pkg, pkg_db, cache):
        """Check whether the prebuilts are ready for pkg and all deps.

        Args:
          pkg: The specified package.
          pkg_db: The package DB to use.
          cache: A dict, where the results are stored.

        Returns:
          True iff the prebuilts are ready for pkg and all deps.
        """
        if pkg in cache:
          return cache[pkg]
        if pkg not in pkg_db and pkg not in self.forced_remote_binary_packages:
          cache[pkg] = False
        else:
          cache[pkg] = True
          for dep in deps_map[pkg]["needs"]:
            if not PrebuiltsReady(dep, pkg_db, cache):
              cache[pkg] = False
              break
        return cache[pkg]

      def LastModifiedWithDeps(pkg, pkg_db, cache):
        """Calculate the last modified time of a package and its dependencies.

        This function looks at all the packages needed by the specified package
        and checks the most recent modification time of all of those packages.
        If the dependencies of a package were modified more recently than the
        package itself, then we know the package needs to be rebuilt.

        Args:
          pkg: The specified package.
          pkg_db: The package DB to use.
          cache: A dict, where the last modified times are stored.

        Returns:
          The last modified time of the specified package and its dependencies.
        """
        if pkg in cache:
          return cache[pkg]

        cache[pkg] = pkg_db.get(pkg, 0)
        for dep in deps_map[pkg]["needs"]:
          t = LastModifiedWithDeps(dep, pkg_db, cache)
          cache[pkg] = max(cache[pkg], t)
        return cache[pkg]

      # For every package that's getting updated in our local cache (binary
      # or source), make sure we also update the children. If a package is
      # built from source, all children must also be built from source.
      local_ready_cache, remote_ready_cache = {}, {}
      local_mtime_cache, remote_mtime_cache = {}, {}
      for pkg in final_pkgs.difference(rebuild_blacklist):
        # If all the necessary local packages are ready, and their
        # modification times are in sync, we don't need to do anything here.
        local_mtime = LastModifiedWithDeps(pkg, local_pkgs, local_mtime_cache)
        local_ready = PrebuiltsReady(pkg, local_pkgs, local_ready_cache)
        # NOTE(review): "or" binds looser than "and", so this condition reads
        # as: not local_ready, OR (stale and not in a cycle). Confirm that
        # grouping is intentional.
        if (not local_ready or local_pkgs.get(pkg, 0) < local_mtime and
            pkg not in cycles):
          # OK, at least one package is missing from the local cache or is
          # outdated. This means we're going to have to install the package
          # and all dependencies.
          #
          # If all the necessary remote packages are ready, and they're at
          # least as new as our local packages, we can install them.
          # Otherwise, we need to build from source.
          remote_mtime = LastModifiedWithDeps(pkg, remote_pkgs,
                                              remote_mtime_cache)
          remote_ready = PrebuiltsReady(pkg, remote_pkgs, remote_ready_cache)
          if remote_ready and (local_mtime <= remote_mtime or pkg in cycles):
            MergeChildren(pkg, "mandatory")
          else:
            MergeChildren(pkg, "mandatory_source")

    def UsePrebuiltPackages(remote_pkgs):
      """Update packages that can use prebuilts to do so.

      Args:
        remote_pkgs: A dict of available remote prebuilt packages.

      Returns:
        A dict mapping eligible package identifiers to their binary
        portage Package objects.
      """
      start = time.time()

      # Build list of prebuilt packages.
      prebuilt_pkgs = {}
      for pkg, info in deps_map.iteritems():
        if info and info["action"] == "merge":
          if (not info["force_remote_binary"] and info["mandatory_source"] or
              "--usepkgonly" not in emerge.opts and pkg not in remote_pkgs):
            continue

          db_pkg = emerge.depgraph._pkg(pkg, "binary", emerge.root_config)
          if info["force_remote_binary"]:
            # Undo our earlier hacks to the use flags so that the use flags
            # display correctly.
            db_pkg.use.enabled = db_pkg.metadata["USE"].split()
          prebuilt_pkgs[pkg] = db_pkg

      # Calculate what packages need to be rebuilt due to changes in use flags.
      pkgsettings = emerge.depgraph._frozen_config.pkgsettings[root]
      for pkg, db_pkg in prebuilt_pkgs.iteritems():
        if not self.CheckUseFlags(pkgsettings, db_pkg, self.package_db[pkg]):
          MergeChildren(pkg, "mandatory_source")

      # Convert eligible packages to binaries.
      for pkg, info in deps_map.iteritems():
        if info and info["action"] == "merge" and pkg in prebuilt_pkgs:
          if not info["mandatory_source"] or info["force_remote_binary"]:
            self.package_db[pkg] = prebuilt_pkgs[pkg]

      seconds = time.time() - start
      if "--quiet" not in emerge.opts:
        print "Prebuilt DB populated in %dm%.1fs" % (seconds / 60, seconds % 60)

      return prebuilt_pkgs

    ReverseTree(deps_tree)
    BuildFinalPackageSet()
    AddSecretDeps()

    # Mark that we want to use remote binaries only for a particular package.
    vardb = emerge.depgraph._frozen_config.trees[root]["vartree"].dbapi
    for pkg in self.force_remote_binary:
      for db_pkg in final_db.match_pkgs(pkg):
        match = deps_map.get(str(db_pkg.cpv))
        if match:
          match["force_remote_binary"] = True

        rebuild_blacklist.add(str(db_pkg.cpv))
        if not vardb.match_pkgs(db_pkg.cpv):
          MergeChildren(str(db_pkg.cpv), "mandatory")

    if self.no_workon_deps:
      for pkg in self.mandatory_source.copy():
        for db_pkg in final_db.match_pkgs(pkg):
          deps_map[str(db_pkg.cpv)]["mandatory_source"] = True
    else:
      for pkg in self.mandatory_source.copy():
        for db_pkg in final_db.match_pkgs(pkg):
          MergeChildren(str(db_pkg.cpv), "mandatory_source")

    cycles = FindCycles()
    if self.rebuild:
      local_pkgs = LocalPackageDatabase()
      AutoRebuildDeps(local_pkgs, remote_pkgs, cycles)

    # We need to remove installed packages so that we can use the dependency
    # ordering of the install process to show us what cycles to crack. Once
    # we've done that, we also need to recalculate our list of cycles so that
    # we don't include the installed packages in our cycles.
    RemoveInstalledPackages()
    SanitizeTree()
    if deps_map:
      if "--usepkg" in emerge.opts:
        UsePrebuiltPackages(remote_pkgs)
    return deps_map
1273
  def PrintInstallPlan(self, deps_map):
    """Print an emerge-style install plan.

    The install plan lists what packages we're installing, in order.
    It's useful for understanding what parallel_emerge is doing.

    Args:
      deps_map: The dependency graph.
    """

    def InstallPlanAtNode(target, deps_map):
      # Return the packages we would merge, in order, starting from a
      # package with no outstanding dependencies. As a side effect, this
      # deletes satisfied "needs" edges from the (copied) graph so every
      # package appears at most once.
      nodes = []
      nodes.append(target)
      for dep in deps_map[target]["provides"]:
        del deps_map[dep]["needs"][target]
        if not deps_map[dep]["needs"]:
          nodes.extend(InstallPlanAtNode(dep, deps_map))
      return nodes

    # Work on a deep copy so the real dependency graph is left intact.
    deps_map = copy.deepcopy(deps_map)
    install_plan = []
    plan = set()
    for target, info in deps_map.iteritems():
      if not info["needs"] and target not in plan:
        for item in InstallPlanAtNode(target, deps_map):
          plan.add(item)
          install_plan.append(self.package_db[item])

    # Drop everything we managed to plan. Anything left over can never
    # become ready, i.e. it is stuck in a dependency cycle.
    for pkg in plan:
      del deps_map[pkg]

    if deps_map:
      print "Cyclic dependencies:", " ".join(deps_map)
      PrintDepsMap(deps_map)
      sys.exit(1)

    self.emerge.depgraph.display(install_plan)
1311
1312
1313def PrintDepsMap(deps_map):
1314 """Print dependency graph, for each package list it's prerequisites."""
1315 for i in sorted(deps_map):
1316 print "%s: (%s) needs" % (i, deps_map[i]["action"])
1317 needs = deps_map[i]["needs"]
1318 for j in sorted(needs):
1319 print " %s" % (j)
1320 if not needs:
1321 print " no dependencies"
1322
1323
class EmergeJobState(object):
  """Snapshot of one emerge job's progress, shared between processes."""

  __slots__ = ["done", "filename", "last_notify_timestamp", "last_output_seek",
               "last_output_timestamp", "pkgname", "retcode", "start_timestamp",
               "target"]

  def __init__(self, target, pkgname, done, filename, start_timestamp,
               retcode=None):
    # Full package identifier, e.g. chromeos-base/chromeos-0.0.1-r60.
    self.target = target

    # Short package name, e.g. chromeos-0.0.1-r60.
    self.pkgname = pkgname

    # True once the merge has finished.
    self.done = done

    # Log file currently receiving this job's output.
    self.filename = filename

    # When the job started, and when we last told the user about it. We
    # announce a job as soon as it starts, so both begin at start_timestamp.
    self.start_timestamp = start_timestamp
    self.last_notify_timestamp = start_timestamp

    # Byte offset just past the last complete output line we already echoed.
    # Lets us resume printing this job's log right where we left off.
    self.last_output_seek = 0

    # When we last echoed output for this job; zero means never.
    self.last_output_timestamp = 0

    # Exit status of the merge, or None while it is still running.
    self.retcode = retcode
1364
1365
def SetupWorkerSignals():
  """Install SIGINT/SIGTERM handlers that exit quietly.

  By default, CTRL-C makes every child process dump a KeyboardInterrupt
  traceback, which is just noise for the user. Instead, restore the default
  dispositions (so a repeat signal still kills us outright) and exit with
  status 1.
  """
  def QuietExit(signum, frame):
    # Drop back to the default handlers so we can't be re-entered.
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    signal.signal(signal.SIGTERM, signal.SIG_DFL)

    # Try to exit cleanly.
    sys.exit(1)

  signal.signal(signal.SIGINT, QuietExit)
  signal.signal(signal.SIGTERM, QuietExit)
1381
1382
def EmergeWorker(task_queue, job_queue, emerge, package_db):
  """This worker emerges any packages given to it on the task_queue.

  Args:
    task_queue: The queue of tasks for this worker to do.
    job_queue: The queue of results from the worker.
    emerge: An EmergeData() object.
    package_db: A dict, mapping package ids to portage Package objects.

  It expects package identifiers to be passed to it via task_queue. When
  a task is started, it pushes the (target, filename) to the started_queue.
  The output is stored in filename. When a merge starts or finishes, we push
  EmergeJobState objects to the job_queue.
  """

  SetupWorkerSignals()
  settings, trees, mtimedb = emerge.settings, emerge.trees, emerge.mtimedb
  opts, spinner = emerge.opts, emerge.spinner
  # The parent already resolved the full dependency graph, so each worker
  # merges a single package at a time with dependency checking disabled.
  opts["--nodeps"] = True
  if new_portage:
    # When Portage launches new processes, it goes on a rampage and closes all
    # open file descriptors. Ask Portage not to do that, as it breaks us.
    portage.process.get_open_fds = lambda: []
  while True:
    # Wait for a new item to show up on the queue. This is a blocking wait,
    # so if there's nothing to do, we just sit here.
    target = task_queue.get()
    if not target:
      # If target is None, this means that the main thread wants us to quit.
      # The other workers need to exit too, so we'll push the message back on
      # to the queue so they'll get it too.
      task_queue.put(target)
      return
    db_pkg = package_db[target]
    db_pkg.root_config = emerge.root_config
    install_list = [db_pkg]
    pkgname = db_pkg.pf
    # delete=False because the print worker reads (and later unlinks) this
    # log file after we are done writing it.
    output = tempfile.NamedTemporaryFile(prefix=pkgname + "-", delete=False)
    start_timestamp = time.time()
    job = EmergeJobState(target, pkgname, False, output.name, start_timestamp)
    job_queue.put(job)
    if "--pretend" in opts:
      retcode = 0
    else:
      # Redirect stdout/stderr into the per-job log file while the merge
      # runs, so parallel builds don't interleave output on the console.
      save_stdout = sys.stdout
      save_stderr = sys.stderr
      try:
        sys.stdout = output
        sys.stderr = output
        if new_portage:
          emerge.scheduler_graph.mergelist = install_list
          scheduler = Scheduler(settings, trees, mtimedb, opts, spinner,
                                favorites=[], graph_config=emerge.scheduler_graph)
        else:
          scheduler = Scheduler(settings, trees, mtimedb, opts, spinner,
                                install_list, [], emerge.scheduler_graph)
        retcode = scheduler.merge()
      except Exception:
        # Record the traceback in the job's log and report failure rather
        # than killing the worker.
        traceback.print_exc(file=output)
        retcode = 1
      finally:
        sys.stdout = save_stdout
        sys.stderr = save_stderr
        output.close()
      # Scheduler.merge() may return None on success; normalize it to 0.
      if retcode is None:
        retcode = 0

    job = EmergeJobState(target, pkgname, True, output.name, start_timestamp,
                         retcode)
    job_queue.put(job)
1453
1454
1455class LinePrinter(object):
1456 """Helper object to print a single line."""
1457
1458 def __init__(self, line):
1459 self.line = line
1460
1461 def Print(self, seek_locations):
1462 print self.line
1463
1464
class JobPrinter(object):
  """Helper object to print output of a job."""

  def __init__(self, job, unlink=False):
    """Print output of job.

    If unlink is True, unlink the job output file when done."""
    # Captured once at creation time: used both for the elapsed-time display
    # and to stamp the job's last_output_timestamp.
    self.current_time = time.time()
    self.job = job
    self.unlink = unlink

  def Print(self, seek_locations):
    """Print the portion of the job's log not yet shown.

    Args:
      seek_locations: Shared dict mapping log filenames to the byte offset
        of the last complete line already printed; updated in place so a
        later Print of the same file resumes where this one stopped.
    """

    job = self.job

    # Calculate how long the job has been running.
    seconds = self.current_time - job.start_timestamp

    # Note that we've printed out the job so far.
    job.last_output_timestamp = self.current_time

    # Note that we're starting the job
    info = "job %s (%dm%.1fs)" % (job.pkgname, seconds / 60, seconds % 60)
    last_output_seek = seek_locations.get(job.filename, 0)
    if last_output_seek:
      print "=== Continue output for %s ===" % info
    else:
      print "=== Start output for %s ===" % info

    # Print actual output from job. errors='replace' keeps us alive even if
    # the build wrote bytes that aren't valid UTF-8.
    f = codecs.open(job.filename, encoding='utf-8', errors='replace')
    f.seek(last_output_seek)
    prefix = job.pkgname + ":"
    for line in f:

      # Save off our position in the file, but only advance past complete
      # lines -- a partial final line will be re-read on the next Print.
      if line and line[-1] == "\n":
        last_output_seek = f.tell()
        line = line[:-1]

      # Print our line
      print prefix, line.encode('utf-8', 'replace')
    f.close()

    # Save our last spot in the file so that we don't print out the same
    # location twice.
    seek_locations[job.filename] = last_output_seek

    # Note end of output section
    if job.done:
      print "=== Complete: %s ===" % info
    else:
      print "=== Still running: %s ===" % info

    if self.unlink:
      os.unlink(job.filename)
1521
1522
def PrintWorker(queue):
  """A worker that prints stuff to the screen as requested."""

  def ExitHandler(signum, frame):
    # Restore the default dispositions so a second signal kills us for real.
    signal.signal(signal.SIGINT, signal.SIG_DFL)
    signal.signal(signal.SIGTERM, signal.SIG_DFL)

  # Swallow the first SIGINT / SIGTERM: the parent decides when we exit, by
  # pushing a falsy sentinel onto our queue.
  signal.signal(signal.SIGINT, ExitHandler)
  signal.signal(signal.SIGTERM, ExitHandler)

  # Track how far into each log file we have already printed, so output for
  # the same job can be resumed across multiple print requests.
  seek_locations = {}
  while True:
    try:
      request = queue.get()
      if not request:
        break
      request.Print(seek_locations)
    except IOError as err:
      if err.errno == errno.EINTR:
        # A signal interrupted the queue read; just keep printing.
        continue
      raise
1551 raise
1552
1553
1554class EmergeQueue(object):
1555 """Class to schedule emerge jobs according to a dependency graph."""
1556
  def __init__(self, deps_map, emerge, package_db, show_output):
    """Set up worker processes and schedule the initially-ready packages.

    Args:
      deps_map: The dependency graph produced by GenDependencyGraph().
      emerge: An EmergeData() object.
      package_db: A dict, mapping package ids to portage Package objects.
      show_output: If True, interim build output is shown frequently.
    """
    # Store the dependency graph.
    self._deps_map = deps_map
    # Initialize the running queue to empty
    self._jobs = {}
    # List of total package installs represented in deps_map.
    install_jobs = [x for x in deps_map if deps_map[x]["action"] == "merge"]
    self._total_jobs = len(install_jobs)
    self._show_output = show_output

    # In --pretend mode the plan has already been displayed; there is
    # nothing to actually merge.
    if "--pretend" in emerge.opts:
      print "Skipping merge because of --pretend mode."
      sys.exit(0)

    # Setup scheduler graph object. This is used by the child processes
    # to help schedule jobs.
    emerge.scheduler_graph = emerge.depgraph.schedulerGraph()

    # Calculate how many jobs we can run in parallel. We don't want to pass
    # the --jobs flag over to emerge itself, because that'll tell emerge to
    # hide its output, and said output is quite useful for debugging hung
    # jobs.
    procs = min(self._total_jobs,
                emerge.opts.pop("--jobs", multiprocessing.cpu_count()))
    self._emerge_queue = multiprocessing.Queue()
    self._job_queue = multiprocessing.Queue()
    self._print_queue = multiprocessing.Queue()
    args = (self._emerge_queue, self._job_queue, emerge, package_db)
    self._pool = multiprocessing.Pool(procs, EmergeWorker, args)
    # A dedicated process serializes all console output so messages from
    # concurrent builds don't interleave.
    self._print_worker = multiprocessing.Process(target=PrintWorker,
                                                 args=[self._print_queue])
    self._print_worker.start()

    # Initialize the failed queue to empty.
    self._retry_queue = []
    self._failed = set()

    # Print an update before we launch the merges.
    self._Status()

    # Setup an exit handler so that we print nice messages if we are
    # terminated.
    self._SetupExitHandler()

    # Schedule our jobs. Only packages with no outstanding dependencies are
    # ready now; the rest get scheduled as their dependencies finish.
    for target, info in deps_map.items():
      if not info["needs"]:
        self._Schedule(target)
1605
1606 def _SetupExitHandler(self):
1607
1608 def ExitHandler(signum, frame):
1609
1610 # Kill our signal handlers so we don't get called recursively
1611 signal.signal(signal.SIGINT, signal.SIG_DFL)
1612 signal.signal(signal.SIGTERM, signal.SIG_DFL)
1613
1614 # Print our current job status
1615 for target, job in self._jobs.iteritems():
1616 if job:
1617 self._print_queue.put(JobPrinter(job, unlink=True))
1618
1619 # Notify the user that we are exiting
1620 self._Print("Exiting on signal %s" % signum)
1621
1622 # Kill child threads, then exit.
1623 self._Exit()
1624 sys.exit(1)
1625
1626 # Print out job status when we are killed
1627 signal.signal(signal.SIGINT, ExitHandler)
1628 signal.signal(signal.SIGTERM, ExitHandler)
1629
1630 def _Schedule(self, target):
1631 # We maintain a tree of all deps, if this doesn't need
1632 # to be installed just free up it's children and continue.
1633 # It is possible to reinstall deps of deps, without reinstalling
1634 # first level deps, like so:
1635 # chromeos (merge) -> eselect (nomerge) -> python (merge)
1636 if self._deps_map[target]["action"] == "nomerge":
1637 self._Finish(target)
1638 else:
1639 # Kick off the build if it's marked to be built.
1640 self._jobs[target] = None
1641 self._emerge_queue.put(target)
1642
1643 def _LoadAvg(self):
1644 loads = open("/proc/loadavg", "r").readline().split()[:3]
1645 return " ".join(loads)
1646
1647 def _Print(self, line):
1648 """Print a single line."""
1649 self._print_queue.put(LinePrinter(line))
1650
  def _Status(self):
    """Print a periodic status update about running jobs.

    For each running job, either dumps its accumulated output (if it has
    been quiet for `interval` seconds) or prints a short "still building"
    notification (every `notify_interval` seconds). If neither applies to
    any job, prints a one-line summary of overall queue progress.
    """
    current_time = time.time()
    no_output = True

    # Print interim output every minute if --show-output is used. Otherwise,
    # print notifications about running packages every 2 minutes, and print
    # full output for jobs that have been running for 60 minutes or more.
    if self._show_output:
      interval = 60
      notify_interval = 0
    else:
      interval = 60 * 60
      notify_interval = 60 * 2
    for target, job in self._jobs.iteritems():
      # Entries may be None placeholders (queued but not yet started).
      if job:
        # Measure quietness from whichever is later: job start or the last
        # time we dumped its output.
        last_timestamp = max(job.start_timestamp, job.last_output_timestamp)
        if last_timestamp + interval < current_time:
          self._print_queue.put(JobPrinter(job))
          job.last_output_timestamp = current_time
          no_output = False
        elif (notify_interval and
            job.last_notify_timestamp + notify_interval < current_time):
          job_seconds = current_time - job.start_timestamp
          args = (job.pkgname, job_seconds / 60, job_seconds % 60, job.filename)
          info = "Still building %s (%dm%.1fs). Logs in %s" % args
          job.last_notify_timestamp = current_time
          self._Print(info)
          no_output = False

    # If we haven't printed any messages yet, print a general status message
    # here.
    if no_output:
      seconds = current_time - GLOBAL_START
      line = ("Pending %s, Ready %s, Running %s, Retrying %s, Total %s "
              "[Time %dm%.1fs Load %s]")
      # "Running" is approximated as jobs we know about minus jobs still
      # sitting in the emerge queue.
      qsize = self._emerge_queue.qsize()
      self._Print(line % (len(self._deps_map), qsize, len(self._jobs) - qsize,
                          len(self._retry_queue), self._total_jobs,
                          seconds / 60, seconds % 60, self._LoadAvg()))
1691
1692 def _Finish(self, target):
1693 """Mark a target as completed and unblock dependecies."""
1694 for dep in self._deps_map[target]["provides"]:
1695 del self._deps_map[dep]["needs"][target]
1696 if not self._deps_map[dep]["needs"]:
1697 self._Schedule(dep)
1698 self._deps_map.pop(target)
1699
1700 def _Retry(self):
1701 if self._retry_queue:
1702 target = self._retry_queue.pop(0)
1703 self._Schedule(target)
1704 self._Print("Retrying emerge of %s." % target)
1705
  def _Exit(self):
    """Shut down the emerge worker pool, then the print worker.

    Order matters: workers may still push output, so the print worker is
    only stopped after the pool has fully drained and joined.
    """
    # Tell emerge workers to exit. They all exit when 'None' is pushed
    # to the queue.
    self._emerge_queue.put(None)
    self._pool.close()
    self._pool.join()

    # Now that our workers are finished, we can kill the print queue.
    self._print_queue.put(None)
    self._print_worker.join()
1716
1717 def Run(self):
1718 """Run through the scheduled ebuilds.
1719
1720 Keep running so long as we have uninstalled packages in the
1721 dependency graph to merge.
1722 """
1723 while self._deps_map:
1724 # Check here that we are actually waiting for something.
1725 if (self._emerge_queue.empty() and
1726 self._job_queue.empty() and
1727 not self._jobs and
1728 self._deps_map):
1729 # If we have failed on a package, retry it now.
1730 if self._retry_queue:
1731 self._Retry()
1732 else:
1733 # Tell child threads to exit.
1734 self._Exit()
1735
1736 # The dependency map is helpful for debugging failures.
1737 PrintDepsMap(self._deps_map)
1738
1739 # Tell the user why we're exiting.
1740 if self._failed:
1741 print "Packages failed: %s" % " ,".join(self._failed)
1742 else:
1743 print "Deadlock! Circular dependencies!"
1744 sys.exit(1)
1745
1746 try:
1747 job = self._job_queue.get(timeout=5)
1748 except Queue.Empty:
1749 # Print an update.
1750 self._Status()
1751 continue
1752
1753 target = job.target
1754
1755 if not job.done:
1756 self._jobs[target] = job
1757 self._Print("Started %s (logged in %s)" % (target, job.filename))
1758 continue
1759
1760 # Print output of job
1761 if self._show_output or job.retcode != 0:
1762 self._print_queue.put(JobPrinter(job, unlink=True))
1763 else:
1764 os.unlink(job.filename)
1765 del self._jobs[target]
1766
1767 seconds = time.time() - job.start_timestamp
1768 details = "%s (in %dm%.1fs)" % (target, seconds / 60, seconds % 60)
1769
1770 # Complain if necessary.
1771 if job.retcode != 0:
1772 # Handle job failure.
1773 if target in self._failed:
1774 # If this job has failed previously, give up.
1775 self._Print("Failed %s. Your build has failed." % details)
1776 else:
1777 # Queue up this build to try again after a long while.
1778 self._retry_queue.append(target)
1779 self._failed.add(target)
1780 self._Print("Failed %s, retrying later." % details)
1781 else:
1782 if target in self._failed and self._retry_queue:
1783 # If we have successfully retried a failed package, and there
1784 # are more failed packages, try the next one. We will only have
1785 # one retrying package actively running at a time.
1786 self._Retry()
1787
1788 self._Print("Completed %s" % details)
1789 # Mark as completed and unblock waiting ebuilds.
1790 self._Finish(target)
1791
1792 # Print an update.
1793 self._Status()
1794
1795 # Tell child threads to exit.
1796 self._Print("Merge complete")
1797 self._Exit()
1798
1799
1800def main():
1801
1802 deps = DepGraphGenerator()
1803 deps.Initialize(sys.argv[1:])
1804 emerge = deps.emerge
1805
1806 if emerge.action is not None:
1807 sys.argv = deps.ParseParallelEmergeArgs(sys.argv)
1808 sys.exit(emerge_main())
1809 elif not emerge.cmdline_packages:
1810 Usage()
1811 sys.exit(1)
1812
1813 # Unless we're in pretend mode, there's not much point running without
1814 # root access. We need to be able to install packages.
1815 #
1816 # NOTE: Even if you're running --pretend, it's a good idea to run
1817 # parallel_emerge with root access so that portage can write to the
1818 # dependency cache. This is important for performance.
1819 if "--pretend" not in emerge.opts and portage.secpass < 2:
1820 print "parallel_emerge: superuser access is required."
1821 sys.exit(1)
1822
1823 if "--quiet" not in emerge.opts:
1824 cmdline_packages = " ".join(emerge.cmdline_packages)
1825 nomerge_packages = " ".join(deps.nomerge)
1826 print "Starting fast-emerge."
1827 print " Building package %s on %s" % (cmdline_packages,
1828 deps.board or "root")
1829 if nomerge_packages:
1830 print " Skipping package %s on %s" % (nomerge_packages,
1831 deps.board or "root")
1832
1833 remote_pkgs = {}
1834 if "--getbinpkg" in emerge.opts:
1835 binhost = emerge.settings["PORTAGE_BINHOST"]
1836 try:
1837 remote_pkgs = deps.RemotePackageDatabase(binhost)
1838 except (urllib2.HTTPError, urllib2.URLError):
1839 print "Cannot resolve binhost. Building from source..."
1840 del emerge.opts["--getbinpkg"]
1841
1842 deps_tree, deps_info = deps.GenDependencyTree(remote_pkgs)
1843
1844 # You want me to be verbose? I'll give you two trees! Twice as much value.
1845 if "--tree" in emerge.opts and "--verbose" in emerge.opts:
1846 deps.PrintTree(deps_tree)
1847
1848 deps_graph = deps.GenDependencyGraph(deps_tree, deps_info, remote_pkgs)
1849
1850 # OK, time to print out our progress so far.
1851 deps.PrintInstallPlan(deps_graph)
1852 if "--tree" in emerge.opts:
1853 PrintDepsMap(deps_graph)
1854
1855 # Are we upgrading portage? If so, and there are more packages to merge,
1856 # schedule a restart of parallel_emerge to merge the rest. This ensures that
1857 # we pick up all updates to portage settings before merging any more
1858 # packages.
1859 portage_upgrade = False
1860 root = emerge.settings["ROOT"]
1861 final_db = emerge.depgraph._dynamic_config.mydbapi[root]
1862 if root == "/":
1863 for db_pkg in final_db.match_pkgs("sys-apps/portage"):
1864 portage_pkg = deps_graph.get(db_pkg.cpv)
1865 if portage_pkg and len(deps_graph) > 1:
1866 portage_pkg["needs"].clear()
1867 portage_pkg["provides"].clear()
1868 deps_graph = { str(db_pkg.cpv): portage_pkg }
1869 portage_upgrade = True
1870 if "--quiet" not in emerge.opts:
1871 print "Upgrading portage first, then restarting..."
1872
1873 # Run the queued emerges.
1874 scheduler = EmergeQueue(deps_graph, emerge, deps.package_db, deps.show_output)
1875 scheduler.Run()
1876
1877 # Update world.
1878 if ("--oneshot" not in emerge.opts and
1879 "--pretend" not in emerge.opts):
1880 world_set = emerge.root_config.sets["selected"]
1881 new_world_pkgs = []
1882 for pkg in emerge.cmdline_packages:
1883 for db_pkg in final_db.match_pkgs(pkg):
1884 print "Adding %s to world" % db_pkg.cp
1885 new_world_pkgs.append(db_pkg.cp)
1886 if new_world_pkgs:
1887 world_set.update(new_world_pkgs)
1888
1889 # Update environment (library cache, symlinks, etc.)
1890 if deps.board and "--pretend" not in emerge.opts:
1891 portage.env_update()
1892
1893 # If we already upgraded portage, we don't need to do so again. But we do
1894 # need to upgrade the rest of the packages. So we'll go ahead and do that.
1895 if portage_upgrade:
1896 args = sys.argv[1:] + ["--nomerge=sys-apps/portage"]
1897 os.execvp(os.path.realpath(sys.argv[0]), args)
1898
1899 print "Done"
1900 sys.exit(0)
1901
# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
  main()