Diffstat (limited to 'portage_with_autodep/pym/_emerge')
-rw-r--r--  portage_with_autodep/pym/_emerge/AbstractDepPriority.py  4
-rw-r--r--  portage_with_autodep/pym/_emerge/AbstractDepPriority.pyo  bin  0 -> 1757 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/AbstractEbuildProcess.py  52
-rw-r--r--  portage_with_autodep/pym/_emerge/AbstractEbuildProcess.pyo  bin  0 -> 10082 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/AbstractPollTask.py  112
-rw-r--r--  portage_with_autodep/pym/_emerge/AbstractPollTask.pyo  bin  0 -> 4918 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/AsynchronousLock.py  90
-rw-r--r--  portage_with_autodep/pym/_emerge/AsynchronousLock.pyo  bin  0 -> 10536 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/AsynchronousTask.py  45
-rw-r--r--  portage_with_autodep/pym/_emerge/AsynchronousTask.pyo  bin  0 -> 5610 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/AtomArg.pyo  bin  0 -> 771 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/Binpkg.py  85
-rw-r--r--  portage_with_autodep/pym/_emerge/Binpkg.pyo  bin  0 -> 13229 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.py  6
-rw-r--r--  portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.pyo  bin  0 -> 3084 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.py  3
-rw-r--r--  portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.pyo  bin  0 -> 1337 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/BinpkgFetcher.py  3
-rw-r--r--  portage_with_autodep/pym/_emerge/BinpkgFetcher.pyo  bin  0 -> 5698 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/BinpkgPrefetcher.pyo  bin  0 -> 1932 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/BinpkgVerifier.pyo  bin  0 -> 2515 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/Blocker.pyo  bin  0 -> 853 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/BlockerCache.py  21
-rw-r--r--  portage_with_autodep/pym/_emerge/BlockerCache.pyo  bin  0 -> 6840 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/BlockerDB.py  5
-rw-r--r--  portage_with_autodep/pym/_emerge/BlockerDB.pyo  bin  0 -> 4286 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/BlockerDepPriority.pyo  bin  0 -> 797 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/CompositeTask.py  7
-rw-r--r--  portage_with_autodep/pym/_emerge/CompositeTask.pyo  bin  0 -> 5111 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/DepPriority.pyo  bin  0 -> 1653 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/DepPriorityNormalRange.pyo  bin  0 -> 1866 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/DepPrioritySatisfiedRange.pyo  bin  0 -> 2980 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/Dependency.py  5
-rw-r--r--  portage_with_autodep/pym/_emerge/Dependency.pyo  bin  0 -> 1092 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/DependencyArg.pyo  bin  0 -> 1612 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildBinpkg.py  6
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildBinpkg.pyo  bin  0 -> 2007 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildBuild.py  29
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildBuild.pyo  bin  0 -> 11947 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildBuildDir.py  5
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildBuildDir.pyo  bin  0 -> 3933 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildExecuter.py  4
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildExecuter.pyo  bin  0 -> 3424 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildFetcher.py  28
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildFetcher.pyo  bin  0 -> 9374 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildFetchonly.py  6
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildFetchonly.pyo  bin  0 -> 1552 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildIpcDaemon.py  31
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildIpcDaemon.pyo  bin  0 -> 3227 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildMerge.py  4
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildMerge.pyo  bin  0 -> 2558 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildMetadataPhase.py  154
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildMetadataPhase.pyo  bin  0 -> 5787 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildPhase.py  15
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildPhase.py.rej  12
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildPhase.pyo  bin  0 -> 11191 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildProcess.pyo  bin  0 -> 967 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/EbuildSpawnProcess.pyo  bin  0 -> 897 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/EventsAnalyser.py  180
-rw-r--r--  portage_with_autodep/pym/_emerge/EventsLogger.py  102
-rw-r--r--  portage_with_autodep/pym/_emerge/FakeVartree.py  26
-rw-r--r--  portage_with_autodep/pym/_emerge/FakeVartree.pyo  bin  0 -> 9274 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/FifoIpcDaemon.py  25
-rw-r--r--  portage_with_autodep/pym/_emerge/FifoIpcDaemon.pyo  bin  0 -> 2902 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/JobStatusDisplay.py  21
-rw-r--r--  portage_with_autodep/pym/_emerge/JobStatusDisplay.pyo  bin  0 -> 9115 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/MergeListItem.py  2
-rw-r--r--  portage_with_autodep/pym/_emerge/MergeListItem.pyo  bin  0 -> 3960 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/MetadataRegen.py  79
-rw-r--r--  portage_with_autodep/pym/_emerge/MetadataRegen.pyo  bin  0 -> 5760 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/MiscFunctionsProcess.py  10
-rw-r--r--  portage_with_autodep/pym/_emerge/MiscFunctionsProcess.pyo  bin  0 -> 1701 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/Package.py  50
-rw-r--r--  portage_with_autodep/pym/_emerge/Package.pyo  bin  0 -> 21535 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/PackageArg.pyo  bin  0 -> 1110 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/PackageMerge.py  2
-rw-r--r--  portage_with_autodep/pym/_emerge/PackageMerge.pyo  bin  0 -> 1509 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/PackageUninstall.pyo  bin  0 -> 4110 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/PackageVirtualDbapi.py  22
-rw-r--r--  portage_with_autodep/pym/_emerge/PackageVirtualDbapi.pyo  bin  0 -> 6099 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/PipeReader.py  60
-rw-r--r--  portage_with_autodep/pym/_emerge/PipeReader.pyo  bin  0 -> 3672 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/PollConstants.py  18
-rw-r--r--  portage_with_autodep/pym/_emerge/PollScheduler.py  346
-rw-r--r--  portage_with_autodep/pym/_emerge/PollScheduler.pyo  bin  0 -> 8151 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/PollSelectAdapter.py  73
-rw-r--r--  portage_with_autodep/pym/_emerge/ProgressHandler.pyo  bin  0 -> 1115 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/QueueScheduler.py  79
-rw-r--r--  portage_with_autodep/pym/_emerge/QueueScheduler.pyo  bin  0 -> 3658 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/RootConfig.py  2
-rw-r--r--  portage_with_autodep/pym/_emerge/RootConfig.pyo  bin  0 -> 1404 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/Scheduler.py  399
-rw-r--r--  portage_with_autodep/pym/_emerge/Scheduler.pyo  bin  0 -> 55155 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/SequentialTaskQueue.py  72
-rw-r--r--  portage_with_autodep/pym/_emerge/SequentialTaskQueue.pyo  bin  0 -> 3343 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/SetArg.pyo  bin  0 -> 719 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/SlotObject.py  42
-rw-r--r--  portage_with_autodep/pym/_emerge/SpawnProcess.py  107
-rw-r--r--  portage_with_autodep/pym/_emerge/SpawnProcess.pyo  bin  0 -> 6006 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/SubProcess.py  65
-rw-r--r--  portage_with_autodep/pym/_emerge/SubProcess.pyo  bin  0 -> 4178 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/Task.py  5
-rw-r--r--  portage_with_autodep/pym/_emerge/Task.pyo  bin  0 -> 2148 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/TaskScheduler.py  7
-rw-r--r--  portage_with_autodep/pym/_emerge/TaskScheduler.pyo  bin  0 -> 1309 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/TaskSequence.pyo  bin  0 -> 2147 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/UninstallFailure.pyo  bin  0 -> 785 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/UnmergeDepPriority.pyo  bin  0 -> 1345 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/UseFlagDisplay.pyo  bin  0 -> 4148 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/__init__.pyo  bin  0 -> 129 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/_find_deep_system_runtime_deps.pyo  bin  0 -> 1299 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.py  2
-rw-r--r--  portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.pyo  bin  0 -> 606 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/actions.py  500
-rw-r--r--  portage_with_autodep/pym/_emerge/actions.pyo  bin  0 -> 80730 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/chk_updated_cfg_files.py  42
-rw-r--r--  portage_with_autodep/pym/_emerge/clear_caches.pyo  bin  0 -> 719 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/countdown.pyo  bin  0 -> 917 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/create_depgraph_params.py  20
-rw-r--r--  portage_with_autodep/pym/_emerge/create_depgraph_params.pyo  bin  0 -> 1954 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/create_world_atom.py  35
-rw-r--r--  portage_with_autodep/pym/_emerge/create_world_atom.pyo  bin  0 -> 2648 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/depgraph.py  1140
-rw-r--r--  portage_with_autodep/pym/_emerge/depgraph.pyo  bin  0 -> 162245 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/emergelog.py  9
-rw-r--r--  portage_with_autodep/pym/_emerge/emergelog.pyo  bin  0 -> 1927 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/getloadavg.pyo  bin  0 -> 931 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/help.py  798
-rw-r--r--  portage_with_autodep/pym/_emerge/help.pyo  bin  0 -> 2546 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/is_valid_package_atom.pyo  bin  0 -> 910 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/main.py  295
-rw-r--r--  portage_with_autodep/pym/_emerge/main.pyo  bin  0 -> 52644 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/post_emerge.py  165
-rw-r--r--  portage_with_autodep/pym/_emerge/resolver/__init__.pyo  bin  0 -> 138 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/resolver/backtracking.py  27
-rw-r--r--  portage_with_autodep/pym/_emerge/resolver/backtracking.pyo  bin  0 -> 7838 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/resolver/circular_dependency.py  5
-rw-r--r--  portage_with_autodep/pym/_emerge/resolver/circular_dependency.pyo  bin  0 -> 7555 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/resolver/output.py  235
-rw-r--r--  portage_with_autodep/pym/_emerge/resolver/output.pyo  bin  0 -> 28079 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/resolver/output_helpers.py  142
-rw-r--r--  portage_with_autodep/pym/_emerge/resolver/output_helpers.pyo  bin  0 -> 18016 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/resolver/slot_collision.py  22
-rw-r--r--  portage_with_autodep/pym/_emerge/resolver/slot_collision.pyo  bin  0 -> 23644 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/search.py  29
-rw-r--r--  portage_with_autodep/pym/_emerge/search.pyo  bin  0 -> 11825 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/show_invalid_depstring_notice.pyo  bin  0 -> 2001 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/stdout_spinner.pyo  bin  0 -> 3440 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/sync/__init__.pyo  bin  0 -> 134 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/sync/getaddrinfo_validate.pyo  bin  0 -> 847 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/sync/old_tree_timestamp.pyo  bin  0 -> 2746 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/unmerge.py  55
-rw-r--r--  portage_with_autodep/pym/_emerge/unmerge.pyo  bin  0 -> 14236 bytes
-rw-r--r--  portage_with_autodep/pym/_emerge/userquery.py  6
-rw-r--r--  portage_with_autodep/pym/_emerge/userquery.pyo  bin  0 -> 2254 bytes
155 files changed, 2969 insertions, 3084 deletions
diff --git a/portage_with_autodep/pym/_emerge/AbstractDepPriority.py b/portage_with_autodep/pym/_emerge/AbstractDepPriority.py
index 94a9379..94f26ef 100644
--- a/portage_with_autodep/pym/_emerge/AbstractDepPriority.py
+++ b/portage_with_autodep/pym/_emerge/AbstractDepPriority.py
@@ -1,8 +1,8 @@
-# Copyright 1999-2009 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import copy
-from _emerge.SlotObject import SlotObject
+from portage.util.SlotObject import SlotObject
class AbstractDepPriority(SlotObject):
__slots__ = ("buildtime", "runtime", "runtime_post")
diff --git a/portage_with_autodep/pym/_emerge/AbstractDepPriority.pyo b/portage_with_autodep/pym/_emerge/AbstractDepPriority.pyo
new file mode 100644
index 0000000..b6a9871
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AbstractDepPriority.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.py b/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.py
index 4147ecb..c7b8f83 100644
--- a/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.py
+++ b/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.py
@@ -19,8 +19,9 @@ from portage.util import apply_secpass_permissions
class AbstractEbuildProcess(SpawnProcess):
__slots__ = ('phase', 'settings',) + \
- ('_build_dir', '_ipc_daemon', '_exit_command',)
+ ('_build_dir', '_ipc_daemon', '_exit_command', '_exit_timeout_id')
_phases_without_builddir = ('clean', 'cleanrm', 'depend', 'help',)
+ _phases_interactive_whitelist = ('config',)
# Number of milliseconds to allow natural exit of the ebuild
# process after it has called the exit command via IPC. It
@@ -92,7 +93,20 @@ class AbstractEbuildProcess(SpawnProcess):
else:
self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None)
- SpawnProcess._start(self)
+ if self.fd_pipes is None:
+ self.fd_pipes = {}
+ null_fd = None
+ if 0 not in self.fd_pipes and \
+ self.phase not in self._phases_interactive_whitelist and \
+ "interactive" not in self.settings.get("PROPERTIES", "").split():
+ null_fd = os.open('/dev/null', os.O_RDONLY)
+ self.fd_pipes[0] = null_fd
+
+ try:
+ SpawnProcess._start(self)
+ finally:
+ if null_fd is not None:
+ os.close(null_fd)
def _init_ipc_fifos(self):
@@ -143,13 +157,29 @@ class AbstractEbuildProcess(SpawnProcess):
def _exit_command_callback(self):
if self._registered:
# Let the process exit naturally, if possible.
- self.scheduler.schedule(self._reg_id, timeout=self._exit_timeout)
- if self._registered:
- # If it doesn't exit naturally in a reasonable amount
- # of time, kill it (solves bug #278895). We try to avoid
- # this when possible since it makes sandbox complain about
- # being killed by a signal.
- self.cancel()
+ self._exit_timeout_id = \
+ self.scheduler.timeout_add(self._exit_timeout,
+ self._exit_command_timeout_cb)
+
+ def _exit_command_timeout_cb(self):
+ if self._registered:
+ # If it doesn't exit naturally in a reasonable amount
+ # of time, kill it (solves bug #278895). We try to avoid
+ # this when possible since it makes sandbox complain about
+ # being killed by a signal.
+ self.cancel()
+ self._exit_timeout_id = \
+ self.scheduler.timeout_add(self._cancel_timeout,
+ self._cancel_timeout_cb)
+ else:
+ self._exit_timeout_id = None
+
+ return False # only run once
+
+ def _cancel_timeout_cb(self):
+ self._exit_timeout_id = None
+ self.wait()
+ return False # only run once
def _orphan_process_warn(self):
phase = self.phase
@@ -239,6 +269,10 @@ class AbstractEbuildProcess(SpawnProcess):
def _set_returncode(self, wait_retval):
SpawnProcess._set_returncode(self, wait_retval)
+ if self._exit_timeout_id is not None:
+ self.scheduler.source_remove(self._exit_timeout_id)
+ self._exit_timeout_id = None
+
if self._ipc_daemon is not None:
self._ipc_daemon.cancel()
if self._exit_command.exitcode is not None:
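
The AbstractEbuildProcess hunks above replace the old blocking scheduler.schedule(self._reg_id, timeout=...) call with one-shot timeout callbacks that are later cleaned up in _set_returncode(). A minimal sketch of that timeout_add()/source_remove() pattern, assuming a GLib-style scheduler object; the class and method names below are illustrative, not the actual Portage classes:

# Sketch only: "scheduler" is assumed to expose GLib-style
# timeout_add(milliseconds, callback) and source_remove(source_id).
class ExitTimeoutSketch:
    def __init__(self, scheduler, exit_timeout_ms, cancel):
        self._scheduler = scheduler
        self._exit_timeout_ms = exit_timeout_ms
        self._cancel = cancel          # e.g. kills the child process
        self._timeout_id = None

    def on_exit_command(self):
        # Let the process exit naturally; only intervene after the timeout.
        self._timeout_id = self._scheduler.timeout_add(
            self._exit_timeout_ms, self._exit_timeout_cb)

    def _exit_timeout_cb(self):
        self._timeout_id = None
        self._cancel()                 # it did not exit on its own
        return False                   # False makes the callback one-shot

    def on_returncode(self):
        # Mirrors _set_returncode(): drop any pending timeout once finished.
        if self._timeout_id is not None:
            self._scheduler.source_remove(self._timeout_id)
            self._timeout_id = None
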
diff --git a/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.pyo b/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.pyo
new file mode 100644
index 0000000..b55f9c2
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AbstractEbuildProcess.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/AbstractPollTask.py b/portage_with_autodep/pym/_emerge/AbstractPollTask.py
index f7f3a95..2c84709 100644
--- a/portage_with_autodep/pym/_emerge/AbstractPollTask.py
+++ b/portage_with_autodep/pym/_emerge/AbstractPollTask.py
@@ -1,44 +1,111 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import array
+import errno
import logging
+import os
from portage.util import writemsg_level
from _emerge.AsynchronousTask import AsynchronousTask
-from _emerge.PollConstants import PollConstants
+
class AbstractPollTask(AsynchronousTask):
__slots__ = ("scheduler",) + \
("_registered",)
_bufsize = 4096
- _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
- _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
- _exceptional_events
+
+ @property
+ def _exceptional_events(self):
+ return self.scheduler.IO_ERR | self.scheduler.IO_NVAL
+
+ @property
+ def _registered_events(self):
+ return self.scheduler.IO_IN | self.scheduler.IO_HUP | \
+ self._exceptional_events
def isAlive(self):
return bool(self._registered)
- def _read_buf(self, f, event):
+ def _read_array(self, f, event):
"""
+ NOTE: array.fromfile() is used here only for testing purposes,
+ because it has bugs in all known versions of Python (including
+ Python 2.7 and Python 3.2). See PipeReaderArrayTestCase.
+
| POLLIN | RETURN
| BIT | VALUE
| ---------------------------------------------------
| 1 | Read self._bufsize into an instance of
- | | array.array('B') and return it, ignoring
+ | | array.array('B') and return it, handling
| | EOFError and IOError. An empty array
| | indicates EOF.
| ---------------------------------------------------
| 0 | None
"""
buf = None
- if event & PollConstants.POLLIN:
+ if event & self.scheduler.IO_IN:
buf = array.array('B')
try:
buf.fromfile(f, self._bufsize)
- except (EOFError, IOError):
+ except EOFError:
pass
+ except TypeError:
+ # Python 3.2:
+ # TypeError: read() didn't return bytes
+ pass
+ except IOError as e:
+ # EIO happens with pty on Linux after the
+ # slave end of the pty has been closed.
+ if e.errno == errno.EIO:
+ # EOF: return empty string of bytes
+ pass
+ elif e.errno == errno.EAGAIN:
+ # EAGAIN: return None
+ buf = None
+ else:
+ raise
+
+ if buf is not None:
+ try:
+ # Python >=3.2
+ buf = buf.tobytes()
+ except AttributeError:
+ buf = buf.tostring()
+
+ return buf
+
+ def _read_buf(self, fd, event):
+ """
+ | POLLIN | RETURN
+ | BIT | VALUE
+ | ---------------------------------------------------
+ | 1 | Read self._bufsize into a string of bytes,
+ | | handling EAGAIN and EIO. An empty string
+ | | of bytes indicates EOF.
+ | ---------------------------------------------------
+ | 0 | None
+ """
+ # NOTE: array.fromfile() is no longer used here because it has
+ # bugs in all known versions of Python (including Python 2.7
+ # and Python 3.2).
+ buf = None
+ if event & self.scheduler.IO_IN:
+ try:
+ buf = os.read(fd, self._bufsize)
+ except OSError as e:
+ # EIO happens with pty on Linux after the
+ # slave end of the pty has been closed.
+ if e.errno == errno.EIO:
+ # EOF: return empty string of bytes
+ buf = b''
+ elif e.errno == errno.EAGAIN:
+ # EAGAIN: return None
+ buf = None
+ else:
+ raise
+
return buf
def _unregister(self):
@@ -56,7 +123,32 @@ class AbstractPollTask(AsynchronousTask):
self._log_poll_exception(event)
self._unregister()
self.cancel()
- elif event & PollConstants.POLLHUP:
+ self.wait()
+ elif event & self.scheduler.IO_HUP:
self._unregister()
self.wait()
+ def _wait(self):
+ if self.returncode is not None:
+ return self.returncode
+ self._wait_loop()
+ return self.returncode
+
+ def _wait_loop(self, timeout=None):
+
+ if timeout is None:
+ while self._registered:
+ self.scheduler.iteration()
+ return
+
+ def timeout_cb():
+ timeout_cb.timed_out = True
+ return False
+ timeout_cb.timed_out = False
+ timeout_cb.timeout_id = self.scheduler.timeout_add(timeout, timeout_cb)
+
+ try:
+ while self._registered and not timeout_cb.timed_out:
+ self.scheduler.iteration()
+ finally:
+ self.scheduler.unregister(timeout_cb.timeout_id)
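
The rewritten _read_buf() above settles on os.read() with explicit EAGAIN/EIO handling in place of array.fromfile(). A self-contained restatement of the same return-value contract (bytes on success, b'' at EOF, None when nothing is available yet); the function name is illustrative:

import errno
import os

def read_nonblocking(fd, bufsize=4096):
    # Same contract as _read_buf() above: EIO from a closed pty slave is
    # treated as EOF (b''), EAGAIN means "try again later" (None).
    try:
        return os.read(fd, bufsize)
    except OSError as e:
        if e.errno == errno.EIO:
            return b''
        if e.errno == errno.EAGAIN:
            return None
        raise
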
diff --git a/portage_with_autodep/pym/_emerge/AbstractPollTask.pyo b/portage_with_autodep/pym/_emerge/AbstractPollTask.pyo
new file mode 100644
index 0000000..06ef6b9
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AbstractPollTask.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/AsynchronousLock.py b/portage_with_autodep/pym/_emerge/AsynchronousLock.py
index 637ba73..587aa46 100644
--- a/portage_with_autodep/pym/_emerge/AsynchronousLock.py
+++ b/portage_with_autodep/pym/_emerge/AsynchronousLock.py
@@ -1,15 +1,16 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import dummy_threading
import fcntl
+import errno
import logging
import sys
try:
import threading
except ImportError:
- import dummy_threading as threading
+ threading = dummy_threading
import portage
from portage import os
@@ -19,7 +20,6 @@ from portage.locks import lockfile, unlockfile
from portage.util import writemsg_level
from _emerge.AbstractPollTask import AbstractPollTask
from _emerge.AsynchronousTask import AsynchronousTask
-from _emerge.PollConstants import PollConstants
from _emerge.SpawnProcess import SpawnProcess
class AsynchronousLock(AsynchronousTask):
@@ -35,7 +35,7 @@ class AsynchronousLock(AsynchronousTask):
__slots__ = ('path', 'scheduler',) + \
('_imp', '_force_async', '_force_dummy', '_force_process', \
- '_force_thread', '_waiting')
+ '_force_thread')
_use_process_by_default = True
@@ -66,8 +66,7 @@ class AsynchronousLock(AsynchronousTask):
def _imp_exit(self, imp):
# call exit listeners
- if not self._waiting:
- self.wait()
+ self.wait()
def _cancel(self):
if isinstance(self._imp, AsynchronousTask):
@@ -81,9 +80,7 @@ class AsynchronousLock(AsynchronousTask):
def _wait(self):
if self.returncode is not None:
return self.returncode
- self._waiting = True
self.returncode = self._imp.wait()
- self._waiting = False
return self.returncode
def unlock(self):
@@ -114,13 +111,13 @@ class _LockThread(AbstractPollTask):
def _start(self):
pr, pw = os.pipe()
self._files = {}
- self._files['pipe_read'] = os.fdopen(pr, 'rb', 0)
- self._files['pipe_write'] = os.fdopen(pw, 'wb', 0)
- for k, f in self._files.items():
- fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
- fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
- self._reg_id = self.scheduler.register(self._files['pipe_read'].fileno(),
- PollConstants.POLLIN, self._output_handler)
+ self._files['pipe_read'] = pr
+ self._files['pipe_write'] = pw
+ for f in self._files.values():
+ fcntl.fcntl(f, fcntl.F_SETFL,
+ fcntl.fcntl(f, fcntl.F_GETFL) | os.O_NONBLOCK)
+ self._reg_id = self.scheduler.register(self._files['pipe_read'],
+ self.scheduler.IO_IN, self._output_handler)
self._registered = True
threading_mod = threading
if self._force_dummy:
@@ -130,26 +127,27 @@ class _LockThread(AbstractPollTask):
def _run_lock(self):
self._lock_obj = lockfile(self.path, wantnewlockfile=True)
- self._files['pipe_write'].write(b'\0')
+ os.write(self._files['pipe_write'], b'\0')
def _output_handler(self, f, event):
- buf = self._read_buf(self._files['pipe_read'], event)
+ buf = None
+ if event & self.scheduler.IO_IN:
+ try:
+ buf = os.read(self._files['pipe_read'], self._bufsize)
+ except OSError as e:
+ if e.errno not in (errno.EAGAIN,):
+ raise
if buf:
self._unregister()
self.returncode = os.EX_OK
self.wait()
+ return True
+
def _cancel(self):
# There's currently no way to force thread termination.
pass
- def _wait(self):
- if self.returncode is not None:
- return self.returncode
- if self._registered:
- self.scheduler.schedule(self._reg_id)
- return self.returncode
-
def unlock(self):
if self._lock_obj is None:
raise AssertionError('not locked')
@@ -171,7 +169,7 @@ class _LockThread(AbstractPollTask):
if self._files is not None:
for f in self._files.values():
- f.close()
+ os.close(f)
self._files = None
class _LockProcess(AbstractPollTask):
@@ -190,12 +188,12 @@ class _LockProcess(AbstractPollTask):
in_pr, in_pw = os.pipe()
out_pr, out_pw = os.pipe()
self._files = {}
- self._files['pipe_in'] = os.fdopen(in_pr, 'rb', 0)
- self._files['pipe_out'] = os.fdopen(out_pw, 'wb', 0)
+ self._files['pipe_in'] = in_pr
+ self._files['pipe_out'] = out_pw
fcntl.fcntl(in_pr, fcntl.F_SETFL,
fcntl.fcntl(in_pr, fcntl.F_GETFL) | os.O_NONBLOCK)
self._reg_id = self.scheduler.register(in_pr,
- PollConstants.POLLIN, self._output_handler)
+ self.scheduler.IO_IN, self._output_handler)
self._registered = True
self._proc = SpawnProcess(
args=[portage._python_interpreter,
@@ -209,9 +207,22 @@ class _LockProcess(AbstractPollTask):
os.close(in_pw)
def _proc_exit(self, proc):
+
+ if self._files is not None:
+ # Close pipe_out if it's still open, since it's useless
+ # after the process has exited. This helps to avoid
+ # "ResourceWarning: unclosed file" since Python 3.2.
+ try:
+ pipe_out = self._files.pop('pipe_out')
+ except KeyError:
+ pass
+ else:
+ os.close(pipe_out)
+
if proc.returncode != os.EX_OK:
# Typically, this will happen due to the
# process being killed by a signal.
+
if not self._acquired:
# If the lock hasn't been aquired yet, the
# caller can check the returncode and handle
@@ -242,21 +253,22 @@ class _LockProcess(AbstractPollTask):
self._proc.poll()
return self.returncode
- def _wait(self):
- if self.returncode is not None:
- return self.returncode
- if self._registered:
- self.scheduler.schedule(self._reg_id)
- return self.returncode
-
def _output_handler(self, f, event):
- buf = self._read_buf(self._files['pipe_in'], event)
+ buf = None
+ if event & self.scheduler.IO_IN:
+ try:
+ buf = os.read(self._files['pipe_in'], self._bufsize)
+ except OSError as e:
+ if e.errno not in (errno.EAGAIN,):
+ raise
if buf:
self._acquired = True
self._unregister()
self.returncode = os.EX_OK
self.wait()
+ return True
+
def _unregister(self):
self._registered = False
@@ -270,7 +282,7 @@ class _LockProcess(AbstractPollTask):
except KeyError:
pass
else:
- pipe_in.close()
+ os.close(pipe_in)
def unlock(self):
if self._proc is None:
@@ -281,8 +293,8 @@ class _LockProcess(AbstractPollTask):
raise AssertionError("lock process failed with returncode %s" \
% (self.returncode,))
self._unlocked = True
- self._files['pipe_out'].write(b'\0')
- self._files['pipe_out'].close()
+ os.write(self._files['pipe_out'], b'\0')
+ os.close(self._files['pipe_out'])
self._files = None
self._proc.wait()
self._proc = None
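
The _LockThread and _LockProcess changes above drop the os.fdopen() wrappers and operate on raw pipe descriptors in non-blocking mode. A small standalone sketch of that setup (the helper name is illustrative, and the scheduler registration is left out):

import fcntl
import os

def nonblocking_pipe():
    # Create a pipe and put both ends in O_NONBLOCK mode, as
    # _LockThread._start() now does with its raw descriptors.
    pr, pw = os.pipe()
    for fd in (pr, pw):
        flags = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
    return pr, pw

pr, pw = nonblocking_pipe()
os.write(pw, b'\0')                # the lock holder signals with one byte
assert os.read(pr, 4096) == b'\0'
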
diff --git a/portage_with_autodep/pym/_emerge/AsynchronousLock.pyo b/portage_with_autodep/pym/_emerge/AsynchronousLock.pyo
new file mode 100644
index 0000000..5f3cfbb
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AsynchronousLock.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/AsynchronousTask.py b/portage_with_autodep/pym/_emerge/AsynchronousTask.py
index 36522ca..7a193ce 100644
--- a/portage_with_autodep/pym/_emerge/AsynchronousTask.py
+++ b/portage_with_autodep/pym/_emerge/AsynchronousTask.py
@@ -1,8 +1,11 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+import signal
+
from portage import os
-from _emerge.SlotObject import SlotObject
+from portage.util.SlotObject import SlotObject
+
class AsynchronousTask(SlotObject):
"""
Subclasses override _wait() and _poll() so that calls
@@ -14,7 +17,10 @@ class AsynchronousTask(SlotObject):
"""
__slots__ = ("background", "cancelled", "returncode") + \
- ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
+ ("_exit_listeners", "_exit_listener_stack", "_start_listeners",
+ "_waiting")
+
+ _cancelled_returncode = - signal.SIGINT
def start(self):
"""
@@ -42,7 +48,12 @@ class AsynchronousTask(SlotObject):
def wait(self):
if self.returncode is None:
- self._wait()
+ if not self._waiting:
+ self._waiting = True
+ try:
+ self._wait()
+ finally:
+ self._waiting = False
self._wait_hook()
return self.returncode
@@ -50,10 +61,17 @@ class AsynchronousTask(SlotObject):
return self.returncode
def cancel(self):
+ """
+ Cancel the task, but do not wait for exit status. If asynchronous exit
+ notification is desired, then use addExitListener to add a listener
+ before calling this method.
+ NOTE: Synchronous waiting for status is not supported, since it would
+ be vulnerable to hitting the recursion limit when a large number of
+ tasks need to be terminated simultaneously, like in bug #402335.
+ """
if not self.cancelled:
self.cancelled = True
self._cancel()
- self.wait()
def _cancel(self):
"""
@@ -62,6 +80,17 @@ class AsynchronousTask(SlotObject):
"""
pass
+ def _was_cancelled(self):
+ """
+ If cancelled, set returncode if necessary and return True.
+ Otherwise, return False.
+ """
+ if self.cancelled:
+ if self.returncode is None:
+ self.returncode = self._cancelled_returncode
+ return True
+ return False
+
def addStartListener(self, f):
"""
The function will be called with one argument, a reference to self.
@@ -123,7 +152,11 @@ class AsynchronousTask(SlotObject):
self._exit_listener_stack = self._exit_listeners
self._exit_listeners = None
- self._exit_listener_stack.reverse()
+ # Execute exit listeners in reverse order, so that
+ # the last added listener is executed first. This
+ # allows SequentialTaskQueue to decrement its running
+ # task count as soon as one of its tasks exits, so that
+ # the value is accurate when other listeners execute.
while self._exit_listener_stack:
self._exit_listener_stack.pop()(self)
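
The comment added to the exit-listener code above explains that listeners now run in reverse (LIFO) order of registration, so SequentialTaskQueue can decrement its running-task count before earlier listeners observe it. A toy illustration of that dispatch order (names and callbacks are illustrative):

def run_exit_listeners(listeners, task):
    # Pop from the end of the stack: the most recently added listener
    # fires first, exactly like the while loop in _wait_hook() above.
    stack = list(listeners)
    while stack:
        stack.pop()(task)

order = []
run_exit_listeners([lambda t: order.append("first added"),
                    lambda t: order.append("last added")], task=None)
assert order == ["last added", "first added"]
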
diff --git a/portage_with_autodep/pym/_emerge/AsynchronousTask.pyo b/portage_with_autodep/pym/_emerge/AsynchronousTask.pyo
new file mode 100644
index 0000000..b8d67ea
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AsynchronousTask.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/AtomArg.pyo b/portage_with_autodep/pym/_emerge/AtomArg.pyo
new file mode 100644
index 0000000..b8f59cf
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/AtomArg.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/Binpkg.py b/portage_with_autodep/pym/_emerge/Binpkg.py
index bc6511e..ea8a1ad 100644
--- a/portage_with_autodep/pym/_emerge/Binpkg.py
+++ b/portage_with_autodep/pym/_emerge/Binpkg.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.EbuildPhase import EbuildPhase
@@ -9,15 +9,18 @@ from _emerge.CompositeTask import CompositeTask
from _emerge.BinpkgVerifier import BinpkgVerifier
from _emerge.EbuildMerge import EbuildMerge
from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.SpawnProcess import SpawnProcess
from portage.eapi import eapi_exports_replace_vars
-from portage.util import writemsg
+from portage.util import ensure_dirs, writemsg
import portage
from portage import os
+from portage import shutil
from portage import _encodings
from portage import _unicode_decode
from portage import _unicode_encode
import io
import logging
+import textwrap
from portage.output import colorize
class Binpkg(CompositeTask):
@@ -25,7 +28,8 @@ class Binpkg(CompositeTask):
__slots__ = ("find_blockers",
"ldpath_mtimes", "logger", "opts",
"pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
- ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
+ ("_bintree", "_build_dir", "_build_prefix",
+ "_ebuild_path", "_fetched_pkg",
"_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
def _writemsg_level(self, msg, level=0, noiselevel=0):
@@ -83,13 +87,12 @@ class Binpkg(CompositeTask):
waiting_msg = ("Fetching '%s' " + \
"in the background. " + \
- "To view fetch progress, run `tail -f " + \
+ "To view fetch progress, run `tail -f %s" + \
"/var/log/emerge-fetch.log` in another " + \
- "terminal.") % prefetcher.pkg_path
+ "terminal.") % (prefetcher.pkg_path, settings["EPREFIX"])
msg_prefix = colorize("GOOD", " * ")
- from textwrap import wrap
waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
- for line in wrap(waiting_msg, 65))
+ for line in textwrap.wrap(waiting_msg, 65))
if not self.background:
writemsg(waiting_msg, noiselevel=-1)
@@ -101,6 +104,10 @@ class Binpkg(CompositeTask):
def _prefetch_exit(self, prefetcher):
+ if self._was_cancelled():
+ self.wait()
+ return
+
pkg = self.pkg
pkg_count = self.pkg_count
if not (self.opts.pretend or self.opts.fetchonly):
@@ -299,10 +306,68 @@ class Binpkg(CompositeTask):
self._start_task(extractor, self._extractor_exit)
def _extractor_exit(self, extractor):
- if self._final_exit(extractor) != os.EX_OK:
+ if self._default_exit(extractor) != os.EX_OK:
self._unlock_builddir()
self._writemsg_level("!!! Error Extracting '%s'\n" % \
self._pkg_path, noiselevel=-1, level=logging.ERROR)
+ self.wait()
+ return
+
+ try:
+ with io.open(_unicode_encode(os.path.join(self._infloc, "EPREFIX"),
+ encoding=_encodings['fs'], errors='strict'), mode='r',
+ encoding=_encodings['repo.content'], errors='replace') as f:
+ self._build_prefix = f.read().rstrip('\n')
+ except IOError:
+ self._build_prefix = ""
+
+ if self._build_prefix == self.settings["EPREFIX"]:
+ ensure_dirs(self.settings["ED"])
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ chpathtool = SpawnProcess(
+ args=[portage._python_interpreter,
+ os.path.join(self.settings["PORTAGE_BIN_PATH"], "chpathtool.py"),
+ self.settings["D"], self._build_prefix, self.settings["EPREFIX"]],
+ background=self.background, env=self.settings.environ(),
+ scheduler=self.scheduler,
+ logfile=self.settings.get('PORTAGE_LOG_FILE'))
+ self._writemsg_level(">>> Adjusting Prefix to %s\n" % self.settings["EPREFIX"])
+ self._start_task(chpathtool, self._chpathtool_exit)
+
+ def _chpathtool_exit(self, chpathtool):
+ if self._final_exit(chpathtool) != os.EX_OK:
+ self._unlock_builddir()
+ self._writemsg_level("!!! Error Adjusting Prefix to %s" %
+ (self.settings["EPREFIX"],),
+ noiselevel=-1, level=logging.ERROR)
+ self.wait()
+ return
+
+ # We want to install in "our" prefix, not the binary one
+ with io.open(_unicode_encode(os.path.join(self._infloc, "EPREFIX"),
+ encoding=_encodings['fs'], errors='strict'), mode='w',
+ encoding=_encodings['repo.content'], errors='strict') as f:
+ f.write(self.settings["EPREFIX"] + "\n")
+
+ # Move the files to the correct location for merge.
+ image_tmp_dir = os.path.join(
+ self.settings["PORTAGE_BUILDDIR"], "image_tmp")
+ build_d = os.path.join(self.settings["D"],
+ self._build_prefix.lstrip(os.sep))
+ if not os.path.isdir(build_d):
+ # Assume this is a virtual package or something.
+ shutil.rmtree(self._image_dir)
+ ensure_dirs(self.settings["ED"])
+ else:
+ os.rename(build_d, image_tmp_dir)
+ shutil.rmtree(self._image_dir)
+ ensure_dirs(os.path.dirname(self.settings["ED"].rstrip(os.sep)))
+ os.rename(image_tmp_dir, self.settings["ED"])
+
self.wait()
def _unlock_builddir(self):
@@ -312,13 +377,13 @@ class Binpkg(CompositeTask):
self._build_dir.unlock()
def create_install_task(self):
- task = EbuildMerge(find_blockers=self.find_blockers,
+ task = EbuildMerge(exit_hook=self._install_exit,
+ find_blockers=self.find_blockers,
ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
pkg=self.pkg, pkg_count=self.pkg_count,
pkg_path=self._pkg_path, scheduler=self.scheduler,
settings=self.settings, tree=self._tree,
world_atom=self.world_atom)
- task.addExitListener(self._install_exit)
return task
def _install_exit(self, task):
diff --git a/portage_with_autodep/pym/_emerge/Binpkg.pyo b/portage_with_autodep/pym/_emerge/Binpkg.pyo
new file mode 100644
index 0000000..4499b9d
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/Binpkg.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.py b/portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.py
index f68971b..5ba1495 100644
--- a/portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.py
+++ b/portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import errno
@@ -38,7 +38,7 @@ class BinpkgEnvExtractor(CompositeTask):
background=self.background,
env=self.settings.environ(),
scheduler=self.scheduler,
- logfile=self.settings.get('PORTAGE_LOGFILE'))
+ logfile=self.settings.get('PORTAGE_LOG_FILE'))
self._start_task(extractor_proc, self._extractor_exit)
@@ -59,7 +59,7 @@ class BinpkgEnvExtractor(CompositeTask):
# This is a signal to ebuild.sh, so that it knows to filter
# out things like SANDBOX_{DENY,PREDICT,READ,WRITE} that
# would be preserved between normal phases.
- open(_unicode_encode(self._get_dest_env_path() + '.raw'), 'w')
+ open(_unicode_encode(self._get_dest_env_path() + '.raw'), 'wb').close()
self._current_task = None
self.returncode = os.EX_OK
diff --git a/portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.pyo b/portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.pyo
new file mode 100644
index 0000000..21c2e13
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BinpkgEnvExtractor.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.py b/portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.py
index d1630f2..f25cbf9 100644
--- a/portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.py
+++ b/portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.py
@@ -1,9 +1,8 @@
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.SpawnProcess import SpawnProcess
import portage
-import os
import signal
class BinpkgExtractorAsync(SpawnProcess):
diff --git a/portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.pyo b/portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.pyo
new file mode 100644
index 0000000..f8498f7
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BinpkgExtractorAsync.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/BinpkgFetcher.py b/portage_with_autodep/pym/_emerge/BinpkgFetcher.py
index baea4d6..f415e2e 100644
--- a/portage_with_autodep/pym/_emerge/BinpkgFetcher.py
+++ b/portage_with_autodep/pym/_emerge/BinpkgFetcher.py
@@ -28,9 +28,6 @@ class BinpkgFetcher(SpawnProcess):
def _start(self):
- if self.cancelled:
- return
-
pkg = self.pkg
pretend = self.pretend
bintree = pkg.root_config.trees["bintree"]
diff --git a/portage_with_autodep/pym/_emerge/BinpkgFetcher.pyo b/portage_with_autodep/pym/_emerge/BinpkgFetcher.pyo
new file mode 100644
index 0000000..482e55e
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BinpkgFetcher.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/BinpkgPrefetcher.pyo b/portage_with_autodep/pym/_emerge/BinpkgPrefetcher.pyo
new file mode 100644
index 0000000..c890cac
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BinpkgPrefetcher.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/BinpkgVerifier.pyo b/portage_with_autodep/pym/_emerge/BinpkgVerifier.pyo
new file mode 100644
index 0000000..21f770e
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BinpkgVerifier.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/Blocker.pyo b/portage_with_autodep/pym/_emerge/Blocker.pyo
new file mode 100644
index 0000000..b9e56bc
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/Blocker.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/BlockerCache.py b/portage_with_autodep/pym/_emerge/BlockerCache.py
index 5c4f43e..fce81f8 100644
--- a/portage_with_autodep/pym/_emerge/BlockerCache.py
+++ b/portage_with_autodep/pym/_emerge/BlockerCache.py
@@ -1,6 +1,7 @@
-# Copyright 1999-2009 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+import errno
import sys
from portage.util import writemsg
from portage.data import secpass
@@ -15,6 +16,9 @@ except ImportError:
if sys.hexversion >= 0x3000000:
basestring = str
long = int
+ _unicode = str
+else:
+ _unicode = unicode
class BlockerCache(portage.cache.mappings.MutableMapping):
"""This caches blockers of installed packages so that dep_check does not
@@ -58,8 +62,11 @@ class BlockerCache(portage.cache.mappings.MutableMapping):
self._cache_data = mypickle.load()
f.close()
del f
- except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError) as e:
- if isinstance(e, pickle.UnpicklingError):
+ except (AttributeError, EOFError, EnvironmentError, ValueError, pickle.UnpicklingError) as e:
+ if isinstance(e, EnvironmentError) and \
+ getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES):
+ pass
+ else:
writemsg("!!! Error loading '%s': %s\n" % \
(self._cache_filename, str(e)), noiselevel=-1)
del e
@@ -141,7 +148,7 @@ class BlockerCache(portage.cache.mappings.MutableMapping):
f.close()
portage.util.apply_secpass_permissions(
self._cache_filename, gid=portage.portage_gid, mode=0o644)
- except (IOError, OSError) as e:
+ except (IOError, OSError):
pass
self._modified.clear()
@@ -155,8 +162,8 @@ class BlockerCache(portage.cache.mappings.MutableMapping):
@param blocker_data: An object with counter and atoms attributes.
@type blocker_data: BlockerData
"""
- self._cache_data["blockers"][cpv] = \
- (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
+ self._cache_data["blockers"][_unicode(cpv)] = (blocker_data.counter,
+ tuple(_unicode(x) for x in blocker_data.atoms))
self._modified.add(cpv)
def __iter__(self):
@@ -176,7 +183,7 @@ class BlockerCache(portage.cache.mappings.MutableMapping):
def __getitem__(self, cpv):
"""
@rtype: BlockerData
- @returns: An object with counter and atoms attributes.
+ @return: An object with counter and atoms attributes.
"""
return self.BlockerData(*self._cache_data["blockers"][cpv])
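
The BlockerCache hunk above widens the exceptions caught while loading the pickled cache and silently ignores only a missing or unreadable cache file. A condensed sketch of that loading policy (load_cache and the print call are illustrative, not the Portage code):

import errno
import pickle

def load_cache(path):
    # Missing or unreadable cache files are ignored; any other failure is
    # reported and the cache is simply rebuilt from scratch.
    try:
        with open(path, "rb") as f:
            return pickle.load(f)
    except (AttributeError, EOFError, EnvironmentError, ValueError,
            pickle.UnpicklingError) as e:
        if isinstance(e, EnvironmentError) and \
                getattr(e, "errno", None) in (errno.ENOENT, errno.EACCES):
            pass
        else:
            print("!!! Error loading %r: %s" % (path, e))
        return None
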
diff --git a/portage_with_autodep/pym/_emerge/BlockerCache.pyo b/portage_with_autodep/pym/_emerge/BlockerCache.pyo
new file mode 100644
index 0000000..41554e1
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BlockerCache.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/BlockerDB.py b/portage_with_autodep/pym/_emerge/BlockerDB.py
index 4819749..459affd 100644
--- a/portage_with_autodep/pym/_emerge/BlockerDB.py
+++ b/portage_with_autodep/pym/_emerge/BlockerDB.py
@@ -25,7 +25,7 @@ class BlockerDB(object):
self._dep_check_trees = None
self._fake_vartree = fake_vartree
self._dep_check_trees = {
- self._vartree.root : {
+ self._vartree.settings["EROOT"] : {
"porttree" : fake_vartree,
"vartree" : fake_vartree,
}}
@@ -36,7 +36,8 @@ class BlockerDB(object):
new_pkg is planned to be installed. This ignores build-time
blockers, since new_pkg is assumed to be built already.
"""
- blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
+ blocker_cache = BlockerCache(None,
+ self._vartree.dbapi)
dep_keys = ["RDEPEND", "PDEPEND"]
settings = self._vartree.settings
stale_cache = set(blocker_cache)
diff --git a/portage_with_autodep/pym/_emerge/BlockerDB.pyo b/portage_with_autodep/pym/_emerge/BlockerDB.pyo
new file mode 100644
index 0000000..dfab0aa
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BlockerDB.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/BlockerDepPriority.pyo b/portage_with_autodep/pym/_emerge/BlockerDepPriority.pyo
new file mode 100644
index 0000000..c3b554c
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/BlockerDepPriority.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/CompositeTask.py b/portage_with_autodep/pym/_emerge/CompositeTask.py
index 644a69b..3e43478 100644
--- a/portage_with_autodep/pym/_emerge/CompositeTask.py
+++ b/portage_with_autodep/pym/_emerge/CompositeTask.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.AsynchronousTask import AsynchronousTask
@@ -60,7 +60,8 @@ class CompositeTask(AsynchronousTask):
self._current_task = None
break
else:
- self.scheduler.schedule(condition=self._task_queued_wait)
+ while not self._task_queued_wait():
+ self.scheduler.iteration()
if self.returncode is not None:
break
elif self.cancelled:
@@ -103,7 +104,7 @@ class CompositeTask(AsynchronousTask):
Subclasses can use this as a generic task exit callback.
@rtype: int
- @returns: The task.returncode attribute.
+ @return: The task.returncode attribute.
"""
self._assert_current(task)
if task.returncode != os.EX_OK:
diff --git a/portage_with_autodep/pym/_emerge/CompositeTask.pyo b/portage_with_autodep/pym/_emerge/CompositeTask.pyo
new file mode 100644
index 0000000..adc8cae
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/CompositeTask.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/DepPriority.pyo b/portage_with_autodep/pym/_emerge/DepPriority.pyo
new file mode 100644
index 0000000..4028a36
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/DepPriority.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/DepPriorityNormalRange.pyo b/portage_with_autodep/pym/_emerge/DepPriorityNormalRange.pyo
new file mode 100644
index 0000000..5e0f710
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/DepPriorityNormalRange.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/DepPrioritySatisfiedRange.pyo b/portage_with_autodep/pym/_emerge/DepPrioritySatisfiedRange.pyo
new file mode 100644
index 0000000..5309bcd
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/DepPrioritySatisfiedRange.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/Dependency.py b/portage_with_autodep/pym/_emerge/Dependency.py
index 0f746b6..c2d36b2 100644
--- a/portage_with_autodep/pym/_emerge/Dependency.py
+++ b/portage_with_autodep/pym/_emerge/Dependency.py
@@ -1,8 +1,9 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
+from portage.util.SlotObject import SlotObject
from _emerge.DepPriority import DepPriority
-from _emerge.SlotObject import SlotObject
+
class Dependency(SlotObject):
__slots__ = ("atom", "blocker", "child", "depth",
"parent", "onlydeps", "priority", "root",
diff --git a/portage_with_autodep/pym/_emerge/Dependency.pyo b/portage_with_autodep/pym/_emerge/Dependency.pyo
new file mode 100644
index 0000000..f53e0ed
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/Dependency.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/DependencyArg.pyo b/portage_with_autodep/pym/_emerge/DependencyArg.pyo
new file mode 100644
index 0000000..916a762
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/DependencyArg.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/EbuildBinpkg.py b/portage_with_autodep/pym/_emerge/EbuildBinpkg.py
index b7d43ba..34a6aef 100644
--- a/portage_with_autodep/pym/_emerge/EbuildBinpkg.py
+++ b/portage_with_autodep/pym/_emerge/EbuildBinpkg.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.CompositeTask import CompositeTask
@@ -34,6 +34,10 @@ class EbuildBinpkg(CompositeTask):
self.settings.pop("PORTAGE_BINPKG_TMPFILE", None)
if self._default_exit(package_phase) != os.EX_OK:
+ try:
+ os.unlink(self._binpkg_tmpfile)
+ except OSError:
+ pass
self.wait()
return
diff --git a/portage_with_autodep/pym/_emerge/EbuildBinpkg.pyo b/portage_with_autodep/pym/_emerge/EbuildBinpkg.pyo
new file mode 100644
index 0000000..2acfc87
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildBinpkg.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/EbuildBuild.py b/portage_with_autodep/pym/_emerge/EbuildBuild.py
index 1c423a3..5a48f8e 100644
--- a/portage_with_autodep/pym/_emerge/EbuildBuild.py
+++ b/portage_with_autodep/pym/_emerge/EbuildBuild.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.EbuildExecuter import EbuildExecuter
@@ -32,12 +32,13 @@ class EbuildBuild(CompositeTask):
pkg = self.pkg
settings = self.settings
- rval = _check_temp_dir(settings)
- if rval != os.EX_OK:
- self.returncode = rval
- self._current_task = None
- self.wait()
- return
+ if not self.opts.fetchonly:
+ rval = _check_temp_dir(settings)
+ if rval != os.EX_OK:
+ self.returncode = rval
+ self._current_task = None
+ self.wait()
+ return
root_config = pkg.root_config
tree = "porttree"
@@ -108,6 +109,10 @@ class EbuildBuild(CompositeTask):
def _prefetch_exit(self, prefetcher):
+ if self._was_cancelled():
+ self.wait()
+ return
+
opts = self.opts
pkg = self.pkg
settings = self.settings
@@ -225,9 +230,11 @@ class EbuildBuild(CompositeTask):
#buildsyspkg: Check if we need to _force_ binary package creation
self._issyspkg = "buildsyspkg" in features and \
system_set.findAtomForPackage(pkg) and \
- not opts.buildpkg
+ "buildpkg" not in features and \
+ opts.buildpkg != 'n'
- if opts.buildpkg or self._issyspkg:
+ if ("buildpkg" in features or self._issyspkg) \
+ and not self.opts.buildpkg_exclude.findAtomForPackage(pkg):
self._buildpkg = True
@@ -406,7 +413,8 @@ class EbuildBuild(CompositeTask):
ebuild_path = self._ebuild_path
tree = self._tree
- task = EbuildMerge(find_blockers=self.find_blockers,
+ task = EbuildMerge(exit_hook=self._install_exit,
+ find_blockers=self.find_blockers,
ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
pkg_count=pkg_count, pkg_path=ebuild_path,
scheduler=self.scheduler,
@@ -419,7 +427,6 @@ class EbuildBuild(CompositeTask):
(pkg_count.curval, pkg_count.maxval, pkg.cpv)
logger.log(msg, short_msg=short_msg)
- task.addExitListener(self._install_exit)
return task
def _install_exit(self, task):
diff --git a/portage_with_autodep/pym/_emerge/EbuildBuild.pyo b/portage_with_autodep/pym/_emerge/EbuildBuild.pyo
new file mode 100644
index 0000000..19d913c
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildBuild.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/EbuildBuildDir.py b/portage_with_autodep/pym/_emerge/EbuildBuildDir.py
index ddc5fe0..9773bd7 100644
--- a/portage_with_autodep/pym/_emerge/EbuildBuildDir.py
+++ b/portage_with_autodep/pym/_emerge/EbuildBuildDir.py
@@ -1,11 +1,12 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.AsynchronousLock import AsynchronousLock
-from _emerge.SlotObject import SlotObject
+
import portage
from portage import os
from portage.exception import PortageException
+from portage.util.SlotObject import SlotObject
import errno
class EbuildBuildDir(SlotObject):
diff --git a/portage_with_autodep/pym/_emerge/EbuildBuildDir.pyo b/portage_with_autodep/pym/_emerge/EbuildBuildDir.pyo
new file mode 100644
index 0000000..2846579
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildBuildDir.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/EbuildExecuter.py b/portage_with_autodep/pym/_emerge/EbuildExecuter.py
index f8febd4..fd663a4 100644
--- a/portage_with_autodep/pym/_emerge/EbuildExecuter.py
+++ b/portage_with_autodep/pym/_emerge/EbuildExecuter.py
@@ -12,7 +12,7 @@ from portage.package.ebuild.doebuild import _prepare_fake_distdir
class EbuildExecuter(CompositeTask):
- __slots__ = ("pkg", "scheduler", "settings")
+ __slots__ = ("pkg", "settings")
_phases = ("prepare", "configure", "compile", "test", "install")
@@ -34,8 +34,6 @@ class EbuildExecuter(CompositeTask):
cleanup = 0
portage.prepare_build_dirs(pkg.root, settings, cleanup)
- portdb = pkg.root_config.trees['porttree'].dbapi
- ebuild_path = settings['EBUILD']
alist = settings.configdict["pkg"].get("A", "").split()
_prepare_fake_distdir(settings, alist)
diff --git a/portage_with_autodep/pym/_emerge/EbuildExecuter.pyo b/portage_with_autodep/pym/_emerge/EbuildExecuter.pyo
new file mode 100644
index 0000000..592a0c9
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildExecuter.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/EbuildFetcher.py b/portage_with_autodep/pym/_emerge/EbuildFetcher.py
index feb68d0..c0a7fdd 100644
--- a/portage_with_autodep/pym/_emerge/EbuildFetcher.py
+++ b/portage_with_autodep/pym/_emerge/EbuildFetcher.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import traceback
@@ -21,7 +21,7 @@ class EbuildFetcher(SpawnProcess):
__slots__ = ("config_pool", "ebuild_path", "fetchonly", "fetchall",
"pkg", "prefetch") + \
- ("_digests", "_settings", "_uri_map")
+ ("_digests", "_manifest", "_settings", "_uri_map")
def already_fetched(self, settings):
"""
@@ -40,7 +40,7 @@ class EbuildFetcher(SpawnProcess):
digests = self._get_digests()
distdir = settings["DISTDIR"]
- allow_missing = "allow-missing-manifests" in settings.features
+ allow_missing = self._get_manifest().allow_missing
for filename in uri_map:
# Use stat rather than lstat since fetch() creates
@@ -163,10 +163,15 @@ class EbuildFetcher(SpawnProcess):
pid = os.fork()
if pid != 0:
+ if not isinstance(pid, int):
+ raise AssertionError(
+ "fork returned non-integer: %s" % (repr(pid),))
portage.process.spawned_pids.append(pid)
return [pid]
- portage.process._setup_pipes(fd_pipes)
+ portage.locks._close_fds()
+ # Disable close_fds since we don't exec (see _setup_pipes docstring).
+ portage.process._setup_pipes(fd_pipes, close_fds=False)
# Use default signal handlers in order to avoid problems
# killing subprocesses as reported in bug #353239.
@@ -179,7 +184,7 @@ class EbuildFetcher(SpawnProcess):
not in ('yes', 'true')
rval = 1
- allow_missing = 'allow-missing-manifests' in self._settings.features
+ allow_missing = self._get_manifest().allow_missing
try:
if fetch(self._uri_map, self._settings, fetchonly=self.fetchonly,
digests=copy.deepcopy(self._get_digests()),
@@ -203,11 +208,16 @@ class EbuildFetcher(SpawnProcess):
raise AssertionError("ebuild not found for '%s'" % self.pkg.cpv)
return self.ebuild_path
+ def _get_manifest(self):
+ if self._manifest is None:
+ pkgdir = os.path.dirname(self._get_ebuild_path())
+ self._manifest = self.pkg.root_config.settings.repositories.get_repo_for_location(
+ os.path.dirname(os.path.dirname(pkgdir))).load_manifest(pkgdir, None)
+ return self._manifest
+
def _get_digests(self):
- if self._digests is not None:
- return self._digests
- self._digests = portage.Manifest(os.path.dirname(
- self._get_ebuild_path()), None).getTypeDigests("DIST")
+ if self._digests is None:
+ self._digests = self._get_manifest().getTypeDigests("DIST")
return self._digests
def _get_uri_map(self):
diff --git a/portage_with_autodep/pym/_emerge/EbuildFetcher.pyo b/portage_with_autodep/pym/_emerge/EbuildFetcher.pyo
new file mode 100644
index 0000000..ddc92d1
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildFetcher.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/EbuildFetchonly.py b/portage_with_autodep/pym/_emerge/EbuildFetchonly.py
index b898971..f88ea96 100644
--- a/portage_with_autodep/pym/_emerge/EbuildFetchonly.py
+++ b/portage_with_autodep/pym/_emerge/EbuildFetchonly.py
@@ -1,10 +1,10 @@
-# Copyright 1999-2010 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from _emerge.SlotObject import SlotObject
import portage
from portage import os
from portage.elog.messages import eerror
+from portage.util.SlotObject import SlotObject
class EbuildFetchonly(SlotObject):
@@ -21,7 +21,7 @@ class EbuildFetchonly(SlotObject):
debug = settings.get("PORTAGE_DEBUG") == "1"
rval = portage.doebuild(ebuild_path, "fetch",
- settings["ROOT"], settings, debug=debug,
+ settings=settings, debug=debug,
listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
mydbapi=portdb, tree="porttree")
diff --git a/portage_with_autodep/pym/_emerge/EbuildFetchonly.pyo b/portage_with_autodep/pym/_emerge/EbuildFetchonly.pyo
new file mode 100644
index 0000000..c54a1db
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildFetchonly.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/EbuildIpcDaemon.py b/portage_with_autodep/pym/_emerge/EbuildIpcDaemon.py
index 5dabe34..8414d20 100644
--- a/portage_with_autodep/pym/_emerge/EbuildIpcDaemon.py
+++ b/portage_with_autodep/pym/_emerge/EbuildIpcDaemon.py
@@ -1,14 +1,15 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import errno
import logging
import pickle
from portage import os
+from portage.exception import TryAgain
from portage.localization import _
+from portage.locks import lockfile, unlockfile
from portage.util import writemsg_level
from _emerge.FifoIpcDaemon import FifoIpcDaemon
-from _emerge.PollConstants import PollConstants
class EbuildIpcDaemon(FifoIpcDaemon):
"""
@@ -34,7 +35,7 @@ class EbuildIpcDaemon(FifoIpcDaemon):
def _input_handler(self, fd, event):
# Read the whole pickle in a single atomic read() call.
data = None
- if event & PollConstants.POLLIN:
+ if event & self.scheduler.IO_IN:
# For maximum portability, use os.read() here since
# array.fromfile() and file.read() are both known to
# erroneously return an empty string from this
@@ -84,6 +85,30 @@ class EbuildIpcDaemon(FifoIpcDaemon):
if reply_hook is not None:
reply_hook()
+ elif event & self.scheduler.IO_HUP:
+ # This can be triggered due to a race condition which happens when
+ # the previous _reopen_input() call occurs before the writer has
+ # closed the pipe (see bug #401919). It's not safe to re-open
+ # without a lock here, since it's possible that another writer will
+ # write something to the pipe just before we close it, and in that
+ # case the write will be lost. Therefore, try for a non-blocking
+ # lock, and only re-open the pipe if the lock is acquired.
+ lock_filename = os.path.join(
+ os.path.dirname(self.input_fifo), '.ipc_lock')
+ try:
+ lock_obj = lockfile(lock_filename, unlinkfile=True,
+ flags=os.O_NONBLOCK)
+ except TryAgain:
+ # We'll try again when another IO_HUP event arrives.
+ pass
+ else:
+ try:
+ self._reopen_input()
+ finally:
+ unlockfile(lock_obj)
+
+ return True
+
def _send_reply(self, reply):
# File streams are in unbuffered mode since we do atomic
# read and write of whole pickles. Use non-blocking mode so
diff --git a/portage_with_autodep/pym/_emerge/EbuildIpcDaemon.pyo b/portage_with_autodep/pym/_emerge/EbuildIpcDaemon.pyo
new file mode 100644
index 0000000..7a9588f
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildIpcDaemon.pyo
Binary files differ
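
The IO_HUP branch added above only re-opens the input FIFO after winning a non-blocking lock, so a writer caught mid-write cannot have its pickle silently dropped. A generic sketch of that try-lock-then-reopen idea, using plain fcntl.flock rather than the portage.locks API shown in the diff:

    import errno
    import fcntl
    import os

    def try_nonblocking_reopen(lock_path, reopen):
        """Run reopen() only if an exclusive lock can be taken without blocking."""
        fd = os.open(lock_path, os.O_RDWR | os.O_CREAT, 0o600)
        try:
            try:
                # LOCK_NB makes flock fail immediately instead of waiting.
                fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            except OSError as e:
                if e.errno not in (errno.EACCES, errno.EAGAIN):
                    raise
                return False  # someone else holds the lock; retry on the next HUP event
            try:
                reopen()
                return True
            finally:
                fcntl.flock(fd, fcntl.LOCK_UN)
        finally:
            os.close(fd)
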
diff --git a/portage_with_autodep/pym/_emerge/EbuildMerge.py b/portage_with_autodep/pym/_emerge/EbuildMerge.py
index 9c35988..df0778c 100644
--- a/portage_with_autodep/pym/_emerge/EbuildMerge.py
+++ b/portage_with_autodep/pym/_emerge/EbuildMerge.py
@@ -7,7 +7,7 @@ from portage.dbapi._MergeProcess import MergeProcess
class EbuildMerge(CompositeTask):
- __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
+ __slots__ = ("exit_hook", "find_blockers", "logger", "ldpath_mtimes",
"pkg", "pkg_count", "pkg_path", "pretend",
"settings", "tree", "world_atom")
@@ -35,6 +35,7 @@ class EbuildMerge(CompositeTask):
def _merge_exit(self, merge_task):
if self._final_exit(merge_task) != os.EX_OK:
+ self.exit_hook(self)
self.wait()
return
@@ -53,4 +54,5 @@ class EbuildMerge(CompositeTask):
logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
(pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
+ self.exit_hook(self)
self.wait()
diff --git a/portage_with_autodep/pym/_emerge/EbuildMerge.pyo b/portage_with_autodep/pym/_emerge/EbuildMerge.pyo
new file mode 100644
index 0000000..662c681
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildMerge.pyo
Binary files differ
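
The EbuildMerge change threads an exit_hook callback through both the failure and the success paths, so the caller is notified exactly once per merge before wait() runs. A small sketch of that completion-hook pattern (all names hypothetical):

    class MergeLikeTask:
        """Sketch of the exit_hook pattern: notify a listener on every exit path."""

        def __init__(self, exit_hook):
            self.exit_hook = exit_hook  # callable taking the task itself
            self.returncode = None

        def _finish(self, returncode):
            self.returncode = returncode
            # The hook runs whether the merge failed or succeeded, so callers can
            # release locks or update counters exactly once per task.
            self.exit_hook(self)

    # usage
    done = []
    task = MergeLikeTask(exit_hook=done.append)
    task._finish(0)
    assert done == [task]
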
diff --git a/portage_with_autodep/pym/_emerge/EbuildMetadataPhase.py b/portage_with_autodep/pym/_emerge/EbuildMetadataPhase.py
index e53298b..c2d3747 100644
--- a/portage_with_autodep/pym/_emerge/EbuildMetadataPhase.py
+++ b/portage_with_autodep/pym/_emerge/EbuildMetadataPhase.py
@@ -1,15 +1,19 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.SubProcess import SubProcess
-from _emerge.PollConstants import PollConstants
import sys
from portage.cache.mappings import slot_dict_class
import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.package.ebuild._eapi_invalid:eapi_invalid',
+)
from portage import os
from portage import _encodings
from portage import _unicode_decode
from portage import _unicode_encode
+
+import errno
import fcntl
import io
@@ -20,37 +24,44 @@ class EbuildMetadataPhase(SubProcess):
used to extract metadata from the ebuild.
"""
- __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
- "ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \
- ("_raw_metadata",)
+ __slots__ = ("cpv", "eapi_supported", "ebuild_hash", "fd_pipes",
+ "metadata", "portdb", "repo_path", "settings") + \
+ ("_eapi", "_eapi_lineno", "_raw_metadata",)
_file_names = ("ebuild",)
_files_dict = slot_dict_class(_file_names, prefix="")
_metadata_fd = 9
def _start(self):
+ ebuild_path = self.ebuild_hash.location
+
+ with io.open(_unicode_encode(ebuild_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ self._eapi, self._eapi_lineno = portage._parse_eapi_ebuild_head(f)
+
+ parsed_eapi = self._eapi
+ if parsed_eapi is None:
+ parsed_eapi = "0"
+
+ if not parsed_eapi:
+ # An empty EAPI setting is invalid.
+ self._eapi_invalid(None)
+ self._set_returncode((self.pid, 1 << 8))
+ self.wait()
+ return
+
+ self.eapi_supported = portage.eapi_is_supported(parsed_eapi)
+ if not self.eapi_supported:
+ self.metadata = {"EAPI": parsed_eapi}
+ self._set_returncode((self.pid, os.EX_OK << 8))
+ self.wait()
+ return
+
settings = self.settings
settings.setcpv(self.cpv)
- ebuild_path = self.ebuild_path
-
- eapi = None
- if eapi is None and \
- 'parse-eapi-ebuild-head' in settings.features:
- eapi = portage._parse_eapi_ebuild_head(
- io.open(_unicode_encode(ebuild_path,
- encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['repo.content'],
- errors='replace'))
-
- if eapi is not None:
- if not portage.eapi_is_supported(eapi):
- self.metadata_callback(self.cpv, self.ebuild_path,
- self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
- self._set_returncode((self.pid, os.EX_OK << 8))
- self.wait()
- return
-
- settings.configdict['pkg']['EAPI'] = eapi
+ settings.configdict['pkg']['EAPI'] = parsed_eapi
debug = settings.get("PORTAGE_DEBUG") == "1"
master_fd = None
@@ -61,7 +72,8 @@ class EbuildMetadataPhase(SubProcess):
else:
fd_pipes = {}
- fd_pipes.setdefault(0, sys.stdin.fileno())
+ null_input = open('/dev/null', 'rb')
+ fd_pipes.setdefault(0, null_input.fileno())
fd_pipes.setdefault(1, sys.stdout.fileno())
fd_pipes.setdefault(2, sys.stderr.fileno())
@@ -72,7 +84,6 @@ class EbuildMetadataPhase(SubProcess):
if fd == sys.stderr.fileno():
sys.stderr.flush()
- fd_pipes_orig = fd_pipes.copy()
self._files = self._files_dict()
files = self._files
@@ -83,17 +94,18 @@ class EbuildMetadataPhase(SubProcess):
fd_pipes[self._metadata_fd] = slave_fd
self._raw_metadata = []
- files.ebuild = os.fdopen(master_fd, 'rb', 0)
- self._reg_id = self.scheduler.register(files.ebuild.fileno(),
+ files.ebuild = master_fd
+ self._reg_id = self.scheduler.register(files.ebuild,
self._registered_events, self._output_handler)
self._registered = True
retval = portage.doebuild(ebuild_path, "depend",
- settings["ROOT"], settings, debug,
+ settings=settings, debug=debug,
mydbapi=self.portdb, tree="porttree",
fd_pipes=fd_pipes, returnpid=True)
os.close(slave_fd)
+ null_input.close()
if isinstance(retval, int):
# doebuild failed before spawning
@@ -107,27 +119,81 @@ class EbuildMetadataPhase(SubProcess):
def _output_handler(self, fd, event):
- if event & PollConstants.POLLIN:
- self._raw_metadata.append(self._files.ebuild.read())
- if not self._raw_metadata[-1]:
- self._unregister()
- self.wait()
+ if event & self.scheduler.IO_IN:
+ while True:
+ try:
+ self._raw_metadata.append(
+ os.read(self._files.ebuild, self._bufsize))
+ except OSError as e:
+ if e.errno not in (errno.EAGAIN,):
+ raise
+ break
+ else:
+ if not self._raw_metadata[-1]:
+ self._unregister()
+ self.wait()
+ break
self._unregister_if_appropriate(event)
+ return True
+
def _set_returncode(self, wait_retval):
SubProcess._set_returncode(self, wait_retval)
- if self.returncode == os.EX_OK:
- metadata_lines = ''.join(_unicode_decode(chunk,
- encoding=_encodings['repo.content'], errors='replace')
- for chunk in self._raw_metadata).splitlines()
+ # self._raw_metadata is None when _start returns
+ # early due to an unsupported EAPI detected with
+ # FEATURES=parse-eapi-ebuild-head
+ if self.returncode == os.EX_OK and \
+ self._raw_metadata is not None:
+ metadata_lines = _unicode_decode(b''.join(self._raw_metadata),
+ encoding=_encodings['repo.content'],
+ errors='replace').splitlines()
+ metadata_valid = True
if len(portage.auxdbkeys) != len(metadata_lines):
# Don't trust bash's returncode if the
# number of lines is incorrect.
- self.returncode = 1
+ metadata_valid = False
else:
- metadata = zip(portage.auxdbkeys, metadata_lines)
- self.metadata = self.metadata_callback(self.cpv,
- self.ebuild_path, self.repo_path, metadata,
- self.ebuild_mtime)
+ metadata = dict(zip(portage.auxdbkeys, metadata_lines))
+ parsed_eapi = self._eapi
+ if parsed_eapi is None:
+ parsed_eapi = "0"
+ self.eapi_supported = \
+ portage.eapi_is_supported(metadata["EAPI"])
+ if (not metadata["EAPI"] or self.eapi_supported) and \
+ metadata["EAPI"] != parsed_eapi:
+ self._eapi_invalid(metadata)
+ if 'parse-eapi-ebuild-head' in self.settings.features:
+ metadata_valid = False
+
+ if metadata_valid:
+ # Since we're supposed to be able to efficiently obtain the
+ # EAPI from _parse_eapi_ebuild_head, we don't write cache
+ # entries for unsupported EAPIs.
+ if self.eapi_supported:
+
+ if metadata.get("INHERITED", False):
+ metadata["_eclasses_"] = \
+ self.portdb.repositories.get_repo_for_location(
+ self.repo_path).eclass_db.get_eclass_data(
+ metadata["INHERITED"].split())
+ else:
+ metadata["_eclasses_"] = {}
+ metadata.pop("INHERITED", None)
+
+ self.portdb._write_cache(self.cpv,
+ self.repo_path, metadata, self.ebuild_hash)
+ else:
+ metadata = {"EAPI": metadata["EAPI"]}
+ self.metadata = metadata
+ else:
+ self.returncode = 1
+ def _eapi_invalid(self, metadata):
+ repo_name = self.portdb.getRepositoryName(self.repo_path)
+ if metadata is not None:
+ eapi_var = metadata["EAPI"]
+ else:
+ eapi_var = None
+ eapi_invalid(self, self.cpv, repo_name, self.settings,
+ eapi_var, self._eapi, self._eapi_lineno)
diff --git a/portage_with_autodep/pym/_emerge/EbuildMetadataPhase.pyo b/portage_with_autodep/pym/_emerge/EbuildMetadataPhase.pyo
new file mode 100644
index 0000000..fcc0874
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildMetadataPhase.pyo
Binary files differ
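
The reworked _output_handler reads the metadata pipe with os.read() in a loop, treating EAGAIN as "nothing more right now" and an empty read as EOF. A standalone sketch of that non-blocking drain loop:

    import errno
    import os

    def drain_nonblocking(fd, bufsize=4096):
        """Read everything currently available from a non-blocking fd."""
        chunks = []
        eof = False
        while True:
            try:
                data = os.read(fd, bufsize)
            except OSError as e:
                if e.errno != errno.EAGAIN:
                    raise
                break          # no more data available at the moment
            if not data:
                eof = True     # the writer closed its end of the pipe
                break
            chunks.append(data)
        return b"".join(chunks), eof
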
diff --git a/portage_with_autodep/pym/_emerge/EbuildPhase.py b/portage_with_autodep/pym/_emerge/EbuildPhase.py
index 82c165d..36ca8b0 100644
--- a/portage_with_autodep/pym/_emerge/EbuildPhase.py
+++ b/portage_with_autodep/pym/_emerge/EbuildPhase.py
@@ -33,12 +33,14 @@ class EbuildPhase(CompositeTask):
("_ebuild_lock",)
# FEATURES displayed prior to setup phase
- _features_display = ("ccache", "depcheck", "depcheckstrict" "distcc",
- "distcc-pump", "fakeroot",
+ _features_display = (
+ "ccache", "compressdebug", "depcheck", "depcheckstrict",
+ "distcc", "distcc-pump", "fakeroot",
"installsources", "keeptemp", "keepwork", "nostrip",
"preserve-libs", "sandbox", "selinux", "sesandbox",
"splitdebug", "suidctl", "test", "userpriv",
- "usersandbox")
+ "usersandbox"
+ )
# Locked phases
_locked_phases = ("setup", "preinst", "postinst", "prerm", "postrm")
@@ -274,13 +276,15 @@ class EbuildPhase(CompositeTask):
temp_file = open(_unicode_encode(temp_log,
encoding=_encodings['fs'], errors='strict'), 'rb')
- log_file = self._open_log(log_path)
+ log_file, log_file_real = self._open_log(log_path)
for line in temp_file:
log_file.write(line)
temp_file.close()
log_file.close()
+ if log_file_real is not log_file:
+ log_file_real.close()
os.unlink(temp_log)
def _open_log(self, log_path):
@@ -288,11 +292,12 @@ class EbuildPhase(CompositeTask):
f = open(_unicode_encode(log_path,
encoding=_encodings['fs'], errors='strict'),
mode='ab')
+ f_real = f
if log_path.endswith('.gz'):
f = gzip.GzipFile(filename='', mode='ab', fileobj=f)
- return f
+ return (f, f_real)
def _die_hooks(self):
self.returncode = None
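
_open_log now returns both the (possibly gzip-wrapped) stream and the underlying file, because GzipFile.close() does not close the file object it wraps. A sketch of the same pattern outside Portage:

    import gzip

    def open_log(log_path):
        """Open a log for appending; returns (stream, real_file)."""
        f_real = open(log_path, "ab")
        f = f_real
        if log_path.endswith(".gz"):
            f = gzip.GzipFile(filename="", mode="ab", fileobj=f_real)
        return f, f_real

    # usage: close both objects, as the hunk above does
    # f, f_real = open_log("build.log.gz")
    # try:
    #     f.write(b"log line\n")
    # finally:
    #     f.close()
    #     if f_real is not f:
    #         f_real.close()
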
diff --git a/portage_with_autodep/pym/_emerge/EbuildPhase.py.rej b/portage_with_autodep/pym/_emerge/EbuildPhase.py.rej
new file mode 100644
index 0000000..0f061da
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildPhase.py.rej
@@ -0,0 +1,12 @@
+--- pym/_emerge/EbuildPhase.py
++++ pym/_emerge/EbuildPhase.py
+@@ -33,7 +33,8 @@
+ ("_ebuild_lock",)
+
+ # FEATURES displayed prior to setup phase
+- _features_display = ("ccache", "distcc", "distcc-pump", "fakeroot",
++ _features_display = ("ccache", "depcheck", "depcheckstrict" "distcc",
++ "distcc-pump", "fakeroot",
+ "installsources", "keeptemp", "keepwork", "nostrip",
+ "preserve-libs", "sandbox", "selinux", "sesandbox",
+ "splitdebug", "suidctl", "test", "userpriv",
diff --git a/portage_with_autodep/pym/_emerge/EbuildPhase.pyo b/portage_with_autodep/pym/_emerge/EbuildPhase.pyo
new file mode 100644
index 0000000..4c73313
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildPhase.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/EbuildProcess.pyo b/portage_with_autodep/pym/_emerge/EbuildProcess.pyo
new file mode 100644
index 0000000..52f6cdf
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildProcess.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/EbuildSpawnProcess.pyo b/portage_with_autodep/pym/_emerge/EbuildSpawnProcess.pyo
new file mode 100644
index 0000000..1f3e925
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/EbuildSpawnProcess.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/EventsAnalyser.py b/portage_with_autodep/pym/_emerge/EventsAnalyser.py
index 7e416e7..65ece7b 100644
--- a/portage_with_autodep/pym/_emerge/EventsAnalyser.py
+++ b/portage_with_autodep/pym/_emerge/EventsAnalyser.py
@@ -18,51 +18,39 @@ class PortageUtils:
self.metadata_keys = [k for k in portage.auxdbkeys if not k.startswith("UNUSED_")]
self.use=self.settings["USE"]
- def get_best_visible_pkg(self,pkg,db="portdb"):
+ def get_best_visible_pkg(self,pkg):
"""
Gets best candidate for installing. Returns an empty string if none is found
:param pkg: package name
- :param db: name of db to look. Can be "vardb" or "portdb"
"""
try:
- if db=="portdb":
- return self.portdbapi.xmatch("bestmatch-visible", pkg)
- elif db=="vardb":
- return self.vardbapi.match(pkg)[0]
- else:
- return ''
+ return self.portdbapi.xmatch("bestmatch-visible", pkg)
except:
return ''
# non-recursive dependency getter
- def get_dep(self,pkg,dep_type=["RDEPEND","DEPEND"],db="portdb"):
+ def get_dep(self,pkg,dep_type=["RDEPEND","DEPEND"]):
"""
Gets current dependencies of a package. Looks in portage db
:param pkg: name of package
:param dep_type: type of dependencies to recurse. Can be ["DEPEND"] or
["RDEPEND", "DEPEND"]
- :param db: name of db to look. Can be "vardb" or "portdb"
:returns: **set** of packages names
"""
ret=set()
- pkg = self.get_best_visible_pkg(pkg,db)
+ pkg = self.get_best_visible_pkg(pkg)
if not pkg:
return ret
# we found the best visible match in common tree
- if db=="portdb":
- aux_get=self.portdbapi.aux_get
- elif db=="vardb":
- aux_get=self.vardbapi.aux_get
- else:
- return ret
- metadata = dict(zip(self.metadata_keys, aux_get(pkg, self.metadata_keys)))
+ metadata = dict(zip(self.metadata_keys,
+ self.portdbapi.aux_get(pkg, self.metadata_keys)))
dep_str = " ".join(metadata[k] for k in dep_type)
# the IUSE default are very important for us
@@ -94,7 +82,7 @@ class PortageUtils:
return ret
# recursive dependency getter
- def get_deps(self,pkg,dep_type=["RDEPEND","DEPEND"],db="portdb"):
+ def get_deps(self,pkg,dep_type=["RDEPEND","DEPEND"]):
"""
Gets current dependencies of a package on any depth
All dependencies **must** be installed
@@ -102,20 +90,19 @@ class PortageUtils:
:param pkg: name of package
:param dep_type: type of dependencies to recurse. Can be ["DEPEND"] or
["RDEPEND", "DEPEND"]
- :param db: name of db to look. Can be "vardb" or "portdb"
:returns: **set** of packages names
"""
ret=set()
- #import pdb; pdb.set_trace()
+
# get porttree dependencies on the first package
- pkg = self.get_best_visible_pkg(pkg,db)
+ pkg = self.portdbapi.xmatch("bestmatch-visible", pkg)
if not pkg:
return ret
known_packages=set()
- unknown_packages=self.get_dep(pkg,dep_type,db)
+ unknown_packages=self.get_dep(pkg,dep_type)
ret=ret.union(unknown_packages)
while unknown_packages:
@@ -124,40 +111,36 @@ class PortageUtils:
continue
known_packages.add(p)
- current_deps=self.get_dep(p,dep_type,'vardb')
- unknown_packages=unknown_packages.union(current_deps)
- ret=ret.union(current_deps)
-
- #metadata = dict(zip(self.metadata_keys, self.vardbapi.aux_get(p, self.metadata_keys)))
+ metadata = dict(zip(self.metadata_keys, self.vardbapi.aux_get(p, self.metadata_keys)))
- #dep_str = " ".join(metadata[k] for k in dep_type)
+ dep_str = " ".join(metadata[k] for k in dep_type)
# the IUSE default are very important for us
- #iuse_defaults=[
- # u[1:] for u in metadata.get("IUSE",'').split() if u.startswith("+")]
+ iuse_defaults=[
+ u[1:] for u in metadata.get("IUSE",'').split() if u.startswith("+")]
- #use=self.use.split()
+ use=self.use.split()
- #for u in iuse_defaults:
- # if u not in use:
- # use.append(u)
+ for u in iuse_defaults:
+ if u not in use:
+ use.append(u)
- #success, atoms = portage.dep_check(dep_str, None, self.settings,
- # myuse=use, myroot=self.settings["ROOT"],
- # trees={self.settings["ROOT"]:{"vartree":self.vartree,"porttree": self.vartree}})
-
- #if not success:
- # continue
-
- #for atom in atoms:
- # atomname = self.vartree.dep_bestmatch(atom)
- # if not atomname:
- # continue
- #
- # for unvirt_pkg in expand_new_virt(self.vardbapi,'='+atomname):
- # for pkg in self.vartree.dep_match(unvirt_pkg):
- # ret.add(pkg)
- # unknown_packages.add(pkg)
+ success, atoms = portage.dep_check(dep_str, None, self.settings,
+ myuse=use, myroot=self.settings["ROOT"],
+ trees={self.settings["ROOT"]:{"vartree":self.vartree,"porttree": self.vartree}})
+
+ if not success:
+ continue
+
+ for atom in atoms:
+ atomname = self.vartree.dep_bestmatch(atom)
+ if not atomname:
+ continue
+
+ for unvirt_pkg in expand_new_virt(self.vardbapi,'='+atomname):
+ for pkg in self.vartree.dep_match(unvirt_pkg):
+ ret.add(pkg)
+ unknown_packages.add(pkg)
return ret
def get_deps_for_package_building(self, pkg):
@@ -165,13 +148,12 @@ class PortageUtils:
returns buildtime dependencies of current package and
all runtime dependencies of that buildtime dependencies
"""
- buildtime_deps=self.get_dep(pkg, ["DEPEND"],"portdb")
+ buildtime_deps=self.get_dep(pkg, ["DEPEND"])
runtime_deps=set()
for dep in buildtime_deps:
- runtime_deps|=self.get_deps(dep,["RDEPEND","PDEPEND"],"vardb")
+ runtime_deps=runtime_deps.union(self.get_deps(dep,["RDEPEND"]))
- ret = buildtime_deps | runtime_deps
-
+ ret=buildtime_deps.union(runtime_deps)
return ret
def get_system_packages_list(self):
@@ -187,19 +169,7 @@ class PortageUtils:
for pkg in self.vartree.dep_match(unvirt_pkg):
ret.append(pkg)
return ret
-
- def get_system_packages_rdeps(self):
- """
- returns runtime dependencies of packages from system set
-
- :returns: **list** of package names
- """
- ret=set()
-
- for pkg in self.get_system_packages_list():
- ret=ret.union(self.get_deps(pkg,["RDEPEND"]))
- return list(ret)
-
+
class GentoolkitUtils:
"""
@@ -207,7 +177,7 @@ class GentoolkitUtils:
internals.
"""
- def getpackagesbyfiles(self,files):
+ def getpackagesbyfiles(files):
"""
:param files: list of filenames
:returns: **dictionary** file->package, if file doesn't belong to any
@@ -226,30 +196,17 @@ class GentoolkitUtils:
stdin=subprocess.PIPE, stdout=subprocess.PIPE,stderr=subprocess.PIPE,
bufsize=4096)
- out,err=proc.communicate(b"\n".join(listtocheck))
+ out,err=proc.communicate("\n".join(listtocheck).encode("utf8"))
- lines=out.split(b"\n")
+ lines=out.decode("utf8").split("\n")
#print lines
line_re=re.compile(r"^([^ ]+)\s+\(([^)]+)\)$")
for line in lines:
- try:
- line=line.decode("utf-8")
- except UnicodeDecodeError:
- portage.util.writemsg("Util qfile returned non-utf8 string: %s\n" % line)
-
- #import pdb; pdb.set_trace()
-
if len(line)==0:
continue
match=line_re.match(line)
if match:
- try:
- ret[match.group(2).encode("utf-8")]=match.group(1)
- except UnicodeEncodeError:
- portage.util.writemsg(
- "Util qfile failed to encode string %s to unicode\n" %
- match.group(2))
-
+ ret[match.group(2)]=match.group(1)
else:
portage.util.writemsg("Util qfile returned unparsable string: %s\n" % line)
@@ -259,7 +216,7 @@ class GentoolkitUtils:
return ret
- def getfilesbypackages(self,packagenames):
+ def getfilesbypackages(packagenames):
"""
:param packagename: name of package
@@ -273,7 +230,7 @@ class GentoolkitUtils:
out,err=proc.communicate()
- ret=out.split(b"\n")
+ ret=out.decode("utf8").split("\n")
if ret==['']:
ret=[]
except OSError as e:
@@ -281,7 +238,7 @@ class GentoolkitUtils:
return ret
- def get_all_packages_files(self):
+ def get_all_packages_files():
"""
Memory-hungry operation
@@ -295,7 +252,7 @@ class GentoolkitUtils:
out,err=proc.communicate()
- ret=out.split(b"\n")
+ ret=out.decode("utf8").split("\n")
except OSError as e:
portage.util.writemsg("Error while launching qfile: %s\n" % e)
@@ -306,28 +263,25 @@ class FilterProcGenerator:
portageutils=PortageUtils(settings=settings)
deps_all=portageutils.get_deps_for_package_building(pkgname)
- deps_portage=portageutils.get_dep('sys-apps/portage',["RDEPEND"])
+ deps_portage=portageutils.get_dep('portage',["RDEPEND"])
system_packages=portageutils.get_system_packages_list()
- system_deps=portageutils.get_system_packages_rdeps()
- allfiles=GentoolkitUtils().get_all_packages_files()
+ allfiles=GentoolkitUtils.get_all_packages_files()
portage.util.writemsg("All files list recieved, waiting for " \
"a list of allowed files\n")
- allowedpkgs=system_packages+system_deps
- allowedpkgs+=list(deps_portage)+list(deps_all)
- allowedpkgs+=["app-portage/autodep"]
+ allowedpkgs=system_packages+list(deps_portage)+list(deps_all)
- allowedfiles=GentoolkitUtils().getfilesbypackages(allowedpkgs)
+ allowedfiles=GentoolkitUtils.getfilesbypackages(allowedpkgs)
#for pkg in allowedpkgs:
# allowedfiles+=GentoolkitUtils.getfilesbypackage(pkg)
#import pdb; pdb.set_trace()
# manually add all python interpreters to this list
- allowedfiles+=GentoolkitUtils().getfilesbypackages(['python'])
+ allowedfiles+=GentoolkitUtils.getfilesbypackages(['python'])
allowedfiles=set(allowedfiles)
deniedfiles=allfiles-allowedfiles
@@ -350,10 +304,9 @@ class EventsAnalyser:
self.deps_all=self.portageutils.get_deps_for_package_building(pkgname)
self.deps_direct=self.portageutils.get_dep(pkgname,["DEPEND"])
- self.deps_portage=self.portageutils.get_dep('sys-apps/portage',["RDEPEND"])
-
+ self.deps_portage=self.portageutils.get_dep('portage',["RDEPEND"])
+
self.system_packages=self.portageutils.get_system_packages_list()
- self.system_deps=self.portageutils.get_system_packages_rdeps()
# All analyse work is here
# get unique filenames
@@ -365,7 +318,7 @@ class EventsAnalyser:
filenames=filenames.union(fail_events)
filenames=list(filenames)
- file_to_package=GentoolkitUtils().getpackagesbyfiles(filenames)
+ file_to_package=GentoolkitUtils.getpackagesbyfiles(filenames)
# This part is completely unreadable.
# It converts one complex struct (returned by getfsevents) to another complex
# struct which is good for generating output.
@@ -420,8 +373,7 @@ class EventsAnalyser:
stagesorder={"clean":1,"setup":2,"unpack":3,"prepare":4,"configure":5,"compile":6,"test":7,
"install":8,"preinst":9,"postinst":10,"prerm":11,"postrm":12,"unknown":13}
packagesinfo=self.packagesinfo
- # print information grouped by package
- #print(packagesinfo.keys())
+ # print information grouped by package
for package in sorted(packagesinfo):
# not showing special directory package
if package=="directory":
@@ -429,14 +381,12 @@ class EventsAnalyser:
if package=="unknown":
continue
-
+
is_pkg_in_dep=package in self.deps_all
is_pkg_in_portage_dep=package in self.deps_portage
is_pkg_in_system=package in self.system_packages
- is_pkg_in_system_dep=package in self.system_deps
is_pkg_python="dev-lang/python" in package
- is_pkg_self="app-portage/autodep" in package
stages=[]
for stage in sorted(packagesinfo[package].keys(), key=stagesorder.get):
@@ -475,10 +425,6 @@ class EventsAnalyser:
portage.util.writemsg("[SYSTEM]")
elif is_pkg_in_portage_dep:
portage.util.writemsg("[PORTAGE DEP]")
- elif is_pkg_in_system_dep:
- portage.util.writemsg("[SYSTEM DEP]")
- elif is_pkg_self:
- portage.util.writemsg("[AUTODEP]")
elif is_pkg_python:
portage.util.writemsg("[INTERPRETER]")
elif not self.is_package_useful(package,stages,filenames.keys()):
@@ -505,12 +451,7 @@ class EventsAnalyser:
for filename in filenames:
event_info=tuple(filenames[filename])
- try:
- portage.util.writemsg(
- " %-56s %-21s\n" % (filename.decode('utf-8'),action[event_info]))
- except UnicodeDecodeError:
- portage.util.writemsg(
- " %-56s %-21s\n" % ('NON-UTF8-FILENAME',action[event_info]))
+ portage.util.writemsg(" %-56s %-21s\n" % (filename,action[event_info]))
filescounter+=1
if filescounter>10:
portage.util.writemsg(" ... and %d more ...\n" % (len(filenames)-10))
@@ -529,7 +470,7 @@ class EventsAnalyser:
""" some basic heuristics here to cut part of packages """
excluded_paths=set(
- [b'/etc/sandbox.d/']
+ ['/etc/sandbox.d/']
)
excluded_packages=set(
@@ -560,9 +501,8 @@ class EventsAnalyser:
continue
# test 1: package is not useful if all files are *.desktop or *.xml or *.m4
- if not (f.endswith(b".desktop") or f.endswith(b".xml") or
- f.endswith(b".m4") or f.endswith(b".pc")):
- break
+ if not (f.endswith(".desktop") or f.endswith(".xml") or f.endswith(".m4") or f.endswith(".pc")):
+ break
else:
return False # we get here if cycle ends not with break
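
Part of the restored get_deps() code folds IUSE defaults (flags prefixed with "+") into the active USE list before calling dep_check. That flag-merging step in isolation, with hypothetical inputs:

    def effective_use(use_str, iuse_str):
        """Merge IUSE defaults (+flag) into the active USE list."""
        use = use_str.split()
        iuse_defaults = [u[1:] for u in iuse_str.split() if u.startswith("+")]
        for flag in iuse_defaults:
            if flag not in use:
                use.append(flag)
        return use

    # usage
    assert effective_use("ssl", "+ipv6 ssl -doc") == ["ssl", "ipv6"]
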
diff --git a/portage_with_autodep/pym/_emerge/EventsLogger.py b/portage_with_autodep/pym/_emerge/EventsLogger.py
index 1ade9fd..68b3c67 100644
--- a/portage_with_autodep/pym/_emerge/EventsLogger.py
+++ b/portage_with_autodep/pym/_emerge/EventsLogger.py
@@ -100,69 +100,62 @@ class EventsLogger(threading.Thread):
continue
#import pdb; pdb.set_trace()
- #try:
- message=record.split(b"\0")
- #except UnicodeDecodeError:
- # print("Bad message %s" % record)
- # continue
+ try:
+ message=record.decode("utf8").split("\0")
+ except UnicodeDecodeError:
+ print("Bad message %s" % record)
+ continue
# continue
#print(message)
try:
- eventname,filename,stage,result=message[1:5]
- eventname=eventname.decode("utf-8")
- stage=stage.decode("utf-8")
- result=result.decode("utf-8")
- except IndexError:
- print("IndexError while parsing %s" % record)
- except ValueError:
- print("ValueError while parsing %s" % record)
- except UnicodeDecodeError:
- print("UnicodeDecodeError while parsing %s" % record)
-
- if result=="ASKING":
- if self.filter_proc(eventname,filename,stage):
- s.sendall(b"ALLOW\0")
+ if message[4]=="ASKING":
+ if self.filter_proc(message[1],message[2],message[3]):
+ s.sendall(b"ALLOW\0")
+ else:
+ # TODO: log through portage infrastructure
+ #print("Blocking an access to %s" % message[2])
+ s.sendall(b"DENY\0")
else:
- # TODO: log through portage infrastructure
- #print("Blocking an access to %s" % message[2])
- s.sendall(b"DENY\0")
- else:
- if not stage in self.events:
- self.events[stage]=[{},{}]
-
- hashofsucesses=self.events[stage][0]
- hashoffailures=self.events[stage][1]
-
- if result=="DENIED":
- print("Blocking an access to %s" % filename)
-
- if result=="OK":
- if not filename in hashofsucesses:
- hashofsucesses[filename]=[False,False]
-
- readed_or_writed=hashofsucesses[filename]
-
- if eventname=="read":
- readed_or_writed[0]=True
- elif eventname=="write":
- readed_or_writed[1]=True
-
- elif result[0:3]=="ERR" or result=="DENIED":
- if not filename in hashoffailures:
- hashoffailures[filename]=[False,False]
- notfound_or_blocked=hashoffailures[filename]
-
- if result=="ERR/2":
- notfound_or_blocked[0]=True
- elif result=="DENIED":
- notfound_or_blocked[1]=True
+ eventname,filename,stage,result=message[1:5]
- else:
- print("Error in logger module<->analyser protocol")
+ if not stage in self.events:
+ self.events[stage]=[{},{}]
+ hashofsucesses=self.events[stage][0]
+ hashoffailures=self.events[stage][1]
+
+ if result=="DENIED":
+ print("Blocking an access to %s" % filename)
+
+ if result=="OK":
+ if not filename in hashofsucesses:
+ hashofsucesses[filename]=[False,False]
+
+ readed_or_writed=hashofsucesses[filename]
+
+ if eventname=="read":
+ readed_or_writed[0]=True
+ elif eventname=="write":
+ readed_or_writed[1]=True
+
+ elif result[0:3]=="ERR" or result=="DENIED":
+ if not filename in hashoffailures:
+ hashoffailures[filename]=[False,False]
+ notfound_or_blocked=hashoffailures[filename]
+
+ if result=="ERR/2":
+ notfound_or_blocked[0]=True
+ elif result=="DENIED":
+ notfound_or_blocked[1]=True
+
+ else:
+ print("Error in logger module<->analyser protocol")
+
+ except IndexError:
+ print("IndexError while parsing %s" % record)
except IOError as e:
if e.errno!=4: # handling "Interrupted system call" errors
raise
@@ -184,5 +177,4 @@ class EventsLogger(threading.Thread):
# We assume portage clears tmp folder, so no deleting a socket file
# We assume that no new socket data will arrive after this moment
- #print(self.events)
return self.events
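
The rewritten handler decodes each record as UTF-8 and splits it on NUL bytes before unpacking the event fields. A sketch of that parsing step; the exact wire format is inferred from the indices used in the diff, so the field layout here is an assumption:

    def parse_record(record):
        """Split one logger record into (eventname, filename, stage, result)."""
        message = record.decode("utf8").split("\0")
        try:
            eventname, filename, stage, result = message[1:5]
        except ValueError:
            # Fewer than five fields: the record is truncated or malformed.
            raise ValueError("short record: %r" % (record,))
        return eventname, filename, stage, result

    # usage (hypothetical record layout)
    # parse_record(b"7\0read\0/etc/passwd\0compile\0OK\0")
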
diff --git a/portage_with_autodep/pym/_emerge/FakeVartree.py b/portage_with_autodep/pym/_emerge/FakeVartree.py
index a11966f..d4dbe97 100644
--- a/portage_with_autodep/pym/_emerge/FakeVartree.py
+++ b/portage_with_autodep/pym/_emerge/FakeVartree.py
@@ -2,6 +2,7 @@
# Distributed under the terms of the GNU General Public License v2
import sys
+import warnings
import portage
from portage import os
@@ -37,8 +38,10 @@ class FakeVartree(vartree):
global updates are necessary (updates are performed when necessary if there
is not a matching ebuild in the tree). Instances of this class are not
populated until the sync() method is called."""
- def __init__(self, root_config, pkg_cache=None, pkg_root_config=None):
+ def __init__(self, root_config, pkg_cache=None, pkg_root_config=None,
+ dynamic_deps=True):
self._root_config = root_config
+ self._dynamic_deps = dynamic_deps
if pkg_root_config is None:
pkg_root_config = self._root_config
self._pkg_root_config = pkg_root_config
@@ -47,7 +50,6 @@ class FakeVartree(vartree):
real_vartree = root_config.trees["vartree"]
self._real_vardb = real_vartree.dbapi
portdb = root_config.trees["porttree"].dbapi
- self.root = real_vartree.root
self.settings = real_vartree.settings
mykeys = list(real_vartree.dbapi._aux_cache_keys)
if "_mtime_" not in mykeys:
@@ -55,19 +57,30 @@ class FakeVartree(vartree):
self._db_keys = mykeys
self._pkg_cache = pkg_cache
self.dbapi = FakeVardbapi(real_vartree.settings)
+ self.dbapi._aux_cache_keys = set(self._db_keys)
# Initialize variables needed for lazy cache pulls of the live ebuild
# metadata. This ensures that the vardb lock is released ASAP, without
# being delayed in case cache generation is triggered.
self._aux_get = self.dbapi.aux_get
- self.dbapi.aux_get = self._aux_get_wrapper
self._match = self.dbapi.match
- self.dbapi.match = self._match_wrapper
+ if dynamic_deps:
+ self.dbapi.aux_get = self._aux_get_wrapper
+ self.dbapi.match = self._match_wrapper
self._aux_get_history = set()
self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
self._portdb = portdb
self._global_updates = None
+ @property
+ def root(self):
+ warnings.warn("The root attribute of "
+ "_emerge.FakeVartree.FakeVartree"
+ " is deprecated. Use "
+ "settings['ROOT'] instead.",
+ DeprecationWarning, stacklevel=3)
+ return self.settings['ROOT']
+
def _match_wrapper(self, cpv, use_cache=1):
"""
Make sure the metadata in Package instances gets updated for any
@@ -147,15 +160,14 @@ class FakeVartree(vartree):
self.dbapi.aux_get = self._aux_get
self.settings._populate_treeVirtuals_if_needed(self)
finally:
- self.dbapi.aux_get = self._aux_get_wrapper
+ if self._dynamic_deps:
+ self.dbapi.aux_get = self._aux_get_wrapper
def _sync(self):
real_vardb = self._root_config.trees["vartree"].dbapi
current_cpv_set = frozenset(real_vardb.cpv_all())
pkg_vardb = self.dbapi
- pkg_cache = self._pkg_cache
- aux_get_history = self._aux_get_history
# Remove any packages that have been uninstalled.
for pkg in list(pkg_vardb):
diff --git a/portage_with_autodep/pym/_emerge/FakeVartree.pyo b/portage_with_autodep/pym/_emerge/FakeVartree.pyo
new file mode 100644
index 0000000..8707391
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/FakeVartree.pyo
Binary files differ
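
FakeVartree.root becomes a deprecated property that warns and forwards to settings['ROOT']. The same deprecation pattern in a self-contained form:

    import warnings

    class LegacyRootMixin:
        """Sketch of the deprecation pattern used for FakeVartree.root above."""

        def __init__(self, settings):
            self.settings = settings  # assumed to behave like a mapping

        @property
        def root(self):
            warnings.warn(
                "the root attribute is deprecated; use settings['ROOT'] instead",
                DeprecationWarning, stacklevel=2)
            return self.settings["ROOT"]

    # usage
    # tree = LegacyRootMixin({"ROOT": "/"})
    # tree.root  # emits a DeprecationWarning and returns "/"
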
diff --git a/portage_with_autodep/pym/_emerge/FifoIpcDaemon.py b/portage_with_autodep/pym/_emerge/FifoIpcDaemon.py
index a716dac..fcc4ab4 100644
--- a/portage_with_autodep/pym/_emerge/FifoIpcDaemon.py
+++ b/portage_with_autodep/pym/_emerge/FifoIpcDaemon.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage import os
@@ -15,14 +15,14 @@ class FifoIpcDaemon(AbstractPollTask):
def _start(self):
self._files = self._files_dict()
- input_fd = os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
# File streams are in unbuffered mode since we do atomic
# read and write of whole pickles.
- self._files.pipe_in = os.fdopen(input_fd, 'rb', 0)
+ self._files.pipe_in = \
+ os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
self._reg_id = self.scheduler.register(
- self._files.pipe_in.fileno(),
+ self._files.pipe_in,
self._registered_events, self._input_handler)
self._registered = True
@@ -32,12 +32,12 @@ class FifoIpcDaemon(AbstractPollTask):
Re-open the input stream, in order to suppress
POLLHUP events (bug #339976).
"""
- self._files.pipe_in.close()
- input_fd = os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
- self._files.pipe_in = os.fdopen(input_fd, 'rb', 0)
self.scheduler.unregister(self._reg_id)
+ os.close(self._files.pipe_in)
+ self._files.pipe_in = \
+ os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
self._reg_id = self.scheduler.register(
- self._files.pipe_in.fileno(),
+ self._files.pipe_in,
self._registered_events, self._input_handler)
def isAlive(self):
@@ -51,14 +51,9 @@ class FifoIpcDaemon(AbstractPollTask):
def _wait(self):
if self.returncode is not None:
return self.returncode
-
- if self._registered:
- self.scheduler.schedule(self._reg_id)
- self._unregister()
-
+ self._wait_loop()
if self.returncode is None:
self.returncode = os.EX_OK
-
return self.returncode
def _input_handler(self, fd, event):
@@ -77,5 +72,5 @@ class FifoIpcDaemon(AbstractPollTask):
if self._files is not None:
for f in self._files.values():
- f.close()
+ os.close(f)
self._files = None
diff --git a/portage_with_autodep/pym/_emerge/FifoIpcDaemon.pyo b/portage_with_autodep/pym/_emerge/FifoIpcDaemon.pyo
new file mode 100644
index 0000000..6d7c4f9
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/FifoIpcDaemon.pyo
Binary files differ
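
FifoIpcDaemon now keeps raw file descriptors from os.open() instead of buffered file objects, closing and re-opening them to clear POLLHUP. A stripped-down sketch of that fd lifecycle (assumes the FIFO already exists):

    import os

    class FifoReader:
        """Minimal sketch of the raw-fd handling the diff above switches to."""

        def __init__(self, fifo_path):
            self.fifo_path = fifo_path
            self.fd = os.open(fifo_path, os.O_RDONLY | os.O_NONBLOCK)

        def reopen(self):
            # Close and re-open to suppress POLLHUP after the writer goes away.
            os.close(self.fd)
            self.fd = os.open(self.fifo_path, os.O_RDONLY | os.O_NONBLOCK)

        def close(self):
            os.close(self.fd)
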
diff --git a/portage_with_autodep/pym/_emerge/JobStatusDisplay.py b/portage_with_autodep/pym/_emerge/JobStatusDisplay.py
index 1949232..5b9b221 100644
--- a/portage_with_autodep/pym/_emerge/JobStatusDisplay.py
+++ b/portage_with_autodep/pym/_emerge/JobStatusDisplay.py
@@ -97,7 +97,7 @@ class JobStatusDisplay(object):
"""
Initialize term control codes.
@rtype: bool
- @returns: True if term codes were successfully initialized,
+ @return: True if term codes were successfully initialized,
False otherwise.
"""
@@ -209,24 +209,26 @@ class JobStatusDisplay(object):
def display(self):
"""
Display status on stdout, but only if something has
- changed since the last call.
+ changed since the last call. This always returns True,
+ for continuous scheduling via timeout_add.
"""
if self.quiet:
- return
+ return True
current_time = time.time()
time_delta = current_time - self._last_display_time
if self._displayed and \
not self._changed:
if not self._isatty:
- return
+ return True
if time_delta < self._min_display_latency:
- return
+ return True
self._last_display_time = current_time
self._changed = False
self._display_status()
+ return True
def _display_status(self):
# Don't use len(self._completed_tasks) here since that also
@@ -289,4 +291,11 @@ class JobStatusDisplay(object):
self._update(color_output.getvalue())
if self.xterm_titles:
- xtermTitle(" ".join(plain_output.split()))
+ # If the HOSTNAME variable is exported, include it
+ # in the xterm title, just like emergelog() does.
+ # See bug #390699.
+ title_str = " ".join(plain_output.split())
+ hostname = os.environ.get("HOSTNAME")
+ if hostname is not None:
+ title_str = "%s: %s" % (hostname, title_str)
+ xtermTitle(title_str)
diff --git a/portage_with_autodep/pym/_emerge/JobStatusDisplay.pyo b/portage_with_autodep/pym/_emerge/JobStatusDisplay.pyo
new file mode 100644
index 0000000..f79b2c2
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/JobStatusDisplay.pyo
Binary files differ
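
The xterm-title change prefixes the hostname when the HOSTNAME variable is exported (bug #390699). The string-building step on its own:

    import os

    def xterm_title_string(plain_output):
        """Build the xterm title, prefixing the hostname when it is exported."""
        title = " ".join(plain_output.split())
        hostname = os.environ.get("HOSTNAME")
        if hostname is not None:
            title = "%s: %s" % (hostname, title)
        return title
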
diff --git a/portage_with_autodep/pym/_emerge/MergeListItem.py b/portage_with_autodep/pym/_emerge/MergeListItem.py
index 2176bf6..8086c68 100644
--- a/portage_with_autodep/pym/_emerge/MergeListItem.py
+++ b/portage_with_autodep/pym/_emerge/MergeListItem.py
@@ -68,7 +68,7 @@ class MergeListItem(CompositeTask):
pkg_repo_name = "unknown repo"
msg += " from %s" % pkg_repo_name
- if pkg.root != "/":
+ if pkg.root_config.settings["ROOT"] != "/":
msg += " %s %s" % (preposition, pkg.root)
if not build_opts.pretend:
diff --git a/portage_with_autodep/pym/_emerge/MergeListItem.pyo b/portage_with_autodep/pym/_emerge/MergeListItem.pyo
new file mode 100644
index 0000000..168a227
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/MergeListItem.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/MetadataRegen.py b/portage_with_autodep/pym/_emerge/MetadataRegen.py
index 8103175..e82015f 100644
--- a/portage_with_autodep/pym/_emerge/MetadataRegen.py
+++ b/portage_with_autodep/pym/_emerge/MetadataRegen.py
@@ -1,8 +1,9 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import portage
from portage import os
+from portage.dep import _repo_separator
from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
from _emerge.PollScheduler import PollScheduler
@@ -10,7 +11,7 @@ class MetadataRegen(PollScheduler):
def __init__(self, portdb, cp_iter=None, consumer=None,
max_jobs=None, max_load=None):
- PollScheduler.__init__(self)
+ PollScheduler.__init__(self, main=True)
self._portdb = portdb
self._global_cleanse = False
if cp_iter is None:
@@ -33,10 +34,11 @@ class MetadataRegen(PollScheduler):
self.returncode = os.EX_OK
self._error_count = 0
self._running_tasks = set()
+ self._remaining_tasks = True
def _terminate_tasks(self):
- while self._running_tasks:
- self._running_tasks.pop().cancel()
+ for task in list(self._running_tasks):
+ task.cancel()
def _iter_every_cp(self):
portage.writemsg_stdout("Listing available packages...\n")
@@ -60,27 +62,32 @@ class MetadataRegen(PollScheduler):
break
cp_set.add(cp)
portage.writemsg_stdout("Processing %s\n" % cp)
- cpv_list = portdb.cp_list(cp)
- for cpv in cpv_list:
- if self._terminated_tasks:
- break
- valid_pkgs.add(cpv)
- ebuild_path, repo_path = portdb.findname2(cpv)
- if ebuild_path is None:
- raise AssertionError("ebuild not found for '%s'" % cpv)
- metadata, st, emtime = portdb._pull_valid_cache(
- cpv, ebuild_path, repo_path)
- if metadata is not None:
- if consumer is not None:
- consumer(cpv, ebuild_path,
- repo_path, metadata)
- continue
-
- yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
- ebuild_mtime=emtime,
- metadata_callback=portdb._metadata_callback,
- portdb=portdb, repo_path=repo_path,
- settings=portdb.doebuild_settings)
+ # We iterate over portdb.porttrees, since it's common to
+ # tweak this attribute in order to adjust repo selection.
+ for mytree in portdb.porttrees:
+ repo = portdb.repositories.get_repo_for_location(mytree)
+ cpv_list = portdb.cp_list(cp, mytree=[repo.location])
+ for cpv in cpv_list:
+ if self._terminated_tasks:
+ break
+ valid_pkgs.add(cpv)
+ ebuild_path, repo_path = portdb.findname2(cpv, myrepo=repo.name)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s%s%s'" % (cpv, _repo_separator, repo.name))
+ metadata, ebuild_hash = portdb._pull_valid_cache(
+ cpv, ebuild_path, repo_path)
+ if metadata is not None:
+ if consumer is not None:
+ consumer(cpv, repo_path, metadata, ebuild_hash, True)
+ continue
+
+ yield EbuildMetadataPhase(cpv=cpv,
+ ebuild_hash=ebuild_hash,
+ portdb=portdb, repo_path=repo_path,
+ settings=portdb.doebuild_settings)
+
+ def _keep_scheduling(self):
+ return self._remaining_tasks and not self._terminated_tasks
def run(self):
@@ -88,11 +95,7 @@ class MetadataRegen(PollScheduler):
from portage.cache.cache_errors import CacheError
dead_nodes = {}
- while self._schedule():
- self._poll_loop()
-
- while self._jobs:
- self._poll_loop()
+ self._main_loop()
if self._terminated_tasks:
self.returncode = 1
@@ -140,26 +143,21 @@ class MetadataRegen(PollScheduler):
pass
def _schedule_tasks(self):
- """
- @rtype: bool
- @returns: True if there may be remaining tasks to schedule,
- False otherwise.
- """
if self._terminated_tasks:
- return False
+ return
while self._can_add_job():
try:
metadata_process = next(self._process_iter)
except StopIteration:
- return False
+ self._remaining_tasks = False
+ return
self._jobs += 1
self._running_tasks.add(metadata_process)
metadata_process.scheduler = self.sched_iface
metadata_process.addExitListener(self._metadata_exit)
metadata_process.start()
- return True
def _metadata_exit(self, metadata_process):
self._jobs -= 1
@@ -176,9 +174,10 @@ class MetadataRegen(PollScheduler):
# On failure, still notify the consumer (in this case the metadata
# argument is None).
self._consumer(metadata_process.cpv,
- metadata_process.ebuild_path,
metadata_process.repo_path,
- metadata_process.metadata)
+ metadata_process.metadata,
+ metadata_process.ebuild_hash,
+ metadata_process.eapi_supported)
self._schedule()
diff --git a/portage_with_autodep/pym/_emerge/MetadataRegen.pyo b/portage_with_autodep/pym/_emerge/MetadataRegen.pyo
new file mode 100644
index 0000000..6c8788f
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/MetadataRegen.pyo
Binary files differ
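
MetadataRegen._schedule_tasks() pulls EbuildMetadataPhase tasks from a generator and treats StopIteration as "no more work" rather than an error. The same pull-from-iterator scheduling shape, reduced to a helper with caller-supplied hooks (all names hypothetical):

    def schedule_from_iterator(task_iter, can_add_job, start_task):
        """Pull tasks from an iterator until it is exhausted or capacity runs out.

        Returns True while the iterator may still have tasks, False once it is
        exhausted, mirroring the _remaining_tasks flag in the hunk above.
        """
        while can_add_job():
            try:
                task = next(task_iter)
            except StopIteration:
                return False
            start_task(task)
        return True
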
diff --git a/portage_with_autodep/pym/_emerge/MiscFunctionsProcess.py b/portage_with_autodep/pym/_emerge/MiscFunctionsProcess.py
index ce0ab14..afa44fb 100644
--- a/portage_with_autodep/pym/_emerge/MiscFunctionsProcess.py
+++ b/portage_with_autodep/pym/_emerge/MiscFunctionsProcess.py
@@ -29,5 +29,11 @@ class MiscFunctionsProcess(AbstractEbuildProcess):
AbstractEbuildProcess._start(self)
def _spawn(self, args, **kwargs):
- self.settings.pop("EBUILD_PHASE", None)
- return spawn(" ".join(args), self.settings, **kwargs)
+ # Temporarily unset EBUILD_PHASE so that bashrc code doesn't
+ # think this is a real phase.
+ phase_backup = self.settings.pop("EBUILD_PHASE", None)
+ try:
+ return spawn(" ".join(args), self.settings, **kwargs)
+ finally:
+ if phase_backup is not None:
+ self.settings["EBUILD_PHASE"] = phase_backup
diff --git a/portage_with_autodep/pym/_emerge/MiscFunctionsProcess.pyo b/portage_with_autodep/pym/_emerge/MiscFunctionsProcess.pyo
new file mode 100644
index 0000000..e3f5344
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/MiscFunctionsProcess.pyo
Binary files differ
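
The MiscFunctionsProcess fix backs up EBUILD_PHASE, spawns, and restores it in a finally block. The same idea expressed as a context manager; the dict-like settings object is an assumption:

    from contextlib import contextmanager

    @contextmanager
    def phase_unset(settings, key="EBUILD_PHASE"):
        """Temporarily drop a settings key, restoring it afterwards."""
        backup = settings.pop(key, None)
        try:
            yield
        finally:
            if backup is not None:
                settings[key] = backup

    # usage
    # with phase_unset(settings):
    #     spawn(" ".join(args), settings)
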
diff --git a/portage_with_autodep/pym/_emerge/Package.py b/portage_with_autodep/pym/_emerge/Package.py
index 20c72b4..c04fa1f 100644
--- a/portage_with_autodep/pym/_emerge/Package.py
+++ b/portage_with_autodep/pym/_emerge/Package.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import sys
@@ -9,9 +9,9 @@ from portage.cache.mappings import slot_dict_class
from portage.const import EBUILD_PHASES
from portage.dep import Atom, check_required_use, use_reduce, \
paren_enclose, _slot_re, _slot_separator, _repo_separator
+from portage.versions import _pkg_str, _unknown_repo
from portage.eapi import eapi_has_iuse_defaults, eapi_has_required_use
from portage.exception import InvalidDependString
-from portage.repository.config import _gen_valid_repo
from _emerge.Task import Task
if sys.hexversion >= 0x3000000:
@@ -26,7 +26,7 @@ class Package(Task):
"root_config", "type_name",
"category", "counter", "cp", "cpv_split",
"inherited", "invalid", "iuse", "masks", "mtime",
- "pf", "pv_split", "root", "slot", "slot_atom", "visible",) + \
+ "pf", "root", "slot", "slot_atom", "version", "visible",) + \
("_raw_metadata", "_use",)
metadata_keys = [
@@ -38,7 +38,7 @@ class Package(Task):
_dep_keys = ('DEPEND', 'PDEPEND', 'RDEPEND',)
_use_conditional_misc_keys = ('LICENSE', 'PROPERTIES', 'RESTRICT')
- UNKNOWN_REPO = "__unknown__"
+ UNKNOWN_REPO = _unknown_repo
def __init__(self, **kwargs):
Task.__init__(self, **kwargs)
@@ -49,7 +49,6 @@ class Package(Task):
self.metadata = _PackageMetadataWrapper(self, self._raw_metadata)
if not self.built:
self.metadata['CHOST'] = self.root_config.settings.get('CHOST', '')
- self.cp = portage.cpv_getkey(self.cpv)
slot = self.slot
if _slot_re.match(slot) is None:
self._invalid_metadata('SLOT.invalid',
@@ -57,6 +56,11 @@ class Package(Task):
# Avoid an InvalidAtom exception when creating slot_atom.
# This package instance will be masked due to empty SLOT.
slot = '0'
+ self.cpv = _pkg_str(self.cpv, slot=slot,
+ repo=self.metadata.get('repository', ''))
+ self.cp = self.cpv.cp
+ # sync metadata with validated repo (may be UNKNOWN_REPO)
+ self.metadata['repository'] = self.cpv.repo
if (self.iuse.enabled or self.iuse.disabled) and \
not eapi_has_iuse_defaults(self.metadata["EAPI"]):
if not self.installed:
@@ -64,14 +68,10 @@ class Package(Task):
"IUSE contains defaults, but EAPI doesn't allow them")
self.slot_atom = portage.dep.Atom("%s%s%s" % (self.cp, _slot_separator, slot))
self.category, self.pf = portage.catsplit(self.cpv)
- self.cpv_split = portage.catpkgsplit(self.cpv)
- self.pv_split = self.cpv_split[1:]
+ self.cpv_split = self.cpv.cpv_split
+ self.version = self.cpv.version
if self.inherited is None:
self.inherited = frozenset()
- repo = _gen_valid_repo(self.metadata.get('repository', ''))
- if not repo:
- repo = self.UNKNOWN_REPO
- self.metadata['repository'] = repo
self._validate_deps()
self.masks = self._masks()
@@ -84,7 +84,7 @@ class Package(Task):
self._hash_key = Package._gen_hash_key(cpv=self.cpv,
installed=self.installed, onlydeps=self.onlydeps,
- operation=self.operation, repo_name=repo,
+ operation=self.operation, repo_name=self.cpv.repo,
root_config=self.root_config,
type_name=self.type_name)
self._hash_value = hash(self._hash_key)
@@ -239,11 +239,6 @@ class Package(Task):
if mask_atom is not None:
masks['package.mask'] = mask_atom
- system_mask = settings._getProfileMaskAtom(
- self.cpv, self.metadata)
- if system_mask is not None:
- masks['profile.system'] = system_mask
-
try:
missing_licenses = settings._getMissingLicenses(
self.cpv, self.metadata)
@@ -276,7 +271,6 @@ class Package(Task):
return False
if 'package.mask' in masks or \
- 'profile.system' in masks or \
'LICENSE' in masks:
return False
@@ -367,15 +361,15 @@ class Package(Task):
% (portage.output.colorize(cpv_color, self.cpv + _repo_separator + self.repo) , self.type_name)
if self.type_name == "installed":
- if self.root != "/":
- s += " in '%s'" % self.root
+ if self.root_config.settings['ROOT'] != "/":
+ s += " in '%s'" % self.root_config.settings['ROOT']
if self.operation == "uninstall":
s += " scheduled for uninstall"
else:
if self.operation == "merge":
s += " scheduled for merge"
- if self.root != "/":
- s += " to '%s'" % self.root
+ if self.root_config.settings['ROOT'] != "/":
+ s += " to '%s'" % self.root_config.settings['ROOT']
s += ")"
return s
@@ -497,7 +491,7 @@ class Package(Task):
def is_valid_flag(self, flags):
"""
- @returns: True if all flags are valid USE values which may
+ @return: True if all flags are valid USE values which may
be specified in USE dependencies, False otherwise.
"""
if isinstance(flags, basestring):
@@ -511,7 +505,7 @@ class Package(Task):
def get_missing_iuse(self, flags):
"""
- @returns: A list of flags missing from IUSE.
+ @return: A list of flags missing from IUSE.
"""
if isinstance(flags, basestring):
flags = [flags]
@@ -535,28 +529,28 @@ class Package(Task):
def __lt__(self, other):
if other.cp != self.cp:
return False
- if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
+ if portage.vercmp(self.version, other.version) < 0:
return True
return False
def __le__(self, other):
if other.cp != self.cp:
return False
- if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
+ if portage.vercmp(self.version, other.version) <= 0:
return True
return False
def __gt__(self, other):
if other.cp != self.cp:
return False
- if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
+ if portage.vercmp(self.version, other.version) > 0:
return True
return False
def __ge__(self, other):
if other.cp != self.cp:
return False
- if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
+ if portage.vercmp(self.version, other.version) >= 0:
return True
return False
diff --git a/portage_with_autodep/pym/_emerge/Package.pyo b/portage_with_autodep/pym/_emerge/Package.pyo
new file mode 100644
index 0000000..3d37317
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/Package.pyo
Binary files differ
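
Package comparisons now go through portage.vercmp() on version strings instead of pkgcmp() on split tuples, and packages with different category/package names never order against each other. A sketch with the comparator injected so it runs without Portage installed:

    class VersionedPkg:
        """Sketch of the vercmp-based ordering in the Package hunk above."""

        def __init__(self, cp, version, vercmp):
            self.cp = cp
            self.version = version
            # vercmp is a caller-supplied three-way comparator, for example
            # portage.versions.vercmp when Portage is available.
            self._vercmp = vercmp

        def __lt__(self, other):
            # As in the diff: different category/package names never compare.
            if other.cp != self.cp:
                return False
            return self._vercmp(self.version, other.version) < 0
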
diff --git a/portage_with_autodep/pym/_emerge/PackageArg.pyo b/portage_with_autodep/pym/_emerge/PackageArg.pyo
new file mode 100644
index 0000000..c50e145
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PackageArg.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/PackageMerge.py b/portage_with_autodep/pym/_emerge/PackageMerge.py
index f8fa04a..eed34e9 100644
--- a/portage_with_autodep/pym/_emerge/PackageMerge.py
+++ b/portage_with_autodep/pym/_emerge/PackageMerge.py
@@ -28,7 +28,7 @@ class PackageMerge(CompositeTask):
counter_str,
colorize("GOOD", pkg.cpv))
- if pkg.root != "/":
+ if pkg.root_config.settings["ROOT"] != "/":
msg += " %s %s" % (preposition, pkg.root)
if not self.merge.build_opts.fetchonly and \
diff --git a/portage_with_autodep/pym/_emerge/PackageMerge.pyo b/portage_with_autodep/pym/_emerge/PackageMerge.pyo
new file mode 100644
index 0000000..6403d1b
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PackageMerge.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/PackageUninstall.pyo b/portage_with_autodep/pym/_emerge/PackageUninstall.pyo
new file mode 100644
index 0000000..847c749
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PackageUninstall.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/PackageVirtualDbapi.py b/portage_with_autodep/pym/_emerge/PackageVirtualDbapi.py
index a692bb6..0f7be44 100644
--- a/portage_with_autodep/pym/_emerge/PackageVirtualDbapi.py
+++ b/portage_with_autodep/pym/_emerge/PackageVirtualDbapi.py
@@ -1,8 +1,9 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import sys
from portage.dbapi import dbapi
+from portage.dbapi.dep_expand import dep_expand
class PackageVirtualDbapi(dbapi):
"""
@@ -76,20 +77,24 @@ class PackageVirtualDbapi(dbapi):
self._match_cache = {}
def match(self, origdep, use_cache=1):
- result = self._match_cache.get(origdep)
+ atom = dep_expand(origdep, mydb=self, settings=self.settings)
+ cache_key = (atom, atom.unevaluated_atom)
+ result = self._match_cache.get(cache_key)
if result is not None:
return result[:]
- result = dbapi.match(self, origdep, use_cache=use_cache)
- self._match_cache[origdep] = result
+ result = list(self._iter_match(atom, self.cp_list(atom.cp)))
+ self._match_cache[cache_key] = result
return result[:]
def cpv_exists(self, cpv, myrepo=None):
return cpv in self._cpv_map
def cp_list(self, mycp, use_cache=1):
- cachelist = self._match_cache.get(mycp)
- # cp_list() doesn't expand old-style virtuals
- if cachelist and cachelist[0].startswith(mycp):
+ # NOTE: Cache can be safely shared with the match cache, since the
+ # match cache uses the result from dep_expand for the cache_key.
+ cache_key = (mycp, mycp)
+ cachelist = self._match_cache.get(cache_key)
+ if cachelist is not None:
return cachelist[:]
cpv_list = self._cp_map.get(mycp)
if cpv_list is None:
@@ -97,8 +102,7 @@ class PackageVirtualDbapi(dbapi):
else:
cpv_list = [pkg.cpv for pkg in cpv_list]
self._cpv_sort_ascending(cpv_list)
- if not (not cpv_list and mycp.startswith("virtual/")):
- self._match_cache[mycp] = cpv_list
+ self._match_cache[cache_key] = cpv_list
return cpv_list[:]
def cp_all(self):
diff --git a/portage_with_autodep/pym/_emerge/PackageVirtualDbapi.pyo b/portage_with_autodep/pym/_emerge/PackageVirtualDbapi.pyo
new file mode 100644
index 0000000..a1a850f
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PackageVirtualDbapi.pyo
Binary files differ
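
PackageVirtualDbapi.match() and cp_list() now share one cache keyed by 2-tuples, and always hand back copies of the cached lists. The caching shape in isolation (the compute callable and key format are illustrative):

    class MatchCache:
        """Sketch of the shared match/cp_list cache keyed by 2-tuples, as above."""

        def __init__(self, compute):
            self._compute = compute   # callable: key -> list
            self._cache = {}

        def get(self, key):
            result = self._cache.get(key)
            if result is None:
                result = self._compute(key)
                self._cache[key] = result
            # Return a copy so callers cannot mutate the cached list.
            return result[:]
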
diff --git a/portage_with_autodep/pym/_emerge/PipeReader.py b/portage_with_autodep/pym/_emerge/PipeReader.py
index 375c98f..90febdf 100644
--- a/portage_with_autodep/pym/_emerge/PipeReader.py
+++ b/portage_with_autodep/pym/_emerge/PipeReader.py
@@ -1,11 +1,9 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage import os
from _emerge.AbstractPollTask import AbstractPollTask
-from _emerge.PollConstants import PollConstants
import fcntl
-import array
class PipeReader(AbstractPollTask):
@@ -17,16 +15,22 @@ class PipeReader(AbstractPollTask):
"""
__slots__ = ("input_files",) + \
- ("_read_data", "_reg_ids")
+ ("_read_data", "_reg_ids", "_use_array")
def _start(self):
self._reg_ids = set()
self._read_data = []
- for k, f in self.input_files.items():
+
+ if self._use_array:
+ output_handler = self._array_output_handler
+ else:
+ output_handler = self._output_handler
+
+ for f in self.input_files.values():
fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
self._reg_ids.add(self.scheduler.register(f.fileno(),
- self._registered_events, self._output_handler))
+ self._registered_events, output_handler))
self._registered = True
def isAlive(self):
@@ -39,11 +43,7 @@ class PipeReader(AbstractPollTask):
def _wait(self):
if self.returncode is not None:
return self.returncode
-
- if self._registered:
- self.scheduler.schedule(self._reg_ids)
- self._unregister()
-
+ self._wait_loop()
self.returncode = os.EX_OK
return self.returncode
@@ -57,26 +57,42 @@ class PipeReader(AbstractPollTask):
def _output_handler(self, fd, event):
- if event & PollConstants.POLLIN:
+ while True:
+ data = self._read_buf(fd, event)
+ if data is None:
+ break
+ if data:
+ self._read_data.append(data)
+ else:
+ self._unregister()
+ self.wait()
+ break
+
+ self._unregister_if_appropriate(event)
+
+ return True
- for f in self.input_files.values():
- if fd == f.fileno():
- break
+ def _array_output_handler(self, fd, event):
- buf = array.array('B')
- try:
- buf.fromfile(f, self._bufsize)
- except (EOFError, IOError):
- pass
+ for f in self.input_files.values():
+ if f.fileno() == fd:
+ break
- if buf:
- self._read_data.append(buf.tostring())
+ while True:
+ data = self._read_array(f, event)
+ if data is None:
+ break
+ if data:
+ self._read_data.append(data)
else:
self._unregister()
self.wait()
+ break
self._unregister_if_appropriate(event)
+ return True
+
def _unregister(self):
"""
Unregister from the scheduler and close open files.
diff --git a/portage_with_autodep/pym/_emerge/PipeReader.pyo b/portage_with_autodep/pym/_emerge/PipeReader.pyo
new file mode 100644
index 0000000..2f53e7d
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PipeReader.pyo
Binary files differ
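
PipeReader still flips each input file into non-blocking mode with fcntl before registering it with the scheduler. That two-step F_GETFL/F_SETFL dance as a helper:

    import fcntl
    import os

    def set_nonblocking(fd):
        """Put a file descriptor into non-blocking mode, as PipeReader does above."""
        flags = fcntl.fcntl(fd, fcntl.F_GETFL)
        fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
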
diff --git a/portage_with_autodep/pym/_emerge/PollConstants.py b/portage_with_autodep/pym/_emerge/PollConstants.py
deleted file mode 100644
index d0270a9..0000000
--- a/portage_with_autodep/pym/_emerge/PollConstants.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 1999-2009 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import select
-class PollConstants(object):
-
- """
- Provides POLL* constants that are equivalent to those from the
- select module, for use by PollSelectAdapter.
- """
-
- names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
- v = 1
- for k in names:
- locals()[k] = getattr(select, k, v)
- v *= 2
- del k, v
-
diff --git a/portage_with_autodep/pym/_emerge/PollScheduler.py b/portage_with_autodep/pym/_emerge/PollScheduler.py
index a2b5c24..965dc20 100644
--- a/portage_with_autodep/pym/_emerge/PollScheduler.py
+++ b/portage_with_autodep/pym/_emerge/PollScheduler.py
@@ -1,11 +1,8 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import gzip
import errno
-import logging
-import select
-import time
try:
import threading
@@ -15,36 +12,55 @@ except ImportError:
from portage import _encodings
from portage import _unicode_encode
from portage.util import writemsg_level
+from portage.util.SlotObject import SlotObject
+from portage.util._eventloop.EventLoop import EventLoop
+from portage.util._eventloop.global_event_loop import global_event_loop
-from _emerge.SlotObject import SlotObject
from _emerge.getloadavg import getloadavg
-from _emerge.PollConstants import PollConstants
-from _emerge.PollSelectAdapter import PollSelectAdapter
class PollScheduler(object):
class _sched_iface_class(SlotObject):
- __slots__ = ("output", "register", "schedule", "unregister")
+ __slots__ = ("IO_ERR", "IO_HUP", "IO_IN", "IO_NVAL", "IO_OUT",
+ "IO_PRI", "child_watch_add",
+ "idle_add", "io_add_watch", "iteration",
+ "output", "register", "run",
+ "source_remove", "timeout_add", "unregister")
- def __init__(self):
+ def __init__(self, main=False):
+ """
+ @param main: If True then use global_event_loop(), otherwise use
+ a local EventLoop instance (default is False, for safe use in
+ a non-main thread)
+ @type main: bool
+ """
self._terminated = threading.Event()
self._terminated_tasks = False
self._max_jobs = 1
self._max_load = None
self._jobs = 0
- self._poll_event_queue = []
- self._poll_event_handlers = {}
- self._poll_event_handler_ids = {}
- # Increment id for each new handler.
- self._event_handler_id = 0
- self._poll_obj = create_poll_instance()
self._scheduling = False
self._background = False
+ if main:
+ self._event_loop = global_event_loop()
+ else:
+ self._event_loop = EventLoop(main=False)
self.sched_iface = self._sched_iface_class(
+ IO_ERR=self._event_loop.IO_ERR,
+ IO_HUP=self._event_loop.IO_HUP,
+ IO_IN=self._event_loop.IO_IN,
+ IO_NVAL=self._event_loop.IO_NVAL,
+ IO_OUT=self._event_loop.IO_OUT,
+ IO_PRI=self._event_loop.IO_PRI,
+ child_watch_add=self._event_loop.child_watch_add,
+ idle_add=self._event_loop.idle_add,
+ io_add_watch=self._event_loop.io_add_watch,
+ iteration=self._event_loop.iteration,
output=self._task_output,
- register=self._register,
- schedule=self._schedule_wait,
- unregister=self._unregister)
+ register=self._event_loop.io_add_watch,
+ source_remove=self._event_loop.source_remove,
+ timeout_add=self._event_loop.timeout_add,
+ unregister=self._event_loop.source_remove)
def terminate(self):
"""
@@ -55,17 +71,47 @@ class PollScheduler(object):
"""
self._terminated.set()
+ def _termination_check(self):
+ """
+ Calls _terminate_tasks() if appropriate. It's guaranteed not to
+ call it while _schedule_tasks() is being called. The check should
+ be executed once per iteration of the event loop, so that termination
+ signals are handled at the earliest opportunity. It always returns
+ True, so that it stays registered for continuous scheduling via idle_add.
+ """
+ if not self._scheduling and \
+ self._terminated.is_set() and \
+ not self._terminated_tasks:
+ self._scheduling = True
+ try:
+ self._terminated_tasks = True
+ self._terminate_tasks()
+ finally:
+ self._scheduling = False
+ return True
+
def _terminate_tasks(self):
"""
Send signals to terminate all tasks. This is called once
- from self._schedule() in the event dispatching thread. This
- prevents it from being called while the _schedule_tasks()
+ from _keep_scheduling() or _is_work_scheduled() in the event
+ dispatching thread. It will not be called while the _schedule_tasks()
implementation is running, in order to avoid potential
interference. All tasks should be cleaned up at the earliest
opportunity, but not necessarily before this method returns.
+ Typically, this method will send kill signals and return without
+ waiting for exit status. This allows basic cleanup to occur, such as
+ flushing of buffered output to logs.
"""
raise NotImplementedError()
+ def _keep_scheduling(self):
+ """
+ @rtype: bool
+ @return: True if there may be remaining tasks to schedule,
+ False otherwise.
+ """
+ return False
+
def _schedule_tasks(self):
"""
This is called from inside the _schedule() method, which
@@ -79,10 +125,10 @@ class PollScheduler(object):
Unless this method is used to perform user interface updates,
or something like that, the first thing it should do is check
the state of _terminated_tasks and if that is True then it
- should return False immediately (since there's no need to
+ should return immediately (since there's no need to
schedule anything after _terminate_tasks() has been called).
"""
- raise NotImplementedError()
+ pass
def _schedule(self):
"""
@@ -95,15 +141,32 @@ class PollScheduler(object):
return False
self._scheduling = True
try:
+ self._schedule_tasks()
+ finally:
+ self._scheduling = False
- if self._terminated.is_set() and \
- not self._terminated_tasks:
- self._terminated_tasks = True
- self._terminate_tasks()
+ def _main_loop(self):
+ term_check_id = self.sched_iface.idle_add(self._termination_check)
+ try:
+ # Populate initial event sources. We only need to do
+ # this once here, since it can be called during the
+ # loop from within event handlers.
+ self._schedule()
+
+ # Loop while there are jobs to be scheduled.
+ while self._keep_scheduling():
+ self.sched_iface.iteration()
- return self._schedule_tasks()
+ # Clean shutdown of previously scheduled jobs. In the
+ # case of termination, this allows for basic cleanup
+ # such as flushing of buffered output to logs.
+ while self._is_work_scheduled():
+ self.sched_iface.iteration()
finally:
- self._scheduling = False
+ self.sched_iface.source_remove(term_check_id)
+
+ def _is_work_scheduled(self):
+ return bool(self._running_job_count())
def _running_job_count(self):
return self._jobs
@@ -132,183 +195,6 @@ class PollScheduler(object):
return True
- def _poll(self, timeout=None):
- """
- All poll() calls pass through here. The poll events
- are added directly to self._poll_event_queue.
- In order to avoid endless blocking, this raises
- StopIteration if timeout is None and there are
- no file descriptors to poll.
- """
- if not self._poll_event_handlers:
- self._schedule()
- if timeout is None and \
- not self._poll_event_handlers:
- raise StopIteration(
- "timeout is None and there are no poll() event handlers")
-
- # The following error is known to occur with Linux kernel versions
- # less than 2.6.24:
- #
- # select.error: (4, 'Interrupted system call')
- #
- # This error has been observed after a SIGSTOP, followed by SIGCONT.
- # Treat it similar to EAGAIN if timeout is None, otherwise just return
- # without any events.
- while True:
- try:
- self._poll_event_queue.extend(self._poll_obj.poll(timeout))
- break
- except select.error as e:
- writemsg_level("\n!!! select error: %s\n" % (e,),
- level=logging.ERROR, noiselevel=-1)
- del e
- if timeout is not None:
- break
-
- def _next_poll_event(self, timeout=None):
- """
- Since the _schedule_wait() loop is called by event
- handlers from _poll_loop(), maintain a central event
- queue for both of them to share events from a single
- poll() call. In order to avoid endless blocking, this
- raises StopIteration if timeout is None and there are
- no file descriptors to poll.
- """
- if not self._poll_event_queue:
- self._poll(timeout)
- if not self._poll_event_queue:
- raise StopIteration()
- return self._poll_event_queue.pop()
-
- def _poll_loop(self):
-
- event_handlers = self._poll_event_handlers
- event_handled = False
-
- try:
- while event_handlers:
- f, event = self._next_poll_event()
- handler, reg_id = event_handlers[f]
- handler(f, event)
- event_handled = True
- except StopIteration:
- event_handled = True
-
- if not event_handled:
- raise AssertionError("tight loop")
-
- def _schedule_yield(self):
- """
- Schedule for a short period of time chosen by the scheduler based
- on internal state. Synchronous tasks should call this periodically
- in order to allow the scheduler to service pending poll events. The
- scheduler will call poll() exactly once, without blocking, and any
- resulting poll events will be serviced.
- """
- event_handlers = self._poll_event_handlers
- events_handled = 0
-
- if not event_handlers:
- return bool(events_handled)
-
- if not self._poll_event_queue:
- self._poll(0)
-
- try:
- while event_handlers and self._poll_event_queue:
- f, event = self._next_poll_event()
- handler, reg_id = event_handlers[f]
- handler(f, event)
- events_handled += 1
- except StopIteration:
- events_handled += 1
-
- return bool(events_handled)
-
- def _register(self, f, eventmask, handler):
- """
- @rtype: Integer
- @return: A unique registration id, for use in schedule() or
- unregister() calls.
- """
- if f in self._poll_event_handlers:
- raise AssertionError("fd %d is already registered" % f)
- self._event_handler_id += 1
- reg_id = self._event_handler_id
- self._poll_event_handler_ids[reg_id] = f
- self._poll_event_handlers[f] = (handler, reg_id)
- self._poll_obj.register(f, eventmask)
- return reg_id
-
- def _unregister(self, reg_id):
- f = self._poll_event_handler_ids[reg_id]
- self._poll_obj.unregister(f)
- if self._poll_event_queue:
- # Discard any unhandled events that belong to this file,
- # in order to prevent these events from being erroneously
- # delivered to a future handler that is using a reallocated
- # file descriptor of the same numeric value (causing
- # extremely confusing bugs).
- remaining_events = []
- discarded_events = False
- for event in self._poll_event_queue:
- if event[0] == f:
- discarded_events = True
- else:
- remaining_events.append(event)
-
- if discarded_events:
- self._poll_event_queue[:] = remaining_events
-
- del self._poll_event_handlers[f]
- del self._poll_event_handler_ids[reg_id]
-
- def _schedule_wait(self, wait_ids=None, timeout=None, condition=None):
- """
- Schedule until wait_id is not longer registered
- for poll() events.
- @type wait_id: int
- @param wait_id: a task id to wait for
- """
- event_handlers = self._poll_event_handlers
- handler_ids = self._poll_event_handler_ids
- event_handled = False
-
- if isinstance(wait_ids, int):
- wait_ids = frozenset([wait_ids])
-
- start_time = None
- remaining_timeout = timeout
- timed_out = False
- if timeout is not None:
- start_time = time.time()
- try:
- while (wait_ids is None and event_handlers) or \
- (wait_ids is not None and wait_ids.intersection(handler_ids)):
- f, event = self._next_poll_event(timeout=remaining_timeout)
- handler, reg_id = event_handlers[f]
- handler(f, event)
- event_handled = True
- if condition is not None and condition():
- break
- if timeout is not None:
- elapsed_time = time.time() - start_time
- if elapsed_time < 0:
- # The system clock has changed such that start_time
- # is now in the future, so just assume that the
- # timeout has already elapsed.
- timed_out = True
- break
- remaining_timeout = timeout - 1000 * elapsed_time
- if remaining_timeout <= 0:
- timed_out = True
- break
- except StopIteration:
- event_handled = True
-
- return event_handled
-
def _task_output(self, msg, log_path=None, background=None,
level=0, noiselevel=-1):
"""
@@ -333,6 +219,7 @@ class PollScheduler(object):
f = open(_unicode_encode(log_path,
encoding=_encodings['fs'], errors='strict'),
mode='ab')
+ f_real = f
except IOError as e:
if e.errno not in (errno.ENOENT, errno.ESTALE):
raise
@@ -349,50 +236,5 @@ class PollScheduler(object):
f.write(_unicode_encode(msg))
f.close()
-
-_can_poll_device = None
-
-def can_poll_device():
- """
- Test if it's possible to use poll() on a device such as a pty. This
- is known to fail on Darwin.
- @rtype: bool
- @returns: True if poll() on a device succeeds, False otherwise.
- """
-
- global _can_poll_device
- if _can_poll_device is not None:
- return _can_poll_device
-
- if not hasattr(select, "poll"):
- _can_poll_device = False
- return _can_poll_device
-
- try:
- dev_null = open('/dev/null', 'rb')
- except IOError:
- _can_poll_device = False
- return _can_poll_device
-
- p = select.poll()
- p.register(dev_null.fileno(), PollConstants.POLLIN)
-
- invalid_request = False
- for f, event in p.poll():
- if event & PollConstants.POLLNVAL:
- invalid_request = True
- break
- dev_null.close()
-
- _can_poll_device = not invalid_request
- return _can_poll_device
-
-def create_poll_instance():
- """
- Create an instance of select.poll, or an instance of
- PollSelectAdapter there is no poll() implementation or
- it is broken somehow.
- """
- if can_poll_device():
- return select.poll()
- return PollSelectAdapter()
+ if f_real is not f:
+ f_real.close()
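
The PollScheduler changes above replace the hand-rolled poll() machinery with an EventLoop-style interface: a termination check registered via idle_add() and a _main_loop() that keeps calling iteration() while work remains. The toy classes below (MiniLoop, ToyScheduler) exist only to make that control flow runnable here; they are not the real portage EventLoop or PollScheduler.

    class MiniLoop(object):
        """A throwaway idle_add()/iteration()/source_remove() stand-in."""

        def __init__(self):
            self._sources = {}
            self._next_id = 0

        def idle_add(self, callback):
            self._next_id += 1
            self._sources[self._next_id] = callback
            return self._next_id

        def source_remove(self, source_id):
            return self._sources.pop(source_id, None) is not None

        def iteration(self):
            # Run each idle callback once; drop any that return False.
            for source_id, callback in list(self._sources.items()):
                if not callback():
                    self._sources.pop(source_id, None)

    class ToyScheduler(object):
        def __init__(self, jobs):
            self.sched_iface = MiniLoop()
            self._pending = list(jobs)
            self._running = []

        def _termination_check(self):
            return True              # always re-scheduled, as in PollScheduler

        def _keep_scheduling(self):
            return bool(self._pending)

        def _is_work_scheduled(self):
            return bool(self._running)

        def _schedule(self):
            while self._pending:
                self._running.append(self._pending.pop())

        def _main_loop(self):
            term_check_id = self.sched_iface.idle_add(self._termination_check)
            try:
                self._schedule()                       # populate initial work
                while self._keep_scheduling():
                    self.sched_iface.iteration()
                while self._is_work_scheduled():
                    self._running.pop()                # pretend one job finished
                    self.sched_iface.iteration()
            finally:
                self.sched_iface.source_remove(term_check_id)

    ToyScheduler(["pkg-a", "pkg-b", "pkg-c"])._main_loop()
    print("toy main loop finished")
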
diff --git a/portage_with_autodep/pym/_emerge/PollScheduler.pyo b/portage_with_autodep/pym/_emerge/PollScheduler.pyo
new file mode 100644
index 0000000..b7e52be
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/PollScheduler.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/PollSelectAdapter.py b/portage_with_autodep/pym/_emerge/PollSelectAdapter.py
deleted file mode 100644
index c11dab8..0000000
--- a/portage_with_autodep/pym/_emerge/PollSelectAdapter.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright 1999-2009 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from _emerge.PollConstants import PollConstants
-import select
-class PollSelectAdapter(PollConstants):
-
- """
- Use select to emulate a poll object, for
- systems that don't support poll().
- """
-
- def __init__(self):
- self._registered = {}
- self._select_args = [[], [], []]
-
- def register(self, fd, *args):
- """
- Only POLLIN is currently supported!
- """
- if len(args) > 1:
- raise TypeError(
- "register expected at most 2 arguments, got " + \
- repr(1 + len(args)))
-
- eventmask = PollConstants.POLLIN | \
- PollConstants.POLLPRI | PollConstants.POLLOUT
- if args:
- eventmask = args[0]
-
- self._registered[fd] = eventmask
- self._select_args = None
-
- def unregister(self, fd):
- self._select_args = None
- del self._registered[fd]
-
- def poll(self, *args):
- if len(args) > 1:
- raise TypeError(
- "poll expected at most 2 arguments, got " + \
- repr(1 + len(args)))
-
- timeout = None
- if args:
- timeout = args[0]
-
- select_args = self._select_args
- if select_args is None:
- select_args = [list(self._registered), [], []]
-
- if timeout is not None:
- select_args = select_args[:]
- # Translate poll() timeout args to select() timeout args:
- #
- # | units | value(s) for indefinite block
- # ---------|--------------|------------------------------
- # poll | milliseconds | omitted, negative, or None
- # ---------|--------------|------------------------------
- # select | seconds | omitted
- # ---------|--------------|------------------------------
-
- if timeout is not None and timeout < 0:
- timeout = None
- if timeout is not None:
- select_args.append(timeout / 1000)
-
- select_events = select.select(*select_args)
- poll_events = []
- for fd in select_events[0]:
- poll_events.append((fd, PollConstants.POLLIN))
- return poll_events
-
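
The main subtlety in the removed PollSelectAdapter was the timeout translation documented in its comment table: poll() takes milliseconds and treats None or a negative value as "block forever", while select() takes seconds and blocks only when the timeout argument is omitted. A tiny standalone version of that conversion (poll_timeout_to_select_args is an illustrative name, not portage code):

    def poll_timeout_to_select_args(timeout_ms):
        """Return the extra positional args to append to a select.select() call."""
        if timeout_ms is None or timeout_ms < 0:
            return ()                     # block indefinitely: omit the timeout
        return (timeout_ms / 1000.0,)     # select() expects seconds

    print(poll_timeout_to_select_args(None))    # ()
    print(poll_timeout_to_select_args(2500))    # (2.5,)
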
diff --git a/portage_with_autodep/pym/_emerge/ProgressHandler.pyo b/portage_with_autodep/pym/_emerge/ProgressHandler.pyo
new file mode 100644
index 0000000..83e2f7f
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/ProgressHandler.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/QueueScheduler.py b/portage_with_autodep/pym/_emerge/QueueScheduler.py
index a4ab328..206087c 100644
--- a/portage_with_autodep/pym/_emerge/QueueScheduler.py
+++ b/portage_with_autodep/pym/_emerge/QueueScheduler.py
@@ -1,8 +1,6 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-import time
-
from _emerge.PollScheduler import PollScheduler
class QueueScheduler(PollScheduler):
@@ -12,8 +10,8 @@ class QueueScheduler(PollScheduler):
run() method returns when no tasks remain.
"""
- def __init__(self, max_jobs=None, max_load=None):
- PollScheduler.__init__(self)
+ def __init__(self, main=True, max_jobs=None, max_load=None):
+ PollScheduler.__init__(self, main=main)
if max_jobs is None:
max_jobs = 1
@@ -36,51 +34,44 @@ class QueueScheduler(PollScheduler):
def run(self, timeout=None):
- start_time = None
- timed_out = False
- remaining_timeout = timeout
+ timeout_callback = None
if timeout is not None:
- start_time = time.time()
-
- while self._schedule():
- self._schedule_wait(timeout=remaining_timeout)
- if timeout is not None:
- elapsed_time = time.time() - start_time
- if elapsed_time < 0:
- # The system clock has changed such that start_time
- # is now in the future, so just assume that the
- # timeout has already elapsed.
- timed_out = True
- break
- remaining_timeout = timeout - 1000 * elapsed_time
- if remaining_timeout <= 0:
- timed_out = True
+ def timeout_callback():
+ timeout_callback.timed_out = True
+ return False
+ timeout_callback.timed_out = False
+ timeout_callback.timeout_id = self.sched_iface.timeout_add(
+ timeout, timeout_callback)
+
+ term_check_id = self.sched_iface.idle_add(self._termination_check)
+ try:
+ while not (timeout_callback is not None and
+ timeout_callback.timed_out):
+ # We don't have any callbacks to trigger _schedule(),
+ # so we have to call it explicitly here.
+ self._schedule()
+ if self._keep_scheduling():
+ self.sched_iface.iteration()
+ else:
break
- if timeout is None or not timed_out:
- while self._running_job_count():
- self._schedule_wait(timeout=remaining_timeout)
- if timeout is not None:
- elapsed_time = time.time() - start_time
- if elapsed_time < 0:
- # The system clock has changed such that start_time
- # is now in the future, so just assume that the
- # timeout has already elapsed.
- timed_out = True
- break
- remaining_timeout = timeout - 1000 * elapsed_time
- if remaining_timeout <= 0:
- timed_out = True
- break
+ while self._is_work_scheduled() and \
+ not (timeout_callback is not None and
+ timeout_callback.timed_out):
+ self.sched_iface.iteration()
+ finally:
+ self.sched_iface.source_remove(term_check_id)
+ if timeout_callback is not None:
+ self.sched_iface.unregister(timeout_callback.timeout_id)
def _schedule_tasks(self):
"""
@rtype: bool
- @returns: True if there may be remaining tasks to schedule,
+ @return: True if there may be remaining tasks to schedule,
False otherwise.
"""
if self._terminated_tasks:
- return False
+ return
while self._can_add_job():
n = self._max_jobs - self._running_job_count()
@@ -88,12 +79,10 @@ class QueueScheduler(PollScheduler):
break
if not self._start_next_job(n):
- return False
+ return
- for q in self._queues:
- if q:
- return True
- return False
+ def _keep_scheduling(self):
+ return not self._terminated_tasks and any(self._queues)
def _running_job_count(self):
job_count = 0
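
QueueScheduler.run() now expresses its timeout as a GLib-style timeout source: the callback records the timeout on an attribute of itself and returns False so the loop drops the source (run() also unregisters it explicitly in its finally block). The snippet below isolates that idiom with a fake_timeout_add helper invented for this sketch; the real scheduler uses sched_iface.timeout_add:

    def fake_timeout_add(interval_ms, callback):
        # A real event loop would invoke the callback after interval_ms
        # milliseconds; firing it immediately is enough to show the shape.
        keep_source = callback()
        return keep_source

    def timeout_callback():
        timeout_callback.timed_out = True
        return False          # False tells the loop to remove this source
    timeout_callback.timed_out = False

    fake_timeout_add(1000, timeout_callback)
    print("timed out:", timeout_callback.timed_out)
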
diff --git a/portage_with_autodep/pym/_emerge/QueueScheduler.pyo b/portage_with_autodep/pym/_emerge/QueueScheduler.pyo
new file mode 100644
index 0000000..88de3ea
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/QueueScheduler.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/RootConfig.py b/portage_with_autodep/pym/_emerge/RootConfig.py
index d84f108..bb0d768 100644
--- a/portage_with_autodep/pym/_emerge/RootConfig.py
+++ b/portage_with_autodep/pym/_emerge/RootConfig.py
@@ -19,7 +19,7 @@ class RootConfig(object):
def __init__(self, settings, trees, setconfig):
self.trees = trees
self.settings = settings
- self.root = self.settings["ROOT"]
+ self.root = self.settings['EROOT']
self.setconfig = setconfig
if setconfig is None:
self.sets = {}
diff --git a/portage_with_autodep/pym/_emerge/RootConfig.pyo b/portage_with_autodep/pym/_emerge/RootConfig.pyo
new file mode 100644
index 0000000..fad3022
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/RootConfig.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/Scheduler.py b/portage_with_autodep/pym/_emerge/Scheduler.py
index 6412d82..30a7e10 100644
--- a/portage_with_autodep/pym/_emerge/Scheduler.py
+++ b/portage_with_autodep/pym/_emerge/Scheduler.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -7,10 +7,8 @@ from collections import deque
import gc
import gzip
import logging
-import shutil
import signal
import sys
-import tempfile
import textwrap
import time
import warnings
@@ -28,9 +26,12 @@ from portage.output import colorize, create_color_func, red
bad = create_color_func("BAD")
from portage._sets import SETPREFIX
from portage._sets.base import InternalPackageSet
-from portage.util import writemsg, writemsg_level
+from portage.util import ensure_dirs, writemsg, writemsg_level
+from portage.util.SlotObject import SlotObject
from portage.package.ebuild.digestcheck import digestcheck
from portage.package.ebuild.digestgen import digestgen
+from portage.package.ebuild.doebuild import (_check_temp_dir,
+ _prepare_self_update)
from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
import _emerge
@@ -44,6 +45,7 @@ from _emerge.create_depgraph_params import create_depgraph_params
from _emerge.create_world_atom import create_world_atom
from _emerge.DepPriority import DepPriority
from _emerge.depgraph import depgraph, resume_depgraph
+from _emerge.EbuildBuildDir import EbuildBuildDir
from _emerge.EbuildFetcher import EbuildFetcher
from _emerge.EbuildPhase import EbuildPhase
from _emerge.emergelog import emergelog
@@ -52,12 +54,9 @@ from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_dep
from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
from _emerge.JobStatusDisplay import JobStatusDisplay
from _emerge.MergeListItem import MergeListItem
-from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
from _emerge.Package import Package
from _emerge.PackageMerge import PackageMerge
from _emerge.PollScheduler import PollScheduler
-from _emerge.RootConfig import RootConfig
-from _emerge.SlotObject import SlotObject
from _emerge.SequentialTaskQueue import SequentialTaskQueue
if sys.hexversion >= 0x3000000:
@@ -77,17 +76,12 @@ class Scheduler(PollScheduler):
frozenset(["--pretend",
"--fetchonly", "--fetch-all-uri"])
- _opts_no_restart = frozenset(["--buildpkgonly",
+ _opts_no_self_update = frozenset(["--buildpkgonly",
"--fetchonly", "--fetch-all-uri", "--pretend"])
- _bad_resume_opts = set(["--ask", "--changelog",
- "--resume", "--skipfirst"])
-
- class _iface_class(SlotObject):
+ class _iface_class(PollScheduler._sched_iface_class):
__slots__ = ("fetch",
- "output", "register", "schedule",
- "scheduleSetup", "scheduleUnpack", "scheduleYield",
- "unregister")
+ "scheduleSetup", "scheduleUnpack")
class _fetch_iface_class(SlotObject):
__slots__ = ("log_file", "schedule")
@@ -96,7 +90,7 @@ class Scheduler(PollScheduler):
("merge", "jobs", "ebuild_locks", "fetch", "unpack"), prefix="")
class _build_opts_class(SlotObject):
- __slots__ = ("buildpkg", "buildpkgonly",
+ __slots__ = ("buildpkg", "buildpkg_exclude", "buildpkgonly",
"fetch_all_uri", "fetchonly", "pretend")
class _binpkg_opts_class(SlotObject):
@@ -141,8 +135,9 @@ class Scheduler(PollScheduler):
portage.exception.PortageException.__init__(self, value)
def __init__(self, settings, trees, mtimedb, myopts,
- spinner, mergelist=None, favorites=None, graph_config=None):
- PollScheduler.__init__(self)
+ spinner, mergelist=None, favorites=None, graph_config=None,
+ uninstall_only=False):
+ PollScheduler.__init__(self, main=True)
if mergelist is not None:
warnings.warn("The mergelist parameter of the " + \
@@ -151,16 +146,22 @@ class Scheduler(PollScheduler):
DeprecationWarning, stacklevel=2)
self.settings = settings
- self.target_root = settings["ROOT"]
+ self.target_root = settings["EROOT"]
self.trees = trees
self.myopts = myopts
self._spinner = spinner
self._mtimedb = mtimedb
self._favorites = favorites
+ self._uninstall_only = uninstall_only
self._args_set = InternalPackageSet(favorites, allow_repo=True)
self._build_opts = self._build_opts_class()
+
for k in self._build_opts.__slots__:
- setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
+ setattr(self._build_opts, k, myopts.get("--" + k.replace("_", "-")))
+ self._build_opts.buildpkg_exclude = InternalPackageSet( \
+ initial_atoms=" ".join(myopts.get("--buildpkg-exclude", [])).split(), \
+ allow_wildcard=True, allow_repo=True)
+
self._binpkg_opts = self._binpkg_opts_class()
for k in self._binpkg_opts.__slots__:
setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
@@ -202,10 +203,7 @@ class Scheduler(PollScheduler):
if max_jobs is None:
max_jobs = 1
self._set_max_jobs(max_jobs)
-
- # The root where the currently running
- # portage instance is installed.
- self._running_root = trees["/"]["root_config"]
+ self._running_root = trees[trees._running_eroot]["root_config"]
self.edebug = 0
if settings.get("PORTAGE_DEBUG", "") == "1":
self.edebug = 1
@@ -219,13 +217,11 @@ class Scheduler(PollScheduler):
fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
schedule=self._schedule_fetch)
self._sched_iface = self._iface_class(
- fetch=fetch_iface, output=self._task_output,
- register=self._register,
- schedule=self._schedule_wait,
+ fetch=fetch_iface,
scheduleSetup=self._schedule_setup,
scheduleUnpack=self._schedule_unpack,
- scheduleYield=self._schedule_yield,
- unregister=self._unregister)
+ **dict((k, getattr(self.sched_iface, k))
+ for k in self.sched_iface.__slots__))
self._prefetchers = weakref.WeakValueDictionary()
self._pkg_queue = []
@@ -277,7 +273,7 @@ class Scheduler(PollScheduler):
if self._parallel_fetch:
# clear out existing fetch log if it exists
try:
- open(self._fetch_log, 'w')
+ open(self._fetch_log, 'w').close()
except EnvironmentError:
pass
@@ -289,10 +285,37 @@ class Scheduler(PollScheduler):
self._running_portage = self._pkg(cpv, "installed",
self._running_root, installed=True)
+ def _handle_self_update(self):
+
+ if self._opts_no_self_update.intersection(self.myopts):
+ return os.EX_OK
+
+ for x in self._mergelist:
+ if not isinstance(x, Package):
+ continue
+ if x.operation != "merge":
+ continue
+ if x.root != self._running_root.root:
+ continue
+ if not portage.dep.match_from_list(
+ portage.const.PORTAGE_PACKAGE_ATOM, [x]):
+ continue
+ if self._running_portage is None or \
+ self._running_portage.cpv != x.cpv or \
+ '9999' in x.cpv or \
+ 'git' in x.inherited or \
+ 'git-2' in x.inherited:
+ rval = _check_temp_dir(self.settings)
+ if rval != os.EX_OK:
+ return rval
+ _prepare_self_update(self.settings)
+ break
+
+ return os.EX_OK
+
def _terminate_tasks(self):
self._status_display.quiet = True
- while self._running_tasks:
- task_id, task = self._running_tasks.popitem()
+ for task in list(self._running_tasks.values()):
task.cancel()
for q in self._task_queues.values():
q.clear()
@@ -304,10 +327,13 @@ class Scheduler(PollScheduler):
"""
self._set_graph_config(graph_config)
self._blocker_db = {}
+ dynamic_deps = self.myopts.get("--dynamic-deps", "y") != "n"
for root in self.trees:
+ if self._uninstall_only:
+ continue
if graph_config is None:
fake_vartree = FakeVartree(self.trees[root]["root_config"],
- pkg_cache=self._pkg_cache)
+ pkg_cache=self._pkg_cache, dynamic_deps=dynamic_deps)
fake_vartree.sync()
else:
fake_vartree = graph_config.trees[root]['vartree']
@@ -324,52 +350,6 @@ class Scheduler(PollScheduler):
self._set_graph_config(None)
gc.collect()
- def _poll(self, timeout=None):
-
- self._schedule()
-
- if timeout is None:
- while True:
- if not self._poll_event_handlers:
- self._schedule()
- if not self._poll_event_handlers:
- raise StopIteration(
- "timeout is None and there are no poll() event handlers")
- previous_count = len(self._poll_event_queue)
- PollScheduler._poll(self, timeout=self._max_display_latency)
- self._status_display.display()
- if previous_count != len(self._poll_event_queue):
- break
-
- elif timeout <= self._max_display_latency:
- PollScheduler._poll(self, timeout=timeout)
- if timeout == 0:
- # The display is updated by _schedule() above, so it would be
- # redundant to update it here when timeout is 0.
- pass
- else:
- self._status_display.display()
-
- else:
- remaining_timeout = timeout
- start_time = time.time()
- while True:
- previous_count = len(self._poll_event_queue)
- PollScheduler._poll(self,
- timeout=min(self._max_display_latency, remaining_timeout))
- self._status_display.display()
- if previous_count != len(self._poll_event_queue):
- break
- elapsed_time = time.time() - start_time
- if elapsed_time < 0:
- # The system clock has changed such that start_time
- # is now in the future, so just assume that the
- # timeout has already elapsed.
- break
- remaining_timeout = timeout - 1000 * elapsed_time
- if remaining_timeout <= 0:
- break
-
def _set_max_jobs(self, max_jobs):
self._max_jobs = max_jobs
self._task_queues.jobs.max_jobs = max_jobs
@@ -381,11 +361,11 @@ class Scheduler(PollScheduler):
Check if background mode is enabled and adjust states as necessary.
@rtype: bool
- @returns: True if background mode is enabled, False otherwise.
+ @return: True if background mode is enabled, False otherwise.
"""
background = (self._max_jobs is True or \
self._max_jobs > 1 or "--quiet" in self.myopts \
- or "--quiet-build" in self.myopts) and \
+ or self.myopts.get("--quiet-build") == "y") and \
not bool(self._opts_no_background.intersection(self.myopts))
if background:
@@ -398,7 +378,7 @@ class Scheduler(PollScheduler):
msg = [""]
for pkg in interactive_tasks:
pkg_str = " " + colorize("INFORM", str(pkg.cpv))
- if pkg.root != "/":
+ if pkg.root_config.settings["ROOT"] != "/":
pkg_str += " for " + pkg.root
msg.append(pkg_str)
msg.append("")
@@ -741,7 +721,6 @@ class Scheduler(PollScheduler):
self._status_msg("Starting parallel fetch")
prefetchers = self._prefetchers
- getbinpkg = "--getbinpkg" in self.myopts
for pkg in self._mergelist:
# mergelist can contain solved Blocker instances
@@ -749,15 +728,13 @@ class Scheduler(PollScheduler):
continue
prefetcher = self._create_prefetcher(pkg)
if prefetcher is not None:
- self._task_queues.fetch.add(prefetcher)
+ # This will start the first prefetcher immediately, so that
+ # self._task() won't discard it. This avoids a case where
+ # the first prefetcher is discarded, causing the second
+ # prefetcher to occupy the fetch queue before the first
+ # fetcher has an opportunity to execute.
prefetchers[pkg] = prefetcher
-
- # Start the first prefetcher immediately so that self._task()
- # won't discard it. This avoids a case where the first
- # prefetcher is discarded, causing the second prefetcher to
- # occupy the fetch queue before the first fetcher has an
- # opportunity to execute.
- self._task_queues.fetch.schedule()
+ self._task_queues.fetch.add(prefetcher)
def _create_prefetcher(self, pkg):
"""
@@ -785,100 +762,6 @@ class Scheduler(PollScheduler):
return prefetcher
- def _is_restart_scheduled(self):
- """
- Check if the merge list contains a replacement
- for the current running instance, that will result
- in restart after merge.
- @rtype: bool
- @returns: True if a restart is scheduled, False otherwise.
- """
- if self._opts_no_restart.intersection(self.myopts):
- return False
-
- mergelist = self._mergelist
-
- for i, pkg in enumerate(mergelist):
- if self._is_restart_necessary(pkg) and \
- i != len(mergelist) - 1:
- return True
-
- return False
-
- def _is_restart_necessary(self, pkg):
- """
- @return: True if merging the given package
- requires restart, False otherwise.
- """
-
- # Figure out if we need a restart.
- if pkg.root == self._running_root.root and \
- portage.match_from_list(
- portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
- if self._running_portage is None:
- return True
- elif pkg.cpv != self._running_portage.cpv or \
- '9999' in pkg.cpv or \
- 'git' in pkg.inherited or \
- 'git-2' in pkg.inherited:
- return True
- return False
-
- def _restart_if_necessary(self, pkg):
- """
- Use execv() to restart emerge. This happens
- if portage upgrades itself and there are
- remaining packages in the list.
- """
-
- if self._opts_no_restart.intersection(self.myopts):
- return
-
- if not self._is_restart_necessary(pkg):
- return
-
- if pkg == self._mergelist[-1]:
- return
-
- self._main_loop_cleanup()
-
- logger = self._logger
- pkg_count = self._pkg_count
- mtimedb = self._mtimedb
- bad_resume_opts = self._bad_resume_opts
-
- logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
- (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
-
- logger.log(" *** RESTARTING " + \
- "emerge via exec() after change of " + \
- "portage version.")
-
- mtimedb["resume"]["mergelist"].remove(list(pkg))
- mtimedb.commit()
- portage.run_exitfuncs()
- # Don't trust sys.argv[0] here because eselect-python may modify it.
- emerge_binary = os.path.join(portage.const.PORTAGE_BIN_PATH, 'emerge')
- mynewargv = [emerge_binary, "--resume"]
- resume_opts = self.myopts.copy()
- # For automatic resume, we need to prevent
- # any of bad_resume_opts from leaking in
- # via EMERGE_DEFAULT_OPTS.
- resume_opts["--ignore-default-opts"] = True
- for myopt, myarg in resume_opts.items():
- if myopt not in bad_resume_opts:
- if myarg is True:
- mynewargv.append(myopt)
- elif isinstance(myarg, list):
- # arguments like --exclude that use 'append' action
- for x in myarg:
- mynewargv.append("%s=%s" % (myopt, x))
- else:
- mynewargv.append("%s=%s" % (myopt, myarg))
- # priority only needs to be adjusted on the first run
- os.environ["PORTAGE_NICENESS"] = "0"
- os.execv(mynewargv[0], mynewargv)
-
def _run_pkg_pretend(self):
"""
Since pkg_pretend output may be important, this method sends all
@@ -912,11 +795,48 @@ class Scheduler(PollScheduler):
root_config = x.root_config
settings = self.pkgsettings[root_config.root]
settings.setcpv(x)
- tmpdir = tempfile.mkdtemp()
- tmpdir_orig = settings["PORTAGE_TMPDIR"]
- settings["PORTAGE_TMPDIR"] = tmpdir
+
+ # setcpv/package.env allows for per-package PORTAGE_TMPDIR so we
+ # have to validate it for each package
+ rval = _check_temp_dir(settings)
+ if rval != os.EX_OK:
+ return rval
+
+ build_dir_path = os.path.join(
+ os.path.realpath(settings["PORTAGE_TMPDIR"]),
+ "portage", x.category, x.pf)
+ existing_buildir = os.path.isdir(build_dir_path)
+ settings["PORTAGE_BUILDDIR"] = build_dir_path
+ build_dir = EbuildBuildDir(scheduler=sched_iface,
+ settings=settings)
+ build_dir.lock()
+ current_task = None
try:
+
+ # Clean up the existing build dir, in case pkg_pretend
+ # checks for available space (bug #390711).
+ if existing_buildir:
+ if x.built:
+ tree = "bintree"
+ infloc = os.path.join(build_dir_path, "build-info")
+ ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
+ else:
+ tree = "porttree"
+ portdb = root_config.trees["porttree"].dbapi
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+ if ebuild_path is None:
+ raise AssertionError(
+ "ebuild not found for '%s'" % x.cpv)
+ portage.package.ebuild.doebuild.doebuild_environment(
+ ebuild_path, "clean", settings=settings,
+ db=self.trees[settings['EROOT']][tree].dbapi)
+ clean_phase = EbuildPhase(background=False,
+ phase='clean', scheduler=sched_iface, settings=settings)
+ current_task = clean_phase
+ clean_phase.start()
+ clean_phase.wait()
+
if x.built:
tree = "bintree"
bintree = root_config.trees["bintree"].dbapi.bintree
@@ -935,6 +855,7 @@ class Scheduler(PollScheduler):
verifier = BinpkgVerifier(pkg=x,
scheduler=sched_iface)
+ current_task = verifier
verifier.start()
if verifier.wait() != os.EX_OK:
failures += 1
@@ -943,8 +864,8 @@ class Scheduler(PollScheduler):
if fetched:
bintree.inject(x.cpv, filename=fetched)
tbz2_file = bintree.getname(x.cpv)
- infloc = os.path.join(tmpdir, x.category, x.pf, "build-info")
- os.makedirs(infloc)
+ infloc = os.path.join(build_dir_path, "build-info")
+ ensure_dirs(infloc)
portage.xpak.tbz2(tbz2_file).unpackinfo(infloc)
ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
@@ -964,7 +885,8 @@ class Scheduler(PollScheduler):
portage.package.ebuild.doebuild.doebuild_environment(ebuild_path,
"pretend", settings=settings,
- db=self.trees[settings["ROOT"]][tree].dbapi)
+ db=self.trees[settings['EROOT']][tree].dbapi)
+
prepare_build_dirs(root_config.root, settings, cleanup=0)
vardb = root_config.trees['vartree'].dbapi
@@ -976,14 +898,21 @@ class Scheduler(PollScheduler):
phase="pretend", scheduler=sched_iface,
settings=settings)
+ current_task = pretend_phase
pretend_phase.start()
ret = pretend_phase.wait()
if ret != os.EX_OK:
failures += 1
portage.elog.elog_process(x.cpv, settings)
finally:
- shutil.rmtree(tmpdir)
- settings["PORTAGE_TMPDIR"] = tmpdir_orig
+ if current_task is not None and current_task.isAlive():
+ current_task.cancel()
+ current_task.wait()
+ clean_phase = EbuildPhase(background=False,
+ phase='clean', scheduler=sched_iface, settings=settings)
+ clean_phase.start()
+ clean_phase.wait()
+ build_dir.unlock()
if failures:
return 1
@@ -1003,6 +932,10 @@ class Scheduler(PollScheduler):
except self._unknown_internal_error:
return 1
+ rval = self._handle_self_update()
+ if rval != os.EX_OK:
+ return rval
+
for root in self.trees:
root_config = self.trees[root]["root_config"]
@@ -1131,10 +1064,8 @@ class Scheduler(PollScheduler):
# If only one package failed then just show its
# whole log for easy viewing.
failed_pkg = self._failed_pkgs_all[-1]
- build_dir = failed_pkg.build_dir
log_file = None
-
- log_paths = [failed_pkg.build_log]
+ log_file_real = None
log_path = self._locate_failure_log(failed_pkg)
if log_path is not None:
@@ -1145,6 +1076,7 @@ class Scheduler(PollScheduler):
pass
else:
if log_path.endswith('.gz'):
+ log_file_real = log_file
log_file = gzip.GzipFile(filename='',
mode='rb', fileobj=log_file)
@@ -1157,6 +1089,8 @@ class Scheduler(PollScheduler):
noiselevel=-1)
finally:
log_file.close()
+ if log_file_real is not None:
+ log_file_real.close()
failure_log_shown = True
# Dump mod_echo output now since it tends to flood the terminal.
@@ -1228,9 +1162,6 @@ class Scheduler(PollScheduler):
def _locate_failure_log(self, failed_pkg):
- build_dir = failed_pkg.build_dir
- log_file = None
-
log_paths = [failed_pkg.build_log]
for log_path in log_paths:
@@ -1272,7 +1203,7 @@ class Scheduler(PollScheduler):
# Skip this if $ROOT != / since it shouldn't matter if there
# are unsatisfied system runtime deps in this case.
- if pkg.root != '/':
+ if pkg.root_config.settings["ROOT"] != "/":
return
completed_tasks = self._completed_tasks
@@ -1350,8 +1281,6 @@ class Scheduler(PollScheduler):
if pkg.installed:
return
- self._restart_if_necessary(pkg)
-
# Call mtimedb.commit() after each merge so that
# --resume still works after being interrupted
# by reboot, sigkill or similar.
@@ -1411,12 +1340,16 @@ class Scheduler(PollScheduler):
def _merge(self):
+ if self._opts_no_background.intersection(self.myopts):
+ self._set_max_jobs(1)
+
self._add_prefetchers()
self._add_packages()
- pkg_queue = self._pkg_queue
failed_pkgs = self._failed_pkgs
portage.locks._quiet = self._background
portage.elog.add_listener(self._elog_listener)
+ display_timeout_id = self.sched_iface.timeout_add(
+ self._max_display_latency, self._status_display.display)
rval = os.EX_OK
try:
@@ -1425,6 +1358,7 @@ class Scheduler(PollScheduler):
self._main_loop_cleanup()
portage.locks._quiet = False
portage.elog.remove_listener(self._elog_listener)
+ self.sched_iface.source_remove(display_timeout_id)
if failed_pkgs:
rval = failed_pkgs[-1].returncode
@@ -1505,7 +1439,7 @@ class Scheduler(PollScheduler):
merge order
@type later: set
@rtype: bool
- @returns: True if the package is dependent, False otherwise.
+ @return: True if the package is dependent, False otherwise.
"""
graph = self._digraph
@@ -1553,24 +1487,7 @@ class Scheduler(PollScheduler):
return temp_settings
def _deallocate_config(self, settings):
- self._config_pool[settings["ROOT"]].append(settings)
-
- def _main_loop(self):
-
- # Only allow 1 job max if a restart is scheduled
- # due to portage update.
- if self._is_restart_scheduled() or \
- self._opts_no_background.intersection(self.myopts):
- self._set_max_jobs(1)
-
- while self._schedule():
- self._poll_loop()
-
- while True:
- self._schedule()
- if not self._is_work_scheduled():
- break
- self._poll_loop()
+ self._config_pool[settings['EROOT']].append(settings)
def _keep_scheduling(self):
return bool(not self._terminated_tasks and self._pkg_queue and \
@@ -1583,6 +1500,8 @@ class Scheduler(PollScheduler):
while True:
+ state_change = 0
+
# When the number of jobs and merges drops to zero,
# process a single merge from _merge_wait_queue if
# it's not empty. We only process one since these are
@@ -1593,37 +1512,34 @@ class Scheduler(PollScheduler):
not self._task_queues.merge):
task = self._merge_wait_queue.popleft()
task.addExitListener(self._merge_wait_exit_handler)
+ self._merge_wait_scheduled.append(task)
self._task_queues.merge.add(task)
self._status_display.merges = len(self._task_queues.merge)
- self._merge_wait_scheduled.append(task)
+ state_change += 1
- self._schedule_tasks_imp()
- self._status_display.display()
+ if self._schedule_tasks_imp():
+ state_change += 1
- state_change = 0
- for q in self._task_queues.values():
- if q.schedule():
- state_change += 1
+ self._status_display.display()
# Cancel prefetchers if they're the only reason
# the main poll loop is still running.
if self._failed_pkgs and not self._build_opts.fetchonly and \
not self._is_work_scheduled() and \
self._task_queues.fetch:
+ # Since this happens asynchronously, it doesn't count in
+ # state_change (counting it triggers an infinite loop).
self._task_queues.fetch.clear()
- state_change += 1
if not (state_change or \
(self._merge_wait_queue and not self._jobs and
not self._task_queues.merge)):
break
- return self._keep_scheduling()
-
def _job_delay(self):
"""
@rtype: bool
- @returns: True if job scheduling should be delayed, False otherwise.
+ @return: True if job scheduling should be delayed, False otherwise.
"""
if self._jobs and self._max_load is not None:
@@ -1641,7 +1557,7 @@ class Scheduler(PollScheduler):
def _schedule_tasks_imp(self):
"""
@rtype: bool
- @returns: True if state changed, False otherwise.
+ @return: True if state changed, False otherwise.
"""
state_change = 0
@@ -1709,7 +1625,14 @@ class Scheduler(PollScheduler):
"installed", pkg.root_config, installed=True,
operation="uninstall")
- prefetcher = self._prefetchers.pop(pkg, None)
+ try:
+ prefetcher = self._prefetchers.pop(pkg, None)
+ except KeyError:
+ # KeyError observed with PyPy 1.8, despite None given as default.
+ # Note that PyPy 1.8 has the same WeakValueDictionary code as
+ # CPython 2.7, so it may be possible for CPython to raise KeyError
+ # here as well.
+ prefetcher = None
if prefetcher is not None and not prefetcher.isAlive():
try:
self._task_queues.fetch._task_queue.remove(prefetcher)
@@ -1738,7 +1661,7 @@ class Scheduler(PollScheduler):
pkg = failed_pkg.pkg
msg = "%s to %s %s" % \
(bad("Failed"), action, colorize("INFORM", pkg.cpv))
- if pkg.root != "/":
+ if pkg.root_config.settings["ROOT"] != "/":
msg += " %s %s" % (preposition, pkg.root)
log_path = self._locate_failure_log(failed_pkg)
@@ -1791,7 +1714,7 @@ class Scheduler(PollScheduler):
Use the current resume list to calculate a new one,
dropping any packages with unsatisfied deps.
@rtype: bool
- @returns: True if successful, False otherwise.
+ @return: True if successful, False otherwise.
"""
print(colorize("GOOD", "*** Resuming merge..."))
@@ -1868,7 +1791,7 @@ class Scheduler(PollScheduler):
pkg = task
msg = "emerge --keep-going:" + \
" %s" % (pkg.cpv,)
- if pkg.root != "/":
+ if pkg.root_config.settings["ROOT"] != "/":
msg += " for %s" % (pkg.root,)
msg += " dropped due to unsatisfied dependency."
for line in textwrap.wrap(msg, msg_width):
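
Among the Scheduler changes, the prefetcher lookup now wraps WeakValueDictionary.pop() in a KeyError handler, because a weakly referenced value can be collected between the dictionary's internal containment check and the pop (observed with PyPy 1.8 and, per the comment above, possible on CPython as well). A standalone illustration of that defensive pattern:

    import weakref

    class Prefetcher(object):
        """Placeholder for the fetch task that portage stores per package."""

    prefetchers = weakref.WeakValueDictionary()

    try:
        prefetcher = prefetchers.pop("sys-apps/portage-2.1", None)
    except KeyError:
        # The value can vanish mid-pop once its last strong reference dies.
        prefetcher = None

    print(prefetcher)
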
diff --git a/portage_with_autodep/pym/_emerge/Scheduler.pyo b/portage_with_autodep/pym/_emerge/Scheduler.pyo
new file mode 100644
index 0000000..5555703
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/Scheduler.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/SequentialTaskQueue.py b/portage_with_autodep/pym/_emerge/SequentialTaskQueue.py
index c1c98c4..8090893 100644
--- a/portage_with_autodep/pym/_emerge/SequentialTaskQueue.py
+++ b/portage_with_autodep/pym/_emerge/SequentialTaskQueue.py
@@ -1,13 +1,15 @@
-# Copyright 1999-2009 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-import sys
-from _emerge.SlotObject import SlotObject
from collections import deque
+import sys
+
+from portage.util.SlotObject import SlotObject
+
class SequentialTaskQueue(SlotObject):
__slots__ = ("max_jobs", "running_tasks") + \
- ("_dirty", "_scheduling", "_task_queue")
+ ("_scheduling", "_task_queue")
def __init__(self, **kwargs):
SlotObject.__init__(self, **kwargs)
@@ -15,50 +17,34 @@ class SequentialTaskQueue(SlotObject):
self.running_tasks = set()
if self.max_jobs is None:
self.max_jobs = 1
- self._dirty = True
def add(self, task):
self._task_queue.append(task)
- self._dirty = True
+ self.schedule()
def addFront(self, task):
self._task_queue.appendleft(task)
- self._dirty = True
+ self.schedule()
def schedule(self):
- if not self._dirty:
- return False
-
- if not self:
- return False
-
if self._scheduling:
# Ignore any recursive schedule() calls triggered via
# self._task_exit().
- return False
+ return
self._scheduling = True
-
- task_queue = self._task_queue
- running_tasks = self.running_tasks
- max_jobs = self.max_jobs
- state_changed = False
-
- while task_queue and \
- (max_jobs is True or len(running_tasks) < max_jobs):
- task = task_queue.popleft()
- cancelled = getattr(task, "cancelled", None)
- if not cancelled:
- running_tasks.add(task)
- task.addExitListener(self._task_exit)
- task.start()
- state_changed = True
-
- self._dirty = False
- self._scheduling = False
-
- return state_changed
+ try:
+ while self._task_queue and (self.max_jobs is True or
+ len(self.running_tasks) < self.max_jobs):
+ task = self._task_queue.popleft()
+ cancelled = getattr(task, "cancelled", None)
+ if not cancelled:
+ self.running_tasks.add(task)
+ task.addExitListener(self._task_exit)
+ task.start()
+ finally:
+ self._scheduling = False
def _task_exit(self, task):
"""
@@ -68,16 +54,22 @@ class SequentialTaskQueue(SlotObject):
"""
self.running_tasks.remove(task)
if self._task_queue:
- self._dirty = True
+ self.schedule()
def clear(self):
+ """
+ Clear the task queue and asynchronously terminate any running tasks.
+ """
self._task_queue.clear()
- running_tasks = self.running_tasks
- while running_tasks:
- task = running_tasks.pop()
- task.removeExitListener(self._task_exit)
+ for task in list(self.running_tasks):
task.cancel()
- self._dirty = False
+
+ def wait(self):
+ """
+ Synchronously wait for all running tasks to exit.
+ """
+ while self.running_tasks:
+ next(iter(self.running_tasks)).wait()
def __bool__(self):
return bool(self._task_queue or self.running_tasks)
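
SequentialTaskQueue now schedules eagerly: add() calls schedule() right away, at most max_jobs tasks run at once, and the _scheduling flag swallows the recursive schedule() call that a synchronously exiting task would otherwise trigger. The ToyTask/ToyQueue classes below are invented solely to make that behaviour observable; they are not the portage classes.

    from collections import deque

    class ToyTask(object):
        def __init__(self, name, queue):
            self.name = name
            self._queue = queue

        def start(self):
            print("started", self.name)
            # Finish synchronously, which re-enters ToyQueue.schedule().
            self._queue._task_exit(self)

    class ToyQueue(object):
        def __init__(self, max_jobs=1):
            self.max_jobs = max_jobs
            self.running_tasks = set()
            self._task_queue = deque()
            self._scheduling = False

        def add(self, task):
            self._task_queue.append(task)
            self.schedule()

        def schedule(self):
            if self._scheduling:
                return                # ignore recursive calls from _task_exit()
            self._scheduling = True
            try:
                while self._task_queue and len(self.running_tasks) < self.max_jobs:
                    task = self._task_queue.popleft()
                    self.running_tasks.add(task)
                    task.start()
            finally:
                self._scheduling = False

        def _task_exit(self, task):
            self.running_tasks.discard(task)
            if self._task_queue:
                self.schedule()

    queue = ToyQueue(max_jobs=2)
    for name in ("a", "b", "c"):
        queue.add(ToyTask(name, queue))
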
diff --git a/portage_with_autodep/pym/_emerge/SequentialTaskQueue.pyo b/portage_with_autodep/pym/_emerge/SequentialTaskQueue.pyo
new file mode 100644
index 0000000..3ab65c9
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/SequentialTaskQueue.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/SetArg.pyo b/portage_with_autodep/pym/_emerge/SetArg.pyo
new file mode 100644
index 0000000..5a3d9d9
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/SetArg.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/SlotObject.py b/portage_with_autodep/pym/_emerge/SlotObject.py
deleted file mode 100644
index fdc6f35..0000000
--- a/portage_with_autodep/pym/_emerge/SlotObject.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 1999-2009 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-class SlotObject(object):
- __slots__ = ("__weakref__",)
-
- def __init__(self, **kwargs):
- classes = [self.__class__]
- while classes:
- c = classes.pop()
- if c is SlotObject:
- continue
- classes.extend(c.__bases__)
- slots = getattr(c, "__slots__", None)
- if not slots:
- continue
- for myattr in slots:
- myvalue = kwargs.get(myattr, None)
- setattr(self, myattr, myvalue)
-
- def copy(self):
- """
- Create a new instance and copy all attributes
- defined from __slots__ (including those from
- inherited classes).
- """
- obj = self.__class__()
-
- classes = [self.__class__]
- while classes:
- c = classes.pop()
- if c is SlotObject:
- continue
- classes.extend(c.__bases__)
- slots = getattr(c, "__slots__", None)
- if not slots:
- continue
- for myattr in slots:
- setattr(obj, myattr, getattr(self, myattr))
-
- return obj
-
diff --git a/portage_with_autodep/pym/_emerge/SpawnProcess.py b/portage_with_autodep/pym/_emerge/SpawnProcess.py
index b72971c..9fbc964 100644
--- a/portage_with_autodep/pym/_emerge/SpawnProcess.py
+++ b/portage_with_autodep/pym/_emerge/SpawnProcess.py
@@ -26,29 +26,16 @@ class SpawnProcess(SubProcess):
"path_lookup", "pre_exec")
__slots__ = ("args",) + \
- _spawn_kwarg_names + ("_selinux_type",)
+ _spawn_kwarg_names + ("_log_file_real", "_selinux_type",)
_file_names = ("log", "process", "stdout")
_files_dict = slot_dict_class(_file_names, prefix="")
def _start(self):
- if self.cancelled:
- return
-
if self.fd_pipes is None:
self.fd_pipes = {}
fd_pipes = self.fd_pipes
- fd_pipes.setdefault(0, sys.stdin.fileno())
- fd_pipes.setdefault(1, sys.stdout.fileno())
- fd_pipes.setdefault(2, sys.stderr.fileno())
-
- # flush any pending output
- for fd in fd_pipes.values():
- if fd == sys.stdout.fileno():
- sys.stdout.flush()
- if fd == sys.stderr.fileno():
- sys.stderr.flush()
self._files = self._files_dict()
files = self._files
@@ -56,34 +43,46 @@ class SpawnProcess(SubProcess):
master_fd, slave_fd = self._pipe(fd_pipes)
fcntl.fcntl(master_fd, fcntl.F_SETFL,
fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+ files.process = master_fd
logfile = None
if self._can_log(slave_fd):
logfile = self.logfile
null_input = None
- fd_pipes_orig = fd_pipes.copy()
- if self.background:
+ if not self.background or 0 in fd_pipes:
+ # Subclasses such as AbstractEbuildProcess may have already passed
+ # in a null file descriptor in fd_pipes, so use that when given.
+ pass
+ else:
# TODO: Use job control functions like tcsetpgrp() to control
# access to stdin. Until then, use /dev/null so that any
# attempts to read from stdin will immediately return EOF
# instead of blocking indefinitely.
- null_input = open('/dev/null', 'rb')
- fd_pipes[0] = null_input.fileno()
- else:
- fd_pipes[0] = fd_pipes_orig[0]
+ null_input = os.open('/dev/null', os.O_RDWR)
+ fd_pipes[0] = null_input
+
+ fd_pipes.setdefault(0, sys.stdin.fileno())
+ fd_pipes.setdefault(1, sys.stdout.fileno())
+ fd_pipes.setdefault(2, sys.stderr.fileno())
+
+ # flush any pending output
+ for fd in fd_pipes.values():
+ if fd == sys.stdout.fileno():
+ sys.stdout.flush()
+ if fd == sys.stderr.fileno():
+ sys.stderr.flush()
- # WARNING: It is very important to use unbuffered mode here,
- # in order to avoid issue 5380 with python3.
- files.process = os.fdopen(master_fd, 'rb', 0)
if logfile is not None:
+ fd_pipes_orig = fd_pipes.copy()
fd_pipes[1] = slave_fd
fd_pipes[2] = slave_fd
files.log = open(_unicode_encode(logfile,
encoding=_encodings['fs'], errors='strict'), mode='ab')
if logfile.endswith('.gz'):
+ self._log_file_real = files.log
files.log = gzip.GzipFile(filename='', mode='ab',
fileobj=files.log)
@@ -92,7 +91,7 @@ class SpawnProcess(SubProcess):
mode=0o660)
if not self.background:
- files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
+ files.stdout = os.dup(fd_pipes_orig[1])
output_handler = self._output_handler
@@ -116,7 +115,7 @@ class SpawnProcess(SubProcess):
kwargs["returnpid"] = True
kwargs.pop("logfile", None)
- self._reg_id = self.scheduler.register(files.process.fileno(),
+ self._reg_id = self.scheduler.register(files.process,
self._registered_events, output_handler)
self._registered = True
@@ -124,7 +123,7 @@ class SpawnProcess(SubProcess):
os.close(slave_fd)
if null_input is not None:
- null_input.close()
+ os.close(null_input)
if isinstance(retval, int):
# spawn failed
@@ -161,22 +160,30 @@ class SpawnProcess(SubProcess):
def _output_handler(self, fd, event):
files = self._files
- buf = self._read_buf(files.process, event)
+ while True:
+ buf = self._read_buf(fd, event)
+
+ if buf is None:
+ # not a POLLIN event, EAGAIN, etc...
+ break
- if buf is not None:
+ if not buf:
+ # EOF
+ self._unregister()
+ self.wait()
+ break
- if buf:
+ else:
if not self.background:
write_successful = False
failures = 0
while True:
try:
if not write_successful:
- buf.tofile(files.stdout)
+ os.write(files.stdout, buf)
write_successful = True
- files.stdout.flush()
break
- except IOError as e:
+ except OSError as e:
if e.errno != errno.EAGAIN:
raise
del e
@@ -198,22 +205,17 @@ class SpawnProcess(SubProcess):
# inherit stdio file descriptors from portage
# (maybe it can't be avoided with
# PROPERTIES=interactive).
- fcntl.fcntl(files.stdout.fileno(), fcntl.F_SETFL,
- fcntl.fcntl(files.stdout.fileno(),
+ fcntl.fcntl(files.stdout, fcntl.F_SETFL,
+ fcntl.fcntl(files.stdout,
fcntl.F_GETFL) ^ os.O_NONBLOCK)
- try:
- buf.tofile(files.log)
- except TypeError:
- # array.tofile() doesn't work with GzipFile
- files.log.write(buf.tostring())
+ files.log.write(buf)
files.log.flush()
- else:
- self._unregister()
- self.wait()
self._unregister_if_appropriate(event)
+ return True
+
def _dummy_handler(self, fd, event):
"""
This method is mainly interested in detecting EOF, since
@@ -221,15 +223,26 @@ class SpawnProcess(SubProcess):
monitor the process from inside a poll() loop.
"""
- buf = self._read_buf(self._files.process, event)
+ while True:
+ buf = self._read_buf(fd, event)
- if buf is not None:
+ if buf is None:
+ # not a POLLIN event, EAGAIN, etc...
+ break
- if buf:
- pass
- else:
+ if not buf:
+ # EOF
self._unregister()
self.wait()
+ break
self._unregister_if_appropriate(event)
+ return True
+
+ def _unregister(self):
+ super(SpawnProcess, self)._unregister()
+ if self._log_file_real is not None:
+ # Avoid "ResourceWarning: unclosed file" since python 3.2.
+ self._log_file_real.close()
+ self._log_file_real = None
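
SpawnProcess now copies captured output to a raw stdout file descriptor with os.write(), handling partial writes and EAGAIN itself instead of going through buffered file objects (and, as the hunk above shows, toggling O_NONBLOCK off if a non-blocking stdout keeps refusing data). A minimal sketch of the basic write loop; write_all is an illustrative helper, not portage code:

    import errno
    import os
    import sys

    def write_all(fd, data):
        """Write data to fd, retrying on partial writes and EAGAIN."""
        while data:
            try:
                written = os.write(fd, data)
            except OSError as e:
                if e.errno != errno.EAGAIN:
                    raise
                continue              # fd temporarily unwritable, try again
            data = data[written:]

    write_all(sys.stdout.fileno(), b"copied through os.write()\n")
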
diff --git a/portage_with_autodep/pym/_emerge/SpawnProcess.pyo b/portage_with_autodep/pym/_emerge/SpawnProcess.pyo
new file mode 100644
index 0000000..7a6142e
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/SpawnProcess.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/SubProcess.py b/portage_with_autodep/pym/_emerge/SubProcess.py
index b99cf0b..76b313f 100644
--- a/portage_with_autodep/pym/_emerge/SubProcess.py
+++ b/portage_with_autodep/pym/_emerge/SubProcess.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage import os
@@ -16,6 +16,10 @@ class SubProcess(AbstractPollTask):
# serve this purpose alone.
_dummy_pipe_fd = 9
+ # This is how much time we allow for waitpid to succeed after
+ # we've sent a kill signal to our subprocess.
+ _cancel_timeout = 1000 # 1 second
+
def _poll(self):
if self.returncode is not None:
return self.returncode
@@ -60,8 +64,7 @@ class SubProcess(AbstractPollTask):
if self._registered:
if self.cancelled:
- timeout = 1000
- self.scheduler.schedule(self._reg_id, timeout=timeout)
+ self._wait_loop(timeout=self._cancel_timeout)
if self._registered:
try:
os.kill(self.pid, signal.SIGKILL)
@@ -69,41 +72,39 @@ class SubProcess(AbstractPollTask):
if e.errno != errno.ESRCH:
raise
del e
- self.scheduler.schedule(self._reg_id, timeout=timeout)
+ self._wait_loop(timeout=self._cancel_timeout)
if self._registered:
self._orphan_process_warn()
else:
- self.scheduler.schedule(self._reg_id)
- self._unregister()
+ self._wait_loop()
+
if self.returncode is not None:
return self.returncode
- try:
- # With waitpid and WNOHANG, only check the
- # first element of the tuple since the second
- # element may vary (bug #337465).
- wait_retval = os.waitpid(self.pid, os.WNOHANG)
- except OSError as e:
- if e.errno != errno.ECHILD:
- raise
- del e
- self._set_returncode((self.pid, 1 << 8))
- else:
- if wait_retval[0] != 0:
- self._set_returncode(wait_retval)
- else:
- try:
- wait_retval = os.waitpid(self.pid, 0)
- except OSError as e:
- if e.errno != errno.ECHILD:
- raise
- del e
- self._set_returncode((self.pid, 1 << 8))
- else:
- self._set_returncode(wait_retval)
+ if not isinstance(self.pid, int):
+ # Get debug info for bug #403697.
+ raise AssertionError(
+ "%s: pid is non-integer: %s" %
+ (self.__class__.__name__, repr(self.pid)))
+
+ self._waitpid_loop()
return self.returncode
+ def _waitpid_loop(self):
+ source_id = self.scheduler.child_watch_add(
+ self.pid, self._waitpid_cb)
+ try:
+ while self.returncode is None:
+ self.scheduler.iteration()
+ finally:
+ self.scheduler.source_remove(source_id)
+
+ def _waitpid_cb(self, pid, condition, user_data=None):
+ if pid != self.pid:
+ raise AssertionError("expected pid %s, got %s" % (self.pid, pid))
+ self._set_returncode((pid, condition))
+
def _orphan_process_warn(self):
pass
@@ -120,7 +121,10 @@ class SubProcess(AbstractPollTask):
if self._files is not None:
for f in self._files.values():
- f.close()
+ if isinstance(f, int):
+ os.close(f)
+ else:
+ f.close()
self._files = None
def _set_returncode(self, wait_retval):
@@ -129,6 +133,7 @@ class SubProcess(AbstractPollTask):
subprocess.Popen.returncode: A negative value -N indicates
that the child was terminated by signal N (Unix only).
"""
+ self._unregister()
pid, status = wait_retval
diff --git a/portage_with_autodep/pym/_emerge/SubProcess.pyo b/portage_with_autodep/pym/_emerge/SubProcess.pyo
new file mode 100644
index 0000000..26e13e1
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/SubProcess.pyo
Binary files differ
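
To make the new wait logic above easier to follow in isolation: instead of the task polling os.waitpid(..., WNOHANG) itself, _waitpid_loop() now asks the scheduler to watch the child and invoke a callback with (pid, status) once it exits. The sketch below is not Portage code; TinyChildWatcher and its methods are invented stand-ins for the scheduler's child_watch_add()/iteration()/source_remove() interface, and decode_status() mirrors the subprocess.Popen.returncode convention referenced in _set_returncode().

import os

class TinyChildWatcher(object):
    """Invented stand-in for the scheduler's child-watch interface."""

    def __init__(self):
        self._watches = {}   # source_id -> (pid, callback)
        self._next_id = 1

    def child_watch_add(self, pid, callback):
        source_id = self._next_id
        self._next_id += 1
        self._watches[source_id] = (pid, callback)
        return source_id

    def source_remove(self, source_id):
        self._watches.pop(source_id, None)

    def iteration(self):
        # Block until each watched child exits, then fire its callback
        # with the raw waitpid status word.
        for source_id, (pid, callback) in list(self._watches.items()):
            _, status = os.waitpid(pid, 0)
            callback(pid, status)

def decode_status(status):
    # Negative value -N means the child was terminated by signal N,
    # matching the subprocess.Popen.returncode convention.
    if os.WIFSIGNALED(status):
        return -os.WTERMSIG(status)
    return os.WEXITSTATUS(status)

if __name__ == "__main__":
    watcher = TinyChildWatcher()
    pid = os.fork()
    if pid == 0:
        os._exit(7)          # stand-in for the build subprocess

    result = {}
    def on_exit(child_pid, status):
        result["returncode"] = decode_status(status)

    source_id = watcher.child_watch_add(pid, on_exit)
    while "returncode" not in result:
        watcher.iteration()  # mirrors the loop in _waitpid_loop()
    watcher.source_remove(source_id)
    print("child exited with", result["returncode"])   # -> 7
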
diff --git a/portage_with_autodep/pym/_emerge/Task.py b/portage_with_autodep/pym/_emerge/Task.py
index efbe3a9..40f5066 100644
--- a/portage_with_autodep/pym/_emerge/Task.py
+++ b/portage_with_autodep/pym/_emerge/Task.py
@@ -1,7 +1,8 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
-from _emerge.SlotObject import SlotObject
+from portage.util.SlotObject import SlotObject
+
class Task(SlotObject):
__slots__ = ("_hash_key", "_hash_value")
diff --git a/portage_with_autodep/pym/_emerge/Task.pyo b/portage_with_autodep/pym/_emerge/Task.pyo
new file mode 100644
index 0000000..2958cb1
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/Task.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/TaskScheduler.py b/portage_with_autodep/pym/_emerge/TaskScheduler.py
index 83c0cbe..583bfe3 100644
--- a/portage_with_autodep/pym/_emerge/TaskScheduler.py
+++ b/portage_with_autodep/pym/_emerge/TaskScheduler.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2009 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from _emerge.QueueScheduler import QueueScheduler
@@ -11,13 +11,14 @@ class TaskScheduler(object):
add tasks and call run(). The run() method returns when no tasks remain.
"""
- def __init__(self, max_jobs=None, max_load=None):
+ def __init__(self, main=True, max_jobs=None, max_load=None):
self._queue = SequentialTaskQueue(max_jobs=max_jobs)
- self._scheduler = QueueScheduler(
+ self._scheduler = QueueScheduler(main=main,
max_jobs=max_jobs, max_load=max_load)
self.sched_iface = self._scheduler.sched_iface
self.run = self._scheduler.run
self.clear = self._scheduler.clear
+ self.wait = self._queue.wait
self._scheduler.add(self._queue)
def add(self, task):
diff --git a/portage_with_autodep/pym/_emerge/TaskScheduler.pyo b/portage_with_autodep/pym/_emerge/TaskScheduler.pyo
new file mode 100644
index 0000000..8b84de7
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/TaskScheduler.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/TaskSequence.pyo b/portage_with_autodep/pym/_emerge/TaskSequence.pyo
new file mode 100644
index 0000000..b98196e
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/TaskSequence.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/UninstallFailure.pyo b/portage_with_autodep/pym/_emerge/UninstallFailure.pyo
new file mode 100644
index 0000000..9f1c88b
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/UninstallFailure.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/UnmergeDepPriority.pyo b/portage_with_autodep/pym/_emerge/UnmergeDepPriority.pyo
new file mode 100644
index 0000000..b163ed7
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/UnmergeDepPriority.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/UseFlagDisplay.pyo b/portage_with_autodep/pym/_emerge/UseFlagDisplay.pyo
new file mode 100644
index 0000000..005b007
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/UseFlagDisplay.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/__init__.pyo b/portage_with_autodep/pym/_emerge/__init__.pyo
new file mode 100644
index 0000000..fba4ca5
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/__init__.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/_find_deep_system_runtime_deps.pyo b/portage_with_autodep/pym/_emerge/_find_deep_system_runtime_deps.pyo
new file mode 100644
index 0000000..8ad61b2
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/_find_deep_system_runtime_deps.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.py b/portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.py
index eab4168..9ac65b8 100644
--- a/portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.py
+++ b/portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.py
@@ -8,7 +8,7 @@ def _flush_elog_mod_echo():
Dump the mod_echo output now so that our other
notifications are shown last.
@rtype: bool
- @returns: True if messages were shown, False otherwise.
+ @return: True if messages were shown, False otherwise.
"""
messages_shown = bool(mod_echo._items)
mod_echo.finalize()
diff --git a/portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.pyo b/portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.pyo
new file mode 100644
index 0000000..f211d41
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/_flush_elog_mod_echo.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/actions.py b/portage_with_autodep/pym/_emerge/actions.py
index 2166963..eaf5a15 100644
--- a/portage_with_autodep/pym/_emerge/actions.py
+++ b/portage_with_autodep/pym/_emerge/actions.py
@@ -1,18 +1,19 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
import errno
import logging
+import operator
import platform
import pwd
import random
import re
-import shutil
import signal
import socket
import stat
+import subprocess
import sys
import tempfile
import textwrap
@@ -20,20 +21,27 @@ import time
from itertools import chain
import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.news:count_unread_news,display_news_notifications',
+)
+
+from portage.localization import _
from portage import os
-from portage import subprocess_getstatusoutput
-from portage import _unicode_decode
+from portage import shutil
+from portage import eapi_is_supported, _unicode_decode
from portage.cache.cache_errors import CacheError
-from portage.const import GLOBAL_CONFIG_PATH, NEWS_LIB_PATH
+from portage.const import GLOBAL_CONFIG_PATH
from portage.const import _ENABLE_DYN_LINK_MAP, _ENABLE_SET_CONFIG
from portage.dbapi.dep_expand import dep_expand
from portage.dbapi._expand_new_virt import expand_new_virt
from portage.dep import Atom, extended_cp_match
+from portage.eclass_cache import hashed_path
from portage.exception import InvalidAtom
from portage.output import blue, bold, colorize, create_color_func, darkgreen, \
red, yellow
good = create_color_func("GOOD")
bad = create_color_func("BAD")
+warn = create_color_func("WARN")
from portage.package.ebuild._ipc.QueryCommand import QueryCommand
from portage.package.ebuild.doebuild import _check_temp_dir
from portage._sets import load_default_config, SETPREFIX
@@ -180,8 +188,7 @@ def action_build(settings, trees, mtimedb,
" entire repository or category at once."
prefix = bad(" * ")
writemsg(prefix + "\n")
- from textwrap import wrap
- for line in wrap(msg, 72):
+ for line in textwrap.wrap(msg, 72):
writemsg("%s%s\n" % (prefix, line))
writemsg(prefix + "\n")
@@ -209,7 +216,6 @@ def action_build(settings, trees, mtimedb,
if isinstance(e, depgraph.UnsatisfiedResumeDep):
mydepgraph = e.depgraph
- from textwrap import wrap
from portage.output import EOutput
out = EOutput()
@@ -248,7 +254,7 @@ def action_build(settings, trees, mtimedb,
"to skip the first package in the list and " + \
"any other packages that may be " + \
"masked or have missing dependencies."
- for line in wrap(msg, 72):
+ for line in textwrap.wrap(msg, 72):
out.eerror(line)
elif isinstance(e, portage.exception.PackageNotFound):
out.eerror("An expected package is " + \
@@ -258,7 +264,7 @@ def action_build(settings, trees, mtimedb,
"packages that are no longer " + \
"available. Please restart/continue " + \
"the operation manually."
- for line in wrap(msg, 72):
+ for line in textwrap.wrap(msg, 72):
out.eerror(line)
if success:
@@ -291,7 +297,7 @@ def action_build(settings, trees, mtimedb,
success, mydepgraph, favorites = backtrack_depgraph(
settings, trees, myopts, myparams, myaction, myfiles, spinner)
except portage.exception.PackageSetNotFound as e:
- root_config = trees[settings["ROOT"]]["root_config"]
+ root_config = trees[settings['EROOT']]['root_config']
display_missing_pkg_set(root_config, e.value)
return 1
@@ -329,7 +335,7 @@ def action_build(settings, trees, mtimedb,
mergecount += 1
if mergecount==0:
- sets = trees[settings["ROOT"]]["root_config"].sets
+ sets = trees[settings['EROOT']]['root_config'].sets
world_candidates = None
if "selective" in myparams and \
not oneshot and favorites:
@@ -362,7 +368,7 @@ def action_build(settings, trees, mtimedb,
print()
print("Quitting.")
print()
- return os.EX_OK
+ return 128 + signal.SIGINT
# Don't ask again (e.g. when auto-cleaning packages after merge)
myopts.pop("--ask", None)
@@ -439,7 +445,7 @@ def action_build(settings, trees, mtimedb,
if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
if "yes" == settings.get("AUTOCLEAN"):
portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
- unmerge(trees[settings["ROOT"]]["root_config"],
+ unmerge(trees[settings['EROOT']]['root_config'],
myopts, "clean", [],
ldpath_mtimes, autoclean=1)
else:
@@ -454,7 +460,7 @@ def action_config(settings, trees, myopts, myfiles):
if len(myfiles) != 1:
print(red("!!! config can only take a single package atom at this time\n"))
sys.exit(1)
- if not is_valid_package_atom(myfiles[0]):
+ if not is_valid_package_atom(myfiles[0], allow_repo=True):
portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
noiselevel=-1)
portage.writemsg("!!! Please check ebuild(5) for full details.\n")
@@ -462,7 +468,7 @@ def action_config(settings, trees, myopts, myfiles):
sys.exit(1)
print()
try:
- pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
+ pkgs = trees[settings['EROOT']]['vartree'].dbapi.match(myfiles[0])
except portage.exception.AmbiguousPackageName as e:
# Multiple matches thrown from cpv_expand
pkgs = e.args[0]
@@ -482,7 +488,7 @@ def action_config(settings, trees, myopts, myfiles):
options.append("X")
idx = userquery("Selection?", enter_invalid, responses=options)
if idx == "X":
- sys.exit(0)
+ sys.exit(128 + signal.SIGINT)
pkg = pkgs[int(idx)-1]
else:
print("The following packages available:")
@@ -496,21 +502,20 @@ def action_config(settings, trees, myopts, myfiles):
print()
if "--ask" in myopts:
if userquery("Ready to configure %s?" % pkg, enter_invalid) == "No":
- sys.exit(0)
+ sys.exit(128 + signal.SIGINT)
else:
print("Configuring pkg...")
print()
- ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
+ ebuildpath = trees[settings['EROOT']]['vartree'].dbapi.findname(pkg)
mysettings = portage.config(clone=settings)
- vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
+ vardb = trees[mysettings['EROOT']]['vartree'].dbapi
debug = mysettings.get("PORTAGE_DEBUG") == "1"
- retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
- mysettings,
+ retval = portage.doebuild(ebuildpath, "config", settings=mysettings,
debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
- mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
+ mydbapi = trees[settings['EROOT']]['vartree'].dbapi, tree="vartree")
if retval == os.EX_OK:
- portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
- mysettings, debug=debug, mydbapi=vardb, tree="vartree")
+ portage.doebuild(ebuildpath, "clean", settings=mysettings,
+ debug=debug, mydbapi=vardb, tree="vartree")
print()
def action_depclean(settings, trees, ldpath_mtimes,
@@ -550,7 +555,7 @@ def action_depclean(settings, trees, ldpath_mtimes,
for x in msg:
portage.writemsg_stdout(colorize("WARN", " * ") + x)
- root_config = trees[settings['ROOT']]['root_config']
+ root_config = trees[settings['EROOT']]['root_config']
vardb = root_config.trees['vartree'].dbapi
args_set = InternalPackageSet(allow_repo=True)
@@ -582,15 +587,15 @@ def action_depclean(settings, trees, ldpath_mtimes,
return rval
if cleanlist:
- unmerge(root_config, myopts, "unmerge",
+ rval = unmerge(root_config, myopts, "unmerge",
cleanlist, ldpath_mtimes, ordered=ordered,
scheduler=scheduler)
if action == "prune":
- return
+ return rval
if not cleanlist and "--quiet" in myopts:
- return
+ return rval
print("Packages installed: " + str(len(vardb.cpv_all())))
print("Packages in world: " + \
@@ -603,14 +608,17 @@ def action_depclean(settings, trees, ldpath_mtimes,
else:
print("Number removed: "+str(len(cleanlist)))
+ return rval
+
def calc_depclean(settings, trees, ldpath_mtimes,
myopts, action, args_set, spinner):
allow_missing_deps = bool(args_set)
debug = '--debug' in myopts
xterm_titles = "notitles" not in settings.features
- myroot = settings["ROOT"]
- root_config = trees[myroot]["root_config"]
+ root_len = len(settings["ROOT"])
+ eroot = settings['EROOT']
+ root_config = trees[eroot]["root_config"]
psets = root_config.setconfig.psets
deselect = myopts.get('--deselect') != 'n'
required_sets = {}
@@ -649,8 +657,8 @@ def calc_depclean(settings, trees, ldpath_mtimes,
resolver_params = create_depgraph_params(myopts, "remove")
resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
resolver._load_vdb()
- vardb = resolver._frozen_config.trees[myroot]["vartree"].dbapi
- real_vardb = trees[myroot]["vartree"].dbapi
+ vardb = resolver._frozen_config.trees[eroot]["vartree"].dbapi
+ real_vardb = trees[eroot]["vartree"].dbapi
if action == "depclean":
@@ -705,7 +713,8 @@ def calc_depclean(settings, trees, ldpath_mtimes,
# that are also matched by argument atoms, but do not remove
# them if they match the highest installed version.
for pkg in vardb:
- spinner.update()
+ if spinner is not None:
+ spinner.update()
pkgs_for_cp = vardb.match_pkgs(pkg.cp)
if not pkgs_for_cp or pkg not in pkgs_for_cp:
raise AssertionError("package expected in matches: " + \
@@ -751,7 +760,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
del e
required_sets['__excluded__'].add("=" + pkg.cpv)
- success = resolver._complete_graph(required_sets={myroot:required_sets})
+ success = resolver._complete_graph(required_sets={eroot:required_sets})
writemsg_level("\b\b... done!\n")
resolver.display_problems()
@@ -937,7 +946,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
consumers = {}
for lib in pkg_dblink.getcontents():
- lib = lib[len(myroot):]
+ lib = lib[root_len:]
lib_key = linkmap._obj_key(lib)
lib_consumers = consumer_cache.get(lib_key)
if lib_consumers is None:
@@ -1053,9 +1062,8 @@ def calc_depclean(settings, trees, ldpath_mtimes,
"the packages that pulled them in."
prefix = bad(" * ")
- from textwrap import wrap
writemsg_level("".join(prefix + "%s\n" % line for \
- line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
+ line in textwrap.wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
msg = []
for pkg in sorted(consumer_map, key=cmp_sort_key(cmp_pkg_cpv)):
@@ -1095,7 +1103,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
writemsg_level("\nCalculating dependencies ")
success = resolver._complete_graph(
- required_sets={myroot:required_sets})
+ required_sets={eroot:required_sets})
writemsg_level("\b\b... done!\n")
resolver.display_problems()
if not success:
@@ -1137,7 +1145,6 @@ def calc_depclean(settings, trees, ldpath_mtimes,
for node in clean_set:
graph.add(node, None)
- mydeps = []
for dep_type in dep_keys:
depstr = node.metadata[dep_type]
if not depstr:
@@ -1153,7 +1160,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
% (priority,), noiselevel=-1, level=logging.DEBUG)
try:
- atoms = resolver._select_atoms(myroot, depstr,
+ atoms = resolver._select_atoms(eroot, depstr,
myuse=node.use.enabled, parent=node,
priority=priority)[node]
except portage.exception.InvalidDependString:
@@ -1226,7 +1233,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
def action_deselect(settings, trees, opts, atoms):
enter_invalid = '--ask-enter-invalid' in opts
- root_config = trees[settings['ROOT']]['root_config']
+ root_config = trees[settings['EROOT']]['root_config']
world_set = root_config.sets['selected']
if not hasattr(world_set, 'update'):
writemsg_level("World @selected set does not appear to be mutable.\n",
@@ -1291,7 +1298,7 @@ def action_deselect(settings, trees, opts, atoms):
prompt = "Would you like to remove these " + \
"packages from your world favorites?"
if userquery(prompt, enter_invalid) == 'No':
- return os.EX_OK
+ return 128 + signal.SIGINT
remaining = set(world_set)
remaining.difference_update(discard_atoms)
@@ -1325,11 +1332,12 @@ def action_info(settings, trees, myopts, myfiles):
output_buffer = []
append = output_buffer.append
- root_config = trees[settings['ROOT']]['root_config']
+ root_config = trees[settings['EROOT']]['root_config']
+ running_eroot = trees._running_eroot
- append(getportageversion(settings["PORTDIR"], settings["ROOT"],
+ append(getportageversion(settings["PORTDIR"], None,
settings.profile_path, settings["CHOST"],
- trees[settings["ROOT"]]["vartree"].dbapi))
+ trees[settings['EROOT']]["vartree"].dbapi))
header_width = 65
header_title = "System Settings"
@@ -1347,7 +1355,14 @@ def action_info(settings, trees, myopts, myfiles):
lastSync = "Unknown"
append("Timestamp of tree: %s" % (lastSync,))
- output=subprocess_getstatusoutput("distcc --version")
+ try:
+ proc = subprocess.Popen(["distcc", "--version"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ output = (1, None)
+ else:
+ output = _unicode_decode(proc.communicate()[0]).rstrip("\n")
+ output = (proc.wait(), output)
if output[0] == os.EX_OK:
distcc_str = output[1].split("\n", 1)[0]
if "distcc" in settings.features:
@@ -1356,7 +1371,14 @@ def action_info(settings, trees, myopts, myfiles):
distcc_str += " [disabled]"
append(distcc_str)
- output=subprocess_getstatusoutput("ccache -V")
+ try:
+ proc = subprocess.Popen(["ccache", "-V"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ output = (1, None)
+ else:
+ output = _unicode_decode(proc.communicate()[0]).rstrip("\n")
+ output = (proc.wait(), output)
if output[0] == os.EX_OK:
ccache_str = output[1].split("\n", 1)[0]
if "ccache" in settings.features:
@@ -1369,7 +1391,7 @@ def action_info(settings, trees, myopts, myfiles):
"sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
atoms = []
- vardb = trees["/"]["vartree"].dbapi
+ vardb = trees[running_eroot]['vartree'].dbapi
for x in myvars:
try:
x = Atom(x)
@@ -1382,7 +1404,7 @@ def action_info(settings, trees, myopts, myfiles):
myvars = sorted(set(atoms))
- portdb = trees["/"]["porttree"].dbapi
+ portdb = trees[running_eroot]['porttree'].dbapi
main_repo = portdb.getRepositoryName(portdb.porttree_root)
cp_map = {}
cp_max_len = 0
@@ -1425,8 +1447,6 @@ def action_info(settings, trees, myopts, myfiles):
append("%s %s" % \
((cp + ":").ljust(cp_max_len + 1), versions))
- libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
-
repos = portdb.settings.repositories
if "--verbose" in myopts:
append("Repositories:\n")
@@ -1463,9 +1483,6 @@ def action_info(settings, trees, myopts, myfiles):
myvars = portage.util.unique_array(myvars)
use_expand = settings.get('USE_EXPAND', '').split()
use_expand.sort()
- use_expand_hidden = set(
- settings.get('USE_EXPAND_HIDDEN', '').upper().split())
- alphabetical_use = '--alphabetical' in myopts
unset_vars = []
myvars.sort()
for k in myvars:
@@ -1504,9 +1521,10 @@ def action_info(settings, trees, myopts, myfiles):
# See if we can find any packages installed matching the strings
# passed on the command line
mypkgs = []
- vardb = trees[settings["ROOT"]]["vartree"].dbapi
- portdb = trees[settings["ROOT"]]["porttree"].dbapi
- bindb = trees[settings["ROOT"]]["bintree"].dbapi
+ eroot = settings['EROOT']
+ vardb = trees[eroot]["vartree"].dbapi
+ portdb = trees[eroot]['porttree'].dbapi
+ bindb = trees[eroot]["bintree"].dbapi
for x in myfiles:
match_found = False
installed_match = vardb.match(x)
@@ -1541,7 +1559,6 @@ def action_info(settings, trees, myopts, myfiles):
mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
auxkeys.append('DEFINED_PHASES')
- global_vals = {}
pkgsettings = portage.config(clone=settings)
# Loop through each package
@@ -1611,19 +1628,19 @@ def action_info(settings, trees, myopts, myfiles):
continue
if pkg_type == "installed":
- portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
- pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
- mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
+ portage.doebuild(ebuildpath, "info", settings=pkgsettings,
+ debug=(settings.get("PORTAGE_DEBUG", "") == 1),
+ mydbapi=trees[settings['EROOT']]["vartree"].dbapi,
tree="vartree")
elif pkg_type == "ebuild":
- portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
- pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
- mydbapi=trees[settings["ROOT"]]["porttree"].dbapi,
+ portage.doebuild(ebuildpath, "info", settings=pkgsettings,
+ debug=(settings.get("PORTAGE_DEBUG", "") == 1),
+ mydbapi=trees[settings['EROOT']]['porttree'].dbapi,
tree="porttree")
elif pkg_type == "binary":
- portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
- pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
- mydbapi=trees[settings["ROOT"]]["bintree"].dbapi,
+ portage.doebuild(ebuildpath, "info", settings=pkgsettings,
+ debug=(settings.get("PORTAGE_DEBUG", "") == 1),
+ mydbapi=trees[settings['EROOT']]["bintree"].dbapi,
tree="bintree")
shutil.rmtree(tmpdir)
@@ -1643,8 +1660,7 @@ def action_metadata(settings, portdb, myopts, porttrees=None):
if not os.path.exists(cachedir):
os.makedirs(cachedir)
- auxdbkeys = [x for x in portage.auxdbkeys if not x.startswith("UNUSED_0")]
- auxdbkeys = tuple(auxdbkeys)
+ auxdbkeys = portdb._known_keys
class TreeData(object):
__slots__ = ('dest_db', 'eclass_db', 'path', 'src_db', 'valid_nodes')
@@ -1658,18 +1674,14 @@ def action_metadata(settings, portdb, myopts, porttrees=None):
porttrees_data = []
for path in porttrees:
src_db = portdb._pregen_auxdb.get(path)
- if src_db is None and \
- os.path.isdir(os.path.join(path, 'metadata', 'cache')):
- src_db = portdb.metadbmodule(
- path, 'metadata/cache', auxdbkeys, readonly=True)
- try:
- src_db.ec = portdb._repo_info[path].eclass_db
- except AttributeError:
- pass
+ if src_db is None:
+ # portdbapi does not populate _pregen_auxdb
+ # when FEATURES=metadata-transfer is enabled
+ src_db = portdb._create_pregen_cache(path)
if src_db is not None:
porttrees_data.append(TreeData(portdb.auxdb[path],
- portdb._repo_info[path].eclass_db, path, src_db))
+ portdb.repositories.get_repo_for_location(path).eclass_db, path, src_db))
porttrees = [tree_data.path for tree_data in porttrees_data]
@@ -1704,42 +1716,45 @@ def action_metadata(settings, portdb, myopts, porttrees=None):
if onProgress is not None:
onProgress(maxval, curval)
- from portage.cache.util import quiet_mirroring
- from portage import eapi_is_supported, \
- _validate_cache_for_unsupported_eapis
-
# TODO: Display error messages, but do not interfere with the progress bar.
# Here's how:
# 1) erase the progress bar
# 2) show the error message
# 3) redraw the progress bar on a new line
- noise = quiet_mirroring()
for cp in cp_all:
for tree_data in porttrees_data:
+
+ src_chf = tree_data.src_db.validation_chf
+ dest_chf = tree_data.dest_db.validation_chf
+ dest_chf_key = '_%s_' % dest_chf
+ dest_chf_getter = operator.attrgetter(dest_chf)
+
for cpv in portdb.cp_list(cp, mytree=tree_data.path):
tree_data.valid_nodes.add(cpv)
try:
src = tree_data.src_db[cpv]
- except KeyError as e:
- noise.missing_entry(cpv)
- del e
+ except (CacheError, KeyError):
continue
- except CacheError as ce:
- noise.exception(cpv, ce)
- del ce
+
+ ebuild_location = portdb.findname(cpv, mytree=tree_data.path)
+ if ebuild_location is None:
+ continue
+ ebuild_hash = hashed_path(ebuild_location)
+
+ try:
+ if not tree_data.src_db.validate_entry(src,
+ ebuild_hash, tree_data.eclass_db):
+ continue
+ except CacheError:
continue
eapi = src.get('EAPI')
if not eapi:
eapi = '0'
- eapi = eapi.lstrip('-')
eapi_supported = eapi_is_supported(eapi)
if not eapi_supported:
- if not _validate_cache_for_unsupported_eapis:
- noise.misc(cpv, "unable to validate " + \
- "cache for EAPI='%s'" % eapi)
- continue
+ continue
dest = None
try:
@@ -1751,18 +1766,30 @@ def action_metadata(settings, portdb, myopts, porttrees=None):
if d is not None and d.get('EAPI') in ('', '0'):
del d['EAPI']
+ if src_chf != 'mtime':
+ # src may contain an irrelevant _mtime_ which corresponds
+ # to the time that the cache entry was written
+ src.pop('_mtime_', None)
+
+ if src_chf != dest_chf:
+ # populate src entry with dest_chf_key
+ # (the validity of the dest_chf that we generate from the
+ # ebuild here relies on the fact that we already used
+ # validate_entry to validate the ebuild with src_chf)
+ src[dest_chf_key] = dest_chf_getter(ebuild_hash)
+
if dest is not None:
- if not (dest['_mtime_'] == src['_mtime_'] and \
- tree_data.eclass_db.is_eclass_data_valid(
- dest['_eclasses_']) and \
+ if not (dest[dest_chf_key] == src[dest_chf_key] and \
+ tree_data.eclass_db.validate_and_rewrite_cache(
+ dest['_eclasses_'], tree_data.dest_db.validation_chf,
+ tree_data.dest_db.store_eclass_paths) is not None and \
set(dest['_eclasses_']) == set(src['_eclasses_'])):
dest = None
else:
# We don't want to skip the write unless we're really
# sure that the existing cache is identical, so don't
# trust _mtime_ and _eclasses_ alone.
- for k in set(chain(src, dest)).difference(
- ('_mtime_', '_eclasses_')):
+ for k in auxdbkeys:
if dest.get(k, '') != src.get(k, ''):
dest = None
break
@@ -1773,56 +1800,10 @@ def action_metadata(settings, portdb, myopts, porttrees=None):
continue
try:
- inherited = src.get('INHERITED', '')
- eclasses = src.get('_eclasses_')
- except CacheError as ce:
- noise.exception(cpv, ce)
- del ce
- continue
-
- if eclasses is not None:
- if not tree_data.eclass_db.is_eclass_data_valid(
- src['_eclasses_']):
- noise.eclass_stale(cpv)
- continue
- inherited = eclasses
- else:
- inherited = inherited.split()
-
- if tree_data.src_db.complete_eclass_entries and \
- eclasses is None:
- noise.corruption(cpv, "missing _eclasses_ field")
- continue
-
- if inherited:
- # Even if _eclasses_ already exists, replace it with data from
- # eclass_cache, in order to insert local eclass paths.
- try:
- eclasses = tree_data.eclass_db.get_eclass_data(inherited)
- except KeyError:
- # INHERITED contains a non-existent eclass.
- noise.eclass_stale(cpv)
- continue
-
- if eclasses is None:
- noise.eclass_stale(cpv)
- continue
- src['_eclasses_'] = eclasses
- else:
- src['_eclasses_'] = {}
-
- if not eapi_supported:
- src = {
- 'EAPI' : '-' + eapi,
- '_mtime_' : src['_mtime_'],
- '_eclasses_' : src['_eclasses_'],
- }
-
- try:
tree_data.dest_db[cpv] = src
- except CacheError as ce:
- noise.exception(cpv, ce)
- del ce
+ except CacheError:
+ # ignore it; can't do anything about it.
+ pass
curval += 1
if onProgress is not None:
@@ -1860,12 +1841,6 @@ def action_regen(settings, portdb, max_jobs, max_load):
xterm_titles = "notitles" not in settings.features
emergelog(xterm_titles, " === regen")
#regenerate cache entries
- try:
- os.close(sys.stdin.fileno())
- except SystemExit as e:
- raise # Needed else can't exit
- except:
- pass
sys.stdout.flush()
regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
@@ -1921,7 +1896,7 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
enter_invalid = '--ask-enter-invalid' in myopts
xterm_titles = "notitles" not in settings.features
emergelog(xterm_titles, " === sync")
- portdb = trees[settings["ROOT"]]["porttree"].dbapi
+ portdb = trees[settings['EROOT']]['porttree'].dbapi
myportdir = portdb.porttree_root
if not myportdir:
myportdir = settings.get('PORTDIR', '')
@@ -1993,6 +1968,7 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
os.umask(0o022)
dosyncuri = syncuri
updatecache_flg = False
+ git = False
if myaction == "metadata":
print("skipping sync")
updatecache_flg = True
@@ -2021,9 +1997,7 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
msg = ">>> Git pull in %s successful" % myportdir
emergelog(xterm_titles, msg)
writemsg_level(msg + "\n")
- exitcode = git_sync_timestamps(settings, myportdir)
- if exitcode == os.EX_OK:
- updatecache_flg = True
+ git = True
elif syncuri[:8]=="rsync://" or syncuri[:6]=="ssh://":
for vcs_dir in vcs_dirs:
writemsg_level(("!!! %s appears to be under revision " + \
@@ -2050,6 +2024,7 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
"--whole-file", # Don't do block transfers, only entire files
"--delete", # Delete files that aren't in the master tree
"--stats", # Show final statistics about what was transfered
+ "--human-readable",
"--timeout="+str(mytimeout), # IO timeout if not done in X seconds
"--exclude=/distfiles", # Exclude distfiles from consideration
"--exclude=/local", # Exclude local from consideration
@@ -2237,7 +2212,7 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
print()
print("Quitting.")
print()
- sys.exit(0)
+ sys.exit(128 + signal.SIGINT)
emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
if "--quiet" not in myopts:
print(">>> Starting rsync with "+dosyncuri+"...")
@@ -2465,17 +2440,25 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
noiselevel=-1, level=logging.ERROR)
return 1
+ # Reload the whole config from scratch.
+ settings, trees, mtimedb = load_emerge_config(trees=trees)
+ adjust_configs(myopts, trees)
+ root_config = trees[settings['EROOT']]['root_config']
+ portdb = trees[settings['EROOT']]['porttree'].dbapi
+
+ if git:
+ # NOTE: Do this after reloading the config, in case
+ # it did not exist prior to sync, so that the config
+ # and portdb properly account for its existence.
+ exitcode = git_sync_timestamps(portdb, myportdir)
+ if exitcode == os.EX_OK:
+ updatecache_flg = True
+
if updatecache_flg and \
myaction != "metadata" and \
"metadata-transfer" not in settings.features:
updatecache_flg = False
- # Reload the whole config from scratch.
- settings, trees, mtimedb = load_emerge_config(trees=trees)
- adjust_configs(myopts, trees)
- root_config = trees[settings["ROOT"]]["root_config"]
- portdb = trees[settings["ROOT"]]["porttree"].dbapi
-
if updatecache_flg and \
os.path.exists(os.path.join(myportdir, 'metadata', 'cache')):
@@ -2489,13 +2472,13 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
# Reload the whole config from scratch.
settings, trees, mtimedb = load_emerge_config(trees=trees)
adjust_configs(myopts, trees)
- portdb = trees[settings["ROOT"]]["porttree"].dbapi
- root_config = trees[settings["ROOT"]]["root_config"]
+ portdb = trees[settings['EROOT']]['porttree'].dbapi
+ root_config = trees[settings['EROOT']]['root_config']
mybestpv = portdb.xmatch("bestmatch-visible",
portage.const.PORTAGE_PACKAGE_ATOM)
mypvs = portage.best(
- trees[settings["ROOT"]]["vartree"].dbapi.match(
+ trees[settings['EROOT']]['vartree'].dbapi.match(
portage.const.PORTAGE_PACKAGE_ATOM))
chk_updated_cfg_files(settings["EROOT"],
@@ -2514,10 +2497,10 @@ def action_sync(settings, trees, mtimedb, myopts, myaction):
if(mybestpv != mypvs) and not "--quiet" in myopts:
print()
- print(red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended")
- print(red(" * ")+"that you update portage now, before any other packages are updated.")
+ print(warn(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended")
+ print(warn(" * ")+"that you update portage now, before any other packages are updated.")
print()
- print(red(" * ")+"To update portage, run 'emerge portage' now.")
+ print(warn(" * ")+"To update portage, run 'emerge portage' now.")
print()
display_news_notification(root_config, myopts)
@@ -2528,7 +2511,8 @@ def action_uninstall(settings, trees, ldpath_mtimes,
# For backward compat, some actions do not require leading '='.
ignore_missing_eq = action in ('clean', 'unmerge')
root = settings['ROOT']
- vardb = trees[root]['vartree'].dbapi
+ eroot = settings['EROOT']
+ vardb = trees[settings['EROOT']]['vartree'].dbapi
valid_atoms = []
lookup_owners = []
@@ -2566,9 +2550,9 @@ def action_uninstall(settings, trees, ldpath_mtimes,
valid_atoms.append(atom)
elif x.startswith(os.sep):
- if not x.startswith(root):
+ if not x.startswith(eroot):
writemsg_level(("!!! '%s' does not start with" + \
- " $ROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
+ " $EROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
return 1
# Queue these up since it's most efficient to handle
# multiple files in a single iter_owners() call.
@@ -2661,7 +2645,7 @@ def action_uninstall(settings, trees, ldpath_mtimes,
# redirection of ebuild phase output to logs as required for
# options such as --quiet.
sched = Scheduler(settings, trees, None, opts,
- spinner)
+ spinner, uninstall_only=True)
sched._background = sched._background_mode()
sched._status_display.quiet = True
@@ -2670,16 +2654,15 @@ def action_uninstall(settings, trees, ldpath_mtimes,
sched.settings["PORTAGE_BACKGROUND"] = "1"
sched.settings.backup_changes("PORTAGE_BACKGROUND")
sched.settings.lock()
- sched.pkgsettings[root] = portage.config(clone=sched.settings)
+ sched.pkgsettings[eroot] = portage.config(clone=sched.settings)
if action in ('clean', 'unmerge') or \
(action == 'prune' and "--nodeps" in opts):
# When given a list of atoms, unmerge them in the order given.
ordered = action == 'unmerge'
- unmerge(trees[settings["ROOT"]]['root_config'], opts, action,
+ rval = unmerge(trees[settings['EROOT']]['root_config'], opts, action,
valid_atoms, ldpath_mtimes, ordered=ordered,
scheduler=sched._sched_iface)
- rval = os.EX_OK
else:
rval = action_depclean(settings, trees, ldpath_mtimes,
opts, action, valid_atoms, spinner, scheduler=sched._sched_iface)
@@ -2730,7 +2713,13 @@ def adjust_config(myopts, settings):
settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
settings.backup_changes("EMERGE_WARNING_DELAY")
- if "--quiet" in myopts or "--quiet-build" in myopts:
+ buildpkg = myopts.get("--buildpkg")
+ if buildpkg is True:
+ settings.features.add("buildpkg")
+ elif buildpkg == 'n':
+ settings.features.discard("buildpkg")
+
+ if "--quiet" in myopts:
settings["PORTAGE_QUIET"]="1"
settings.backup_changes("PORTAGE_QUIET")
@@ -2766,8 +2755,8 @@ def adjust_config(myopts, settings):
if settings.get("NOCOLOR") not in ("yes","true"):
portage.output.havecolor = 1
- """The explicit --color < y | n > option overrides the NOCOLOR environment
- variable and stdout auto-detection."""
+ # The explicit --color < y | n > option overrides the NOCOLOR environment
+ # variable and stdout auto-detection.
if "--color" in myopts:
if "y" == myopts["--color"]:
portage.output.havecolor = 1
@@ -2806,7 +2795,7 @@ def relative_profile_path(portdir, abs_profile):
profilever = None
return profilever
-def getportageversion(portdir, target_root, profile, chost, vardb):
+def getportageversion(portdir, _unused, profile, chost, vardb):
profilever = None
if profile:
profilever = relative_profile_path(portdir, profile)
@@ -2839,7 +2828,7 @@ def getportageversion(portdir, target_root, profile, chost, vardb):
for cpv in sorted(libclist):
libc_split = portage.catpkgsplit(cpv)[1:]
if libc_split[-1] == "r0":
- libc_split[:-1]
+ libc_split = libc_split[:-1]
libcver.append("-".join(libc_split))
else:
libcver = ["unavailable"]
@@ -2850,27 +2839,35 @@ def getportageversion(portdir, target_root, profile, chost, vardb):
return "Portage %s (%s, %s, %s, %s)" % \
(portage.VERSION, profilever, gccver, ",".join(libcver), unameout)
-def git_sync_timestamps(settings, portdir):
+def git_sync_timestamps(portdb, portdir):
"""
Since git doesn't preserve timestamps, synchronize timestamps between
entries and ebuilds/eclasses. Assume the cache has the correct timestamp
for a given file as long as the file in the working tree is not modified
(relative to HEAD).
"""
- cache_dir = os.path.join(portdir, "metadata", "cache")
- if not os.path.isdir(cache_dir):
- return os.EX_OK
- writemsg_level(">>> Synchronizing timestamps...\n")
- from portage.cache.cache_errors import CacheError
+ cache_db = portdb._pregen_auxdb.get(portdir)
+
try:
- cache_db = settings.load_best_module("portdbapi.metadbmodule")(
- portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
+ if cache_db is None:
+ # portdbapi does not populate _pregen_auxdb
+ # when FEATURES=metadata-transfer is enabled
+ cache_db = portdb._create_pregen_cache(portdir)
except CacheError as e:
writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
level=logging.ERROR, noiselevel=-1)
return 1
+ if cache_db is None:
+ return os.EX_OK
+
+ if cache_db.validation_chf != 'mtime':
+ # newer formats like md5-dict do not require mtime sync
+ return os.EX_OK
+
+ writemsg_level(">>> Synchronizing timestamps...\n")
+
ec_dir = os.path.join(portdir, "eclass")
try:
ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
@@ -2883,10 +2880,10 @@ def git_sync_timestamps(settings, portdir):
args = [portage.const.BASH_BINARY, "-c",
"cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
portage._shell_quote(portdir)]
- import subprocess
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
modified_files = set(_unicode_decode(l).rstrip("\n") for l in proc.stdout)
rval = proc.wait()
+ proc.stdout.close()
if rval != os.EX_OK:
return rval
@@ -2990,22 +2987,15 @@ def load_emerge_config(trees=None):
kwargs[k] = v
trees = portage.create_trees(trees=trees, **kwargs)
- for root, root_trees in trees.items():
+ for root_trees in trees.values():
settings = root_trees["vartree"].settings
settings._init_dirs()
setconfig = load_default_config(settings, root_trees)
root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
- settings = trees["/"]["vartree"].settings
-
- for myroot in trees:
- if myroot != "/":
- settings = trees[myroot]["vartree"].settings
- break
-
+ settings = trees[trees._target_eroot]['vartree'].settings
mtimedbfile = os.path.join(settings['EROOT'], portage.CACHE_PATH, "mtimedb")
mtimedb = portage.MtimeDB(mtimedbfile)
- portage.output._init(config_root=settings['PORTAGE_CONFIGROOT'])
QueryCommand._db = trees
return settings, trees, mtimedb
@@ -3015,55 +3005,35 @@ def chk_updated_cfg_files(eroot, config_protect):
portage.util.find_updated_config_files(target_root, config_protect))
for x in result:
- writemsg_level("\n %s " % (colorize("WARN", "* IMPORTANT:"),),
+ writemsg_level("\n %s " % (colorize("WARN", "* " + _("IMPORTANT:"))),
level=logging.INFO, noiselevel=-1)
if not x[1]: # it's a protected file
- writemsg_level("config file '%s' needs updating.\n" % x[0],
+ writemsg_level( _("config file '%s' needs updating.\n") % x[0],
level=logging.INFO, noiselevel=-1)
else: # it's a protected dir
if len(x[1]) == 1:
head, tail = os.path.split(x[1][0])
tail = tail[len("._cfg0000_"):]
fpath = os.path.join(head, tail)
- writemsg_level("config file '%s' needs updating.\n" % fpath,
+ writemsg_level(_("config file '%s' needs updating.\n") % fpath,
level=logging.INFO, noiselevel=-1)
else:
- writemsg_level("%d config files in '%s' need updating.\n" % \
+ writemsg_level( _("%d config files in '%s' need updating.\n") % \
(len(x[1]), x[0]), level=logging.INFO, noiselevel=-1)
if result:
- print(" "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")\
- + " section of the " + bold("emerge"))
- print(" "+yellow("*")+" man page to learn how to update config files.")
+ print(" "+yellow("*")+ " See the "+colorize("INFORM", _("CONFIGURATION FILES"))\
+ + " " + _("section of the") + " " + bold("emerge"))
+ print(" "+yellow("*")+ " " + _("man page to learn how to update config files."))
+
def display_news_notification(root_config, myopts):
- target_root = root_config.settings['EROOT']
- trees = root_config.trees
- settings = trees["vartree"].settings
- portdb = trees["porttree"].dbapi
- vardb = trees["vartree"].dbapi
- NEWS_PATH = os.path.join("metadata", "news")
- UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
- newsReaderDisplay = False
- update = "--pretend" not in myopts
- if "news" not in settings.features:
+ if "news" not in root_config.settings.features:
return
-
- for repo in portdb.getRepositories():
- unreadItems = checkUpdatedNewsItems(
- portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
- if unreadItems:
- if not newsReaderDisplay:
- newsReaderDisplay = True
- print()
- print(colorize("WARN", " * IMPORTANT:"), end=' ')
- print("%s news items need reading for repository '%s'." % (unreadItems, repo))
-
-
- if newsReaderDisplay:
- print(colorize("WARN", " *"), end=' ')
- print("Use " + colorize("GOOD", "eselect news") + " to read news items.")
- print()
+ portdb = root_config.trees["porttree"].dbapi
+ vardb = root_config.trees["vartree"].dbapi
+ news_counts = count_unread_news(portdb, vardb)
+ display_news_notifications(news_counts)
def getgccversion(chost):
"""
@@ -3071,7 +3041,7 @@ def getgccversion(chost):
return: the current in-use gcc version
"""
- gcc_ver_command = 'gcc -dumpversion'
+ gcc_ver_command = ['gcc', '-dumpversion']
gcc_ver_prefix = 'gcc-'
gcc_not_found_error = red(
@@ -3080,44 +3050,42 @@ def getgccversion(chost):
"!!! other terminals also.\n"
)
- mystatus, myoutput = subprocess_getstatusoutput("gcc-config -c")
+ try:
+ proc = subprocess.Popen(["gcc-config", "-c"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ myoutput = None
+ mystatus = 1
+ else:
+ myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
+ mystatus = proc.wait()
if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
- mystatus, myoutput = subprocess_getstatusoutput(
- chost + "-" + gcc_ver_command)
+ try:
+ proc = subprocess.Popen(
+ [chost + "-" + gcc_ver_command[0]] + gcc_ver_command[1:],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ myoutput = None
+ mystatus = 1
+ else:
+ myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
+ mystatus = proc.wait()
if mystatus == os.EX_OK:
return gcc_ver_prefix + myoutput
- mystatus, myoutput = subprocess_getstatusoutput(gcc_ver_command)
+ try:
+ proc = subprocess.Popen(gcc_ver_command,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ myoutput = None
+ mystatus = 1
+ else:
+ myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
+ mystatus = proc.wait()
if mystatus == os.EX_OK:
return gcc_ver_prefix + myoutput
portage.writemsg(gcc_not_found_error, noiselevel=-1)
return "[unavailable]"
-
-def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
- update=False):
- """
- Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
- Returns the number of unread (yet relevent) items.
-
- @param portdb: a portage tree database
- @type portdb: pordbapi
- @param vardb: an installed package database
- @type vardb: vardbapi
- @param NEWS_PATH:
- @type NEWS_PATH:
- @param UNREAD_PATH:
- @type UNREAD_PATH:
- @param repo_id:
- @type repo_id:
- @rtype: Integer
- @returns:
- 1. The number of unread but relevant news items.
-
- """
- from portage.news import NewsManager
- manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
- return manager.getUnreadItems( repo_id, update=update )
-
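
One recurring change in the actions.py hunks above is the prompt exit status: declining a userquery() or quitting at a prompt now exits with 128 + signal.SIGINT rather than os.EX_OK, following the usual shell convention of 128 plus the signal number. A quick check of the resulting value (normally 130):

import signal
print(128 + signal.SIGINT)   # typically prints 130
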
diff --git a/portage_with_autodep/pym/_emerge/actions.pyo b/portage_with_autodep/pym/_emerge/actions.pyo
new file mode 100644
index 0000000..4fbda01
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/actions.pyo
Binary files differ
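
The distcc, ccache and gcc-config hunks above all replace the removed portage.subprocess_getstatusoutput() helper with the same subprocess.Popen pattern. Below is a standalone sketch of that pattern; get_status_output is a hypothetical name (the real code inlines the logic at each call site), but it keeps the (status, output) tuple shape the callers expect and tolerates a missing binary.

import subprocess

def get_status_output(argv):
    """Run argv, returning (exit_status, combined_output),
    or (1, None) when the executable is not installed."""
    try:
        proc = subprocess.Popen(argv,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    except OSError:
        # Tool not found; mimic a failing exit status.
        return (1, None)
    output = proc.communicate()[0].decode("utf-8", "replace").rstrip("\n")
    return (proc.wait(), output)

if __name__ == "__main__":
    status, output = get_status_output(["gcc", "-dumpversion"])
    if status == 0:
        print("gcc-" + output.split("\n", 1)[0])
    else:
        print("[unavailable]")
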
diff --git a/portage_with_autodep/pym/_emerge/chk_updated_cfg_files.py b/portage_with_autodep/pym/_emerge/chk_updated_cfg_files.py
new file mode 100644
index 0000000..9f2ab6f
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/chk_updated_cfg_files.py
@@ -0,0 +1,42 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import logging
+
+import portage
+from portage import os
+from portage.localization import _
+from portage.output import bold, colorize, yellow
+from portage.util import writemsg_level
+
+def chk_updated_cfg_files(eroot, config_protect):
+ target_root = eroot
+ result = list(
+ portage.util.find_updated_config_files(target_root, config_protect))
+
+ for x in result:
+ writemsg_level("\n %s " % (colorize("WARN", "* " + _("IMPORTANT:"))),
+ level=logging.INFO, noiselevel=-1)
+ if not x[1]: # it's a protected file
+ writemsg_level( _("config file '%s' needs updating.\n") % x[0],
+ level=logging.INFO, noiselevel=-1)
+ else: # it's a protected dir
+ if len(x[1]) == 1:
+ head, tail = os.path.split(x[1][0])
+ tail = tail[len("._cfg0000_"):]
+ fpath = os.path.join(head, tail)
+ writemsg_level(_("config file '%s' needs updating.\n") % fpath,
+ level=logging.INFO, noiselevel=-1)
+ else:
+ writemsg_level(
+ _("%d config files in '%s' need updating.\n") % \
+ (len(x[1]), x[0]), level=logging.INFO, noiselevel=-1)
+
+ if result:
+ print(" " + yellow("*") + " See the " +
+ colorize("INFORM", _("CONFIGURATION FILES")) +
+ " " + _("section of the") + " " + bold("emerge"))
+ print(" " + yellow("*") + " " +
+ _("man page to learn how to update config files."))
diff --git a/portage_with_autodep/pym/_emerge/clear_caches.pyo b/portage_with_autodep/pym/_emerge/clear_caches.pyo
new file mode 100644
index 0000000..2e6f010
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/clear_caches.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/countdown.pyo b/portage_with_autodep/pym/_emerge/countdown.pyo
new file mode 100644
index 0000000..537dd27
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/countdown.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/create_depgraph_params.py b/portage_with_autodep/pym/_emerge/create_depgraph_params.py
index 44dceda..8f15c68 100644
--- a/portage_with_autodep/pym/_emerge/create_depgraph_params.py
+++ b/portage_with_autodep/pym/_emerge/create_depgraph_params.py
@@ -21,6 +21,10 @@ def create_depgraph_params(myopts, myaction):
if bdeps is not None:
myparams["bdeps"] = bdeps
+ dynamic_deps = myopts.get("--dynamic-deps")
+ if dynamic_deps is not None:
+ myparams["dynamic_deps"] = dynamic_deps
+
if myaction == "remove":
myparams["remove"] = True
myparams["complete"] = True
@@ -37,6 +41,12 @@ def create_depgraph_params(myopts, myaction):
deep = myopts.get("--deep")
if deep is not None and deep != 0:
myparams["deep"] = deep
+
+ complete_if_new_ver = \
+ myopts.get("--complete-graph-if-new-ver")
+ if complete_if_new_ver is not None:
+ myparams["complete_if_new_ver"] = complete_if_new_ver
+
if ("--complete-graph" in myopts or "--rebuild-if-new-rev" in myopts or
"--rebuild-if-new-ver" in myopts or "--rebuild-if-unbuilt" in myopts):
myparams["complete"] = True
@@ -58,6 +68,16 @@ def create_depgraph_params(myopts, myaction):
'--update' in myopts:
myparams['rebuilt_binaries'] = True
+ binpkg_respect_use = myopts.get('--binpkg-respect-use')
+ if binpkg_respect_use is not None:
+ myparams['binpkg_respect_use'] = binpkg_respect_use
+ elif '--usepkgonly' not in myopts:
+ # If --binpkg-respect-use is not explicitly specified, we enable
+ # the behavior automatically (like requested in bug #297549), as
+ # long as it doesn't strongly conflict with other options that
+ # have been specified.
+ myparams['binpkg_respect_use'] = 'auto'
+
if myopts.get("--selective") == "n":
# --selective=n can be used to remove selective
# behavior that may have been implied by some
diff --git a/portage_with_autodep/pym/_emerge/create_depgraph_params.pyo b/portage_with_autodep/pym/_emerge/create_depgraph_params.pyo
new file mode 100644
index 0000000..834580a
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/create_depgraph_params.pyo
Binary files differ
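
A toy illustration (not the real emerge option handling) of how the new hunks above map command-line options onto depgraph parameters, including the 'auto' default for binpkg_respect_use when --usepkgonly is not in effect:

def build_params(myopts):
    myparams = {}

    dynamic_deps = myopts.get("--dynamic-deps")
    if dynamic_deps is not None:
        myparams["dynamic_deps"] = dynamic_deps

    binpkg_respect_use = myopts.get("--binpkg-respect-use")
    if binpkg_respect_use is not None:
        myparams["binpkg_respect_use"] = binpkg_respect_use
    elif "--usepkgonly" not in myopts:
        # Enabled automatically unless it would conflict with other options.
        myparams["binpkg_respect_use"] = "auto"

    return myparams

print(build_params({"--dynamic-deps": "n", "--usepkg": True}))
# -> {'dynamic_deps': 'n', 'binpkg_respect_use': 'auto'}
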
diff --git a/portage_with_autodep/pym/_emerge/create_world_atom.py b/portage_with_autodep/pym/_emerge/create_world_atom.py
index fa7cffc..35fb7c4 100644
--- a/portage_with_autodep/pym/_emerge/create_world_atom.py
+++ b/portage_with_autodep/pym/_emerge/create_world_atom.py
@@ -21,8 +21,25 @@ def create_world_atom(pkg, args_set, root_config):
sets = root_config.sets
portdb = root_config.trees["porttree"].dbapi
vardb = root_config.trees["vartree"].dbapi
- available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
- for cpv in portdb.match(cp))
+
+ if arg_atom.repo is not None:
+ repos = [arg_atom.repo]
+ else:
+ # Iterate over portdbapi.porttrees, since it's common to
+ # tweak this attribute in order to adjust match behavior.
+ repos = []
+ for tree in portdb.porttrees:
+ repos.append(portdb.repositories.get_name_for_location(tree))
+
+ available_slots = set()
+ for cpv in portdb.match(cp):
+ for repo in repos:
+ try:
+ available_slots.add(portdb.aux_get(cpv, ["SLOT"],
+ myrepo=repo)[0])
+ except KeyError:
+ pass
+
slotted = len(available_slots) > 1 or \
(len(available_slots) == 1 and "0" not in available_slots)
if not slotted:
@@ -64,8 +81,18 @@ def create_world_atom(pkg, args_set, root_config):
# enough to identify a specific slot.
matches = mydb.match(arg_atom)
matched_slots = set()
- for cpv in matches:
- matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
+ if mydb is vardb:
+ for cpv in matches:
+ matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
+ else:
+ for cpv in matches:
+ for repo in repos:
+ try:
+ matched_slots.add(portdb.aux_get(cpv, ["SLOT"],
+ myrepo=repo)[0])
+ except KeyError:
+ pass
+
if len(matched_slots) == 1:
new_world_atom = slot_atom
if arg_atom.repo:
diff --git a/portage_with_autodep/pym/_emerge/create_world_atom.pyo b/portage_with_autodep/pym/_emerge/create_world_atom.pyo
new file mode 100644
index 0000000..ac3fb5d
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/create_world_atom.pyo
Binary files differ
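
For readers unfamiliar with the myrepo-aware aux_get() calls introduced above, here is a condensed, self-contained sketch of the idea; FakePortdb is a mock, not portdbapi, and the package data is invented. The SLOT of each matching cpv is looked up per repository, and repositories that do not carry that cpv raise KeyError and are simply skipped.

class FakePortdb(object):
    _slots = {
        ("dev-lang/python-2.7.3", "gentoo"): "2.7",
        ("dev-lang/python-3.2.3", "gentoo"): "3.2",
        ("dev-lang/python-3.2.3", "my-overlay"): "3.2",
    }

    def match(self, cp):
        return sorted(set(cpv for cpv, _ in self._slots
            if cpv.startswith(cp)))

    def aux_get(self, cpv, keys, myrepo=None):
        try:
            return [self._slots[(cpv, myrepo)]]
        except KeyError:
            # Mirrors portdbapi: the repo does not carry this cpv.
            raise KeyError(cpv)

portdb = FakePortdb()
repos = ["gentoo", "my-overlay"]
available_slots = set()
for cpv in portdb.match("dev-lang/python"):
    for repo in repos:
        try:
            available_slots.add(portdb.aux_get(cpv, ["SLOT"],
                myrepo=repo)[0])
        except KeyError:
            pass

slotted = len(available_slots) > 1 or \
    (len(available_slots) == 1 and "0" not in available_slots)
print(sorted(available_slots), slotted)   # -> ['2.7', '3.2'] True
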
diff --git a/portage_with_autodep/pym/_emerge/depgraph.py b/portage_with_autodep/pym/_emerge/depgraph.py
index 5b48aca..572cea7 100644
--- a/portage_with_autodep/pym/_emerge/depgraph.py
+++ b/portage_with_autodep/pym/_emerge/depgraph.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -18,7 +18,10 @@ from portage import os, OrderedDict
from portage import _unicode_decode, _unicode_encode, _encodings
from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH
from portage.dbapi import dbapi
-from portage.dep import Atom, extract_affecting_use, check_required_use, human_readable_required_use, _repo_separator
+from portage.dbapi.dep_expand import dep_expand
+from portage.dep import Atom, best_match_to_list, extract_affecting_use, \
+ check_required_use, human_readable_required_use, match_from_list, \
+ _repo_separator
from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use
from portage.exception import InvalidAtom, InvalidDependString, PortageException
from portage.output import colorize, create_color_func, \
@@ -92,15 +95,13 @@ class _frozen_depgraph_config(object):
def __init__(self, settings, trees, myopts, spinner):
self.settings = settings
- self.target_root = settings["ROOT"]
+ self.target_root = settings["EROOT"]
self.myopts = myopts
self.edebug = 0
if settings.get("PORTAGE_DEBUG", "") == "1":
self.edebug = 1
self.spinner = spinner
- self._running_root = trees["/"]["root_config"]
- self._opts_no_restart = frozenset(["--buildpkgonly",
- "--fetchonly", "--fetch-all-uri", "--pretend"])
+ self._running_root = trees[trees._running_eroot]["root_config"]
self.pkgsettings = {}
self.trees = {}
self._trees_orig = trees
@@ -108,6 +109,7 @@ class _frozen_depgraph_config(object):
# All Package instances
self._pkg_cache = {}
self._highest_license_masked = {}
+ dynamic_deps = myopts.get("--dynamic-deps", "y") != "n"
for myroot in trees:
self.trees[myroot] = {}
# Create a RootConfig instance that references
@@ -121,7 +123,8 @@ class _frozen_depgraph_config(object):
self.trees[myroot]["vartree"] = \
FakeVartree(trees[myroot]["root_config"],
pkg_cache=self._pkg_cache,
- pkg_root_config=self.roots[myroot])
+ pkg_root_config=self.roots[myroot],
+ dynamic_deps=dynamic_deps)
self.pkgsettings[myroot] = portage.config(
clone=self.trees[myroot]["vartree"].settings)
@@ -174,7 +177,7 @@ class _rebuild_config(object):
rebuild_exclude = self._frozen_config.rebuild_exclude
rebuild_ignore = self._frozen_config.rebuild_ignore
if (self.rebuild and isinstance(parent, Package) and
- parent.built and (priority.buildtime or priority.runtime) and
+ parent.built and priority.buildtime and
isinstance(dep_pkg, Package) and
not rebuild_exclude.findAtomForPackage(parent) and
not rebuild_ignore.findAtomForPackage(dep_pkg)):
@@ -209,66 +212,63 @@ class _rebuild_config(object):
return True
- def _trigger_rebuild(self, parent, build_deps, runtime_deps):
+ def _trigger_rebuild(self, parent, build_deps):
root_slot = (parent.root, parent.slot_atom)
if root_slot in self.rebuild_list:
return False
trees = self._frozen_config.trees
- children = set(build_deps).intersection(runtime_deps)
reinstall = False
- for slot_atom in children:
- kids = set([build_deps[slot_atom], runtime_deps[slot_atom]])
- for dep_pkg in kids:
- dep_root_slot = (dep_pkg.root, slot_atom)
- if self._needs_rebuild(dep_pkg):
+ for slot_atom, dep_pkg in build_deps.items():
+ dep_root_slot = (dep_pkg.root, slot_atom)
+ if self._needs_rebuild(dep_pkg):
+ self.rebuild_list.add(root_slot)
+ return True
+ elif ("--usepkg" in self._frozen_config.myopts and
+ (dep_root_slot in self.reinstall_list or
+ dep_root_slot in self.rebuild_list or
+ not dep_pkg.installed)):
+
+ # A direct rebuild dependency is being installed. We
+ # should update the parent as well to the latest binary,
+ # if that binary is valid.
+ #
+ # To validate the binary, we check whether all of the
+ # rebuild dependencies are present on the same binhost.
+ #
+ # 1) If parent is present on the binhost, but one of its
+ # rebuild dependencies is not, then the parent should
+ # be rebuilt from source.
+ # 2) Otherwise, the parent binary is assumed to be valid,
+ # because all of its rebuild dependencies are
+ # consistent.
+ bintree = trees[parent.root]["bintree"]
+ uri = bintree.get_pkgindex_uri(parent.cpv)
+ dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
+ bindb = bintree.dbapi
+ if self.rebuild_if_new_ver and uri and uri != dep_uri:
+ cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
+ for cpv in bindb.match(dep_pkg.slot_atom):
+ if cpv_norev == catpkgsplit(cpv)[:-1]:
+ dep_uri = bintree.get_pkgindex_uri(cpv)
+ if uri == dep_uri:
+ break
+ if uri and uri != dep_uri:
+ # 1) Remote binary package is invalid because it was
+ # built without dep_pkg. Force rebuild.
self.rebuild_list.add(root_slot)
return True
- elif ("--usepkg" in self._frozen_config.myopts and
- (dep_root_slot in self.reinstall_list or
- dep_root_slot in self.rebuild_list or
- not dep_pkg.installed)):
-
- # A direct rebuild dependency is being installed. We
- # should update the parent as well to the latest binary,
- # if that binary is valid.
- #
- # To validate the binary, we check whether all of the
- # rebuild dependencies are present on the same binhost.
- #
- # 1) If parent is present on the binhost, but one of its
- # rebuild dependencies is not, then the parent should
- # be rebuilt from source.
- # 2) Otherwise, the parent binary is assumed to be valid,
- # because all of its rebuild dependencies are
- # consistent.
- bintree = trees[parent.root]["bintree"]
- uri = bintree.get_pkgindex_uri(parent.cpv)
- dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
- bindb = bintree.dbapi
- if self.rebuild_if_new_ver and uri and uri != dep_uri:
- cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
- for cpv in bindb.match(dep_pkg.slot_atom):
- if cpv_norev == catpkgsplit(cpv)[:-1]:
- dep_uri = bintree.get_pkgindex_uri(cpv)
- if uri == dep_uri:
- break
- if uri and uri != dep_uri:
- # 1) Remote binary package is invalid because it was
- # built without dep_pkg. Force rebuild.
- self.rebuild_list.add(root_slot)
- return True
- elif (parent.installed and
- root_slot not in self.reinstall_list):
- inst_build_time = parent.metadata.get("BUILD_TIME")
- try:
- bin_build_time, = bindb.aux_get(parent.cpv,
- ["BUILD_TIME"])
- except KeyError:
- continue
- if bin_build_time != inst_build_time:
- # 2) Remote binary package is valid, and local package
- # is not up to date. Force reinstall.
- reinstall = True
+ elif (parent.installed and
+ root_slot not in self.reinstall_list):
+ inst_build_time = parent.metadata.get("BUILD_TIME")
+ try:
+ bin_build_time, = bindb.aux_get(parent.cpv,
+ ["BUILD_TIME"])
+ except KeyError:
+ continue
+ if bin_build_time != inst_build_time:
+ # 2) Remote binary package is valid, and local package
+ # is not up to date. Force reinstall.
+ reinstall = True
if reinstall:
self.reinstall_list.add(root_slot)
return reinstall
@@ -282,31 +282,15 @@ class _rebuild_config(object):
need_restart = False
graph = self._graph
build_deps = {}
- runtime_deps = {}
- leaf_nodes = deque(graph.leaf_nodes())
-
- def ignore_non_runtime(priority):
- return not priority.runtime
- def ignore_non_buildtime(priority):
- return not priority.buildtime
+ leaf_nodes = deque(graph.leaf_nodes())
# Trigger rebuilds bottom-up (starting with the leaves) so that parents
# will always know which children are being rebuilt.
while graph:
if not leaf_nodes:
- # We're interested in intersection of buildtime and runtime,
- # so ignore edges that do not contain both.
- leaf_nodes.extend(graph.leaf_nodes(
- ignore_priority=ignore_non_runtime))
- if not leaf_nodes:
- leaf_nodes.extend(graph.leaf_nodes(
- ignore_priority=ignore_non_buildtime))
- if not leaf_nodes:
- # We'll have to drop an edge that is both
- # buildtime and runtime. This should be
- # quite rare.
- leaf_nodes.append(graph.order[-1])
+ # We'll have to drop an edge. This should be quite rare.
+ leaf_nodes.append(graph.order[-1])
node = leaf_nodes.popleft()
if node not in graph:
@@ -315,32 +299,23 @@ class _rebuild_config(object):
slot_atom = node.slot_atom
# Remove our leaf node from the graph, keeping track of deps.
- parents = graph.nodes[node][1].items()
+ parents = graph.parent_nodes(node)
graph.remove(node)
node_build_deps = build_deps.get(node, {})
- node_runtime_deps = runtime_deps.get(node, {})
- for parent, priorities in parents:
+ for parent in parents:
if parent == node:
# Ignore a direct cycle.
continue
parent_bdeps = build_deps.setdefault(parent, {})
- parent_rdeps = runtime_deps.setdefault(parent, {})
- for priority in priorities:
- if priority.buildtime:
- parent_bdeps[slot_atom] = node
- if priority.runtime:
- parent_rdeps[slot_atom] = node
- if slot_atom in parent_bdeps and slot_atom in parent_rdeps:
- parent_rdeps.update(node_runtime_deps)
+ parent_bdeps[slot_atom] = node
if not graph.child_nodes(parent):
leaf_nodes.append(parent)
# Trigger rebuilds for our leaf node. Because all of our children
- # have been processed, build_deps and runtime_deps will be
- # completely filled in, and self.rebuild_list / self.reinstall_list
- # will tell us whether any of our children need to be rebuilt or
- # reinstalled.
- if self._trigger_rebuild(node, node_build_deps, node_runtime_deps):
+ # have been processed, the build_deps will be completely filled in,
+ # and self.rebuild_list / self.reinstall_list will tell us whether
+ # any of our children need to be rebuilt or reinstalled.
+ if self._trigger_rebuild(node, node_build_deps):
need_restart = True
return need_restart
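
The loop above relies on a bottom-up ordering so that every child is processed before its parents. A self-contained toy (plain dicts rather than portage's digraph; node names are invented) that produces such an ordering:

    from collections import deque

    # Build parent links and per-node child counts for a tiny DAG.
    children = {"A": ["B", "C"], "B": ["D"], "C": [], "D": []}
    parents = {}
    for p, cs in children.items():
        for c in cs:
            parents.setdefault(c, []).append(p)

    remaining = {n: len(cs) for n, cs in children.items()}
    leaves = deque(n for n, cnt in remaining.items() if cnt == 0)
    order = []
    while leaves:
        node = leaves.popleft()
        order.append(node)
        # A parent becomes a leaf once all of its children are done.
        for parent in parents.get(node, []):
            remaining[parent] -= 1
            if remaining[parent] == 0:
                leaves.append(parent)
    print(order)  # e.g. ['C', 'D', 'B', 'A'] - every child precedes its parent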
@@ -416,6 +391,11 @@ class _dynamic_depgraph_config(object):
self._ignored_deps = []
self._highest_pkg_cache = {}
+ # Binary packages that have been rejected because their USE
+ # didn't match the user's config. It maps packages to a set
+ # of flags causing the rejection.
+ self.ignored_binaries = {}
+
self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
self._needed_license_changes = backtrack_parameters.needed_license_changes
@@ -536,9 +516,15 @@ class depgraph(object):
for myroot in self._frozen_config.trees:
+ dynamic_deps = self._dynamic_config.myparams.get(
+ "dynamic_deps", "y") != "n"
preload_installed_pkgs = \
"--nodeps" not in self._frozen_config.myopts
+ if self._frozen_config.myopts.get("--root-deps") is not None and \
+ myroot != self._frozen_config.target_root:
+ continue
+
fake_vartree = self._frozen_config.trees[myroot]["vartree"]
if not fake_vartree.dbapi:
# This needs to be called for the first depgraph, but not for
@@ -557,8 +543,11 @@ class depgraph(object):
for pkg in vardb:
self._spinner_update()
- # This triggers metadata updates via FakeVartree.
- vardb.aux_get(pkg.cpv, [])
+ if dynamic_deps:
+ # This causes FakeVartree to update the
+ # Package instance dependencies via
+ # PackageVirtualDbapi.aux_update()
+ vardb.aux_get(pkg.cpv, [])
fakedb.cpv_inject(pkg)
self._dynamic_config._vdb_loaded = True
@@ -567,6 +556,67 @@ class depgraph(object):
if self._frozen_config.spinner:
self._frozen_config.spinner.update()
+ def _show_ignored_binaries(self):
+ """
+ Show binaries that have been ignored because their USE didn't
+ match the user's config.
+ """
+ if not self._dynamic_config.ignored_binaries \
+ or '--quiet' in self._frozen_config.myopts \
+ or self._dynamic_config.myparams.get(
+ "binpkg_respect_use") in ("y", "n"):
+ return
+
+ for pkg in list(self._dynamic_config.ignored_binaries):
+
+ selected_pkg = self._dynamic_config.mydbapi[pkg.root
+ ].match_pkgs(pkg.slot_atom)
+
+ if not selected_pkg:
+ continue
+
+ selected_pkg = selected_pkg[-1]
+ if selected_pkg > pkg:
+ self._dynamic_config.ignored_binaries.pop(pkg)
+ continue
+
+ if selected_pkg.installed and \
+ selected_pkg.cpv == pkg.cpv and \
+ selected_pkg.metadata.get('BUILD_TIME') == \
+ pkg.metadata.get('BUILD_TIME'):
+ # We don't care about ignored binaries when an
+ # identical installed instance is selected to
+ # fill the slot.
+ self._dynamic_config.ignored_binaries.pop(pkg)
+ continue
+
+ if not self._dynamic_config.ignored_binaries:
+ return
+
+ self._show_merge_list()
+
+ writemsg("\n!!! The following binary packages have been ignored " + \
+			"due to non-matching USE:\n\n", noiselevel=-1)
+
+ for pkg, flags in self._dynamic_config.ignored_binaries.items():
+ writemsg(" =%s" % pkg.cpv, noiselevel=-1)
+ if pkg.root_config.settings["ROOT"] != "/":
+ writemsg(" for %s" % (pkg.root,), noiselevel=-1)
+ writemsg("\n use flag(s): %s\n" % ", ".join(sorted(flags)),
+ noiselevel=-1)
+
+ msg = [
+ "",
+ "NOTE: The --binpkg-respect-use=n option will prevent emerge",
+ " from ignoring these binary packages if possible.",
+ " Using --binpkg-respect-use=y will silence this warning."
+ ]
+
+ for line in msg:
+ if line:
+ line = colorize("INFORM", line)
+ writemsg(line + "\n", noiselevel=-1)
+
def _show_missed_update(self):
# In order to minimize noise, show only the highest
@@ -578,6 +628,10 @@ class depgraph(object):
# Exclude installed here since we only
# want to show available updates.
continue
+ chosen_pkg = self._dynamic_config.mydbapi[pkg.root
+ ].match_pkgs(pkg.slot_atom)
+ if not chosen_pkg or chosen_pkg[-1] >= pkg:
+ continue
k = (pkg.root, pkg.slot_atom)
if k in missed_updates:
other_pkg, mask_type, parent_atoms = missed_updates[k]
@@ -613,6 +667,7 @@ class depgraph(object):
if not missed_updates:
return
+ self._show_merge_list()
backtrack_masked = []
for pkg, parent_atoms in missed_updates:
@@ -630,7 +685,7 @@ class depgraph(object):
"due to unsatisfied dependencies:\n\n", noiselevel=-1)
writemsg(str(pkg.slot_atom), noiselevel=-1)
- if pkg.root != '/':
+ if pkg.root_config.settings["ROOT"] != "/":
writemsg(" for %s" % (pkg.root,), noiselevel=-1)
writemsg("\n", noiselevel=-1)
@@ -646,7 +701,7 @@ class depgraph(object):
"!!! triggered by backtracking:\n\n", noiselevel=-1)
for pkg, parent_atoms in backtrack_masked:
writemsg(str(pkg.slot_atom), noiselevel=-1)
- if pkg.root != '/':
+ if pkg.root_config.settings["ROOT"] != "/":
writemsg(" for %s" % (pkg.root,), noiselevel=-1)
writemsg("\n", noiselevel=-1)
@@ -655,6 +710,7 @@ class depgraph(object):
if not missed_updates:
return
+ self._show_merge_list()
msg = []
msg.append("\nWARNING: One or more updates have been " + \
"skipped due to a dependency conflict:\n\n")
@@ -662,7 +718,7 @@ class depgraph(object):
indent = " "
for pkg, parent_atoms in missed_updates:
msg.append(str(pkg.slot_atom))
- if pkg.root != '/':
+ if pkg.root_config.settings["ROOT"] != "/":
msg.append(" for %s" % (pkg.root,))
msg.append("\n\n")
@@ -777,19 +833,28 @@ class depgraph(object):
else:
self._dynamic_config._slot_conflict_parent_atoms.add(parent_atom)
- def _reinstall_for_flags(self, forced_flags,
+ def _reinstall_for_flags(self, pkg, forced_flags,
orig_use, orig_iuse, cur_use, cur_iuse):
"""Return a set of flags that trigger reinstallation, or None if there
are no such flags."""
- if "--newuse" in self._frozen_config.myopts or \
- "--binpkg-respect-use" in self._frozen_config.myopts:
+
+ # binpkg_respect_use: Behave like newuse by default. If newuse is
+ # False and changed_use is True, then behave like changed_use.
+ binpkg_respect_use = (pkg.built and
+ self._dynamic_config.myparams.get("binpkg_respect_use")
+ in ("y", "auto"))
+ newuse = "--newuse" in self._frozen_config.myopts
+ changed_use = "changed-use" == self._frozen_config.myopts.get("--reinstall")
+
+ if newuse or (binpkg_respect_use and not changed_use):
flags = set(orig_iuse.symmetric_difference(
cur_iuse).difference(forced_flags))
flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
cur_iuse.intersection(cur_use)))
if flags:
return flags
- elif "changed-use" == self._frozen_config.myopts.get("--reinstall"):
+
+ elif changed_use or binpkg_respect_use:
flags = orig_iuse.intersection(orig_use).symmetric_difference(
cur_iuse.intersection(cur_use))
if flags:
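
A toy run of the flag comparison performed by _reinstall_for_flags() above, with made-up values: a flag triggers reinstallation if it moved in or out of IUSE, or if its enabled state differs between the two builds.

    forced_flags = set()
    orig_iuse, orig_use = {"ssl", "gtk"}, {"ssl"}
    cur_iuse, cur_use = {"ssl", "qt4"}, {"ssl", "qt4"}

    # Flags added to or dropped from IUSE (minus forced flags) ...
    flags = set(orig_iuse.symmetric_difference(cur_iuse).difference(forced_flags))
    # ... plus flags whose enabled state changed between the builds.
    flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
        cur_iuse.intersection(cur_use)))
    print(sorted(flags))  # ['gtk', 'qt4']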
@@ -827,7 +892,7 @@ class depgraph(object):
relationships from nested sets
@type add_to_digraph: Boolean
@rtype: Iterable
- @returns: All args given in the input together with additional
+ @return: All args given in the input together with additional
SetArg instances that are generated from nested sets
"""
@@ -876,8 +941,6 @@ class depgraph(object):
debug = "--debug" in self._frozen_config.myopts
buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
nodeps = "--nodeps" in self._frozen_config.myopts
- deep = self._dynamic_config.myparams.get("deep", 0)
- recurse = deep is True or dep.depth <= deep
if dep.blocker:
if not buildpkgonly and \
not nodeps and \
@@ -922,7 +985,7 @@ class depgraph(object):
# infinite backtracking loop.
if self._dynamic_config._allow_backtracking:
if dep.parent in self._dynamic_config._runtime_pkg_mask:
- if "--debug" in self._frozen_config.myopts:
+ if debug:
writemsg(
"!!! backtracking loop detected: %s %s\n" % \
(dep.parent,
@@ -937,7 +1000,7 @@ class depgraph(object):
if dep_pkg is None:
self._dynamic_config._backtrack_infos["missing dependency"] = dep
self._dynamic_config._need_restart = True
- if "--debug" in self._frozen_config.myopts:
+ if debug:
msg = []
msg.append("")
msg.append("")
@@ -1009,17 +1072,18 @@ class depgraph(object):
else:
# Display the specific atom from SetArg or
# Package types.
+ uneval = ""
+ if dep.atom is not dep.atom.unevaluated_atom:
+ uneval = " (%s)" % (dep.atom.unevaluated_atom,)
writemsg_level(
- "%s%s required by %s\n" %
- ("Parent Dep:".ljust(15), dep.atom, myparent),
+ "%s%s%s required by %s\n" %
+ ("Parent Dep:".ljust(15), dep.atom, uneval, myparent),
level=logging.DEBUG, noiselevel=-1)
# Ensure that the dependencies of the same package
# are never processed more than once.
previously_added = pkg in self._dynamic_config.digraph
- # select the correct /var database that we'll be checking against
- vardbapi = self._frozen_config.trees[pkg.root]["vartree"].dbapi
pkgsettings = self._frozen_config.pkgsettings[pkg.root]
arg_atoms = None
@@ -1036,7 +1100,7 @@ class depgraph(object):
# package selection, since we want to prompt the user
# for USE adjustment rather than have REQUIRED_USE
# affect package selection and || dep choices.
- if not pkg.built and pkg.metadata["REQUIRED_USE"] and \
+ if not pkg.built and pkg.metadata.get("REQUIRED_USE") and \
eapi_has_required_use(pkg.metadata["EAPI"]):
required_use_is_sat = check_required_use(
pkg.metadata["REQUIRED_USE"],
@@ -1055,7 +1119,8 @@ class depgraph(object):
if atom is None:
atom = Atom("=" + pkg.cpv)
self._dynamic_config._unsatisfied_deps_for_display.append(
- ((pkg.root, atom), {"myparent":dep.parent}))
+ ((pkg.root, atom),
+ {"myparent" : dep.parent, "show_req_use" : pkg}))
self._dynamic_config._skip_restart = True
return 0
@@ -1146,11 +1211,6 @@ class depgraph(object):
all_match = False
break
- if to_be_selected >= to_be_masked:
- # We only care about the parent atoms
- # when they trigger a downgrade.
- parent_atoms = set()
-
fallback_data.append((to_be_masked, parent_atoms))
if all_match:
@@ -1244,7 +1304,7 @@ class depgraph(object):
settings.unlock()
settings.setinst(pkg.cpv, pkg.metadata)
settings.lock()
- except portage.exception.InvalidDependString as e:
+ except portage.exception.InvalidDependString:
if not pkg.installed:
# should have been masked before it was selected
raise
@@ -1265,12 +1325,11 @@ class depgraph(object):
self._dynamic_config.digraph.add(pkg, parent, priority=priority)
self._add_parent_atom(pkg, parent_atom)
- """ This section determines whether we go deeper into dependencies or not.
- We want to go deeper on a few occasions:
- Installing package A, we need to make sure package A's deps are met.
- emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
- If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
- """
+ # This section determines whether we go deeper into dependencies or not.
+ # We want to go deeper on a few occasions:
+ # Installing package A, we need to make sure package A's deps are met.
+ # emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
+ # If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
if arg_atoms:
depth = 0
pkg.depth = depth
@@ -1318,13 +1377,8 @@ class depgraph(object):
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
- mytype = pkg.type_name
myroot = pkg.root
- mykey = pkg.cpv
metadata = pkg.metadata
- myuse = self._pkg_use_enabled(pkg)
- jbigkey = pkg
- depth = pkg.depth + 1
removal_action = "remove" in self._dynamic_config.myparams
edepend={}
@@ -1361,7 +1415,7 @@ class depgraph(object):
if removal_action:
depend_root = myroot
else:
- depend_root = "/"
+ depend_root = self._frozen_config._running_root.root
root_deps = self._frozen_config.myopts.get("--root-deps")
if root_deps is not None:
if root_deps is True:
@@ -1388,7 +1442,6 @@ class depgraph(object):
)
debug = "--debug" in self._frozen_config.myopts
- strict = mytype != "installed"
for dep_root, dep_string, dep_priority in deps:
if not dep_string:
@@ -1481,7 +1534,7 @@ class depgraph(object):
selected_atoms = self._select_atoms(dep_root,
dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg,
strict=strict, priority=dep_priority)
- except portage.exception.InvalidDependString as e:
+ except portage.exception.InvalidDependString:
if pkg.installed:
self._dynamic_config._masked_installed.add(pkg)
return 1
@@ -1731,7 +1784,7 @@ class depgraph(object):
pkg_atom_map.setdefault(pkg, set()).add(atom)
cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)
- for cp, pkgs in cp_pkg_map.items():
+ for pkgs in cp_pkg_map.values():
if len(pkgs) < 2:
for pkg in pkgs:
for atom in pkg_atom_map[pkg]:
@@ -1807,7 +1860,7 @@ class depgraph(object):
i += 1
else:
try:
- x = portage.dep.Atom(x)
+ x = portage.dep.Atom(x, eapi=pkg.metadata["EAPI"])
except portage.exception.InvalidAtom:
if not pkg.installed:
raise portage.exception.InvalidDependString(
@@ -1855,7 +1908,7 @@ class depgraph(object):
@param atom_without_category: an atom without a category component
@type atom_without_category: String
@rtype: list
- @returns: a list of atoms containing categories (possibly empty)
+ @return: a list of atoms containing categories (possibly empty)
"""
null_cp = portage.dep_getkey(insert_category_into_atom(
atom_without_category, "null"))
@@ -1886,7 +1939,6 @@ class depgraph(object):
def _iter_atoms_for_pkg(self, pkg):
depgraph_sets = self._dynamic_config.sets[pkg.root]
atom_arg_map = depgraph_sets.atom_arg_map
- root_config = self._frozen_config.roots[pkg.root]
for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg):
if atom.cp != pkg.cp and \
self._have_new_virt(pkg.root, atom.cp):
@@ -1923,13 +1975,13 @@ class depgraph(object):
sets = root_config.sets
depgraph_sets = self._dynamic_config.sets[root_config.root]
myfavorites=[]
- myroot = self._frozen_config.target_root
- dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
- vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
- real_vardb = self._frozen_config._trees_orig[myroot]["vartree"].dbapi
- portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
- bindb = self._frozen_config.trees[myroot]["bintree"].dbapi
- pkgsettings = self._frozen_config.pkgsettings[myroot]
+ eroot = root_config.root
+ root = root_config.settings['ROOT']
+ vardb = self._frozen_config.trees[eroot]["vartree"].dbapi
+ real_vardb = self._frozen_config._trees_orig[eroot]["vartree"].dbapi
+ portdb = self._frozen_config.trees[eroot]["porttree"].dbapi
+ bindb = self._frozen_config.trees[eroot]["bintree"].dbapi
+ pkgsettings = self._frozen_config.pkgsettings[eroot]
args = []
onlydeps = "--onlydeps" in self._frozen_config.myopts
lookup_owners = []
@@ -1950,7 +2002,7 @@ class depgraph(object):
mytbz2=portage.xpak.tbz2(x)
mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
if os.path.realpath(x) != \
- os.path.realpath(self._frozen_config.trees[myroot]["bintree"].getname(mykey)):
+ os.path.realpath(bindb.bintree.getname(mykey)):
writemsg(colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n\n"), noiselevel=-1)
self._dynamic_config._skip_restart = True
return 0, myfavorites
@@ -1996,9 +2048,9 @@ class depgraph(object):
args.append(PackageArg(arg=x, package=pkg,
root_config=root_config))
elif x.startswith(os.path.sep):
- if not x.startswith(myroot):
+ if not x.startswith(eroot):
portage.writemsg(("\n\n!!! '%s' does not start with" + \
- " $ROOT.\n") % x, noiselevel=-1)
+ " $EROOT.\n") % x, noiselevel=-1)
self._dynamic_config._skip_restart = True
return 0, []
# Queue these up since it's most efficient to handle
@@ -2007,9 +2059,9 @@ class depgraph(object):
elif x.startswith("." + os.sep) or \
x.startswith(".." + os.sep):
f = os.path.abspath(x)
- if not f.startswith(myroot):
+ if not f.startswith(eroot):
portage.writemsg(("\n\n!!! '%s' (resolved from '%s') does not start with" + \
- " $ROOT.\n") % (f, x), noiselevel=-1)
+ " $EROOT.\n") % (f, x), noiselevel=-1)
self._dynamic_config._skip_restart = True
return 0, []
lookup_owners.append(f)
@@ -2126,7 +2178,7 @@ class depgraph(object):
for x in lookup_owners:
if not search_for_multiple and os.path.isdir(x):
search_for_multiple = True
- relative_paths.append(x[len(myroot)-1:])
+ relative_paths.append(x[len(root)-1:])
owners = set()
for pkg, relative_path in \
@@ -2526,24 +2578,36 @@ class depgraph(object):
# account for masking and USE settings.
_autounmask_backup = self._dynamic_config._autounmask
self._dynamic_config._autounmask = False
- mytrees["pkg_use_enabled"] = self._pkg_use_enabled
+ # backup state for restoration, in case of recursive
+ # calls to this method
+ backup_state = mytrees.copy()
try:
+ # clear state from previous call, in case this
+ # call is recursive (we have a backup, that we
+ # will use to restore it later)
+ mytrees.pop("pkg_use_enabled", None)
+ mytrees.pop("parent", None)
+ mytrees.pop("atom_graph", None)
+ mytrees.pop("priority", None)
+
+ mytrees["pkg_use_enabled"] = self._pkg_use_enabled
if parent is not None:
- trees[root]["parent"] = parent
- trees[root]["atom_graph"] = atom_graph
+ mytrees["parent"] = parent
+ mytrees["atom_graph"] = atom_graph
if priority is not None:
- trees[root]["priority"] = priority
+ mytrees["priority"] = priority
+
mycheck = portage.dep_check(depstring, None,
pkgsettings, myuse=myuse,
myroot=root, trees=trees)
finally:
+ # restore state
self._dynamic_config._autounmask = _autounmask_backup
- del mytrees["pkg_use_enabled"]
- if parent is not None:
- trees[root].pop("parent")
- trees[root].pop("atom_graph")
- if priority is not None:
- trees[root].pop("priority")
+ mytrees.pop("pkg_use_enabled", None)
+ mytrees.pop("parent", None)
+ mytrees.pop("atom_graph", None)
+ mytrees.pop("priority", None)
+ mytrees.update(backup_state)
if not mycheck[0]:
raise portage.exception.InvalidDependString(mycheck[1])
if parent is None:
@@ -2637,6 +2701,38 @@ class depgraph(object):
continue
yield atom
+ def _virt_deps_visible(self, pkg, ignore_use=False):
+ """
+ Assumes pkg is a virtual package. Traverses virtual deps recursively
+ and returns True if all deps are visible, False otherwise. This is
+ useful for checking if it will be necessary to expand virtual slots,
+ for cases like bug #382557.
+ """
+ try:
+ rdepend = self._select_atoms(
+ pkg.root, pkg.metadata.get("RDEPEND", ""),
+ myuse=self._pkg_use_enabled(pkg),
+ parent=pkg, priority=self._priority(runtime=True))
+ except InvalidDependString as e:
+ if not pkg.installed:
+ raise
+ writemsg_level("!!! Invalid RDEPEND in " + \
+ "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
+ (pkg.root, pkg.cpv, e),
+ noiselevel=-1, level=logging.ERROR)
+ return False
+
+ for atoms in rdepend.values():
+ for atom in atoms:
+ if ignore_use:
+ atom = atom.without_use
+ pkg, existing = self._select_package(
+ pkg.root, atom)
+ if pkg is None or not self._pkg_visibility_check(pkg):
+ return False
+
+ return True
+
def _get_dep_chain(self, start_node, target_atom=None,
unsatisfied_dependency=False):
"""
@@ -2652,6 +2748,7 @@ class depgraph(object):
node = start_node
child = None
all_parents = self._dynamic_config._parent_atoms
+ graph = self._dynamic_config.digraph
if target_atom is not None and isinstance(node, Package):
affecting_use = set()
@@ -2676,11 +2773,46 @@ class depgraph(object):
dep_chain.append((pkg_name, node.type_name))
+
+		# To build a dep chain for the given package we take
+		# "random" parents from the digraph, except for the
+		# first package, because we want a parent that forced
+		# the corresponding change (i.e. '>=foo-2' instead of 'foo').
+
+ traversed_nodes.add(start_node)
+
+ start_node_parent_atoms = {}
+ for ppkg, patom in all_parents.get(node, []):
+			# Get a list of suitable atoms. For use deps
+			# (aka unsatisfied_dependency is not None) we
+			# require that start_node does not match the atom.
+ if not unsatisfied_dependency or \
+ not InternalPackageSet(initial_atoms=(patom,)).findAtomForPackage(start_node):
+ start_node_parent_atoms.setdefault(patom, []).append(ppkg)
+
+ if start_node_parent_atoms:
+ # If there are parents in all_parents then use one of them.
+ # If not, then this package got pulled in by an Arg and
+ # will be correctly handled by the code that handles later
+ # packages in the dep chain.
+ best_match = best_match_to_list(node.cpv, start_node_parent_atoms)
+
+ child = node
+ for ppkg in start_node_parent_atoms[best_match]:
+ node = ppkg
+ if ppkg in self._dynamic_config._initial_arg_list:
+ # Stop if reached the top level of the dep chain.
+ break
+
while node is not None:
traversed_nodes.add(node)
- if isinstance(node, DependencyArg):
- if self._dynamic_config.digraph.parent_nodes(node):
+ if node not in graph:
+ # The parent is not in the graph due to backtracking.
+ break
+
+ elif isinstance(node, DependencyArg):
+ if graph.parent_nodes(node):
node_type = "set"
else:
node_type = "argument"
@@ -2689,17 +2821,29 @@ class depgraph(object):
elif node is not start_node:
for ppkg, patom in all_parents[child]:
if ppkg == node:
+ if child is start_node and unsatisfied_dependency and \
+ InternalPackageSet(initial_atoms=(patom,)).findAtomForPackage(child):
+ # This atom is satisfied by child, there must be another atom.
+ continue
atom = patom.unevaluated_atom
break
dep_strings = set()
- for priority in self._dynamic_config.digraph.nodes[node][0][child]:
- if priority.buildtime:
- dep_strings.add(node.metadata["DEPEND"])
- if priority.runtime:
- dep_strings.add(node.metadata["RDEPEND"])
- if priority.runtime_post:
- dep_strings.add(node.metadata["PDEPEND"])
+ priorities = graph.nodes[node][0].get(child)
+ if priorities is None:
+ # This edge comes from _parent_atoms and was not added to
+ # the graph, and _parent_atoms does not contain priorities.
+ dep_strings.add(node.metadata["DEPEND"])
+ dep_strings.add(node.metadata["RDEPEND"])
+ dep_strings.add(node.metadata["PDEPEND"])
+ else:
+ for priority in priorities:
+ if priority.buildtime:
+ dep_strings.add(node.metadata["DEPEND"])
+ if priority.runtime:
+ dep_strings.add(node.metadata["RDEPEND"])
+ if priority.runtime_post:
+ dep_strings.add(node.metadata["PDEPEND"])
affecting_use = set()
for dep_str in dep_strings:
@@ -2726,10 +2870,6 @@ class depgraph(object):
dep_chain.append((pkg_name, node.type_name))
- if node not in self._dynamic_config.digraph:
- # The parent is not in the graph due to backtracking.
- break
-
# When traversing to parents, prefer arguments over packages
# since arguments are root nodes. Never traverse the same
# package twice, in order to prevent an infinite loop.
@@ -2791,7 +2931,7 @@ class depgraph(object):
def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
- check_backtrack=False, check_autounmask_breakage=False):
+ check_backtrack=False, check_autounmask_breakage=False, show_req_use=None):
"""
When check_backtrack=True, no output is produced and
the method either returns or raises _backtrack_mask if
@@ -2810,14 +2950,13 @@ class depgraph(object):
xinfo = _unicode_decode('"%s"') % (myparent,)
# Discard null/ from failed cpv_expand category expansion.
xinfo = xinfo.replace("null/", "")
- if root != "/":
+ if root != self._frozen_config._running_root.root:
xinfo = "%s for %s" % (xinfo, root)
masked_packages = []
missing_use = []
missing_use_adjustable = set()
required_use_unsatisfied = []
masked_pkg_instances = set()
- missing_licenses = []
have_eapi_mask = False
pkgsettings = self._frozen_config.pkgsettings[root]
root_config = self._frozen_config.roots[root]
@@ -2828,7 +2967,6 @@ class depgraph(object):
for db, pkg_type, built, installed, db_keys in dbs:
if installed:
continue
- match = db.match
if hasattr(db, "xmatch"):
cpv_list = db.xmatch("match-all-cpv-only", atom.without_use)
else:
@@ -2854,12 +2992,20 @@ class depgraph(object):
repo = metadata.get('repository')
pkg = self._pkg(cpv, pkg_type, root_config,
installed=installed, myrepo=repo)
- if not atom_set.findAtomForPackage(pkg,
- modified_use=self._pkg_use_enabled(pkg)):
- continue
# pkg.metadata contains calculated USE for ebuilds,
# required later for getMissingLicenses.
metadata = pkg.metadata
+ if pkg.invalid:
+ # Avoid doing any operations with packages that
+ # have invalid metadata. It would be unsafe at
+ # least because it could trigger unhandled
+ # exceptions in places like check_required_use().
+ masked_packages.append(
+ (root_config, pkgsettings, cpv, repo, metadata, mreasons))
+ continue
+ if not atom_set.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ continue
if pkg in self._dynamic_config._runtime_pkg_mask:
backtrack_reasons = \
self._dynamic_config._runtime_pkg_mask[pkg]
@@ -2887,7 +3033,7 @@ class depgraph(object):
raise
if not mreasons and \
not pkg.built and \
- pkg.metadata["REQUIRED_USE"] and \
+ pkg.metadata.get("REQUIRED_USE") and \
eapi_has_required_use(pkg.metadata["EAPI"]):
if not check_required_use(
pkg.metadata["REQUIRED_USE"],
@@ -2942,7 +3088,7 @@ class depgraph(object):
continue
missing_use_adjustable.add(pkg)
- required_use = pkg.metadata["REQUIRED_USE"]
+ required_use = pkg.metadata.get("REQUIRED_USE")
required_use_warning = ""
if required_use:
old_use = self._pkg_use_enabled(pkg)
@@ -2990,7 +3136,7 @@ class depgraph(object):
if untouchable_flags.intersection(involved_flags):
continue
- required_use = myparent.metadata["REQUIRED_USE"]
+ required_use = myparent.metadata.get("REQUIRED_USE")
required_use_warning = ""
if required_use:
old_use = self._pkg_use_enabled(myparent)
@@ -3066,62 +3212,66 @@ class depgraph(object):
mask_docs = False
- if required_use_unsatisfied:
+ if show_req_use is None and required_use_unsatisfied:
# We have an unmasked package that only requires USE adjustment
# in order to satisfy REQUIRED_USE, and nothing more. We assume
# that the user wants the latest version, so only the first
# instance is displayed.
- pkg = required_use_unsatisfied[0]
+ show_req_use = required_use_unsatisfied[0]
+
+ if show_req_use is not None:
+
+ pkg = show_req_use
output_cpv = pkg.cpv + _repo_separator + pkg.repo
- writemsg_stdout("\n!!! " + \
+ writemsg("\n!!! " + \
colorize("BAD", "The ebuild selected to satisfy ") + \
colorize("INFORM", xinfo) + \
colorize("BAD", " has unmet requirements.") + "\n",
noiselevel=-1)
use_display = pkg_use_display(pkg, self._frozen_config.myopts)
- writemsg_stdout("- %s %s\n" % (output_cpv, use_display),
+ writemsg("- %s %s\n" % (output_cpv, use_display),
noiselevel=-1)
- writemsg_stdout("\n The following REQUIRED_USE flag constraints " + \
+ writemsg("\n The following REQUIRED_USE flag constraints " + \
"are unsatisfied:\n", noiselevel=-1)
reduced_noise = check_required_use(
pkg.metadata["REQUIRED_USE"],
self._pkg_use_enabled(pkg),
pkg.iuse.is_valid_flag).tounicode()
- writemsg_stdout(" %s\n" % \
+ writemsg(" %s\n" % \
human_readable_required_use(reduced_noise),
noiselevel=-1)
normalized_required_use = \
" ".join(pkg.metadata["REQUIRED_USE"].split())
if reduced_noise != normalized_required_use:
- writemsg_stdout("\n The above constraints " + \
+ writemsg("\n The above constraints " + \
"are a subset of the following complete expression:\n",
noiselevel=-1)
- writemsg_stdout(" %s\n" % \
+ writemsg(" %s\n" % \
human_readable_required_use(normalized_required_use),
noiselevel=-1)
- writemsg_stdout("\n", noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
elif show_missing_use:
- writemsg_stdout("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
- writemsg_stdout("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
+ writemsg("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
+ writemsg("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
for pkg, mreasons in show_missing_use:
- writemsg_stdout("- "+pkg.cpv+_repo_separator+pkg.repo+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
+ writemsg("- "+pkg.cpv+_repo_separator+pkg.repo+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
elif masked_packages:
- writemsg_stdout("\n!!! " + \
+ writemsg("\n!!! " + \
colorize("BAD", "All ebuilds that could satisfy ") + \
colorize("INFORM", xinfo) + \
colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
- writemsg_stdout("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
+ writemsg("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
have_eapi_mask = show_masked_packages(masked_packages)
if have_eapi_mask:
- writemsg_stdout("\n", noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
msg = ("The current version of portage supports " + \
"EAPI '%s'. You must upgrade to a newer version" + \
" of portage before EAPI masked packages can" + \
" be installed.") % portage.const.EAPI
- writemsg_stdout("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
- writemsg_stdout("\n", noiselevel=-1)
+ writemsg("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
mask_docs = True
else:
cp_exists = False
@@ -3131,7 +3281,7 @@ class depgraph(object):
cp_exists = True
break
- writemsg_stdout("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
+ writemsg("\nemerge: there are no ebuilds to satisfy "+green(xinfo)+".\n", noiselevel=-1)
if isinstance(myparent, AtomArg) and \
not cp_exists and \
self._frozen_config.myopts.get(
@@ -3141,12 +3291,13 @@ class depgraph(object):
if cat == "null":
cat = None
- writemsg_stdout("\nemerge: searching for similar names..."
+ writemsg("\nemerge: searching for similar names..."
, noiselevel=-1)
all_cp = set()
all_cp.update(vardb.cp_all())
- all_cp.update(portdb.cp_all())
+ if "--usepkgonly" not in self._frozen_config.myopts:
+ all_cp.update(portdb.cp_all())
if "--usepkg" in self._frozen_config.myopts:
all_cp.update(bindb.cp_all())
# discard dir containing no ebuilds
@@ -3164,9 +3315,18 @@ class depgraph(object):
for other_cp in list(all_cp):
other_pkg = portage.catsplit(other_cp)[1]
if other_pkg == pkg:
- # discard dir containing no ebuilds
- all_cp.discard(other_cp)
- continue
+ # Check for non-identical package that
+ # differs only by upper/lower case.
+ identical = True
+ for cp_orig in orig_cp_map[other_cp]:
+ if portage.catsplit(cp_orig)[1] != \
+ portage.catsplit(atom.cp)[1]:
+ identical = False
+ break
+ if identical:
+ # discard dir containing no ebuilds
+ all_cp.discard(other_cp)
+ continue
pkg_to_cp.setdefault(other_pkg, set()).add(other_cp)
pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp)
matches = []
@@ -3179,16 +3339,16 @@ class depgraph(object):
matches = matches_orig_case
if len(matches) == 1:
- writemsg_stdout("\nemerge: Maybe you meant " + matches[0] + "?\n"
+ writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
, noiselevel=-1)
elif len(matches) > 1:
- writemsg_stdout(
+ writemsg(
"\nemerge: Maybe you meant any of these: %s?\n" % \
(", ".join(matches),), noiselevel=-1)
else:
# Generally, this would only happen if
# all dbapis are empty.
- writemsg_stdout(" nothing similar found.\n"
+ writemsg(" nothing similar found.\n"
, noiselevel=-1)
msg = []
if not isinstance(myparent, AtomArg):
@@ -3201,12 +3361,12 @@ class depgraph(object):
(node)), node_type))
if msg:
- writemsg_stdout("\n".join(msg), noiselevel=-1)
- writemsg_stdout("\n", noiselevel=-1)
+ writemsg("\n".join(msg), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
if mask_docs:
show_mask_docs()
- writemsg_stdout("\n", noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
for db, pkg_type, built, installed, db_keys in \
@@ -3224,51 +3384,12 @@ class depgraph(object):
"""
db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
-
- if hasattr(db, "xmatch"):
- # For portdbapi we match only against the cpv, in order
- # to bypass unnecessary cache access for things like IUSE
- # and SLOT. Later, we cache the metadata in a Package
- # instance, and use that for further matching. This
- # optimization is especially relevant since
- # pordbapi.aux_get() does not cache calls that have
- # myrepo or mytree arguments.
- cpv_list = db.xmatch("match-all-cpv-only", atom)
- else:
- cpv_list = db.match(atom)
-
- # USE=multislot can make an installed package appear as if
- # it doesn't satisfy a slot dependency. Rebuilding the ebuild
- # won't do any good as long as USE=multislot is enabled since
- # the newly built package still won't have the expected slot.
- # Therefore, assume that such SLOT dependencies are already
- # satisfied rather than forcing a rebuild.
+ atom_exp = dep_expand(atom, mydb=db, settings=root_config.settings)
+ cp_list = db.cp_list(atom_exp.cp)
+ matched_something = False
installed = pkg_type == 'installed'
- if installed and not cpv_list and atom.slot:
- for cpv in db.match(atom.cp):
- slot_available = False
- for other_db, other_type, other_built, \
- other_installed, other_keys in \
- self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
- try:
- if atom.slot == \
- other_db.aux_get(cpv, ["SLOT"])[0]:
- slot_available = True
- break
- except KeyError:
- pass
- if not slot_available:
- continue
- inst_pkg = self._pkg(cpv, "installed",
- root_config, installed=installed, myrepo = atom.repo)
- # Remove the slot from the atom and verify that
- # the package matches the resulting atom.
- if portage.match_from_list(
- atom.without_slot, [inst_pkg]):
- yield inst_pkg
- return
-
- if cpv_list:
+
+ if cp_list:
atom_set = InternalPackageSet(initial_atoms=(atom,),
allow_repo=True)
if atom.repo is None and hasattr(db, "getRepositories"):
@@ -3277,8 +3398,13 @@ class depgraph(object):
repo_list = [atom.repo]
# descending order
- cpv_list.reverse()
- for cpv in cpv_list:
+ cp_list.reverse()
+ for cpv in cp_list:
+ # Call match_from_list on one cpv at a time, in order
+ # to avoid unnecessary match_from_list comparisons on
+ # versions that are never yielded from this method.
+ if not match_from_list(atom_exp, [cpv]):
+ continue
for repo in repo_list:
try:
@@ -3295,16 +3421,63 @@ class depgraph(object):
# Make sure that cpv from the current repo satisfies the atom.
# This might not be the case if there are several repos with
# the same cpv, but different metadata keys, like SLOT.
- # Also, for portdbapi, parts of the match that require
- # metadata access are deferred until we have cached the
- # metadata in a Package instance.
+ # Also, parts of the match that require metadata access
+ # are deferred until we have cached the metadata in a
+ # Package instance.
if not atom_set.findAtomForPackage(pkg,
modified_use=self._pkg_use_enabled(pkg)):
continue
+ matched_something = True
yield pkg
+ # USE=multislot can make an installed package appear as if
+ # it doesn't satisfy a slot dependency. Rebuilding the ebuild
+ # won't do any good as long as USE=multislot is enabled since
+ # the newly built package still won't have the expected slot.
+ # Therefore, assume that such SLOT dependencies are already
+ # satisfied rather than forcing a rebuild.
+ if not matched_something and installed and atom.slot is not None:
+
+ if "remove" in self._dynamic_config.myparams:
+ # We need to search the portdbapi, which is not in our
+ # normal dbs list, in order to find the real SLOT.
+ portdb = self._frozen_config.trees[root_config.root]["porttree"].dbapi
+ db_keys = list(portdb._aux_cache_keys)
+ dbs = [(portdb, "ebuild", False, False, db_keys)]
+ else:
+ dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
+
+ cp_list = db.cp_list(atom_exp.cp)
+ if cp_list:
+ atom_set = InternalPackageSet(
+ initial_atoms=(atom.without_slot,), allow_repo=True)
+ atom_exp_without_slot = atom_exp.without_slot
+ cp_list.reverse()
+ for cpv in cp_list:
+ if not match_from_list(atom_exp_without_slot, [cpv]):
+ continue
+ slot_available = False
+ for other_db, other_type, other_built, \
+ other_installed, other_keys in dbs:
+ try:
+ if atom.slot == \
+ other_db.aux_get(cpv, ["SLOT"])[0]:
+ slot_available = True
+ break
+ except KeyError:
+ pass
+ if not slot_available:
+ continue
+ inst_pkg = self._pkg(cpv, "installed",
+ root_config, installed=installed, myrepo=atom.repo)
+ # Remove the slot from the atom and verify that
+ # the package matches the resulting atom.
+ if atom_set.findAtomForPackage(inst_pkg):
+ yield inst_pkg
+ return
+
def _select_pkg_highest_available(self, root, atom, onlydeps=False):
- cache_key = (root, atom, onlydeps)
+ cache_key = (root, atom, atom.unevaluated_atom, onlydeps)
ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
if ret is not None:
pkg, existing = ret
@@ -3320,7 +3493,6 @@ class depgraph(object):
self._dynamic_config._highest_pkg_cache[cache_key] = ret
pkg, existing = ret
if pkg is not None:
- settings = pkg.root_config.settings
if self._pkg_visibility_check(pkg) and \
not (pkg.installed and pkg.masks):
self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
@@ -3347,40 +3519,81 @@ class depgraph(object):
return False
return True
+ class _AutounmaskLevel(object):
+ __slots__ = ("allow_use_changes", "allow_unstable_keywords", "allow_license_changes", \
+ "allow_missing_keywords", "allow_unmasks")
+
+ def __init__(self):
+ self.allow_use_changes = False
+ self.allow_license_changes = False
+ self.allow_unstable_keywords = False
+ self.allow_missing_keywords = False
+ self.allow_unmasks = False
+
+ def _autounmask_levels(self):
+ """
+ Iterate over the different allowed things to unmask.
+
+ 1. USE
+ 2. USE + ~arch + license
+ 3. USE + ~arch + license + missing keywords
+ 4. USE + ~arch + license + masks
+ 5. USE + ~arch + license + missing keywords + masks
+
+ Some thoughts:
+ * Do least invasive changes first.
+ * Try unmasking alone before unmasking + missing keywords
+ to avoid -9999 versions if possible
+ """
+
+ if self._dynamic_config._autounmask is not True:
+ return
+
+ autounmask_keep_masks = self._frozen_config.myopts.get("--autounmask-keep-masks", "n") != "n"
+ autounmask_level = self._AutounmaskLevel()
+
+ autounmask_level.allow_use_changes = True
+
+ for only_use_changes in (True, False):
+
+ autounmask_level.allow_unstable_keywords = (not only_use_changes)
+ autounmask_level.allow_license_changes = (not only_use_changes)
+
+ for missing_keyword, unmask in ((False,False), (True, False), (False, True), (True, True)):
+
+ if (only_use_changes or autounmask_keep_masks) and (missing_keyword or unmask):
+ break
+
+ autounmask_level.allow_missing_keywords = missing_keyword
+ autounmask_level.allow_unmasks = unmask
+
+ yield autounmask_level
+
+
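
A rough standalone rewrite of the escalation order produced by _autounmask_levels() above; --autounmask-keep-masks is assumed unset and omitted for brevity:

    levels = []
    for only_use_changes in (True, False):
        for missing_keyword, unmask in ((False, False), (True, False),
                                        (False, True), (True, True)):
            if only_use_changes and (missing_keyword or unmask):
                break
            levels.append({
                "use_changes": True,
                "unstable_keywords": not only_use_changes,
                "license_changes": not only_use_changes,
                "missing_keywords": missing_keyword,
                "unmasks": unmask,
            })
    print(len(levels))  # 5: USE, ~arch+license, +missing keywords, +masks, +both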
def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
pkg, existing = self._wrapped_select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
default_selection = (pkg, existing)
- if self._dynamic_config._autounmask is True:
+ def reset_pkg(pkg):
if pkg is not None and \
pkg.installed and \
not self._want_installed_pkg(pkg):
pkg = None
- for only_use_changes in True, False:
+ if self._dynamic_config._autounmask is True:
+ reset_pkg(pkg)
+
+ for autounmask_level in self._autounmask_levels():
if pkg is not None:
break
- for allow_unmasks in (False, True):
- if only_use_changes and allow_unmasks:
- continue
+ pkg, existing = \
+ self._wrapped_select_pkg_highest_available_imp(
+ root, atom, onlydeps=onlydeps,
+ autounmask_level=autounmask_level)
- if pkg is not None:
- break
-
- pkg, existing = \
- self._wrapped_select_pkg_highest_available_imp(
- root, atom, onlydeps=onlydeps,
- allow_use_changes=True,
- allow_unstable_keywords=(not only_use_changes),
- allow_license_changes=(not only_use_changes),
- allow_unmasks=allow_unmasks)
-
- if pkg is not None and \
- pkg.installed and \
- not self._want_installed_pkg(pkg):
- pkg = None
+ reset_pkg(pkg)
if self._dynamic_config._need_restart:
return None, None
@@ -3392,21 +3605,20 @@ class depgraph(object):
return pkg, existing
- def _pkg_visibility_check(self, pkg, allow_unstable_keywords=False, allow_license_changes=False, allow_unmasks=False):
+ def _pkg_visibility_check(self, pkg, autounmask_level=None, trust_graph=True):
if pkg.visible:
return True
- if pkg in self._dynamic_config.digraph:
+ if trust_graph and pkg in self._dynamic_config.digraph:
# Sometimes we need to temporarily disable
# dynamic_config._autounmask, but for overall
- # consistency in dependency resolution, in any
- # case we want to respect autounmask visibity
- # for packages that have already been added to
- # the dependency graph.
+ # consistency in dependency resolution, in most
+ # cases we want to treat packages in the graph
+ # as though they are visible.
return True
- if not self._dynamic_config._autounmask:
+ if not self._dynamic_config._autounmask or autounmask_level is None:
return False
pkgsettings = self._frozen_config.pkgsettings[pkg.root]
@@ -3455,11 +3667,10 @@ class depgraph(object):
#Package has already been unmasked.
return True
- #We treat missing keywords in the same way as masks.
- if (masked_by_unstable_keywords and not allow_unstable_keywords) or \
- (masked_by_missing_keywords and not allow_unmasks) or \
- (masked_by_p_mask and not allow_unmasks) or \
- (missing_licenses and not allow_license_changes):
+ if (masked_by_unstable_keywords and not autounmask_level.allow_unstable_keywords) or \
+ (masked_by_missing_keywords and not autounmask_level.allow_missing_keywords) or \
+ (masked_by_p_mask and not autounmask_level.allow_unmasks) or \
+ (missing_licenses and not autounmask_level.allow_license_changes):
#We are not allowed to do the needed changes.
return False
@@ -3556,7 +3767,7 @@ class depgraph(object):
if new_changes != old_changes:
#Don't do the change if it violates REQUIRED_USE.
- required_use = pkg.metadata["REQUIRED_USE"]
+ required_use = pkg.metadata.get("REQUIRED_USE")
if required_use and check_required_use(required_use, old_use, pkg.iuse.is_valid_flag) and \
not check_required_use(required_use, new_use, pkg.iuse.is_valid_flag):
return old_use
@@ -3574,13 +3785,11 @@ class depgraph(object):
self._dynamic_config._need_restart = True
return new_use
- def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, \
- allow_use_changes=False, allow_unstable_keywords=False, allow_license_changes=False, allow_unmasks=False):
+ def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, autounmask_level=None):
root_config = self._frozen_config.roots[root]
pkgsettings = self._frozen_config.pkgsettings[root]
dbs = self._dynamic_config._filtered_trees[root]["dbs"]
vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
- portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
# List of acceptable packages, ordered by type preference.
matched_packages = []
matched_pkgs_ignore_use = []
@@ -3588,6 +3797,8 @@ class depgraph(object):
if not isinstance(atom, portage.dep.Atom):
atom = portage.dep.Atom(atom)
atom_cp = atom.cp
+ have_new_virt = atom_cp.startswith("virtual/") and \
+ self._have_new_virt(root, atom_cp)
atom_set = InternalPackageSet(initial_atoms=(atom,), allow_repo=True)
existing_node = None
myeb = None
@@ -3635,6 +3846,9 @@ class depgraph(object):
# USE configuration.
for pkg in self._iter_match_pkgs(root_config, pkg_type, atom.without_use,
onlydeps=onlydeps):
+ if pkg.cp != atom_cp and have_new_virt:
+ # pull in a new-style virtual instead
+ continue
if pkg in self._dynamic_config._runtime_pkg_mask:
# The package has been masked by the backtracking logic
continue
@@ -3698,10 +3912,7 @@ class depgraph(object):
# _dep_check_composite_db, in order to prevent
# incorrect choices in || deps like bug #351828.
- if not self._pkg_visibility_check(pkg, \
- allow_unstable_keywords=allow_unstable_keywords,
- allow_license_changes=allow_license_changes,
- allow_unmasks=allow_unmasks):
+ if not self._pkg_visibility_check(pkg, autounmask_level):
continue
# Enable upgrade or downgrade to a version
@@ -3741,19 +3952,13 @@ class depgraph(object):
pkg_eb_visible = False
for pkg_eb in self._iter_match_pkgs(pkg.root_config,
"ebuild", Atom("=%s" % (pkg.cpv,))):
- if self._pkg_visibility_check(pkg_eb, \
- allow_unstable_keywords=allow_unstable_keywords,
- allow_license_changes=allow_license_changes,
- allow_unmasks=allow_unmasks):
+ if self._pkg_visibility_check(pkg_eb, autounmask_level):
pkg_eb_visible = True
break
if not pkg_eb_visible:
continue
else:
- if not self._pkg_visibility_check(pkg_eb, \
- allow_unstable_keywords=allow_unstable_keywords,
- allow_license_changes=allow_license_changes,
- allow_unmasks=allow_unmasks):
+ if not self._pkg_visibility_check(pkg_eb, autounmask_level):
continue
# Calculation of USE for unbuilt ebuilds is relatively
@@ -3783,7 +3988,7 @@ class depgraph(object):
if atom.use:
matched_pkgs_ignore_use.append(pkg)
- if allow_use_changes and not pkg.built:
+ if autounmask_level and autounmask_level.allow_use_changes and not pkg.built:
target_use = {}
for flag in atom.use.enabled:
target_use[flag] = True
@@ -3852,6 +4057,7 @@ class depgraph(object):
e_pkg = self._dynamic_config._slot_pkg_map[root].get(pkg.slot_atom)
if not e_pkg:
break
+
# Use PackageSet.findAtomForPackage()
# for PROVIDE support.
if atom_set.findAtomForPackage(e_pkg, modified_use=self._pkg_use_enabled(e_pkg)):
@@ -3872,7 +4078,8 @@ class depgraph(object):
if built and not useoldpkg and (not installed or matched_pkgs_ignore_use) and \
("--newuse" in self._frozen_config.myopts or \
"--reinstall" in self._frozen_config.myopts or \
- "--binpkg-respect-use" in self._frozen_config.myopts):
+ (not installed and self._dynamic_config.myparams.get(
+ "binpkg_respect_use") in ("y", "auto"))):
iuses = pkg.iuse.all
old_use = self._pkg_use_enabled(pkg)
if myeb:
@@ -3886,9 +4093,11 @@ class depgraph(object):
cur_iuse = iuses
if myeb and not usepkgonly and not useoldpkg:
cur_iuse = myeb.iuse.all
- if self._reinstall_for_flags(forced_flags,
- old_use, iuses,
- now_use, cur_iuse):
+ reinstall_for_flags = self._reinstall_for_flags(pkg,
+ forced_flags, old_use, iuses, now_use, cur_iuse)
+ if reinstall_for_flags:
+ if not pkg.installed:
+ self._dynamic_config.ignored_binaries.setdefault(pkg, set()).update(reinstall_for_flags)
break
# Compare current config to installed package
# and do not reinstall if possible.
@@ -3905,7 +4114,7 @@ class depgraph(object):
cur_use = self._pkg_use_enabled(pkg)
cur_iuse = pkg.iuse.all
reinstall_for_flags = \
- self._reinstall_for_flags(
+ self._reinstall_for_flags(pkg,
forced_flags, old_use, old_iuse,
cur_use, cur_iuse)
if reinstall_for_flags:
@@ -4002,21 +4211,16 @@ class depgraph(object):
if avoid_update:
for pkg in matched_packages:
- if pkg.installed and self._pkg_visibility_check(pkg, \
- allow_unstable_keywords=allow_unstable_keywords,
- allow_license_changes=allow_license_changes,
- allow_unmasks=allow_unmasks):
+ if pkg.installed and self._pkg_visibility_check(pkg, autounmask_level):
return pkg, existing_node
visible_matches = []
if matched_oldpkg:
visible_matches = [pkg.cpv for pkg in matched_oldpkg \
- if self._pkg_visibility_check(pkg, allow_unstable_keywords=allow_unstable_keywords,
- allow_license_changes=allow_license_changes, allow_unmasks=allow_unmasks)]
+ if self._pkg_visibility_check(pkg, autounmask_level)]
if not visible_matches:
visible_matches = [pkg.cpv for pkg in matched_packages \
- if self._pkg_visibility_check(pkg, allow_unstable_keywords=allow_unstable_keywords,
- allow_license_changes=allow_license_changes, allow_unmasks=allow_unmasks)]
+ if self._pkg_visibility_check(pkg, autounmask_level)]
if visible_matches:
bestmatch = portage.best(visible_matches)
else:
@@ -4046,11 +4250,12 @@ class depgraph(object):
"""
Select packages that are installed.
"""
- vardb = self._dynamic_config._graph_trees[root]["vartree"].dbapi
- matches = vardb.match_pkgs(atom)
+ matches = list(self._iter_match_pkgs(self._frozen_config.roots[root],
+ "installed", atom))
if not matches:
return None, None
if len(matches) > 1:
+ matches.reverse() # ascending order
unmasked = [pkg for pkg in matches if \
self._pkg_visibility_check(pkg)]
if unmasked:
@@ -4088,11 +4293,10 @@ class depgraph(object):
"recurse" not in self._dynamic_config.myparams:
return 1
- if "complete" not in self._dynamic_config.myparams:
- # Automatically enable complete mode if there are any
- # downgrades, since they often break dependencies
- # (like in bug #353613).
- have_downgrade = False
+ if "complete" not in self._dynamic_config.myparams and \
+ self._dynamic_config.myparams.get("complete_if_new_ver", "y") == "y":
+ # Enable complete mode if an installed package version will change.
+ version_change = False
for node in self._dynamic_config.digraph:
if not isinstance(node, Package) or \
node.operation != "merge":
@@ -4100,16 +4304,15 @@ class depgraph(object):
vardb = self._frozen_config.roots[
node.root].trees["vartree"].dbapi
inst_pkg = vardb.match_pkgs(node.slot_atom)
- if inst_pkg and inst_pkg[0] > node:
- have_downgrade = True
+ if inst_pkg and (inst_pkg[0] > node or inst_pkg[0] < node):
+ version_change = True
break
- if have_downgrade:
+ if version_change:
self._dynamic_config.myparams["complete"] = True
- else:
- # Skip complete graph mode, in order to avoid consuming
- # enough time to disturb users.
- return 1
+
+ if "complete" not in self._dynamic_config.myparams:
+ return 1
self._load_vdb()
@@ -4137,7 +4340,8 @@ class depgraph(object):
args = self._dynamic_config._initial_arg_list[:]
for root in self._frozen_config.roots:
if root != self._frozen_config.target_root and \
- "remove" in self._dynamic_config.myparams:
+ ("remove" in self._dynamic_config.myparams or
+ self._frozen_config.myopts.get("--root-deps") is not None):
# Only pull in deps for the relevant root.
continue
depgraph_sets = self._dynamic_config.sets[root]
@@ -4265,9 +4469,6 @@ class depgraph(object):
"--nodeps" in self._frozen_config.myopts:
return True
- complete = "complete" in self._dynamic_config.myparams
- deep = "deep" in self._dynamic_config.myparams
-
if True:
# Pull in blockers from all installed packages that haven't already
# been pulled into the depgraph, in order to ensure that they are
@@ -4281,11 +4482,14 @@ class depgraph(object):
# are already built.
dep_keys = ["RDEPEND", "PDEPEND"]
for myroot in self._frozen_config.trees:
+
+ if self._frozen_config.myopts.get("--root-deps") is not None and \
+ myroot != self._frozen_config.target_root:
+ continue
+
vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
- portdb = self._frozen_config.trees[myroot]["porttree"].dbapi
pkgsettings = self._frozen_config.pkgsettings[myroot]
root_config = self._frozen_config.roots[myroot]
- dbs = self._dynamic_config._filtered_trees[myroot]["dbs"]
final_db = self._dynamic_config.mydbapi[myroot]
blocker_cache = BlockerCache(myroot, vardb)
@@ -4304,7 +4508,8 @@ class depgraph(object):
# packages masked by license, since the user likely wants
# to adjust ACCEPT_LICENSE.
if pkg in final_db:
- if not self._pkg_visibility_check(pkg) and \
+ if not self._pkg_visibility_check(pkg,
+ trust_graph=False) and \
(pkg_in_graph or 'LICENSE' in pkg.masks):
self._dynamic_config._masked_installed.add(pkg)
else:
@@ -4381,7 +4586,7 @@ class depgraph(object):
# matches (this can happen if an atom lacks a
# category).
show_invalid_depstring_notice(
- pkg, depstr, str(e))
+ pkg, depstr, _unicode_decode("%s") % (e,))
del e
raise
if not success:
@@ -4412,7 +4617,8 @@ class depgraph(object):
except portage.exception.InvalidAtom as e:
depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
show_invalid_depstring_notice(
- pkg, depstr, "Invalid Atom: %s" % (e,))
+ pkg, depstr,
+ _unicode_decode("Invalid Atom: %s") % (e,))
return False
for cpv in stale_cache:
del blocker_cache[cpv]
@@ -4852,15 +5058,6 @@ class depgraph(object):
if replacement_portage == running_portage:
replacement_portage = None
- if replacement_portage is not None and \
- (running_portage is None or \
- running_portage.cpv != replacement_portage.cpv or \
- '9999' in replacement_portage.cpv or \
- 'git' in replacement_portage.inherited or \
- 'git-2' in replacement_portage.inherited):
- # update from running_portage to replacement_portage asap
- asap_nodes.append(replacement_portage)
-
if running_portage is not None:
try:
portage_rdepend = self._select_atoms_highest_available(
@@ -5668,6 +5865,8 @@ class depgraph(object):
"""
autounmask_write = self._frozen_config.myopts.get("--autounmask-write", "n") == True
+ autounmask_unrestricted_atoms = \
+ self._frozen_config.myopts.get("--autounmask-unrestricted-atoms", "n") == True
quiet = "--quiet" in self._frozen_config.myopts
pretend = "--pretend" in self._frozen_config.myopts
ask = "--ask" in self._frozen_config.myopts
@@ -5703,6 +5902,7 @@ class depgraph(object):
#Set of roots we have autounmask changes for.
roots = set()
+ masked_by_missing_keywords = False
unstable_keyword_msg = {}
for pkg in self._dynamic_config._needed_unstable_keywords:
self._show_merge_list()
@@ -5718,12 +5918,17 @@ class depgraph(object):
if reason.unmask_hint and \
reason.unmask_hint.key == 'unstable keyword':
keyword = reason.unmask_hint.value
+ if keyword == "**":
+ masked_by_missing_keywords = True
unstable_keyword_msg[root].append(self._get_dep_chain_as_comment(pkg))
- if is_latest:
- unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
- elif is_latest_in_slot:
- unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], keyword))
+ if autounmask_unrestricted_atoms:
+ if is_latest:
+ unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
+ elif is_latest_in_slot:
+ unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.metadata["SLOT"], keyword))
+ else:
+ unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
else:
unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
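
A hypothetical example (package name and keyword are invented) of the package.accept_keywords entries generated above, with and without --autounmask-unrestricted-atoms:

    pkg_cpv, keyword = "app-misc/foo-1.2", "~amd64"  # hypothetical values
    unrestricted = True  # i.e. --autounmask-unrestricted-atoms=y
    if unrestricted:
        entry = ">=%s %s\n" % (pkg_cpv, keyword)  # or ">=%s:%s %s" with the SLOT
    else:
        entry = "=%s %s\n" % (pkg_cpv, keyword)
    print(entry)  # >=app-misc/foo-1.2 ~amd64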
@@ -5757,10 +5962,13 @@ class depgraph(object):
comment.splitlines() if line]
for line in comment:
p_mask_change_msg[root].append("%s\n" % line)
- if is_latest:
- p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
- elif is_latest_in_slot:
- p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.metadata["SLOT"]))
+ if autounmask_unrestricted_atoms:
+ if is_latest:
+ p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
+ elif is_latest_in_slot:
+ p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.metadata["SLOT"]))
+ else:
+ p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
else:
p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
@@ -5893,33 +6101,41 @@ class depgraph(object):
write_to_file = not problems
+ def format_msg(lines):
+ lines = lines[:]
+ for i, line in enumerate(lines):
+ if line.startswith("#"):
+ continue
+ lines[i] = colorize("INFORM", line.rstrip()) + "\n"
+ return "".join(lines)
+
for root in roots:
settings = self._frozen_config.roots[root].settings
abs_user_config = os.path.join(
settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
if len(roots) > 1:
- writemsg_stdout("\nFor %s:\n" % abs_user_config, noiselevel=-1)
+ writemsg("\nFor %s:\n" % abs_user_config, noiselevel=-1)
if root in unstable_keyword_msg:
- writemsg_stdout("\nThe following " + colorize("BAD", "keyword changes") + \
+ writemsg("\nThe following " + colorize("BAD", "keyword changes") + \
" are necessary to proceed:\n", noiselevel=-1)
- writemsg_stdout("".join(unstable_keyword_msg[root]), noiselevel=-1)
+ writemsg(format_msg(unstable_keyword_msg[root]), noiselevel=-1)
if root in p_mask_change_msg:
- writemsg_stdout("\nThe following " + colorize("BAD", "mask changes") + \
+ writemsg("\nThe following " + colorize("BAD", "mask changes") + \
" are necessary to proceed:\n", noiselevel=-1)
- writemsg_stdout("".join(p_mask_change_msg[root]), noiselevel=-1)
+ writemsg(format_msg(p_mask_change_msg[root]), noiselevel=-1)
if root in use_changes_msg:
- writemsg_stdout("\nThe following " + colorize("BAD", "USE changes") + \
+ writemsg("\nThe following " + colorize("BAD", "USE changes") + \
" are necessary to proceed:\n", noiselevel=-1)
- writemsg_stdout("".join(use_changes_msg[root]), noiselevel=-1)
+ writemsg(format_msg(use_changes_msg[root]), noiselevel=-1)
if root in license_msg:
- writemsg_stdout("\nThe following " + colorize("BAD", "license changes") + \
+ writemsg("\nThe following " + colorize("BAD", "license changes") + \
" are necessary to proceed:\n", noiselevel=-1)
- writemsg_stdout("".join(license_msg[root]), noiselevel=-1)
+ writemsg(format_msg(license_msg[root]), noiselevel=-1)
protect_obj = {}
if write_to_file:
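Two things change in the hunk above: the autounmask summary is routed through writemsg (stderr) instead of writemsg_stdout, and the new format_msg() helper colorizes every suggested atom while leaving "#" comment lines untouched. A rough standalone sketch of the same filtering, using a plain ANSI escape in place of portage's colorize("INFORM", ...) (highlight_suggestions is illustrative):

    def highlight_suggestions(lines, color="\033[1;92m", reset="\033[0m"):
        # Comment lines pass through unchanged; everything else is
        # stripped, colorized and re-terminated with a newline, just as
        # format_msg() does above.
        out = []
        for line in lines:
            if line.startswith("#"):
                out.append(line)
            else:
                out.append(color + line.rstrip() + reset + "\n")
        return "".join(out)

    # highlight_suggestions(["# required by dev-util/foo\n",
    #                        ">=dev-util/foo-1.2 ~amd64\n"])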
@@ -5948,7 +6164,7 @@ class depgraph(object):
if protect_obj[root].isprotected(file_to_write_to):
# We want to force new_protect_filename to ensure
# that the user will see all our changes via
- # etc-update, even if file_to_write_to doesn't
+ # dispatch-conf, even if file_to_write_to doesn't
# exist yet, so we specify force=True.
file_to_write_to = new_protect_filename(file_to_write_to,
force=True)
@@ -5957,20 +6173,16 @@ class depgraph(object):
except PortageException:
problems.append("!!! Failed to write '%s'\n" % file_to_write_to)
- if not quiet and \
- (unstable_keyword_msg or \
- p_mask_change_msg or \
- use_changes_msg or \
- license_msg):
+ if not quiet and (p_mask_change_msg or masked_by_missing_keywords):
msg = [
"",
- "NOTE: This --autounmask behavior can be disabled by setting",
- " EMERGE_DEFAULT_OPTS=\"--autounmask=n\" in make.conf."
+ "NOTE: The --autounmask-keep-masks option will prevent emerge",
+ " from creating package.unmask or ** keyword changes."
]
for line in msg:
if line:
line = colorize("INFORM", line)
- writemsg_stdout(line + "\n", noiselevel=-1)
+ writemsg(line + "\n", noiselevel=-1)
if ask and write_to_file and file_to_write_to:
prompt = "\nWould you like to add these " + \
@@ -6002,14 +6214,14 @@ class depgraph(object):
file_to_write_to.get((abs_user_config, "package.license")))
if problems:
- writemsg_stdout("\nThe following problems occurred while writing autounmask changes:\n", \
+ writemsg("\nThe following problems occurred while writing autounmask changes:\n", \
noiselevel=-1)
- writemsg_stdout("".join(problems), noiselevel=-1)
+ writemsg("".join(problems), noiselevel=-1)
elif write_to_file and roots:
- writemsg_stdout("\nAutounmask changes successfully written. Remember to run etc-update.\n", \
+ writemsg("\nAutounmask changes successfully written. Remember to run dispatch-conf.\n", \
noiselevel=-1)
elif not pretend and not autounmask_write and roots:
- writemsg_stdout("\nUse --autounmask-write to write changes to config files (honoring CONFIG_PROTECT).\n", \
+ writemsg("\nUse --autounmask-write to write changes to config files (honoring CONFIG_PROTECT).\n", \
noiselevel=-1)
@@ -6020,49 +6232,25 @@ class depgraph(object):
the merge list where it is most likely to be seen, but if display()
is not going to be called then this method should be called explicitly
to ensure that the user is notified of problems with the graph.
-
- All output goes to stderr, except for unsatisfied dependencies which
- go to stdout for parsing by programs such as autounmask.
"""
- # Note that show_masked_packages() sends its output to
- # stdout, and some programs such as autounmask parse the
- # output in cases when emerge bails out. However, when
- # show_masked_packages() is called for installed packages
- # here, the message is a warning that is more appropriate
- # to send to stderr, so temporarily redirect stdout to
- # stderr. TODO: Fix output code so there's a cleaner way
- # to redirect everything to stderr.
- sys.stdout.flush()
- sys.stderr.flush()
- stdout = sys.stdout
- try:
- sys.stdout = sys.stderr
- self._display_problems()
- finally:
- sys.stdout = stdout
- sys.stdout.flush()
- sys.stderr.flush()
-
- # This goes to stdout for parsing by programs like autounmask.
- for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
- self._show_unsatisfied_dep(*pargs, **kwargs)
-
- def _display_problems(self):
if self._dynamic_config._circular_deps_for_display is not None:
self._show_circular_deps(
self._dynamic_config._circular_deps_for_display)
- # The user is only notified of a slot conflict if
- # there are no unresolvable blocker conflicts.
- if self._dynamic_config._unsatisfied_blockers_for_display is not None:
+ # The slot conflict display has better noise reduction than
+ # the unsatisfied blockers display, so skip unsatisfied blockers
+ # display if there are slot conflicts (see bug #385391).
+ if self._dynamic_config._slot_collision_info:
+ self._show_slot_collision_notice()
+ elif self._dynamic_config._unsatisfied_blockers_for_display is not None:
self._show_unsatisfied_blockers(
self._dynamic_config._unsatisfied_blockers_for_display)
- elif self._dynamic_config._slot_collision_info:
- self._show_slot_collision_notice()
else:
self._show_missed_update()
+ self._show_ignored_binaries()
+
self._display_autounmask()
# TODO: Add generic support for "set problem" handlers so that
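The reordered display logic above shows circular dependencies first, prefers the slot-conflict notice over the unsatisfied-blockers report (its noise reduction is better, see bug #385391), falls back to the missed-update notice, and always appends the new ignored-binaries report. A condensed sketch of that precedence, with plain booleans standing in for the depgraph's _dynamic_config state:

    def choose_problem_displays(has_circular_deps, has_slot_conflicts,
                                has_unsatisfied_blockers):
        # Same precedence as the rewritten display_problems() above.
        shown = []
        if has_circular_deps:
            shown.append("circular deps")
        if has_slot_conflicts:
            shown.append("slot conflicts")
        elif has_unsatisfied_blockers:
            shown.append("unsatisfied blockers")
        else:
            shown.append("missed updates")
        shown.append("ignored binaries")
        return shown

    # choose_problem_displays(False, True, True)
    # -> ['slot conflicts', 'ignored binaries']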
@@ -6164,6 +6352,9 @@ class depgraph(object):
show_mask_docs()
writemsg("\n", noiselevel=-1)
+ for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
+ self._show_unsatisfied_dep(*pargs, **kwargs)
+
def saveNomergeFavorites(self):
"""Find atoms in favorites that are not in the mergelist and add them
to the world file if necessary."""
@@ -6184,7 +6375,6 @@ class depgraph(object):
args_set = self._dynamic_config.sets[
self._frozen_config.target_root].sets['__non_set_args__']
- portdb = self._frozen_config.trees[self._frozen_config.target_root]["porttree"].dbapi
added_favorites = set()
for x in self._dynamic_config._set_nodes:
if x.operation != "nomerge":
@@ -6222,7 +6412,8 @@ class depgraph(object):
all_added.extend(added_favorites)
all_added.sort()
for a in all_added:
- writemsg(">>> Recording %s in \"world\" favorites file...\n" % \
+ writemsg_stdout(
+ ">>> Recording %s in \"world\" favorites file...\n" % \
colorize("INFORM", str(a)), noiselevel=-1)
if all_added:
world_set.update(all_added)
@@ -6247,15 +6438,12 @@ class depgraph(object):
mergelist = []
favorites = resume_data.get("favorites")
- args_set = self._dynamic_config.sets[
- self._frozen_config.target_root].sets['__non_set_args__']
if isinstance(favorites, list):
args = self._load_favorites(favorites)
else:
args = []
fakedb = self._dynamic_config.mydbapi
- trees = self._frozen_config.trees
serialized_tasks = []
masked_tasks = []
for x in mergelist:
@@ -6552,38 +6740,43 @@ class _dep_check_composite_db(dbapi):
return ret
def match(self, atom):
- ret = self._match_cache.get(atom)
+ cache_key = (atom, atom.unevaluated_atom)
+ ret = self._match_cache.get(cache_key)
if ret is not None:
return ret[:]
+
+ ret = []
pkg, existing = self._depgraph._select_package(self._root, atom)
- if not pkg:
- ret = []
- else:
- # Return the highest available from select_package() as well as
- # any matching slots in the graph db.
+
+ if pkg is not None and self._visible(pkg):
+ self._cpv_pkg_map[pkg.cpv] = pkg
+ ret.append(pkg.cpv)
+
+ if pkg is not None and \
+ atom.slot is None and \
+ pkg.cp.startswith("virtual/") and \
+ (("remove" not in self._depgraph._dynamic_config.myparams and
+ "--update" not in self._depgraph._frozen_config.myopts) or
+ not ret or
+ not self._depgraph._virt_deps_visible(pkg, ignore_use=True)):
+ # For new-style virtual lookahead that occurs inside dep_check()
+ # for bug #141118, examine all slots. This is needed so that newer
+ # slots will not unnecessarily be pulled in when a satisfying lower
+ # slot is already installed. For example, if virtual/jdk-1.5 is
+ # satisfied via gcj-jdk then there's no need to pull in a newer
+ # slot to satisfy a virtual/jdk dependency, unless --update is
+ # enabled.
slots = set()
- slots.add(pkg.metadata["SLOT"])
- if pkg.cp.startswith("virtual/"):
- # For new-style virtual lookahead that occurs inside
- # dep_check(), examine all slots. This is needed
- # so that newer slots will not unnecessarily be pulled in
- # when a satisfying lower slot is already installed. For
- # example, if virtual/jdk-1.4 is satisfied via kaffe then
- # there's no need to pull in a newer slot to satisfy a
- # virtual/jdk dependency.
- for db, pkg_type, built, installed, db_keys in \
- self._depgraph._dynamic_config._filtered_trees[self._root]["dbs"]:
- for cpv in db.match(atom):
- if portage.cpv_getkey(cpv) != pkg.cp:
- continue
- slots.add(db.aux_get(cpv, ["SLOT"])[0])
- ret = []
- if self._visible(pkg):
- self._cpv_pkg_map[pkg.cpv] = pkg
- ret.append(pkg.cpv)
- slots.remove(pkg.metadata["SLOT"])
+ slots.add(pkg.slot)
+ for virt_pkg in self._depgraph._iter_match_pkgs_any(
+ self._depgraph._frozen_config.roots[self._root], atom):
+ if virt_pkg.cp != pkg.cp:
+ continue
+ slots.add(virt_pkg.slot)
+
+ slots.remove(pkg.slot)
while slots:
- slot_atom = Atom("%s:%s" % (atom.cp, slots.pop()))
+ slot_atom = atom.with_slot(slots.pop())
pkg, existing = self._depgraph._select_package(
self._root, slot_atom)
if not pkg:
@@ -6592,9 +6785,11 @@ class _dep_check_composite_db(dbapi):
continue
self._cpv_pkg_map[pkg.cpv] = pkg
ret.append(pkg.cpv)
- if ret:
+
+ if len(ret) > 1:
self._cpv_sort_ascending(ret)
- self._match_cache[atom] = ret
+
+ self._match_cache[cache_key] = ret
return ret[:]
def _visible(self, pkg):
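The rewritten match() caches results per (atom, atom.unevaluated_atom) pair and, for new-style virtuals that carry no slot restriction, also walks the remaining slots so an already-satisfied lower slot (virtual/jdk via gcj-jdk in the comment above) does not pull in a newer one. A heavily simplified sketch of the caching plus slot-expansion shape, using a toy package record in place of the real Package/Atom classes (everything here is illustrative):

    from collections import namedtuple

    Pkg = namedtuple("Pkg", "cpv cp slot visible")

    def match(cache, cache_key, best_pkg, candidate_pkgs):
        # Return the cached list when possible; otherwise start from the
        # best visible match and add one cpv per additional slot of the
        # same package, sorting only when more than one entry was found.
        if cache_key in cache:
            return list(cache[cache_key])
        ret = []
        if best_pkg is not None and best_pkg.visible:
            ret.append(best_pkg.cpv)
            seen_slots = {best_pkg.slot}
            for pkg in candidate_pkgs:
                if pkg.cp == best_pkg.cp and pkg.slot not in seen_slots:
                    seen_slots.add(pkg.slot)
                    ret.append(pkg.cpv)
        if len(ret) > 1:
            ret.sort()
        cache[cache_key] = ret
        return list(ret)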
@@ -6650,7 +6845,7 @@ class _dep_check_composite_db(dbapi):
# Note: highest_visible is not necessarily the real highest
# visible, especially when --update is not enabled, so use
# < operator instead of !=.
- if pkg < highest_visible:
+ if highest_visible is not None and pkg < highest_visible:
return False
elif in_graph != pkg:
# Mask choices for packages that would trigger a slot
@@ -6832,7 +7027,7 @@ def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
TODO: Return reasons for dropped_tasks, for display/logging.
@rtype: tuple
- @returns: (success, depgraph, dropped_tasks)
+ @return: (success, depgraph, dropped_tasks)
"""
skip_masked = True
skip_unsatisfied = True
@@ -6869,12 +7064,12 @@ def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
if not isinstance(parent_node, Package) \
or parent_node.operation not in ("merge", "nomerge"):
continue
- unsatisfied = \
- graph.child_nodes(parent_node,
- ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
- if pkg in unsatisfied:
- unsatisfied_parents[parent_node] = parent_node
- unsatisfied_stack.append(parent_node)
+ # We need to traverse all priorities here, in order to
+ # ensure that a package with an unsatisfied dependency
+ # won't get pulled in, even indirectly via a soft
+ # dependency.
+ unsatisfied_parents[parent_node] = parent_node
+ unsatisfied_stack.append(parent_node)
unsatisfied_tuples = frozenset(tuple(parent_node)
for parent_node in unsatisfied_parents
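The resume logic above no longer filters parents by dependency priority: every parent of a package with an unsatisfied dependency is dropped, so nothing that needs it, even through a soft dependency, survives in the resume list. A small sketch of that transitive parent walk (graph_parents maps a package to the packages that depend on it; the function name is illustrative):

    def collect_unsatisfied_parents(graph_parents, unsatisfied_pkgs):
        # Walk all parent edges transitively, regardless of priority.
        dropped = set(unsatisfied_pkgs)
        stack = list(unsatisfied_pkgs)
        while stack:
            pkg = stack.pop()
            for parent in graph_parents.get(pkg, ()):
                if parent not in dropped:
                    dropped.add(parent)
                    stack.append(parent)
        return dropped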
@@ -6907,7 +7102,6 @@ def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
def get_mask_info(root_config, cpv, pkgsettings,
db, pkg_type, built, installed, db_keys, myrepo = None, _pkg_use_enabled=None):
- eapi_masked = False
try:
metadata = dict(zip(db_keys,
db.aux_get(cpv, db_keys, myrepo=myrepo)))
@@ -6918,8 +7112,6 @@ def get_mask_info(root_config, cpv, pkgsettings,
mreasons = ["corruption"]
else:
eapi = metadata['EAPI']
- if eapi[:1] == '-':
- eapi = eapi[1:]
if not portage.eapi_is_supported(eapi):
mreasons = ['EAPI %s' % eapi]
else:
@@ -6976,10 +7168,11 @@ def show_masked_packages(masked_packages):
# above via mreasons.
pass
- writemsg_stdout("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n", noiselevel=-1)
+ writemsg("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n",
+ noiselevel=-1)
if comment and comment not in shown_comments:
- writemsg_stdout(filename + ":\n" + comment + "\n",
+ writemsg(filename + ":\n" + comment + "\n",
noiselevel=-1)
shown_comments.add(comment)
portdb = root_config.trees["porttree"].dbapi
@@ -6989,13 +7182,14 @@ def show_masked_packages(masked_packages):
continue
msg = ("A copy of the '%s' license" + \
" is located at '%s'.\n\n") % (l, l_path)
- writemsg_stdout(msg, noiselevel=-1)
+ writemsg(msg, noiselevel=-1)
shown_licenses.add(l)
return have_eapi_mask
def show_mask_docs():
- writemsg_stdout("For more information, see the MASKED PACKAGES section in the emerge\n", noiselevel=-1)
- writemsg_stdout("man page or refer to the Gentoo Handbook.\n", noiselevel=-1)
+ writemsg("For more information, see the MASKED PACKAGES "
+ "section in the emerge\n", noiselevel=-1)
+ writemsg("man page or refer to the Gentoo Handbook.\n", noiselevel=-1)
def show_blocker_docs_link():
writemsg("\nFor more information about " + bad("Blocked Packages") + ", please refer to the following\n", noiselevel=-1)
@@ -7017,7 +7211,7 @@ def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
pkg.metadata["CHOST"]))
if pkg.invalid:
- for msg_type, msgs in pkg.invalid.items():
+ for msgs in pkg.invalid.values():
for msg in msgs:
mreasons.append(
_MaskReason("invalid", "invalid: %s" % (msg,)))
diff --git a/portage_with_autodep/pym/_emerge/depgraph.pyo b/portage_with_autodep/pym/_emerge/depgraph.pyo
new file mode 100644
index 0000000..ba00a11
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/depgraph.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/emergelog.py b/portage_with_autodep/pym/_emerge/emergelog.py
index d6ef1b4..b1b093f 100644
--- a/portage_with_autodep/pym/_emerge/emergelog.py
+++ b/portage_with_autodep/pym/_emerge/emergelog.py
@@ -49,15 +49,12 @@ def emergelog(xterm_titles, mystr, short_msg=None):
portage.util.apply_secpass_permissions(file_path,
uid=portage.portage_uid, gid=portage.portage_gid,
mode=0o660)
- mylock = None
+ mylock = portage.locks.lockfile(file_path)
try:
- mylock = portage.locks.lockfile(mylogfile)
mylogfile.write(_log_fmt % (time.time(), mystr))
- mylogfile.flush()
- finally:
- if mylock:
- portage.locks.unlockfile(mylock)
mylogfile.close()
+ finally:
+ portage.locks.unlockfile(mylock)
except (IOError,OSError,portage.exception.PortageException) as e:
if secpass >= 1:
print("emergelog():",e, file=sys.stderr)
diff --git a/portage_with_autodep/pym/_emerge/emergelog.pyo b/portage_with_autodep/pym/_emerge/emergelog.pyo
new file mode 100644
index 0000000..7e67bd3
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/emergelog.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/getloadavg.pyo b/portage_with_autodep/pym/_emerge/getloadavg.pyo
new file mode 100644
index 0000000..56bda8c
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/getloadavg.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/help.py b/portage_with_autodep/pym/_emerge/help.py
index c978ce2..a1dbb37 100644
--- a/portage_with_autodep/pym/_emerge/help.py
+++ b/portage_with_autodep/pym/_emerge/help.py
@@ -3,10 +3,9 @@
from __future__ import print_function
-from portage.const import _ENABLE_DYN_LINK_MAP
from portage.output import bold, turquoise, green
-def shorthelp():
+def help():
print(bold("emerge:")+" the other white meat (command-line interface to the Portage system)")
print(bold("Usage:"))
print(" "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] [ "+turquoise("ebuild")+" | "+turquoise("tbz2")+" | "+turquoise("file")+" | "+turquoise("@set")+" | "+turquoise("atom")+" ] [ ... ]")
@@ -19,797 +18,8 @@ def shorthelp():
print(" [ "+green("--complete-graph")+" ] [ "+green("--deep")+" ]")
print(" [ "+green("--jobs") + " " + turquoise("JOBS")+" ] [ "+green("--keep-going")+" ] [ " + green("--load-average")+" " + turquoise("LOAD") + " ]")
print(" [ "+green("--newuse")+" ] [ "+green("--noconfmem")+" ] [ "+green("--nospinner")+" ]")
- print(" [ "+green("--oneshot")+" ] [ "+green("--onlydeps")+" ]")
+ print(" [ "+green("--oneshot")+" ] [ "+green("--onlydeps")+" ] [ "+ green("--quiet-build")+" [ " + turquoise("y") + " | "+ turquoise("n")+" ] ]")
print(" [ "+green("--reinstall ")+turquoise("changed-use")+" ] [ " + green("--with-bdeps")+" < " + turquoise("y") + " | "+ turquoise("n")+" > ]")
print(bold("Actions:")+" [ "+green("--depclean")+" | "+green("--list-sets")+" | "+green("--search")+" | "+green("--sync")+" | "+green("--version")+" ]")
-
-def help(myopts, havecolor=1):
- # TODO: Implement a wrap() that accounts for console color escape codes.
- from textwrap import wrap
- desc_left_margin = 14
- desc_indent = desc_left_margin * " "
- desc_width = 80 - desc_left_margin - 5
- if "--verbose" not in myopts:
- shorthelp()
- print()
- print(" For more help try 'emerge --help --verbose' or consult the man page.")
- else:
- shorthelp()
- print()
- print(turquoise("Help (this screen):"))
- print(" "+green("--help")+" ("+green("-h")+" short option)")
- print(" Displays this help; an additional argument (see above) will tell")
- print(" emerge to display detailed help.")
- print()
- print(turquoise("Actions:"))
- print(" "+green("--clean"))
- print(" Cleans the system by removing outdated packages which will not")
- print(" remove functionalities or prevent your system from working.")
- print(" The arguments can be in several different formats :")
- print(" * world ")
- print(" * system or")
- print(" * 'dependency specification' (in single quotes is best.)")
- print(" Here are a few examples of the dependency specification format:")
- print(" "+bold("binutils")+" matches")
- print(" binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1")
- print(" "+bold("sys-devel/binutils")+" matches")
- print(" binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1")
- print(" "+bold(">sys-devel/binutils-2.11.90.0.7")+" matches")
- print(" binutils-2.11.92.0.12.3-r1")
- print(" "+bold(">=sys-devel/binutils-2.11.90.0.7")+" matches")
- print(" binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1")
- print(" "+bold("<=sys-devel/binutils-2.11.92.0.12.3-r1")+" matches")
- print(" binutils-2.11.90.0.7 and binutils-2.11.92.0.12.3-r1")
- print()
- print(" "+green("--config"))
- print(" Runs package-specific operations that must be executed after an")
- print(" emerge process has completed. This usually entails configuration")
- print(" file setup or other similar setups that the user may wish to run.")
- print()
- print(" "+green("--depclean")+" ("+green("-c")+" short option)")
-
- paragraph = "Cleans the system by removing packages that are " + \
- "not associated with explicitly merged packages. Depclean works " + \
- "by creating the full dependency tree from the " + \
- "@world set, then comparing it to installed packages. Packages " + \
- "installed, but not part of the dependency tree, will be " + \
- "uninstalled by depclean. See --with-bdeps for behavior with " + \
- "respect to build time dependencies that are not strictly " + \
- "required. Packages that are part of the world set will " + \
- "always be kept. They can be manually added to this set with " + \
- "emerge --noreplace <atom>. As a safety measure, depclean " + \
- "will not remove any packages unless *all* required dependencies " + \
- "have been resolved. As a consequence, it is often necessary to " + \
- "run emerge --update --newuse --deep @world " + \
- "prior to depclean."
-
- for line in wrap(paragraph, desc_width):
- print(desc_indent + line)
- print()
-
- paragraph = "WARNING: Inexperienced users are advised to use " + \
- "--pretend with this option in order to see a preview of which " + \
- "packages will be uninstalled. Always study the list of packages " + \
- "to be cleaned for any obvious mistakes. Note that packages " + \
- "listed in package.provided (see portage(5)) may be removed by " + \
- "depclean, even if they are part of the world set."
-
- paragraph += " Also note that " + \
- "depclean may break link level dependencies"
-
- if _ENABLE_DYN_LINK_MAP:
- paragraph += ", especially when the " + \
- "--depclean-lib-check option is disabled"
-
- paragraph += ". Thus, it is " + \
- "recommended to use a tool such as revdep-rebuild(1) " + \
- "in order to detect such breakage."
-
- for line in wrap(paragraph, desc_width):
- print(desc_indent + line)
- print()
-
- paragraph = "Depclean serves as a dependency aware version of " + \
- "--unmerge. When given one or more atoms, it will unmerge " + \
- "matched packages that have no reverse dependencies. Use " + \
- "--depclean together with --verbose to show reverse dependencies."
-
- for line in wrap(paragraph, desc_width):
- print(desc_indent + line)
- print()
- print(" " + green("--deselect") + " [ %s | %s ]" % \
- (turquoise("y"), turquoise("n")))
-
- paragraph = \
- "Remove atoms and/or sets from the world file. This action is implied " + \
- "by uninstall actions, including --depclean, " + \
- "--prune and --unmerge. Use --deselect=n " + \
- "in order to prevent uninstall actions from removing " + \
- "atoms from the world file."
-
- for line in wrap(paragraph, desc_width):
- print(desc_indent + line)
- print()
- print(" " + green("--ignore-default-opts"))
-
- paragraph = \
- "Causes EMERGE_DEFAULT_OPTS (see make.conf(5)) to be ignored."
-
- for line in wrap(paragraph, desc_width):
- print(desc_indent + line)
- print()
- print(" "+green("--info"))
- print(" Displays important portage variables that will be exported to")
- print(" ebuild.sh when performing merges. This information is useful")
- print(" for bug reports and verification of settings. All settings in")
- print(" make.{conf,globals,defaults} and the environment show up if")
- print(" run with the '--verbose' flag.")
- print()
- print(" " + green("--list-sets"))
- paragraph = "Displays a list of available package sets."
-
- for line in wrap(paragraph, desc_width):
- print(desc_indent + line)
- print()
- print(" "+green("--metadata"))
- print(" Transfers metadata cache from ${PORTDIR}/metadata/cache/ to")
- print(" /var/cache/edb/dep/ as is normally done on the tail end of an")
- print(" rsync update using " + bold("emerge --sync") + ". This process populates the")
- print(" cache database that portage uses for pre-parsed lookups of")
- print(" package data. It does not populate cache for the overlays")
- print(" listed in PORTDIR_OVERLAY. In order to generate cache for")
- print(" overlays, use " + bold("--regen") + ".")
- print()
- print(" "+green("--prune")+" ("+green("-P")+" short option)")
- print(" "+turquoise("WARNING: This action can remove important packages!"))
- paragraph = "Removes all but the highest installed version of a " + \
- "package from your system. Use --prune together with " + \
- "--verbose to show reverse dependencies or with --nodeps " + \
- "to ignore all dependencies. "
-
- for line in wrap(paragraph, desc_width):
- print(desc_indent + line)
- print()
- print(" "+green("--regen"))
- print(" Causes portage to check and update the dependency cache of all")
- print(" ebuilds in the portage tree. This is not recommended for rsync")
- print(" users as rsync updates the cache using server-side caches.")
- print(" Rsync users should simply 'emerge --sync' to regenerate.")
- desc = "In order to specify parallel --regen behavior, use "+ \
- "the ---jobs and --load-average options. If you would like to " + \
- "generate and distribute cache for use by others, use egencache(1)."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" "+green("--resume")+" ("+green("-r")+" short option)")
- print(" Resumes the most recent merge list that has been aborted due to an")
- print(" error. Please note that this operation will only return an error")
- print(" on failure. If there is nothing for portage to do, then portage")
- print(" will exit with a message and a success condition. A resume list")
- print(" will persist until it has been completed in entirety or until")
- print(" another aborted merge list replaces it. The resume history is")
- print(" capable of storing two merge lists. After one resume list")
- print(" completes, it is possible to invoke --resume once again in order")
- print(" to resume an older list.")
- print()
- print(" "+green("--search")+" ("+green("-s")+" short option)")
- print(" Searches for matches of the supplied string in the current local")
- print(" portage tree. By default emerge uses a case-insensitive simple ")
- print(" search, but you can enable a regular expression search by ")
- print(" prefixing the search string with %.")
- print(" Prepending the expression with a '@' will cause the category to")
- print(" be included in the search.")
- print(" A few examples:")
- print(" "+bold("emerge --search libc"))
- print(" list all packages that contain libc in their name")
- print(" "+bold("emerge --search '%^kde'"))
- print(" list all packages starting with kde")
- print(" "+bold("emerge --search '%gcc$'"))
- print(" list all packages ending with gcc")
- print(" "+bold("emerge --search '%@^dev-java.*jdk'"))
- print(" list all available Java JDKs")
- print()
- print(" "+green("--searchdesc")+" ("+green("-S")+" short option)")
- print(" Matches the search string against the description field as well")
- print(" the package's name. Take caution as the descriptions are also")
- print(" matched as regular expressions.")
- print(" emerge -S html")
- print(" emerge -S applet")
- print(" emerge -S 'perl.*module'")
- print()
- print(" "+green("--sync"))
- desc = "This updates the portage tree that is located in the " + \
- "directory that the PORTDIR variable refers to (default " + \
- "location is /usr/portage). The SYNC variable specifies " + \
- "the remote URI from which files will be synchronized. " + \
- "The PORTAGE_SYNC_STALE variable configures " + \
- "warnings that are shown when emerge --sync has not " + \
- "been executed recently."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(desc_indent + turquoise("WARNING:"))
- desc = "The emerge --sync action will modify and/or delete " + \
- "files located inside the directory that the PORTDIR " + \
- "variable refers to (default location is /usr/portage). " + \
- "For more information, see the PORTDIR documentation in " + \
- "the make.conf(5) man page."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(desc_indent + green("NOTE:"))
- desc = "The emerge-webrsync program will download the entire " + \
- "portage tree as a tarball, which is much faster than emerge " + \
- "--sync for first time syncs."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" "+green("--unmerge")+" ("+green("-C")+" short option)")
- print(" "+turquoise("WARNING: This action can remove important packages!"))
- print(" Removes all matching packages. This does no checking of")
- print(" dependencies, so it may remove packages necessary for the proper")
- print(" operation of your system. Its arguments can be atoms or")
- print(" ebuilds. For a dependency aware version of --unmerge, use")
- print(" --depclean or --prune.")
- print()
- print(" "+green("--version")+" ("+green("-V")+" short option)")
- print(" Displays the currently installed version of portage along with")
- print(" other information useful for quick reference on a system. See")
- print(" "+bold("emerge info")+" for more advanced information.")
- print()
- print(turquoise("Options:"))
- print(" "+green("--accept-properties=ACCEPT_PROPERTIES"))
- desc = "This option temporarily overrides the ACCEPT_PROPERTIES " + \
- "variable. The ACCEPT_PROPERTIES variable is incremental, " + \
- "which means that the specified setting is appended to the " + \
- "existing value from your configuration. The special -* " + \
- "token can be used to discard the existing configuration " + \
- "value and start fresh. See the MASKED PACKAGES section " + \
- "and make.conf(5) for more information about " + \
- "ACCEPT_PROPERTIES. A typical usage example for this option " + \
- "would be to use --accept-properties=-interactive to " + \
- "temporarily mask interactive packages. With default " + \
- "configuration, this would result in an effective " + \
- "ACCEPT_PROPERTIES value of \"* -interactive\"."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" "+green("--alphabetical"))
- print(" When displaying USE and other flag output, combines the enabled")
- print(" and disabled flags into a single list and sorts it alphabetically.")
- print(" With this option, output such as USE=\"dar -bar -foo\" will instead")
- print(" be displayed as USE=\"-bar dar -foo\"")
- print()
- print(" " + green("--ask") + \
- " [ %s | %s ] (%s short option)" % \
- (turquoise("y"), turquoise("n"), green("-a")))
- desc = "Before performing the action, display what will take place (server info for " + \
- "--sync, --pretend output for merge, and so forth), then ask " + \
- "whether to proceed with the action or abort. Using --ask is more " + \
- "efficient than using --pretend and then executing the same command " + \
- "without --pretend, as dependencies will only need to be calculated once. " + \
- "WARNING: If the \"Enter\" key is pressed at the prompt (with no other input), " + \
- "it is interpreted as acceptance of the first choice. Note that the input " + \
- "buffer is not cleared prior to the prompt, so an accidental press of the " + \
- "\"Enter\" key at any time prior to the prompt will be interpreted as a choice! " + \
- "Use the --ask-enter-invalid option if you want a single \"Enter\" key " + \
- "press to be interpreted as invalid input."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" " + green("--ask-enter-invalid"))
- desc = "When used together with the --ask option, " + \
- "interpret a single \"Enter\" key press as " + \
- "invalid input. This helps prevent accidental " + \
- "acceptance of the first choice. This option is " + \
- "intended to be set in the make.conf(5) " + \
- "EMERGE_DEFAULT_OPTS variable."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" " + green("--autounmask") + " [ %s | %s ]" % \
- (turquoise("y"), turquoise("n")))
- desc = "Automatically unmask packages and generate package.use " + \
- "settings as necessary to satisfy dependencies. This " + \
- "option is enabled by default. If any configuration " + \
- "changes are required, then they will be displayed " + \
- "after the merge list and emerge will immediately " + \
- "abort. If the displayed configuration changes are " + \
- "satisfactory, you should copy and paste them into " + \
- "the specified configuration file(s), or enable the " + \
- "--autounmask-write option. The " + \
- "EMERGE_DEFAULT_OPTS variable may be used to " + \
- "disable this option by default in make.conf(5)."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" " + green("--autounmask-write") + " [ %s | %s ]" % \
- (turquoise("y"), turquoise("n")))
- desc = "If --autounmask is enabled, changes are written " + \
- "to config files, respecting CONFIG_PROTECT and --ask."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" " + green("--backtrack") + " " + turquoise("COUNT"))
- desc = "Specifies an integer number of times to backtrack if " + \
- "dependency calculation fails due to a conflict or an " + \
- "unsatisfied dependency (default: '10')."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" " + green("--binpkg-respect-use") + " [ %s | %s ]" % \
- (turquoise("y"), turquoise("n")))
- desc = "Tells emerge to ignore binary packages if their use flags" + \
- " don't match the current configuration. (default: 'n')"
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" " + green("--buildpkg") + \
- " [ %s | %s ] (%s short option)" % \
- (turquoise("y"), turquoise("n"), green("-b")))
- desc = "Tells emerge to build binary packages for all ebuilds processed in" + \
- " addition to actually merging the packages. Useful for maintainers" + \
- " or if you administrate multiple Gentoo Linux systems (build once," + \
- " emerge tbz2s everywhere) as well as disaster recovery. The package" + \
- " will be created in the" + \
- " ${PKGDIR}/All directory. An alternative for already-merged" + \
- " packages is to use quickpkg(1) which creates a tbz2 from the" + \
- " live filesystem."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" "+green("--buildpkgonly")+" ("+green("-B")+" short option)")
- print(" Creates a binary package, but does not merge it to the")
- print(" system. This has the restriction that unsatisfied dependencies")
- print(" must not exist for the desired package as they cannot be used if")
- print(" they do not exist on the system.")
- print()
- print(" " + green("--changed-use"))
- desc = "This is an alias for --reinstall=changed-use."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" "+green("--changelog")+" ("+green("-l")+" short option)")
- print(" When pretending, also display the ChangeLog entries for packages")
- print(" that will be upgraded.")
- print()
- print(" "+green("--color") + " < " + turquoise("y") + " | "+ turquoise("n")+" >")
- print(" Enable or disable color output. This option will override NOCOLOR")
- print(" (see make.conf(5)) and may also be used to force color output when")
- print(" stdout is not a tty (by default, color is disabled unless stdout")
- print(" is a tty).")
- print()
- print(" "+green("--columns"))
- print(" Display the pretend output in a tabular form. Versions are")
- print(" aligned vertically.")
- print()
- print(" "+green("--complete-graph") + " [ %s | %s ]" % \
- (turquoise("y"), turquoise("n")))
- desc = "This causes emerge to consider the deep dependencies of all" + \
- " packages from the world set. With this option enabled," + \
- " emerge will bail out if it determines that the given operation will" + \
- " break any dependencies of the packages that have been added to the" + \
- " graph. Like the --deep option, the --complete-graph" + \
- " option will significantly increase the time taken for dependency" + \
- " calculations. Note that, unlike the --deep option, the" + \
- " --complete-graph option does not cause any more packages to" + \
- " be updated than would have otherwise " + \
- "been updated with the option disabled. " + \
- "Using --with-bdeps=y together with --complete-graph makes " + \
- "the graph as complete as possible."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" "+green("--config-root=DIR"))
- desc = "Set the PORTAGE_CONFIGROOT environment variable " + \
- "which is documented in the emerge(1) man page."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" "+green("--debug")+" ("+green("-d")+" short option)")
- print(" Tell emerge to run the ebuild command in --debug mode. In this")
- print(" mode, the bash build environment will run with the -x option,")
- print(" causing it to output verbose debug information print to stdout.")
- print(" --debug is great for finding bash syntax errors as providing")
- print(" very verbose information about the dependency and build process.")
- print()
- print(" "+green("--deep") + " " + turquoise("[DEPTH]") + \
- " (" + green("-D") + " short option)")
- print(" This flag forces emerge to consider the entire dependency tree of")
- print(" packages, instead of checking only the immediate dependencies of")
- print(" the packages. As an example, this catches updates in libraries")
- print(" that are not directly listed in the dependencies of a package.")
- print(" Also see --with-bdeps for behavior with respect to build time")
- print(" dependencies that are not strictly required.")
- print()
-
- if _ENABLE_DYN_LINK_MAP:
- print(" " + green("--depclean-lib-check") + " [ %s | %s ]" % \
- (turquoise("y"), turquoise("n")))
- desc = "Account for library link-level dependencies during " + \
- "--depclean and --prune actions. This " + \
- "option is enabled by default. In some cases this can " + \
- "be somewhat time-consuming. This option is ignored " + \
- "when FEATURES=\"preserve-libs\" is enabled in " + \
- "make.conf(5), since any libraries that have " + \
- "consumers will simply be preserved."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
-
- print(" "+green("--emptytree")+" ("+green("-e")+" short option)")
- desc = "Reinstalls target atoms and their entire deep " + \
- "dependency tree, as though no packages are currently " + \
- "installed. You should run this with --pretend " + \
- "first to make sure the result is what you expect."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" " + green("--exclude") + " " + turquoise("ATOMS"))
- desc = "A space separated list of package names or slot atoms. " + \
- "Emerge won't install any ebuild or binary package that " + \
- "matches any of the given package atoms."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" " + green("--fail-clean") + " [ %s | %s ]" % \
- (turquoise("y"), turquoise("n")))
- desc = "Clean up temporary files after a build failure. This is " + \
- "particularly useful if you have PORTAGE_TMPDIR on " + \
- "tmpfs. If this option is enabled, you probably also want " + \
- "to enable PORT_LOGDIR (see make.conf(5)) in " + \
- "order to save the build log."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" "+green("--fetchonly")+" ("+green("-f")+" short option)")
- print(" Instead of doing any package building, just perform fetches for")
- print(" all packages (main package as well as all dependencies.) When")
- print(" used in combination with --pretend all the SRC_URIs will be")
- print(" displayed multiple mirrors per line, one line per file.")
- print()
- print(" "+green("--fetch-all-uri")+" ("+green("-F")+" short option)")
- print(" Same as --fetchonly except that all package files, including those")
- print(" not required to build the package, will be processed.")
- print()
- print(" " + green("--getbinpkg") + \
- " [ %s | %s ] (%s short option)" % \
- (turquoise("y"), turquoise("n"), green("-g")))
- print(" Using the server and location defined in PORTAGE_BINHOST, portage")
- print(" will download the information from each binary file there and it")
- print(" will use that information to help build the dependency list. This")
- print(" option implies '-k'. (Use -gK for binary-only merging.)")
- print()
- print(" " + green("--getbinpkgonly") + \
- " [ %s | %s ] (%s short option)" % \
- (turquoise("y"), turquoise("n"), green("-G")))
- print(" This option is identical to -g, as above, except it will not use")
- print(" ANY information from the local machine. All binaries will be")
- print(" downloaded from the remote server without consulting packages")
- print(" existing in the packages directory.")
- print()
- print(" " + green("--jobs") + " " + turquoise("[JOBS]") + " ("+green("-j")+" short option)")
- desc = "Specifies the number of packages " + \
- "to build simultaneously. If this option is " + \
- "given without an argument, emerge will not " + \
- "limit the number of jobs that " + \
- "can run simultaneously. Also see " + \
- "the related --load-average option. " + \
- "Note that interactive packages currently force a setting " + \
- "of --jobs=1. This issue can be temporarily avoided " + \
- "by specifying --accept-properties=-interactive."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" " + green("--keep-going") + " [ %s | %s ]" % \
- (turquoise("y"), turquoise("n")))
- desc = "Continue as much as possible after " + \
- "an error. When an error occurs, " + \
- "dependencies are recalculated for " + \
- "remaining packages and any with " + \
- "unsatisfied dependencies are " + \
- "automatically dropped. Also see " + \
- "the related --skipfirst option."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" " + green("--load-average") + " " + turquoise("LOAD"))
- desc = "Specifies that no new builds should " + \
- "be started if there are other builds " + \
- "running and the load average is at " + \
- "least LOAD (a floating-point number). " + \
- "This option is recommended for use " + \
- "in combination with --jobs in " + \
- "order to avoid excess load. See " + \
- "make(1) for information about " + \
- "analogous options that should be " + \
- "configured via MAKEOPTS in " + \
- "make.conf(5)."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" " + green("--misspell-suggestions") + " < %s | %s >" % \
- (turquoise("y"), turquoise("n")))
- desc = "Enable or disable misspell suggestions. By default, " + \
- "emerge will show a list of packages with similar names " + \
- "when a package doesn't exist. The EMERGE_DEFAULT_OPTS " + \
- "variable may be used to disable this option by default"
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" "+green("--newuse")+" ("+green("-N")+" short option)")
- desc = "Tells emerge to include installed packages where USE " + \
- "flags have changed since compilation. This option " + \
- "also implies the --selective option. If you would " + \
- "like to skip rebuilds for which disabled flags have " + \
- "been added to or removed from IUSE, see the related " + \
- "--reinstall=changed-use option."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" "+green("--noconfmem"))
- print(" Portage keeps track of files that have been placed into")
- print(" CONFIG_PROTECT directories, and normally it will not merge the")
- print(" same file more than once, as that would become annoying. This")
- print(" can lead to problems when the user wants the file in the case")
- print(" of accidental deletion. With this option, files will always be")
- print(" merged to the live fs instead of silently dropped.")
- print()
- print(" "+green("--nodeps")+" ("+green("-O")+" short option)")
- print(" Merge specified packages, but don't merge any dependencies.")
- print(" Note that the build may fail if deps aren't satisfied.")
- print()
- print(" "+green("--noreplace")+" ("+green("-n")+" short option)")
- print(" Skip the packages specified on the command-line that have")
- print(" already been installed. Without this option, any packages,")
- print(" ebuilds, or deps you specify on the command-line *will* cause")
- print(" Portage to remerge the package, even if it is already installed.")
- print(" Note that Portage won't remerge dependencies by default.")
- print()
- print(" "+green("--nospinner"))
- print(" Disables the spinner regardless of terminal type.")
- print()
- print(" " + green("--usepkg-exclude") + " " + turquoise("ATOMS"))
- desc = "A space separated list of package names or slot atoms." + \
- " Emerge will ignore matching binary packages."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" " + green("--rebuild-exclude") + " " + turquoise("ATOMS"))
- desc = "A space separated list of package names or slot atoms." + \
- " Emerge will not rebuild matching packages due to --rebuild."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" " + green("--rebuild-ignore") + " " + turquoise("ATOMS"))
- desc = "A space separated list of package names or slot atoms." + \
- " Emerge will not rebuild packages that depend on matching " + \
- " packages due to --rebuild."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" "+green("--oneshot")+" ("+green("-1")+" short option)")
- print(" Emerge as normal, but don't add packages to the world profile.")
- print(" This package will only be updated if it is depended upon by")
- print(" another package.")
- print()
- print(" "+green("--onlydeps")+" ("+green("-o")+" short option)")
- print(" Only merge (or pretend to merge) the dependencies of the")
- print(" specified packages, not the packages themselves.")
- print()
- print(" " + green("--package-moves") + " [ %s | %s ]" % \
- (turquoise("y"), turquoise("n")))
- desc = "Perform package moves when necessary. This option " + \
- "is enabled by default. WARNING: This option " + \
- "should remain enabled under normal circumstances. " + \
- "Do not disable it unless you know what you are " + \
- "doing."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" "+green("--pretend")+" ("+green("-p")+" short option)")
- print(" Instead of actually performing the merge, simply display what")
- print(" ebuilds and tbz2s *would* have been installed if --pretend")
- print(" weren't used. Using --pretend is strongly recommended before")
- print(" installing an unfamiliar package. In the printout, N = new,")
- print(" U = updating, R = replacing, F = fetch restricted, B = blocked")
- print(" by an already installed package, D = possible downgrading,")
- print(" S = slotted install. --verbose causes affecting use flags to be")
- print(" printed out accompanied by a '+' for enabled and a '-' for")
- print(" disabled USE flags.")
- print()
- print(" " + green("--quiet") + \
- " [ %s | %s ] (%s short option)" % \
- (turquoise("y"), turquoise("n"), green("-q")))
- print(" Effects vary, but the general outcome is a reduced or condensed")
- print(" output from portage's displays.")
- print()
- print(" " + green("--quiet-build") + \
- " [ %s | %s ]" % (turquoise("y"), turquoise("n")))
- desc = "Redirect all build output to logs alone, and do not " + \
- "display it on stdout."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" "+green("--quiet-unmerge-warn"))
- desc = "Disable the warning message that's shown prior to " + \
- "--unmerge actions. This option is intended " + \
- "to be set in the make.conf(5) " + \
- "EMERGE_DEFAULT_OPTS variable."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" " + green("--rebuild-if-new-rev") + " [ %s | %s ]" % \
- (turquoise("y"), turquoise("n")))
- desc = "Rebuild packages when dependencies that are " + \
- "used at both build-time and run-time are built, " + \
- "if the dependency is not already installed with the " + \
- "same version and revision."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" " + green("--rebuild-if-new-ver") + " [ %s | %s ]" % \
- (turquoise("y"), turquoise("n")))
- desc = "Rebuild packages when dependencies that are " + \
- "used at both build-time and run-time are built, " + \
- "if the dependency is not already installed with the " + \
- "same version. Revision numbers are ignored."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" " + green("--rebuild-if-unbuilt") + " [ %s | %s ]" % \
- (turquoise("y"), turquoise("n")))
- desc = "Rebuild packages when dependencies that are " + \
- "used at both build-time and run-time are built."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" " + green("--rebuilt-binaries") + " [ %s | %s ]" % \
- (turquoise("y"), turquoise("n")))
- desc = "Replace installed packages with binary packages that have " + \
- "been rebuilt. Rebuilds are detected by comparison of " + \
- "BUILD_TIME package metadata. This option is enabled " + \
- "automatically when using binary packages " + \
- "(--usepkgonly or --getbinpkgonly) together with " + \
- "--update and --deep."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" "+green("--rebuilt-binaries-timestamp") + "=%s" % turquoise("TIMESTAMP"))
- desc = "This option modifies emerge's behaviour only if " + \
- "--rebuilt-binaries is given. Only binaries that " + \
- "have a BUILD_TIME that is larger than the given TIMESTAMP " + \
- "and that is larger than that of the installed package will " + \
- "be considered by the rebuilt-binaries logic."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" "+green("--reinstall ") + turquoise("changed-use"))
- print(" Tells emerge to include installed packages where USE flags have")
- print(" changed since installation. Unlike --newuse, this option does")
- print(" not trigger reinstallation when flags that the user has not")
- print(" enabled are added or removed.")
- print()
- print(" " + green("--reinstall-atoms") + " " + turquoise("ATOMS"))
- desc = "A space separated list of package names or slot atoms. " + \
- "Emerge will treat matching packages as if they are not " + \
- "installed, and reinstall them if necessary."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" "+green("--root=DIR"))
- desc = "Set the ROOT environment variable " + \
- "which is documented in the emerge(1) man page."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" "+green("--root-deps[=rdeps]"))
- desc = "If no argument is given then build-time dependencies of packages for " + \
- "ROOT are installed to " + \
- "ROOT instead of /. If the rdeps argument is given then discard " + \
- "all build-time dependencies of packages for ROOT. This option is " + \
- "only meaningful when used together with ROOT and it should not " + \
- "be enabled under normal circumstances. For currently supported " + \
- "EAPI values, the build-time dependencies are specified in the " + \
- "DEPEND variable. However, behavior may change for new " + \
- "EAPIs when related extensions are added in the future."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" " + green("--select") + " [ %s | %s ]" % \
- (turquoise("y"), turquoise("n")))
- desc = "Add specified packages to the world set (inverse of " + \
- "--oneshot). This is useful if you want to " + \
- "use EMERGE_DEFAULT_OPTS to make " + \
- "--oneshot behavior default."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" " + green("--selective") + " [ %s | %s ]" % \
- (turquoise("y"), turquoise("n")))
- desc = "This identical to the --noreplace option. " + \
- "Some options, such as --update, imply --selective. " + \
- "Use --selective=n if you want to forcefully disable " + \
- "--selective, regardless of options like --update."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" "+green("--skipfirst"))
- desc = "This option is only valid when " + \
- "used with --resume. It removes the " + \
- "first package in the resume list. " + \
- "Dependencies are recalculated for " + \
- "remaining packages and any that " + \
- "have unsatisfied dependencies or are " + \
- "masked will be automatically dropped. " + \
- "Also see the related " + \
- "--keep-going option."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" "+green("--tree")+" ("+green("-t")+" short option)")
- print(" Shows the dependency tree using indentation for dependencies.")
- print(" The packages are also listed in reverse merge order so that")
- print(" a package's dependencies follow the package. Only really useful")
- print(" in combination with --emptytree, --update or --deep.")
- print()
- print(" " + green("--unordered-display"))
- desc = "By default the displayed merge list is sorted using the " + \
- "order in which the packages will be merged. When " + \
- "--tree is used together with this option, this " + \
- "constraint is removed, hopefully leading to a more " + \
- "readable dependency tree."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" "+green("--update")+" ("+green("-u")+" short option)")
- desc = "Updates packages to the best version available, which may " + \
- "not always be the highest version number due to masking " + \
- "for testing and development. Package atoms specified on " + \
- "the command line are greedy, meaning that unspecific " + \
- "atoms may match multiple versions of slotted packages."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" " + green("--use-ebuild-visibility") + " [ %s | %s ]" % \
- (turquoise("y"), turquoise("n")))
- desc = "Use unbuilt ebuild metadata for visibility " + \
- "checks on built packages."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" " + green("--useoldpkg-atoms") + " " + turquoise("ATOMS"))
- desc = "A space separated list of package names or slot atoms." + \
- " Emerge will prefer matching binary packages over newer" + \
- " unbuilt packages."
- for line in wrap(desc, desc_width):
- print(desc_indent + line)
- print()
- print(" " + green("--usepkg") + \
- " [ %s | %s ] (%s short option)" % \
- (turquoise("y"), turquoise("n"), green("-k")))
- print(" Tell emerge to use binary packages (from $PKGDIR) if they are")
- print(" available, thus possibly avoiding some time-consuming compiles.")
- print(" This option is useful for CD installs; you can export")
- print(" PKGDIR=/mnt/cdrom/packages and then use this option to have")
- print(" emerge \"pull\" binary packages from the CD in order to satisfy")
- print(" dependencies.")
- print()
- print(" " + green("--usepkgonly") + \
- " [ %s | %s ] (%s short option)" % \
- (turquoise("y"), turquoise("n"), green("-K")))
- print(" Like --usepkg above, except this only allows the use of binary")
- print(" packages, and it will abort the emerge if the package is not")
- print(" available at the time of dependency calculation.")
- print()
- print(" "+green("--verbose")+" ("+green("-v")+" short option)")
- print(" Effects vary, but the general outcome is an increased or expanded")
- print(" display of content in portage's displays.")
- print()
- print(" "+green("--with-bdeps")+" < " + turquoise("y") + " | "+ turquoise("n")+" >")
- print(" In dependency calculations, pull in build time dependencies that")
- print(" are not strictly required. This defaults to 'n' for installation")
- print(" actions and 'y' for the --depclean action. This setting can be")
- print(" added to EMERGE_DEFAULT_OPTS (see make.conf(5)) and later")
- print(" overridden via the command line.")
- print()
+ print()
+ print(" For more help consult the man page.")
diff --git a/portage_with_autodep/pym/_emerge/help.pyo b/portage_with_autodep/pym/_emerge/help.pyo
new file mode 100644
index 0000000..f6fea4e
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/help.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/is_valid_package_atom.pyo b/portage_with_autodep/pym/_emerge/is_valid_package_atom.pyo
new file mode 100644
index 0000000..20edc85
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/is_valid_package_atom.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/main.py b/portage_with_autodep/pym/_emerge/main.py
index 2830214..c52a3ea 100644
--- a/portage_with_autodep/pym/_emerge/main.py
+++ b/portage_with_autodep/pym/_emerge/main.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -6,10 +6,14 @@ from __future__ import print_function
import logging
import signal
import stat
+import subprocess
import sys
import textwrap
import platform
import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.news:count_unread_news,display_news_notifications',
+)
from portage import os
from portage import _encodings
from portage import _unicode_decode
@@ -28,7 +32,8 @@ import portage.exception
from portage.data import secpass
from portage.dbapi.dep_expand import dep_expand
from portage.util import normalize_path as normpath
-from portage.util import shlex_split, writemsg_level, writemsg_stdout
+from portage.util import (shlex_split, varexpand,
+ writemsg_level, writemsg_stdout)
from portage._sets import SETPREFIX
from portage._global_updates import _global_updates
@@ -62,6 +67,7 @@ options=[
"--nodeps", "--noreplace",
"--nospinner", "--oneshot",
"--onlydeps", "--pretend",
+"--quiet-repo-display",
"--quiet-unmerge-warn",
"--resume",
"--searchdesc",
@@ -70,6 +76,7 @@ options=[
"--unordered-display",
"--update",
"--verbose",
+"--verbose-main-repo-display",
]
shortmapping={
@@ -92,6 +99,21 @@ shortmapping={
"v":"--verbose", "V":"--version"
}
+COWSAY_MOO = """
+
+ Larry loves Gentoo (%s)
+
+ _______________________
+< Have you mooed today? >
+ -----------------------
+        \   ^__^
+         \  (oo)\_______
+            (__)\       )\/\
+                ||----w |
+                ||     ||
+
+"""
+
def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
if os.path.exists("/usr/bin/install-info"):
@@ -158,11 +180,21 @@ def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
raise
del e
processed_count += 1
- myso = portage.subprocess_getstatusoutput(
- "LANG=C LANGUAGE=C /usr/bin/install-info " +
- "--dir-file=%s/dir %s/%s" % (inforoot, inforoot, x))[1]
+ try:
+ proc = subprocess.Popen(
+ ['/usr/bin/install-info',
+ '--dir-file=%s' % os.path.join(inforoot, "dir"),
+ os.path.join(inforoot, x)],
+ env=dict(os.environ, LANG="C", LANGUAGE="C"),
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ myso = None
+ else:
+ myso = _unicode_decode(
+ proc.communicate()[0]).rstrip("\n")
+ proc.wait()
existsstr="already exists, for file `"
- if myso!="":
+ if myso:
if re.search(existsstr,myso):
# Already exists... Don't increment the count for this.
pass
@@ -233,7 +265,6 @@ def display_preserved_libs(vardbapi, myopts):
linkmap = vardbapi._linkmap
consumer_map = {}
owners = {}
- linkmap_broken = False
try:
linkmap.rebuild()
@@ -241,7 +272,6 @@ def display_preserved_libs(vardbapi, myopts):
writemsg_level("!!! Command Not Found: %s\n" % (e,),
level=logging.ERROR, noiselevel=-1)
del e
- linkmap_broken = True
else:
search_for_owners = set()
for cpv in plibdata:
@@ -315,7 +345,7 @@ def post_emerge(myaction, myopts, myfiles,
@type myopts: dict
@param myfiles: emerge arguments
@type myfiles: list
- @param target_root: The target ROOT for myaction
+ @param target_root: The target EROOT for myaction
@type target_root: String
@param trees: A dictionary mapping each ROOT to its package databases
@type trees: dict
@@ -326,7 +356,7 @@ def post_emerge(myaction, myopts, myfiles,
"""
root_config = trees[target_root]["root_config"]
- vardbapi = trees[target_root]["vartree"].dbapi
+ vardbapi = trees[target_root]['vartree'].dbapi
settings = vardbapi.settings
info_mtimes = mtimedb["info"]
@@ -351,7 +381,9 @@ def post_emerge(myaction, myopts, myfiles,
_flush_elog_mod_echo()
if not vardbapi._pkgs_changed:
- display_news_notification(root_config, myopts)
+ # GLEP 42 says to display news *after* an emerge --pretend
+ if "--pretend" in myopts:
+ display_news_notification(root_config, myopts)
# If vdb state has not changed then there's nothing else to do.
return
@@ -372,11 +404,10 @@ def post_emerge(myaction, myopts, myfiles,
if vdb_lock:
vardbapi.unlock()
+ display_preserved_libs(vardbapi, myopts)
chk_updated_cfg_files(settings['EROOT'], config_protect)
display_news_notification(root_config, myopts)
- if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
- display_preserved_libs(vardbapi, myopts)
postemerge = os.path.join(settings["PORTAGE_CONFIGROOT"],
portage.USER_CONFIG_PATH, "bin", "post_emerge")
@@ -388,6 +419,8 @@ def post_emerge(myaction, myopts, myfiles,
" %s spawn failed of %s\n" % (bad("*"), postemerge,),
level=logging.ERROR, noiselevel=-1)
+ clean_logs(settings)
+
if "--quiet" not in myopts and \
myaction is None and "@world" in myfiles:
show_depclean_suggestion()
@@ -428,6 +461,8 @@ def insert_optional_args(args):
default_arg_opts = {
'--ask' : y_or_n,
'--autounmask' : y_or_n,
+ '--autounmask-keep-masks': y_or_n,
+ '--autounmask-unrestricted-atoms' : y_or_n,
'--autounmask-write' : y_or_n,
'--buildpkg' : y_or_n,
'--complete-graph' : y_or_n,
@@ -551,19 +586,25 @@ def insert_optional_args(args):
return new_args
-def _find_bad_atoms(atoms):
+def _find_bad_atoms(atoms, less_strict=False):
+ """
+ Declares all atoms as invalid that have an operator,
+ a use dependency, a blocker or a repo spec.
+ It accepts atoms with wildcards.
+ In less_strict mode it accepts operators and repo specs.
+ """
bad_atoms = []
for x in ' '.join(atoms).split():
bad_atom = False
try:
- atom = portage.dep.Atom(x, allow_wildcard=True)
+ atom = portage.dep.Atom(x, allow_wildcard=True, allow_repo=less_strict)
except portage.exception.InvalidAtom:
try:
- atom = portage.dep.Atom("*/"+x, allow_wildcard=True)
+ atom = portage.dep.Atom("*/"+x, allow_wildcard=True, allow_repo=less_strict)
except portage.exception.InvalidAtom:
bad_atom = True
- if bad_atom or atom.operator or atom.blocker or atom.use:
+ if bad_atom or (atom.operator and not less_strict) or atom.blocker or atom.use:
bad_atoms.append(x)
return bad_atoms
@@ -573,16 +614,15 @@ def parse_opts(tmpcmdline, silent=False):
myopts = {}
myfiles=[]
- global options, shortmapping
-
actions = frozenset([
- "clean", "config", "depclean", "help",
- "info", "list-sets", "metadata",
+ "clean", "check-news", "config", "depclean", "help",
+ "info", "list-sets", "metadata", "moo",
"prune", "regen", "search",
"sync", "unmerge", "version",
])
longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
+ y_or_n = ("y", "n")
true_y_or_n = ("True", "y", "n")
true_y = ("True", "y")
argument_options = {
@@ -600,6 +640,18 @@ def parse_opts(tmpcmdline, silent=False):
"choices" : true_y_or_n
},
+ "--autounmask-unrestricted-atoms": {
+ "help" : "write autounmask changes with >= atoms if possible",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ },
+
+ "--autounmask-keep-masks": {
+ "help" : "don't add package.unmask entries",
+ "type" : "choice",
+ "choices" : true_y_or_n
+ },
+
"--autounmask-write": {
"help" : "write changes made by --autounmask to disk",
"type" : "choice",
@@ -626,6 +678,14 @@ def parse_opts(tmpcmdline, silent=False):
"choices" : true_y_or_n
},
+ "--buildpkg-exclude": {
+ "help" :"A space separated list of package atoms for which " + \
+ "no binary packages should be built. This option overrides all " + \
+ "possible ways to enable building of binary packages.",
+
+ "action" : "append"
+ },
+
"--config-root": {
"help":"specify the location for portage configuration files",
"action":"store"
@@ -642,6 +702,12 @@ def parse_opts(tmpcmdline, silent=False):
"choices" : true_y_or_n
},
+ "--complete-graph-if-new-ver": {
+ "help" : "trigger --complete-graph behavior if an installed package version will change (upgrade or downgrade)",
+ "type" : "choice",
+ "choices" : y_or_n
+ },
+
"--deep": {
"shortopt" : "-D",
@@ -660,6 +726,12 @@ def parse_opts(tmpcmdline, silent=False):
"choices" : true_y_or_n
},
+ "--dynamic-deps": {
+ "help": "substitute the dependencies of installed packages with the dependencies of unbuilt ebuilds",
+ "type": "choice",
+ "choices": y_or_n
+ },
+
"--exclude": {
"help" :"A space separated list of package names or slot atoms. " + \
"Emerge won't install any ebuild or binary package that " + \
@@ -784,7 +856,7 @@ def parse_opts(tmpcmdline, silent=False):
"--quiet-build": {
"help" : "redirect build output to logs",
"type" : "choice",
- "choices" : true_y_or_n
+ "choices" : true_y_or_n,
},
"--rebuild-if-new-rev": {
@@ -923,13 +995,23 @@ def parse_opts(tmpcmdline, silent=False):
if myoptions.autounmask in true_y:
myoptions.autounmask = True
+ if myoptions.autounmask_unrestricted_atoms in true_y:
+ myoptions.autounmask_unrestricted_atoms = True
+
+ if myoptions.autounmask_keep_masks in true_y:
+ myoptions.autounmask_keep_masks = True
+
if myoptions.autounmask_write in true_y:
myoptions.autounmask_write = True
if myoptions.buildpkg in true_y:
myoptions.buildpkg = True
- else:
- myoptions.buildpkg = None
+
+ if myoptions.buildpkg_exclude:
+ bad_atoms = _find_bad_atoms(myoptions.buildpkg_exclude, less_strict=True)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --buildpkg-exclude parameter: '%s'\n" % \
+ (",".join(bad_atoms),))
if myoptions.changed_use is not False:
myoptions.reinstall = "changed-use"
@@ -938,10 +1020,11 @@ def parse_opts(tmpcmdline, silent=False):
if myoptions.deselect in true_y:
myoptions.deselect = True
- if myoptions.binpkg_respect_use in true_y:
- myoptions.binpkg_respect_use = True
- else:
- myoptions.binpkg_respect_use = None
+ if myoptions.binpkg_respect_use is not None:
+ if myoptions.binpkg_respect_use in true_y:
+ myoptions.binpkg_respect_use = 'y'
+ else:
+ myoptions.binpkg_respect_use = 'n'
if myoptions.complete_graph in true_y:
myoptions.complete_graph = True
@@ -1015,9 +1098,7 @@ def parse_opts(tmpcmdline, silent=False):
myoptions.quiet = None
if myoptions.quiet_build in true_y:
- myoptions.quiet_build = True
- else:
- myoptions.quiet_build = None
+ myoptions.quiet_build = 'y'
if myoptions.rebuild_if_new_ver in true_y:
myoptions.rebuild_if_new_ver = True
@@ -1172,8 +1253,7 @@ def parse_opts(tmpcmdline, silent=False):
if myaction is None and myoptions.deselect is True:
myaction = 'deselect'
- if myargs and sys.hexversion < 0x3000000 and \
- not isinstance(myargs[0], unicode):
+ if myargs and isinstance(myargs[0], bytes):
for i in range(len(myargs)):
myargs[i] = portage._unicode_decode(myargs[i])
@@ -1222,7 +1302,6 @@ def ionice(settings):
if not ionice_cmd:
return
- from portage.util import varexpand
variables = {"PID" : str(os.getpid())}
cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
@@ -1238,6 +1317,35 @@ def ionice(settings):
out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
+def clean_logs(settings):
+
+ if "clean-logs" not in settings.features:
+ return
+
+ clean_cmd = settings.get("PORT_LOGDIR_CLEAN")
+ if clean_cmd:
+ clean_cmd = shlex_split(clean_cmd)
+ if not clean_cmd:
+ return
+
+ logdir = settings.get("PORT_LOGDIR")
+ if logdir is None or not os.path.isdir(logdir):
+ return
+
+ variables = {"PORT_LOGDIR" : logdir}
+ cmd = [varexpand(x, mydict=variables) for x in clean_cmd]
+
+ try:
+ rval = portage.process.spawn(cmd, env=os.environ)
+ except portage.exception.CommandNotFound:
+ rval = 127
+
+ if rval != os.EX_OK:
+ out = portage.output.EOutput()
+ out.eerror("PORT_LOGDIR_CLEAN returned %d" % (rval,))
+ out.eerror("See the make.conf(5) man page for "
+ "PORT_LOGDIR_CLEAN usage instructions.")
+
def setconfig_fallback(root_config):
from portage._sets.base import DummyPackageSet
from portage._sets.files import WorldSelectedSet
@@ -1451,25 +1559,26 @@ def repo_name_duplicate_check(trees):
def config_protect_check(trees):
for root, root_trees in trees.items():
- if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
+ settings = root_trees["root_config"].settings
+ if not settings.get("CONFIG_PROTECT"):
msg = "!!! CONFIG_PROTECT is empty"
- if root != "/":
+ if settings["ROOT"] != "/":
msg += " for '%s'" % root
msg += "\n"
writemsg_level(msg, level=logging.WARN, noiselevel=-1)
def profile_check(trees, myaction):
- if myaction in ("help", "info", "sync", "version"):
+ if myaction in ("help", "info", "search", "sync", "version"):
return os.EX_OK
- for root, root_trees in trees.items():
+ for root_trees in trees.values():
if root_trees["root_config"].settings.profiles:
continue
# generate some profile related warning messages
validate_ebuild_environment(trees)
- msg = "If you have just changed your profile configuration, you " + \
- "should revert back to the previous configuration. Due to " + \
- "your current profile being invalid, allowed actions are " + \
- "limited to --help, --info, --sync, and --version."
+ msg = ("Your current profile is invalid. If you have just changed "
+ "your profile configuration, you should revert back to the "
+ "previous configuration. Allowed actions are limited to "
+ "--help, --info, --search, --sync, and --version.")
writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
level=logging.ERROR, noiselevel=-1)
return 1
@@ -1515,7 +1624,7 @@ def emerge_main(args=None):
# Portage needs to ensure a sane umask for the files it creates.
os.umask(0o22)
settings, trees, mtimedb = load_emerge_config()
- portdb = trees[settings["ROOT"]]["porttree"].dbapi
+ portdb = trees[settings['EROOT']]['porttree'].dbapi
rval = profile_check(trees, myaction)
if rval != os.EX_OK:
return rval
@@ -1526,13 +1635,14 @@ def emerge_main(args=None):
tmpcmdline.extend(args)
myaction, myopts, myfiles = parse_opts(tmpcmdline)
- if myaction not in ('help', 'info', 'version') and \
+ # skip global updates prior to sync, since it's called after sync
+ if myaction not in ('help', 'info', 'sync', 'version') and \
myopts.get('--package-moves') != 'n' and \
_global_updates(trees, mtimedb["updates"], quiet=("--quiet" in myopts)):
mtimedb.commit()
# Reload the whole config from scratch.
settings, trees, mtimedb = load_emerge_config(trees=trees)
- portdb = trees[settings["ROOT"]]["porttree"].dbapi
+ portdb = trees[settings['EROOT']]['porttree'].dbapi
xterm_titles = "notitles" not in settings.features
if xterm_titles:
@@ -1543,19 +1653,24 @@ def emerge_main(args=None):
# Reload the whole config from scratch so that the portdbapi internal
# config is updated with new FEATURES.
settings, trees, mtimedb = load_emerge_config(trees=trees)
- portdb = trees[settings["ROOT"]]["porttree"].dbapi
+ portdb = trees[settings['EROOT']]['porttree'].dbapi
+
+ # NOTE: adjust_configs() can map options to FEATURES, so any relevant
+ # options adjustments should be made prior to calling adjust_configs().
+ if "--buildpkgonly" in myopts:
+ myopts["--buildpkg"] = True
adjust_configs(myopts, trees)
apply_priorities(settings)
if myaction == 'version':
writemsg_stdout(getportageversion(
- settings["PORTDIR"], settings["ROOT"],
+ settings["PORTDIR"], None,
settings.profile_path, settings["CHOST"],
- trees[settings["ROOT"]]["vartree"].dbapi) + '\n', noiselevel=-1)
+ trees[settings['EROOT']]['vartree'].dbapi) + '\n', noiselevel=-1)
return 0
elif myaction == 'help':
- _emerge.help.help(myopts, portage.output.havecolor)
+ _emerge.help.help()
return 0
spinner = stdout_spinner()
@@ -1587,9 +1702,6 @@ def emerge_main(args=None):
if "--usepkgonly" in myopts:
myopts["--usepkg"] = True
- if "buildpkg" in settings.features or "--buildpkgonly" in myopts:
- myopts["--buildpkg"] = True
-
if "--buildpkgonly" in myopts:
# --buildpkgonly will not merge anything, so
# it cancels all binary package options.
@@ -1613,20 +1725,11 @@ def emerge_main(args=None):
del mytrees, mydb
if "moo" in myfiles:
- print("""
-
- Larry loves Gentoo (""" + platform.system() + """)
-
- _______________________
-< Have you mooed today? >
- -----------------------
-        \   ^__^
-         \  (oo)\_______
-            (__)\       )\/\
-                ||----w |
-                ||     ||
-
-""")
+ print(COWSAY_MOO % platform.system())
+ msg = ("The above `emerge moo` display is deprecated. "
+ "Please use `emerge --moo` instead.")
+ for line in textwrap.wrap(msg, 50):
+ print(" %s %s" % (colorize("WARN", "*"), line))
for x in myfiles:
ext = os.path.splitext(x)[1]
@@ -1634,10 +1737,22 @@ def emerge_main(args=None):
print(colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n"))
break
- root_config = trees[settings["ROOT"]]["root_config"]
- if myaction == "list-sets":
+ root_config = trees[settings['EROOT']]['root_config']
+ if myaction == "moo":
+ print(COWSAY_MOO % platform.system())
+ return os.EX_OK
+ elif myaction == "list-sets":
writemsg_stdout("".join("%s\n" % s for s in sorted(root_config.sets)))
return os.EX_OK
+ elif myaction == "check-news":
+ news_counts = count_unread_news(
+ root_config.trees["porttree"].dbapi,
+ root_config.trees["vartree"].dbapi)
+ if any(news_counts.values()):
+ display_news_notifications(news_counts)
+ elif "--quiet" not in myopts:
+ print("", colorize("GOOD", "*"), "No news items were found.")
+ return os.EX_OK
ensure_required_sets(trees)
@@ -1703,7 +1818,7 @@ def emerge_main(args=None):
print("myopts", myopts)
if not myaction and not myfiles and "--resume" not in myopts:
- _emerge.help.help(myopts, portage.output.havecolor)
+ _emerge.help.help()
return 1
pretend = "--pretend" in myopts
@@ -1735,7 +1850,7 @@ def emerge_main(args=None):
portage_group_warning()
if userquery("Would you like to add --pretend to options?",
"--ask-enter-invalid" in myopts) == "No":
- return 1
+ return 128 + signal.SIGINT
myopts["--pretend"] = True
del myopts["--ask"]
else:
@@ -1753,7 +1868,11 @@ def emerge_main(args=None):
if x in myopts:
disable_emergelog = True
break
- if myaction in ("search", "info"):
+ if disable_emergelog:
+ pass
+ elif myaction in ("search", "info"):
+ disable_emergelog = True
+ elif portage.data.secpass < 1:
disable_emergelog = True
_emerge.emergelog._disable = disable_emergelog
@@ -1768,8 +1887,13 @@ def emerge_main(args=None):
"EMERGE_LOG_DIR='%s':\n!!! %s\n" % \
(settings['EMERGE_LOG_DIR'], e),
noiselevel=-1, level=logging.ERROR)
+ portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)
else:
_emerge.emergelog._emerge_log_dir = settings["EMERGE_LOG_DIR"]
+ else:
+ _emerge.emergelog._emerge_log_dir = os.path.join(os.sep,
+ settings["EPREFIX"].lstrip(os.sep), "var", "log")
+ portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)
if not "--pretend" in myopts:
emergelog(xterm_titles, "Started emerge on: "+\
@@ -1778,9 +1902,19 @@ def emerge_main(args=None):
encoding=_encodings['content'], errors='replace'))
myelogstr=""
if myopts:
- myelogstr=" ".join(myopts)
+ opt_list = []
+ for opt, arg in myopts.items():
+ if arg is True:
+ opt_list.append(opt)
+ elif isinstance(arg, list):
+ # arguments like --exclude that use 'append' action
+ for x in arg:
+ opt_list.append("%s=%s" % (opt, x))
+ else:
+ opt_list.append("%s=%s" % (opt, arg))
+ myelogstr=" ".join(opt_list)
if myaction:
- myelogstr+=" "+myaction
+ myelogstr += " --" + myaction
if myfiles:
myelogstr += " " + " ".join(oldargs)
emergelog(xterm_titles, " *** emerge " + myelogstr)
@@ -1824,7 +1958,7 @@ def emerge_main(args=None):
# SEARCH action
elif "search"==myaction:
validate_ebuild_environment(trees)
- action_search(trees[settings["ROOT"]]["root_config"],
+ action_search(trees[settings['EROOT']]['root_config'],
myopts, myfiles, spinner)
elif myaction in ('clean', 'depclean', 'deselect', 'prune', 'unmerge'):
@@ -1832,19 +1966,19 @@ def emerge_main(args=None):
rval = action_uninstall(settings, trees, mtimedb["ldpath"],
myopts, myaction, myfiles, spinner)
if not (myaction == 'deselect' or buildpkgonly or fetchonly or pretend):
- post_emerge(myaction, myopts, myfiles, settings["ROOT"],
+ post_emerge(myaction, myopts, myfiles, settings['EROOT'],
trees, mtimedb, rval)
return rval
elif myaction == 'info':
# Ensure atoms are valid before calling unmerge().
- vardb = trees[settings["ROOT"]]["vartree"].dbapi
- portdb = trees[settings["ROOT"]]["porttree"].dbapi
- bindb = trees[settings["ROOT"]]["bintree"].dbapi
+ vardb = trees[settings['EROOT']]['vartree'].dbapi
+ portdb = trees[settings['EROOT']]['porttree'].dbapi
+ bindb = trees[settings['EROOT']]["bintree"].dbapi
valid_atoms = []
for x in myfiles:
- if is_valid_package_atom(x):
+ if is_valid_package_atom(x, allow_repo=True):
try:
#look at the installed files first, if there is no match
#look at the ebuilds, since EAPI 4 allows running pkg_info
@@ -1900,11 +2034,12 @@ def emerge_main(args=None):
level=logging.ERROR, noiselevel=-1)
return 1
+ # GLEP 42 says to display news *after* an emerge --pretend
if "--pretend" not in myopts:
display_news_notification(root_config, myopts)
retval = action_build(settings, trees, mtimedb,
myopts, myaction, myfiles, spinner)
- post_emerge(myaction, myopts, myfiles, settings["ROOT"],
+ post_emerge(myaction, myopts, myfiles, settings['EROOT'],
trees, mtimedb, retval)
return retval
diff --git a/portage_with_autodep/pym/_emerge/main.pyo b/portage_with_autodep/pym/_emerge/main.pyo
new file mode 100644
index 0000000..aaeb5b9
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/main.pyo
Binary files differ
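
The reworked chk_updated_info_files() in main.py above drops portage.subprocess_getstatusoutput() in favor of a direct subprocess call. A minimal standalone sketch of the same technique, with assumed info directory and file names, looks roughly like this:

import os
import subprocess

def run_install_info(inforoot, info_file):
    # Force a C locale so install-info output is parseable, and merge stderr
    # into stdout, as the new code in chk_updated_info_files() does.
    try:
        proc = subprocess.Popen(
            ['/usr/bin/install-info',
            '--dir-file=%s' % os.path.join(inforoot, "dir"),
            os.path.join(inforoot, info_file)],
            env=dict(os.environ, LANG="C", LANGUAGE="C"),
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    except OSError:
        # install-info is not installed; the caller treats this as "no output".
        return None
    output = proc.communicate()[0].decode('utf-8', 'replace').rstrip("\n")
    proc.wait()
    return output

# Hypothetical usage:
# print(run_install_info("/usr/share/info", "make.info"))
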
diff --git a/portage_with_autodep/pym/_emerge/post_emerge.py b/portage_with_autodep/pym/_emerge/post_emerge.py
new file mode 100644
index 0000000..d5f1ba5
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/post_emerge.py
@@ -0,0 +1,165 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import logging
+import textwrap
+
+import portage
+from portage import os
+from portage.emaint.modules.logs.logs import CleanLogs
+from portage.news import count_unread_news, display_news_notifications
+from portage.output import colorize
+from portage.util._dyn_libs.display_preserved_libs import \
+ display_preserved_libs
+from portage.util._info_files import chk_updated_info_files
+
+from .chk_updated_cfg_files import chk_updated_cfg_files
+from .emergelog import emergelog
+from ._flush_elog_mod_echo import _flush_elog_mod_echo
+
+def clean_logs(settings):
+
+ if "clean-logs" not in settings.features:
+ return
+
+ logdir = settings.get("PORT_LOGDIR")
+ if logdir is None or not os.path.isdir(logdir):
+ return
+
+ cleanlogs = CleanLogs()
+ errors = cleanlogs.clean(settings=settings)
+ if errors:
+ out = portage.output.EOutput()
+ for msg in errors:
+ out.eerror(msg)
+
+def display_news_notification(root_config, myopts):
+ if "news" not in root_config.settings.features:
+ return
+ portdb = root_config.trees["porttree"].dbapi
+ vardb = root_config.trees["vartree"].dbapi
+ news_counts = count_unread_news(portdb, vardb)
+ display_news_notifications(news_counts)
+
+def show_depclean_suggestion():
+ out = portage.output.EOutput()
+ msg = "After world updates, it is important to remove " + \
+ "obsolete packages with emerge --depclean. Refer " + \
+ "to `man emerge` for more information."
+ for line in textwrap.wrap(msg, 72):
+ out.ewarn(line)
+
+def post_emerge(myaction, myopts, myfiles,
+ target_root, trees, mtimedb, retval):
+ """
+ Misc. things to run at the end of a merge session.
+
+ Update Info Files
+ Update Config Files
+ Update News Items
+ Commit mtimeDB
+ Display preserved libs warnings
+
+ @param myaction: The action returned from parse_opts()
+ @type myaction: String
+ @param myopts: emerge options
+ @type myopts: dict
+ @param myfiles: emerge arguments
+ @type myfiles: list
+ @param target_root: The target EROOT for myaction
+ @type target_root: String
+ @param trees: A dictionary mapping each ROOT to its package databases
+ @type trees: dict
+ @param mtimedb: The mtimeDB to store data needed across merge invocations
+ @type mtimedb: MtimeDB class instance
+ @param retval: Emerge's return value
+ @type retval: Int
+ """
+
+ root_config = trees[target_root]["root_config"]
+ vardbapi = trees[target_root]['vartree'].dbapi
+ settings = vardbapi.settings
+ info_mtimes = mtimedb["info"]
+
+ # Load the most current variables from ${ROOT}/etc/profile.env
+ settings.unlock()
+ settings.reload()
+ settings.regenerate()
+ settings.lock()
+
+ config_protect = portage.util.shlex_split(
+ settings.get("CONFIG_PROTECT", ""))
+ infodirs = settings.get("INFOPATH","").split(":") + \
+ settings.get("INFODIR","").split(":")
+
+ os.chdir("/")
+
+ if retval == os.EX_OK:
+ exit_msg = " *** exiting successfully."
+ else:
+ exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
+ emergelog("notitles" not in settings.features, exit_msg)
+
+ _flush_elog_mod_echo()
+
+ if not vardbapi._pkgs_changed:
+ # GLEP 42 says to display news *after* an emerge --pretend
+ if "--pretend" in myopts:
+ display_news_notification(root_config, myopts)
+ # If vdb state has not changed then there's nothing else to do.
+ return
+
+ vdb_path = os.path.join(root_config.settings['EROOT'], portage.VDB_PATH)
+ portage.util.ensure_dirs(vdb_path)
+ vdb_lock = None
+ if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
+ vardbapi.lock()
+ vdb_lock = True
+
+ if vdb_lock:
+ try:
+ if "noinfo" not in settings.features:
+ chk_updated_info_files(target_root,
+ infodirs, info_mtimes)
+ mtimedb.commit()
+ finally:
+ if vdb_lock:
+ vardbapi.unlock()
+
+ # Explicitly load and prune the PreservedLibsRegistry in order
+ # to ensure that we do not display stale data.
+ vardbapi._plib_registry.load()
+
+ if vardbapi._plib_registry.hasEntries():
+ if "--quiet" in myopts:
+ print()
+ print(colorize("WARN", "!!!") + " existing preserved libs found")
+ else:
+ print()
+ print(colorize("WARN", "!!!") + " existing preserved libs:")
+ display_preserved_libs(vardbapi)
+ print("Use " + colorize("GOOD", "emerge @preserved-rebuild") +
+ " to rebuild packages using these libraries")
+
+ chk_updated_cfg_files(settings['EROOT'], config_protect)
+
+ display_news_notification(root_config, myopts)
+
+ postemerge = os.path.join(settings["PORTAGE_CONFIGROOT"],
+ portage.USER_CONFIG_PATH, "bin", "post_emerge")
+ if os.access(postemerge, os.X_OK):
+ hook_retval = portage.process.spawn(
+ [postemerge], env=settings.environ())
+ if hook_retval != os.EX_OK:
+ portage.util.writemsg_level(
+ " %s spawn failed of %s\n" %
+ (colorize("BAD", "*"), postemerge,),
+ level=logging.ERROR, noiselevel=-1)
+
+ clean_logs(settings)
+
+ if "--quiet" not in myopts and \
+ myaction is None and "@world" in myfiles:
+ show_depclean_suggestion()
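
Both the new --check-news action in main.py and display_news_notification() in the new post_emerge.py rely on portage.news.count_unread_news(), which returns a mapping of repository name to unread item count. A minimal sketch of the decision logic, using hypothetical counts instead of real portdb/vardb instances (the printed lines are illustrative stand-ins, not the exact output of display_news_notifications()):

# Hypothetical counts; real values come from count_unread_news(portdb, vardb).
news_counts = {"gentoo": 2, "my-overlay": 0}

if any(news_counts.values()):
    # display_news_notifications(news_counts) prints a per-repository summary.
    for repo, count in sorted(news_counts.items()):
        if count:
            print(" * %d unread news item(s) for repository %r" % (count, repo))
else:
    print(" * No news items were found.")
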
diff --git a/portage_with_autodep/pym/_emerge/resolver/__init__.pyo b/portage_with_autodep/pym/_emerge/resolver/__init__.pyo
new file mode 100644
index 0000000..5c1b374
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/resolver/__init__.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/resolver/backtracking.py b/portage_with_autodep/pym/_emerge/resolver/backtracking.py
index dcdaee0..f2857b0 100644
--- a/portage_with_autodep/pym/_emerge/resolver/backtracking.py
+++ b/portage_with_autodep/pym/_emerge/resolver/backtracking.py
@@ -47,7 +47,7 @@ class BacktrackParameter(object):
self.reinstall_list == other.reinstall_list
-class _BacktrackNode:
+class _BacktrackNode(object):
__slots__ = (
"parameter", "depth", "mask_steps", "terminal",
@@ -84,6 +84,9 @@ class Backtracker(object):
Adds a newly computed backtrack parameter. Makes sure that it doesn't already exist and
that we don't backtrack deeper than we are allowed by --backtrack.
"""
+ if not self._check_runtime_pkg_mask(node.parameter.runtime_pkg_mask):
+ return
+
if node.mask_steps <= self._max_depth and node not in self._nodes:
if explore:
self._unexplored_nodes.append(node)
@@ -105,6 +108,28 @@ class Backtracker(object):
def __len__(self):
return len(self._unexplored_nodes)
+ def _check_runtime_pkg_mask(self, runtime_pkg_mask):
+ """
+ If a package that caused other packages to be masked gets masked
+ itself, we revert the masks for those other packages (bug 375573).
+ """
+
+ for pkg in runtime_pkg_mask:
+
+ if "missing dependency" in runtime_pkg_mask[pkg]:
+ continue
+
+ entry_is_valid = False
+
+ for ppkg, patom in runtime_pkg_mask[pkg].get("slot conflict", set()):
+ if ppkg not in runtime_pkg_mask:
+ entry_is_valid = True
+ break
+
+ if not entry_is_valid:
+ return False
+
+ return True
def _feedback_slot_conflict(self, conflict_data):
for pkg, parent_atoms in conflict_data:
diff --git a/portage_with_autodep/pym/_emerge/resolver/backtracking.pyo b/portage_with_autodep/pym/_emerge/resolver/backtracking.pyo
new file mode 100644
index 0000000..d989c15
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/resolver/backtracking.pyo
Binary files differ
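
The new Backtracker._check_runtime_pkg_mask() only keeps a backtrack node if every runtime mask entry is still justified: an entry is valid when at least one of the slot-conflict parents that caused it is not itself masked. A standalone sketch with hypothetical package names:

def check_runtime_pkg_mask(runtime_pkg_mask):
    # runtime_pkg_mask maps a package to the reasons it was masked,
    # e.g. {"slot conflict": {(parent_pkg, parent_atom), ...}}.
    for pkg, reasons in runtime_pkg_mask.items():
        if "missing dependency" in reasons:
            continue
        entry_is_valid = False
        for ppkg, patom in reasons.get("slot conflict", set()):
            if ppkg not in runtime_pkg_mask:
                entry_is_valid = True
                break
        if not entry_is_valid:
            return False
    return True

# 'cat/bar-1' was masked because of a slot conflict caused by 'cat/foo-1',
# but 'cat/foo-1' has since been masked as well, so the node is rejected
# and the earlier mask gets reverted (bug 375573):
mask = {
    "cat/foo-1": {"slot conflict": {("cat/baz-1", "cat/foo")}},
    "cat/bar-1": {"slot conflict": {("cat/foo-1", "cat/bar")}},
}
print(check_runtime_pkg_mask(mask))  # False
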
diff --git a/portage_with_autodep/pym/_emerge/resolver/circular_dependency.py b/portage_with_autodep/pym/_emerge/resolver/circular_dependency.py
index d113c5e..aca81fa 100644
--- a/portage_with_autodep/pym/_emerge/resolver/circular_dependency.py
+++ b/portage_with_autodep/pym/_emerge/resolver/circular_dependency.py
@@ -143,7 +143,8 @@ class circular_dependency_handler(object):
#If any of the flags we're going to touch is in REQUIRED_USE, add all
#other flags in REQUIRED_USE to affecting_use, to not lose any solution.
- required_use_flags = get_required_use_flags(parent.metadata["REQUIRED_USE"])
+ required_use_flags = get_required_use_flags(
+ parent.metadata.get("REQUIRED_USE", ""))
if affecting_use.intersection(required_use_flags):
# TODO: Find out exactly which REQUIRED_USE flags are
@@ -185,7 +186,7 @@ class circular_dependency_handler(object):
parent_atom not in reduced_dep:
#We found an assignment that removes the atom from 'dep'.
#Make sure it doesn't conflict with REQUIRED_USE.
- required_use = parent.metadata["REQUIRED_USE"]
+ required_use = parent.metadata.get("REQUIRED_USE", "")
if check_required_use(required_use, current_use, parent.iuse.is_valid_flag):
use = self.depgraph._pkg_use_enabled(parent)
diff --git a/portage_with_autodep/pym/_emerge/resolver/circular_dependency.pyo b/portage_with_autodep/pym/_emerge/resolver/circular_dependency.pyo
new file mode 100644
index 0000000..c1f95dc
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/resolver/circular_dependency.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/resolver/output.py b/portage_with_autodep/pym/_emerge/resolver/output.py
index 05e316a..1208bf9 100644
--- a/portage_with_autodep/pym/_emerge/resolver/output.py
+++ b/portage_with_autodep/pym/_emerge/resolver/output.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
"""Resolver output display operation.
@@ -13,14 +13,14 @@ import sys
from portage import os
from portage import _unicode_decode
from portage.dbapi.dep_expand import dep_expand
-from portage.const import PORTAGE_PACKAGE_ATOM
-from portage.dep import cpvequal, match_from_list
-from portage.exception import InvalidDependString
-from portage.output import ( blue, bold, colorize, create_color_func,
+from portage.dep import cpvequal, _repo_separator
+from portage.exception import InvalidDependString, SignatureException
+from portage.package.ebuild._spawn_nofetch import spawn_nofetch
+from portage.output import ( blue, colorize, create_color_func,
darkblue, darkgreen, green, nc_len, red, teal, turquoise, yellow )
bad = create_color_func("BAD")
-from portage.util import writemsg_stdout, writemsg_level
-from portage.versions import best, catpkgsplit, cpv_getkey
+from portage.util import writemsg_stdout
+from portage.versions import best, catpkgsplit
from _emerge.Blocker import Blocker
from _emerge.create_world_atom import create_world_atom
@@ -72,7 +72,7 @@ class Display(object):
"""Processes pkg for blockers and adds colorized strings to
self.print_msg and self.blockers
- @param pkg: _emerge.Package instance
+ @param pkg: _emerge.Package.Package instance
@param fetch_symbol: string
@rtype: bool
Modifies class globals: self.blocker_style, self.resolved,
@@ -121,7 +121,7 @@ class Display(object):
def _display_use(self, pkg, myoldbest, myinslotlist):
""" USE flag display
- @param pkg: _emerge.Package instance
+ @param pkg: _emerge.Package.Package instance
@param myoldbest: list of installed versions
@param myinslotlist: list of installed slots
Modifies class globals: self.forced_flags, self.cur_iuse,
@@ -161,7 +161,7 @@ class Display(object):
def gen_mask_str(self, pkg):
"""
- @param pkg: _emerge.Package instance
+ @param pkg: _emerge.Package.Package instance
"""
hardmasked = pkg.isHardMasked()
mask_str = " "
@@ -223,7 +223,7 @@ class Display(object):
""" Prevent USE_EXPAND_HIDDEN flags from being hidden if they
are the only thing that triggered reinstallation.
- @param pkg: _emerge.Package instance
+ @param pkg: _emerge.Package.Package instance
Modifies self.use_expand_hidden, self.use_expand, self.verboseadd
"""
reinst_flags_map = {}
@@ -302,68 +302,78 @@ class Display(object):
def verbose_size(self, pkg, repoadd_set, pkg_info):
"""Determines the size of the downloads required
- @param pkg: _emerge.Package instance
+ @param pkg: _emerge.Package.Package instance
@param repoadd_set: set of repos to add
@param pkg_info: dictionary
Modifies class globals: self.myfetchlist, self.counters.totalsize,
self.verboseadd, repoadd_set.
"""
mysize = 0
- if pkg.type_name == "ebuild" and pkg_info.merge:
+ if pkg.type_name in ("binary", "ebuild") and pkg_info.merge:
+ db = pkg.root_config.trees[
+ pkg.root_config.pkg_tree_map[pkg.type_name]].dbapi
+ kwargs = {}
+ if pkg.type_name == "ebuild":
+ kwargs["useflags"] = pkg_info.use
+ kwargs["myrepo"] = pkg.repo
+ myfilesdict = None
try:
- myfilesdict = self.portdb.getfetchsizes(pkg.cpv,
- useflags=pkg_info.use, myrepo=pkg.repo)
+ myfilesdict = db.getfetchsizes(pkg.cpv, **kwargs)
except InvalidDependString as e:
# FIXME: validate SRC_URI earlier
- depstr, = self.portdb.aux_get(pkg.cpv,
+ depstr, = db.aux_get(pkg.cpv,
["SRC_URI"], myrepo=pkg.repo)
show_invalid_depstring_notice(
pkg, depstr, str(e))
raise
+ except SignatureException:
+ # missing/invalid binary package SIZE signature
+ pass
if myfilesdict is None:
myfilesdict = "[empty/missing/bad digest]"
else:
for myfetchfile in myfilesdict:
if myfetchfile not in self.myfetchlist:
mysize += myfilesdict[myfetchfile]
- self.myfetchlist.append(myfetchfile)
+ self.myfetchlist.add(myfetchfile)
if pkg_info.ordered:
self.counters.totalsize += mysize
self.verboseadd += _format_size(mysize)
- # overlay verbose
- # assign index for a previous version in the same slot
- slot_matches = self.vardb.match(pkg.slot_atom)
- if slot_matches:
- repo_name_prev = self.vardb.aux_get(slot_matches[0],
- ["repository"])[0]
- else:
- repo_name_prev = None
+ if self.quiet_repo_display:
+ # overlay verbose
+ # assign index for a previous version in the same slot
+ slot_matches = self.vardb.match(pkg.slot_atom)
+ if slot_matches:
+ repo_name_prev = self.vardb.aux_get(slot_matches[0],
+ ["repository"])[0]
+ else:
+ repo_name_prev = None
- # now use the data to generate output
- if pkg.installed or not slot_matches:
- self.repoadd = self.conf.repo_display.repoStr(
- pkg_info.repo_path_real)
- else:
- repo_path_prev = None
- if repo_name_prev:
- repo_path_prev = self.portdb.getRepositoryPath(
- repo_name_prev)
- if repo_path_prev == pkg_info.repo_path_real:
+ # now use the data to generate output
+ if pkg.installed or not slot_matches:
self.repoadd = self.conf.repo_display.repoStr(
pkg_info.repo_path_real)
else:
- self.repoadd = "%s=>%s" % (
- self.conf.repo_display.repoStr(repo_path_prev),
- self.conf.repo_display.repoStr(pkg_info.repo_path_real))
- if self.repoadd:
- repoadd_set.add(self.repoadd)
+ repo_path_prev = None
+ if repo_name_prev:
+ repo_path_prev = self.portdb.getRepositoryPath(
+ repo_name_prev)
+ if repo_path_prev == pkg_info.repo_path_real:
+ self.repoadd = self.conf.repo_display.repoStr(
+ pkg_info.repo_path_real)
+ else:
+ self.repoadd = "%s=>%s" % (
+ self.conf.repo_display.repoStr(repo_path_prev),
+ self.conf.repo_display.repoStr(pkg_info.repo_path_real))
+ if self.repoadd:
+ repoadd_set.add(self.repoadd)
- @staticmethod
- def convert_myoldbest(myoldbest):
+ def convert_myoldbest(self, pkg, myoldbest):
"""converts and colorizes a version list to a string
+ @param pkg: _emerge.Package.Package instance
@param myoldbest: list
@rtype string.
"""
@@ -371,11 +381,13 @@ class Display(object):
myoldbest_str = ""
if myoldbest:
versions = []
- for pos, pkg in enumerate(myoldbest):
- key = catpkgsplit(pkg.cpv)[2] + \
- "-" + catpkgsplit(pkg.cpv)[3]
+ for pos, old_pkg in enumerate(myoldbest):
+ key = catpkgsplit(old_pkg.cpv)[2] + "-" + catpkgsplit(old_pkg.cpv)[3]
if key[-3:] == "-r0":
key = key[:-3]
+ if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
+ any(x.repo != self.portdb.repositories.mainRepo().name for x in myoldbest + [pkg])):
+ key += _repo_separator + old_pkg.repo
versions.append(key)
myoldbest_str = blue("["+", ".join(versions)+"]")
return myoldbest_str
@@ -385,7 +397,7 @@ class Display(object):
"""Increments counters.interactive if the pkg is to
be merged and its metadata has interactive set True
- @param pkg: _emerge.Package instance
+ @param pkg: _emerge.Package.Package instance
@param ordered: boolean
@param addl: already defined string to add to
"""
@@ -401,13 +413,17 @@ class Display(object):
@param addl: already defined string to add to
@param pkg_info: dictionary
- @param pkg: _emerge.Package instance
+ @param pkg: _emerge.Package.Package instance
@rtype string
"""
+ ver_str = pkg_info.ver
+ if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
+ any(x.repo != self.portdb.repositories.mainRepo().name for x in pkg_info.oldbest_list + [pkg])):
+ ver_str += _repo_separator + pkg.repo
if self.conf.quiet:
myprint = addl + " " + self.indent + \
self.pkgprint(pkg_info.cp, pkg_info)
- myprint = myprint+darkblue(" "+pkg_info.ver)+" "
+ myprint = myprint+darkblue(" "+ver_str)+" "
myprint = myprint+pkg_info.oldbest
myprint = myprint+darkgreen("to "+pkg.root)
self.verboseadd = None
@@ -422,7 +438,7 @@ class Display(object):
self.indent, self.pkgprint(pkg.cp, pkg_info))
if (self.newlp-nc_len(myprint)) > 0:
myprint = myprint+(" "*(self.newlp-nc_len(myprint)))
- myprint = myprint+"["+darkblue(pkg_info.ver)+"] "
+ myprint = myprint+" "+darkblue("["+ver_str+"]")+" "
if (self.oldlp-nc_len(myprint)) > 0:
myprint = myprint+" "*(self.oldlp-nc_len(myprint))
myprint = myprint+pkg_info.oldbest
@@ -435,14 +451,18 @@ class Display(object):
@param addl: already defined string to add to
@param pkg_info: dictionary
- @param pkg: _emerge.Package instance
+ @param pkg: _emerge.Package.Package instance
@rtype string
Modifies self.verboseadd
"""
+ ver_str = pkg_info.ver
+ if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
+ any(x.repo != self.portdb.repositories.mainRepo().name for x in pkg_info.oldbest_list + [pkg])):
+ ver_str += _repo_separator + pkg.repo
if self.conf.quiet:
myprint = addl + " " + self.indent + \
self.pkgprint(pkg_info.cp, pkg_info)
- myprint = myprint+" "+green(pkg_info.ver)+" "
+ myprint = myprint+" "+green(ver_str)+" "
myprint = myprint+pkg_info.oldbest
self.verboseadd = None
else:
@@ -457,7 +477,7 @@ class Display(object):
self.indent, self.pkgprint(pkg.cp, pkg_info))
if (self.newlp-nc_len(myprint)) > 0:
myprint = myprint+(" "*(self.newlp-nc_len(myprint)))
- myprint = myprint+green(" ["+pkg_info.ver+"] ")
+ myprint = myprint+" "+green("["+ver_str+"]")+" "
if (self.oldlp-nc_len(myprint)) > 0:
myprint = myprint+(" "*(self.oldlp-nc_len(myprint)))
myprint += pkg_info.oldbest
@@ -467,31 +487,35 @@ class Display(object):
def _set_no_columns(self, pkg, pkg_info, addl):
"""prints pkg info without column indentation.
- @param pkg: _emerge.Package instance
+ @param pkg: _emerge.Package.Package instance
@param pkg_info: dictionary
@param addl: the current text to add for the next line to output
@rtype the updated addl
"""
+ pkg_str = pkg.cpv
+ if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
+ any(x.repo != self.portdb.repositories.mainRepo().name for x in pkg_info.oldbest_list + [pkg])):
+ pkg_str += _repo_separator + pkg.repo
if not pkg_info.merge:
addl = self.empty_space_in_brackets()
myprint = "[%s%s] %s%s %s" % \
(self.pkgprint(pkg_info.operation.ljust(13),
pkg_info), addl,
- self.indent, self.pkgprint(pkg.cpv, pkg_info),
+ self.indent, self.pkgprint(pkg_str, pkg_info),
pkg_info.oldbest)
else:
myprint = "[%s %s] %s%s %s" % \
(self.pkgprint(pkg.type_name, pkg_info),
addl, self.indent,
- self.pkgprint(pkg.cpv, pkg_info), pkg_info.oldbest)
+ self.pkgprint(pkg_str, pkg_info), pkg_info.oldbest)
return myprint
def _insert_slot(self, pkg, pkg_info, myinslotlist):
"""Adds slot info to the message
- @returns addl: formatted slot info
- @returns myoldbest: installed version list
+ @return addl: formatted slot info
+ @return myoldbest: installed version list
Modifies self.counters.downgrades, self.counters.upgrades,
self.counters.binary
"""
@@ -517,8 +541,8 @@ class Display(object):
def _new_slot(self, pkg, pkg_info):
"""New slot, mark it new.
- @returns addl: formatted slot info
- @returns myoldbest: installed version list
+ @return addl: formatted slot info
+ @return myoldbest: installed version list
Modifies self.counters.newslot, self.counters.binary
"""
addl = " " + green("NS") + pkg_info.fetch_symbol + " "
@@ -574,11 +598,9 @@ class Display(object):
def print_changelog(self):
"""Prints the changelog text to std_out
"""
- writemsg_stdout('\n', noiselevel=-1)
- for revision, text in self.changelogs:
- writemsg_stdout(bold('*'+revision) + '\n' + text,
+ for chunk in self.changelogs:
+ writemsg_stdout(chunk,
noiselevel=-1)
- return
def get_display_list(self, mylist):
@@ -613,7 +635,7 @@ class Display(object):
def set_pkg_info(self, pkg, ordered):
"""Sets various pkg_info dictionary variables
- @param pkg: _emerge.Package instance
+ @param pkg: _emerge.Package.Package instance
@param ordered: bool
@rtype pkg_info dictionary
Modifies self.counters.restrict_fetch,
@@ -643,7 +665,6 @@ class Display(object):
pkg_info.use = list(self.conf.pkg_use_enabled(pkg))
if not pkg.built and pkg.operation == 'merge' and \
'fetch' in pkg.metadata.restrict:
- pkg_info.fetch_symbol = red("F")
if pkg_info.ordered:
self.counters.restrict_fetch += 1
if not self.portdb.getfetchsizes(pkg.cpv,
@@ -651,13 +672,17 @@ class Display(object):
pkg_info.fetch_symbol = green("f")
if pkg_info.ordered:
self.counters.restrict_fetch_satisfied += 1
+ else:
+ pkg_info.fetch_symbol = red("F")
+ if pkg_info.ebuild_path is not None:
+ self.restrict_fetch_list[pkg] = pkg_info
return pkg_info
def do_changelog(self, pkg, pkg_info):
"""Processes and adds the changelog text to the master text for output
- @param pkg: _emerge.Package instance
+ @param pkg: _emerge.Package.Package instance
@param pkg_info: dictionary
Modifies self.changelogs
"""
@@ -676,7 +701,7 @@ class Display(object):
def check_system_world(self, pkg):
Checks for any occurrences of the package in the system or world sets
- @param pkg: _emerge.Package instance
+ @param pkg: _emerge.Package.Package instance
@rtype system and world booleans
"""
root_config = self.conf.roots[pkg.root]
@@ -706,7 +731,7 @@ class Display(object):
@staticmethod
def get_ver_str(pkg):
"""Obtains the version string
- @param pkg: _emerge.Package instance
+ @param pkg: _emerge.Package.Package instance
@rtype string
"""
ver_str = list(catpkgsplit(pkg.cpv)[2:])
@@ -723,7 +748,7 @@ class Display(object):
param is used for -u, where you still *do* want to see when
something is being upgraded.
- @param pkg: _emerge.Package instance
+ @param pkg: _emerge.Package.Package instance
@param pkg_info: dictionary
@rtype addl, myoldbest: list, myinslotlist: list
Modifies self.counters.reinst, self.counters.binary, self.counters.new
@@ -734,6 +759,9 @@ class Display(object):
installed_versions = self.vardb.match_pkgs(pkg.cp)
if self.vardb.cpv_exists(pkg.cpv):
addl = " "+yellow("R")+pkg_info.fetch_symbol+" "
+ installed_version = self.vardb.match_pkgs(pkg.cpv)[0]
+ if not self.quiet_repo_display and installed_version.repo != pkg.repo:
+ myoldbest = [installed_version]
if pkg_info.ordered:
if pkg_info.merge:
self.counters.reinst += 1
@@ -784,10 +812,16 @@ class Display(object):
mylist = self.get_display_list(self.conf.mylist)
# files to fetch list - avoids counting a same file twice
# in size display (verbose mode)
- self.myfetchlist = []
- # Use this set to detect when all the "repoadd" strings are "[0]"
- # and disable the entire repo display in this case.
- repoadd_set = set()
+ self.myfetchlist = set()
+
+ self.quiet_repo_display = "--quiet-repo-display" in depgraph._frozen_config.myopts
+ if self.quiet_repo_display:
+ # Use this set to detect when all the "repoadd" strings are "[0]"
+ # and disable the entire repo display in this case.
+ repoadd_set = set()
+
+ self.verbose_main_repo_display = "--verbose-main-repo-display" in depgraph._frozen_config.myopts
+ self.restrict_fetch_list = {}
for mylist_index in range(len(mylist)):
pkg, depth, ordered = mylist[mylist_index]
@@ -801,21 +835,25 @@ class Display(object):
continue
else:
pkg_info = self.set_pkg_info(pkg, ordered)
- addl, pkg_info.oldbest, myinslotlist = \
+ addl, pkg_info.oldbest_list, myinslotlist = \
self._get_installed_best(pkg, pkg_info)
self.verboseadd = ""
- self.repoadd = None
- self._display_use(pkg, pkg_info.oldbest, myinslotlist)
+ if self.quiet_repo_display:
+ self.repoadd = None
+ self._display_use(pkg, pkg_info.oldbest_list, myinslotlist)
self.recheck_hidden(pkg)
if self.conf.verbosity == 3:
- self.verbose_size(pkg, repoadd_set, pkg_info)
+ if self.quiet_repo_display:
+ self.verbose_size(pkg, repoadd_set, pkg_info)
+ else:
+ self.verbose_size(pkg, None, pkg_info)
pkg_info.cp = pkg.cp
pkg_info.ver = self.get_ver_str(pkg)
self.oldlp = self.conf.columnwidth - 30
self.newlp = self.oldlp - 30
- pkg_info.oldbest = self.convert_myoldbest(pkg_info.oldbest)
+ pkg_info.oldbest = self.convert_myoldbest(pkg, pkg_info.oldbest_list)
pkg_info.system, pkg_info.world = \
self.check_system_world(pkg)
addl = self.set_interactive(pkg, pkg_info.ordered, addl)
@@ -823,13 +861,17 @@ class Display(object):
if self.include_mask_str():
addl += self.gen_mask_str(pkg)
- if pkg.root != "/":
+ if pkg.root_config.settings["ROOT"] != "/":
if pkg_info.oldbest:
pkg_info.oldbest += " "
if self.conf.columns:
myprint = self._set_non_root_columns(
addl, pkg_info, pkg)
else:
+ pkg_str = pkg.cpv
+ if self.conf.verbosity == 3 and not self.quiet_repo_display and (self.verbose_main_repo_display or
+ any(x.repo != self.portdb.repositories.mainRepo().name for x in pkg_info.oldbest_list + [pkg])):
+ pkg_str += _repo_separator + pkg.repo
if not pkg_info.merge:
addl = self.empty_space_in_brackets()
myprint = "[%s%s] " % (
@@ -840,7 +882,7 @@ class Display(object):
myprint = "[%s %s] " % (
self.pkgprint(pkg.type_name, pkg_info), addl)
myprint += self.indent + \
- self.pkgprint(pkg.cpv, pkg_info) + " " + \
+ self.pkgprint(pkg_str, pkg_info) + " " + \
pkg_info.oldbest + darkgreen("to " + pkg.root)
else:
if self.conf.columns:
@@ -852,36 +894,23 @@ class Display(object):
if self.conf.columns and pkg.operation == "uninstall":
continue
- self.print_msg.append((myprint, self.verboseadd, self.repoadd))
-
- if not self.conf.tree_display \
- and not self.conf.no_restart \
- and pkg.root == self.conf.running_root.root \
- and match_from_list(PORTAGE_PACKAGE_ATOM, [pkg]) \
- and not self.conf.quiet:
-
- if not self.vardb.cpv_exists(pkg.cpv) or \
- '9999' in pkg.cpv or \
- 'git' in pkg.inherited or \
- 'git-2' in pkg.inherited:
- if mylist_index < len(mylist) - 1:
- self.print_msg.append(
- colorize(
- "WARN", "*** Portage will stop merging "
- "at this point and reload itself,"
- )
- )
- self.print_msg.append(
- colorize("WARN", " then resume the merge.")
- )
+ if self.quiet_repo_display:
+ self.print_msg.append((myprint, self.verboseadd, self.repoadd))
+ else:
+ self.print_msg.append((myprint, self.verboseadd, None))
- show_repos = repoadd_set and repoadd_set != set(["0"])
+ show_repos = self.quiet_repo_display and repoadd_set and repoadd_set != set(["0"])
# now finally print out the messages
self.print_messages(show_repos)
self.print_blockers()
if self.conf.verbosity == 3:
self.print_verbose(show_repos)
+ for pkg, pkg_info in self.restrict_fetch_list.items():
+ writemsg_stdout("\nFetch instructions for %s:\n" % (pkg.cpv,),
+ noiselevel=-1)
+ spawn_nofetch(self.conf.trees[pkg.root]["porttree"].dbapi,
+ pkg_info.ebuild_path)
if self.conf.changelog:
self.print_changelog()
diff --git a/portage_with_autodep/pym/_emerge/resolver/output.pyo b/portage_with_autodep/pym/_emerge/resolver/output.pyo
new file mode 100644
index 0000000..bd2ae2f
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/resolver/output.pyo
Binary files differ
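
A recurring pattern in the output.py changes above is the "::repo" suffix: with verbosity 3 (-v) and without --quiet-repo-display, a version string gains a repository suffix when --verbose-main-repo-display is enabled or when any involved package comes from a repository other than the main one. A standalone sketch with hypothetical repository names (the real check compares against self.portdb.repositories.mainRepo().name):

_repo_separator = "::"

def version_with_repo(ver, repo, old_repos, main_repo="gentoo",
        verbose_main_repo_display=False):
    # old_repos stands in for the repositories of pkg_info.oldbest_list.
    if verbose_main_repo_display or any(
            r != main_repo for r in list(old_repos) + [repo]):
        return ver + _repo_separator + repo
    return ver

print(version_with_repo("1.4.2", "gentoo", ["gentoo"]))      # 1.4.2
print(version_with_repo("1.4.2", "my-overlay", ["gentoo"]))  # 1.4.2::my-overlay
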
diff --git a/portage_with_autodep/pym/_emerge/resolver/output_helpers.py b/portage_with_autodep/pym/_emerge/resolver/output_helpers.py
index b7e7376..e751dd8 100644
--- a/portage_with_autodep/pym/_emerge/resolver/output_helpers.py
+++ b/portage_with_autodep/pym/_emerge/resolver/output_helpers.py
@@ -14,10 +14,10 @@ import sys
from portage import os
from portage import _encodings, _unicode_encode
from portage._sets.base import InternalPackageSet
-from portage.output import blue, colorize, create_color_func, green, red, \
- teal, yellow
+from portage.output import (blue, bold, colorize, create_color_func,
+ green, red, teal, yellow)
bad = create_color_func("BAD")
-from portage.util import writemsg
+from portage.util import shlex_split, writemsg
from portage.versions import catpkgsplit
from _emerge.Blocker import Blocker
@@ -39,7 +39,7 @@ class _RepoDisplay(object):
repo_paths.add(portdir)
overlays = root_config.settings.get("PORTDIR_OVERLAY")
if overlays:
- repo_paths.update(overlays.split())
+ repo_paths.update(shlex_split(overlays))
repo_paths = list(repo_paths)
self._repo_paths = repo_paths
self._repo_paths_real = [ os.path.realpath(repo_path) \
@@ -198,7 +198,6 @@ class _DisplayConfig(object):
self.print_use_string = self.verbosity != 1 or "--verbose" in frozen_config.myopts
self.changelog = "--changelog" in frozen_config.myopts
self.edebug = frozen_config.edebug
- self.no_restart = frozen_config._opts_no_restart.intersection(frozen_config.myopts)
self.unordered_display = "--unordered-display" in frozen_config.myopts
mywidth = 130
@@ -212,7 +211,8 @@ class _DisplayConfig(object):
del e
self.columnwidth = mywidth
- self.repo_display = _RepoDisplay(frozen_config.roots)
+ if "--quiet-repo-display" in frozen_config.myopts:
+ self.repo_display = _RepoDisplay(frozen_config.roots)
self.trees = frozen_config.trees
self.pkgsettings = frozen_config.pkgsettings
self.target_root = frozen_config.target_root
@@ -500,63 +500,120 @@ def _calc_changelog(ebuildpath,current,next):
next = '-'.join(catpkgsplit(next)[1:])
if next.endswith('-r0'):
next = next[:-3]
- changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
- try:
- changelog = io.open(_unicode_encode(changelogpath,
- encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['repo.content'], errors='replace'
- ).read()
- except SystemExit:
- raise # Needed else can't exit
- except:
+
+ changelogdir = os.path.dirname(ebuildpath)
+ changelogs = ['ChangeLog']
+ # ChangeLog-YYYY (see bug #389611)
+ changelogs.extend(sorted((fn for fn in os.listdir(changelogdir)
+ if fn.startswith('ChangeLog-')), reverse=True))
+
+ divisions = []
+ found_current = False
+ for fn in changelogs:
+ changelogpath = os.path.join(changelogdir, fn)
+ try:
+ with io.open(_unicode_encode(changelogpath,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ changelog = f.read()
+ except EnvironmentError:
+ return []
+ for node in _find_changelog_tags(changelog):
+ if node[0] == current:
+ found_current = True
+ break
+ else:
+ divisions.append(node)
+ if found_current:
+ break
+
+ if not found_current:
return []
- divisions = _find_changelog_tags(changelog)
+
#print 'XX from',current,'to',next
#for div,text in divisions: print 'XX',div
# skip entries for all revisions above the one we are about to emerge
- for i in range(len(divisions)):
- if divisions[i][0]==next:
- divisions = divisions[i:]
- break
- # find out how many entries we are going to display
- for i in range(len(divisions)):
- if divisions[i][0]==current:
- divisions = divisions[:i]
+ later_rev_index = None
+ for i, node in enumerate(divisions):
+ if node[0] == next:
+ if later_rev_index is not None:
+ first_node = divisions[later_rev_index]
+ # Discard the later revision and the first ChangeLog entry
+ # that follows it. We want to display all the entries after
+ # that first entry, as discussed in bug #373009.
+ trimmed_lines = []
+ iterator = iter(first_node[1])
+ for l in iterator:
+ if not l:
+ # end of the first entry that's discarded
+ break
+ first_node = (None, list(iterator))
+ divisions = [first_node] + divisions[later_rev_index+1:]
break
- else:
- # couldnt find the current revision in the list. display nothing
- return []
- return divisions
-
+ if node[0] is not None:
+ later_rev_index = i
+
+ output = []
+ prev_blank = False
+ prev_rev = False
+ for rev, lines in divisions:
+ if rev is not None:
+ if not (prev_blank or prev_rev):
+ output.append("\n")
+ output.append(bold('*' + rev) + '\n')
+ prev_rev = True
+ prev_blank = False
+ if lines:
+ prev_rev = False
+ if not prev_blank:
+ output.append("\n")
+ for l in lines:
+ output.append(l + "\n")
+ output.append("\n")
+ prev_blank = True
+ return output
+
+def _strip_header_comments(lines):
+ # strip leading and trailing blank or header/comment lines
+ i = 0
+ while i < len(lines) and (not lines[i] or lines[i][:1] == "#"):
+ i += 1
+ if i:
+ lines = lines[i:]
+ while lines and (not lines[-1] or lines[-1][:1] == "#"):
+ lines.pop()
+ return lines
def _find_changelog_tags(changelog):
divs = []
+ if not changelog:
+ return divs
release = None
- while 1:
- match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
- if match is None:
- if release is not None:
- divs.append((release,changelog))
- return divs
- if release is not None:
- divs.append((release,changelog[:match.start()]))
- changelog = changelog[match.end():]
+ release_end = 0
+ for match in re.finditer(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?$',
+ changelog, re.M):
+ divs.append((release, _strip_header_comments(
+ changelog[release_end:match.start()].splitlines())))
+ release_end = match.end()
release = match.group(1)
if release.endswith('.ebuild'):
release = release[:-7]
if release.endswith('-r0'):
release = release[:-3]
+ divs.append((release,
+ _strip_header_comments(changelog[release_end:].splitlines())))
+ return divs
class PkgInfo(object):
"""Simple class to hold instance attributes for current
information about the pkg being printed.
"""
- __slots__ = ("ordered", "fetch_symbol", "operation", "merge",
- "built", "cp", "ebuild_path", "repo_name", "repo_path_real",
- "world", "system", "use", "oldbest", "ver"
- )
+ __slots__ = ("built", "cp", "ebuild_path", "fetch_symbol", "merge",
+ "oldbest", "oldbest_list", "operation", "ordered",
+ "repo_name", "repo_path_real", "system", "use", "ver", "world")
def __init__(self):
@@ -566,6 +623,7 @@ class PkgInfo(object):
self.fetch_symbol = ''
self.merge = ''
self.oldbest = ''
+ self.oldbest_list = []
self.operation = ''
self.ordered = False
self.repo_path_real = ''
diff --git a/portage_with_autodep/pym/_emerge/resolver/output_helpers.pyo b/portage_with_autodep/pym/_emerge/resolver/output_helpers.pyo
new file mode 100644
index 0000000..ae39dd4
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/resolver/output_helpers.pyo
Binary files differ
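
The rewritten _find_changelog_tags() in output_helpers.py splits a ChangeLog into (release, lines) divisions with a single re.finditer() pass: each "*<version>" header starts a new division, and any text before the first header is attached to a division whose release is None. A standalone sketch on toy ChangeLog text (the header-comment stripping done by _strip_header_comments() is omitted here):

import re

def find_changelog_tags(changelog):
    divs = []
    if not changelog:
        return divs
    release = None
    release_end = 0
    for match in re.finditer(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?$',
            changelog, re.M):
        divs.append((release,
            changelog[release_end:match.start()].splitlines()))
        release_end = match.end()
        release = match.group(1)
        if release.endswith('.ebuild'):
            release = release[:-7]
        if release.endswith('-r0'):
            release = release[:-3]
    divs.append((release, changelog[release_end:].splitlines()))
    return divs

sample = "*foo-1.1 (01 Jan 2012)\n\n  Version bump.\n\n*foo-1.0-r0 (01 Dec 2011)\n\n  Initial import.\n"
for release, lines in find_changelog_tags(sample):
    print(release, lines)
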
diff --git a/portage_with_autodep/pym/_emerge/resolver/slot_collision.py b/portage_with_autodep/pym/_emerge/resolver/slot_collision.py
index 0df8f20..a1c8714 100644
--- a/portage_with_autodep/pym/_emerge/resolver/slot_collision.py
+++ b/portage_with_autodep/pym/_emerge/resolver/slot_collision.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2011 Gentoo Foundation
+# Copyright 2010-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -80,6 +80,8 @@ class slot_conflict_handler(object):
the needed USE changes and prepare the message for the user.
"""
+ _check_configuration_max = 1024
+
def __init__(self, depgraph):
self.depgraph = depgraph
self.myopts = depgraph._frozen_config.myopts
@@ -244,7 +246,7 @@ class slot_conflict_handler(object):
for (slot_atom, root), pkgs \
in self.slot_collision_info.items():
msg.append(str(slot_atom))
- if root != '/':
+ if root != self.depgraph._frozen_config._running_root.root:
msg.append(" for %s" % (root,))
msg.append("\n\n")
@@ -663,14 +665,24 @@ class slot_conflict_handler(object):
solutions = []
sol_gen = _solution_candidate_generator(all_involved_flags)
- while(True):
+ checked = 0
+ while True:
candidate = sol_gen.get_candidate()
if not candidate:
break
solution = self._check_solution(config, candidate, all_conflict_atoms_by_slotatom)
+ checked += 1
if solution:
solutions.append(solution)
-
+
+ if checked >= self._check_configuration_max:
+ # TODO: Implement early elimination for candidates that would
+ # change forced or masked flags, and don't count them here.
+ if self.debug:
+ writemsg("\nAborting _check_configuration due to "
+ "excessive number of candidates.\n", noiselevel=-1)
+ break
+
if self.debug:
if not solutions:
writemsg("No viable solutions. Rejecting configuration.\n", noiselevel=-1)
@@ -843,7 +855,7 @@ class slot_conflict_handler(object):
#Make sure the changes don't violate REQUIRED_USE
for pkg in required_changes:
- required_use = pkg.metadata["REQUIRED_USE"]
+ required_use = pkg.metadata.get("REQUIRED_USE")
if not required_use:
continue
diff --git a/portage_with_autodep/pym/_emerge/resolver/slot_collision.pyo b/portage_with_autodep/pym/_emerge/resolver/slot_collision.pyo
new file mode 100644
index 0000000..1fc3a13
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/resolver/slot_collision.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/search.py b/portage_with_autodep/pym/_emerge/search.py
index 35f0412..5abc8a0 100644
--- a/portage_with_autodep/pym/_emerge/search.py
+++ b/portage_with_autodep/pym/_emerge/search.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2009 Gentoo Foundation
+# Copyright 1999-2011 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -6,6 +6,7 @@ from __future__ import print_function
import re
import portage
from portage import os
+from portage.dbapi.porttree import _parse_uri_map
from portage.output import bold, bold as white, darkgreen, green, red
from portage.util import writemsg_stdout
@@ -150,7 +151,7 @@ class search(object):
if not result or cpv == portage.best([cpv, result]):
result = cpv
else:
- db_keys = Package.metadata_keys
+ db_keys = list(db._aux_cache_keys)
# break out of this loop with highest visible
# match, checked in descending order
for cpv in reversed(db.match(atom)):
@@ -261,11 +262,13 @@ class search(object):
msg.append("[ Applications found : " + \
bold(str(self.mlen)) + " ]\n\n")
vardb = self.vartree.dbapi
+ metadata_keys = set(Package.metadata_keys)
+ metadata_keys.update(["DESCRIPTION", "HOMEPAGE", "LICENSE", "SRC_URI"])
+ metadata_keys = tuple(metadata_keys)
for mtype in self.matches:
for match,masked in self.matches[mtype]:
full_package = None
if mtype == "pkg":
- catpack = match
full_package = self._xmatch(
"bestmatch-visible", match)
if not full_package:
@@ -285,11 +288,16 @@ class search(object):
+ "\n\n")
if full_package:
try:
- desc, homepage, license = self._aux_get(
- full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
+ metadata = dict(zip(metadata_keys,
+ self._aux_get(full_package, metadata_keys)))
except KeyError:
msg.append("emerge: search: aux_get() failed, skipping\n")
continue
+
+ desc = metadata["DESCRIPTION"]
+ homepage = metadata["HOMEPAGE"]
+ license = metadata["LICENSE"]
+
if masked:
msg.append(green("*") + " " + \
white(match) + " " + red("[ Masked ]") + "\n")
@@ -304,12 +312,17 @@ class search(object):
mycpv = match + "-" + myversion
myebuild = self._findname(mycpv)
if myebuild:
+ pkg = Package(built=False, cpv=mycpv,
+ installed=False, metadata=metadata,
+ root_config=self.root_config, type_name="ebuild")
pkgdir = os.path.dirname(myebuild)
- from portage import manifest
- mf = manifest.Manifest(
+ mf = self.settings.repositories.get_repo_for_location(
+ os.path.dirname(os.path.dirname(pkgdir)))
+ mf = mf.load_manifest(
pkgdir, self.settings["DISTDIR"])
try:
- uri_map = self._getFetchMap(mycpv)
+ uri_map = _parse_uri_map(mycpv, metadata,
+ use=pkg.use.enabled)
except portage.exception.InvalidDependString as e:
file_size_str = "Unknown (%s)" % (e,)
del e
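
The search.py changes above collect every metadata key the search output needs in a single aux_get() call, build a dict from the results, and then derive the SRC_URI fetch map from that metadata instead of issuing a second tree lookup per package. A small sketch of the zip-into-a-dict step, with a fake aux_get standing in for the dbapi call (the real _parse_uri_map and Manifest plumbing is omitted):

    def fake_aux_get(cpv, keys):
        # Stand-in for dbapi.aux_get(): returns values in the same order as `keys`.
        fake_db = {
            "DESCRIPTION": "example package",
            "HOMEPAGE": "https://example.org",
            "LICENSE": "MIT",
            "SRC_URI": "mirror://example/pkg-1.0.tar.gz",
        }
        return [fake_db.get(k, "") for k in keys]

    metadata_keys = ("DESCRIPTION", "HOMEPAGE", "LICENSE", "SRC_URI")
    metadata = dict(zip(metadata_keys,
        fake_aux_get("app-misc/pkg-1.0", metadata_keys)))

    # One lookup feeds every consumer, mirroring the desc/homepage/license
    # unpacking in the hunk above.
    print(metadata["DESCRIPTION"], metadata["HOMEPAGE"], metadata["LICENSE"])
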
diff --git a/portage_with_autodep/pym/_emerge/search.pyo b/portage_with_autodep/pym/_emerge/search.pyo
new file mode 100644
index 0000000..055a734
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/search.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/show_invalid_depstring_notice.pyo b/portage_with_autodep/pym/_emerge/show_invalid_depstring_notice.pyo
new file mode 100644
index 0000000..337e135
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/show_invalid_depstring_notice.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/stdout_spinner.pyo b/portage_with_autodep/pym/_emerge/stdout_spinner.pyo
new file mode 100644
index 0000000..b091171
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/stdout_spinner.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/sync/__init__.pyo b/portage_with_autodep/pym/_emerge/sync/__init__.pyo
new file mode 100644
index 0000000..7314f80
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/sync/__init__.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/sync/getaddrinfo_validate.pyo b/portage_with_autodep/pym/_emerge/sync/getaddrinfo_validate.pyo
new file mode 100644
index 0000000..8e41377
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/sync/getaddrinfo_validate.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/sync/old_tree_timestamp.pyo b/portage_with_autodep/pym/_emerge/sync/old_tree_timestamp.pyo
new file mode 100644
index 0000000..5b59c5a
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/sync/old_tree_timestamp.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/unmerge.py b/portage_with_autodep/pym/_emerge/unmerge.py
index 3db3a8b..b46b89c 100644
--- a/portage_with_autodep/pym/_emerge/unmerge.py
+++ b/portage_with_autodep/pym/_emerge/unmerge.py
@@ -1,9 +1,10 @@
-# Copyright 1999-2011 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
import logging
+import signal
import sys
import textwrap
import portage
@@ -12,7 +13,7 @@ from portage.dbapi._expand_new_virt import expand_new_virt
from portage.output import bold, colorize, darkgreen, green
from portage._sets import SETPREFIX
from portage._sets.base import EditablePackageSet
-from portage.util import cmp_sort_key
+from portage.versions import cpv_sort_key, _pkg_str
from _emerge.emergelog import emergelog
from _emerge.Package import Package
@@ -467,20 +468,22 @@ def _unmerge_display(root_config, myopts, unmerge_action,
if not quiet:
writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
if pkgmap[x][mytype]:
- sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
- sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
- for pn, ver, rev in sorted_pkgs:
- if rev == "r0":
- myversion = ver
- else:
- myversion = ver + "-" + rev
+ sorted_pkgs = []
+ for mypkg in pkgmap[x][mytype]:
+ try:
+ sorted_pkgs.append(mypkg.cpv)
+ except AttributeError:
+ sorted_pkgs.append(_pkg_str(mypkg))
+ sorted_pkgs.sort(key=cpv_sort_key())
+ for mypkg in sorted_pkgs:
if mytype == "selected":
writemsg_level(
- colorize("UNMERGE_WARN", myversion + " "),
+ colorize("UNMERGE_WARN", mypkg.version + " "),
noiselevel=-1)
else:
writemsg_level(
- colorize("GOOD", myversion + " "), noiselevel=-1)
+ colorize("GOOD", mypkg.version + " "),
+ noiselevel=-1)
else:
writemsg_level("none ", noiselevel=-1)
if not quiet:
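
The display hunk above stops hand-splitting each CPV with catpkgsplit/pkgcmp and instead coerces every entry to a version-aware string (Package.cpv where available, _pkg_str otherwise) before sorting with cpv_sort_key(). A portage-free sketch of the same coerce-then-sort shape; the numeric key below is only a crude stand-in for cpv_sort_key:

    class FakePkg(object):
        # Stand-in for an _emerge Package, which exposes a .cpv attribute.
        def __init__(self, cpv):
            self.cpv = cpv

    def simple_version_key(cpv):
        # Crude substitute for portage.versions.cpv_sort_key(): sort by the
        # dotted numeric tail, e.g. "app-misc/foo-1.10" -> (1, 10).
        version = cpv.rsplit("-", 1)[1]
        return tuple(int(part) for part in version.split("."))

    entries = [FakePkg("app-misc/foo-1.10"), "app-misc/foo-1.2",
        FakePkg("app-misc/foo-1.9")]
    sorted_pkgs = []
    for entry in entries:
        try:
            sorted_pkgs.append(entry.cpv)   # Package-like objects
        except AttributeError:
            sorted_pkgs.append(entry)       # plain CPV strings
    sorted_pkgs.sort(key=simple_version_key)
    print(sorted_pkgs)   # ['app-misc/foo-1.2', 'app-misc/foo-1.9', 'app-misc/foo-1.10']
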
@@ -503,7 +506,8 @@ def unmerge(root_config, myopts, unmerge_action,
clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
scheduler=None, writemsg_level=portage.util.writemsg_level):
"""
- Returns 1 if successful, otherwise 0.
+ Returns os.EX_OK if no errors occur, 1 if an error occurs, and
+ 130 if interrupted due to a 'no' answer for --ask.
"""
if clean_world:
@@ -515,7 +519,7 @@ def unmerge(root_config, myopts, unmerge_action,
writemsg_level=writemsg_level)
if rval != os.EX_OK:
- return 0
+ return rval
enter_invalid = '--ask-enter-invalid' in myopts
vartree = root_config.trees["vartree"]
@@ -526,7 +530,7 @@ def unmerge(root_config, myopts, unmerge_action,
if "--pretend" in myopts:
#we're done... return
- return 0
+ return os.EX_OK
if "--ask" in myopts:
if userquery("Would you like to unmerge these packages?",
enter_invalid) == "No":
@@ -535,19 +539,32 @@ def unmerge(root_config, myopts, unmerge_action,
print()
print("Quitting.")
print()
- return 0
+ return 128 + signal.SIGINT
#the real unmerging begins, after a short delay....
if clean_delay and not autoclean:
countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
+ all_selected = set()
+ all_selected.update(*[x["selected"] for x in pkgmap])
+
+ # Set counter variables
+ curval = 1
+ maxval = len(all_selected)
+
for x in range(len(pkgmap)):
for y in pkgmap[x]["selected"]:
- writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
emergelog(xterm_titles, "=== Unmerging... ("+y+")")
+ message = ">>> Unmerging ({0} of {1}) {2}...\n".format(
+ colorize("MERGE_LIST_PROGRESS", str(curval)),
+ colorize("MERGE_LIST_PROGRESS", str(maxval)),
+ y)
+ writemsg_level(message, noiselevel=-1)
+ curval += 1
+
mysplit = y.split("/")
#unmerge...
- retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
- mysettings, unmerge_action not in ["clean","prune"],
+ retval = portage.unmerge(mysplit[0], mysplit[1],
+ settings=mysettings,
vartree=vartree, ldpath_mtimes=ldpath_mtimes,
scheduler=scheduler)
@@ -574,5 +591,5 @@ def unmerge(root_config, myopts, unmerge_action,
sets["selected"].remove(SETPREFIX + s)
sets["selected"].unlock()
- return 1
+ return os.EX_OK
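
unmerge() now reports "(N of M)" progress as it works and returns conventional statuses: os.EX_OK on success, 1 on error, and 128 + SIGINT (130) when the --ask prompt is declined. A toy sketch of the counter bookkeeping and return values, with a simplified pkgmap in place of the real selected/omitted/protected structure:

    import os
    import signal

    def unmerge_progress(pkgmap, declined=False):
        # `pkgmap` mimics the shape used above: one dict of package sets per slot.
        if declined:
            # Mirror the --ask "No" branch: report a conventional interrupt status.
            return 128 + signal.SIGINT
        all_selected = set()
        all_selected.update(*[x["selected"] for x in pkgmap])
        curval, maxval = 1, len(all_selected)
        for x in pkgmap:
            for cpv in x["selected"]:
                print(">>> Unmerging (%d of %d) %s..." % (curval, maxval, cpv))
                curval += 1
        return os.EX_OK

    pkgmap = [{"selected": {"app-misc/foo-1.0"}},
        {"selected": {"app-misc/bar-2.1"}}]
    print(unmerge_progress(pkgmap))             # two progress lines, then 0
    print(unmerge_progress([], declined=True))  # 130
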
diff --git a/portage_with_autodep/pym/_emerge/unmerge.pyo b/portage_with_autodep/pym/_emerge/unmerge.pyo
new file mode 100644
index 0000000..c16c9ec
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/unmerge.pyo
Binary files differ
diff --git a/portage_with_autodep/pym/_emerge/userquery.py b/portage_with_autodep/pym/_emerge/userquery.py
index e7ed400..efae80a 100644
--- a/portage_with_autodep/pym/_emerge/userquery.py
+++ b/portage_with_autodep/pym/_emerge/userquery.py
@@ -1,8 +1,9 @@
-# Copyright 1999-2009 Gentoo Foundation
+# Copyright 1999-2012 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
+import signal
import sys
from portage.output import bold, create_color_func
@@ -51,5 +52,4 @@ def userquery(prompt, enter_invalid, responses=None, colours=None):
print("Sorry, response '%s' not understood." % response, end=' ')
except (EOFError, KeyboardInterrupt):
print("Interrupted.")
- sys.exit(1)
-
+ sys.exit(128 + signal.SIGINT)
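
The userquery() change above makes an interrupted prompt exit with 128 + SIGINT (130) instead of a bare 1, so shells and callers can distinguish Ctrl-C or EOF from an ordinary failure. A minimal prompt sketch with the same exit convention; the prompt text, responses, and function name are illustrative only:

    from __future__ import print_function

    import signal
    import sys

    try:
        read_input = raw_input   # Python 2
    except NameError:
        read_input = input       # Python 3

    def ask_yes_no(prompt, responses=("Yes", "No")):
        """Loop until the answer matches a known response; exit 130 on Ctrl-C/EOF."""
        while True:
            try:
                answer = read_input("%s [%s] " % (prompt, "/".join(responses)))
                for value in responses:
                    # An empty answer matches the first (default) response.
                    if value.lower().startswith(answer.lower()):
                        return value
                print("Sorry, response '%s' not understood." % answer, end=' ')
            except (EOFError, KeyboardInterrupt):
                print("Interrupted.")
                sys.exit(128 + signal.SIGINT)

    if __name__ == "__main__":
        print(ask_yes_no("Would you like to continue?"))
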
diff --git a/portage_with_autodep/pym/_emerge/userquery.pyo b/portage_with_autodep/pym/_emerge/userquery.pyo
new file mode 100644
index 0000000..5492f90
--- /dev/null
+++ b/portage_with_autodep/pym/_emerge/userquery.pyo
Binary files differ