author     Michał Górny <mgorny@gentoo.org>  2018-07-17 21:50:45 +0200
committer  Zac Medico <zmedico@gentoo.org>  2018-07-18 16:19:11 -0700
commit     bc0fa8d3795ed7e40aaa00f579bb2977897bce25 (patch)
tree       2a62c721ee8dec47ddb564254e1cbd967577d1f0 /pym/portage/dbapi
parent     EventLoop: raise TypeError for unexpected call_* keyword args (diff)
download   portage-bc0fa8d3795ed7e40aaa00f579bb2977897bce25.tar.gz
           portage-bc0fa8d3795ed7e40aaa00f579bb2977897bce25.tar.bz2
           portage-bc0fa8d3795ed7e40aaa00f579bb2977897bce25.zip
Rename pym→lib, for better distutils-r1 interoperability
Closes: https://github.com/gentoo/portage/pull/343
Diffstat (limited to 'pym/portage/dbapi')
-rw-r--r--  pym/portage/dbapi/DummyTree.py                              16
-rw-r--r--  pym/portage/dbapi/IndexedPortdb.py                         171
-rw-r--r--  pym/portage/dbapi/IndexedVardb.py                          114
-rw-r--r--  pym/portage/dbapi/_ContentsCaseSensitivityManager.py        93
-rw-r--r--  pym/portage/dbapi/_MergeProcess.py                         287
-rw-r--r--  pym/portage/dbapi/_SyncfsProcess.py                         53
-rw-r--r--  pym/portage/dbapi/_VdbMetadataDelta.py                     176
-rw-r--r--  pym/portage/dbapi/__init__.py                              443
-rw-r--r--  pym/portage/dbapi/_expand_new_virt.py                       81
-rw-r--r--  pym/portage/dbapi/_similar_name_search.py                   57
-rw-r--r--  pym/portage/dbapi/bintree.py                              1710
-rw-r--r--  pym/portage/dbapi/cpv_expand.py                            108
-rw-r--r--  pym/portage/dbapi/dep_expand.py                             58
-rw-r--r--  pym/portage/dbapi/porttree.py                             1526
-rw-r--r--  pym/portage/dbapi/vartree.py                              5559
-rw-r--r--  pym/portage/dbapi/virtual.py                               232
16 files changed, 0 insertions, 10684 deletions
diff --git a/pym/portage/dbapi/DummyTree.py b/pym/portage/dbapi/DummyTree.py
deleted file mode 100644
index 6579e88e2..000000000
--- a/pym/portage/dbapi/DummyTree.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright 2015 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-class DummyTree(object):
- """
- Most internal code only accesses the "dbapi" attribute of the
- binarytree, portagetree, and vartree classes. DummyTree is useful
- in cases where alternative dbapi implementations (or wrappers that
- modify or extend behavior of existing dbapi implementations) are
- needed, since it allows these implementations to be exposed through
- an interface which is minimally compatible with the *tree classes.
- """
- __slots__ = ("dbapi",)
-
- def __init__(self, dbapi):
- self.dbapi = dbapi
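For reference, a minimal sketch of how the removed DummyTree wrapper can be used: any dbapi instance is exposed through the *tree-compatible "dbapi" attribute. The vartree lookup via portage.db below is an assumption about the caller's environment, not part of this module.

import portage
from portage.dbapi.DummyTree import DummyTree

# Wrap the installed-packages dbapi so it can be handed to code that
# expects a *tree object but only touches its "dbapi" attribute.
vardb = portage.db[portage.root]["vartree"].dbapi
tree = DummyTree(vardb)
print(tree.dbapi.cp_all()[:5])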
diff --git a/pym/portage/dbapi/IndexedPortdb.py b/pym/portage/dbapi/IndexedPortdb.py
deleted file mode 100644
index 510e0278c..000000000
--- a/pym/portage/dbapi/IndexedPortdb.py
+++ /dev/null
@@ -1,171 +0,0 @@
-# Copyright 2014 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import errno
-import io
-import functools
-import operator
-import os
-
-import portage
-from portage import _encodings
-from portage.dep import Atom
-from portage.exception import FileNotFound
-from portage.cache.index.IndexStreamIterator import IndexStreamIterator
-from portage.cache.index.pkg_desc_index import \
- pkg_desc_index_line_read, pkg_desc_index_node
-from portage.util.iterators.MultiIterGroupBy import MultiIterGroupBy
-from portage.versions import _pkg_str
-
-class IndexedPortdb(object):
- """
- A portdbapi interface that uses a package description index to
- improve performance. If the description index is missing for a
- particular repository, then all metadata for that repository is
- obtained using the normal portdbapi.aux_get method.
-
- For performance reasons, the match method only supports package
- name and version constraints. For the same reason, the xmatch
- method is not implemented.
- """
-
- # Match returns unordered results.
- match_unordered = True
-
- _copy_attrs = ('cpv_exists', 'findname', 'getFetchMap',
- '_aux_cache_keys', '_cpv_sort_ascending',
- '_have_root_eclass_dir')
-
- def __init__(self, portdb):
-
- self._portdb = portdb
-
- for k in self._copy_attrs:
- setattr(self, k, getattr(portdb, k))
-
- self._desc_cache = None
- self._cp_map = None
- self._unindexed_cp_map = None
-
- def _init_index(self):
-
- cp_map = {}
- desc_cache = {}
- self._desc_cache = desc_cache
- self._cp_map = cp_map
- index_missing = []
-
- streams = []
- for repo_path in self._portdb.porttrees:
- outside_repo = os.path.join(self._portdb.depcachedir,
- repo_path.lstrip(os.sep))
- filenames = []
- for parent_dir in (repo_path, outside_repo):
- filenames.append(os.path.join(parent_dir,
- "metadata", "pkg_desc_index"))
-
- repo_name = self._portdb.getRepositoryName(repo_path)
-
- try:
- f = None
- for filename in filenames:
- try:
- f = io.open(filename,
- encoding=_encodings["repo.content"])
- except IOError as e:
- if e.errno not in (errno.ENOENT, errno.ESTALE):
- raise
- else:
- break
-
- if f is None:
- raise FileNotFound(filename)
-
- streams.append(iter(IndexStreamIterator(f,
- functools.partial(pkg_desc_index_line_read,
- repo = repo_name))))
- except FileNotFound:
- index_missing.append(repo_path)
-
- if index_missing:
- self._unindexed_cp_map = {}
-
- class _NonIndexedStream(object):
- def __iter__(self_):
- for cp in self._portdb.cp_all(
- trees = index_missing):
- # Don't call cp_list yet, since it's a waste
- # if the package name does not match the current
- # search.
- self._unindexed_cp_map[cp] = index_missing
- yield pkg_desc_index_node(cp, (), None)
-
- streams.append(iter(_NonIndexedStream()))
-
- if streams:
- if len(streams) == 1:
- cp_group_iter = ([node] for node in streams[0])
- else:
- cp_group_iter = MultiIterGroupBy(streams,
- key = operator.attrgetter("cp"))
-
- for cp_group in cp_group_iter:
-
- new_cp = None
- cp_list = cp_map.get(cp_group[0].cp)
- if cp_list is None:
- new_cp = cp_group[0].cp
- cp_list = []
- cp_map[cp_group[0].cp] = cp_list
-
- for entry in cp_group:
- cp_list.extend(entry.cpv_list)
- if entry.desc is not None:
- for cpv in entry.cpv_list:
- desc_cache[cpv] = entry.desc
-
- if new_cp is not None:
- yield cp_group[0].cp
-
- def cp_all(self, sort=True):
- """
- Returns an ordered iterator instead of a list, so that search
- results can be displayed incrementally.
- """
- if self._cp_map is None:
- return self._init_index()
- return iter(sorted(self._cp_map)) if sort else iter(self._cp_map)
-
- def match(self, atom):
- """
- For performance reasons, only package name and version
- constraints are supported, and the returned list is
- unordered.
- """
- if not isinstance(atom, Atom):
- atom = Atom(atom)
- cp_list = self._cp_map.get(atom.cp)
- if cp_list is None:
- return []
-
- if self._unindexed_cp_map is not None:
- try:
- unindexed = self._unindexed_cp_map.pop(atom.cp)
- except KeyError:
- pass
- else:
- cp_list.extend(self._portdb.cp_list(atom.cp,
- mytree=unindexed))
-
- if atom == atom.cp:
- return cp_list[:]
- else:
- return portage.match_from_list(atom, cp_list)
-
- def aux_get(self, cpv, attrs, myrepo=None):
- if len(attrs) == 1 and attrs[0] == "DESCRIPTION":
- try:
- return [self._desc_cache[cpv]]
- except KeyError:
- pass
- return self._portdb.aux_get(cpv, attrs)
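A minimal usage sketch of the removed IndexedPortdb class, which backs fast package-description searches; the porttree lookup and the example atom are illustrative assumptions.

import portage
from portage.dep import Atom
from portage.dbapi.IndexedPortdb import IndexedPortdb

portdb = portage.db[portage.root]["porttree"].dbapi
indexed = IndexedPortdb(portdb)

# cp_all() returns a lazy iterator; consuming it builds the index that
# match() and the DESCRIPTION fast path of aux_get() rely on.
for cp in indexed.cp_all():
	pass

for cpv in indexed.match(Atom("sys-apps/portage")):
	print(cpv, indexed.aux_get(cpv, ["DESCRIPTION"])[0])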
diff --git a/pym/portage/dbapi/IndexedVardb.py b/pym/portage/dbapi/IndexedVardb.py
deleted file mode 100644
index e2910b27f..000000000
--- a/pym/portage/dbapi/IndexedVardb.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# Copyright 2014 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import portage
-from portage.dep import Atom
-from portage.exception import InvalidData
-from portage.versions import _pkg_str
-
-class IndexedVardb(object):
- """
- A vardbapi interface that sacrifices validation in order to
- improve performance. It takes advantage of vardbdbapi._aux_cache,
- which is backed by vdb_metadata.pickle. Since _aux_cache is
- not updated for every single merge/unmerge (see
- _aux_cache_threshold), the list of packages is obtained directly
- from the real vardbapi instance. If a package is missing from
- _aux_cache, then its metadata is obtained using the normal
- (validated) vardbapi.aux_get method.
-
- For performance reasons, the match method only supports package
- name and version constraints.
- """
-
- # Match returns unordered results.
- match_unordered = True
-
- _copy_attrs = ('cpv_exists',
- '_aux_cache_keys', '_cpv_sort_ascending')
-
- def __init__(self, vardb):
- self._vardb = vardb
-
- for k in self._copy_attrs:
- setattr(self, k, getattr(vardb, k))
-
- self._cp_map = None
-
- def cp_all(self, sort=True):
- """
- Returns an ordered iterator instead of a list, so that search
- results can be displayed incrementally.
- """
- if self._cp_map is not None:
- return iter(sorted(self._cp_map)) if sort else iter(self._cp_map)
-
- delta_data = self._vardb._cache_delta.loadRace()
- if delta_data is None:
- return self._iter_cp_all()
-
- self._vardb._cache_delta.applyDelta(delta_data)
-
- self._cp_map = cp_map = {}
- for cpv in self._vardb._aux_cache["packages"]:
- try:
- cpv = _pkg_str(cpv, db=self._vardb)
- except InvalidData:
- continue
-
- cp_list = cp_map.get(cpv.cp)
- if cp_list is None:
- cp_list = []
- cp_map[cpv.cp] = cp_list
- cp_list.append(cpv)
-
- return iter(sorted(self._cp_map)) if sort else iter(self._cp_map)
-
- def _iter_cp_all(self):
- self._cp_map = cp_map = {}
- previous_cp = None
- for cpv in self._vardb._iter_cpv_all(sort = True):
- cp = portage.cpv_getkey(cpv)
- if cp is not None:
- cp_list = cp_map.get(cp)
- if cp_list is None:
- cp_list = []
- cp_map[cp] = cp_list
- cp_list.append(cpv)
- if previous_cp is not None and \
- previous_cp != cp:
- yield previous_cp
- previous_cp = cp
-
- if previous_cp is not None:
- yield previous_cp
-
- def match(self, atom):
- """
- For performance reasons, only package name and version
- constraints are supported, and the returned list is
- unordered.
- """
- if not isinstance(atom, Atom):
- atom = Atom(atom)
- cp_list = self._cp_map.get(atom.cp)
- if cp_list is None:
- return []
-
- if atom == atom.cp:
- return cp_list[:]
- else:
- return portage.match_from_list(atom, cp_list)
-
- def aux_get(self, cpv, attrs, myrepo=None):
- pkg_data = self._vardb._aux_cache["packages"].get(cpv)
- if not isinstance(pkg_data, tuple) or \
- len(pkg_data) != 2 or \
- not isinstance(pkg_data[1], dict):
- pkg_data = None
- if pkg_data is None:
- # It may be missing from _aux_cache due to
- # _aux_cache_threshold.
- return self._vardb.aux_get(cpv, attrs)
- metadata = pkg_data[1]
- return [metadata.get(k, "") for k in attrs]
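Likewise, a minimal sketch of the removed IndexedVardb class; the vartree lookup and the example atom are assumptions.

import portage
from portage.dbapi.IndexedVardb import IndexedVardb

vardb = portage.db[portage.root]["vartree"].dbapi
indexed = IndexedVardb(vardb)

# Iterating cp_all() populates the internal cp -> cpv-list mapping
# that match() consults.
for cp in indexed.cp_all():
	pass

print(indexed.match("sys-apps/portage"))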
diff --git a/pym/portage/dbapi/_ContentsCaseSensitivityManager.py b/pym/portage/dbapi/_ContentsCaseSensitivityManager.py
deleted file mode 100644
index c479ec971..000000000
--- a/pym/portage/dbapi/_ContentsCaseSensitivityManager.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2014 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-class ContentsCaseSensitivityManager(object):
- """
- Implicitly handles case transformations that are needed for
- case-insensitive support.
- """
-
- def __init__(self, db):
- """
- @param db: A dblink instance
- @type db: vartree.dblink
- """
- self.getcontents = db.getcontents
-
- if "case-insensitive-fs" in db.settings.features:
- self.unmap_key = self._unmap_key_case_insensitive
- self.contains = self._contains_case_insensitive
- self.keys = self._keys_case_insensitive
-
- self._contents_insensitive = None
- self._reverse_key_map = None
-
- def clear_cache(self):
- """
- Clear all cached contents data.
- """
- self._contents_insensitive = None
- self._reverse_key_map = None
-
- def keys(self):
- """
- Iterate over all contents keys, which are transformed to
- lowercase when appropriate, for use in case-insensitive
- comparisons.
- @rtype: iterator
- @return: An iterator over all the contents keys
- """
- return iter(self.getcontents())
-
- def contains(self, key):
- """
- Check if the given key is contained in the contents, using
- case-insensitive comparison when appropriate.
- @param key: A filesystem path (including ROOT and EPREFIX)
- @type key: str
- @rtype: bool
- @return: True if the given key is contained in the contents,
- False otherwise
- """
- return key in self.getcontents()
-
- def unmap_key(self, key):
- """
- Map a key (from the keys method) back to its case-preserved
- form.
- @param key: A filesystem path (including ROOT and EPREFIX)
- @type key: str
- @rtype: str
- @return: The case-preserved form of key
- """
- return key
-
- def _case_insensitive_init(self):
- """
- Initialize data structures for case-insensitive support.
- """
- self._contents_insensitive = dict(
- (k.lower(), v) for k, v in self.getcontents().items())
- self._reverse_key_map = dict(
- (k.lower(), k) for k in self.getcontents())
-
- def _keys_case_insensitive(self):
- if self._contents_insensitive is None:
- self._case_insensitive_init()
- return iter(self._contents_insensitive)
-
- _keys_case_insensitive.__doc__ = keys.__doc__
-
- def _contains_case_insensitive(self, key):
- if self._contents_insensitive is None:
- self._case_insensitive_init()
- return key.lower() in self._contents_insensitive
-
- _contains_case_insensitive.__doc__ = contains.__doc__
-
- def _unmap_key_case_insensitive(self, key):
- if self._reverse_key_map is None:
- self._case_insensitive_init()
- return self._reverse_key_map[key]
-
- _unmap_key_case_insensitive.__doc__ = unmap_key.__doc__
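The manager only needs an object exposing getcontents() and settings.features, so its case-folding behavior can be shown with a purely hypothetical stand-in for a vartree.dblink instance:

from portage.dbapi._ContentsCaseSensitivityManager import \
	ContentsCaseSensitivityManager

class FakeSettings(object):
	# Pretend the merge is happening on a case-insensitive filesystem.
	features = frozenset(["case-insensitive-fs"])

class FakeDblink(object):
	settings = FakeSettings()
	def getcontents(self):
		# Minimal stand-in for a real CONTENTS mapping.
		return {"/usr/bin/Foo": ("obj", "1", "d41d8cd9", "0")}

mgr = ContentsCaseSensitivityManager(FakeDblink())
print(mgr.contains("/usr/bin/foo"))   # True
print(mgr.unmap_key("/usr/bin/foo"))  # '/usr/bin/Foo'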
diff --git a/pym/portage/dbapi/_MergeProcess.py b/pym/portage/dbapi/_MergeProcess.py
deleted file mode 100644
index 371550079..000000000
--- a/pym/portage/dbapi/_MergeProcess.py
+++ /dev/null
@@ -1,287 +0,0 @@
-# Copyright 2010-2018 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import io
-import platform
-import signal
-import sys
-import traceback
-
-import fcntl
-import portage
-from portage import os, _unicode_decode
-from portage.util._ctypes import find_library
-import portage.elog.messages
-from portage.util._async.ForkProcess import ForkProcess
-
-class MergeProcess(ForkProcess):
- """
- Merge packages in a subprocess, so the Scheduler can run in the main
- thread while files are moved or copied asynchronously.
- """
-
- __slots__ = ('mycat', 'mypkg', 'settings', 'treetype',
- 'vartree', 'blockers', 'pkgloc', 'infloc', 'myebuild',
- 'mydbapi', 'postinst_failure', 'prev_mtimes', 'unmerge',
- '_elog_reader_fd',
- '_buf', '_elog_keys', '_locked_vdb')
-
- def _start(self):
- # Portage should always call setcpv prior to this
- # point, but here we have a fallback as a convenience
- # for external API consumers. It's important that
- # this metadata access happens in the parent process,
- # since closing of file descriptors in the subprocess
- # can prevent access to open database connections such
- # as that used by the sqlite metadata cache module.
- cpv = "%s/%s" % (self.mycat, self.mypkg)
- settings = self.settings
- if cpv != settings.mycpv or \
- "EAPI" not in settings.configdict["pkg"]:
- settings.reload()
- settings.reset()
- settings.setcpv(cpv, mydb=self.mydbapi)
-
- # This caches the libc library lookup in the current
- # process, so that it's only done once rather than
- # for each child process.
- if platform.system() == "Linux" and \
- "merge-sync" in settings.features:
- find_library("c")
-
- # Inherit stdin by default, so that the pdb SIGUSR1
- # handler is usable for the subprocess.
- if self.fd_pipes is None:
- self.fd_pipes = {}
- else:
- self.fd_pipes = self.fd_pipes.copy()
- self.fd_pipes.setdefault(0, portage._get_stdin().fileno())
-
- super(MergeProcess, self)._start()
-
- def _lock_vdb(self):
- """
- Lock the vdb if FEATURES=parallel-install is NOT enabled,
- otherwise do nothing. This is implemented with
- vardbapi.lock(), which supports reentrance by the
- subprocess that we spawn.
- """
- if "parallel-install" not in self.settings.features:
- self.vartree.dbapi.lock()
- self._locked_vdb = True
-
- def _unlock_vdb(self):
- """
- Unlock the vdb if we hold a lock, otherwise do nothing.
- """
- if self._locked_vdb:
- self.vartree.dbapi.unlock()
- self._locked_vdb = False
-
- def _elog_output_handler(self):
- output = self._read_buf(self._elog_reader_fd)
- if output:
- lines = _unicode_decode(output).split('\n')
- if len(lines) == 1:
- self._buf += lines[0]
- else:
- lines[0] = self._buf + lines[0]
- self._buf = lines.pop()
- out = io.StringIO()
- for line in lines:
- funcname, phase, key, msg = line.split(' ', 3)
- self._elog_keys.add(key)
- reporter = getattr(portage.elog.messages, funcname)
- reporter(msg, phase=phase, key=key, out=out)
-
- elif output is not None: # EIO/POLLHUP
- self.scheduler.remove_reader(self._elog_reader_fd)
- os.close(self._elog_reader_fd)
- self._elog_reader_fd = None
- return False
-
- def _spawn(self, args, fd_pipes, **kwargs):
- """
- Fork a subprocess, apply local settings, and call
- dblink.merge(). TODO: Share code with ForkProcess.
- """
-
- elog_reader_fd, elog_writer_fd = os.pipe()
-
- fcntl.fcntl(elog_reader_fd, fcntl.F_SETFL,
- fcntl.fcntl(elog_reader_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
-
- # FD_CLOEXEC is enabled by default in Python >=3.4.
- if sys.hexversion < 0x3040000:
- try:
- fcntl.FD_CLOEXEC
- except AttributeError:
- pass
- else:
- fcntl.fcntl(elog_reader_fd, fcntl.F_SETFD,
- fcntl.fcntl(elog_reader_fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
-
- blockers = None
- if self.blockers is not None:
- # Query blockers in the main process, since closing
- # of file descriptors in the subprocess can prevent
- # access to open database connections such as that
- # used by the sqlite metadata cache module.
- blockers = self.blockers()
- mylink = portage.dblink(self.mycat, self.mypkg, settings=self.settings,
- treetype=self.treetype, vartree=self.vartree,
- blockers=blockers, pipe=elog_writer_fd)
- fd_pipes[elog_writer_fd] = elog_writer_fd
- self.scheduler.add_reader(elog_reader_fd, self._elog_output_handler)
-
- # If a concurrent emerge process tries to install a package
- # in the same SLOT as this one at the same time, there is an
- # extremely unlikely chance that the COUNTER values will not be
- # ordered correctly unless we lock the vdb here.
- # FEATURES=parallel-install skips this lock in order to
- # improve performance, and the risk is practically negligible.
- self._lock_vdb()
- counter = None
- if not self.unmerge:
- counter = self.vartree.dbapi.counter_tick()
-
- parent_pid = os.getpid()
- pid = None
- try:
- pid = os.fork()
-
- if pid != 0:
- if not isinstance(pid, int):
- raise AssertionError(
- "fork returned non-integer: %s" % (repr(pid),))
-
- os.close(elog_writer_fd)
- self._elog_reader_fd = elog_reader_fd
- self._buf = ""
- self._elog_keys = set()
- # Discard messages which will be collected by the subprocess,
- # in order to avoid duplicates (bug #446136).
- portage.elog.messages.collect_messages(key=mylink.mycpv)
-
- # invalidate relevant vardbapi caches
- if self.vartree.dbapi._categories is not None:
- self.vartree.dbapi._categories = None
- self.vartree.dbapi._pkgs_changed = True
- self.vartree.dbapi._clear_pkg_cache(mylink)
-
- return [pid]
-
- os.close(elog_reader_fd)
-
- # Use default signal handlers in order to avoid problems
- # killing subprocesses as reported in bug #353239.
- signal.signal(signal.SIGINT, signal.SIG_DFL)
- signal.signal(signal.SIGTERM, signal.SIG_DFL)
-
- # Unregister SIGCHLD handler and wakeup_fd for the parent
- # process's event loop (bug 655656).
- signal.signal(signal.SIGCHLD, signal.SIG_DFL)
- try:
- wakeup_fd = signal.set_wakeup_fd(-1)
- if wakeup_fd > 0:
- os.close(wakeup_fd)
- except (ValueError, OSError):
- pass
-
- portage.locks._close_fds()
- # We don't exec, so use close_fds=False
- # (see _setup_pipes docstring).
- portage.process._setup_pipes(fd_pipes, close_fds=False)
-
- portage.output.havecolor = self.settings.get('NOCOLOR') \
- not in ('yes', 'true')
-
- # Avoid wasteful updates of the vdb cache.
- self.vartree.dbapi._flush_cache_enabled = False
-
- # In this subprocess we don't want PORTAGE_BACKGROUND to
- # suppress stdout/stderr output since they are pipes. We
- # also don't want to open PORTAGE_LOG_FILE, since it will
- # already be opened by the parent process, so we set the
- # "subprocess" value for use in conditional logging code
- # involving PORTAGE_LOG_FILE.
- if not self.unmerge:
- # unmerge phases have separate logs
- if self.settings.get("PORTAGE_BACKGROUND") == "1":
- self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "1"
- else:
- self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "0"
- self.settings.backup_changes("PORTAGE_BACKGROUND_UNMERGE")
- self.settings["PORTAGE_BACKGROUND"] = "subprocess"
- self.settings.backup_changes("PORTAGE_BACKGROUND")
-
- rval = 1
- try:
- if self.unmerge:
- if not mylink.exists():
- rval = os.EX_OK
- elif mylink.unmerge(
- ldpath_mtimes=self.prev_mtimes) == os.EX_OK:
- mylink.lockdb()
- try:
- mylink.delete()
- finally:
- mylink.unlockdb()
- rval = os.EX_OK
- else:
- rval = mylink.merge(self.pkgloc, self.infloc,
- myebuild=self.myebuild, mydbapi=self.mydbapi,
- prev_mtimes=self.prev_mtimes, counter=counter)
- except SystemExit:
- raise
- except:
- traceback.print_exc()
- # os._exit() skips stderr flush!
- sys.stderr.flush()
- finally:
- os._exit(rval)
-
- finally:
- if pid == 0 or (pid is None and os.getpid() != parent_pid):
- # Call os._exit() from a finally block in order
- # to suppress any finally blocks from earlier
- # in the call stack (see bug #345289). This
- # finally block has to be setup before the fork
- # in order to avoid a race condition.
- os._exit(1)
-
- def _async_waitpid_cb(self, *args, **kwargs):
- """
- Override _async_waitpid_cb to perform cleanup that is
- not necessarily idempotent.
- """
- ForkProcess._async_waitpid_cb(self, *args, **kwargs)
- if self.returncode == portage.const.RETURNCODE_POSTINST_FAILURE:
- self.postinst_failure = True
- self.returncode = os.EX_OK
-
- def _unregister(self):
- """
- Unregister from the scheduler and close open files.
- """
-
- if not self.unmerge:
- # Populate the vardbapi cache for the new package
- # while its inodes are still hot.
- try:
- self.vartree.dbapi.aux_get(self.settings.mycpv, ["EAPI"])
- except KeyError:
- pass
-
- self._unlock_vdb()
- if self._elog_reader_fd is not None:
- self.scheduler.remove_reader(self._elog_reader_fd)
- os.close(self._elog_reader_fd)
- self._elog_reader_fd = None
- if self._elog_keys is not None:
- for key in self._elog_keys:
- portage.elog.elog_process(key, self.settings,
- phasefilter=("prerm", "postrm"))
- self._elog_keys = None
-
- super(MergeProcess, self)._unregister()
diff --git a/pym/portage/dbapi/_SyncfsProcess.py b/pym/portage/dbapi/_SyncfsProcess.py
deleted file mode 100644
index 767dc2061..000000000
--- a/pym/portage/dbapi/_SyncfsProcess.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Copyright 2012 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from portage import os
-from portage.util._ctypes import find_library, LoadLibrary
-from portage.util._async.ForkProcess import ForkProcess
-
-class SyncfsProcess(ForkProcess):
- """
- Isolate ctypes usage in a subprocess, in order to avoid
- potential problems with stale cached libraries as
- described in bug #448858, comment #14 (also see
- https://bugs.python.org/issue14597).
- """
-
- __slots__ = ('paths',)
-
- @staticmethod
- def _get_syncfs():
-
- filename = find_library("c")
- if filename is not None:
- library = LoadLibrary(filename)
- if library is not None:
- try:
- return library.syncfs
- except AttributeError:
- pass
-
- return None
-
- def _run(self):
-
- syncfs_failed = False
- syncfs = self._get_syncfs()
-
- if syncfs is not None:
- for path in self.paths:
- try:
- fd = os.open(path, os.O_RDONLY)
- except OSError:
- pass
- else:
- try:
- if syncfs(fd) != 0:
- # Happens with PyPy (bug #446610)
- syncfs_failed = True
- finally:
- os.close(fd)
-
- if syncfs is None or syncfs_failed:
- return 1
- return os.EX_OK
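A hedged sketch of driving the removed SyncfsProcess; the global_event_loop import and the example path are assumptions about the caller's environment.

from portage.dbapi._SyncfsProcess import SyncfsProcess
from portage.util._eventloop.global_event_loop import global_event_loop

# syncfs() is invoked in a forked child for the filesystem containing
# each path; wait() returns os.EX_OK only if every call succeeded.
proc = SyncfsProcess(paths=["/var/db/pkg"],
	scheduler=global_event_loop())
proc.start()
print(proc.wait())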
diff --git a/pym/portage/dbapi/_VdbMetadataDelta.py b/pym/portage/dbapi/_VdbMetadataDelta.py
deleted file mode 100644
index 7461f87c5..000000000
--- a/pym/portage/dbapi/_VdbMetadataDelta.py
+++ /dev/null
@@ -1,176 +0,0 @@
-# Copyright 2014-2015 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import errno
-import io
-import json
-import os
-
-from portage import _encodings
-from portage.util import atomic_ofstream
-from portage.versions import cpv_getkey
-
-class VdbMetadataDelta(object):
-
- _format_version = "1"
-
- def __init__(self, vardb):
- self._vardb = vardb
-
- def initialize(self, timestamp):
- f = atomic_ofstream(self._vardb._cache_delta_filename, 'w',
- encoding=_encodings['repo.content'], errors='strict')
- json.dump({
- "version": self._format_version,
- "timestamp": timestamp
- }, f, ensure_ascii=False)
- f.close()
-
- def load(self):
-
- if not os.path.exists(self._vardb._aux_cache_filename):
- # If the primary cache doesn't exist yet, then
- # we can't record a delta against it.
- return None
-
- try:
- with io.open(self._vardb._cache_delta_filename, 'r',
- encoding=_encodings['repo.content'],
- errors='strict') as f:
- cache_obj = json.load(f)
- except EnvironmentError as e:
- if e.errno not in (errno.ENOENT, errno.ESTALE):
- raise
- except (SystemExit, KeyboardInterrupt):
- raise
- except Exception:
- # Corrupt, or not json format.
- pass
- else:
- try:
- version = cache_obj["version"]
- except KeyError:
- pass
- else:
- # Verify that the format version is compatible,
- # since a newer version of portage may have
- # written an incompatible file.
- if version == self._format_version:
- try:
- deltas = cache_obj["deltas"]
- except KeyError:
- cache_obj["deltas"] = deltas = []
-
- if isinstance(deltas, list):
- return cache_obj
-
- return None
-
- def loadRace(self):
- """
- This calls self.load() and validates the timestamp
- against the currently loaded self._vardb._aux_cache. If a
- concurrent update causes the timestamps to be inconsistent,
- then it reloads the caches and tries one more time before
- it aborts. In practice, the race is very unlikely, so
- this will usually succeed on the first try.
- """
-
- tries = 2
- while tries:
- tries -= 1
- cache_delta = self.load()
- if cache_delta is not None and \
- cache_delta.get("timestamp") != \
- self._vardb._aux_cache.get("timestamp", False):
- self._vardb._aux_cache_obj = None
- else:
- return cache_delta
-
- return None
-
- def recordEvent(self, event, cpv, slot, counter):
-
- self._vardb.lock()
- try:
- deltas_obj = self.load()
-
- if deltas_obj is None:
- # We can't record meaningful deltas without
- # a pre-existing state.
- return
-
- delta_node = {
- "event": event,
- "package": cpv.cp,
- "version": cpv.version,
- "slot": slot,
- "counter": "%s" % counter
- }
-
- deltas_obj["deltas"].append(delta_node)
-
- # Eliminate earlier nodes cancelled out by later nodes
- # that have identical package and slot attributes.
- filtered_list = []
- slot_keys = set()
- version_keys = set()
- for delta_node in reversed(deltas_obj["deltas"]):
- slot_key = (delta_node["package"],
- delta_node["slot"])
- version_key = (delta_node["package"],
- delta_node["version"])
- if not (slot_key in slot_keys or \
- version_key in version_keys):
- filtered_list.append(delta_node)
- slot_keys.add(slot_key)
- version_keys.add(version_key)
-
- filtered_list.reverse()
- deltas_obj["deltas"] = filtered_list
-
- f = atomic_ofstream(self._vardb._cache_delta_filename,
- mode='w', encoding=_encodings['repo.content'])
- json.dump(deltas_obj, f, ensure_ascii=False)
- f.close()
-
- finally:
- self._vardb.unlock()
-
- def applyDelta(self, data):
- packages = self._vardb._aux_cache["packages"]
- deltas = {}
- for delta in data["deltas"]:
- cpv = delta["package"] + "-" + delta["version"]
- deltas[cpv] = delta
- event = delta["event"]
- if event == "add":
- # Use aux_get to populate the cache
- # for this cpv.
- if cpv not in packages:
- try:
- self._vardb.aux_get(cpv, ["DESCRIPTION"])
- except KeyError:
- pass
- elif event == "remove":
- packages.pop(cpv, None)
-
- if deltas:
- # Delete removed or replaced versions from affected slots
- for cached_cpv, (mtime, metadata) in list(packages.items()):
- if cached_cpv in deltas:
- continue
-
- removed = False
- for cpv, delta in deltas.items():
- if (cached_cpv.startswith(delta["package"]) and
- metadata.get("SLOT") == delta["slot"] and
- cpv_getkey(cached_cpv) == delta["package"]):
- removed = True
- break
-
- if removed:
- del packages[cached_cpv]
- del deltas[cpv]
- if not deltas:
- break
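VdbMetadataDelta instances are normally reached through vardbapi._cache_delta, as IndexedVardb does above; a minimal sketch, assuming a standard vartree:

import portage

vardb = portage.db[portage.root]["vartree"].dbapi
delta = vardb._cache_delta.loadRace()
if delta is not None:
	# Replay the recorded merge/unmerge events onto the metadata
	# loaded from vdb_metadata.pickle.
	vardb._cache_delta.applyDelta(delta)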
diff --git a/pym/portage/dbapi/__init__.py b/pym/portage/dbapi/__init__.py
deleted file mode 100644
index 6fca6090c..000000000
--- a/pym/portage/dbapi/__init__.py
+++ /dev/null
@@ -1,443 +0,0 @@
-# Copyright 1998-2018 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from __future__ import unicode_literals
-
-__all__ = ["dbapi"]
-
-import functools
-import re
-
-import portage
-portage.proxy.lazyimport.lazyimport(globals(),
- 'portage.dbapi.dep_expand:dep_expand@_dep_expand',
- 'portage.dep:Atom,match_from_list,_match_slot',
- 'portage.output:colorize',
- 'portage.util:cmp_sort_key,writemsg',
- 'portage.versions:catsplit,catpkgsplit,vercmp,_pkg_str',
-)
-
-from portage.const import MERGING_IDENTIFIER
-
-from portage import os
-from portage import auxdbkeys
-from portage.eapi import _get_eapi_attrs
-from portage.exception import InvalidData
-from portage.localization import _
-from _emerge.Package import Package
-
-class dbapi(object):
- _category_re = re.compile(r'^\w[-.+\w]*$', re.UNICODE)
- _categories = None
- _use_mutable = False
- _known_keys = frozenset(x for x in auxdbkeys
- if not x.startswith("UNUSED_0"))
- _pkg_str_aux_keys = ("BUILD_TIME", "EAPI", "BUILD_ID",
- "KEYWORDS", "SLOT", "repository")
-
- def __init__(self):
- pass
-
- @property
- def categories(self):
- """
- Use self.cp_all() to generate a category list. Mutable instances
- can delete the self._categories attribute in cases when the cached
- categories become invalid and need to be regenerated.
- """
- if self._categories is not None:
- return self._categories
- self._categories = tuple(sorted(set(catsplit(x)[0] \
- for x in self.cp_all())))
- return self._categories
-
- def close_caches(self):
- pass
-
- def cp_list(self, cp, use_cache=1):
- raise NotImplementedError(self)
-
- @staticmethod
- def _cmp_cpv(cpv1, cpv2):
- result = vercmp(cpv1.version, cpv2.version)
- if (result == 0 and cpv1.build_time is not None and
- cpv2.build_time is not None):
- result = ((cpv1.build_time > cpv2.build_time) -
- (cpv1.build_time < cpv2.build_time))
- return result
-
- @staticmethod
- def _cpv_sort_ascending(cpv_list):
- """
- Use this to sort self.cp_list() results in ascending
- order. It sorts in place and returns None.
- """
- if len(cpv_list) > 1:
- # If the cpv includes explicit -r0, it has to be preserved
- # for consistency in findname and aux_get calls.
- cpv_list.sort(key=cmp_sort_key(dbapi._cmp_cpv))
-
- def cpv_all(self):
- """Return all CPVs in the db
- Args:
- None
- Returns:
- A list of Strings, 1 per CPV
-
- This function relies on a subclass implementing cp_all; this is why the hasattr check is there
- """
-
- if not hasattr(self, "cp_all"):
- raise NotImplementedError
- cpv_list = []
- for cp in self.cp_all():
- cpv_list.extend(self.cp_list(cp))
- return cpv_list
-
- def cp_all(self, sort=False):
- """ Implement this in a child class
- Args
- sort - return sorted results
- Returns:
- A list of strings 1 per CP in the datastore
- """
- raise NotImplementedError
-
- def aux_get(self, mycpv, mylist, myrepo=None):
- """Return the metadata keys in mylist for mycpv
- Args:
- mycpv - "sys-apps/foo-1.0"
- mylist - ["SLOT","DEPEND","HOMEPAGE"]
- myrepo - The repository name.
- Returns:
- a list of results, in order of keys in mylist, such as:
- ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or [] if mycpv not found
- """
- raise NotImplementedError
-
- def aux_update(self, cpv, metadata_updates):
- """
- Args:
- cpv - "sys-apps/foo-1.0"
- metadata_updates = { key : newvalue }
- Returns:
- None
- """
- raise NotImplementedError
-
- def match(self, origdep, use_cache=1):
- """Given a dependency, try to find packages that match
- Args:
- origdep - Depend atom
- use_cache - Boolean indicating if we should use the cache or not
- NOTE: Do we ever not want the cache?
- Returns:
- a list of packages that match origdep
- """
- mydep = _dep_expand(origdep, mydb=self, settings=self.settings)
- return list(self._iter_match(mydep,
- self.cp_list(mydep.cp, use_cache=use_cache)))
-
- def _iter_match(self, atom, cpv_iter):
- cpv_iter = iter(match_from_list(atom, cpv_iter))
- if atom.repo:
- cpv_iter = self._iter_match_repo(atom, cpv_iter)
- if atom.slot:
- cpv_iter = self._iter_match_slot(atom, cpv_iter)
- if atom.unevaluated_atom.use:
- cpv_iter = self._iter_match_use(atom, cpv_iter)
- return cpv_iter
-
- def _pkg_str(self, cpv, repo):
- """
- This is used to construct _pkg_str instances on-demand during
- matching. If cpv is a _pkg_str instance with slot attribute,
- then simply return it. Otherwise, fetch metadata and construct
- a _pkg_str instance. This may raise KeyError or InvalidData.
- """
- try:
- cpv.slot
- except AttributeError:
- pass
- else:
- return cpv
-
- metadata = dict(zip(self._pkg_str_aux_keys,
- self.aux_get(cpv, self._pkg_str_aux_keys, myrepo=repo)))
-
- return _pkg_str(cpv, metadata=metadata, settings=self.settings, db=self)
-
- def _iter_match_repo(self, atom, cpv_iter):
- for cpv in cpv_iter:
- try:
- pkg_str = self._pkg_str(cpv, atom.repo)
- except (KeyError, InvalidData):
- pass
- else:
- if pkg_str.repo == atom.repo:
- yield pkg_str
-
- def _iter_match_slot(self, atom, cpv_iter):
- for cpv in cpv_iter:
- try:
- pkg_str = self._pkg_str(cpv, atom.repo)
- except (KeyError, InvalidData):
- pass
- else:
- if _match_slot(atom, pkg_str):
- yield pkg_str
-
- def _iter_match_use(self, atom, cpv_iter):
- """
- 1) Check for required IUSE intersection (need implicit IUSE here).
- 2) Check enabled/disabled flag states.
- """
-
- aux_keys = ["EAPI", "IUSE", "KEYWORDS", "SLOT", "USE", "repository"]
- for cpv in cpv_iter:
- try:
- metadata = dict(zip(aux_keys,
- self.aux_get(cpv, aux_keys, myrepo=atom.repo)))
- except KeyError:
- continue
-
- try:
- cpv.slot
- except AttributeError:
- try:
- cpv = _pkg_str(cpv, metadata=metadata,
- settings=self.settings)
- except InvalidData:
- continue
-
- if not self._match_use(atom, cpv, metadata):
- continue
-
- yield cpv
-
- def _repoman_iuse_implicit_cnstr(self, pkg, metadata):
- """
- In repoman's version of _iuse_implicit_cnstr, account for modifications
- of the self.settings reference between calls.
- """
- eapi_attrs = _get_eapi_attrs(metadata["EAPI"])
- if eapi_attrs.iuse_effective:
- iuse_implicit_match = lambda flag: self.settings._iuse_effective_match(flag)
- else:
- iuse_implicit_match = lambda flag: self.settings._iuse_implicit_match(flag)
- return iuse_implicit_match
-
- def _iuse_implicit_cnstr(self, pkg, metadata):
- """
- Construct a callable that checks if a given USE flag should
- be considered to be a member of the implicit IUSE for the
- given package.
-
- @param pkg: package
- @type pkg: _pkg_str
- @param metadata: package metadata
- @type metadata: Mapping
- @return: a callable that accepts a single USE flag argument,
- and returns True only if the USE flag should be considered
- to be a member of the implicit IUSE for the given package.
- @rtype: callable
- """
- eapi_attrs = _get_eapi_attrs(metadata["EAPI"])
- if eapi_attrs.iuse_effective:
- iuse_implicit_match = self.settings._iuse_effective_match
- else:
- iuse_implicit_match = self.settings._iuse_implicit_match
-
- if not self._use_mutable and eapi_attrs.iuse_effective:
- # For built packages, it is desirable for the built USE setting to
- # be independent of the profile's current IUSE_IMPLICIT state, since
- # the profile's IUSE_IMPLICIT setting may have diverged. Therefore,
- # any member of the built USE setting is considered to be a valid
- # member of IUSE_EFFECTIVE. Note that the binary package may be
- # remote, so it's only possible to rely on metadata that is available
- # in the remote Packages file, and the IUSE_IMPLICIT header in the
- # Packages file is vulnerable to mutation (see bug 640318).
- #
- # This behavior is only used for EAPIs that support IUSE_EFFECTIVE,
- # since built USE settings for earlier EAPIs may contain a large
- # number of irrelevant flags.
- prof_iuse = iuse_implicit_match
- enabled = frozenset(metadata["USE"].split()).__contains__
- iuse_implicit_match = lambda flag: prof_iuse(flag) or enabled(flag)
-
- return iuse_implicit_match
-
- def _match_use(self, atom, pkg, metadata, ignore_profile=False):
- iuse_implicit_match = self._iuse_implicit_cnstr(pkg, metadata)
- usealiases = self.settings._use_manager.getUseAliases(pkg)
- iuse = Package._iuse(None, metadata["IUSE"].split(), iuse_implicit_match, usealiases, metadata["EAPI"])
-
- for x in atom.unevaluated_atom.use.required:
- if iuse.get_real_flag(x) is None:
- return False
-
- if atom.use is None:
- pass
-
- elif not self._use_mutable:
- # Use IUSE to validate USE settings for built packages,
- # in case the package manager that built this package
- # failed to do that for some reason (or in case of
- # data corruption). The enabled flags must be consistent
- # with implicit IUSE, in order to avoid potential
- # inconsistencies in USE dep matching (see bug #453400).
- use = frozenset(x for x in metadata["USE"].split() if iuse.get_real_flag(x) is not None)
- missing_enabled = frozenset(x for x in atom.use.missing_enabled if iuse.get_real_flag(x) is None)
- missing_disabled = frozenset(x for x in atom.use.missing_disabled if iuse.get_real_flag(x) is None)
- enabled = frozenset((iuse.get_real_flag(x) or x) for x in atom.use.enabled)
- disabled = frozenset((iuse.get_real_flag(x) or x) for x in atom.use.disabled)
-
- if enabled:
- if any(x in enabled for x in missing_disabled):
- return False
- need_enabled = enabled.difference(use)
- if need_enabled:
- if any(x not in missing_enabled for x in need_enabled):
- return False
-
- if disabled:
- if any(x in disabled for x in missing_enabled):
- return False
- need_disabled = disabled.intersection(use)
- if need_disabled:
- if any(x not in missing_disabled for x in need_disabled):
- return False
-
- elif not self.settings.local_config:
- if not ignore_profile:
- # Check masked and forced flags for repoman.
- usemask = self.settings._getUseMask(pkg,
- stable=self.settings._parent_stable)
- if any(x in usemask for x in atom.use.enabled):
- return False
-
- useforce = self.settings._getUseForce(pkg,
- stable=self.settings._parent_stable)
- if any(x in useforce and x not in usemask
- for x in atom.use.disabled):
- return False
-
- # Check unsatisfied use-default deps
- if atom.use.enabled:
- missing_disabled = frozenset(x for x in atom.use.missing_disabled if iuse.get_real_flag(x) is None)
- if any(x in atom.use.enabled for x in missing_disabled):
- return False
- if atom.use.disabled:
- missing_enabled = frozenset(x for x in atom.use.missing_enabled if iuse.get_real_flag(x) is None)
- if any(x in atom.use.disabled for x in missing_enabled):
- return False
-
- return True
-
- def invalidentry(self, mypath):
- if "/" + MERGING_IDENTIFIER in mypath:
- if os.path.exists(mypath):
- writemsg(colorize("BAD", _("INCOMPLETE MERGE:"))+" %s\n" % mypath,
- noiselevel=-1)
- else:
- writemsg("!!! Invalid db entry: %s\n" % mypath, noiselevel=-1)
-
- def update_ents(self, updates, onProgress=None, onUpdate=None):
- """
- Update metadata of all packages for package moves.
- @param updates: A list of move commands, or dict of {repo_name: list}
- @type updates: list or dict
- @param onProgress: A progress callback function
- @type onProgress: a callable that takes 2 integer arguments: maxval and curval
- @param onUpdate: A progress callback function called only
- for packages that are modified by updates.
- @type onUpdate: a callable that takes 2 integer arguments:
- maxval and curval
- """
- cpv_all = self.cpv_all()
- cpv_all.sort()
- maxval = len(cpv_all)
- aux_get = self.aux_get
- aux_update = self.aux_update
- update_keys = Package._dep_keys
- meta_keys = update_keys + self._pkg_str_aux_keys
- repo_dict = None
- if isinstance(updates, dict):
- repo_dict = updates
- if onUpdate:
- onUpdate(maxval, 0)
- if onProgress:
- onProgress(maxval, 0)
- for i, cpv in enumerate(cpv_all):
- try:
- metadata = dict(zip(meta_keys, aux_get(cpv, meta_keys)))
- except KeyError:
- continue
- try:
- pkg = _pkg_str(cpv, metadata=metadata, settings=self.settings)
- except InvalidData:
- continue
- metadata = dict((k, metadata[k]) for k in update_keys)
- if repo_dict is None:
- updates_list = updates
- else:
- try:
- updates_list = repo_dict[pkg.repo]
- except KeyError:
- try:
- updates_list = repo_dict['DEFAULT']
- except KeyError:
- continue
-
- if not updates_list:
- continue
-
- metadata_updates = \
- portage.update_dbentries(updates_list, metadata, parent=pkg)
- if metadata_updates:
- aux_update(cpv, metadata_updates)
- if onUpdate:
- onUpdate(maxval, i+1)
- if onProgress:
- onProgress(maxval, i+1)
-
- def move_slot_ent(self, mylist, repo_match=None):
- """This function takes a sequence:
- Args:
- mylist: a sequence of the form ["slotmove", atom, originalslot, newslot]
- repo_match: callable that takes single repo_name argument
- and returns True if the update should be applied
- Returns:
- The number of slotmoves this function did
- """
- atom = mylist[1]
- origslot = mylist[2]
- newslot = mylist[3]
-
- try:
- atom.with_slot
- except AttributeError:
- atom = Atom(atom).with_slot(origslot)
- else:
- atom = atom.with_slot(origslot)
-
- origmatches = self.match(atom)
- moves = 0
- if not origmatches:
- return moves
- for mycpv in origmatches:
- try:
- mycpv = self._pkg_str(mycpv, atom.repo)
- except (KeyError, InvalidData):
- continue
- if repo_match is not None and not repo_match(mycpv.repo):
- continue
- moves += 1
- if "/" not in newslot and \
- mycpv.sub_slot and \
- mycpv.sub_slot not in (mycpv.slot, newslot):
- newslot = "%s/%s" % (newslot, mycpv.sub_slot)
- mydata = {"SLOT": newslot+"\n"}
- self.aux_update(mycpv, mydata)
- return moves
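The dbapi class above is an abstract base; matching and metadata access are exercised through concrete subclasses such as portdbapi or vardbapi. A minimal sketch (the atom and keys are illustrative):

import portage

# portdbapi is one concrete dbapi subclass.
portdb = portage.db[portage.root]["porttree"].dbapi
matches = portdb.match("dev-lang/python")
if matches:
	# aux_get returns values in the same order as the requested keys.
	print(portdb.aux_get(matches[-1], ["SLOT", "HOMEPAGE"]))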
diff --git a/pym/portage/dbapi/_expand_new_virt.py b/pym/portage/dbapi/_expand_new_virt.py
deleted file mode 100644
index 9aa603d11..000000000
--- a/pym/portage/dbapi/_expand_new_virt.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# Copyright 2011-2013 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from __future__ import unicode_literals
-
-import portage
-from portage.dep import Atom, _get_useflag_re
-from portage.eapi import _get_eapi_attrs
-
-def expand_new_virt(vardb, atom):
- """
- Iterate over the recursively expanded RDEPEND atoms of
- a new-style virtual. If atom is not a new-style virtual
- or it does not match an installed package then it is
- yielded without any expansion.
- """
- if not isinstance(atom, Atom):
- atom = Atom(atom)
-
- if not atom.cp.startswith("virtual/"):
- yield atom
- return
-
- traversed = set()
- stack = [atom]
-
- while stack:
- atom = stack.pop()
- if atom.blocker or \
- not atom.cp.startswith("virtual/"):
- yield atom
- continue
-
- matches = vardb.match(atom)
- if not (matches and matches[-1].startswith("virtual/")):
- yield atom
- continue
-
- virt_cpv = matches[-1]
- if virt_cpv in traversed:
- continue
-
- traversed.add(virt_cpv)
- eapi, iuse, rdepend, use = vardb.aux_get(virt_cpv,
- ["EAPI", "IUSE", "RDEPEND", "USE"])
- if not portage.eapi_is_supported(eapi):
- yield atom
- continue
-
- eapi_attrs = _get_eapi_attrs(eapi)
- # Validate IUSE and USE, for early detection of vardb corruption.
- useflag_re = _get_useflag_re(eapi)
- valid_iuse = []
- for x in iuse.split():
- if x[:1] in ("+", "-"):
- x = x[1:]
- if useflag_re.match(x) is not None:
- valid_iuse.append(x)
- valid_iuse = frozenset(valid_iuse)
-
- if eapi_attrs.iuse_effective:
- iuse_implicit_match = vardb.settings._iuse_effective_match
- else:
- iuse_implicit_match = vardb.settings._iuse_implicit_match
-
- valid_use = []
- for x in use.split():
- if x in valid_iuse or iuse_implicit_match(x):
- valid_use.append(x)
- valid_use = frozenset(valid_use)
-
- success, atoms = portage.dep_check(rdepend,
- None, vardb.settings, myuse=valid_use,
- myroot=vardb.settings['EROOT'],
- trees={vardb.settings['EROOT']:{"porttree":vardb.vartree,
- "vartree":vardb.vartree}})
-
- if success:
- stack.extend(atoms)
- else:
- yield atom
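A minimal sketch of the removed expand_new_virt helper, which expands a new-style virtual into the concrete RDEPEND atoms of its installed provider; the example atom is an assumption.

import portage
from portage.dbapi._expand_new_virt import expand_new_virt

vardb = portage.db[portage.root]["vartree"].dbapi
for atom in expand_new_virt(vardb, "virtual/editor"):
	print(atom)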
diff --git a/pym/portage/dbapi/_similar_name_search.py b/pym/portage/dbapi/_similar_name_search.py
deleted file mode 100644
index b6e4a1fbe..000000000
--- a/pym/portage/dbapi/_similar_name_search.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2011-2012 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import difflib
-
-from portage.versions import catsplit
-
-def similar_name_search(dbs, atom):
-
- cp_lower = atom.cp.lower()
- cat, pkg = catsplit(cp_lower)
- if cat == "null":
- cat = None
-
- all_cp = set()
- for db in dbs:
- all_cp.update(db.cp_all())
-
- # discard dir containing no ebuilds
- all_cp.discard(atom.cp)
-
- orig_cp_map = {}
- for cp_orig in all_cp:
- orig_cp_map.setdefault(cp_orig.lower(), []).append(cp_orig)
- all_cp = set(orig_cp_map)
-
- if cat:
- matches = difflib.get_close_matches(cp_lower, all_cp)
- else:
- pkg_to_cp = {}
- for other_cp in list(all_cp):
- other_pkg = catsplit(other_cp)[1]
- if other_pkg == pkg:
- # Check for non-identical package that
- # differs only by upper/lower case.
- identical = True
- for cp_orig in orig_cp_map[other_cp]:
- if catsplit(cp_orig)[1] != \
- catsplit(atom.cp)[1]:
- identical = False
- break
- if identical:
- # discard dir containing no ebuilds
- all_cp.discard(other_cp)
- continue
- pkg_to_cp.setdefault(other_pkg, set()).add(other_cp)
-
- pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp)
- matches = []
- for pkg_match in pkg_matches:
- matches.extend(pkg_to_cp[pkg_match])
-
- matches_orig_case = []
- for cp in matches:
- matches_orig_case.extend(orig_cp_map[cp])
-
- return matches_orig_case
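A minimal sketch of the removed similar_name_search helper, used for "did you mean" style suggestions when an atom matches nothing; the misspelled atom is illustrative.

import portage
from portage.dep import Atom
from portage.dbapi._similar_name_search import similar_name_search

portdb = portage.db[portage.root]["porttree"].dbapi
print(similar_name_search([portdb], Atom("app-editors/vimm")))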
diff --git a/pym/portage/dbapi/bintree.py b/pym/portage/dbapi/bintree.py
deleted file mode 100644
index 9c2d877e7..000000000
--- a/pym/portage/dbapi/bintree.py
+++ /dev/null
@@ -1,1710 +0,0 @@
-# Copyright 1998-2018 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from __future__ import unicode_literals
-
-__all__ = ["bindbapi", "binarytree"]
-
-import portage
-portage.proxy.lazyimport.lazyimport(globals(),
- 'portage.checksum:get_valid_checksum_keys,perform_multiple_checksums,' + \
- 'verify_all,_apply_hash_filter,_hash_filter',
- 'portage.dbapi.dep_expand:dep_expand',
- 'portage.dep:dep_getkey,isjustname,isvalidatom,match_from_list',
- 'portage.output:EOutput,colorize',
- 'portage.locks:lockfile,unlockfile',
- 'portage.package.ebuild.fetch:_check_distfile,_hide_url_passwd',
- 'portage.update:update_dbentries',
- 'portage.util:atomic_ofstream,ensure_dirs,normalize_path,' + \
- 'writemsg,writemsg_stdout',
- 'portage.util.path:first_existing',
- 'portage.util._urlopen:urlopen@_urlopen,have_pep_476@_have_pep_476',
- 'portage.versions:best,catpkgsplit,catsplit,_pkg_str',
-)
-
-from portage.cache.mappings import slot_dict_class
-from portage.const import CACHE_PATH, SUPPORTED_XPAK_EXTENSIONS
-from portage.dbapi.virtual import fakedbapi
-from portage.dep import Atom, use_reduce, paren_enclose
-from portage.exception import AlarmSignal, InvalidData, InvalidPackageName, \
- ParseError, PermissionDenied, PortageException
-from portage.localization import _
-from portage.package.ebuild.profile_iuse import iter_iuse_vars
-from portage import _movefile
-from portage import os
-from portage import _encodings
-from portage import _unicode_decode
-from portage import _unicode_encode
-
-import codecs
-import errno
-import io
-import stat
-import subprocess
-import sys
-import tempfile
-import textwrap
-import time
-import traceback
-import warnings
-from gzip import GzipFile
-from itertools import chain
-try:
- from urllib.parse import urlparse
-except ImportError:
- from urlparse import urlparse
-
-if sys.hexversion >= 0x3000000:
- # pylint: disable=W0622
- _unicode = str
- basestring = str
- long = int
-else:
- _unicode = unicode
-
-class UseCachedCopyOfRemoteIndex(Exception):
- # If the local copy is recent enough
- # then fetching the remote index can be skipped.
- pass
-
-class bindbapi(fakedbapi):
- _known_keys = frozenset(list(fakedbapi._known_keys) + \
- ["CHOST", "repository", "USE"])
- def __init__(self, mybintree=None, **kwargs):
- # Always enable multi_instance mode for bindbapi indexing. This
- # does not affect the local PKGDIR file layout, since that is
- # controlled independently by FEATURES=binpkg-multi-instance.
- # The multi_instance mode is useful for the following reasons:
- # * binary packages with the same cpv from multiple binhosts
- # can be considered simultaneously
- # * if binpkg-multi-instance is disabled, it's still possible
- # to properly access a PKGDIR which has binpkg-multi-instance
- # layout (or mixed layout)
- fakedbapi.__init__(self, exclusive_slots=False,
- multi_instance=True, **kwargs)
- self.bintree = mybintree
- self.move_ent = mybintree.move_ent
- # Selectively cache metadata in order to optimize dep matching.
- self._aux_cache_keys = set(
- ["BDEPEND", "BUILD_ID", "BUILD_TIME", "CHOST", "DEFINED_PHASES",
- "DEPEND", "EAPI", "HDEPEND", "IUSE", "KEYWORDS",
- "LICENSE", "MD5", "PDEPEND", "PROPERTIES",
- "PROVIDES", "RDEPEND", "repository", "REQUIRES", "RESTRICT",
- "SIZE", "SLOT", "USE", "_mtime_"
- ])
- self._aux_cache_slot_dict = slot_dict_class(self._aux_cache_keys)
- self._aux_cache = {}
-
- @property
- def writable(self):
- """
- Check if PKGDIR is writable, or permissions are sufficient
- to create it if it does not exist yet.
- @rtype: bool
- @return: True if PKGDIR is writable or can be created,
- False otherwise
- """
- return os.access(first_existing(self.bintree.pkgdir), os.W_OK)
-
- def match(self, *pargs, **kwargs):
- if self.bintree and not self.bintree.populated:
- self.bintree.populate()
- return fakedbapi.match(self, *pargs, **kwargs)
-
- def cpv_exists(self, cpv, myrepo=None):
- if self.bintree and not self.bintree.populated:
- self.bintree.populate()
- return fakedbapi.cpv_exists(self, cpv)
-
- def cpv_inject(self, cpv, **kwargs):
- if not self.bintree.populated:
- self.bintree.populate()
- fakedbapi.cpv_inject(self, cpv,
- metadata=cpv._metadata, **kwargs)
-
- def cpv_remove(self, cpv):
- if not self.bintree.populated:
- self.bintree.populate()
- fakedbapi.cpv_remove(self, cpv)
-
- def aux_get(self, mycpv, wants, myrepo=None):
- if self.bintree and not self.bintree.populated:
- self.bintree.populate()
- # Support plain string for backward compatibility with API
- # consumers (including portageq, which passes in a cpv from
- # a command-line argument).
- instance_key = self._instance_key(mycpv,
- support_string=True)
- if not self._known_keys.intersection(
- wants).difference(self._aux_cache_keys):
- aux_cache = self.cpvdict[instance_key]
- if aux_cache is not None:
- return [aux_cache.get(x, "") for x in wants]
- mysplit = mycpv.split("/")
- mylist = []
- if not self.bintree._remotepkgs or \
- not self.bintree.isremote(mycpv):
- try:
- tbz2_path = self.bintree._pkg_paths[instance_key]
- except KeyError:
- raise KeyError(mycpv)
- tbz2_path = os.path.join(self.bintree.pkgdir, tbz2_path)
- try:
- st = os.lstat(tbz2_path)
- except OSError:
- raise KeyError(mycpv)
- metadata_bytes = portage.xpak.tbz2(tbz2_path).get_data()
- def getitem(k):
- if k == "_mtime_":
- return _unicode(st[stat.ST_MTIME])
- elif k == "SIZE":
- return _unicode(st.st_size)
- v = metadata_bytes.get(_unicode_encode(k,
- encoding=_encodings['repo.content'],
- errors='backslashreplace'))
- if v is not None:
- v = _unicode_decode(v,
- encoding=_encodings['repo.content'], errors='replace')
- return v
- else:
- getitem = self.cpvdict[instance_key].get
- mydata = {}
- mykeys = wants
- for x in mykeys:
- myval = getitem(x)
- # myval is None if the key doesn't exist
- # or the tbz2 is corrupt.
- if myval:
- mydata[x] = " ".join(myval.split())
-
- if not mydata.setdefault('EAPI', '0'):
- mydata['EAPI'] = '0'
-
- return [mydata.get(x, '') for x in wants]
-
- def aux_update(self, cpv, values):
- if not self.bintree.populated:
- self.bintree.populate()
- build_id = None
- try:
- build_id = cpv.build_id
- except AttributeError:
- if self.bintree._multi_instance:
- # The cpv.build_id attribute is required if we are in
- # multi-instance mode, since otherwise we won't know
- # which instance to update.
- raise
- else:
- cpv = self._instance_key(cpv, support_string=True)[0]
- build_id = cpv.build_id
-
- tbz2path = self.bintree.getname(cpv)
- if not os.path.exists(tbz2path):
- raise KeyError(cpv)
- mytbz2 = portage.xpak.tbz2(tbz2path)
- mydata = mytbz2.get_data()
-
- for k, v in values.items():
- k = _unicode_encode(k,
- encoding=_encodings['repo.content'], errors='backslashreplace')
- v = _unicode_encode(v,
- encoding=_encodings['repo.content'], errors='backslashreplace')
- mydata[k] = v
-
- for k, v in list(mydata.items()):
- if not v:
- del mydata[k]
- mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
- # inject will clear stale caches via cpv_inject.
- self.bintree.inject(cpv, filename=tbz2path)
-
- def cp_list(self, *pargs, **kwargs):
- if not self.bintree.populated:
- self.bintree.populate()
- return fakedbapi.cp_list(self, *pargs, **kwargs)
-
- def cp_all(self, sort=False):
- if not self.bintree.populated:
- self.bintree.populate()
- return fakedbapi.cp_all(self, sort=sort)
-
- def cpv_all(self):
- if not self.bintree.populated:
- self.bintree.populate()
- return fakedbapi.cpv_all(self)
-
- def getfetchsizes(self, pkg):
- """
- This will raise MissingSignature if SIZE signature is not available,
- or InvalidSignature if SIZE signature is invalid.
- """
-
- if not self.bintree.populated:
- self.bintree.populate()
-
- pkg = getattr(pkg, 'cpv', pkg)
-
- filesdict = {}
- if not self.bintree.isremote(pkg):
- pass
- else:
- metadata = self.bintree._remotepkgs[self._instance_key(pkg)]
- try:
- size = int(metadata["SIZE"])
- except KeyError:
- raise portage.exception.MissingSignature("SIZE")
- except ValueError:
- raise portage.exception.InvalidSignature(
- "SIZE: %s" % metadata["SIZE"])
- else:
- filesdict[os.path.basename(self.bintree.getname(pkg))] = size
-
- return filesdict
-
-class binarytree(object):
- "this tree scans for a list of all packages available in PKGDIR"
- def __init__(self, _unused=DeprecationWarning, pkgdir=None,
- virtual=DeprecationWarning, settings=None):
-
- if pkgdir is None:
- raise TypeError("pkgdir parameter is required")
-
- if settings is None:
- raise TypeError("settings parameter is required")
-
- if _unused is not DeprecationWarning:
- warnings.warn("The first parameter of the "
- "portage.dbapi.bintree.binarytree"
- " constructor is now unused. Instead "
- "settings['ROOT'] is used.",
- DeprecationWarning, stacklevel=2)
-
- if virtual is not DeprecationWarning:
- warnings.warn("The 'virtual' parameter of the "
- "portage.dbapi.bintree.binarytree"
- " constructor is unused",
- DeprecationWarning, stacklevel=2)
-
- if True:
- self.pkgdir = normalize_path(pkgdir)
- # NOTE: Even if binpkg-multi-instance is disabled, it's
- # still possible to access a PKGDIR which uses the
- # binpkg-multi-instance layout (or mixed layout).
- self._multi_instance = ("binpkg-multi-instance" in
- settings.features)
- if self._multi_instance:
- self._allocate_filename = self._allocate_filename_multi
- self.dbapi = bindbapi(self, settings=settings)
- self.update_ents = self.dbapi.update_ents
- self.move_slot_ent = self.dbapi.move_slot_ent
- self.populated = 0
- self.tree = {}
- self._remote_has_index = False
- self._remotepkgs = None # remote metadata indexed by cpv
- self.invalids = []
- self.settings = settings
- self._pkg_paths = {}
- self._populating = False
- self._all_directory = os.path.isdir(
- os.path.join(self.pkgdir, "All"))
- self._pkgindex_version = 0
- self._pkgindex_hashes = ["MD5","SHA1"]
- self._pkgindex_file = os.path.join(self.pkgdir, "Packages")
- self._pkgindex_keys = self.dbapi._aux_cache_keys.copy()
- self._pkgindex_keys.update(["CPV", "SIZE"])
- self._pkgindex_aux_keys = \
- ["BASE_URI", "BDEPEND", "BUILD_ID", "BUILD_TIME", "CHOST",
- "DEFINED_PHASES", "DEPEND", "DESCRIPTION", "EAPI",
- "HDEPEND", "IUSE", "KEYWORDS", "LICENSE", "PDEPEND",
- "PKGINDEX_URI", "PROPERTIES", "PROVIDES",
- "RDEPEND", "repository", "REQUIRES", "RESTRICT",
- "SIZE", "SLOT", "USE"]
- self._pkgindex_aux_keys = list(self._pkgindex_aux_keys)
- self._pkgindex_use_evaluated_keys = \
- ("BDEPEND", "DEPEND", "HDEPEND", "LICENSE", "RDEPEND",
- "PDEPEND", "PROPERTIES", "RESTRICT")
- self._pkgindex_header = None
- self._pkgindex_header_keys = set([
- "ACCEPT_KEYWORDS", "ACCEPT_LICENSE",
- "ACCEPT_PROPERTIES", "ACCEPT_RESTRICT", "CBUILD",
- "CONFIG_PROTECT", "CONFIG_PROTECT_MASK", "FEATURES",
- "GENTOO_MIRRORS", "INSTALL_MASK", "IUSE_IMPLICIT", "USE",
- "USE_EXPAND", "USE_EXPAND_HIDDEN", "USE_EXPAND_IMPLICIT",
- "USE_EXPAND_UNPREFIXED"])
- self._pkgindex_default_pkg_data = {
- "BDEPEND" : "",
- "BUILD_ID" : "",
- "BUILD_TIME" : "",
- "DEFINED_PHASES" : "",
- "DEPEND" : "",
- "EAPI" : "0",
- "HDEPEND" : "",
- "IUSE" : "",
- "KEYWORDS": "",
- "LICENSE" : "",
- "PATH" : "",
- "PDEPEND" : "",
- "PROPERTIES" : "",
- "PROVIDES": "",
- "RDEPEND" : "",
- "REQUIRES": "",
- "RESTRICT": "",
- "SLOT" : "0",
- "USE" : "",
- }
- self._pkgindex_inherited_keys = ["CHOST", "repository"]
-
- # Populate the header with appropriate defaults.
- self._pkgindex_default_header_data = {
- "CHOST" : self.settings.get("CHOST", ""),
- "repository" : "",
- }
-
- self._pkgindex_translated_keys = (
- ("DESCRIPTION" , "DESC"),
- ("_mtime_" , "MTIME"),
- ("repository" , "REPO"),
- )
-
- self._pkgindex_allowed_pkg_keys = set(chain(
- self._pkgindex_keys,
- self._pkgindex_aux_keys,
- self._pkgindex_hashes,
- self._pkgindex_default_pkg_data,
- self._pkgindex_inherited_keys,
- chain(*self._pkgindex_translated_keys)
- ))
-
- @property
- def root(self):
- warnings.warn("The root attribute of "
- "portage.dbapi.bintree.binarytree"
- " is deprecated. Use "
- "settings['ROOT'] instead.",
- DeprecationWarning, stacklevel=3)
- return self.settings['ROOT']
-
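# Illustrative construction sketch (assumes a usable portage environment):
# the first positional parameter and the "virtual" parameter are deprecated,
# so pkgdir and settings are passed as keywords, and settings['ROOT'] is
# used in place of the deprecated "root" property.
import portage
from portage.dbapi.bintree import binarytree

def make_binarytree():
	settings = portage.config(clone=portage.settings)
	bintree = binarytree(pkgdir=settings["PKGDIR"], settings=settings)
	root = settings["ROOT"]  # preferred over the deprecated bintree.root
	return bintree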
- def move_ent(self, mylist, repo_match=None):
- if not self.populated:
- self.populate()
- origcp = mylist[1]
- newcp = mylist[2]
- # sanity check
- for atom in (origcp, newcp):
- if not isjustname(atom):
- raise InvalidPackageName(_unicode(atom))
- mynewcat = catsplit(newcp)[0]
- origmatches=self.dbapi.cp_list(origcp)
- moves = 0
- if not origmatches:
- return moves
- for mycpv in origmatches:
- try:
- mycpv = self.dbapi._pkg_str(mycpv, None)
- except (KeyError, InvalidData):
- continue
- mycpv_cp = portage.cpv_getkey(mycpv)
- if mycpv_cp != origcp:
- # Ignore PROVIDE virtual match.
- continue
- if repo_match is not None \
- and not repo_match(mycpv.repo):
- continue
-
- # Use isvalidatom() to check if this move is valid for the
- # EAPI (characters allowed in package names may vary).
- if not isvalidatom(newcp, eapi=mycpv.eapi):
- continue
-
- mynewcpv = mycpv.replace(mycpv_cp, _unicode(newcp), 1)
- myoldpkg = catsplit(mycpv)[1]
- mynewpkg = catsplit(mynewcpv)[1]
-
- if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
- writemsg(_("!!! Cannot update binary: Destination exists.\n"),
- noiselevel=-1)
- writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n", noiselevel=-1)
- continue
-
- tbz2path = self.getname(mycpv)
- if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
- writemsg(_("!!! Cannot update readonly binary: %s\n") % mycpv,
- noiselevel=-1)
- continue
-
- moves += 1
- mytbz2 = portage.xpak.tbz2(tbz2path)
- mydata = mytbz2.get_data()
- updated_items = update_dbentries([mylist], mydata, parent=mycpv)
- mydata.update(updated_items)
- mydata[b'PF'] = \
- _unicode_encode(mynewpkg + "\n",
- encoding=_encodings['repo.content'])
- mydata[b'CATEGORY'] = \
- _unicode_encode(mynewcat + "\n",
- encoding=_encodings['repo.content'])
- if mynewpkg != myoldpkg:
- ebuild_data = mydata.pop(_unicode_encode(myoldpkg + '.ebuild',
- encoding=_encodings['repo.content']), None)
- if ebuild_data is not None:
- mydata[_unicode_encode(mynewpkg + '.ebuild',
- encoding=_encodings['repo.content'])] = ebuild_data
-
- mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
-
- self.dbapi.cpv_remove(mycpv)
- del self._pkg_paths[self.dbapi._instance_key(mycpv)]
- metadata = self.dbapi._aux_cache_slot_dict()
- for k in self.dbapi._aux_cache_keys:
- v = mydata.get(_unicode_encode(k))
- if v is not None:
- v = _unicode_decode(v)
- metadata[k] = " ".join(v.split())
- mynewcpv = _pkg_str(mynewcpv, metadata=metadata, db=self.dbapi)
- new_path = self.getname(mynewcpv)
- self._pkg_paths[
- self.dbapi._instance_key(mynewcpv)] = new_path[len(self.pkgdir)+1:]
- if new_path != tbz2path:
- self._ensure_dir(os.path.dirname(new_path))
- _movefile(tbz2path, new_path, mysettings=self.settings)
- self.inject(mynewcpv)
-
- return moves
-
- def prevent_collision(self, cpv):
- warnings.warn("The "
- "portage.dbapi.bintree.binarytree.prevent_collision "
- "method is deprecated.",
- DeprecationWarning, stacklevel=2)
-
- def _ensure_dir(self, path):
- """
- Create the specified directory. Also, copy gid and group mode
- bits from self.pkgdir if possible.
- @param path: Absolute path of the directory to be created.
- @type path: String
- """
- try:
- pkgdir_st = os.stat(self.pkgdir)
- except OSError:
- ensure_dirs(path)
- return
- pkgdir_gid = pkgdir_st.st_gid
- pkgdir_grp_mode = 0o2070 & pkgdir_st.st_mode
- try:
- ensure_dirs(path, gid=pkgdir_gid, mode=pkgdir_grp_mode, mask=0)
- except PortageException:
- if not os.path.isdir(path):
- raise
-
- def _file_permissions(self, path):
- try:
- pkgdir_st = os.stat(self.pkgdir)
- except OSError:
- pass
- else:
- pkgdir_gid = pkgdir_st.st_gid
- pkgdir_grp_mode = 0o0060 & pkgdir_st.st_mode
- try:
- portage.util.apply_permissions(path, gid=pkgdir_gid,
- mode=pkgdir_grp_mode, mask=0)
- except PortageException:
- pass
-
- def populate(self, getbinpkgs=False, getbinpkg_refresh=True):
- """
- Populates the binarytree with package metadata.
-
- @param getbinpkgs: include remote packages
- @type getbinpkgs: bool
- @param getbinpkg_refresh: attempt to refresh the cache
- of remote package metadata if getbinpkgs is also True
- @type getbinpkg_refresh: bool
- """
-
- if self._populating:
- return
-
- if not os.path.isdir(self.pkgdir) and not getbinpkgs:
- self.populated = True
- return
-
- # Clear all caches in case populate is called multiple times
- # as may be the case when _global_updates calls populate()
- # prior to performing package moves since it only wants to
- # operate on local packages (getbinpkgs=0).
- self._remotepkgs = None
-
- self._populating = True
- try:
- update_pkgindex = self._populate_local()
-
- if update_pkgindex and self.dbapi.writable:
- # If the Packages file needs to be updated, then _populate_local
- # needs to be called once again while the file is locked, so
- # that changes made by a concurrent process cannot be lost. This
- # case is avoided when possible, in order to minimize lock
- # contention.
- pkgindex_lock = None
- try:
- pkgindex_lock = lockfile(self._pkgindex_file,
- wantnewlockfile=True)
- update_pkgindex = self._populate_local()
- if update_pkgindex:
- self._pkgindex_write(update_pkgindex)
- finally:
- if pkgindex_lock:
- unlockfile(pkgindex_lock)
-
- if getbinpkgs:
- if not self.settings.get("PORTAGE_BINHOST"):
- writemsg(_("!!! PORTAGE_BINHOST unset, but its use is requested.\n"),
- noiselevel=-1)
- else:
- self._populate_remote(getbinpkg_refresh=getbinpkg_refresh)
-
- finally:
- self._populating = False
-
- self.populated = True
-
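# Illustrative usage sketch (assumes "bintree" is a binarytree instance):
# populate() must run before dbapi queries, and getbinpkgs=True additionally
# pulls remote metadata when PORTAGE_BINHOST is configured.
def list_remote_instances(bintree):
	bintree.populate(getbinpkgs=True, getbinpkg_refresh=True)
	# cpv_all() now covers local packages plus any remote-only instances
	return [cpv for cpv in bintree.dbapi.cpv_all() if bintree.isremote(cpv)]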
- def _populate_local(self):
- self.dbapi.clear()
- _instance_key = self.dbapi._instance_key
- # In order to minimize disk I/O, we never compute digests here.
- # Therefore we exclude hashes from the minimum_keys, so that
- # the Packages file will not be needlessly re-written due to
- # missing digests.
- minimum_keys = self._pkgindex_keys.difference(self._pkgindex_hashes)
- if True:
- pkg_paths = {}
- self._pkg_paths = pkg_paths
- dir_files = {}
- for parent, dir_names, file_names in os.walk(self.pkgdir):
- relative_parent = parent[len(self.pkgdir)+1:]
- dir_files[relative_parent] = file_names
-
- pkgindex = self._load_pkgindex()
- if not self._pkgindex_version_supported(pkgindex):
- pkgindex = self._new_pkgindex()
- metadata = {}
- basename_index = {}
- for d in pkgindex.packages:
- cpv = _pkg_str(d["CPV"], metadata=d,
- settings=self.settings, db=self.dbapi)
- d["CPV"] = cpv
- metadata[_instance_key(cpv)] = d
- path = d.get("PATH")
- if not path:
- path = cpv + ".tbz2"
- basename = os.path.basename(path)
- basename_index.setdefault(basename, []).append(d)
-
- update_pkgindex = False
- for mydir, file_names in dir_files.items():
- try:
- mydir = _unicode_decode(mydir,
- encoding=_encodings["fs"], errors="strict")
- except UnicodeDecodeError:
- continue
- for myfile in file_names:
- try:
- myfile = _unicode_decode(myfile,
- encoding=_encodings["fs"], errors="strict")
- except UnicodeDecodeError:
- continue
- if not myfile.endswith(SUPPORTED_XPAK_EXTENSIONS):
- continue
- mypath = os.path.join(mydir, myfile)
- full_path = os.path.join(self.pkgdir, mypath)
- s = os.lstat(full_path)
-
- if not stat.S_ISREG(s.st_mode):
- continue
-
- # Validate data from the package index and try to avoid
- # reading the xpak if possible.
- possibilities = basename_index.get(myfile)
- if possibilities:
- match = None
- for d in possibilities:
- try:
- if long(d["_mtime_"]) != s[stat.ST_MTIME]:
- continue
- except (KeyError, ValueError):
- continue
- try:
- if long(d["SIZE"]) != long(s.st_size):
- continue
- except (KeyError, ValueError):
- continue
- if not minimum_keys.difference(d):
- match = d
- break
- if match:
- mycpv = match["CPV"]
- instance_key = _instance_key(mycpv)
- pkg_paths[instance_key] = mypath
- # update the path if the package has been moved
- oldpath = d.get("PATH")
- if oldpath and oldpath != mypath:
- update_pkgindex = True
- # Omit PATH if it is the default path for
- # the current Packages format version.
- if mypath != mycpv + ".tbz2":
- d["PATH"] = mypath
- if not oldpath:
- update_pkgindex = True
- else:
- d.pop("PATH", None)
- if oldpath:
- update_pkgindex = True
- self.dbapi.cpv_inject(mycpv)
- continue
- if not os.access(full_path, os.R_OK):
- writemsg(_("!!! Permission denied to read " \
- "binary package: '%s'\n") % full_path,
- noiselevel=-1)
- self.invalids.append(myfile[:-5])
- continue
- pkg_metadata = self._read_metadata(full_path, s,
- keys=chain(self.dbapi._aux_cache_keys,
- ("PF", "CATEGORY")))
- mycat = pkg_metadata.get("CATEGORY", "")
- mypf = pkg_metadata.get("PF", "")
- slot = pkg_metadata.get("SLOT", "")
- mypkg = myfile[:-5]
- if not mycat or not mypf or not slot:
- #old-style or corrupt package
- writemsg(_("\n!!! Invalid binary package: '%s'\n") % full_path,
- noiselevel=-1)
- missing_keys = []
- if not mycat:
- missing_keys.append("CATEGORY")
- if not mypf:
- missing_keys.append("PF")
- if not slot:
- missing_keys.append("SLOT")
- msg = []
- if missing_keys:
- missing_keys.sort()
- msg.append(_("Missing metadata key(s): %s.") % \
- ", ".join(missing_keys))
- msg.append(_(" This binary package is not " \
- "recoverable and should be deleted."))
- for line in textwrap.wrap("".join(msg), 72):
- writemsg("!!! %s\n" % line, noiselevel=-1)
- self.invalids.append(mypkg)
- continue
-
- multi_instance = False
- invalid_name = False
- build_id = None
- if myfile.endswith(".xpak"):
- multi_instance = True
- build_id = self._parse_build_id(myfile)
- if build_id < 1:
- invalid_name = True
- elif myfile != "%s-%s.xpak" % (
- mypf, build_id):
- invalid_name = True
- else:
- mypkg = mypkg[:-len(str(build_id))-1]
- elif myfile != mypf + ".tbz2":
- invalid_name = True
-
- if invalid_name:
- writemsg(_("\n!!! Binary package name is "
- "invalid: '%s'\n") % full_path,
- noiselevel=-1)
- continue
-
- if pkg_metadata.get("BUILD_ID"):
- try:
- build_id = long(pkg_metadata["BUILD_ID"])
- except ValueError:
- writemsg(_("!!! Binary package has "
- "invalid BUILD_ID: '%s'\n") %
- full_path, noiselevel=-1)
- continue
- else:
- build_id = None
-
- if multi_instance:
- name_split = catpkgsplit("%s/%s" %
- (mycat, mypf))
- if (name_split is None or
- tuple(catsplit(mydir)) != name_split[:2]):
- continue
- elif mycat != mydir and mydir != "All":
- continue
- if mypkg != mypf.strip():
- continue
- mycpv = mycat + "/" + mypkg
- if not self.dbapi._category_re.match(mycat):
- writemsg(_("!!! Binary package has an " \
- "unrecognized category: '%s'\n") % full_path,
- noiselevel=-1)
- writemsg(_("!!! '%s' has a category that is not" \
- " listed in %setc/portage/categories\n") % \
- (mycpv, self.settings["PORTAGE_CONFIGROOT"]),
- noiselevel=-1)
- continue
- if build_id is not None:
- pkg_metadata["BUILD_ID"] = _unicode(build_id)
- pkg_metadata["SIZE"] = _unicode(s.st_size)
- # Discard items used only for validation above.
- pkg_metadata.pop("CATEGORY")
- pkg_metadata.pop("PF")
- mycpv = _pkg_str(mycpv,
- metadata=self.dbapi._aux_cache_slot_dict(pkg_metadata),
- db=self.dbapi)
- pkg_paths[_instance_key(mycpv)] = mypath
- self.dbapi.cpv_inject(mycpv)
- update_pkgindex = True
- d = metadata.get(_instance_key(mycpv),
- pkgindex._pkg_slot_dict())
- if d:
- try:
- if long(d["_mtime_"]) != s[stat.ST_MTIME]:
- d.clear()
- except (KeyError, ValueError):
- d.clear()
- if d:
- try:
- if long(d["SIZE"]) != long(s.st_size):
- d.clear()
- except (KeyError, ValueError):
- d.clear()
-
- for k in self._pkgindex_allowed_pkg_keys:
- v = pkg_metadata.get(k)
- if v:
- d[k] = v
- d["CPV"] = mycpv
-
- try:
- self._eval_use_flags(mycpv, d)
- except portage.exception.InvalidDependString:
- writemsg(_("!!! Invalid binary package: '%s'\n") % \
- self.getname(mycpv), noiselevel=-1)
- self.dbapi.cpv_remove(mycpv)
- del pkg_paths[_instance_key(mycpv)]
-
- # record location if it's non-default
- if mypath != mycpv + ".tbz2":
- d["PATH"] = mypath
- else:
- d.pop("PATH", None)
- metadata[_instance_key(mycpv)] = d
-
- for instance_key in list(metadata):
- if instance_key not in pkg_paths:
- del metadata[instance_key]
-
- if update_pkgindex:
- del pkgindex.packages[:]
- pkgindex.packages.extend(iter(metadata.values()))
- self._update_pkgindex_header(pkgindex.header)
-
- self._pkgindex_header = {}
- self._merge_pkgindex_header(pkgindex.header,
- self._pkgindex_header)
-
- return pkgindex if update_pkgindex else None
-
- def _populate_remote(self, getbinpkg_refresh=True):
-
- self._remote_has_index = False
- self._remotepkgs = {}
- for base_url in self.settings["PORTAGE_BINHOST"].split():
- parsed_url = urlparse(base_url)
- host = parsed_url.netloc
- port = parsed_url.port
- user = None
- passwd = None
- user_passwd = ""
- if "@" in host:
- user, host = host.split("@", 1)
- user_passwd = user + "@"
- if ":" in user:
- user, passwd = user.split(":", 1)
-
- if port is not None:
- port_str = ":%s" % (port,)
- if host.endswith(port_str):
- host = host[:-len(port_str)]
- pkgindex_file = os.path.join(self.settings["EROOT"], CACHE_PATH, "binhost",
- host, parsed_url.path.lstrip("/"), "Packages")
- pkgindex = self._new_pkgindex()
- try:
- f = io.open(_unicode_encode(pkgindex_file,
- encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['repo.content'],
- errors='replace')
- try:
- pkgindex.read(f)
- finally:
- f.close()
- except EnvironmentError as e:
- if e.errno != errno.ENOENT:
- raise
- local_timestamp = pkgindex.header.get("TIMESTAMP", None)
- try:
- download_timestamp = \
- float(pkgindex.header.get("DOWNLOAD_TIMESTAMP", 0))
- except ValueError:
- download_timestamp = 0
- remote_timestamp = None
- rmt_idx = self._new_pkgindex()
- proc = None
- tmp_filename = None
- try:
- # urlparse.urljoin() only works correctly with recognized
- # protocols and requires the base url to have a trailing
- # slash, so join manually...
- url = base_url.rstrip("/") + "/Packages"
- f = None
-
- if not getbinpkg_refresh and local_timestamp:
- raise UseCachedCopyOfRemoteIndex()
-
- try:
- ttl = float(pkgindex.header.get("TTL", 0))
- except ValueError:
- pass
- else:
- if download_timestamp and ttl and \
- download_timestamp + ttl > time.time():
- raise UseCachedCopyOfRemoteIndex()
-
- # Don't use urlopen for https, unless
- # PEP 476 is supported (bug #469888).
- if parsed_url.scheme not in ('https',) or _have_pep_476():
- try:
- f = _urlopen(url, if_modified_since=local_timestamp)
- if hasattr(f, 'headers') and f.headers.get('timestamp', ''):
- remote_timestamp = f.headers.get('timestamp')
- except IOError as err:
- if hasattr(err, 'code') and err.code == 304: # not modified (since local_timestamp)
- raise UseCachedCopyOfRemoteIndex()
-
- if parsed_url.scheme in ('ftp', 'http', 'https'):
- # This protocol is supposedly supported by urlopen,
- # so apparently there's a problem with the url
- # or a bug in urlopen.
- if self.settings.get("PORTAGE_DEBUG", "0") != "0":
- traceback.print_exc()
-
- raise
- except ValueError:
- raise ParseError("Invalid Portage BINHOST value '%s'"
- % url.lstrip())
-
- if f is None:
-
- path = parsed_url.path.rstrip("/") + "/Packages"
-
- if parsed_url.scheme == 'ssh':
- # Use a pipe so that we can terminate the download
- # early if we detect that the TIMESTAMP header
- # matches that of the cached Packages file.
- ssh_args = ['ssh']
- if port is not None:
- ssh_args.append("-p%s" % (port,))
- # NOTE: shlex evaluates embedded quotes
- ssh_args.extend(portage.util.shlex_split(
- self.settings.get("PORTAGE_SSH_OPTS", "")))
- ssh_args.append(user_passwd + host)
- ssh_args.append('--')
- ssh_args.append('cat')
- ssh_args.append(path)
-
- proc = subprocess.Popen(ssh_args,
- stdout=subprocess.PIPE)
- f = proc.stdout
- else:
- setting = 'FETCHCOMMAND_' + parsed_url.scheme.upper()
- fcmd = self.settings.get(setting)
- if not fcmd:
- fcmd = self.settings.get('FETCHCOMMAND')
- if not fcmd:
- raise EnvironmentError("FETCHCOMMAND is unset")
-
- fd, tmp_filename = tempfile.mkstemp()
- tmp_dirname, tmp_basename = os.path.split(tmp_filename)
- os.close(fd)
-
- fcmd_vars = {
- "DISTDIR": tmp_dirname,
- "FILE": tmp_basename,
- "URI": url
- }
-
- for k in ("PORTAGE_SSH_OPTS",):
- v = self.settings.get(k)
- if v is not None:
- fcmd_vars[k] = v
-
- success = portage.getbinpkg.file_get(
- fcmd=fcmd, fcmd_vars=fcmd_vars)
- if not success:
- raise EnvironmentError("%s failed" % (setting,))
- f = open(tmp_filename, 'rb')
-
- f_dec = codecs.iterdecode(f,
- _encodings['repo.content'], errors='replace')
- try:
- rmt_idx.readHeader(f_dec)
- if not remote_timestamp: # in case it had not been read from HTTP header
- remote_timestamp = rmt_idx.header.get("TIMESTAMP", None)
- if not remote_timestamp:
- # no timestamp in the header, something's wrong
- pkgindex = None
- writemsg(_("\n\n!!! Binhost package index " \
- "has no TIMESTAMP field.\n"), noiselevel=-1)
- else:
- if not self._pkgindex_version_supported(rmt_idx):
- writemsg(_("\n\n!!! Binhost package index version" \
- " is not supported: '%s'\n") % \
- rmt_idx.header.get("VERSION"), noiselevel=-1)
- pkgindex = None
- elif local_timestamp != remote_timestamp:
- rmt_idx.readBody(f_dec)
- pkgindex = rmt_idx
- finally:
- # Timeout after 5 seconds, in case close() blocks
- # indefinitely (see bug #350139).
- try:
- try:
- AlarmSignal.register(5)
- f.close()
- finally:
- AlarmSignal.unregister()
- except AlarmSignal:
- writemsg("\n\n!!! %s\n" % \
- _("Timed out while closing connection to binhost"),
- noiselevel=-1)
- except UseCachedCopyOfRemoteIndex:
- writemsg_stdout("\n")
- writemsg_stdout(
- colorize("GOOD", _("Local copy of remote index is up-to-date and will be used.")) + \
- "\n")
- rmt_idx = pkgindex
- except EnvironmentError as e:
- # This includes URLError which is raised for SSL
- # certificate errors when PEP 476 is supported.
- writemsg(_("\n\n!!! Error fetching binhost package" \
- " info from '%s'\n") % _hide_url_passwd(base_url))
- # With Python 2, the EnvironmentError message may
- # contain bytes or unicode, so use _unicode to ensure
- # safety with all locales (bug #532784).
- try:
- error_msg = _unicode(e)
- except UnicodeDecodeError as uerror:
- error_msg = _unicode(uerror.object,
- encoding='utf_8', errors='replace')
- writemsg("!!! %s\n\n" % error_msg)
- del e
- pkgindex = None
- if proc is not None:
- if proc.poll() is None:
- proc.kill()
- proc.wait()
- proc = None
- if tmp_filename is not None:
- try:
- os.unlink(tmp_filename)
- except OSError:
- pass
- if pkgindex is rmt_idx:
- pkgindex.modified = False # don't update the header
- pkgindex.header["DOWNLOAD_TIMESTAMP"] = "%d" % time.time()
- try:
- ensure_dirs(os.path.dirname(pkgindex_file))
- f = atomic_ofstream(pkgindex_file)
- pkgindex.write(f)
- f.close()
- except (IOError, PortageException):
- if os.access(os.path.dirname(pkgindex_file), os.W_OK):
- raise
- # The current user doesn't have permission to cache the
- # file, but that's alright.
- if pkgindex:
- remote_base_uri = pkgindex.header.get("URI", base_url)
- for d in pkgindex.packages:
- cpv = _pkg_str(d["CPV"], metadata=d,
- settings=self.settings, db=self.dbapi)
- # Local package instances override remote instances
- # with the same instance_key.
- if self.dbapi.cpv_exists(cpv):
- continue
-
- d["CPV"] = cpv
- d["BASE_URI"] = remote_base_uri
- d["PKGINDEX_URI"] = url
- self._remotepkgs[self.dbapi._instance_key(cpv)] = d
- self.dbapi.cpv_inject(cpv)
-
- self._remote_has_index = True
- self._merge_pkgindex_header(pkgindex.header,
- self._pkgindex_header)
-
- def inject(self, cpv, filename=None):
- """Add a freshly built package to the database. This updates
- $PKGDIR/Packages with the new package metadata (including MD5).
- @param cpv: The cpv of the new package to inject
- @type cpv: string
- @param filename: File path of the package to inject, or None if it's
- already in the location returned by getname()
- @type filename: string
- @rtype: _pkg_str or None
- @return: A _pkg_str instance on success, or None on failure.
- """
- mycat, mypkg = catsplit(cpv)
- if not self.populated:
- self.populate()
- if filename is None:
- full_path = self.getname(cpv)
- else:
- full_path = filename
- try:
- s = os.stat(full_path)
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- del e
- writemsg(_("!!! Binary package does not exist: '%s'\n") % full_path,
- noiselevel=-1)
- return
- metadata = self._read_metadata(full_path, s)
- invalid_depend = False
- try:
- self._eval_use_flags(cpv, metadata)
- except portage.exception.InvalidDependString:
- invalid_depend = True
- if invalid_depend or not metadata.get("SLOT"):
- writemsg(_("!!! Invalid binary package: '%s'\n") % full_path,
- noiselevel=-1)
- return
-
- fetched = False
- try:
- build_id = cpv.build_id
- except AttributeError:
- build_id = None
- else:
- instance_key = self.dbapi._instance_key(cpv)
- if instance_key in self.dbapi.cpvdict:
- # This means we've been called by aux_update (or
- # similar). The instance key typically changes (due to
- # file modification), so we need to discard existing
- # instance key references.
- self.dbapi.cpv_remove(cpv)
- self._pkg_paths.pop(instance_key, None)
- if self._remotepkgs is not None:
- fetched = self._remotepkgs.pop(instance_key, None)
-
- cpv = _pkg_str(cpv, metadata=metadata, settings=self.settings,
- db=self.dbapi)
-
- # Reread the Packages index (in case it's been changed by another
- # process) and then update it, all while holding a lock.
- pkgindex_lock = None
- try:
- pkgindex_lock = lockfile(self._pkgindex_file,
- wantnewlockfile=1)
- if filename is not None:
- new_filename = self.getname(cpv, allocate_new=True)
- try:
- samefile = os.path.samefile(filename, new_filename)
- except OSError:
- samefile = False
- if not samefile:
- self._ensure_dir(os.path.dirname(new_filename))
- _movefile(filename, new_filename, mysettings=self.settings)
- full_path = new_filename
-
- basename = os.path.basename(full_path)
- pf = catsplit(cpv)[1]
- if (build_id is None and not fetched and
- basename.endswith(".xpak")):
- # Apply the newly assigned BUILD_ID. This is intended
- # to occur only for locally built packages. If the
- # package was fetched, we want to preserve its
- # attributes, so that we can later distinguish that it
- # is identical to its remote counterpart.
- build_id = self._parse_build_id(basename)
- metadata["BUILD_ID"] = _unicode(build_id)
- cpv = _pkg_str(cpv, metadata=metadata,
- settings=self.settings, db=self.dbapi)
- binpkg = portage.xpak.tbz2(full_path)
- binary_data = binpkg.get_data()
- binary_data[b"BUILD_ID"] = _unicode_encode(
- metadata["BUILD_ID"])
- binpkg.recompose_mem(portage.xpak.xpak_mem(binary_data))
-
- self._file_permissions(full_path)
- pkgindex = self._load_pkgindex()
- if not self._pkgindex_version_supported(pkgindex):
- pkgindex = self._new_pkgindex()
-
- d = self._inject_file(pkgindex, cpv, full_path)
- self._update_pkgindex_header(pkgindex.header)
- self._pkgindex_write(pkgindex)
-
- finally:
- if pkgindex_lock:
- unlockfile(pkgindex_lock)
-
- # This is used to record BINPKGMD5 in the installed package
- # database, for a package that has just been built.
- cpv._metadata["MD5"] = d["MD5"]
-
- return cpv
-
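# Illustrative sketch (assumes "bintree" is a binarytree and "built_file"
# points at a just-built archive): inject() moves the file into PKGDIR if
# needed and records it in the Packages index.
def register_built_package(bintree, cpv, built_file):
	injected = bintree.inject(cpv, filename=built_file)
	if injected is None:
		raise RuntimeError("invalid binary package: %s" % built_file)
	return injected  # a _pkg_str carrying the freshly computed MD5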
- def _read_metadata(self, filename, st, keys=None):
- """
- Read metadata from a binary package. The returned metadata
- dictionary will contain empty strings for any values that
- are undefined (this is important because the _pkg_str class
- distinguishes between missing and undefined values).
-
- @param filename: File path of the binary package
- @type filename: string
- @param st: stat result for the binary package
- @type st: os.stat_result
- @param keys: optional list of specific metadata keys to retrieve
- @type keys: iterable
- @rtype: dict
- @return: package metadata
- """
- if keys is None:
- keys = self.dbapi._aux_cache_keys
- metadata = self.dbapi._aux_cache_slot_dict()
- else:
- metadata = {}
- binary_metadata = portage.xpak.tbz2(filename).get_data()
- for k in keys:
- if k == "_mtime_":
- metadata[k] = _unicode(st[stat.ST_MTIME])
- elif k == "SIZE":
- metadata[k] = _unicode(st.st_size)
- else:
- v = binary_metadata.get(_unicode_encode(k))
- if v is None:
- if k == "EAPI":
- metadata[k] = "0"
- else:
- metadata[k] = ""
- else:
- v = _unicode_decode(v)
- metadata[k] = " ".join(v.split())
- return metadata
-
- def _inject_file(self, pkgindex, cpv, filename):
- """
- Add a package to internal data structures, and add an
- entry to the given pkgindex.
- @param pkgindex: The PackageIndex instance to which an entry
- will be added.
- @type pkgindex: PackageIndex
- @param cpv: A _pkg_str instance corresponding to the package
- being injected.
- @type cpv: _pkg_str
- @param filename: Absolute file path of the package to inject.
- @type filename: string
- @rtype: dict
- @return: A dict corresponding to the new entry which has been
- added to pkgindex. This may be used to access the checksums
- which have just been generated.
- """
- # Update state for future isremote calls.
- instance_key = self.dbapi._instance_key(cpv)
- if self._remotepkgs is not None:
- self._remotepkgs.pop(instance_key, None)
-
- self.dbapi.cpv_inject(cpv)
- self._pkg_paths[instance_key] = filename[len(self.pkgdir)+1:]
- d = self._pkgindex_entry(cpv)
-
- # If found, remove package(s) with duplicate path.
- path = d.get("PATH", "")
- for i in range(len(pkgindex.packages) - 1, -1, -1):
- d2 = pkgindex.packages[i]
- if path and path == d2.get("PATH"):
- # Handle path collisions in $PKGDIR/All
- # when CPV is not identical.
- del pkgindex.packages[i]
- elif cpv == d2.get("CPV"):
- if path == d2.get("PATH", ""):
- del pkgindex.packages[i]
-
- pkgindex.packages.append(d)
- return d
-
- def _pkgindex_write(self, pkgindex):
- contents = codecs.getwriter(_encodings['repo.content'])(io.BytesIO())
- pkgindex.write(contents)
- contents = contents.getvalue()
- atime = mtime = long(pkgindex.header["TIMESTAMP"])
- output_files = [(atomic_ofstream(self._pkgindex_file, mode="wb"),
- self._pkgindex_file, None)]
-
- if "compress-index" in self.settings.features:
- gz_fname = self._pkgindex_file + ".gz"
- fileobj = atomic_ofstream(gz_fname, mode="wb")
- output_files.append((GzipFile(filename='', mode="wb",
- fileobj=fileobj, mtime=mtime), gz_fname, fileobj))
-
- for f, fname, f_close in output_files:
- f.write(contents)
- f.close()
- if f_close is not None:
- f_close.close()
- self._file_permissions(fname)
- # some seconds might have elapsed since TIMESTAMP
- os.utime(fname, (atime, mtime))
-
- def _pkgindex_entry(self, cpv):
- """
- Performs checksums, and gets size and mtime via lstat.
- Raises InvalidDependString if necessary.
- @rtype: dict
- @return: a dict containing the entry for the given cpv.
- """
-
- pkg_path = self.getname(cpv)
-
- d = dict(cpv._metadata.items())
- d.update(perform_multiple_checksums(
- pkg_path, hashes=self._pkgindex_hashes))
-
- d["CPV"] = cpv
- st = os.lstat(pkg_path)
- d["_mtime_"] = _unicode(st[stat.ST_MTIME])
- d["SIZE"] = _unicode(st.st_size)
-
- rel_path = pkg_path[len(self.pkgdir)+1:]
- # record location if it's non-default
- if rel_path != cpv + ".tbz2":
- d["PATH"] = rel_path
-
- return d
-
- def _new_pkgindex(self):
- return portage.getbinpkg.PackageIndex(
- allowed_pkg_keys=self._pkgindex_allowed_pkg_keys,
- default_header_data=self._pkgindex_default_header_data,
- default_pkg_data=self._pkgindex_default_pkg_data,
- inherited_keys=self._pkgindex_inherited_keys,
- translated_keys=self._pkgindex_translated_keys)
-
- @staticmethod
- def _merge_pkgindex_header(src, dest):
- """
- Merge Packages header settings from src to dest, in order to
- propagate implicit IUSE and USE_EXPAND settings for use with
- binary and installed packages. Values are appended, so the
- result is a union of elements from src and dest.
-
- Pull in ARCH if it's not defined, since it's used for validation
- by emerge's profile_check function, and also for KEYWORDS logic
- in the _getmaskingstatus function.
-
- @param src: source mapping (read only)
- @type src: Mapping
- @param dest: destination mapping
- @type dest: MutableMapping
- """
- for k, v in iter_iuse_vars(src):
- v_before = dest.get(k)
- if v_before is not None:
- merged_values = set(v_before.split())
- merged_values.update(v.split())
- v = ' '.join(sorted(merged_values))
- dest[k] = v
-
- if 'ARCH' not in dest and 'ARCH' in src:
- dest['ARCH'] = src['ARCH']
-
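# Illustrative sketch of the merge semantics above, using plain dicts and
# assuming iter_iuse_vars() treats IUSE_IMPLICIT as one of the propagated
# variables: values are unioned, and ARCH is copied when dest lacks it.
from portage.dbapi.bintree import binarytree

def merge_example():
	src = {"IUSE_IMPLICIT": "prefix prefix-guest", "ARCH": "amd64"}
	dest = {"IUSE_IMPLICIT": "abi_x86_64 prefix"}
	binarytree._merge_pkgindex_header(src, dest)
	# dest["IUSE_IMPLICIT"] -> "abi_x86_64 prefix prefix-guest"
	# dest["ARCH"] -> "amd64"
	return dest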
- def _propagate_config(self, config):
- """
- Propagate implicit IUSE and USE_EXPAND settings from the binary
- package database to a config instance. If settings are not
- available to propagate, then this will do nothing and return
- False.
-
- @param config: config instance
- @type config: portage.config
- @rtype: bool
- @return: True if settings successfully propagated, False if settings
- were not available to propagate.
- """
- if self._pkgindex_header is None:
- return False
-
- self._merge_pkgindex_header(self._pkgindex_header,
- config.configdict['defaults'])
- config.regenerate()
- config._init_iuse()
- return True
-
- def _update_pkgindex_header(self, header):
- """
- Add useful settings to the Packages file header, for use by
- binhost clients.
-
- This will return silently if the current profile is invalid or
- does not define an IUSE_IMPLICIT variable, so that any implicit
- IUSE settings already cached in the header remain usable with
- binary packages.
- """
- if not (self.settings.profile_path and
- "IUSE_IMPLICIT" in self.settings):
- header.setdefault("VERSION", _unicode(self._pkgindex_version))
- return
-
- portdir = normalize_path(os.path.realpath(self.settings["PORTDIR"]))
- profiles_base = os.path.join(portdir, "profiles") + os.path.sep
- if self.settings.profile_path:
- profile_path = normalize_path(
- os.path.realpath(self.settings.profile_path))
- if profile_path.startswith(profiles_base):
- profile_path = profile_path[len(profiles_base):]
- header["PROFILE"] = profile_path
- header["VERSION"] = _unicode(self._pkgindex_version)
- base_uri = self.settings.get("PORTAGE_BINHOST_HEADER_URI")
- if base_uri:
- header["URI"] = base_uri
- else:
- header.pop("URI", None)
- for k in list(self._pkgindex_header_keys) + \
- self.settings.get("USE_EXPAND_IMPLICIT", "").split() + \
- self.settings.get("USE_EXPAND_UNPREFIXED", "").split():
- v = self.settings.get(k, None)
- if v:
- header[k] = v
- else:
- header.pop(k, None)
-
- # These values may be useful for using a binhost without
- # having a local copy of the profile (bug #470006).
- for k in self.settings.get("USE_EXPAND_IMPLICIT", "").split():
- k = "USE_EXPAND_VALUES_" + k
- v = self.settings.get(k)
- if v:
- header[k] = v
- else:
- header.pop(k, None)
-
- def _pkgindex_version_supported(self, pkgindex):
- version = pkgindex.header.get("VERSION")
- if version:
- try:
- if int(version) <= self._pkgindex_version:
- return True
- except ValueError:
- pass
- return False
-
- def _eval_use_flags(self, cpv, metadata):
- use = frozenset(metadata.get("USE", "").split())
- for k in self._pkgindex_use_evaluated_keys:
- if k.endswith('DEPEND'):
- token_class = Atom
- else:
- token_class = None
-
- deps = metadata.get(k)
- if deps is None:
- continue
- try:
- deps = use_reduce(deps, uselist=use, token_class=token_class)
- deps = paren_enclose(deps)
- except portage.exception.InvalidDependString as e:
- writemsg("%s: %s\n" % (k, e), noiselevel=-1)
- raise
- metadata[k] = deps
-
- def exists_specific(self, cpv):
- if not self.populated:
- self.populate()
- return self.dbapi.match(
- dep_expand("="+cpv, mydb=self.dbapi, settings=self.settings))
-
- def dep_bestmatch(self, mydep):
- "compatibility method -- all matches, not just visible ones"
- if not self.populated:
- self.populate()
- writemsg("\n\n", 1)
- writemsg("mydep: %s\n" % mydep, 1)
- mydep = dep_expand(mydep, mydb=self.dbapi, settings=self.settings)
- writemsg("mydep: %s\n" % mydep, 1)
- mykey = dep_getkey(mydep)
- writemsg("mykey: %s\n" % mykey, 1)
- mymatch = best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
- writemsg("mymatch: %s\n" % mymatch, 1)
- if mymatch is None:
- return ""
- return mymatch
-
- def getname(self, cpv, allocate_new=None):
- """Returns a file location for this package.
- If cpv has both build_time and build_id attributes, then the
- path to the specific corresponding instance is returned.
- Otherwise, allocate a new path and return that. When allocating
- a new path, behavior depends on the binpkg-multi-instance
- FEATURES setting.
- """
- if not self.populated:
- self.populate()
-
- try:
- cpv.cp
- except AttributeError:
- cpv = _pkg_str(cpv)
-
- filename = None
- if allocate_new:
- filename = self._allocate_filename(cpv)
- elif self._is_specific_instance(cpv):
- instance_key = self.dbapi._instance_key(cpv)
- path = self._pkg_paths.get(instance_key)
- if path is not None:
- filename = os.path.join(self.pkgdir, path)
-
- if filename is None and not allocate_new:
- try:
- instance_key = self.dbapi._instance_key(cpv,
- support_string=True)
- except KeyError:
- pass
- else:
- filename = self._pkg_paths.get(instance_key)
- if filename is not None:
- filename = os.path.join(self.pkgdir, filename)
-
- if filename is None:
- if self._multi_instance:
- pf = catsplit(cpv)[1]
- filename = "%s-%s.xpak" % (
- os.path.join(self.pkgdir, cpv.cp, pf), "1")
- else:
- filename = os.path.join(self.pkgdir, cpv + ".tbz2")
-
- return filename
-
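# Illustrative path-layout sketch (assumes "bintree" is a binarytree and
# "app-shells/bash-5.0" is only an example cpv): the default layout yields
# ${PKGDIR}/<cat>/<pf>.tbz2, while binpkg-multi-instance allocations use
# ${PKGDIR}/<cat>/<pn>/<pf>-<build_id>.xpak.
def example_paths(bintree):
	existing = bintree.getname("app-shells/bash-5.0")
	allocated = bintree.getname("app-shells/bash-5.0", allocate_new=True)
	return existing, allocated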
- def _is_specific_instance(self, cpv):
- specific = True
- try:
- build_time = cpv.build_time
- build_id = cpv.build_id
- except AttributeError:
- specific = False
- else:
- if build_time is None or build_id is None:
- specific = False
- return specific
-
- def _max_build_id(self, cpv):
- max_build_id = 0
- for x in self.dbapi.cp_list(cpv.cp):
- if (x == cpv and x.build_id is not None and
- x.build_id > max_build_id):
- max_build_id = x.build_id
- return max_build_id
-
- def _allocate_filename(self, cpv):
- return os.path.join(self.pkgdir, cpv + ".tbz2")
-
- def _allocate_filename_multi(self, cpv):
-
- # First, get the max build_id found when _populate was
- # called.
- max_build_id = self._max_build_id(cpv)
-
- # A new package may have been added concurrently since the
- # last _populate call, so increment build_id until
- # we locate an unused id.
- pf = catsplit(cpv)[1]
- build_id = max_build_id + 1
-
- while True:
- filename = "%s-%s.xpak" % (
- os.path.join(self.pkgdir, cpv.cp, pf), build_id)
- if os.path.exists(filename):
- build_id += 1
- else:
- return filename
-
- @staticmethod
- def _parse_build_id(filename):
- build_id = -1
- suffixlen = len(".xpak")
- hyphen = filename.rfind("-", 0, -(suffixlen + 1))
- if hyphen != -1:
- build_id = filename[hyphen+1:-suffixlen]
- try:
- build_id = long(build_id)
- except ValueError:
- pass
- return build_id
-
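# Worked example (sketch) of the parsing above: the build id is everything
# between the last hyphen preceding the ".xpak" suffix and the suffix itself.
def parse_build_id_example():
	basename = "bash-5.0-3.xpak"  # hypothetical multi-instance basename
	suffixlen = len(".xpak")
	hyphen = basename.rfind("-", 0, -(suffixlen + 1))
	return int(basename[hyphen + 1:-suffixlen])  # -> 3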
- def isremote(self, pkgname):
- """Returns true if the package is kept remotely and it has not been
- downloaded (or it is only partially downloaded)."""
- if (self._remotepkgs is None or
- self.dbapi._instance_key(pkgname) not in self._remotepkgs):
- return False
- # Presence in self._remotepkgs implies that it's remote. When a
- # package is downloaded, state is updated by self.inject().
- return True
-
- def get_pkgindex_uri(self, cpv):
- """Returns the URI to the Packages file for a given package."""
- uri = None
- if self._remotepkgs is not None:
- metadata = self._remotepkgs.get(self.dbapi._instance_key(cpv))
- if metadata is not None:
- uri = metadata["PKGINDEX_URI"]
- return uri
-
- def gettbz2(self, pkgname):
- """Fetches the package from a remote site, if necessary. Attempts to
- resume if the file appears to be partially downloaded."""
- instance_key = self.dbapi._instance_key(pkgname)
- tbz2_path = self.getname(pkgname)
- tbz2name = os.path.basename(tbz2_path)
- resume = False
- if os.path.exists(tbz2_path):
- if tbz2name[:-5] not in self.invalids:
- return
- else:
- resume = True
- writemsg(_("Resuming download of this tbz2, but it is possible that it is corrupt.\n"),
- noiselevel=-1)
-
- mydest = os.path.dirname(self.getname(pkgname))
- self._ensure_dir(mydest)
- # urljoin doesn't work correctly with unrecognized protocols like sftp
- if self._remote_has_index:
- rel_url = self._remotepkgs[instance_key].get("PATH")
- if not rel_url:
- rel_url = pkgname + ".tbz2"
- remote_base_uri = self._remotepkgs[instance_key]["BASE_URI"]
- url = remote_base_uri.rstrip("/") + "/" + rel_url.lstrip("/")
- else:
- url = self.settings["PORTAGE_BINHOST"].rstrip("/") + "/" + tbz2name
- protocol = urlparse(url)[0]
- fcmd_prefix = "FETCHCOMMAND"
- if resume:
- fcmd_prefix = "RESUMECOMMAND"
- fcmd = self.settings.get(fcmd_prefix + "_" + protocol.upper())
- if not fcmd:
- fcmd = self.settings.get(fcmd_prefix)
- success = portage.getbinpkg.file_get(url, mydest, fcmd=fcmd)
- if not success:
- try:
- os.unlink(self.getname(pkgname))
- except OSError:
- pass
- raise portage.exception.FileNotFound(mydest)
- self.inject(pkgname)
-
- def _load_pkgindex(self):
- pkgindex = self._new_pkgindex()
- try:
- f = io.open(_unicode_encode(self._pkgindex_file,
- encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['repo.content'],
- errors='replace')
- except EnvironmentError:
- pass
- else:
- try:
- pkgindex.read(f)
- finally:
- f.close()
- return pkgindex
-
- def _get_digests(self, pkg):
-
- try:
- cpv = pkg.cpv
- except AttributeError:
- cpv = pkg
-
- _instance_key = self.dbapi._instance_key
- instance_key = _instance_key(cpv)
- digests = {}
- metadata = (None if self._remotepkgs is None else
- self._remotepkgs.get(instance_key))
- if metadata is None:
- for d in self._load_pkgindex().packages:
- if (d["CPV"] == cpv and
- instance_key == _instance_key(_pkg_str(d["CPV"],
- metadata=d, settings=self.settings))):
- metadata = d
- break
-
- if metadata is None:
- return digests
-
- for k in get_valid_checksum_keys():
- v = metadata.get(k)
- if not v:
- continue
- digests[k] = v
-
- if "SIZE" in metadata:
- try:
- digests["size"] = int(metadata["SIZE"])
- except ValueError:
- writemsg(_("!!! Malformed SIZE attribute in remote " \
- "metadata for '%s'\n") % cpv)
-
- return digests
-
- def digestCheck(self, pkg):
- """
- Verify digests for the given package and raise DigestException
- if verification fails.
- @rtype: bool
- @return: True if digests could be located, False otherwise.
- """
-
- digests = self._get_digests(pkg)
-
- if not digests:
- return False
-
- try:
- cpv = pkg.cpv
- except AttributeError:
- cpv = pkg
-
- pkg_path = self.getname(cpv)
- hash_filter = _hash_filter(
- self.settings.get("PORTAGE_CHECKSUM_FILTER", ""))
- if not hash_filter.transparent:
- digests = _apply_hash_filter(digests, hash_filter)
- eout = EOutput()
- eout.quiet = self.settings.get("PORTAGE_QUIET") == "1"
- ok, st = _check_distfile(pkg_path, digests, eout, show_errors=0)
- if not ok:
- ok, reason = verify_all(pkg_path, digests)
- if not ok:
- raise portage.exception.DigestException(
- (pkg_path,) + tuple(reason))
-
- return True
-
- def getslot(self, mycatpkg):
- "Get a slot for a catpkg; assume it exists."
- myslot = ""
- try:
- myslot = self.dbapi._pkg_str(mycatpkg, None).slot
- except KeyError:
- pass
- return myslot
diff --git a/pym/portage/dbapi/cpv_expand.py b/pym/portage/dbapi/cpv_expand.py
deleted file mode 100644
index 70ee78245..000000000
--- a/pym/portage/dbapi/cpv_expand.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright 2010-2013 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from __future__ import unicode_literals
-
-__all__ = ["cpv_expand"]
-
-import portage
-from portage.exception import AmbiguousPackageName
-from portage.localization import _
-from portage.util import writemsg
-from portage.versions import _pkgsplit
-
-def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
- """Given a string (packagename or virtual) expand it into a valid
- cat/package string. Virtuals use the mydb to determine which provided
- virtual is a valid choice and defaults to the first element when there
- are no installed/available candidates."""
- myslash=mycpv.split("/")
- mysplit = _pkgsplit(myslash[-1])
- if settings is None:
- try:
- settings = mydb.settings
- except AttributeError:
- settings = portage.settings
- if len(myslash)>2:
- # this is an illegal case.
- mysplit=[]
- mykey=mycpv
- elif len(myslash)==2:
- if mysplit:
- mykey=myslash[0]+"/"+mysplit[0]
- else:
- mykey=mycpv
-
- # Since Gentoo stopped using old-style virtuals in
- # 2011, typically it's possible to avoid getvirtuals()
- # calls entirely. Therefore, only call getvirtuals()
- # if the atom category is "virtual" and cp_list()
- # returns nothing.
- if mykey.startswith("virtual/") and \
- hasattr(mydb, "cp_list") and \
- not mydb.cp_list(mykey, use_cache=use_cache):
- if hasattr(mydb, "vartree"):
- settings._populate_treeVirtuals_if_needed(mydb.vartree)
- virts = settings.getvirtuals().get(mykey)
- if virts:
- mykey_orig = mykey
- for vkey in virts:
- # The virtuals file can contain a versioned atom, so
- # it may be necessary to remove the operator and
- # version from the atom before it is passed into
- # dbapi.cp_list().
- if mydb.cp_list(vkey.cp):
- mykey = str(vkey)
- break
- if mykey == mykey_orig:
- mykey = str(virts[0])
- #we only perform virtual expansion if we are passed a dbapi
- else:
- #specific cpv, no category, ie. "foo-1.0"
- if mysplit:
- myp=mysplit[0]
- else:
- # "foo" ?
- myp=mycpv
- mykey=None
- matches=[]
- if mydb and hasattr(mydb, "categories"):
- for x in mydb.categories:
- if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
- matches.append(x+"/"+myp)
- if len(matches) > 1:
- virtual_name_collision = False
- if len(matches) == 2:
- for x in matches:
- if not x.startswith("virtual/"):
- # Assume that the non-virtual is desired. This helps
- # avoid the ValueError for invalid deps that come from
- # installed packages (during reverse blocker detection,
- # for example).
- mykey = x
- else:
- virtual_name_collision = True
- if not virtual_name_collision:
- # AmbiguousPackageName inherits from ValueError,
- # for backward compatibility with calling code
- # that already handles ValueError.
- raise AmbiguousPackageName(matches)
- elif matches:
- mykey=matches[0]
-
- if not mykey and not isinstance(mydb, list):
- if hasattr(mydb, "vartree"):
- settings._populate_treeVirtuals_if_needed(mydb.vartree)
- virts_p = settings.get_virts_p().get(myp)
- if virts_p:
- mykey = virts_p[0]
- #again, we only perform virtual expansion if we have a dbapi (not a list)
- if not mykey:
- mykey="null/"+myp
- if mysplit:
- if mysplit[2]=="r0":
- return mykey+"-"+mysplit[1]
- else:
- return mykey+"-"+mysplit[1]+"-"+mysplit[2]
- else:
- return mykey
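# Illustrative sketch (assumes the legacy portage.db globals are initialized
# and that app-shells/bash is the only matching package in the vardb):
import portage
from portage.dbapi.cpv_expand import cpv_expand

def expand_example():
	vardb = portage.db[portage.root]["vartree"].dbapi
	return cpv_expand("bash-5.0", mydb=vardb)  # e.g. "app-shells/bash-5.0"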
diff --git a/pym/portage/dbapi/dep_expand.py b/pym/portage/dbapi/dep_expand.py
deleted file mode 100644
index 9515b7dec..000000000
--- a/pym/portage/dbapi/dep_expand.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright 2010-2018 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from __future__ import unicode_literals
-
-__all__ = ["dep_expand"]
-
-import re
-
-from portage.dbapi.cpv_expand import cpv_expand
-from portage.dep import Atom, isvalidatom
-from portage.exception import InvalidAtom
-from portage.versions import catsplit
-
-def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
- '''
- @rtype: Atom
- '''
- orig_dep = mydep
- if isinstance(orig_dep, Atom):
- has_cat = True
- else:
- if not mydep:
- return mydep
- if mydep[0] == "*":
- mydep = mydep[1:]
- orig_dep = mydep
- has_cat = '/' in orig_dep.split(':')[0]
- if not has_cat:
- alphanum = re.search(r'\w', orig_dep)
- if alphanum:
- mydep = orig_dep[:alphanum.start()] + "null/" + \
- orig_dep[alphanum.start():]
- try:
- mydep = Atom(mydep, allow_repo=True)
- except InvalidAtom:
- # Missing '=' prefix is allowed for backward compatibility.
- if not isvalidatom("=" + mydep, allow_repo=True):
- raise
- mydep = Atom('=' + mydep, allow_repo=True)
- orig_dep = '=' + orig_dep
- if not has_cat:
- null_cat, pn = catsplit(mydep.cp)
- mydep = pn
-
- if has_cat:
- # Optimize most common cases to avoid calling cpv_expand.
- if not mydep.cp.startswith("virtual/"):
- return mydep
- if not hasattr(mydb, "cp_list") or \
- mydb.cp_list(mydep.cp):
- return mydep
- # Fallback to legacy cpv_expand for old-style PROVIDE virtuals.
- mydep = mydep.cp
-
- expanded = cpv_expand(mydep, mydb=mydb,
- use_cache=use_cache, settings=settings)
- return Atom(orig_dep.replace(mydep, expanded, 1), allow_repo=True)
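# Illustrative sketch (same assumptions as the cpv_expand example above): a
# category-less dependency string is expanded into a full Atom.
import portage
from portage.dbapi.dep_expand import dep_expand

def dep_expand_example():
	vardb = portage.db[portage.root]["vartree"].dbapi
	return dep_expand(">=bash-5.0", mydb=vardb)  # e.g. Atom(">=app-shells/bash-5.0")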
diff --git a/pym/portage/dbapi/porttree.py b/pym/portage/dbapi/porttree.py
deleted file mode 100644
index 677452273..000000000
--- a/pym/portage/dbapi/porttree.py
+++ /dev/null
@@ -1,1526 +0,0 @@
-# Copyright 1998-2018 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from __future__ import unicode_literals
-
-__all__ = [
- "close_portdbapi_caches", "FetchlistDict", "portagetree", "portdbapi"
-]
-
-import portage
-portage.proxy.lazyimport.lazyimport(globals(),
- 'portage.checksum',
- 'portage.data:portage_gid,secpass',
- 'portage.dbapi.dep_expand:dep_expand',
- 'portage.dep:Atom,dep_getkey,match_from_list,use_reduce,_match_slot',
- 'portage.package.ebuild.doebuild:doebuild',
- 'portage.util:ensure_dirs,shlex_split,writemsg,writemsg_level',
- 'portage.util.listdir:listdir',
- 'portage.versions:best,catsplit,catpkgsplit,_pkgsplit@pkgsplit,ver_regexp,_pkg_str',
-)
-
-from portage.cache import volatile
-from portage.cache.cache_errors import CacheError
-from portage.cache.mappings import Mapping
-from portage.dbapi import dbapi
-from portage.exception import PortageException, PortageKeyError, \
- FileNotFound, InvalidAtom, InvalidData, \
- InvalidDependString, InvalidPackageName
-from portage.localization import _
-
-from portage import eclass_cache, \
- eapi_is_supported, \
- _eapi_is_deprecated
-from portage import os
-from portage import _encodings
-from portage import _unicode_encode
-from portage import OrderedDict
-from portage.util._eventloop.EventLoop import EventLoop
-from portage.util.futures import asyncio
-from portage.util.futures.iter_completed import iter_gather
-from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
-
-import os as _os
-import sys
-import traceback
-import warnings
-import errno
-import collections
-import functools
-
-try:
- from urllib.parse import urlparse
-except ImportError:
- from urlparse import urlparse
-
-if sys.hexversion >= 0x3000000:
- # pylint: disable=W0622
- basestring = str
- long = int
-
-def close_portdbapi_caches():
- # The python interpreter does _not_ guarantee that destructors are
- # called for objects that remain when the interpreter exits, so we
- # use an atexit hook to call destructors for any global portdbapi
- # instances that may have been constructed.
- try:
- portage._legacy_globals_constructed
- except AttributeError:
- pass
- else:
- if "db" in portage._legacy_globals_constructed:
- try:
- db = portage.db
- except AttributeError:
- pass
- else:
- if isinstance(db, dict):
- for x in db.values():
- try:
- if "porttree" in x.lazy_items:
- continue
- except (AttributeError, TypeError):
- continue
- try:
- x = x.pop("porttree").dbapi
- except (AttributeError, KeyError):
- continue
- if not isinstance(x, portdbapi):
- continue
- x.close_caches()
-
-portage.process.atexit_register(close_portdbapi_caches)
-
-# It used to be necessary for API consumers to remove portdbapi instances
-# from portdbapi_instances, in order to avoid having accumulated instances
-# consume memory. Now, portdbapi_instances is just an empty dummy list, so
-# for backward compatibility, ignore ValueError for removal on non-existent
-# items.
-class _dummy_list(list):
- def remove(self, item):
- # TODO: Trigger a DeprecationWarning here, after stable portage
- # has dummy portdbapi_instances.
- try:
- list.remove(self, item)
- except ValueError:
- pass
-
-
-class _better_cache(object):
-
- """
- The purpose of better_cache is to locate catpkgs in repositories using ``os.listdir()`` as much as possible, which
- is less expensive IO-wise than exhaustively doing a stat on each repo for a particular catpkg. better_cache stores a
- list of repos in which particular catpkgs appear. Various dbapi methods use better_cache to locate repositories of
- interest related to a particular catpkg rather than performing an exhaustive scan of all repos/overlays.
-
- Better_cache.items data may look like this::
-
- { "sys-apps/portage" : [ repo1, repo2 ] }
-
- Without better_cache, Portage will get slower and slower (due to excessive IO) as more overlays are added.
-
- Also note that it is OK if this cache has some 'false positive' catpkgs in it. We use it to search for specific
- catpkgs listed in ebuilds. The likelihood of a false positive catpkg in our cache causing a problem is extremely
- low, because the user of our cache is passing us a catpkg that came from somewhere and has already undergone some
- validation, and even then will further interrogate the short-list of repos we return to gather more information
- on the catpkg.
-
- Thus, the code below is optimized for speed rather than painstaking correctness. I have added a note to
- ``dbapi.getRepositories()`` to ensure that developers are aware of this just in case.
-
- The better_cache has been redesigned to perform on-demand scans -- it will only scan a category at a time, as
- needed. This should further optimize IO performance by not scanning category directories that are not needed by
- Portage.
- """
-
- def __init__(self, repositories):
- self._items = collections.defaultdict(list)
- self._scanned_cats = set()
-
- # ordered list of all portree locations we'll scan:
- self._repo_list = [repo for repo in reversed(list(repositories))
- if repo.location is not None]
-
- def __getitem__(self, catpkg):
- result = self._items.get(catpkg)
- if result is not None:
- return result
-
- cat, pkg = catsplit(catpkg)
- if cat not in self._scanned_cats:
- self._scan_cat(cat)
- return self._items[catpkg]
-
- def _scan_cat(self, cat):
- for repo in self._repo_list:
- cat_dir = repo.location + "/" + cat
- try:
- pkg_list = os.listdir(cat_dir)
- except OSError as e:
- if e.errno not in (errno.ENOTDIR, errno.ENOENT, errno.ESTALE):
- raise
- continue
- for p in pkg_list:
- if os.path.isdir(cat_dir + "/" + p):
- self._items[cat + "/" + p].append(repo)
- self._scanned_cats.add(cat)
-
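# Illustrative sketch (assumes a configured portage.settings with at least
# one repository): a catpkg lookup scans that category on demand and returns
# the repositories which contain the package.
import portage
from portage.dbapi.porttree import _better_cache

def repos_containing(catpkg):
	cache = _better_cache(portage.settings.repositories)
	return cache[catpkg]  # list of RepoConfig objects, possibly empty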
-
-class portdbapi(dbapi):
- """this tree will scan a portage directory located at root (passed to init)"""
- portdbapi_instances = _dummy_list()
- _use_mutable = True
-
- @property
- def _categories(self):
- return self.settings.categories
-
- @property
- def porttree_root(self):
- warnings.warn("portage.dbapi.porttree.portdbapi.porttree_root is deprecated in favor of portage.repository.config.RepoConfig.location "
- "(available as repositories[repo_name].location attribute of instances of portage.dbapi.porttree.portdbapi class)",
- DeprecationWarning, stacklevel=2)
- return self.settings.repositories.mainRepoLocation()
-
- @property
- def eclassdb(self):
- warnings.warn("portage.dbapi.porttree.portdbapi.eclassdb is deprecated in favor of portage.repository.config.RepoConfig.eclass_db "
- "(available as repositories[repo_name].eclass_db attribute of instances of portage.dbapi.porttree.portdbapi class)",
- DeprecationWarning, stacklevel=2)
- main_repo = self.repositories.mainRepo()
- if main_repo is None:
- return None
- return main_repo.eclass_db
-
- def __init__(self, _unused_param=DeprecationWarning, mysettings=None):
- """
- @param _unused_param: deprecated, use mysettings['PORTDIR'] instead
- @type _unused_param: None
- @param mysettings: an immutable config instance
- @type mysettings: portage.config
- """
-
- from portage import config
- if mysettings:
- self.settings = mysettings
- else:
- from portage import settings
- self.settings = config(clone=settings)
-
- if _unused_param is not DeprecationWarning:
- warnings.warn("The first parameter of the " + \
- "portage.dbapi.porttree.portdbapi" + \
- " constructor is unused since portage-2.1.8. " + \
- "mysettings['PORTDIR'] is used instead.",
- DeprecationWarning, stacklevel=2)
-
- self.repositories = self.settings.repositories
- self.treemap = self.repositories.treemap
-
- # This is strictly for use in aux_get() doebuild calls when metadata
- # is generated by the depend phase. It's safest to use a clone for
- # this purpose because doebuild makes many changes to the config
- # instance that is passed in.
- self.doebuild_settings = config(clone=self.settings)
- self.depcachedir = os.path.realpath(self.settings.depcachedir)
-
- if os.environ.get("SANDBOX_ON") == "1":
- # Make api consumers exempt from sandbox violations
- # when doing metadata cache updates.
- sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":")
- if self.depcachedir not in sandbox_write:
- sandbox_write.append(self.depcachedir)
- os.environ["SANDBOX_WRITE"] = \
- ":".join(filter(None, sandbox_write))
-
- self.porttrees = list(self.settings.repositories.repoLocationList())
-
- # This is used as a sanity check for aux_get(). If there is no
- # root eclass dir, we assume that PORTDIR is invalid or
- # missing. This check allows aux_get() to detect a missing
- # portage tree and return early by raising a KeyError.
- self._have_root_eclass_dir = os.path.isdir(
- os.path.join(self.settings.repositories.mainRepoLocation(), "eclass"))
-
- #if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
- self.xcache = {}
- self.frozen = 0
-
- #Keep a list of repo names, sorted by priority (highest priority first).
- self._ordered_repo_name_list = tuple(reversed(self.repositories.prepos_order))
-
- self.auxdbmodule = self.settings.load_best_module("portdbapi.auxdbmodule")
- self.auxdb = {}
- self._pregen_auxdb = {}
- # If the current user doesn't have depcachedir write permission,
- # then the depcachedir cache is kept here with read-only access.
- self._ro_auxdb = {}
- self._init_cache_dirs()
- try:
- depcachedir_st = os.stat(self.depcachedir)
- depcachedir_w_ok = os.access(self.depcachedir, os.W_OK)
- except OSError:
- depcachedir_st = None
- depcachedir_w_ok = False
-
- cache_kwargs = {}
-
- depcachedir_unshared = False
- if portage.data.secpass < 1 and \
- depcachedir_w_ok and \
- depcachedir_st is not None and \
- os.getuid() == depcachedir_st.st_uid and \
- os.getgid() == depcachedir_st.st_gid:
- # If this user owns depcachedir and is not in the
- # portage group, then don't bother to set permissions
- # on cache entries. This makes it possible to run
- # egencache without any need to be a member of the
- # portage group.
- depcachedir_unshared = True
- else:
- cache_kwargs.update({
- 'gid' : portage_gid,
- 'perms' : 0o664
- })
-
- # If secpass < 1, we don't want to write to the cache
- # since then we won't be able to apply group permissions
- # to the cache entries/directories.
- if (secpass < 1 and not depcachedir_unshared) or not depcachedir_w_ok:
- for x in self.porttrees:
- self.auxdb[x] = volatile.database(
- self.depcachedir, x, self._known_keys,
- **cache_kwargs)
- try:
- self._ro_auxdb[x] = self.auxdbmodule(self.depcachedir, x,
- self._known_keys, readonly=True, **cache_kwargs)
- except CacheError:
- pass
- else:
- for x in self.porttrees:
- if x in self.auxdb:
- continue
- # location, label, auxdbkeys
- self.auxdb[x] = self.auxdbmodule(
- self.depcachedir, x, self._known_keys, **cache_kwargs)
- if "metadata-transfer" not in self.settings.features:
- for x in self.porttrees:
- if x in self._pregen_auxdb:
- continue
- cache = self._create_pregen_cache(x)
- if cache is not None:
- self._pregen_auxdb[x] = cache
- # Selectively cache metadata in order to optimize dep matching.
- self._aux_cache_keys = set(
- ["BDEPEND", "DEPEND", "EAPI", "HDEPEND",
- "INHERITED", "IUSE", "KEYWORDS", "LICENSE",
- "PDEPEND", "PROPERTIES", "RDEPEND", "repository",
- "RESTRICT", "SLOT", "DEFINED_PHASES", "REQUIRED_USE"])
-
- self._aux_cache = {}
- self._better_cache = None
- self._broken_ebuilds = set()
-
- def _set_porttrees(self, porttrees):
- """
- Consumers, such as repoman and emirrordist, may modify the porttrees
- attribute in order to modify the effective set of repositories for
- all portdbapi operations.
-
- @param porttrees: list of repo locations, in ascending order by
- repo priority
- @type porttrees: list
- """
- self._porttrees_repos = portage.OrderedDict((repo.name, repo)
- for repo in (self.repositories.get_repo_for_location(location)
- for location in porttrees))
- self._porttrees = tuple(porttrees)
-
- def _get_porttrees(self):
- return self._porttrees
-
- porttrees = property(_get_porttrees, _set_porttrees)
-
- @property
- def _event_loop(self):
- if portage._internal_caller:
- # For internal portage usage, asyncio._wrap_loop() is safe.
- return asyncio._wrap_loop()
- else:
- # For external API consumers, use a local EventLoop, since
- # we don't want to assume that it's safe to override the
- # global SIGCHLD handler.
- return EventLoop(main=False)
-
- def _create_pregen_cache(self, tree):
- conf = self.repositories.get_repo_for_location(tree)
- cache = conf.get_pregenerated_cache(
- self._known_keys, readonly=True)
- if cache is not None:
- try:
- cache.ec = self.repositories.get_repo_for_location(tree).eclass_db
- except AttributeError:
- pass
-
- if not cache.complete_eclass_entries:
- warnings.warn(
-				("Repository '%s' uses a deprecated 'pms' cache format. "
- "Please migrate to 'md5-dict' format.") % (conf.name,),
- DeprecationWarning)
-
- return cache
-
- def _init_cache_dirs(self):
- """Create /var/cache/edb/dep and adjust permissions for the portage
- group."""
-
- dirmode = 0o2070
- modemask = 0o2
-
- try:
- ensure_dirs(self.depcachedir, gid=portage_gid,
- mode=dirmode, mask=modemask)
- except PortageException:
- pass
-
- def close_caches(self):
- if not hasattr(self, "auxdb"):
- # unhandled exception thrown from constructor
- return
- for x in self.auxdb:
- self.auxdb[x].sync()
- self.auxdb.clear()
-
- def flush_cache(self):
- for x in self.auxdb.values():
- x.sync()
-
- def findLicensePath(self, license_name):
- for x in reversed(self.porttrees):
- license_path = os.path.join(x, "licenses", license_name)
- if os.access(license_path, os.R_OK):
- return license_path
- return None
-
-	def findname(self, mycpv, mytree=None, myrepo=None):
- return self.findname2(mycpv, mytree, myrepo)[0]
-
- def getRepositoryPath(self, repository_id):
- """
- This function is required for GLEP 42 compliance; given a valid repository ID
- it must return a path to the repository
- TreeMap = { id:path }
- """
- return self.treemap.get(repository_id)
-
- def getRepositoryName(self, canonical_repo_path):
- """
- This is the inverse of getRepositoryPath().
- @param canonical_repo_path: the canonical path of a repository, as
- resolved by os.path.realpath()
- @type canonical_repo_path: String
- @return: The repo_name for the corresponding repository, or None
-			if the path does not correspond to a known repository
- @rtype: String or None
- """
- try:
- return self.repositories.get_name_for_location(canonical_repo_path)
- except KeyError:
- return None
-
- def getRepositories(self, catpkg=None):
-
- """
- With catpkg=None, this will return a complete list of repositories in this dbapi. With catpkg set to a value,
- this method will return a short-list of repositories that contain this catpkg. Use this second approach if
- possible, to avoid exhaustively searching all repos for a particular catpkg. It's faster for this method to
-		find the catpkg than for you to do it yourself. When specifying catpkg, you should have reasonable assurance that
- the category is valid and PMS-compliant as the caching mechanism we use does not perform validation checks for
- categories.
-
- This function is required for GLEP 42 compliance.
-
- @param catpkg: catpkg for which we want a list of repositories; we'll get a list of all repos containing this
-		catpkg; if None, return a list of all repositories.
- @return: a list of repositories.
- """
-
- if catpkg is not None and self._better_cache is not None:
- return [repo.name for repo in self._better_cache[catpkg]]
- return self._ordered_repo_name_list
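-
-	# A minimal usage sketch of the GLEP 42 helpers above, assuming an
-	# initialized portage environment; the repository names returned depend
-	# on the local configuration.
-	#
-	#     import portage
-	#     portdb = portage.db[portage.settings['EROOT']]['porttree'].dbapi
-	#     for repo_name in portdb.getRepositories():
-	#         print(repo_name, portdb.getRepositoryPath(repo_name))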
-
- def getMissingRepoNames(self):
- """
- Returns a list of repository paths that lack profiles/repo_name.
- """
- return self.settings.repositories.missing_repo_names
-
- def getIgnoredRepos(self):
- """
- Returns a list of repository paths that have been ignored, because
- another repo with the same name exists.
- """
- return self.settings.repositories.ignored_repos
-
- def findname2(self, mycpv, mytree=None, myrepo=None):
- """
- Returns the location of the CPV, and what overlay it was in.
- Searches overlays first, then PORTDIR; this allows us to return the first
-		matching file. If we started with PORTDIR and searched the overlays
-		second, we would still have to exhaustively search the overlays to
-		find the file we wanted.
-		If myrepo is not None, it will find packages from this repository (overlay).
- """
- if not mycpv:
- return (None, 0)
-
- if myrepo is not None:
- mytree = self.treemap.get(myrepo)
- if mytree is None:
- return (None, 0)
- elif mytree is not None:
- # myrepo enables cached results when available
- myrepo = self.repositories.location_map.get(mytree)
-
- mysplit = mycpv.split("/")
- psplit = pkgsplit(mysplit[1])
- if psplit is None or len(mysplit) != 2:
- raise InvalidPackageName(mycpv)
-
- try:
- cp = mycpv.cp
- except AttributeError:
- cp = mysplit[0] + "/" + psplit[0]
-
- if self._better_cache is None:
- if mytree:
- mytrees = [mytree]
- else:
- mytrees = reversed(self.porttrees)
- else:
- try:
- repos = self._better_cache[cp]
- except KeyError:
- return (None, 0)
-
- mytrees = []
- for repo in repos:
- if mytree is not None and mytree != repo.location:
- continue
- mytrees.append(repo.location)
-
-		# For optimal performance in this hot spot, we do manual unicode
- # handling here instead of using the wrapped os module.
- encoding = _encodings['fs']
- errors = 'strict'
-
- relative_path = mysplit[0] + _os.sep + psplit[0] + _os.sep + \
- mysplit[1] + ".ebuild"
-
- # There is no need to access the filesystem when the package
- # comes from this db and the package repo attribute corresponds
- # to the desired repo, since the file was previously found by
- # the cp_list method.
- if (myrepo is not None and myrepo == getattr(mycpv, 'repo', None)
- and self is getattr(mycpv, '_db', None)):
- return (mytree + _os.sep + relative_path, mytree)
-
- for x in mytrees:
- filename = x + _os.sep + relative_path
- if _os.access(_unicode_encode(filename,
- encoding=encoding, errors=errors), _os.R_OK):
- return (filename, x)
- return (None, 0)
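-
-	# A minimal sketch of locating an ebuild on disk via findname2(); the cpv
-	# below is hypothetical and an initialized portage environment is assumed.
-	#
-	#     import portage
-	#     portdb = portage.db[portage.settings['EROOT']]['porttree'].dbapi
-	#     ebuild_path, repo_location = portdb.findname2('sys-apps/portage-2.3.40')
-	#     if ebuild_path is None:
-	#         print('not found in any configured repository')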
-
- def _write_cache(self, cpv, repo_path, metadata, ebuild_hash):
-
- try:
- cache = self.auxdb[repo_path]
- chf = cache.validation_chf
- metadata['_%s_' % chf] = getattr(ebuild_hash, chf)
- except CacheError:
- # Normally this shouldn't happen, so we'll show
- # a traceback for debugging purposes.
- traceback.print_exc()
- cache = None
-
- if cache is not None:
- try:
- cache[cpv] = metadata
- except CacheError:
- # Normally this shouldn't happen, so we'll show
- # a traceback for debugging purposes.
- traceback.print_exc()
-
- def _pull_valid_cache(self, cpv, ebuild_path, repo_path):
- try:
- ebuild_hash = eclass_cache.hashed_path(ebuild_path)
- # snag mtime since we use it later, and to trigger stat failure
- # if it doesn't exist
- ebuild_hash.mtime
- except FileNotFound:
- writemsg(_("!!! aux_get(): ebuild for " \
- "'%s' does not exist at:\n") % (cpv,), noiselevel=-1)
- writemsg("!!! %s\n" % ebuild_path, noiselevel=-1)
- raise PortageKeyError(cpv)
-
- # Pull pre-generated metadata from the metadata/cache/
- # directory if it exists and is valid, otherwise fall
- # back to the normal writable cache.
- auxdbs = []
- pregen_auxdb = self._pregen_auxdb.get(repo_path)
- if pregen_auxdb is not None:
- auxdbs.append(pregen_auxdb)
- ro_auxdb = self._ro_auxdb.get(repo_path)
- if ro_auxdb is not None:
- auxdbs.append(ro_auxdb)
- auxdbs.append(self.auxdb[repo_path])
- eclass_db = self.repositories.get_repo_for_location(repo_path).eclass_db
-
- for auxdb in auxdbs:
- try:
- metadata = auxdb[cpv]
- except KeyError:
- continue
- except CacheError:
- if not auxdb.readonly:
- try:
- del auxdb[cpv]
- except (KeyError, CacheError):
- pass
- continue
- eapi = metadata.get('EAPI', '').strip()
- if not eapi:
- eapi = '0'
- metadata['EAPI'] = eapi
- if not eapi_is_supported(eapi):
- # Since we're supposed to be able to efficiently obtain the
- # EAPI from _parse_eapi_ebuild_head, we disregard cache entries
- # for unsupported EAPIs.
- continue
- if auxdb.validate_entry(metadata, ebuild_hash, eclass_db):
- break
- else:
- metadata = None
-
- return (metadata, ebuild_hash)
-
- def aux_get(self, mycpv, mylist, mytree=None, myrepo=None):
-		"stub code for returning auxiliary db information, such as SLOT, DEPEND, etc."
- 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
- 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise PortageKeyError if error'
- # For external API consumers, self._event_loop returns a new event
- # loop on each access, so a local reference is needed in order
- # to avoid instantiating more than one.
- loop = self._event_loop
- return loop.run_until_complete(
- self.async_aux_get(mycpv, mylist, mytree=mytree,
- myrepo=myrepo, loop=loop))
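-
-	# A minimal aux_get() sketch; the cpv and keys below are illustrative, and
-	# PortageKeyError is raised if no matching ebuild exists.
-	#
-	#     import portage
-	#     portdb = portage.db[portage.settings['EROOT']]['porttree'].dbapi
-	#     slot, rdepend = portdb.aux_get('sys-apps/portage-2.3.40',
-	#         ['SLOT', 'RDEPEND'])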
-
- def async_aux_get(self, mycpv, mylist, mytree=None, myrepo=None, loop=None):
- """
-		Asynchronous form of aux_get.
-
- @param mycpv: cpv for an ebuild
- @type mycpv: str
- @param mylist: list of metadata keys
- @type mylist: list
- @param mytree: The canonical path of the tree in which the ebuild
- is located, or None for automatic lookup
- @type mytree: str
- @param myrepo: name of the repo in which the ebuild is located,
- or None for automatic lookup
- @type myrepo: str
- @param loop: event loop (defaults to global event loop)
- @type loop: EventLoop
- @return: list of metadata values
- @rtype: asyncio.Future (or compatible)
- """
- # Don't default to self._event_loop here, since that creates a
- # local event loop for thread safety, and that could easily lead
- # to simultaneous instantiation of multiple event loops here.
- # Callers of this method certainly want the same event loop to
- # be used for all calls.
- loop = asyncio._wrap_loop(loop)
- future = loop.create_future()
- cache_me = False
- if myrepo is not None:
- mytree = self.treemap.get(myrepo)
- if mytree is None:
- future.set_exception(PortageKeyError(myrepo))
- return future
-
- if mytree is not None and len(self.porttrees) == 1 \
- and mytree == self.porttrees[0]:
- # mytree matches our only tree, so it's safe to
- # ignore mytree and cache the result
- mytree = None
- myrepo = None
-
- if mytree is None:
- cache_me = True
- if mytree is None and not self._known_keys.intersection(
- mylist).difference(self._aux_cache_keys):
- aux_cache = self._aux_cache.get(mycpv)
- if aux_cache is not None:
- future.set_result([aux_cache.get(x, "") for x in mylist])
- return future
- cache_me = True
-
- try:
- cat, pkg = mycpv.split("/", 1)
- except ValueError:
- # Missing slash. Can't find ebuild so raise PortageKeyError.
- future.set_exception(PortageKeyError(mycpv))
- return future
-
- myebuild, mylocation = self.findname2(mycpv, mytree)
-
- if not myebuild:
- writemsg("!!! aux_get(): %s\n" % \
- _("ebuild not found for '%s'") % mycpv, noiselevel=1)
- future.set_exception(PortageKeyError(mycpv))
- return future
-
- mydata, ebuild_hash = self._pull_valid_cache(mycpv, myebuild, mylocation)
-
- if mydata is not None:
- self._aux_get_return(
- future, mycpv, mylist, myebuild, ebuild_hash,
- mydata, mylocation, cache_me, None)
- return future
-
- if myebuild in self._broken_ebuilds:
- future.set_exception(PortageKeyError(mycpv))
- return future
-
- proc = EbuildMetadataPhase(cpv=mycpv,
- ebuild_hash=ebuild_hash, portdb=self,
- repo_path=mylocation, scheduler=loop,
- settings=self.doebuild_settings)
-
- proc.addExitListener(functools.partial(self._aux_get_return,
- future, mycpv, mylist, myebuild, ebuild_hash, mydata, mylocation,
- cache_me))
- future.add_done_callback(functools.partial(self._aux_get_cancel, proc))
- proc.start()
- return future
-
- @staticmethod
- def _aux_get_cancel(proc, future):
- if future.cancelled() and proc.returncode is None:
- proc.cancel()
-
- def _aux_get_return(self, future, mycpv, mylist, myebuild, ebuild_hash,
- mydata, mylocation, cache_me, proc):
- if future.cancelled():
- return
- if proc is not None:
- if proc.returncode != os.EX_OK:
- self._broken_ebuilds.add(myebuild)
- future.set_exception(PortageKeyError(mycpv))
- return
- mydata = proc.metadata
- mydata["repository"] = self.repositories.get_name_for_location(mylocation)
- mydata["_mtime_"] = ebuild_hash.mtime
- eapi = mydata.get("EAPI")
- if not eapi:
- eapi = "0"
- mydata["EAPI"] = eapi
- if eapi_is_supported(eapi):
- mydata["INHERITED"] = " ".join(mydata.get("_eclasses_", []))
-
- #finally, we look at our internal cache entry and return the requested data.
- returnme = [mydata.get(x, "") for x in mylist]
-
- if cache_me and self.frozen:
- aux_cache = {}
- for x in self._aux_cache_keys:
- aux_cache[x] = mydata.get(x, "")
- self._aux_cache[mycpv] = aux_cache
-
- future.set_result(returnme)
-
- def getFetchMap(self, mypkg, useflags=None, mytree=None):
- """
- Get the SRC_URI metadata as a dict which maps each file name to a
- set of alternative URIs.
-
- @param mypkg: cpv for an ebuild
- @type mypkg: String
- @param useflags: a collection of enabled USE flags, for evaluation of
- conditionals
- @type useflags: set, or None to enable all conditionals
- @param mytree: The canonical path of the tree in which the ebuild
- is located, or None for automatic lookup
-		@type mytree: String
- @return: A dict which maps each file name to a set of alternative
- URIs.
- @rtype: dict
- """
- loop = self._event_loop
- return loop.run_until_complete(
- self.async_fetch_map(mypkg, useflags=useflags,
- mytree=mytree, loop=loop))
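-
-	# A minimal getFetchMap() sketch, with portdb as in the sketches above and
-	# a hypothetical cpv. The result maps each DIST file name to a tuple of
-	# alternative URIs, honoring SRC_URI "uri -> filename" renames.
-	#
-	#     uri_map = portdb.getFetchMap('sys-apps/portage-2.3.40')
-	#     for distfile, uris in uri_map.items():
-	#         print(distfile, len(uris), 'alternative URI(s)')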
-
- def async_fetch_map(self, mypkg, useflags=None, mytree=None, loop=None):
- """
- Asynchronous form of getFetchMap.
-
- @param mypkg: cpv for an ebuild
- @type mypkg: String
- @param useflags: a collection of enabled USE flags, for evaluation of
- conditionals
- @type useflags: set, or None to enable all conditionals
- @param mytree: The canonical path of the tree in which the ebuild
- is located, or None for automatic lookup
-		@type mytree: String
- @param loop: event loop (defaults to global event loop)
- @type loop: EventLoop
- @return: A future that results in a dict which maps each file name to
- a set of alternative URIs.
- @rtype: asyncio.Future (or compatible)
- """
- loop = asyncio._wrap_loop(loop)
- result = loop.create_future()
-
- def aux_get_done(aux_get_future):
- if result.cancelled():
- return
- if aux_get_future.exception() is not None:
- if isinstance(aux_get_future.exception(), PortageKeyError):
- # Convert this to an InvalidDependString exception since
- # callers already handle it.
- result.set_exception(portage.exception.InvalidDependString(
- "getFetchMap(): aux_get() error reading "
- + mypkg + "; aborting."))
- else:
-					result.set_exception(aux_get_future.exception())
- return
-
- eapi, myuris = aux_get_future.result()
-
- if not eapi_is_supported(eapi):
- # Convert this to an InvalidDependString exception
- # since callers already handle it.
- result.set_exception(portage.exception.InvalidDependString(
- "getFetchMap(): '%s' has unsupported EAPI: '%s'" % \
- (mypkg, eapi)))
- return
-
- result.set_result(_parse_uri_map(mypkg,
- {'EAPI':eapi,'SRC_URI':myuris}, use=useflags))
-
- aux_get_future = self.async_aux_get(
- mypkg, ["EAPI", "SRC_URI"], mytree=mytree, loop=loop)
- result.add_done_callback(lambda result:
- aux_get_future.cancel() if result.cancelled() else None)
- aux_get_future.add_done_callback(aux_get_done)
- return result
-
- def getfetchsizes(self, mypkg, useflags=None, debug=0, myrepo=None):
-		# returns a filename:size dictionary of remaining downloads
- myebuild, mytree = self.findname2(mypkg, myrepo=myrepo)
- if myebuild is None:
- raise AssertionError(_("ebuild not found for '%s'") % mypkg)
- pkgdir = os.path.dirname(myebuild)
- mf = self.repositories.get_repo_for_location(
- os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
- pkgdir, self.settings["DISTDIR"])
- checksums = mf.getDigests()
- if not checksums:
- if debug:
- writemsg(_("[empty/missing/bad digest]: %s\n") % (mypkg,))
- return {}
- filesdict={}
- myfiles = self.getFetchMap(mypkg, useflags=useflags, mytree=mytree)
- #XXX: maybe this should be improved: take partial downloads
- # into account? check checksums?
- for myfile in myfiles:
- try:
- fetch_size = int(checksums[myfile]["size"])
- except (KeyError, ValueError):
- if debug:
- writemsg(_("[bad digest]: missing %(file)s for %(pkg)s\n") % {"file":myfile, "pkg":mypkg})
- continue
- file_path = os.path.join(self.settings["DISTDIR"], myfile)
- mystat = None
- try:
- mystat = os.stat(file_path)
- except OSError:
- pass
- if mystat is None:
- existing_size = 0
- ro_distdirs = self.settings.get("PORTAGE_RO_DISTDIRS")
- if ro_distdirs is not None:
- for x in shlex_split(ro_distdirs):
- try:
- mystat = os.stat(os.path.join(x, myfile))
- except OSError:
- pass
- else:
- if mystat.st_size == fetch_size:
- existing_size = fetch_size
- break
- else:
- existing_size = mystat.st_size
- remaining_size = fetch_size - existing_size
- if remaining_size > 0:
- # Assume the download is resumable.
- filesdict[myfile] = remaining_size
- elif remaining_size < 0:
- # The existing file is too large and therefore corrupt.
- filesdict[myfile] = int(checksums[myfile]["size"])
- return filesdict
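-
-	# A sketch of estimating the remaining download size for a package; the
-	# cpv is hypothetical and portdb is as in the sketches above.
-	#
-	#     remaining = portdb.getfetchsizes('sys-apps/portage-2.3.40')
-	#     total = sum(remaining.values())
-	#     print('%d file(s), %d bytes still to fetch' % (len(remaining), total))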
-
- def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False, myrepo=None):
- """
- TODO: account for PORTAGE_RO_DISTDIRS
- """
- if all:
- useflags = None
- elif useflags is None:
- if mysettings:
- useflags = mysettings["USE"].split()
- if myrepo is not None:
- mytree = self.treemap.get(myrepo)
- if mytree is None:
- return False
- else:
- mytree = None
-
- myfiles = self.getFetchMap(mypkg, useflags=useflags, mytree=mytree)
- myebuild = self.findname(mypkg, myrepo=myrepo)
- if myebuild is None:
- raise AssertionError(_("ebuild not found for '%s'") % mypkg)
- pkgdir = os.path.dirname(myebuild)
- mf = self.repositories.get_repo_for_location(
- os.path.dirname(os.path.dirname(pkgdir)))
- mf = mf.load_manifest(pkgdir, self.settings["DISTDIR"])
- mysums = mf.getDigests()
-
- failures = {}
- for x in myfiles:
- if not mysums or x not in mysums:
- ok = False
- reason = _("digest missing")
- else:
- try:
- ok, reason = portage.checksum.verify_all(
- os.path.join(self.settings["DISTDIR"], x), mysums[x])
- except FileNotFound as e:
- ok = False
- reason = _("File Not Found: '%s'") % (e,)
- if not ok:
- failures[x] = reason
- if failures:
- return False
- return True
-
- def cpv_exists(self, mykey, myrepo=None):
- "Tells us whether an actual ebuild exists on disk (no masking)"
- cps2 = mykey.split("/")
- cps = catpkgsplit(mykey, silent=0)
- if not cps:
- #invalid cat/pkg-v
- return 0
- if self.findname(cps[0] + "/" + cps2[1], myrepo=myrepo):
- return 1
- else:
- return 0
-
- def cp_all(self, categories=None, trees=None, reverse=False, sort=True):
- """
- This returns a list of all keys in our tree or trees
- @param categories: optional list of categories to search or
- defaults to self.settings.categories
- @param trees: optional list of trees to search the categories in or
- defaults to self.porttrees
- @param reverse: reverse sort order (default is False)
- @param sort: return sorted results (default is True)
-		@rtype: list of [cat/pkg,...]
- """
- d = {}
- if categories is None:
- categories = self.settings.categories
- if trees is None:
- trees = self.porttrees
- for x in categories:
- for oroot in trees:
- for y in listdir(oroot+"/"+x, EmptyOnError=1, ignorecvs=1, dirsonly=1):
- try:
- atom = Atom("%s/%s" % (x, y))
- except InvalidAtom:
- continue
- if atom != atom.cp:
- continue
- d[atom.cp] = None
- l = list(d)
- if sort:
- l.sort(reverse=reverse)
- return l
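-
-	# A sketch of enumerating package names with cp_all(), with portdb as in
-	# the sketches above; restricting the category list (a hypothetical single
-	# category here) avoids scanning every configured category.
-	#
-	#     for cp in portdb.cp_all(categories=['sys-apps']):
-	#         print(cp)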
-
- def cp_list(self, mycp, use_cache=1, mytree=None):
- # NOTE: Cache can be safely shared with the match cache, since the
- # match cache uses the result from dep_expand for the cache_key.
- if self.frozen and mytree is not None \
- and len(self.porttrees) == 1 \
- and mytree == self.porttrees[0]:
- # mytree matches our only tree, so it's safe to
- # ignore mytree and cache the result
- mytree = None
-
- if self.frozen and mytree is None:
- cachelist = self.xcache["cp-list"].get(mycp)
- if cachelist is not None:
- # Try to propagate this to the match-all cache here for
-				# repoman, since it uses separate match-all caches for each
- # profile (due to differences in _get_implicit_iuse).
- self.xcache["match-all"][(mycp, mycp)] = cachelist
- return cachelist[:]
- mysplit = mycp.split("/")
- invalid_category = mysplit[0] not in self._categories
- # Process repos in ascending order by repo.priority, so that
- # stable sort by version produces results ordered by
- # (pkg.version, repo.priority).
- if mytree is not None:
- if isinstance(mytree, basestring):
- repos = [self.repositories.get_repo_for_location(mytree)]
- else:
- # assume it's iterable
- repos = [self.repositories.get_repo_for_location(location)
- for location in mytree]
- elif self._better_cache is None:
- repos = self._porttrees_repos.values()
- else:
- repos = [repo for repo in reversed(self._better_cache[mycp])
- if repo.name in self._porttrees_repos]
- mylist = []
- for repo in repos:
- oroot = repo.location
- try:
- file_list = os.listdir(os.path.join(oroot, mycp))
- except OSError:
- continue
- for x in file_list:
- pf = None
- if x[-7:] == '.ebuild':
- pf = x[:-7]
-
- if pf is not None:
- ps = pkgsplit(pf)
- if not ps:
- writemsg(_("\nInvalid ebuild name: %s\n") % \
- os.path.join(oroot, mycp, x), noiselevel=-1)
- continue
- if ps[0] != mysplit[1]:
- writemsg(_("\nInvalid ebuild name: %s\n") % \
- os.path.join(oroot, mycp, x), noiselevel=-1)
- continue
- ver_match = ver_regexp.match("-".join(ps[1:]))
- if ver_match is None or not ver_match.groups():
- writemsg(_("\nInvalid ebuild version: %s\n") % \
- os.path.join(oroot, mycp, x), noiselevel=-1)
- continue
- mylist.append(_pkg_str(mysplit[0]+"/"+pf, db=self, repo=repo.name))
- if invalid_category and mylist:
- writemsg(_("\n!!! '%s' has a category that is not listed in " \
- "%setc/portage/categories\n") % \
- (mycp, self.settings["PORTAGE_CONFIGROOT"]), noiselevel=-1)
- mylist = []
- # Always sort in ascending order here since it's handy and
- # the result can be easily cached and reused. Since mylist
- # is initially in ascending order by repo.priority, stable
- # sort by version produces results in ascending order by
- # (pkg.version, repo.priority).
- self._cpv_sort_ascending(mylist)
- if self.frozen and mytree is None:
- cachelist = mylist[:]
- self.xcache["cp-list"][mycp] = cachelist
- self.xcache["match-all"][(mycp, mycp)] = cachelist
- return mylist
-
- def freeze(self):
- for x in ("bestmatch-visible", "cp-list", "match-all",
- "match-all-cpv-only", "match-visible", "minimum-all",
- "minimum-all-ignore-profile", "minimum-visible"):
- self.xcache[x]={}
- self.frozen=1
- self._better_cache = _better_cache(self.repositories)
-
- def melt(self):
- self.xcache = {}
- self._aux_cache = {}
- self._better_cache = None
- self.frozen = 0
-
-	def xmatch(self, level, origdep, mydep=None, mykey=None, mylist=None):
-		"caching match function; very tricky stuff"
- if level == "list-visible":
- level = "match-visible"
- warnings.warn("The 'list-visible' mode of "
- "portage.dbapi.porttree.portdbapi.xmatch "
- "has been renamed to match-visible",
- DeprecationWarning, stacklevel=2)
-
- if mydep is None:
- #this stuff only runs on first call of xmatch()
- #create mydep, mykey from origdep
- mydep = dep_expand(origdep, mydb=self, settings=self.settings)
- mykey = mydep.cp
-
- #if no updates are being made to the tree, we can consult our xcache...
- cache_key = None
- if self.frozen:
- cache_key = (mydep, mydep.unevaluated_atom)
- try:
- return self.xcache[level][cache_key][:]
- except KeyError:
- pass
-
- myval = None
- mytree = None
- if mydep.repo is not None:
- mytree = self.treemap.get(mydep.repo)
- if mytree is None:
- if level.startswith("match-"):
- myval = []
- else:
- myval = ""
-
- if myval is not None:
- # Unknown repo, empty result.
- pass
- elif level == "match-all-cpv-only":
- # match *all* packages, only against the cpv, in order
- # to bypass unnecessary cache access for things like IUSE
- # and SLOT.
- if mydep == mykey:
- # Share cache with match-all/cp_list when the result is the
- # same. Note that this requires that mydep.repo is None and
- # thus mytree is also None.
- level = "match-all"
- myval = self.cp_list(mykey, mytree=mytree)
- else:
- myval = match_from_list(mydep,
- self.cp_list(mykey, mytree=mytree))
-
- elif level in ("bestmatch-visible", "match-all",
- "match-visible", "minimum-all", "minimum-all-ignore-profile",
- "minimum-visible"):
- # Find the minimum matching visible version. This is optimized to
- # minimize the number of metadata accesses (improves performance
- # especially in cases where metadata needs to be generated).
- if mydep == mykey:
- mylist = self.cp_list(mykey, mytree=mytree)
- else:
- mylist = match_from_list(mydep,
- self.cp_list(mykey, mytree=mytree))
-
- ignore_profile = level in ("minimum-all-ignore-profile",)
- visibility_filter = level not in ("match-all",
- "minimum-all", "minimum-all-ignore-profile")
- single_match = level not in ("match-all", "match-visible")
- myval = []
- aux_keys = list(self._aux_cache_keys)
- if level == "bestmatch-visible":
- iterfunc = reversed
- else:
- iterfunc = iter
-
- for cpv in iterfunc(mylist):
- try:
- metadata = dict(zip(aux_keys,
- self.aux_get(cpv, aux_keys, myrepo=cpv.repo)))
- except KeyError:
- # ebuild not in this repo, or masked by corruption
- continue
-
- try:
- pkg_str = _pkg_str(cpv, metadata=metadata,
- settings=self.settings, db=self)
- except InvalidData:
- continue
-
- if visibility_filter and not self._visible(pkg_str, metadata):
- continue
-
- if mydep.slot is not None and \
- not _match_slot(mydep, pkg_str):
- continue
-
- if mydep.unevaluated_atom.use is not None and \
- not self._match_use(mydep, pkg_str, metadata,
- ignore_profile=ignore_profile):
- continue
-
- myval.append(pkg_str)
- if single_match:
- break
-
- if single_match:
- if myval:
- myval = myval[0]
- else:
- myval = ""
-
- elif level == "bestmatch-list":
- #dep match -- find best match but restrict search to sublist
- warnings.warn("The 'bestmatch-list' mode of "
- "portage.dbapi.porttree.portdbapi.xmatch is deprecated",
- DeprecationWarning, stacklevel=2)
- myval = best(list(self._iter_match(mydep, mylist)))
- elif level == "match-list":
- #dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
- warnings.warn("The 'match-list' mode of "
- "portage.dbapi.porttree.portdbapi.xmatch is deprecated",
- DeprecationWarning, stacklevel=2)
- myval = list(self._iter_match(mydep, mylist))
- else:
- raise AssertionError(
- "Invalid level argument: '%s'" % level)
-
- if self.frozen:
- xcache_this_level = self.xcache.get(level)
- if xcache_this_level is not None:
- xcache_this_level[cache_key] = myval
- if not isinstance(myval, _pkg_str):
- myval = myval[:]
-
- return myval
-
- def match(self, mydep, use_cache=1):
- return self.xmatch("match-visible", mydep)
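-
-	# A sketch of the common xmatch() levels, with portdb as in the sketches
-	# above and a hypothetical atom. "match-visible" returns every visible
-	# cpv, while "bestmatch-visible" returns only the highest visible version
-	# (or "" if nothing matches).
-	#
-	#     visible = portdb.xmatch('match-visible', 'dev-lang/python')
-	#     best_cpv = portdb.xmatch('bestmatch-visible', 'dev-lang/python')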
-
- def gvisible(self, mylist):
- warnings.warn("The 'gvisible' method of "
- "portage.dbapi.porttree.portdbapi "
- "is deprecated",
- DeprecationWarning, stacklevel=2)
- return list(self._iter_visible(iter(mylist)))
-
- def visible(self, cpv_iter):
- warnings.warn("The 'visible' method of "
- "portage.dbapi.porttree.portdbapi "
- "is deprecated",
- DeprecationWarning, stacklevel=2)
- if cpv_iter is None:
- return []
- return list(self._iter_visible(iter(cpv_iter)))
-
- def _iter_visible(self, cpv_iter, myrepo=None):
- """
- Return a new list containing only visible packages.
- """
- aux_keys = list(self._aux_cache_keys)
- metadata = {}
-
- if myrepo is not None:
- repos = [myrepo]
- else:
- # We iterate over self.porttrees, since it's common to
- # tweak this attribute in order to adjust match behavior.
- repos = []
- for tree in reversed(self.porttrees):
- repos.append(self.repositories.get_name_for_location(tree))
-
- for mycpv in cpv_iter:
- for repo in repos:
- metadata.clear()
- try:
- metadata.update(zip(aux_keys,
- self.aux_get(mycpv, aux_keys, myrepo=repo)))
- except KeyError:
- continue
- except PortageException as e:
- writemsg("!!! Error: aux_get('%s', %s)\n" %
- (mycpv, aux_keys), noiselevel=-1)
- writemsg("!!! %s\n" % (e,), noiselevel=-1)
- del e
- continue
-
- if not self._visible(mycpv, metadata):
- continue
-
- yield mycpv
- # only yield a given cpv once
- break
-
- def _visible(self, cpv, metadata):
- eapi = metadata["EAPI"]
- if not eapi_is_supported(eapi):
- return False
- if _eapi_is_deprecated(eapi):
- return False
- if not metadata["SLOT"]:
- return False
-
- settings = self.settings
- if settings._getMaskAtom(cpv, metadata):
- return False
- if settings._getMissingKeywords(cpv, metadata):
- return False
- if settings.local_config:
- metadata['CHOST'] = settings.get('CHOST', '')
- if not settings._accept_chost(cpv, metadata):
- return False
- metadata["USE"] = ""
- if "?" in metadata["LICENSE"] or \
- "?" in metadata["PROPERTIES"]:
- self.doebuild_settings.setcpv(cpv, mydb=metadata)
- metadata['USE'] = self.doebuild_settings['PORTAGE_USE']
- try:
- if settings._getMissingLicenses(cpv, metadata):
- return False
- if settings._getMissingProperties(cpv, metadata):
- return False
- if settings._getMissingRestrict(cpv, metadata):
- return False
- except InvalidDependString:
- return False
-
- return True
-
-class portagetree(object):
- def __init__(self, root=DeprecationWarning, virtual=DeprecationWarning,
- settings=None):
- """
- Constructor for a PortageTree
-
- @param root: deprecated, defaults to settings['ROOT']
- @type root: String/Path
- @param virtual: UNUSED
- @type virtual: No Idea
- @param settings: Portage Configuration object (portage.settings)
- @type settings: Instance of portage.config
- """
-
- if settings is None:
- settings = portage.settings
- self.settings = settings
-
- if root is not DeprecationWarning:
- warnings.warn("The root parameter of the " + \
- "portage.dbapi.porttree.portagetree" + \
- " constructor is now unused. Use " + \
- "settings['ROOT'] instead.",
- DeprecationWarning, stacklevel=2)
-
- if virtual is not DeprecationWarning:
- warnings.warn("The 'virtual' parameter of the "
- "portage.dbapi.porttree.portagetree"
- " constructor is unused",
- DeprecationWarning, stacklevel=2)
-
- self.portroot = settings["PORTDIR"]
- self.__virtual = virtual
- self.dbapi = portdbapi(mysettings=settings)
-
- @property
- def root(self):
- warnings.warn("The root attribute of " + \
- "portage.dbapi.porttree.portagetree" + \
- " is deprecated. Use " + \
- "settings['ROOT'] instead.",
- DeprecationWarning, stacklevel=3)
- return self.settings['ROOT']
-
- @property
- def virtual(self):
- warnings.warn("The 'virtual' attribute of " + \
- "portage.dbapi.porttree.portagetree" + \
- " is deprecated.",
- DeprecationWarning, stacklevel=3)
- return self.__virtual
-
- def dep_bestmatch(self,mydep):
- "compatibility method"
- mymatch = self.dbapi.xmatch("bestmatch-visible",mydep)
- if mymatch is None:
- return ""
- return mymatch
-
- def dep_match(self,mydep):
- "compatibility method"
- mymatch = self.dbapi.xmatch("match-visible",mydep)
- if mymatch is None:
- return []
- return mymatch
-
- def exists_specific(self,cpv):
- return self.dbapi.cpv_exists(cpv)
-
- def getallnodes(self):
-		"""new behavior: these are all *unmasked* nodes. There may or may not be
-		masked packages available for nodes in this list."""
- return self.dbapi.cp_all()
-
- def getname(self, pkgname):
- "returns file location for this particular package (DEPRECATED)"
- if not pkgname:
- return ""
- mysplit = pkgname.split("/")
- psplit = pkgsplit(mysplit[1])
- return "/".join([self.portroot, mysplit[0], psplit[0], mysplit[1]])+".ebuild"
-
- def getslot(self,mycatpkg):
- "Get a slot for a catpkg; assume it exists."
- myslot = ""
- try:
- myslot = self.dbapi._pkg_str(mycatpkg, None).slot
- except KeyError:
- pass
- return myslot
-
-class FetchlistDict(Mapping):
- """
-	This provides a mapping interface to retrieve fetch lists. It's used
- to allow portage.manifest.Manifest to access fetch lists via a standard
- mapping interface rather than use the dbapi directly.
- """
- def __init__(self, pkgdir, settings, mydbapi):
- """pkgdir is a directory containing ebuilds and settings is passed into
- portdbapi.getfetchlist for __getitem__ calls."""
- self.pkgdir = pkgdir
- self.cp = os.sep.join(pkgdir.split(os.sep)[-2:])
- self.settings = settings
- self.mytree = os.path.realpath(os.path.dirname(os.path.dirname(pkgdir)))
- self.portdb = mydbapi
-
- def __getitem__(self, pkg_key):
- """Returns the complete fetch list for a given package."""
- return list(self.portdb.getFetchMap(pkg_key, mytree=self.mytree))
-
- def __contains__(self, cpv):
- return cpv in self.__iter__()
-
- def has_key(self, pkg_key):
- """Returns true if the given package exists within pkgdir."""
- warnings.warn("portage.dbapi.porttree.FetchlistDict.has_key() is "
- "deprecated, use the 'in' operator instead",
- DeprecationWarning, stacklevel=2)
- return pkg_key in self
-
- def __iter__(self):
- return iter(self.portdb.cp_list(self.cp, mytree=self.mytree))
-
- def __len__(self):
- """This needs to be implemented in order to avoid
- infinite recursion in some cases."""
- return len(self.portdb.cp_list(self.cp, mytree=self.mytree))
-
- def keys(self):
- """Returns keys for all packages within pkgdir"""
- return self.portdb.cp_list(self.cp, mytree=self.mytree)
-
- if sys.hexversion >= 0x3000000:
- keys = __iter__
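-
-# A minimal FetchlistDict sketch, e.g. for Manifest generation; the package
-# directory below is hypothetical, and portdb is assumed to come from an
-# initialized portage environment as in the sketches above.
-#
-#     pkgdir = os.path.join(portdb.porttrees[0], 'sys-apps', 'portage')
-#     fetchlists = FetchlistDict(pkgdir, portdb.settings, portdb)
-#     for cpv in fetchlists:
-#         print(cpv, fetchlists[cpv])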
-
-
-def _async_manifest_fetchlist(portdb, repo_config, cp, cpv_list=None,
- max_jobs=None, max_load=None, loop=None):
- """
- Asynchronous form of FetchlistDict, with max_jobs and max_load
- parameters in order to control async_aux_get concurrency.
-
- @param portdb: portdbapi instance
- @type portdb: portdbapi
- @param repo_config: repository configuration for a Manifest
- @type repo_config: RepoConfig
- @param cp: cp for a Manifest
- @type cp: str
- @param cpv_list: list of ebuild cpv values for a Manifest
- @type cpv_list: list
- @param max_jobs: max number of futures to process concurrently (default
- is multiprocessing.cpu_count())
- @type max_jobs: int
- @param max_load: max load allowed when scheduling a new future,
- otherwise schedule no more than 1 future at a time (default
- is multiprocessing.cpu_count())
- @type max_load: int or float
- @param loop: event loop
- @type loop: EventLoop
- @return: a Future resulting in a Mapping compatible with FetchlistDict
- @rtype: asyncio.Future (or compatible)
- """
- loop = asyncio._wrap_loop(loop)
- result = loop.create_future()
- cpv_list = (portdb.cp_list(cp, mytree=repo_config.location)
- if cpv_list is None else cpv_list)
-
- def gather_done(gather_result):
- # All exceptions must be consumed from gather_result before this
- # function returns, in order to avoid triggering the event loop's
- # exception handler.
- e = None
- if not gather_result.cancelled():
- for future in gather_result.result():
- if (future.done() and not future.cancelled() and
- future.exception() is not None):
- e = future.exception()
-
- if result.cancelled():
- return
- elif e is None:
- result.set_result(dict((k, list(v.result()))
- for k, v in zip(cpv_list, gather_result.result())))
- else:
- result.set_exception(e)
-
- gather_result = iter_gather(
- # Use a generator expression for lazy evaluation, so that iter_gather
- # controls the number of concurrent async_fetch_map calls.
- (portdb.async_fetch_map(cpv, mytree=repo_config.location, loop=loop)
- for cpv in cpv_list),
- max_jobs=max_jobs,
- max_load=max_load,
- loop=loop,
- )
-
- gather_result.add_done_callback(gather_done)
- result.add_done_callback(lambda result:
- gather_result.cancel() if result.cancelled() else None)
-
- return result
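-
-# A sketch of driving _async_manifest_fetchlist() from a synchronous caller;
-# the cp is hypothetical, the main repository is used for repo_config, and
-# asyncio refers to portage.util.futures:asyncio as used above.
-#
-#     loop = asyncio._wrap_loop()
-#     repo_config = portdb.repositories.get_repo_for_location(
-#         portdb.repositories.mainRepoLocation())
-#     fetchlists = loop.run_until_complete(_async_manifest_fetchlist(
-#         portdb, repo_config, 'sys-apps/portage', loop=loop))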
-
-
-def _parse_uri_map(cpv, metadata, use=None):
-
- myuris = use_reduce(metadata.get('SRC_URI', ''),
- uselist=use, matchall=(use is None),
- is_src_uri=True,
- eapi=metadata['EAPI'])
-
- uri_map = OrderedDict()
-
- myuris.reverse()
- while myuris:
- uri = myuris.pop()
- if myuris and myuris[-1] == "->":
- myuris.pop()
- distfile = myuris.pop()
- else:
- distfile = os.path.basename(uri)
- if not distfile:
- raise portage.exception.InvalidDependString(
- ("getFetchMap(): '%s' SRC_URI has no file " + \
- "name: '%s'") % (cpv, uri))
-
- uri_set = uri_map.get(distfile)
- if uri_set is None:
- # Use OrderedDict to preserve order from SRC_URI
- # while ensuring uniqueness.
- uri_set = OrderedDict()
- uri_map[distfile] = uri_set
-
- # SRC_URI may contain a file name with no scheme, and in
- # this case it does not belong in uri_set.
- if urlparse(uri).scheme:
- uri_set[uri] = True
-
- # Convert OrderedDicts to tuples.
- for k, v in uri_map.items():
- uri_map[k] = tuple(v)
-
- return uri_map
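-
-# A small sketch of the SRC_URI "->" rename handling implemented above; the
-# cpv and metadata values are illustrative.
-#
-#     uri_map = _parse_uri_map('foo/bar-1.0',
-#         {'EAPI': '6', 'SRC_URI': 'http://example.org/bar-1.0.tgz -> bar-1.0.tar.gz'})
-#     # {'bar-1.0.tar.gz': ('http://example.org/bar-1.0.tgz',)}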
diff --git a/pym/portage/dbapi/vartree.py b/pym/portage/dbapi/vartree.py
deleted file mode 100644
index a104306eb..000000000
--- a/pym/portage/dbapi/vartree.py
+++ /dev/null
@@ -1,5559 +0,0 @@
-# Copyright 1998-2018 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from __future__ import division, unicode_literals
-
-__all__ = [
- "vardbapi", "vartree", "dblink"] + \
- ["write_contents", "tar_contents"]
-
-import portage
-portage.proxy.lazyimport.lazyimport(globals(),
- 'hashlib:md5',
- 'portage.checksum:_perform_md5_merge@perform_md5',
- 'portage.data:portage_gid,portage_uid,secpass',
- 'portage.dbapi.dep_expand:dep_expand',
- 'portage.dbapi._MergeProcess:MergeProcess',
- 'portage.dbapi._SyncfsProcess:SyncfsProcess',
- 'portage.dep:dep_getkey,isjustname,isvalidatom,match_from_list,' + \
- 'use_reduce,_slot_separator,_repo_separator',
- 'portage.eapi:_get_eapi_attrs',
- 'portage.elog:collect_ebuild_messages,collect_messages,' + \
- 'elog_process,_merge_logentries',
- 'portage.locks:lockdir,unlockdir,lockfile,unlockfile',
- 'portage.output:bold,colorize',
- 'portage.package.ebuild.doebuild:doebuild_environment,' + \
- '_merge_unicode_error', '_spawn_phase',
- 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
- 'portage.package.ebuild._ipc.QueryCommand:QueryCommand',
- 'portage.process:find_binary',
- 'portage.util:apply_secpass_permissions,ConfigProtect,ensure_dirs,' + \
- 'writemsg,writemsg_level,write_atomic,atomic_ofstream,writedict,' + \
- 'grabdict,normalize_path,new_protect_filename',
- 'portage.util.digraph:digraph',
- 'portage.util.env_update:env_update',
- 'portage.util.install_mask:install_mask_dir,InstallMask',
- 'portage.util.listdir:dircache,listdir',
- 'portage.util.movefile:movefile',
- 'portage.util.path:first_existing,iter_parents',
- 'portage.util.writeable_check:get_ro_checker',
- 'portage.util._xattr:xattr',
- 'portage.util._dyn_libs.PreservedLibsRegistry:PreservedLibsRegistry',
- 'portage.util._dyn_libs.LinkageMapELF:LinkageMapELF@LinkageMap',
- 'portage.util._dyn_libs.NeededEntry:NeededEntry',
- 'portage.util._async.SchedulerInterface:SchedulerInterface',
- 'portage.util._eventloop.EventLoop:EventLoop',
- 'portage.util._eventloop.global_event_loop:global_event_loop',
- 'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,vercmp,' + \
- '_get_slot_re,_pkgsplit@pkgsplit,_pkg_str,_unknown_repo',
- 'subprocess',
- 'tarfile',
-)
-
-from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, \
- MERGING_IDENTIFIER, PORTAGE_PACKAGE_ATOM, PRIVATE_PATH, VDB_PATH
-from portage.dbapi import dbapi
-from portage.exception import CommandNotFound, \
- InvalidData, InvalidLocation, InvalidPackageName, \
- FileNotFound, PermissionDenied, UnsupportedAPIException
-from portage.localization import _
-
-from portage import abssymlink, _movefile, bsd_chflags
-
-# This is a special version of the os module, wrapped for unicode support.
-from portage import os
-from portage import shutil
-from portage import _encodings
-from portage import _os_merge
-from portage import _selinux_merge
-from portage import _unicode_decode
-from portage import _unicode_encode
-from ._VdbMetadataDelta import VdbMetadataDelta
-
-from _emerge.EbuildBuildDir import EbuildBuildDir
-from _emerge.EbuildPhase import EbuildPhase
-from _emerge.emergelog import emergelog
-from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
-from _emerge.SpawnProcess import SpawnProcess
-from ._ContentsCaseSensitivityManager import ContentsCaseSensitivityManager
-
-import errno
-import fnmatch
-import gc
-import grp
-import io
-from itertools import chain
-import logging
-import os as _os
-import platform
-import pwd
-import re
-import stat
-import sys
-import tempfile
-import textwrap
-import time
-import warnings
-
-try:
- import cPickle as pickle
-except ImportError:
- import pickle
-
-if sys.hexversion >= 0x3000000:
- # pylint: disable=W0622
- basestring = str
- long = int
- _unicode = str
-else:
- _unicode = unicode
-
-class vardbapi(dbapi):
-
- _excluded_dirs = ["CVS", "lost+found"]
- _excluded_dirs = [re.escape(x) for x in _excluded_dirs]
- _excluded_dirs = re.compile(r'^(\..*|' + MERGING_IDENTIFIER + '.*|' + \
- "|".join(_excluded_dirs) + r')$')
-
- _aux_cache_version = "1"
- _owners_cache_version = "1"
-
- # Number of uncached packages to trigger cache update, since
- # it's wasteful to update it for every vdb change.
- _aux_cache_threshold = 5
-
- _aux_cache_keys_re = re.compile(r'^NEEDED\..*$')
- _aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$')
-
- def __init__(self, _unused_param=DeprecationWarning,
- categories=None, settings=None, vartree=None):
- """
- The categories parameter is unused since the dbapi class
- now has a categories property that is generated from the
- available packages.
- """
-
- # Used by emerge to check whether any packages
- # have been added or removed.
- self._pkgs_changed = False
-
- # The _aux_cache_threshold doesn't work as designed
- # if the cache is flushed from a subprocess, so we
-		# use this to avoid wasteful vdb cache updates.
- self._flush_cache_enabled = True
-
- #cache for category directory mtimes
- self.mtdircache = {}
-
- #cache for dependency checks
- self.matchcache = {}
-
- #cache for cp_list results
- self.cpcache = {}
-
- self.blockers = None
- if settings is None:
- settings = portage.settings
- self.settings = settings
-
- if _unused_param is not DeprecationWarning:
- warnings.warn("The first parameter of the "
- "portage.dbapi.vartree.vardbapi"
- " constructor is now unused. Instead "
- "settings['ROOT'] is used.",
- DeprecationWarning, stacklevel=2)
-
- self._eroot = settings['EROOT']
- self._dbroot = self._eroot + VDB_PATH
- self._lock = None
- self._lock_count = 0
-
- self._conf_mem_file = self._eroot + CONFIG_MEMORY_FILE
- self._fs_lock_obj = None
- self._fs_lock_count = 0
- self._slot_locks = {}
-
- if vartree is None:
- vartree = portage.db[settings['EROOT']]['vartree']
- self.vartree = vartree
- self._aux_cache_keys = set(
- ["BDEPEND", "BUILD_TIME", "CHOST", "COUNTER", "DEPEND",
- "DESCRIPTION", "EAPI", "HDEPEND", "HOMEPAGE",
- "BUILD_ID", "IUSE", "KEYWORDS",
- "LICENSE", "PDEPEND", "PROPERTIES", "RDEPEND",
- "repository", "RESTRICT" , "SLOT", "USE", "DEFINED_PHASES",
- "PROVIDES", "REQUIRES"
- ])
- self._aux_cache_obj = None
- self._aux_cache_filename = os.path.join(self._eroot,
- CACHE_PATH, "vdb_metadata.pickle")
- self._cache_delta_filename = os.path.join(self._eroot,
- CACHE_PATH, "vdb_metadata_delta.json")
- self._cache_delta = VdbMetadataDelta(self)
- self._counter_path = os.path.join(self._eroot,
- CACHE_PATH, "counter")
-
- self._plib_registry = PreservedLibsRegistry(settings["ROOT"],
- os.path.join(self._eroot, PRIVATE_PATH, "preserved_libs_registry"))
- self._linkmap = LinkageMap(self)
- self._owners = self._owners_db(self)
-
- self._cached_counter = None
-
- @property
- def writable(self):
- """
- Check if var/db/pkg is writable, or permissions are sufficient
- to create it if it does not exist yet.
- @rtype: bool
- @return: True if var/db/pkg is writable or can be created,
- False otherwise
- """
- return os.access(first_existing(self._dbroot), os.W_OK)
-
- @property
- def root(self):
- warnings.warn("The root attribute of "
- "portage.dbapi.vartree.vardbapi"
- " is deprecated. Use "
- "settings['ROOT'] instead.",
- DeprecationWarning, stacklevel=3)
- return self.settings['ROOT']
-
- def getpath(self, mykey, filename=None):
- # This is an optimized hotspot, so don't use unicode-wrapped
- # os module and don't use os.path.join().
- rValue = self._eroot + VDB_PATH + _os.sep + mykey
- if filename is not None:
- # If filename is always relative, we can do just
- # rValue += _os.sep + filename
- rValue = _os.path.join(rValue, filename)
- return rValue
-
- def lock(self):
- """
- Acquire a reentrant lock, blocking, for cooperation with concurrent
- processes. State is inherited by subprocesses, allowing subprocesses
- to reenter a lock that was acquired by a parent process. However,
- a lock can be released only by the same process that acquired it.
- """
- if self._lock_count:
- self._lock_count += 1
- else:
- if self._lock is not None:
- raise AssertionError("already locked")
- # At least the parent needs to exist for the lock file.
- ensure_dirs(self._dbroot)
- self._lock = lockdir(self._dbroot)
- self._lock_count += 1
-
- def unlock(self):
- """
- Release a lock, decrementing the recursion level. Each unlock() call
- must be matched with a prior lock() call, or else an AssertionError
- will be raised if unlock() is called while not locked.
- """
- if self._lock_count > 1:
- self._lock_count -= 1
- else:
- if self._lock is None:
- raise AssertionError("not locked")
- self._lock_count = 0
- unlockdir(self._lock)
- self._lock = None
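-
-	# A minimal lock()/unlock() pairing sketch; vardb is assumed to come from
-	# an initialized portage environment, and sufficient privileges are needed
-	# to create the lock file.
-	#
-	#     vardb = portage.db[portage.settings['EROOT']]['vartree'].dbapi
-	#     vardb.lock()
-	#     try:
-	#         installed = vardb.cpv_all()
-	#     finally:
-	#         vardb.unlock()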
-
- def _fs_lock(self):
- """
- Acquire a reentrant lock, blocking, for cooperation with concurrent
- processes.
- """
- if self._fs_lock_count < 1:
- if self._fs_lock_obj is not None:
- raise AssertionError("already locked")
- try:
- self._fs_lock_obj = lockfile(self._conf_mem_file)
- except InvalidLocation:
- self.settings._init_dirs()
- self._fs_lock_obj = lockfile(self._conf_mem_file)
- self._fs_lock_count += 1
-
- def _fs_unlock(self):
- """
- Release a lock, decrementing the recursion level.
- """
- if self._fs_lock_count <= 1:
- if self._fs_lock_obj is None:
- raise AssertionError("not locked")
- unlockfile(self._fs_lock_obj)
- self._fs_lock_obj = None
- self._fs_lock_count -= 1
-
- def _slot_lock(self, slot_atom):
- """
- Acquire a slot lock (reentrant).
-
-		WARNING: The vardbapi._slot_lock method is not safe to call
- in the main process when that process is scheduling
- install/uninstall tasks in parallel, since the locks would
- be inherited by child processes. In order to avoid this sort
- of problem, this method should be called in a subprocess
- (typically spawned by the MergeProcess class).
- """
- lock, counter = self._slot_locks.get(slot_atom, (None, 0))
- if lock is None:
- lock_path = self.getpath("%s:%s" % (slot_atom.cp, slot_atom.slot))
- ensure_dirs(os.path.dirname(lock_path))
- lock = lockfile(lock_path, wantnewlockfile=True)
- self._slot_locks[slot_atom] = (lock, counter + 1)
-
- def _slot_unlock(self, slot_atom):
- """
-		Release a slot lock (or decrement the recursion level).
- """
- lock, counter = self._slot_locks.get(slot_atom, (None, 0))
- if lock is None:
- raise AssertionError("not locked")
- counter -= 1
- if counter == 0:
- unlockfile(lock)
- del self._slot_locks[slot_atom]
- else:
- self._slot_locks[slot_atom] = (lock, counter)
-
- def _bump_mtime(self, cpv):
- """
-		This is called before and after any modifications, so that consumers
- can use directory mtimes to validate caches. See bug #290428.
- """
- base = self._eroot + VDB_PATH
- cat = catsplit(cpv)[0]
- catdir = base + _os.sep + cat
- t = time.time()
- t = (t, t)
- try:
- for x in (catdir, base):
- os.utime(x, t)
- except OSError:
- ensure_dirs(catdir)
-
- def cpv_exists(self, mykey, myrepo=None):
-		"Tells us whether the given package is installed in the vdb (no masking)"
- return os.path.exists(self.getpath(mykey))
-
- def cpv_counter(self, mycpv):
- "This method will grab the COUNTER. Returns a counter value."
- try:
- return long(self.aux_get(mycpv, ["COUNTER"])[0])
- except (KeyError, ValueError):
- pass
- writemsg_level(_("portage: COUNTER for %s was corrupted; " \
- "resetting to value of 0\n") % (mycpv,),
- level=logging.ERROR, noiselevel=-1)
- return 0
-
- def cpv_inject(self, mycpv):
- "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
- ensure_dirs(self.getpath(mycpv))
- counter = self.counter_tick(mycpv=mycpv)
- # write local package counter so that emerge clean does the right thing
- write_atomic(self.getpath(mycpv, filename="COUNTER"), str(counter))
-
- def isInjected(self, mycpv):
- if self.cpv_exists(mycpv):
- if os.path.exists(self.getpath(mycpv, filename="INJECTED")):
- return True
- if not os.path.exists(self.getpath(mycpv, filename="CONTENTS")):
- return True
- return False
-
- def move_ent(self, mylist, repo_match=None):
- origcp = mylist[1]
- newcp = mylist[2]
-
- # sanity check
- for atom in (origcp, newcp):
- if not isjustname(atom):
- raise InvalidPackageName(str(atom))
- origmatches = self.match(origcp, use_cache=0)
- moves = 0
- if not origmatches:
- return moves
- for mycpv in origmatches:
- try:
- mycpv = self._pkg_str(mycpv, None)
- except (KeyError, InvalidData):
- continue
- mycpv_cp = cpv_getkey(mycpv)
- if mycpv_cp != origcp:
- # Ignore PROVIDE virtual match.
- continue
- if repo_match is not None \
- and not repo_match(mycpv.repo):
- continue
-
- # Use isvalidatom() to check if this move is valid for the
- # EAPI (characters allowed in package names may vary).
- if not isvalidatom(newcp, eapi=mycpv.eapi):
- continue
-
- mynewcpv = mycpv.replace(mycpv_cp, _unicode(newcp), 1)
- mynewcat = catsplit(newcp)[0]
- origpath = self.getpath(mycpv)
- if not os.path.exists(origpath):
- continue
- moves += 1
- if not os.path.exists(self.getpath(mynewcat)):
- #create the directory
- ensure_dirs(self.getpath(mynewcat))
- newpath = self.getpath(mynewcpv)
- if os.path.exists(newpath):
- #dest already exists; keep this puppy where it is.
- continue
- _movefile(origpath, newpath, mysettings=self.settings)
- self._clear_pkg_cache(self._dblink(mycpv))
- self._clear_pkg_cache(self._dblink(mynewcpv))
-
- # We need to rename the ebuild now.
- old_pf = catsplit(mycpv)[1]
- new_pf = catsplit(mynewcpv)[1]
- if new_pf != old_pf:
- try:
- os.rename(os.path.join(newpath, old_pf + ".ebuild"),
- os.path.join(newpath, new_pf + ".ebuild"))
- except EnvironmentError as e:
- if e.errno != errno.ENOENT:
- raise
- del e
- write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
- write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
-
- return moves
-
- def cp_list(self, mycp, use_cache=1):
- mysplit=catsplit(mycp)
- if mysplit[0] == '*':
- mysplit[0] = mysplit[0][1:]
- try:
- if sys.hexversion >= 0x3030000:
- mystat = os.stat(self.getpath(mysplit[0])).st_mtime_ns
- else:
- mystat = os.stat(self.getpath(mysplit[0])).st_mtime
- except OSError:
- mystat = 0
- if use_cache and mycp in self.cpcache:
- cpc = self.cpcache[mycp]
- if cpc[0] == mystat:
- return cpc[1][:]
- cat_dir = self.getpath(mysplit[0])
- try:
- dir_list = os.listdir(cat_dir)
- except EnvironmentError as e:
- if e.errno == PermissionDenied.errno:
- raise PermissionDenied(cat_dir)
- del e
- dir_list = []
-
- returnme = []
- for x in dir_list:
- if self._excluded_dirs.match(x) is not None:
- continue
- ps = pkgsplit(x)
- if not ps:
- self.invalidentry(os.path.join(self.getpath(mysplit[0]), x))
- continue
- if len(mysplit) > 1:
- if ps[0] == mysplit[1]:
- cpv = "%s/%s" % (mysplit[0], x)
- metadata = dict(zip(self._aux_cache_keys,
- self.aux_get(cpv, self._aux_cache_keys)))
- returnme.append(_pkg_str(cpv, metadata=metadata,
- settings=self.settings, db=self))
- self._cpv_sort_ascending(returnme)
- if use_cache:
- self.cpcache[mycp] = [mystat, returnme[:]]
- elif mycp in self.cpcache:
- del self.cpcache[mycp]
- return returnme
-
- def cpv_all(self, use_cache=1):
- """
- Set use_cache=0 to bypass the portage.cachedir() cache in cases
- when the accuracy of mtime staleness checks should not be trusted
- (generally this is only necessary in critical sections that
- involve merge or unmerge of packages).
- """
- return list(self._iter_cpv_all(use_cache=use_cache))
-
- def _iter_cpv_all(self, use_cache=True, sort=False):
- returnme = []
- basepath = os.path.join(self._eroot, VDB_PATH) + os.path.sep
-
- if use_cache:
- from portage import listdir
- else:
- def listdir(p, **kwargs):
- try:
- return [x for x in os.listdir(p) \
- if os.path.isdir(os.path.join(p, x))]
- except EnvironmentError as e:
- if e.errno == PermissionDenied.errno:
- raise PermissionDenied(p)
- del e
- return []
-
- catdirs = listdir(basepath, EmptyOnError=1, ignorecvs=1, dirsonly=1)
- if sort:
- catdirs.sort()
-
- for x in catdirs:
- if self._excluded_dirs.match(x) is not None:
- continue
- if not self._category_re.match(x):
- continue
-
- pkgdirs = listdir(basepath + x, EmptyOnError=1, dirsonly=1)
- if sort:
- pkgdirs.sort()
-
- for y in pkgdirs:
- if self._excluded_dirs.match(y) is not None:
- continue
- subpath = x + "/" + y
- # -MERGING- should never be a cpv, nor should files.
- try:
- subpath = _pkg_str(subpath, db=self)
- except InvalidData:
- self.invalidentry(self.getpath(subpath))
- continue
-
- yield subpath
-
- def cp_all(self, use_cache=1, sort=False):
- mylist = self.cpv_all(use_cache=use_cache)
- d={}
- for y in mylist:
- if y[0] == '*':
- y = y[1:]
- try:
- mysplit = catpkgsplit(y)
- except InvalidData:
- self.invalidentry(self.getpath(y))
- continue
- if not mysplit:
- self.invalidentry(self.getpath(y))
- continue
- d[mysplit[0]+"/"+mysplit[1]] = None
- return sorted(d) if sort else list(d)
-
- def checkblockers(self, origdep):
- pass
-
- def _clear_cache(self):
- self.mtdircache.clear()
- self.matchcache.clear()
- self.cpcache.clear()
- self._aux_cache_obj = None
-
- def _add(self, pkg_dblink):
- self._pkgs_changed = True
- self._clear_pkg_cache(pkg_dblink)
-
- def _remove(self, pkg_dblink):
- self._pkgs_changed = True
- self._clear_pkg_cache(pkg_dblink)
-
- def _clear_pkg_cache(self, pkg_dblink):
- # Due to 1 second mtime granularity in <python-2.5, mtime checks
- # are not always sufficient to invalidate vardbapi caches. Therefore,
- # the caches need to be actively invalidated here.
- self.mtdircache.pop(pkg_dblink.cat, None)
- self.matchcache.pop(pkg_dblink.cat, None)
- self.cpcache.pop(pkg_dblink.mysplit[0], None)
- dircache.pop(pkg_dblink.dbcatdir, None)
-
- def match(self, origdep, use_cache=1):
- "caching match function"
- mydep = dep_expand(
- origdep, mydb=self, use_cache=use_cache, settings=self.settings)
- cache_key = (mydep, mydep.unevaluated_atom)
- mykey = dep_getkey(mydep)
- mycat = catsplit(mykey)[0]
- if not use_cache:
- if mycat in self.matchcache:
- del self.mtdircache[mycat]
- del self.matchcache[mycat]
- return list(self._iter_match(mydep,
- self.cp_list(mydep.cp, use_cache=use_cache)))
- try:
- if sys.hexversion >= 0x3030000:
- curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime_ns
- else:
- curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime
- except (IOError, OSError):
- curmtime=0
-
- if mycat not in self.matchcache or \
- self.mtdircache[mycat] != curmtime:
- # clear cache entry
- self.mtdircache[mycat] = curmtime
- self.matchcache[mycat] = {}
- if mydep not in self.matchcache[mycat]:
- mymatch = list(self._iter_match(mydep,
- self.cp_list(mydep.cp, use_cache=use_cache)))
- self.matchcache[mycat][cache_key] = mymatch
- return self.matchcache[mycat][cache_key][:]
-
- def findname(self, mycpv, myrepo=None):
- return self.getpath(str(mycpv), filename=catsplit(mycpv)[1]+".ebuild")
-
- def flush_cache(self):
- """If the current user has permission and the internal aux_get cache has
- been updated, save it to disk and mark it unmodified. This is called
- by emerge after it has loaded the full vdb for use in dependency
- calculations. Currently, the cache is only written if the user has
- superuser privileges (since that's required to obtain a lock), but all
- users have read access and benefit from faster metadata lookups (as
- long as at least part of the cache is still valid)."""
- if self._flush_cache_enabled and \
- self._aux_cache is not None and \
- secpass >= 2 and \
- (len(self._aux_cache["modified"]) >= self._aux_cache_threshold or
- not os.path.exists(self._cache_delta_filename)):
-
- ensure_dirs(os.path.dirname(self._aux_cache_filename))
-
- self._owners.populate() # index any unindexed contents
- valid_nodes = set(self.cpv_all())
- for cpv in list(self._aux_cache["packages"]):
- if cpv not in valid_nodes:
- del self._aux_cache["packages"][cpv]
- del self._aux_cache["modified"]
- timestamp = time.time()
- self._aux_cache["timestamp"] = timestamp
-
- f = atomic_ofstream(self._aux_cache_filename, 'wb')
- pickle.dump(self._aux_cache, f, protocol=2)
- f.close()
- apply_secpass_permissions(
- self._aux_cache_filename, mode=0o644)
-
- self._cache_delta.initialize(timestamp)
- apply_secpass_permissions(
- self._cache_delta_filename, mode=0o644)
-
- self._aux_cache["modified"] = set()
-
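# Editorial sketch, not part of the original file: flush_cache() is a no-op for
# unprivileged callers (secpass < 2), and otherwise only writes once the number
# of modified entries reaches _aux_cache_threshold or the delta file is missing.
import portage

vardb = portage.db[portage.settings['EROOT']]['vartree'].dbapi
vardb.flush_cache()  # safe to call unconditionally; the method decides whether to write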
- @property
- def _aux_cache(self):
- if self._aux_cache_obj is None:
- self._aux_cache_init()
- return self._aux_cache_obj
-
- def _aux_cache_init(self):
- aux_cache = None
- open_kwargs = {}
- if sys.hexversion >= 0x3000000 and sys.hexversion < 0x3020000:
- # Buffered io triggers extreme performance issues in
- # Unpickler.load() (problem observed with python-3.0.1).
- # Unfortunately, performance is still poor relative to
- # python-2.x, but buffering makes it much worse (problem
- # appears to be solved in Python >=3.2 at least).
- open_kwargs["buffering"] = 0
- try:
- with open(_unicode_encode(self._aux_cache_filename,
- encoding=_encodings['fs'], errors='strict'),
- mode='rb', **open_kwargs) as f:
- mypickle = pickle.Unpickler(f)
- try:
- mypickle.find_global = None
- except AttributeError:
- # TODO: If py3k, override Unpickler.find_class().
- pass
- aux_cache = mypickle.load()
- except (SystemExit, KeyboardInterrupt):
- raise
- except Exception as e:
- if isinstance(e, EnvironmentError) and \
- getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES):
- pass
- else:
- writemsg(_("!!! Error loading '%s': %s\n") % \
- (self._aux_cache_filename, e), noiselevel=-1)
- del e
-
- if not aux_cache or \
- not isinstance(aux_cache, dict) or \
- aux_cache.get("version") != self._aux_cache_version or \
- not aux_cache.get("packages"):
- aux_cache = {"version": self._aux_cache_version}
- aux_cache["packages"] = {}
-
- owners = aux_cache.get("owners")
- if owners is not None:
- if not isinstance(owners, dict):
- owners = None
- elif "version" not in owners:
- owners = None
- elif owners["version"] != self._owners_cache_version:
- owners = None
- elif "base_names" not in owners:
- owners = None
- elif not isinstance(owners["base_names"], dict):
- owners = None
-
- if owners is None:
- owners = {
- "base_names" : {},
- "version" : self._owners_cache_version
- }
- aux_cache["owners"] = owners
-
- aux_cache["modified"] = set()
- self._aux_cache_obj = aux_cache
-
- def aux_get(self, mycpv, wants, myrepo = None):
- """This automatically caches selected keys that are frequently needed
- by emerge for dependency calculations. The cached metadata is
- considered valid if the mtime of the package directory has not changed
- since the data was cached. The cache is stored in a pickled dict
- object with the following format:
-
- {version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}
-
- If an error occurs while loading the cache pickle or the version is
- unrecognized, the cache will simply be recreated from scratch (it is
- completely disposable).
- """
- cache_these_wants = self._aux_cache_keys.intersection(wants)
- for x in wants:
- if self._aux_cache_keys_re.match(x) is not None:
- cache_these_wants.add(x)
-
- if not cache_these_wants:
- mydata = self._aux_get(mycpv, wants)
- return [mydata[x] for x in wants]
-
- cache_these = set(self._aux_cache_keys)
- cache_these.update(cache_these_wants)
-
- mydir = self.getpath(mycpv)
- mydir_stat = None
- try:
- mydir_stat = os.stat(mydir)
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- raise KeyError(mycpv)
- # Use float mtime when available.
- mydir_mtime = mydir_stat.st_mtime
- pkg_data = self._aux_cache["packages"].get(mycpv)
- pull_me = cache_these.union(wants)
- mydata = {"_mtime_" : mydir_mtime}
- cache_valid = False
- cache_incomplete = False
- cache_mtime = None
- metadata = None
- if pkg_data is not None:
- if not isinstance(pkg_data, tuple) or len(pkg_data) != 2:
- pkg_data = None
- else:
- cache_mtime, metadata = pkg_data
- if not isinstance(cache_mtime, (float, long, int)) or \
- not isinstance(metadata, dict):
- pkg_data = None
-
- if pkg_data:
- cache_mtime, metadata = pkg_data
- if isinstance(cache_mtime, float):
- if cache_mtime == mydir_stat.st_mtime:
- cache_valid = True
-
- # Handle truncated mtime in order to avoid cache
- # invalidation for livecd squashfs (bug 564222).
- elif long(cache_mtime) == mydir_stat.st_mtime:
- cache_valid = True
- else:
- # Cache may contain integer mtime.
- cache_valid = cache_mtime == mydir_stat[stat.ST_MTIME]
-
- if cache_valid:
- # Migrate old metadata to unicode.
- for k, v in metadata.items():
- metadata[k] = _unicode_decode(v,
- encoding=_encodings['repo.content'], errors='replace')
-
- mydata.update(metadata)
- pull_me.difference_update(mydata)
-
- if pull_me:
- # pull any needed data and cache it
- aux_keys = list(pull_me)
- mydata.update(self._aux_get(mycpv, aux_keys, st=mydir_stat))
- if not cache_valid or cache_these.difference(metadata):
- cache_data = {}
- if cache_valid and metadata:
- cache_data.update(metadata)
- for aux_key in cache_these:
- cache_data[aux_key] = mydata[aux_key]
- self._aux_cache["packages"][_unicode(mycpv)] = \
- (mydir_mtime, cache_data)
- self._aux_cache["modified"].add(mycpv)
-
- eapi_attrs = _get_eapi_attrs(mydata['EAPI'])
- if _get_slot_re(eapi_attrs).match(mydata['SLOT']) is None:
- # Empty or invalid slot triggers InvalidAtom exceptions when
- # generating slot atoms for packages, so translate it to '0' here.
- mydata['SLOT'] = '0'
-
- return [mydata[x] for x in wants]
-
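# Editorial sketch, not part of the original file: aux_get() returns values in
# the same order as the requested keys, serving frequently used keys such as
# SLOT and EAPI from the pickled cache described above when the package
# directory mtime is unchanged.
import portage

vardb = portage.db[portage.settings['EROOT']]['vartree'].dbapi
cpv = vardb.cpv_all()[0]  # any installed package version
slot, eapi, counter = vardb.aux_get(cpv, ['SLOT', 'EAPI', 'COUNTER'])
print(cpv, slot, eapi, counter)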
- def _aux_get(self, mycpv, wants, st=None):
- mydir = self.getpath(mycpv)
- if st is None:
- try:
- st = os.stat(mydir)
- except OSError as e:
- if e.errno == errno.ENOENT:
- raise KeyError(mycpv)
- elif e.errno == PermissionDenied.errno:
- raise PermissionDenied(mydir)
- else:
- raise
- if not stat.S_ISDIR(st.st_mode):
- raise KeyError(mycpv)
- results = {}
- env_keys = []
- for x in wants:
- if x == "_mtime_":
- results[x] = st[stat.ST_MTIME]
- continue
- try:
- with io.open(
- _unicode_encode(os.path.join(mydir, x),
- encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['repo.content'],
- errors='replace') as f:
- myd = f.read()
- except IOError:
- if x not in self._aux_cache_keys and \
- self._aux_cache_keys_re.match(x) is None:
- env_keys.append(x)
- continue
- myd = ''
-
- # Preserve \n for metadata that is known to
- # contain multiple lines.
- if self._aux_multi_line_re.match(x) is None:
- myd = " ".join(myd.split())
-
- results[x] = myd
-
- if env_keys:
- env_results = self._aux_env_search(mycpv, env_keys)
- for k in env_keys:
- v = env_results.get(k)
- if v is None:
- v = ''
- if self._aux_multi_line_re.match(k) is None:
- v = " ".join(v.split())
- results[k] = v
-
- if results.get("EAPI") == "":
- results["EAPI"] = '0'
-
- return results
-
- def _aux_env_search(self, cpv, variables):
- """
- Search environment.bz2 for the specified variables. Returns
- a dict mapping variables to values, and any variables not
- found in the environment will not be included in the dict.
- This is useful for querying variables like ${SRC_URI} and
- ${A}, which are not saved in separate files but are available
- in environment.bz2 (see bug #395463).
- """
- env_file = self.getpath(cpv, filename="environment.bz2")
- if not os.path.isfile(env_file):
- return {}
- bunzip2_cmd = portage.util.shlex_split(
- self.settings.get("PORTAGE_BUNZIP2_COMMAND", ""))
- if not bunzip2_cmd:
- bunzip2_cmd = portage.util.shlex_split(
- self.settings["PORTAGE_BZIP2_COMMAND"])
- bunzip2_cmd.append("-d")
- args = bunzip2_cmd + ["-c", env_file]
- try:
- proc = subprocess.Popen(args, stdout=subprocess.PIPE)
- except EnvironmentError as e:
- if e.errno != errno.ENOENT:
- raise
- raise portage.exception.CommandNotFound(args[0])
-
- # Parts of the following code are borrowed from
- # filter-bash-environment.py (keep them in sync).
- var_assign_re = re.compile(r'(^|^declare\s+-\S+\s+|^declare\s+|^export\s+)([^=\s]+)=("|\')?(.*)$')
- close_quote_re = re.compile(r'(\\"|"|\')\s*$')
- def have_end_quote(quote, line):
- close_quote_match = close_quote_re.search(line)
- return close_quote_match is not None and \
- close_quote_match.group(1) == quote
-
- variables = frozenset(variables)
- results = {}
- for line in proc.stdout:
- line = _unicode_decode(line,
- encoding=_encodings['content'], errors='replace')
- var_assign_match = var_assign_re.match(line)
- if var_assign_match is not None:
- key = var_assign_match.group(2)
- quote = var_assign_match.group(3)
- if quote is not None:
- if have_end_quote(quote,
- line[var_assign_match.end(2)+2:]):
- value = var_assign_match.group(4)
- else:
- value = [var_assign_match.group(4)]
- for line in proc.stdout:
- line = _unicode_decode(line,
- encoding=_encodings['content'],
- errors='replace')
- value.append(line)
- if have_end_quote(quote, line):
- break
- value = ''.join(value)
- # remove trailing quote and whitespace
- value = value.rstrip()[:-1]
- else:
- value = var_assign_match.group(4).rstrip()
-
- if key in variables:
- results[key] = value
-
- proc.wait()
- proc.stdout.close()
- return results
-
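# Editorial sketch, not part of the original file: _aux_env_search() above pipes
# environment.bz2 through PORTAGE_BZIP2_COMMAND; for ad-hoc debugging the same
# file can be read with the standard library. The vdb path below is a placeholder,
# and multi-line values are truncated at the first line in this simplified check.
import bz2
import re

env_file = '/var/db/pkg/sys-apps/portage-2.3.40/environment.bz2'  # hypothetical entry
assign_re = re.compile(r'^(?:declare\s+-\S+\s+|declare\s+|export\s+)?SRC_URI=(.*)$', re.M)
with bz2.open(env_file, mode='rt', errors='replace') as f:
    match = assign_re.search(f.read())
print(match.group(1) if match else 'SRC_URI not set')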
- def aux_update(self, cpv, values):
- mylink = self._dblink(cpv)
- if not mylink.exists():
- raise KeyError(cpv)
- self._bump_mtime(cpv)
- self._clear_pkg_cache(mylink)
- for k, v in values.items():
- if v:
- mylink.setfile(k, v)
- else:
- try:
- os.unlink(os.path.join(self.getpath(cpv), k))
- except EnvironmentError:
- pass
- self._bump_mtime(cpv)
-
- def counter_tick(self, myroot=None, mycpv=None):
- """
- @param myroot: ignored, self._eroot is used instead
- """
- return self.counter_tick_core(incrementing=1, mycpv=mycpv)
-
- def get_counter_tick_core(self, myroot=None, mycpv=None):
- """
- Use this method to retrieve the counter instead
- of having to trust the value of a global counter
- file that can lead to invalid COUNTER
- generation. When cache is valid, the package COUNTER
- files are not read and we rely on the timestamp of
- the package directory to validate cache. The stat
- calls should only take a short time, so performance
- is sufficient without having to rely on a potentially
- corrupt global counter file.
-
- The global counter file located at
- $CACHE_PATH/counter serves to record the
- counter of the last installed package and
- it also corresponds to the total number of
- installation actions that have occurred in
- the history of this package database.
-
- @param myroot: ignored, self._eroot is used instead
- """
- del myroot
- counter = -1
- try:
- with io.open(
- _unicode_encode(self._counter_path,
- encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['repo.content'],
- errors='replace') as f:
- try:
- counter = long(f.readline().strip())
- except (OverflowError, ValueError) as e:
- writemsg(_("!!! COUNTER file is corrupt: '%s'\n") %
- self._counter_path, noiselevel=-1)
- writemsg("!!! %s\n" % (e,), noiselevel=-1)
- except EnvironmentError as e:
- # Silently allow ENOENT since files under
- # /var/cache/ are allowed to disappear.
- if e.errno != errno.ENOENT:
- writemsg(_("!!! Unable to read COUNTER file: '%s'\n") % \
- self._counter_path, noiselevel=-1)
- writemsg("!!! %s\n" % str(e), noiselevel=-1)
- del e
-
- if self._cached_counter == counter:
- max_counter = counter
- else:
- # We must ensure that we return a counter
- # value that is at least as large as the
- # highest one from the installed packages,
- # since having a corrupt value that is too low
- # can trigger incorrect AUTOCLEAN behavior due
- # to newly installed packages having lower
- # COUNTERs than the previous version in the
- # same slot.
- max_counter = counter
- for cpv in self.cpv_all():
- try:
- pkg_counter = int(self.aux_get(cpv, ["COUNTER"])[0])
- except (KeyError, OverflowError, ValueError):
- continue
- if pkg_counter > max_counter:
- max_counter = pkg_counter
-
- return max_counter + 1
-
- def counter_tick_core(self, myroot=None, incrementing=1, mycpv=None):
- """
- This method will grab the next COUNTER value and record it back
- to the global file. Note that every package install must have
- a unique counter, since a slotmove update can move two packages
- into the same SLOT and in that case it's important that both
- packages have different COUNTER metadata.
-
- @param myroot: ignored, self._eroot is used instead
- @param mycpv: ignored
- @rtype: int
- @return: new counter value
- """
- myroot = None
- mycpv = None
- self.lock()
- try:
- counter = self.get_counter_tick_core() - 1
- if incrementing:
- #increment counter
- counter += 1
- # update new global counter file
- try:
- write_atomic(self._counter_path, str(counter))
- except InvalidLocation:
- self.settings._init_dirs()
- write_atomic(self._counter_path, str(counter))
- self._cached_counter = counter
-
- # Since we hold a lock, this is a good opportunity
- # to flush the cache. Note that this will only
- # flush the cache periodically in the main process
- # when _aux_cache_threshold is exceeded.
- self.flush_cache()
- finally:
- self.unlock()
-
- return counter
-
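# Editorial sketch, not part of the original file: the next COUNTER is derived
# from the global counter file and, when the cached value is stale, from the
# COUNTER of every installed package. get_counter_tick_core() is read-only,
# while counter_tick() also writes the new value back under the vdb lock.
import portage

vardb = portage.db[portage.settings['EROOT']]['vartree'].dbapi
print('next COUNTER would be:', vardb.get_counter_tick_core())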
- def _dblink(self, cpv):
- category, pf = catsplit(cpv)
- return dblink(category, pf, settings=self.settings,
- vartree=self.vartree, treetype="vartree")
-
- def removeFromContents(self, pkg, paths, relative_paths=True):
- """
- @param pkg: cpv for an installed package
- @type pkg: string
- @param paths: paths of files to remove from contents
- @type paths: iterable
- """
- if not hasattr(pkg, "getcontents"):
- pkg = self._dblink(pkg)
- root = self.settings['ROOT']
- root_len = len(root) - 1
- new_contents = pkg.getcontents().copy()
- removed = 0
-
- for filename in paths:
- filename = _unicode_decode(filename,
- encoding=_encodings['content'], errors='strict')
- filename = normalize_path(filename)
- if relative_paths:
- relative_filename = filename
- else:
- relative_filename = filename[root_len:]
- contents_key = pkg._match_contents(relative_filename)
- if contents_key:
- # It's possible for two different paths to refer to the same
- # contents_key, due to directory symlinks. Therefore, pass a
- # default value to pop, in order to avoid a KeyError which
- # could otherwise be triggered (see bug #454400).
- new_contents.pop(contents_key, None)
- removed += 1
-
- if removed:
- # Also remove corresponding NEEDED lines, so that they do
- # not corrupt LinkageMap data for preserve-libs.
- needed_filename = os.path.join(pkg.dbdir, LinkageMap._needed_aux_key)
- new_needed = None
- try:
- with io.open(_unicode_encode(needed_filename,
- encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['repo.content'],
- errors='replace') as f:
- needed_lines = f.readlines()
- except IOError as e:
- if e.errno not in (errno.ENOENT, errno.ESTALE):
- raise
- else:
- new_needed = []
- for l in needed_lines:
- l = l.rstrip("\n")
- if not l:
- continue
- try:
- entry = NeededEntry.parse(needed_filename, l)
- except InvalidData as e:
- writemsg_level("\n%s\n\n" % (e,),
- level=logging.ERROR, noiselevel=-1)
- continue
-
- filename = os.path.join(root, entry.filename.lstrip(os.sep))
- if filename in new_contents:
- new_needed.append(entry)
-
- self.writeContentsToContentsFile(pkg, new_contents, new_needed=new_needed)
-
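# Editorial sketch, not part of the original file: dropping a no-longer-present
# file from an installed package's CONTENTS (and pruning matching NEEDED
# entries). The cpv and path below are placeholders; this modifies the vdb and
# requires write access to it.
import portage

vardb = portage.db[portage.settings['EROOT']]['vartree'].dbapi
vardb.removeFromContents('app-misc/example-1.0',
    ['/usr/share/example/stale.txt'], relative_paths=False)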
- def writeContentsToContentsFile(self, pkg, new_contents, new_needed=None):
- """
- @param pkg: package to write contents file for
- @type pkg: dblink
- @param new_contents: contents to write to CONTENTS file
- @type new_contents: contents dictionary of the form
- {u'/path/to/file' : (contents_attribute 1, ...), ...}
- @param new_needed: new NEEDED entries
- @type new_needed: list of NeededEntry
- """
- root = self.settings['ROOT']
- self._bump_mtime(pkg.mycpv)
- if new_needed is not None:
- f = atomic_ofstream(os.path.join(pkg.dbdir, LinkageMap._needed_aux_key))
- for entry in new_needed:
- f.write(_unicode(entry))
- f.close()
- f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
- write_contents(new_contents, root, f)
- f.close()
- self._bump_mtime(pkg.mycpv)
- pkg._clear_contents_cache()
-
- class _owners_cache(object):
- """
- This class maintains a hash table that serves to index package
- contents by mapping the basename of a file to a list of possible
- packages that own it. This is used to optimize owner lookups
- by narrowing the search down to a smaller number of packages.
- """
- _new_hash = md5
- _hash_bits = 16
- _hex_chars = _hash_bits // 4
-
- def __init__(self, vardb):
- self._vardb = vardb
-
- def add(self, cpv):
- eroot_len = len(self._vardb._eroot)
- pkg_hash = self._hash_pkg(cpv)
- db = self._vardb._dblink(cpv)
- if not db.getcontents():
- # Empty path is a code used to represent empty contents.
- self._add_path("", pkg_hash)
-
- for x in db._contents.keys():
- self._add_path(x[eroot_len:], pkg_hash)
-
- self._vardb._aux_cache["modified"].add(cpv)
-
- def _add_path(self, path, pkg_hash):
- """
- Empty path is a code that represents empty contents.
- """
- if path:
- name = os.path.basename(path.rstrip(os.path.sep))
- if not name:
- return
- else:
- name = path
- name_hash = self._hash_str(name)
- base_names = self._vardb._aux_cache["owners"]["base_names"]
- pkgs = base_names.get(name_hash)
- if pkgs is None:
- pkgs = {}
- base_names[name_hash] = pkgs
- pkgs[pkg_hash] = None
-
- def _hash_str(self, s):
- h = self._new_hash()
- # Always use a constant utf_8 encoding here, since
- # the "default" encoding can change.
- h.update(_unicode_encode(s,
- encoding=_encodings['repo.content'],
- errors='backslashreplace'))
- h = h.hexdigest()
- h = h[-self._hex_chars:]
- h = int(h, 16)
- return h
-
- def _hash_pkg(self, cpv):
- counter, mtime = self._vardb.aux_get(
- cpv, ["COUNTER", "_mtime_"])
- try:
- counter = int(counter)
- except ValueError:
- counter = 0
- return (_unicode(cpv), counter, mtime)
-
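# Editorial sketch, not part of the original file: the base_names index maps a
# 16-bit truncation of the md5 of a file's basename to the packages that might
# own it, mirroring _hash_str() above.
import hashlib

def basename_hash(name, hash_bits=16):
    hex_chars = hash_bits // 4
    digest = hashlib.md5(name.encode('utf_8', 'backslashreplace')).hexdigest()
    return int(digest[-hex_chars:], 16)

print(basename_hash('libc.so.6'))  # bucket key used for owner lookups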
- class _owners_db(object):
-
- def __init__(self, vardb):
- self._vardb = vardb
-
- def populate(self):
- self._populate()
-
- def _populate(self):
- owners_cache = vardbapi._owners_cache(self._vardb)
- cached_hashes = set()
- base_names = self._vardb._aux_cache["owners"]["base_names"]
-
- # Take inventory of all cached package hashes.
- for name, hash_values in list(base_names.items()):
- if not isinstance(hash_values, dict):
- del base_names[name]
- continue
- cached_hashes.update(hash_values)
-
- # Create sets of valid package hashes and uncached packages.
- uncached_pkgs = set()
- hash_pkg = owners_cache._hash_pkg
- valid_pkg_hashes = set()
- for cpv in self._vardb.cpv_all():
- hash_value = hash_pkg(cpv)
- valid_pkg_hashes.add(hash_value)
- if hash_value not in cached_hashes:
- uncached_pkgs.add(cpv)
-
- # Cache any missing packages.
- for cpv in uncached_pkgs:
- owners_cache.add(cpv)
-
- # Delete any stale cache.
- stale_hashes = cached_hashes.difference(valid_pkg_hashes)
- if stale_hashes:
- for base_name_hash, bucket in list(base_names.items()):
- for hash_value in stale_hashes.intersection(bucket):
- del bucket[hash_value]
- if not bucket:
- del base_names[base_name_hash]
-
- return owners_cache
-
- def get_owners(self, path_iter):
- """
- @return: the owners as a dblink -> set(files) mapping.
- """
- owners = {}
- for owner, f in self.iter_owners(path_iter):
- owned_files = owners.get(owner)
- if owned_files is None:
- owned_files = set()
- owners[owner] = owned_files
- owned_files.add(f)
- return owners
-
- def getFileOwnerMap(self, path_iter):
- owners = self.get_owners(path_iter)
- file_owners = {}
- for pkg_dblink, files in owners.items():
- for f in files:
- owner_set = file_owners.get(f)
- if owner_set is None:
- owner_set = set()
- file_owners[f] = owner_set
- owner_set.add(pkg_dblink)
- return file_owners
-
- def iter_owners(self, path_iter):
- """
- Iterate over tuples of (dblink, path). In order to avoid
- consuming too many resources for too much time, resources
- are only allocated for the duration of a given iter_owners()
- call. Therefore, to maximize reuse of resources when searching
- for multiple files, it's best to search for them all in a single
- call.
- """
-
- if not isinstance(path_iter, list):
- path_iter = list(path_iter)
- owners_cache = self._populate()
- vardb = self._vardb
- root = vardb._eroot
- hash_pkg = owners_cache._hash_pkg
- hash_str = owners_cache._hash_str
- base_names = self._vardb._aux_cache["owners"]["base_names"]
- case_insensitive = "case-insensitive-fs" \
- in vardb.settings.features
-
- dblink_cache = {}
-
- def dblink(cpv):
- x = dblink_cache.get(cpv)
- if x is None:
- if len(dblink_cache) > 20:
- # Ensure that we don't run out of memory.
- raise StopIteration()
- x = self._vardb._dblink(cpv)
- dblink_cache[cpv] = x
- return x
-
- while path_iter:
-
- path = path_iter.pop()
- if case_insensitive:
- path = path.lower()
- is_basename = os.sep != path[:1]
- if is_basename:
- name = path
- else:
- name = os.path.basename(path.rstrip(os.path.sep))
-
- if not name:
- continue
-
- name_hash = hash_str(name)
- pkgs = base_names.get(name_hash)
- owners = []
- if pkgs is not None:
- try:
- for hash_value in pkgs:
- if not isinstance(hash_value, tuple) or \
- len(hash_value) != 3:
- continue
- cpv, counter, mtime = hash_value
- if not isinstance(cpv, basestring):
- continue
- try:
- current_hash = hash_pkg(cpv)
- except KeyError:
- continue
-
- if current_hash != hash_value:
- continue
-
- if is_basename:
- for p in dblink(cpv)._contents.keys():
- if os.path.basename(p) == name:
- owners.append((cpv, dblink(cpv).
- _contents.unmap_key(
- p)[len(root):]))
- else:
- key = dblink(cpv)._match_contents(path)
- if key is not False:
- owners.append(
- (cpv, key[len(root):]))
-
- except StopIteration:
- path_iter.append(path)
- del owners[:]
- dblink_cache.clear()
- gc.collect()
- for x in self._iter_owners_low_mem(path_iter):
- yield x
- return
- else:
- for cpv, p in owners:
- yield (dblink(cpv), p)
-
- def _iter_owners_low_mem(self, path_list):
- """
- This implementation will make a short-lived dblink instance (and
- parse CONTENTS) for every single installed package. This is
- slower but uses less memory than the method which uses the
- basename cache.
- """
-
- if not path_list:
- return
-
- case_insensitive = "case-insensitive-fs" \
- in self._vardb.settings.features
- path_info_list = []
- for path in path_list:
- if case_insensitive:
- path = path.lower()
- is_basename = os.sep != path[:1]
- if is_basename:
- name = path
- else:
- name = os.path.basename(path.rstrip(os.path.sep))
- path_info_list.append((path, name, is_basename))
-
- # Do work via the global event loop, so that it can be used
- # for indication of progress during the search (bug #461412).
- event_loop = (portage._internal_caller and
- global_event_loop() or EventLoop(main=False))
- root = self._vardb._eroot
-
- def search_pkg(cpv, search_future):
- dblnk = self._vardb._dblink(cpv)
- results = []
- for path, name, is_basename in path_info_list:
- if is_basename:
- for p in dblnk._contents.keys():
- if os.path.basename(p) == name:
- results.append((dblnk,
- dblnk._contents.unmap_key(
- p)[len(root):]))
- else:
- key = dblnk._match_contents(path)
- if key is not False:
- results.append(
- (dblnk, key[len(root):]))
- search_future.set_result(results)
-
- for cpv in self._vardb.cpv_all():
- search_future = event_loop.create_future()
- event_loop.call_soon(search_pkg, cpv, search_future)
- event_loop.run_until_complete(search_future)
- for result in search_future.result():
- yield result
-
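# Editorial sketch, not part of the original file: asking the vdb which
# installed package owns a path, via the _owners interface implemented above.
# '/bin/sh' is just an example path.
import portage

vardb = portage.db[portage.settings['EROOT']]['vartree'].dbapi
for pkg_dblink, relative_path in vardb._owners.iter_owners(['/bin/sh']):
    print(pkg_dblink.mycpv, relative_path)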
-class vartree(object):
- "this tree will scan a var/db/pkg database located at root (passed to init)"
- def __init__(self, root=None, virtual=DeprecationWarning, categories=None,
- settings=None):
-
- if settings is None:
- settings = portage.settings
-
- if root is not None and root != settings['ROOT']:
- warnings.warn("The 'root' parameter of the "
- "portage.dbapi.vartree.vartree"
- " constructor is now unused. Use "
- "settings['ROOT'] instead.",
- DeprecationWarning, stacklevel=2)
-
- if virtual is not DeprecationWarning:
- warnings.warn("The 'virtual' parameter of the "
- "portage.dbapi.vartree.vartree"
- " constructor is unused",
- DeprecationWarning, stacklevel=2)
-
- self.settings = settings
- self.dbapi = vardbapi(settings=settings, vartree=self)
- self.populated = 1
-
- @property
- def root(self):
- warnings.warn("The root attribute of "
- "portage.dbapi.vartree.vartree"
- " is deprecated. Use "
- "settings['ROOT'] instead.",
- DeprecationWarning, stacklevel=3)
- return self.settings['ROOT']
-
- def getpath(self, mykey, filename=None):
- return self.dbapi.getpath(mykey, filename=filename)
-
- def zap(self, mycpv):
- return
-
- def inject(self, mycpv):
- return
-
- def get_provide(self, mycpv):
- return []
-
- def get_all_provides(self):
- return {}
-
- def dep_bestmatch(self, mydep, use_cache=1):
- "compatibility method -- all matches, not just visible ones"
- #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
- mymatch = best(self.dbapi.match(
- dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
- use_cache=use_cache))
- if mymatch is None:
- return ""
- else:
- return mymatch
-
- def dep_match(self, mydep, use_cache=1):
- "compatibility method -- we want to see all matches, not just visible ones"
- #mymatch = match(mydep,self.dbapi)
- mymatch = self.dbapi.match(mydep, use_cache=use_cache)
- if mymatch is None:
- return []
- else:
- return mymatch
-
- def exists_specific(self, cpv):
- return self.dbapi.cpv_exists(cpv)
-
- def getallcpv(self):
- """temporary function, probably to be renamed --- Gets a list of all
- category/package-versions installed on the system."""
- return self.dbapi.cpv_all()
-
- def getallnodes(self):
- """new behavior: these are all *unmasked* nodes. There may or may not be available
- masked package for nodes in this nodes list."""
- return self.dbapi.cp_all()
-
- def getebuildpath(self, fullpackage):
- cat, package = catsplit(fullpackage)
- return self.getpath(fullpackage, filename=package+".ebuild")
-
- def getslot(self, mycatpkg):
- "Get a slot for a catpkg; assume it exists."
- try:
- return self.dbapi._pkg_str(mycatpkg, None).slot
- except KeyError:
- return ""
-
- def populate(self):
- self.populated=1
-
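# Editorial sketch, not part of the original file: the vartree wrapper defined
# below mostly delegates to its dbapi attribute; dep_bestmatch() returns the
# highest installed match for an atom, or "" when nothing is installed.
import portage

vdb_tree = portage.db[portage.settings['EROOT']]['vartree']
best = vdb_tree.dep_bestmatch('sys-apps/portage')
print(best, vdb_tree.getslot(best) if best else '(not installed)')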
-class dblink(object):
- """
- This class provides an interface to the installed package database.
- At present this is implemented as a text backend in /var/db/pkg.
- """
-
- import re
- _normalize_needed = re.compile(r'//|^[^/]|./$|(^|/)\.\.?(/|$)')
-
- _contents_re = re.compile(r'^(' + \
- r'(?P<dir>(dev|dir|fif) (.+))|' + \
- r'(?P<obj>(obj) (.+) (\S+) (\d+))|' + \
- r'(?P<sym>(sym) (.+) -> (.+) ((\d+)|(?P<oldsym>(' + \
- r'\(\d+, \d+L, \d+L, \d+, \d+, \d+, \d+L, \d+, (\d+), \d+\)))))' + \
- r')$'
- )
-
- # These files are generated by emerge, so we need to remove
- # them when they are the only thing left in a directory.
- _infodir_cleanup = frozenset(["dir", "dir.old"])
-
- _ignored_unlink_errnos = (
- errno.EBUSY, errno.ENOENT,
- errno.ENOTDIR, errno.EISDIR)
-
- _ignored_rmdir_errnos = (
- errno.EEXIST, errno.ENOTEMPTY,
- errno.EBUSY, errno.ENOENT,
- errno.ENOTDIR, errno.EISDIR,
- errno.EPERM)
-
- def __init__(self, cat, pkg, myroot=None, settings=None, treetype=None,
- vartree=None, blockers=None, scheduler=None, pipe=None):
- """
- Creates a DBlink object for a given CPV.
- The given CPV need not already be present in the database.
-
- @param cat: Category
- @type cat: String
- @param pkg: Package (PV)
- @type pkg: String
- @param myroot: ignored, settings['ROOT'] is used instead
- @type myroot: String (Path)
- @param settings: Typically portage.settings
- @type settings: portage.config
- @param treetype: one of ['porttree','bintree','vartree']
- @type treetype: String
- @param vartree: an instance of vartree corresponding to myroot.
- @type vartree: vartree
- """
-
- if settings is None:
- raise TypeError("settings argument is required")
-
- mysettings = settings
- self._eroot = mysettings['EROOT']
- self.cat = cat
- self.pkg = pkg
- self.mycpv = self.cat + "/" + self.pkg
- if self.mycpv == settings.mycpv and \
- isinstance(settings.mycpv, _pkg_str):
- self.mycpv = settings.mycpv
- else:
- self.mycpv = _pkg_str(self.mycpv)
- self.mysplit = list(self.mycpv.cpv_split[1:])
- self.mysplit[0] = self.mycpv.cp
- self.treetype = treetype
- if vartree is None:
- vartree = portage.db[self._eroot]["vartree"]
- self.vartree = vartree
- self._blockers = blockers
- self._scheduler = scheduler
- self.dbroot = normalize_path(os.path.join(self._eroot, VDB_PATH))
- self.dbcatdir = self.dbroot+"/"+cat
- self.dbpkgdir = self.dbcatdir+"/"+pkg
- self.dbtmpdir = self.dbcatdir+"/"+MERGING_IDENTIFIER+pkg
- self.dbdir = self.dbpkgdir
- self.settings = mysettings
- self._verbose = self.settings.get("PORTAGE_VERBOSE") == "1"
-
- self.myroot = self.settings['ROOT']
- self._installed_instance = None
- self.contentscache = None
- self._contents_inodes = None
- self._contents_basenames = None
- self._linkmap_broken = False
- self._device_path_map = {}
- self._hardlink_merge_map = {}
- self._hash_key = (self._eroot, self.mycpv)
- self._protect_obj = None
- self._pipe = pipe
- self._postinst_failure = False
-
- # When necessary, this attribute is modified for
- # compliance with RESTRICT=preserve-libs.
- self._preserve_libs = "preserve-libs" in mysettings.features
- self._contents = ContentsCaseSensitivityManager(self)
- self._slot_locks = []
-
- def __hash__(self):
- return hash(self._hash_key)
-
- def __eq__(self, other):
- return isinstance(other, dblink) and \
- self._hash_key == other._hash_key
-
- def _get_protect_obj(self):
-
- if self._protect_obj is None:
- self._protect_obj = ConfigProtect(self._eroot,
- portage.util.shlex_split(
- self.settings.get("CONFIG_PROTECT", "")),
- portage.util.shlex_split(
- self.settings.get("CONFIG_PROTECT_MASK", "")),
- case_insensitive=("case-insensitive-fs"
- in self.settings.features))
-
- return self._protect_obj
-
- def isprotected(self, obj):
- return self._get_protect_obj().isprotected(obj)
-
- def updateprotect(self):
- self._get_protect_obj().updateprotect()
-
- def lockdb(self):
- self.vartree.dbapi.lock()
-
- def unlockdb(self):
- self.vartree.dbapi.unlock()
-
- def _slot_locked(f):
- """
- A decorator function which, when parallel-install is enabled,
- acquires and releases slot locks for the current package and
- blocked packages. This is required in order to account for
- interactions with blocked packages (involving resolution of
- file collisions).
- """
- def wrapper(self, *args, **kwargs):
- if "parallel-install" in self.settings.features:
- self._acquire_slot_locks(
- kwargs.get("mydbapi", self.vartree.dbapi))
- try:
- return f(self, *args, **kwargs)
- finally:
- self._release_slot_locks()
- return wrapper
-
- def _acquire_slot_locks(self, db):
- """
- Acquire slot locks for the current package and blocked packages.
- """
-
- slot_atoms = []
-
- try:
- slot = self.mycpv.slot
- except AttributeError:
- slot, = db.aux_get(self.mycpv, ["SLOT"])
- slot = slot.partition("/")[0]
-
- slot_atoms.append(portage.dep.Atom(
- "%s:%s" % (self.mycpv.cp, slot)))
-
- for blocker in self._blockers or []:
- slot_atoms.append(blocker.slot_atom)
-
- # Sort atoms so that locks are acquired in a predictable
- # order, preventing deadlocks with competitors that may
- # be trying to acquire overlapping locks.
- slot_atoms.sort()
- for slot_atom in slot_atoms:
- self.vartree.dbapi._slot_lock(slot_atom)
- self._slot_locks.append(slot_atom)
-
- def _release_slot_locks(self):
- """
- Release all slot locks.
- """
- while self._slot_locks:
- self.vartree.dbapi._slot_unlock(self._slot_locks.pop())
-
- def getpath(self):
- "return path to location of db information (for >>> informational display)"
- return self.dbdir
-
- def exists(self):
- "does the db entry exist? boolean."
- return os.path.exists(self.dbdir)
-
- def delete(self):
- """
- Remove this entry from the database
- """
- try:
- os.lstat(self.dbdir)
- except OSError as e:
- if e.errno not in (errno.ENOENT, errno.ENOTDIR, errno.ESTALE):
- raise
- return
-
- # Check validity of self.dbdir before attempting to remove it.
- if not self.dbdir.startswith(self.dbroot):
- writemsg(_("portage.dblink.delete(): invalid dbdir: %s\n") % \
- self.dbdir, noiselevel=-1)
- return
-
- if self.dbdir is self.dbpkgdir:
- counter, = self.vartree.dbapi.aux_get(
- self.mycpv, ["COUNTER"])
- self.vartree.dbapi._cache_delta.recordEvent(
- "remove", self.mycpv,
- self.settings["SLOT"].split("/")[0], counter)
-
- shutil.rmtree(self.dbdir)
- # If empty, remove parent category directory.
- try:
- os.rmdir(os.path.dirname(self.dbdir))
- except OSError:
- pass
- self.vartree.dbapi._remove(self)
-
- # Use self.dbroot since we need an existing path for syncfs.
- try:
- self._merged_path(self.dbroot, os.lstat(self.dbroot))
- except OSError:
- pass
-
- self._post_merge_sync()
-
- def clearcontents(self):
- """
- For a given db entry (self), erase the CONTENTS values.
- """
- self.lockdb()
- try:
- if os.path.exists(self.dbdir+"/CONTENTS"):
- os.unlink(self.dbdir+"/CONTENTS")
- finally:
- self.unlockdb()
-
- def _clear_contents_cache(self):
- self.contentscache = None
- self._contents_inodes = None
- self._contents_basenames = None
- self._contents.clear_cache()
-
- def getcontents(self):
- """
- Get the installed files of a given package (aka what that package installed)
- """
- if self.contentscache is not None:
- return self.contentscache
- contents_file = os.path.join(self.dbdir, "CONTENTS")
- pkgfiles = {}
- try:
- with io.open(_unicode_encode(contents_file,
- encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['repo.content'],
- errors='replace') as f:
- mylines = f.readlines()
- except EnvironmentError as e:
- if e.errno != errno.ENOENT:
- raise
- del e
- self.contentscache = pkgfiles
- return pkgfiles
-
- null_byte = "\0"
- normalize_needed = self._normalize_needed
- contents_re = self._contents_re
- obj_index = contents_re.groupindex['obj']
- dir_index = contents_re.groupindex['dir']
- sym_index = contents_re.groupindex['sym']
- # The old symlink format may exist on systems that have packages
- # which were installed many years ago (see bug #351814).
- oldsym_index = contents_re.groupindex['oldsym']
- # CONTENTS files already contain EPREFIX
- myroot = self.settings['ROOT']
- if myroot == os.path.sep:
- myroot = None
- # used to generate parent dir entries
- dir_entry = ("dir",)
- eroot_split_len = len(self.settings["EROOT"].split(os.sep)) - 1
- pos = 0
- errors = []
- for pos, line in enumerate(mylines):
- if null_byte in line:
- # Null bytes are a common indication of corruption.
- errors.append((pos + 1, _("Null byte found in CONTENTS entry")))
- continue
- line = line.rstrip("\n")
- m = contents_re.match(line)
- if m is None:
- errors.append((pos + 1, _("Unrecognized CONTENTS entry")))
- continue
-
- if m.group(obj_index) is not None:
- base = obj_index
- #format: type, mtime, md5sum
- data = (m.group(base+1), m.group(base+4), m.group(base+3))
- elif m.group(dir_index) is not None:
- base = dir_index
- #format: type
- data = (m.group(base+1),)
- elif m.group(sym_index) is not None:
- base = sym_index
- if m.group(oldsym_index) is None:
- mtime = m.group(base+5)
- else:
- mtime = m.group(base+8)
- #format: type, mtime, dest
- data = (m.group(base+1), mtime, m.group(base+3))
- else:
- # This won't happen as long as the regular expression
- # is written to only match valid entries.
- raise AssertionError(_("required group not found " + \
- "in CONTENTS entry: '%s'") % line)
-
- path = m.group(base+2)
- if normalize_needed.search(path) is not None:
- path = normalize_path(path)
- if not path.startswith(os.path.sep):
- path = os.path.sep + path
-
- if myroot is not None:
- path = os.path.join(myroot, path.lstrip(os.path.sep))
-
- # Implicitly add parent directories, since we can't necessarily
- # assume that they are explicitly listed in CONTENTS, and it's
- # useful for callers if they can rely on parent directory entries
- # being generated here (crucial for things like dblink.isowner()).
- path_split = path.split(os.sep)
- path_split.pop()
- while len(path_split) > eroot_split_len:
- parent = os.sep.join(path_split)
- if parent in pkgfiles:
- break
- pkgfiles[parent] = dir_entry
- path_split.pop()
-
- pkgfiles[path] = data
-
- if errors:
- writemsg(_("!!! Parse error in '%s'\n") % contents_file, noiselevel=-1)
- for pos, e in errors:
- writemsg(_("!!! line %d: %s\n") % (pos, e), noiselevel=-1)
- self.contentscache = pkgfiles
- return pkgfiles
-
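# Editorial sketch, not part of the original file: CONTENTS lines as matched by
# _contents_re above look like the following (md5 and mtime values are fake):
#
#   dir /usr/share/example
#   obj /usr/share/example/data.txt d41d8cd98f00b204e9800998ecf8427e 1531234567
#   sym /usr/bin/example -> ../share/example/run.sh 1531234567
#
# getcontents() parses them into {path: ("dir",)}, {path: ("obj", mtime, md5)}
# and {path: ("sym", mtime, target)} entries, adding parent directories
# implicitly. Reading an installed package's contents:
import portage

vardb = portage.db[portage.settings['EROOT']]['vartree'].dbapi
contents = vardb._dblink(vardb.cpv_all()[0]).getcontents()
for path, data in list(contents.items())[:5]:
    print(path, data)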
- def _prune_plib_registry(self, unmerge=False,
- needed=None, preserve_paths=None):
- # remove preserved libraries that don't have any consumers left
- if not (self._linkmap_broken or
- self.vartree.dbapi._linkmap is None or
- self.vartree.dbapi._plib_registry is None):
- self.vartree.dbapi._fs_lock()
- plib_registry = self.vartree.dbapi._plib_registry
- plib_registry.lock()
- try:
- plib_registry.load()
-
- unmerge_with_replacement = \
- unmerge and preserve_paths is not None
- if unmerge_with_replacement:
- # If self.mycpv is about to be unmerged and we
- # have a replacement package, we want to exclude
- # the irrelevant NEEDED data that belongs to
- # files which are being unmerged now.
- exclude_pkgs = (self.mycpv,)
- else:
- exclude_pkgs = None
-
- self._linkmap_rebuild(exclude_pkgs=exclude_pkgs,
- include_file=needed, preserve_paths=preserve_paths)
-
- if unmerge:
- unmerge_preserve = None
- if not unmerge_with_replacement:
- unmerge_preserve = \
- self._find_libs_to_preserve(unmerge=True)
- counter = self.vartree.dbapi.cpv_counter(self.mycpv)
- try:
- slot = self.mycpv.slot
- except AttributeError:
- slot = _pkg_str(self.mycpv, slot=self.settings["SLOT"]).slot
- plib_registry.unregister(self.mycpv, slot, counter)
- if unmerge_preserve:
- for path in sorted(unmerge_preserve):
- contents_key = self._match_contents(path)
- if not contents_key:
- continue
- obj_type = self.getcontents()[contents_key][0]
- self._display_merge(_(">>> needed %s %s\n") % \
- (obj_type, contents_key), noiselevel=-1)
- plib_registry.register(self.mycpv,
- slot, counter, unmerge_preserve)
- # Remove the preserved files from our contents
- # so that they won't be unmerged.
- self.vartree.dbapi.removeFromContents(self,
- unmerge_preserve)
-
- unmerge_no_replacement = \
- unmerge and not unmerge_with_replacement
- cpv_lib_map = self._find_unused_preserved_libs(
- unmerge_no_replacement)
- if cpv_lib_map:
- self._remove_preserved_libs(cpv_lib_map)
- self.vartree.dbapi.lock()
- try:
- for cpv, removed in cpv_lib_map.items():
- if not self.vartree.dbapi.cpv_exists(cpv):
- continue
- self.vartree.dbapi.removeFromContents(cpv, removed)
- finally:
- self.vartree.dbapi.unlock()
-
- plib_registry.store()
- finally:
- plib_registry.unlock()
- self.vartree.dbapi._fs_unlock()
-
- @_slot_locked
- def unmerge(self, pkgfiles=None, trimworld=None, cleanup=True,
- ldpath_mtimes=None, others_in_slot=None, needed=None,
- preserve_paths=None):
- """
- Calls prerm
- Unmerges a given package (CPV)
- Calls postrm
- Calls cleanrm
- Calls env_update
-
- @param pkgfiles: files to unmerge (generally self.getcontents() )
- @type pkgfiles: Dictionary
- @param trimworld: Unused
- @type trimworld: Boolean
- @param cleanup: cleanup to pass to doebuild (see doebuild)
- @type cleanup: Boolean
- @param ldpath_mtimes: mtimes to pass to env_update (see env_update)
- @type ldpath_mtimes: Dictionary
- @param others_in_slot: all dblink instances in this slot, excluding self
- @type others_in_slot: list
- @param needed: Filename containing libraries needed after unmerge.
- @type needed: String
- @param preserve_paths: Libraries preserved by a package instance that
- is currently being merged. They need to be explicitly passed to the
- LinkageMap, since they are not registered in the
- PreservedLibsRegistry yet.
- @type preserve_paths: set
- @rtype: Integer
- @return:
- 1. os.EX_OK if everything went well.
- 2. return code of the failed phase (for prerm, postrm, cleanrm)
- """
-
- if trimworld is not None:
- warnings.warn("The trimworld parameter of the " + \
- "portage.dbapi.vartree.dblink.unmerge()" + \
- " method is now unused.",
- DeprecationWarning, stacklevel=2)
-
- background = False
- log_path = self.settings.get("PORTAGE_LOG_FILE")
- if self._scheduler is None:
- # We create a scheduler instance and use it to
- # log unmerge output separately from merge output.
- self._scheduler = SchedulerInterface(portage._internal_caller and
- global_event_loop() or EventLoop(main=False))
- if self.settings.get("PORTAGE_BACKGROUND") == "subprocess":
- if self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "1":
- self.settings["PORTAGE_BACKGROUND"] = "1"
- self.settings.backup_changes("PORTAGE_BACKGROUND")
- background = True
- elif self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "0":
- self.settings["PORTAGE_BACKGROUND"] = "0"
- self.settings.backup_changes("PORTAGE_BACKGROUND")
- elif self.settings.get("PORTAGE_BACKGROUND") == "1":
- background = True
-
- self.vartree.dbapi._bump_mtime(self.mycpv)
- showMessage = self._display_merge
- if self.vartree.dbapi._categories is not None:
- self.vartree.dbapi._categories = None
-
- # When others_in_slot is not None, the backup has already been
- # handled by the caller.
- caller_handles_backup = others_in_slot is not None
-
- # When others_in_slot is supplied, the security check has already been
- # done for this slot, so it shouldn't be repeated until the next
- # replacement or unmerge operation.
- if others_in_slot is None:
- slot = self.vartree.dbapi._pkg_str(self.mycpv, None).slot
- slot_matches = self.vartree.dbapi.match(
- "%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
- others_in_slot = []
- for cur_cpv in slot_matches:
- if cur_cpv == self.mycpv:
- continue
- others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
- settings=self.settings, vartree=self.vartree,
- treetype="vartree", pipe=self._pipe))
-
- retval = self._security_check([self] + others_in_slot)
- if retval:
- return retval
-
- contents = self.getcontents()
- # Now, don't assume that the name of the ebuild is the same as the
- # name of the dir; the package may have been moved.
- myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
- failures = 0
- ebuild_phase = "prerm"
- mystuff = os.listdir(self.dbdir)
- for x in mystuff:
- if x.endswith(".ebuild"):
- if x[:-7] != self.pkg:
- # Clean up after vardbapi.move_ent() breakage in
- # portage versions before 2.1.2
- os.rename(os.path.join(self.dbdir, x), myebuildpath)
- write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
- break
-
- if self.mycpv != self.settings.mycpv or \
- "EAPI" not in self.settings.configdict["pkg"]:
- # We avoid a redundant setcpv call here when
- # the caller has already taken care of it.
- self.settings.setcpv(self.mycpv, mydb=self.vartree.dbapi)
-
- eapi_unsupported = False
- try:
- doebuild_environment(myebuildpath, "prerm",
- settings=self.settings, db=self.vartree.dbapi)
- except UnsupportedAPIException as e:
- eapi_unsupported = e
-
- if self._preserve_libs and "preserve-libs" in \
- self.settings["PORTAGE_RESTRICT"].split():
- self._preserve_libs = False
-
- builddir_lock = None
- scheduler = self._scheduler
- retval = os.EX_OK
- try:
- # Only create builddir_lock if the caller
- # has not already acquired the lock.
- if "PORTAGE_BUILDDIR_LOCKED" not in self.settings:
- builddir_lock = EbuildBuildDir(
- scheduler=scheduler,
- settings=self.settings)
- scheduler.run_until_complete(builddir_lock.async_lock())
- prepare_build_dirs(settings=self.settings, cleanup=True)
- log_path = self.settings.get("PORTAGE_LOG_FILE")
-
- # Do this before the following _prune_plib_registry call, since
- # that removes preserved libraries from our CONTENTS, and we
- # may want to backup those libraries first.
- if not caller_handles_backup:
- retval = self._pre_unmerge_backup(background)
- if retval != os.EX_OK:
- showMessage(_("!!! FAILED prerm: quickpkg: %s\n") % retval,
- level=logging.ERROR, noiselevel=-1)
- return retval
-
- self._prune_plib_registry(unmerge=True, needed=needed,
- preserve_paths=preserve_paths)
-
- # Log the error after PORTAGE_LOG_FILE is initialized
- # by prepare_build_dirs above.
- if eapi_unsupported:
- # Sometimes this happens due to corruption of the EAPI file.
- failures += 1
- showMessage(_("!!! FAILED prerm: %s\n") % \
- os.path.join(self.dbdir, "EAPI"),
- level=logging.ERROR, noiselevel=-1)
- showMessage("%s\n" % (eapi_unsupported,),
- level=logging.ERROR, noiselevel=-1)
- elif os.path.isfile(myebuildpath):
- phase = EbuildPhase(background=background,
- phase=ebuild_phase, scheduler=scheduler,
- settings=self.settings)
- phase.start()
- retval = phase.wait()
-
- # XXX: Decide how to handle failures here.
- if retval != os.EX_OK:
- failures += 1
- showMessage(_("!!! FAILED prerm: %s\n") % retval,
- level=logging.ERROR, noiselevel=-1)
-
- self.vartree.dbapi._fs_lock()
- try:
- self._unmerge_pkgfiles(pkgfiles, others_in_slot)
- finally:
- self.vartree.dbapi._fs_unlock()
- self._clear_contents_cache()
-
- if not eapi_unsupported and os.path.isfile(myebuildpath):
- ebuild_phase = "postrm"
- phase = EbuildPhase(background=background,
- phase=ebuild_phase, scheduler=scheduler,
- settings=self.settings)
- phase.start()
- retval = phase.wait()
-
- # XXX: Decide how to handle failures here.
- if retval != os.EX_OK:
- failures += 1
- showMessage(_("!!! FAILED postrm: %s\n") % retval,
- level=logging.ERROR, noiselevel=-1)
-
- finally:
- self.vartree.dbapi._bump_mtime(self.mycpv)
- try:
- if not eapi_unsupported and os.path.isfile(myebuildpath):
- if retval != os.EX_OK:
- msg_lines = []
- msg = _("The '%(ebuild_phase)s' "
- "phase of the '%(cpv)s' package "
- "has failed with exit value %(retval)s.") % \
- {"ebuild_phase":ebuild_phase, "cpv":self.mycpv,
- "retval":retval}
- from textwrap import wrap
- msg_lines.extend(wrap(msg, 72))
- msg_lines.append("")
-
- ebuild_name = os.path.basename(myebuildpath)
- ebuild_dir = os.path.dirname(myebuildpath)
- msg = _("The problem occurred while executing "
- "the ebuild file named '%(ebuild_name)s' "
- "located in the '%(ebuild_dir)s' directory. "
- "If necessary, manually remove "
- "the environment.bz2 file and/or the "
- "ebuild file located in that directory.") % \
- {"ebuild_name":ebuild_name, "ebuild_dir":ebuild_dir}
- msg_lines.extend(wrap(msg, 72))
- msg_lines.append("")
-
- msg = _("Removal "
- "of the environment.bz2 file is "
- "preferred since it may allow the "
- "removal phases to execute successfully. "
- "The ebuild will be "
- "sourced and the eclasses "
- "from the current portage tree will be used "
- "when necessary. Removal of "
- "the ebuild file will cause the "
- "pkg_prerm() and pkg_postrm() removal "
- "phases to be skipped entirely.")
- msg_lines.extend(wrap(msg, 72))
-
- self._eerror(ebuild_phase, msg_lines)
-
- self._elog_process(phasefilter=("prerm", "postrm"))
-
- if retval == os.EX_OK:
- try:
- doebuild_environment(myebuildpath, "cleanrm",
- settings=self.settings, db=self.vartree.dbapi)
- except UnsupportedAPIException:
- pass
- phase = EbuildPhase(background=background,
- phase="cleanrm", scheduler=scheduler,
- settings=self.settings)
- phase.start()
- retval = phase.wait()
- finally:
- if builddir_lock is not None:
- scheduler.run_until_complete(
- builddir_lock.async_unlock())
-
- if log_path is not None:
-
- if not failures and 'unmerge-logs' not in self.settings.features:
- try:
- os.unlink(log_path)
- except OSError:
- pass
-
- try:
- st = os.stat(log_path)
- except OSError:
- pass
- else:
- if st.st_size == 0:
- try:
- os.unlink(log_path)
- except OSError:
- pass
-
- if log_path is not None and os.path.exists(log_path):
- # Restore this since it gets lost somewhere above and it
- # needs to be set for _display_merge() to be able to log.
- # Note that the log isn't necessarily supposed to exist
- # since if PORT_LOGDIR is unset then it's a temp file
- # so it gets cleaned above.
- self.settings["PORTAGE_LOG_FILE"] = log_path
- else:
- self.settings.pop("PORTAGE_LOG_FILE", None)
-
- env_update(target_root=self.settings['ROOT'],
- prev_mtimes=ldpath_mtimes,
- contents=contents, env=self.settings,
- writemsg_level=self._display_merge, vardbapi=self.vartree.dbapi)
-
- unmerge_with_replacement = preserve_paths is not None
- if not unmerge_with_replacement:
- # When there's a replacement package which calls us via treewalk,
- # treewalk will automatically call _prune_plib_registry for us.
- # Otherwise, we need to call _prune_plib_registry ourselves.
- # Don't pass in the "unmerge=True" flag here, since that flag
- # is intended to be used _prior_ to unmerge, not after.
- self._prune_plib_registry()
-
- return os.EX_OK
-
- def _display_merge(self, msg, level=0, noiselevel=0):
- if not self._verbose and noiselevel >= 0 and level < logging.WARN:
- return
- if self._scheduler is None:
- writemsg_level(msg, level=level, noiselevel=noiselevel)
- else:
- log_path = None
- if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
- log_path = self.settings.get("PORTAGE_LOG_FILE")
- background = self.settings.get("PORTAGE_BACKGROUND") == "1"
-
- if background and log_path is None:
- if level >= logging.WARN:
- writemsg_level(msg, level=level, noiselevel=noiselevel)
- else:
- self._scheduler.output(msg,
- log_path=log_path, background=background,
- level=level, noiselevel=noiselevel)
-
- def _show_unmerge(self, zing, desc, file_type, file_name):
- self._display_merge("%s %s %s %s\n" % \
- (zing, desc.ljust(8), file_type, file_name))
-
- def _unmerge_pkgfiles(self, pkgfiles, others_in_slot):
- """
-
- Unmerges the contents of a package from the liveFS and
- removes the VDB entry for self.
-
- @param pkgfiles: typically self.getcontents()
- @type pkgfiles: Dictionary { filename: [ 'type', '?', 'md5sum' ] }
- @param others_in_slot: all dblink instances in this slot, excluding self
- @type others_in_slot: list
- @rtype: None
- """
-
- os = _os_merge
- perf_md5 = perform_md5
- showMessage = self._display_merge
- show_unmerge = self._show_unmerge
- ignored_unlink_errnos = self._ignored_unlink_errnos
- ignored_rmdir_errnos = self._ignored_rmdir_errnos
-
- if not pkgfiles:
- showMessage(_("No package files given... Grabbing a set.\n"))
- pkgfiles = self.getcontents()
-
- if others_in_slot is None:
- others_in_slot = []
- slot = self.vartree.dbapi._pkg_str(self.mycpv, None).slot
- slot_matches = self.vartree.dbapi.match(
- "%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
- for cur_cpv in slot_matches:
- if cur_cpv == self.mycpv:
- continue
- others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
- settings=self.settings,
- vartree=self.vartree, treetype="vartree", pipe=self._pipe))
-
- cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file)
- stale_confmem = []
- protected_symlinks = {}
-
- unmerge_orphans = "unmerge-orphans" in self.settings.features
- calc_prelink = "prelink-checksums" in self.settings.features
-
- if pkgfiles:
- self.updateprotect()
- mykeys = list(pkgfiles)
- mykeys.sort()
- mykeys.reverse()
-
- #process symlinks second-to-last, directories last.
- mydirs = set()
-
- uninstall_ignore = portage.util.shlex_split(
- self.settings.get("UNINSTALL_IGNORE", ""))
-
- def unlink(file_name, lstatobj):
- if bsd_chflags:
- if lstatobj.st_flags != 0:
- bsd_chflags.lchflags(file_name, 0)
- parent_name = os.path.dirname(file_name)
- # Use normal stat/chflags for the parent since we want to
- # follow any symlinks to the real parent directory.
- pflags = os.stat(parent_name).st_flags
- if pflags != 0:
- bsd_chflags.chflags(parent_name, 0)
- try:
- if not stat.S_ISLNK(lstatobj.st_mode):
- # Remove permissions to ensure that any hardlinks to
- # suid/sgid files are rendered harmless.
- os.chmod(file_name, 0)
- os.unlink(file_name)
- except OSError as ose:
- # If the chmod or unlink fails, you are in trouble.
- # With Prefix this can be because the file is owned
- # by someone else (a screwup by root?), on a normal
- # system maybe filesystem corruption. In any case,
- # if we backtrace and die here, we leave the system
- # in a totally undefined state, hence we just bleed
- # like hell and continue to hopefully finish all our
- # administrative and pkg_postinst stuff.
- self._eerror("postrm",
- ["Could not chmod or unlink '%s': %s" % \
- (file_name, ose)])
- else:
-
- # Even though the file no longer exists, we log it
- # here so that _unmerge_dirs can see that we've
- # removed a file from this device, and will record
- # the parent directory for a syncfs call.
- self._merged_path(file_name, lstatobj, exists=False)
-
- finally:
- if bsd_chflags and pflags != 0:
- # Restore the parent flags we saved before unlinking
- bsd_chflags.chflags(parent_name, pflags)
-
- unmerge_desc = {}
- unmerge_desc["cfgpro"] = _("cfgpro")
- unmerge_desc["replaced"] = _("replaced")
- unmerge_desc["!dir"] = _("!dir")
- unmerge_desc["!empty"] = _("!empty")
- unmerge_desc["!fif"] = _("!fif")
- unmerge_desc["!found"] = _("!found")
- unmerge_desc["!md5"] = _("!md5")
- unmerge_desc["!mtime"] = _("!mtime")
- unmerge_desc["!obj"] = _("!obj")
- unmerge_desc["!sym"] = _("!sym")
- unmerge_desc["!prefix"] = _("!prefix")
-
- real_root = self.settings['ROOT']
- real_root_len = len(real_root) - 1
- eroot = self.settings["EROOT"]
-
- infodirs = frozenset(infodir for infodir in chain(
- self.settings.get("INFOPATH", "").split(":"),
- self.settings.get("INFODIR", "").split(":")) if infodir)
- infodirs_inodes = set()
- for infodir in infodirs:
- infodir = os.path.join(real_root, infodir.lstrip(os.sep))
- try:
- statobj = os.stat(infodir)
- except OSError:
- pass
- else:
- infodirs_inodes.add((statobj.st_dev, statobj.st_ino))
-
- for i, objkey in enumerate(mykeys):
-
- obj = normalize_path(objkey)
- if os is _os_merge:
- try:
- _unicode_encode(obj,
- encoding=_encodings['merge'], errors='strict')
- except UnicodeEncodeError:
- # The package appears to have been merged with a
- # different value of sys.getfilesystemencoding(),
- # so fall back to utf_8 if appropriate.
- try:
- _unicode_encode(obj,
- encoding=_encodings['fs'], errors='strict')
- except UnicodeEncodeError:
- pass
- else:
- os = portage.os
- perf_md5 = portage.checksum.perform_md5
-
- file_data = pkgfiles[objkey]
- file_type = file_data[0]
-
- # don't try to unmerge the prefix offset itself
- if len(obj) <= len(eroot) or not obj.startswith(eroot):
- show_unmerge("---", unmerge_desc["!prefix"], file_type, obj)
- continue
-
- statobj = None
- try:
- statobj = os.stat(obj)
- except OSError:
- pass
- lstatobj = None
- try:
- lstatobj = os.lstat(obj)
- except (OSError, AttributeError):
- pass
- islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
- if lstatobj is None:
- show_unmerge("---", unmerge_desc["!found"], file_type, obj)
- continue
-
- f_match = obj[len(eroot)-1:]
- ignore = False
- for pattern in uninstall_ignore:
- if fnmatch.fnmatch(f_match, pattern):
- ignore = True
- break
-
- if not ignore:
- if islink and f_match in \
- ("/lib", "/usr/lib", "/usr/local/lib"):
- # Ignore libdir symlinks for bug #423127.
- ignore = True
-
- if ignore:
- show_unmerge("---", unmerge_desc["cfgpro"], file_type, obj)
- continue
-
- # don't use EROOT, CONTENTS entries already contain EPREFIX
- if obj.startswith(real_root):
- relative_path = obj[real_root_len:]
- is_owned = False
- for dblnk in others_in_slot:
- if dblnk.isowner(relative_path):
- is_owned = True
- break
-
- if is_owned and islink and \
- file_type in ("sym", "dir") and \
- statobj and stat.S_ISDIR(statobj.st_mode):
- # A new instance of this package claims the file, so
- # don't unmerge it. If the file is symlink to a
- # directory and the unmerging package installed it as
- # a symlink, but the new owner has it listed as a
- # directory, then we'll produce a warning since the
- # symlink is a sort of orphan in this case (see
- # bug #326685).
- symlink_orphan = False
- for dblnk in others_in_slot:
- parent_contents_key = \
- dblnk._match_contents(relative_path)
- if not parent_contents_key:
- continue
- if not parent_contents_key.startswith(
- real_root):
- continue
- if dblnk.getcontents()[
- parent_contents_key][0] == "dir":
- symlink_orphan = True
- break
-
- if symlink_orphan:
- protected_symlinks.setdefault(
- (statobj.st_dev, statobj.st_ino),
- []).append(relative_path)
-
- if is_owned:
- show_unmerge("---", unmerge_desc["replaced"], file_type, obj)
- continue
- elif relative_path in cfgfiledict:
- stale_confmem.append(relative_path)
-
- # Don't unlink symlinks to directories here since that can
- # remove /lib and /usr/lib symlinks.
- if unmerge_orphans and \
- lstatobj and not stat.S_ISDIR(lstatobj.st_mode) and \
- not (islink and statobj and stat.S_ISDIR(statobj.st_mode)) and \
- not self.isprotected(obj):
- try:
- unlink(obj, lstatobj)
- except EnvironmentError as e:
- if e.errno not in ignored_unlink_errnos:
- raise
- del e
- show_unmerge("<<<", "", file_type, obj)
- continue
-
- lmtime = str(lstatobj[stat.ST_MTIME])
- if (pkgfiles[objkey][0] not in ("dir", "fif", "dev")) and (lmtime != pkgfiles[objkey][1]):
- show_unmerge("---", unmerge_desc["!mtime"], file_type, obj)
- continue
-
- if file_type == "dir" and not islink:
- if lstatobj is None or not stat.S_ISDIR(lstatobj.st_mode):
- show_unmerge("---", unmerge_desc["!dir"], file_type, obj)
- continue
- mydirs.add((obj, (lstatobj.st_dev, lstatobj.st_ino)))
- elif file_type == "sym" or (file_type == "dir" and islink):
- if not islink:
- show_unmerge("---", unmerge_desc["!sym"], file_type, obj)
- continue
-
- # If this symlink points to a directory then we don't want
- # to unmerge it if there are any other packages that
- # installed files into the directory via this symlink
- # (see bug #326685).
- # TODO: Resolving a symlink to a directory will require
- # simulation if $ROOT != / and the link is not relative.
- if islink and statobj and stat.S_ISDIR(statobj.st_mode) \
- and obj.startswith(real_root):
-
- relative_path = obj[real_root_len:]
- try:
- target_dir_contents = os.listdir(obj)
- except OSError:
- pass
- else:
- if target_dir_contents:
- # If all the children are regular files owned
- # by this package, then the symlink should be
- # safe to unmerge.
- all_owned = True
- for child in target_dir_contents:
- child = os.path.join(relative_path, child)
- if not self.isowner(child):
- all_owned = False
- break
- try:
- child_lstat = os.lstat(os.path.join(
- real_root, child.lstrip(os.sep)))
- except OSError:
- continue
-
- if not stat.S_ISREG(child_lstat.st_mode):
- # Nested symlinks or directories make
- # the issue very complex, so just
- # preserve the symlink in order to be
- # on the safe side.
- all_owned = False
- break
-
- if not all_owned:
- protected_symlinks.setdefault(
- (statobj.st_dev, statobj.st_ino),
- []).append(relative_path)
- show_unmerge("---", unmerge_desc["!empty"],
- file_type, obj)
- continue
-
- # Go ahead and unlink symlinks to directories here when
- # they're actually recorded as symlinks in the contents.
- # Normally, symlinks such as /lib -> lib64 are not recorded
- # as symlinks in the contents of a package. If a package
- # installs something into ${D}/lib/, it is recorded in the
- # contents as a directory even if it happens to correspond
- # to a symlink when it's merged to the live filesystem.
- try:
- unlink(obj, lstatobj)
- show_unmerge("<<<", "", file_type, obj)
- except (OSError, IOError) as e:
- if e.errno not in ignored_unlink_errnos:
- raise
- del e
- show_unmerge("!!!", "", file_type, obj)
- elif pkgfiles[objkey][0] == "obj":
- if statobj is None or not stat.S_ISREG(statobj.st_mode):
- show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
- continue
- mymd5 = None
- try:
- mymd5 = perf_md5(obj, calc_prelink=calc_prelink)
- except FileNotFound as e:
- # the file has disappeared between now and our stat call
- show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
- continue
-
- # The stored MD5 may be upper-case in old vdb entries, so lower() it
- # for backwards compatibility before comparing.
- if mymd5 != pkgfiles[objkey][2].lower():
- show_unmerge("---", unmerge_desc["!md5"], file_type, obj)
- continue
- try:
- unlink(obj, lstatobj)
- except (OSError, IOError) as e:
- if e.errno not in ignored_unlink_errnos:
- raise
- del e
- show_unmerge("<<<", "", file_type, obj)
- elif pkgfiles[objkey][0] == "fif":
- if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
- show_unmerge("---", unmerge_desc["!fif"], file_type, obj)
- continue
- show_unmerge("---", "", file_type, obj)
- elif pkgfiles[objkey][0] == "dev":
- show_unmerge("---", "", file_type, obj)
-
- self._unmerge_dirs(mydirs, infodirs_inodes,
- protected_symlinks, unmerge_desc, unlink, os)
- mydirs.clear()
-
- if protected_symlinks:
- self._unmerge_protected_symlinks(others_in_slot, infodirs_inodes,
- protected_symlinks, unmerge_desc, unlink, os)
-
- if protected_symlinks:
- msg = "One or more symlinks to directories have been " + \
- "preserved in order to ensure that files installed " + \
- "via these symlinks remain accessible. " + \
- "This indicates that the mentioned symlink(s) may " + \
- "be obsolete remnants of an old install, and it " + \
- "may be appropriate to replace a given symlink " + \
- "with the directory that it points to."
- lines = textwrap.wrap(msg, 72)
- lines.append("")
- flat_list = set()
- flat_list.update(*protected_symlinks.values())
- flat_list = sorted(flat_list)
- for f in flat_list:
- lines.append("\t%s" % (os.path.join(real_root,
- f.lstrip(os.sep))))
- lines.append("")
- self._elog("elog", "postrm", lines)
-
- # Remove stale entries from config memory.
- if stale_confmem:
- for filename in stale_confmem:
- del cfgfiledict[filename]
- writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
-
- # Remove self from the vartree database so that our own virtual gets zapped if we're the last node.
- self.vartree.zap(self.mycpv)
-
- def _unmerge_protected_symlinks(self, others_in_slot, infodirs_inodes,
- protected_symlinks, unmerge_desc, unlink, os):
-
- real_root = self.settings['ROOT']
- show_unmerge = self._show_unmerge
- ignored_unlink_errnos = self._ignored_unlink_errnos
-
- flat_list = set()
- flat_list.update(*protected_symlinks.values())
- flat_list = sorted(flat_list)
-
- for f in flat_list:
- for dblnk in others_in_slot:
- if dblnk.isowner(f):
- # If another package in the same slot installed
- # a file via a protected symlink, return early
- # and don't bother searching for any other owners.
- return
-
- msg = []
- msg.append("")
- msg.append(_("Directory symlink(s) may need protection:"))
- msg.append("")
-
- for f in flat_list:
- msg.append("\t%s" % \
- os.path.join(real_root, f.lstrip(os.path.sep)))
-
- msg.append("")
- msg.append("Use the UNINSTALL_IGNORE variable to exempt specific symlinks")
- msg.append("from the following search (see the make.conf man page).")
- msg.append("")
- msg.append(_("Searching all installed"
- " packages for files installed via above symlink(s)..."))
- msg.append("")
- self._elog("elog", "postrm", msg)
-
- self.lockdb()
- try:
- owners = self.vartree.dbapi._owners.get_owners(flat_list)
- self.vartree.dbapi.flush_cache()
- finally:
- self.unlockdb()
-
- for owner in list(owners):
- if owner.mycpv == self.mycpv:
- owners.pop(owner, None)
-
- if not owners:
- msg = []
- msg.append(_("The above directory symlink(s) are all "
- "safe to remove. Removing them now..."))
- msg.append("")
- self._elog("elog", "postrm", msg)
- dirs = set()
- for unmerge_syms in protected_symlinks.values():
- for relative_path in unmerge_syms:
- obj = os.path.join(real_root,
- relative_path.lstrip(os.sep))
- parent = os.path.dirname(obj)
- while len(parent) > len(self._eroot):
- try:
- lstatobj = os.lstat(parent)
- except OSError:
- break
- else:
- dirs.add((parent,
- (lstatobj.st_dev, lstatobj.st_ino)))
- parent = os.path.dirname(parent)
- try:
- unlink(obj, os.lstat(obj))
- show_unmerge("<<<", "", "sym", obj)
- except (OSError, IOError) as e:
- if e.errno not in ignored_unlink_errnos:
- raise
- del e
- show_unmerge("!!!", "", "sym", obj)
-
- protected_symlinks.clear()
- self._unmerge_dirs(dirs, infodirs_inodes,
- protected_symlinks, unmerge_desc, unlink, os)
- dirs.clear()
-
- def _unmerge_dirs(self, dirs, infodirs_inodes,
- protected_symlinks, unmerge_desc, unlink, os):
-
- show_unmerge = self._show_unmerge
- infodir_cleanup = self._infodir_cleanup
- ignored_unlink_errnos = self._ignored_unlink_errnos
- ignored_rmdir_errnos = self._ignored_rmdir_errnos
- real_root = self.settings['ROOT']
-
- dirs = sorted(dirs)
- revisit = {}
-
- while True:
- try:
- obj, inode_key = dirs.pop()
- except IndexError:
- break
- # Treat any directory named "info" as a candidate here,
- # since it might have been in INFOPATH previously even
- # though it may not be there now.
- if inode_key in infodirs_inodes or \
- os.path.basename(obj) == "info":
- try:
- remaining = os.listdir(obj)
- except OSError:
- pass
- else:
- cleanup_info_dir = ()
- if remaining and \
- len(remaining) <= len(infodir_cleanup):
- if not set(remaining).difference(infodir_cleanup):
- cleanup_info_dir = remaining
-
- for child in cleanup_info_dir:
- child = os.path.join(obj, child)
- try:
- lstatobj = os.lstat(child)
- if stat.S_ISREG(lstatobj.st_mode):
- unlink(child, lstatobj)
- show_unmerge("<<<", "", "obj", child)
- except EnvironmentError as e:
- if e.errno not in ignored_unlink_errnos:
- raise
- del e
- show_unmerge("!!!", "", "obj", child)
-
- try:
- parent_name = os.path.dirname(obj)
- parent_stat = os.stat(parent_name)
-
- if bsd_chflags:
- lstatobj = os.lstat(obj)
- if lstatobj.st_flags != 0:
- bsd_chflags.lchflags(obj, 0)
-
- # Use normal stat/chflags for the parent since we want to
- # follow any symlinks to the real parent directory.
- pflags = parent_stat.st_flags
- if pflags != 0:
- bsd_chflags.chflags(parent_name, 0)
- try:
- os.rmdir(obj)
- finally:
- if bsd_chflags and pflags != 0:
- # Restore the parent flags we saved before unlinking
- bsd_chflags.chflags(parent_name, pflags)
-
- # Record the parent directory for use in syncfs calls.
- # Note that we use a realpath and a regular stat here, since
- # we want to follow any symlinks back to the real device where
- # the real parent directory resides.
- self._merged_path(os.path.realpath(parent_name), parent_stat)
-
- show_unmerge("<<<", "", "dir", obj)
- except EnvironmentError as e:
- if e.errno not in ignored_rmdir_errnos:
- raise
- if e.errno != errno.ENOENT:
- show_unmerge("---", unmerge_desc["!empty"], "dir", obj)
- revisit[obj] = inode_key
-
- # Since we didn't remove this directory, record the directory
- # itself for use in syncfs calls, if we have removed another
- # file from the same device.
- # Note that we use a realpath and a regular stat here, since
- # we want to follow any symlinks back to the real device where
- # the real directory resides.
- try:
- dir_stat = os.stat(obj)
- except OSError:
- pass
- else:
- if dir_stat.st_dev in self._device_path_map:
- self._merged_path(os.path.realpath(obj), dir_stat)
-
- else:
- # When a directory is successfully removed, there's
- # no need to protect symlinks that point to it.
- unmerge_syms = protected_symlinks.pop(inode_key, None)
- if unmerge_syms is not None:
- parents = []
- for relative_path in unmerge_syms:
- obj = os.path.join(real_root,
- relative_path.lstrip(os.sep))
- try:
- unlink(obj, os.lstat(obj))
- show_unmerge("<<<", "", "sym", obj)
- except (OSError, IOError) as e:
- if e.errno not in ignored_unlink_errnos:
- raise
- del e
- show_unmerge("!!!", "", "sym", obj)
- else:
- parents.append(os.path.dirname(obj))
-
- if parents:
- # Revisit parents recursively (bug 640058).
- recursive_parents = []
- for parent in set(parents):
- while parent in revisit:
- recursive_parents.append(parent)
- parent = os.path.dirname(parent)
- if parent == '/':
- break
-
- for parent in sorted(set(recursive_parents)):
- dirs.append((parent, revisit.pop(parent)))
-
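# A minimal standalone sketch of the parent re-queuing done just above for
# bug 640058; requeue_parents() and its arguments are illustrative names,
# not portage API. Given the parent directories of symlinks that were just
# removed and the map of directories whose earlier rmdir() failed, it
# returns every ancestor worth another removal attempt.
import os

def requeue_parents(parents, revisit):
    recursive_parents = []
    for parent in set(parents):
        # Walk up the tree as long as each ancestor is still pending.
        while parent in revisit:
            recursive_parents.append(parent)
            parent = os.path.dirname(parent)
            if parent == "/":
                break
    # Hand the pending ancestors back to the caller's directory queue.
    return [(parent, revisit.pop(parent))
        for parent in sorted(set(recursive_parents))]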
- def isowner(self, filename, destroot=None):
- """
- Check if a file belongs to this package. This may
- result in a stat call for the parent directory of
- every installed file, since the inode numbers are
- used to work around the problem of ambiguous paths
- caused by symlinked directories. The results of
- stat calls are cached to optimize multiple calls
- to this method.
-
- @param filename: path of the file to check, relative to ROOT
- @type filename: String (Path)
- @param destroot: deprecated and unused; self.settings['EROOT'] is used instead
- @type destroot: String (Path)
- @rtype: Boolean
- @return:
- 1. True if this package owns the file.
- 2. False if this package does not own the file.
- """
-
- if destroot is not None and destroot != self._eroot:
- warnings.warn("The second parameter of the " + \
- "portage.dbapi.vartree.dblink.isowner()" + \
- " is now unused. Instead " + \
- "self.settings['EROOT'] will be used.",
- DeprecationWarning, stacklevel=2)
-
- return bool(self._match_contents(filename))
-
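# A hedged usage sketch for isowner(); the category, package and path below
# are hypothetical, and the dblink constructor arguments mirror the ones
# used later in this module (settings, vartree, treetype="vartree").
import portage
from portage.dbapi.vartree import dblink

def file_owned_by(cat, pkg_version, path):
    vartree = portage.db[portage.root]["vartree"]
    link = dblink(cat, pkg_version, settings=portage.settings,
        vartree=vartree, treetype="vartree")
    # isowner() expects a path relative to ROOT (with its leading slash).
    return link.isowner(path)

# e.g. file_owned_by("app-editors", "nano-4.9.3", "/usr/bin/nano")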
- def _match_contents(self, filename, destroot=None):
- """
- The matching contents entry is returned, which is useful
- since the path may differ from the one given by the caller,
- due to symlinks.
-
- @rtype: String
- @return: the contents entry corresponding to the given path, or False
- if the file is not owned by this package.
- """
-
- filename = _unicode_decode(filename,
- encoding=_encodings['content'], errors='strict')
-
- if destroot is not None and destroot != self._eroot:
- warnings.warn("The second parameter of the " + \
- "portage.dbapi.vartree.dblink._match_contents()" + \
- " is now unused. Instead " + \
- "self.settings['ROOT'] will be used.",
- DeprecationWarning, stacklevel=2)
-
- # don't use EROOT here, image already contains EPREFIX
- destroot = self.settings['ROOT']
-
- # The given filename argument might have a different encoding than
- # the filenames contained in the contents, so use separate wrapped os
- # modules for each. The basename is more likely to contain non-ascii
- # characters than the directory path, so use os_filename_arg for all
- # operations involving the basename of the filename arg.
- os_filename_arg = _os_merge
- os = _os_merge
-
- try:
- _unicode_encode(filename,
- encoding=_encodings['merge'], errors='strict')
- except UnicodeEncodeError:
- # The package appears to have been merged with a
- # different value of sys.getfilesystemencoding(),
- # so fall back to utf_8 if appropriate.
- try:
- _unicode_encode(filename,
- encoding=_encodings['fs'], errors='strict')
- except UnicodeEncodeError:
- pass
- else:
- os_filename_arg = portage.os
-
- destfile = normalize_path(
- os_filename_arg.path.join(destroot,
- filename.lstrip(os_filename_arg.path.sep)))
-
- if "case-insensitive-fs" in self.settings.features:
- destfile = destfile.lower()
-
- if self._contents.contains(destfile):
- return self._contents.unmap_key(destfile)
-
- if self.getcontents():
- basename = os_filename_arg.path.basename(destfile)
- if self._contents_basenames is None:
-
- try:
- for x in self._contents.keys():
- _unicode_encode(x,
- encoding=_encodings['merge'],
- errors='strict')
- except UnicodeEncodeError:
- # The package appears to have been merged with a
- # different value of sys.getfilesystemencoding(),
- # so fall back to utf_8 if appropriate.
- try:
- for x in self._contents.keys():
- _unicode_encode(x,
- encoding=_encodings['fs'],
- errors='strict')
- except UnicodeEncodeError:
- pass
- else:
- os = portage.os
-
- self._contents_basenames = set(
- os.path.basename(x) for x in self._contents.keys())
- if basename not in self._contents_basenames:
- # This is a shortcut that, in most cases, allows us to
- # eliminate this package as an owner without the need
- # to examine inode numbers of parent directories.
- return False
-
- # Use stat rather than lstat since we want to follow
- # any symlinks to the real parent directory.
- parent_path = os_filename_arg.path.dirname(destfile)
- try:
- parent_stat = os_filename_arg.stat(parent_path)
- except EnvironmentError as e:
- if e.errno != errno.ENOENT:
- raise
- del e
- return False
- if self._contents_inodes is None:
-
- if os is _os_merge:
- try:
- for x in self._contents.keys():
- _unicode_encode(x,
- encoding=_encodings['merge'],
- errors='strict')
- except UnicodeEncodeError:
- # The package appears to have been merged with a
- # different value of sys.getfilesystemencoding(),
- # so fall back to utf_8 if appropriate.
- try:
- for x in self._contents.keys():
- _unicode_encode(x,
- encoding=_encodings['fs'],
- errors='strict')
- except UnicodeEncodeError:
- pass
- else:
- os = portage.os
-
- self._contents_inodes = {}
- parent_paths = set()
- for x in self._contents.keys():
- p_path = os.path.dirname(x)
- if p_path in parent_paths:
- continue
- parent_paths.add(p_path)
- try:
- s = os.stat(p_path)
- except OSError:
- pass
- else:
- inode_key = (s.st_dev, s.st_ino)
- # Use lists of paths in case multiple
- # paths reference the same inode.
- p_path_list = self._contents_inodes.get(inode_key)
- if p_path_list is None:
- p_path_list = []
- self._contents_inodes[inode_key] = p_path_list
- if p_path not in p_path_list:
- p_path_list.append(p_path)
-
- p_path_list = self._contents_inodes.get(
- (parent_stat.st_dev, parent_stat.st_ino))
- if p_path_list:
- for p_path in p_path_list:
- x = os_filename_arg.path.join(p_path, basename)
- if self._contents.contains(x):
- return self._contents.unmap_key(x)
-
- return False
-
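# A minimal sketch of the device/inode parent matching that _match_contents()
# falls back to; owns_path() and its arguments are illustrative, not portage
# API. It shows why a recorded file can still be matched when the caller
# spells its path through a symlinked directory.
import os

def _inode_key(path):
    # stat() rather than lstat(): follow symlinks to the real directory.
    st = os.stat(path)
    return (st.st_dev, st.st_ino)

def owns_path(contents, query_path):
    # Index the recorded parent directories by (st_dev, st_ino) once.
    index = {}
    for recorded in contents:
        parent = os.path.dirname(recorded)
        try:
            index.setdefault(_inode_key(parent), set()).add(parent)
        except OSError:
            continue
    # Match the query by the inode of *its* parent directory.
    try:
        candidates = index.get(_inode_key(os.path.dirname(query_path)), ())
    except OSError:
        return False
    basename = os.path.basename(query_path)
    return any(os.path.join(parent, basename) in contents
        for parent in candidates)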
- def _linkmap_rebuild(self, **kwargs):
- """
- Rebuild the self._linkmap if it's not broken due to missing
- scanelf binary. Also, return early if preserve-libs is disabled
- and the preserve-libs registry is empty.
- """
- if self._linkmap_broken or \
- self.vartree.dbapi._linkmap is None or \
- self.vartree.dbapi._plib_registry is None or \
- ("preserve-libs" not in self.settings.features and \
- not self.vartree.dbapi._plib_registry.hasEntries()):
- return
- try:
- self.vartree.dbapi._linkmap.rebuild(**kwargs)
- except CommandNotFound as e:
- self._linkmap_broken = True
- self._display_merge(_("!!! Disabling preserve-libs " \
- "due to error: Command Not Found: %s\n") % (e,),
- level=logging.ERROR, noiselevel=-1)
-
- def _find_libs_to_preserve(self, unmerge=False):
- """
- Get set of relative paths for libraries to be preserved. When
- unmerge is False, file paths to preserve are selected from
- self._installed_instance. Otherwise, paths are selected from
- self.
- """
- if self._linkmap_broken or \
- self.vartree.dbapi._linkmap is None or \
- self.vartree.dbapi._plib_registry is None or \
- (not unmerge and self._installed_instance is None) or \
- not self._preserve_libs:
- return set()
-
- os = _os_merge
- linkmap = self.vartree.dbapi._linkmap
- if unmerge:
- installed_instance = self
- else:
- installed_instance = self._installed_instance
- old_contents = installed_instance.getcontents()
- root = self.settings['ROOT']
- root_len = len(root) - 1
- lib_graph = digraph()
- path_node_map = {}
-
- def path_to_node(path):
- node = path_node_map.get(path)
- if node is None:
- node = LinkageMap._LibGraphNode(linkmap._obj_key(path))
- alt_path_node = lib_graph.get(node)
- if alt_path_node is not None:
- node = alt_path_node
- node.alt_paths.add(path)
- path_node_map[path] = node
- return node
-
- consumer_map = {}
- provider_nodes = set()
- # Create provider nodes and add them to the graph.
- for f_abs in old_contents:
-
- if os is _os_merge:
- try:
- _unicode_encode(f_abs,
- encoding=_encodings['merge'], errors='strict')
- except UnicodeEncodeError:
- # The package appears to have been merged with a
- # different value of sys.getfilesystemencoding(),
- # so fall back to utf_8 if appropriate.
- try:
- _unicode_encode(f_abs,
- encoding=_encodings['fs'], errors='strict')
- except UnicodeEncodeError:
- pass
- else:
- os = portage.os
-
- f = f_abs[root_len:]
- if not unmerge and self.isowner(f):
- # We have an identically named replacement file,
- # so we don't try to preserve the old copy.
- continue
- try:
- consumers = linkmap.findConsumers(f,
- exclude_providers=(installed_instance.isowner,))
- except KeyError:
- continue
- if not consumers:
- continue
- provider_node = path_to_node(f)
- lib_graph.add(provider_node, None)
- provider_nodes.add(provider_node)
- consumer_map[provider_node] = consumers
-
- # Create consumer nodes and add them to the graph.
- # Note that consumers can also be providers.
- for provider_node, consumers in consumer_map.items():
- for c in consumers:
- consumer_node = path_to_node(c)
- if installed_instance.isowner(c) and \
- consumer_node not in provider_nodes:
- # This is not a provider, so it will be uninstalled.
- continue
- lib_graph.add(provider_node, consumer_node)
-
- # Locate nodes which should be preserved. They consist of all
- # providers that are reachable from consumers that are not
- # providers themselves.
- preserve_nodes = set()
- for consumer_node in lib_graph.root_nodes():
- if consumer_node in provider_nodes:
- continue
- # Preserve all providers that are reachable from this consumer.
- node_stack = lib_graph.child_nodes(consumer_node)
- while node_stack:
- provider_node = node_stack.pop()
- if provider_node in preserve_nodes:
- continue
- preserve_nodes.add(provider_node)
- node_stack.extend(lib_graph.child_nodes(provider_node))
-
- preserve_paths = set()
- for preserve_node in preserve_nodes:
- # Preserve the library itself, and also preserve the
- # soname symlink which is the only symlink that is
- # strictly required.
- hardlinks = set()
- soname_symlinks = set()
- soname = linkmap.getSoname(next(iter(preserve_node.alt_paths)))
- for f in preserve_node.alt_paths:
- f_abs = os.path.join(root, f.lstrip(os.sep))
- try:
- if stat.S_ISREG(os.lstat(f_abs).st_mode):
- hardlinks.add(f)
- elif os.path.basename(f) == soname:
- soname_symlinks.add(f)
- except OSError:
- pass
-
- if hardlinks:
- preserve_paths.update(hardlinks)
- preserve_paths.update(soname_symlinks)
-
- return preserve_paths
-
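# A simplified sketch of the reachability pass above; consumer_edges and
# providers are hypothetical stand-ins for the digraph built from the
# linkmap. Every provider reachable from a consumer that is not itself a
# provider (i.e. something that stays installed) gets preserved.
def preserved_providers(consumer_edges, providers):
    preserve = set()
    for consumer, linked_providers in consumer_edges.items():
        if consumer in providers:
            # Providers are only preserved transitively, via the stack below.
            continue
        stack = list(linked_providers)
        while stack:
            node = stack.pop()
            if node in preserve:
                continue
            preserve.add(node)
            # A preserved provider may itself consume further providers.
            stack.extend(consumer_edges.get(node, ()))
    return preserve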
- def _add_preserve_libs_to_contents(self, preserve_paths):
- """
- Preserve libs returned from _find_libs_to_preserve().
- """
-
- if not preserve_paths:
- return
-
- os = _os_merge
- showMessage = self._display_merge
- root = self.settings['ROOT']
-
- # Copy contents entries from the old package to the new one.
- new_contents = self.getcontents().copy()
- old_contents = self._installed_instance.getcontents()
- for f in sorted(preserve_paths):
- f = _unicode_decode(f,
- encoding=_encodings['content'], errors='strict')
- f_abs = os.path.join(root, f.lstrip(os.sep))
- contents_entry = old_contents.get(f_abs)
- if contents_entry is None:
- # This will probably never happen, but it might if one of the
- # paths returned from findConsumers() refers to one of the libs
- # that should be preserved yet the path is not listed in the
- # contents. Such a path might belong to some other package, so
- # it shouldn't be preserved here.
- showMessage(_("!!! File '%s' will not be preserved "
- "due to missing contents entry\n") % (f_abs,),
- level=logging.ERROR, noiselevel=-1)
- preserve_paths.remove(f)
- continue
- new_contents[f_abs] = contents_entry
- obj_type = contents_entry[0]
- showMessage(_(">>> needed %s %s\n") % (obj_type, f_abs),
- noiselevel=-1)
- # Add parent directories to contents if necessary.
- parent_dir = os.path.dirname(f_abs)
- while len(parent_dir) > len(root):
- new_contents[parent_dir] = ["dir"]
- prev = parent_dir
- parent_dir = os.path.dirname(parent_dir)
- if prev == parent_dir:
- break
- outfile = atomic_ofstream(os.path.join(self.dbtmpdir, "CONTENTS"))
- write_contents(new_contents, root, outfile)
- outfile.close()
- self._clear_contents_cache()
-
- def _find_unused_preserved_libs(self, unmerge_no_replacement):
- """
- Find preserved libraries that don't have any consumers left.
- """
-
- if self._linkmap_broken or \
- self.vartree.dbapi._linkmap is None or \
- self.vartree.dbapi._plib_registry is None or \
- not self.vartree.dbapi._plib_registry.hasEntries():
- return {}
-
- # Since preserved libraries can be consumers of other preserved
- # libraries, use a graph to track consumer relationships.
- plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
- linkmap = self.vartree.dbapi._linkmap
- lib_graph = digraph()
- preserved_nodes = set()
- preserved_paths = set()
- path_cpv_map = {}
- path_node_map = {}
- root = self.settings['ROOT']
-
- def path_to_node(path):
- node = path_node_map.get(path)
- if node is None:
- node = LinkageMap._LibGraphNode(linkmap._obj_key(path))
- alt_path_node = lib_graph.get(node)
- if alt_path_node is not None:
- node = alt_path_node
- node.alt_paths.add(path)
- path_node_map[path] = node
- return node
-
- for cpv, plibs in plib_dict.items():
- for f in plibs:
- path_cpv_map[f] = cpv
- preserved_node = path_to_node(f)
- if not preserved_node.file_exists():
- continue
- lib_graph.add(preserved_node, None)
- preserved_paths.add(f)
- preserved_nodes.add(preserved_node)
- for c in self.vartree.dbapi._linkmap.findConsumers(f):
- consumer_node = path_to_node(c)
- if not consumer_node.file_exists():
- continue
- # Note that consumers may also be providers.
- lib_graph.add(preserved_node, consumer_node)
-
- # Eliminate consumers having providers with the same soname as an
- # installed library that is not preserved. This eliminates
- # libraries that are erroneously preserved due to a move from one
- # directory to another.
- # Also eliminate consumers that are going to be unmerged if
- # unmerge_no_replacement is True.
- provider_cache = {}
- for preserved_node in preserved_nodes:
- soname = linkmap.getSoname(preserved_node)
- for consumer_node in lib_graph.parent_nodes(preserved_node):
- if consumer_node in preserved_nodes:
- continue
- if unmerge_no_replacement:
- will_be_unmerged = True
- for path in consumer_node.alt_paths:
- if not self.isowner(path):
- will_be_unmerged = False
- break
- if will_be_unmerged:
- # This consumer is not preserved and it is
- # being unmerged, so drop this edge.
- lib_graph.remove_edge(preserved_node, consumer_node)
- continue
-
- providers = provider_cache.get(consumer_node)
- if providers is None:
- providers = linkmap.findProviders(consumer_node)
- provider_cache[consumer_node] = providers
- providers = providers.get(soname)
- if providers is None:
- continue
- for provider in providers:
- if provider in preserved_paths:
- continue
- provider_node = path_to_node(provider)
- if not provider_node.file_exists():
- continue
- if provider_node in preserved_nodes:
- continue
- # An alternative provider seems to be
- # installed, so drop this edge.
- lib_graph.remove_edge(preserved_node, consumer_node)
- break
-
- cpv_lib_map = {}
- while lib_graph:
- root_nodes = preserved_nodes.intersection(lib_graph.root_nodes())
- if not root_nodes:
- break
- lib_graph.difference_update(root_nodes)
- unlink_list = set()
- for node in root_nodes:
- unlink_list.update(node.alt_paths)
- unlink_list = sorted(unlink_list)
- for obj in unlink_list:
- cpv = path_cpv_map.get(obj)
- if cpv is None:
- # This means that a symlink is in the preserved libs
- # registry, but the actual lib it points to is not.
- self._display_merge(_("!!! symlink to lib is preserved, "
- "but not the lib itself:\n!!! '%s'\n") % (obj,),
- level=logging.ERROR, noiselevel=-1)
- continue
- removed = cpv_lib_map.get(cpv)
- if removed is None:
- removed = set()
- cpv_lib_map[cpv] = removed
- removed.add(obj)
-
- return cpv_lib_map
-
- def _remove_preserved_libs(self, cpv_lib_map):
- """
- Remove files returned from _find_unused_preserved_libs().
- """
-
- os = _os_merge
-
- files_to_remove = set()
- for files in cpv_lib_map.values():
- files_to_remove.update(files)
- files_to_remove = sorted(files_to_remove)
- showMessage = self._display_merge
- root = self.settings['ROOT']
-
- parent_dirs = set()
- for obj in files_to_remove:
- obj = os.path.join(root, obj.lstrip(os.sep))
- parent_dirs.add(os.path.dirname(obj))
- if os.path.islink(obj):
- obj_type = _("sym")
- else:
- obj_type = _("obj")
- try:
- os.unlink(obj)
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- del e
- else:
- showMessage(_("<<< !needed %s %s\n") % (obj_type, obj),
- noiselevel=-1)
-
- # Remove empty parent directories if possible.
- while parent_dirs:
- x = parent_dirs.pop()
- while True:
- try:
- os.rmdir(x)
- except OSError:
- break
- prev = x
- x = os.path.dirname(x)
- if x == prev:
- break
-
- self.vartree.dbapi._plib_registry.pruneNonExisting()
-
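# A minimal standalone version of the empty-parent cleanup loop above;
# prune_empty_parents() is an illustrative name, not portage API. rmdir()
# fails on non-empty or missing directories, which is what terminates the
# upward walk.
import os

def prune_empty_parents(directory):
    while True:
        try:
            os.rmdir(directory)
        except OSError:
            break
        parent = os.path.dirname(directory)
        if parent == directory:
            break
        directory = parent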
- def _collision_protect(self, srcroot, destroot, mypkglist,
- file_list, symlink_list):
-
- os = _os_merge
-
- collision_ignore = []
- for x in portage.util.shlex_split(
- self.settings.get("COLLISION_IGNORE", "")):
- if os.path.isdir(os.path.join(self._eroot, x.lstrip(os.sep))):
- x = normalize_path(x)
- x += "/*"
- collision_ignore.append(x)
-
- # For collisions with preserved libraries, the current package
- # will assume ownership and the libraries will be unregistered.
- if self.vartree.dbapi._plib_registry is None:
- # preserve-libs is entirely disabled
- plib_cpv_map = None
- plib_paths = None
- plib_inodes = {}
- else:
- plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
- plib_cpv_map = {}
- plib_paths = set()
- for cpv, paths in plib_dict.items():
- plib_paths.update(paths)
- for f in paths:
- plib_cpv_map[f] = cpv
- plib_inodes = self._lstat_inode_map(plib_paths)
-
- plib_collisions = {}
-
- showMessage = self._display_merge
- stopmerge = False
- collisions = []
- dirs = set()
- dirs_ro = set()
- symlink_collisions = []
- destroot = self.settings['ROOT']
- totfiles = len(file_list) + len(symlink_list)
- showMessage(_(" %s checking %d files for package collisions\n") % \
- (colorize("GOOD", "*"), totfiles))
- for i, (f, f_type) in enumerate(chain(
- ((f, "reg") for f in file_list),
- ((f, "sym") for f in symlink_list))):
- if i % 1000 == 0 and i != 0:
- showMessage(_("%d files remaining ...\n") % (totfiles - i))
-
- dest_path = normalize_path(
- os.path.join(destroot, f.lstrip(os.path.sep)))
-
- parent = os.path.dirname(dest_path)
- if parent not in dirs:
- for x in iter_parents(parent):
- if x in dirs:
- break
- dirs.add(x)
- if os.path.isdir(x):
- if not os.access(x, os.W_OK):
- dirs_ro.add(x)
- break
-
- try:
- dest_lstat = os.lstat(dest_path)
- except EnvironmentError as e:
- if e.errno == errno.ENOENT:
- del e
- continue
- elif e.errno == errno.ENOTDIR:
- del e
- # A non-directory is in a location where this package
- # expects to have a directory.
- dest_lstat = None
- parent_path = dest_path
- while len(parent_path) > len(destroot):
- parent_path = os.path.dirname(parent_path)
- try:
- dest_lstat = os.lstat(parent_path)
- break
- except EnvironmentError as e:
- if e.errno != errno.ENOTDIR:
- raise
- del e
- if not dest_lstat:
- raise AssertionError(
- "unable to find non-directory " + \
- "parent for '%s'" % dest_path)
- dest_path = parent_path
- f = os.path.sep + dest_path[len(destroot):]
- if f in collisions:
- continue
- else:
- raise
- if f[0] != "/":
- f="/"+f
-
- if stat.S_ISDIR(dest_lstat.st_mode):
- if f_type == "sym":
- # This case is explicitly banned
- # by PMS (see bug #326685).
- symlink_collisions.append(f)
- collisions.append(f)
- continue
-
- plibs = plib_inodes.get((dest_lstat.st_dev, dest_lstat.st_ino))
- if plibs:
- for path in plibs:
- cpv = plib_cpv_map[path]
- paths = plib_collisions.get(cpv)
- if paths is None:
- paths = set()
- plib_collisions[cpv] = paths
- paths.add(path)
- # The current package will assume ownership and the
- # libraries will be unregistered, so exclude this
- # path from the normal collisions.
- continue
-
- isowned = False
- full_path = os.path.join(destroot, f.lstrip(os.path.sep))
- for ver in mypkglist:
- if ver.isowner(f):
- isowned = True
- break
- if not isowned and self.isprotected(full_path):
- isowned = True
- if not isowned:
- f_match = full_path[len(self._eroot)-1:]
- stopmerge = True
- for pattern in collision_ignore:
- if fnmatch.fnmatch(f_match, pattern):
- stopmerge = False
- break
- if stopmerge:
- collisions.append(f)
- return collisions, dirs_ro, symlink_collisions, plib_collisions
-
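# A small sketch of the COLLISION_IGNORE matching used above; the function
# name and example values are illustrative. Paths are compared relative to
# EROOT (keeping the leading slash), and directories listed in
# COLLISION_IGNORE are expanded to "<dir>/*" before matching.
import fnmatch

def collision_ignored(full_path, eroot, patterns):
    f_match = full_path[len(eroot) - 1:]
    return any(fnmatch.fnmatch(f_match, pattern) for pattern in patterns)

# e.g. collision_ignored("/usr/lib/debug/foo.debug", "/",
#     ["/usr/lib/debug/*"]) returns True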
- def _lstat_inode_map(self, path_iter):
- """
- Use lstat to create a map of the form:
- {(st_dev, st_ino) : set([path1, path2, ...])}
- Multiple paths may reference the same inode due to hardlinks.
- All lstat() calls are relative to self.myroot.
- """
-
- os = _os_merge
-
- root = self.settings['ROOT']
- inode_map = {}
- for f in path_iter:
- path = os.path.join(root, f.lstrip(os.sep))
- try:
- st = os.lstat(path)
- except OSError as e:
- if e.errno not in (errno.ENOENT, errno.ENOTDIR):
- raise
- del e
- continue
- key = (st.st_dev, st.st_ino)
- paths = inode_map.get(key)
- if paths is None:
- paths = set()
- inode_map[key] = paths
- paths.add(f)
- return inode_map
-
- def _security_check(self, installed_instances):
- if not installed_instances:
- return 0
-
- os = _os_merge
-
- showMessage = self._display_merge
-
- file_paths = set()
- for dblnk in installed_instances:
- file_paths.update(dblnk.getcontents())
- inode_map = {}
- real_paths = set()
- for i, path in enumerate(file_paths):
-
- if os is _os_merge:
- try:
- _unicode_encode(path,
- encoding=_encodings['merge'], errors='strict')
- except UnicodeEncodeError:
- # The package appears to have been merged with a
- # different value of sys.getfilesystemencoding(),
- # so fall back to utf_8 if appropriate.
- try:
- _unicode_encode(path,
- encoding=_encodings['fs'], errors='strict')
- except UnicodeEncodeError:
- pass
- else:
- os = portage.os
-
- try:
- s = os.lstat(path)
- except OSError as e:
- if e.errno not in (errno.ENOENT, errno.ENOTDIR):
- raise
- del e
- continue
- if not stat.S_ISREG(s.st_mode):
- continue
- path = os.path.realpath(path)
- if path in real_paths:
- continue
- real_paths.add(path)
- if s.st_nlink > 1 and \
- s.st_mode & (stat.S_ISUID | stat.S_ISGID):
- k = (s.st_dev, s.st_ino)
- inode_map.setdefault(k, []).append((path, s))
- suspicious_hardlinks = []
- for path_list in inode_map.values():
- path, s = path_list[0]
- if len(path_list) == s.st_nlink:
- # All hardlinks seem to be owned by this package.
- continue
- suspicious_hardlinks.append(path_list)
- if not suspicious_hardlinks:
- return 0
-
- msg = []
- msg.append(_("suid/sgid file(s) "
- "with suspicious hardlink(s):"))
- msg.append("")
- for path_list in suspicious_hardlinks:
- for path, s in path_list:
- msg.append("\t%s" % path)
- msg.append("")
- msg.append(_("See the Gentoo Security Handbook "
- "guide for advice on how to proceed."))
-
- self._eerror("preinst", msg)
-
- return 1
-
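# A condensed sketch of the suid/sgid hardlink check above; the function
# name and its argument are illustrative. A group of owned hardlinks is
# suspicious when the filesystem reports more links to the inode than the
# installed packages account for, since an unaccounted hardlink could keep
# a vulnerable setuid binary alive after the upgrade.
import os
import stat

def suspicious_hardlinks(owned_paths):
    inode_map = {}
    for path in owned_paths:
        try:
            s = os.lstat(path)
        except OSError:
            continue
        if not stat.S_ISREG(s.st_mode):
            continue
        if s.st_nlink > 1 and s.st_mode & (stat.S_ISUID | stat.S_ISGID):
            inode_map.setdefault((s.st_dev, s.st_ino), []).append((path, s))
    return [group for group in inode_map.values()
        if len(group) != group[0][1].st_nlink]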
- def _eqawarn(self, phase, lines):
- self._elog("eqawarn", phase, lines)
-
- def _eerror(self, phase, lines):
- self._elog("eerror", phase, lines)
-
- def _elog(self, funcname, phase, lines):
- func = getattr(portage.elog.messages, funcname)
- if self._scheduler is None:
- for l in lines:
- func(l, phase=phase, key=self.mycpv)
- else:
- background = self.settings.get("PORTAGE_BACKGROUND") == "1"
- log_path = None
- if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
- log_path = self.settings.get("PORTAGE_LOG_FILE")
- out = io.StringIO()
- for line in lines:
- func(line, phase=phase, key=self.mycpv, out=out)
- msg = out.getvalue()
- self._scheduler.output(msg,
- background=background, log_path=log_path)
-
- def _elog_process(self, phasefilter=None):
- cpv = self.mycpv
- if self._pipe is None:
- elog_process(cpv, self.settings, phasefilter=phasefilter)
- else:
- logdir = os.path.join(self.settings["T"], "logging")
- ebuild_logentries = collect_ebuild_messages(logdir)
- # phasefilter is irrelevant for the above collect_ebuild_messages
- # call, since this package instance has a private logdir. However,
- # it may be relevant for the following collect_messages call.
- py_logentries = collect_messages(key=cpv, phasefilter=phasefilter).get(cpv, {})
- logentries = _merge_logentries(py_logentries, ebuild_logentries)
- funcnames = {
- "INFO": "einfo",
- "LOG": "elog",
- "WARN": "ewarn",
- "QA": "eqawarn",
- "ERROR": "eerror"
- }
- str_buffer = []
- for phase, messages in logentries.items():
- for key, lines in messages:
- funcname = funcnames[key]
- if isinstance(lines, basestring):
- lines = [lines]
- for line in lines:
- for line in line.split('\n'):
- fields = (funcname, phase, cpv, line)
- str_buffer.append(' '.join(fields))
- str_buffer.append('\n')
- if str_buffer:
- str_buffer = _unicode_encode(''.join(str_buffer))
- while str_buffer:
- str_buffer = str_buffer[os.write(self._pipe, str_buffer):]
-
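# The tail of _elog_process() relies on os.write() possibly accepting only
# part of the buffer; a generic version of that loop looks like this
# (write_all() is an illustrative helper, not portage API; data is bytes).
import os

def write_all(fd, data):
    # Keep writing whatever os.write() did not accept yet.
    while data:
        data = data[os.write(fd, data):]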
- def _emerge_log(self, msg):
- emergelog(False, msg)
-
- def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
- mydbapi=None, prev_mtimes=None, counter=None):
- """
-
- This function does the following:
-
- calls get_ro_checker to retrieve a function for checking whether Portage
- will write to a read-only filesystem, then runs it against the directory list
- calls self._preserve_libs if FEATURES=preserve-libs
- calls self._collision_protect if FEATURES=collision-protect
- calls doebuild(mydo=pkg_preinst)
- Merges the package to the livefs
- unmerges old version (if required)
- calls doebuild(mydo=pkg_postinst)
- calls env_update
-
- @param srcroot: Typically this is ${D}
- @type srcroot: String (Path)
- @param destroot: ignored, self.settings['ROOT'] is used instead
- @type destroot: String (Path)
- @param inforoot: directory containing the package metadata files (SLOT, CFLAGS, etc.) that are copied into the vardb entry
- @type inforoot: String (Path)
- @param myebuild: path to the ebuild that we are processing
- @type myebuild: String (Path)
- @param mydbapi: dbapi which is handed to doebuild.
- @type mydbapi: portdbapi instance
- @param prev_mtimes: { Filename:mtime } mapping for env_update
- @type prev_mtimes: Dictionary
- @rtype: Integer
- @return:
- 1. 0 on success
- 2. 1 on failure
-
- secondhand is a list of symlinks that have been skipped due to their target
- not existing; we will merge these symlinks at a later time.
- """
-
- os = _os_merge
-
- srcroot = _unicode_decode(srcroot,
- encoding=_encodings['content'], errors='strict')
- destroot = self.settings['ROOT']
- inforoot = _unicode_decode(inforoot,
- encoding=_encodings['content'], errors='strict')
- myebuild = _unicode_decode(myebuild,
- encoding=_encodings['content'], errors='strict')
-
- showMessage = self._display_merge
- srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
-
- if not os.path.isdir(srcroot):
- showMessage(_("!!! Directory Not Found: D='%s'\n") % srcroot,
- level=logging.ERROR, noiselevel=-1)
- return 1
-
- is_binpkg = self.settings.get("EMERGE_FROM") == "binary"
- slot = ''
- for var_name in ('CHOST', 'SLOT'):
- try:
- with io.open(_unicode_encode(
- os.path.join(inforoot, var_name),
- encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['repo.content'],
- errors='replace') as f:
- val = f.readline().strip()
- except EnvironmentError as e:
- if e.errno != errno.ENOENT:
- raise
- del e
- val = ''
-
- if var_name == 'SLOT':
- slot = val
-
- if not slot.strip():
- slot = self.settings.get(var_name, '')
- if not slot.strip():
- showMessage(_("!!! SLOT is undefined\n"),
- level=logging.ERROR, noiselevel=-1)
- return 1
- write_atomic(os.path.join(inforoot, var_name), slot + '\n')
-
- # This check only applies when built from source, since
- # inforoot values are written just after src_install.
- if not is_binpkg and val != self.settings.get(var_name, ''):
- self._eqawarn('preinst',
- [_("QA Notice: Expected %(var_name)s='%(expected_value)s', got '%(actual_value)s'\n") % \
- {"var_name":var_name, "expected_value":self.settings.get(var_name, ''), "actual_value":val}])
-
- def eerror(lines):
- self._eerror("preinst", lines)
-
- if not os.path.exists(self.dbcatdir):
- ensure_dirs(self.dbcatdir)
-
- # NOTE: We use SLOT obtained from the inforoot
- # directory, in order to support USE=multislot.
- # Use _pkg_str to discard the sub-slot part if necessary.
- slot = _pkg_str(self.mycpv, slot=slot).slot
- cp = self.mysplit[0]
- slot_atom = "%s:%s" % (cp, slot)
-
- self.lockdb()
- try:
- # filter any old-style virtual matches
- slot_matches = [cpv for cpv in self.vartree.dbapi.match(slot_atom)
- if cpv_getkey(cpv) == cp]
-
- if self.mycpv not in slot_matches and \
- self.vartree.dbapi.cpv_exists(self.mycpv):
- # handle multislot or unapplied slotmove
- slot_matches.append(self.mycpv)
-
- others_in_slot = []
- for cur_cpv in slot_matches:
- # Clone the config in case one of these has to be unmerged,
- # since we need it to have private ${T} etc... for things
- # like elog.
- settings_clone = portage.config(clone=self.settings)
- settings_clone.pop("PORTAGE_BUILDDIR_LOCKED", None)
- settings_clone.setcpv(cur_cpv, mydb=self.vartree.dbapi)
- if self._preserve_libs and "preserve-libs" in \
- settings_clone["PORTAGE_RESTRICT"].split():
- self._preserve_libs = False
- others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
- settings=settings_clone,
- vartree=self.vartree, treetype="vartree",
- scheduler=self._scheduler, pipe=self._pipe))
- finally:
- self.unlockdb()
-
- # If any instance has RESTRICT=preserve-libs, then
- # restrict it for all instances.
- if not self._preserve_libs:
- for dblnk in others_in_slot:
- dblnk._preserve_libs = False
-
- retval = self._security_check(others_in_slot)
- if retval:
- return retval
-
- if slot_matches:
- # Used by self.isprotected().
- max_dblnk = None
- max_counter = -1
- for dblnk in others_in_slot:
- cur_counter = self.vartree.dbapi.cpv_counter(dblnk.mycpv)
- if cur_counter > max_counter:
- max_counter = cur_counter
- max_dblnk = dblnk
- self._installed_instance = max_dblnk
-
- # Apply INSTALL_MASK before collision-protect, since it may
- # be useful to avoid collisions in some scenarios.
- # We cannot detect if this is needed or not here as INSTALL_MASK can be
- # modified by bashrc files.
- phase = MiscFunctionsProcess(background=False,
- commands=["preinst_mask"], phase="preinst",
- scheduler=self._scheduler, settings=self.settings)
- phase.start()
- phase.wait()
- try:
- with io.open(_unicode_encode(os.path.join(inforoot, "INSTALL_MASK"),
- encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['repo.content'],
- errors='replace') as f:
- install_mask = InstallMask(f.read())
- except EnvironmentError:
- install_mask = None
-
- if install_mask:
- install_mask_dir(self.settings["ED"], install_mask)
- if any(x in self.settings.features for x in ('nodoc', 'noman', 'noinfo')):
- try:
- os.rmdir(os.path.join(self.settings["ED"], 'usr', 'share'))
- except OSError:
- pass
-
- # We check for unicode encoding issues after src_install. However,
- # the check must be repeated here for binary packages (it's
- # inexpensive since we call os.walk() here anyway).
- unicode_errors = []
- line_ending_re = re.compile('[\n\r]')
- srcroot_len = len(srcroot)
- ed_len = len(self.settings["ED"])
- eprefix_len = len(self.settings["EPREFIX"])
-
- while True:
-
- unicode_error = False
- eagain_error = False
-
- filelist = []
- linklist = []
- paths_with_newlines = []
- def onerror(e):
- raise
- walk_iter = os.walk(srcroot, onerror=onerror)
- while True:
- try:
- parent, dirs, files = next(walk_iter)
- except StopIteration:
- break
- except OSError as e:
- if e.errno != errno.EAGAIN:
- raise
- # Observed with PyPy 1.8.
- eagain_error = True
- break
-
- try:
- parent = _unicode_decode(parent,
- encoding=_encodings['merge'], errors='strict')
- except UnicodeDecodeError:
- new_parent = _unicode_decode(parent,
- encoding=_encodings['merge'], errors='replace')
- new_parent = _unicode_encode(new_parent,
- encoding='ascii', errors='backslashreplace')
- new_parent = _unicode_decode(new_parent,
- encoding=_encodings['merge'], errors='replace')
- os.rename(parent, new_parent)
- unicode_error = True
- unicode_errors.append(new_parent[ed_len:])
- break
-
- for fname in files:
- try:
- fname = _unicode_decode(fname,
- encoding=_encodings['merge'], errors='strict')
- except UnicodeDecodeError:
- fpath = portage._os.path.join(
- parent.encode(_encodings['merge']), fname)
- new_fname = _unicode_decode(fname,
- encoding=_encodings['merge'], errors='replace')
- new_fname = _unicode_encode(new_fname,
- encoding='ascii', errors='backslashreplace')
- new_fname = _unicode_decode(new_fname,
- encoding=_encodings['merge'], errors='replace')
- new_fpath = os.path.join(parent, new_fname)
- os.rename(fpath, new_fpath)
- unicode_error = True
- unicode_errors.append(new_fpath[ed_len:])
- fname = new_fname
- fpath = new_fpath
- else:
- fpath = os.path.join(parent, fname)
-
- relative_path = fpath[srcroot_len:]
-
- if line_ending_re.search(relative_path) is not None:
- paths_with_newlines.append(relative_path)
-
- file_mode = os.lstat(fpath).st_mode
- if stat.S_ISREG(file_mode):
- filelist.append(relative_path)
- elif stat.S_ISLNK(file_mode):
- # Note: os.walk puts symlinks to directories in the "dirs"
- # list and it does not traverse them since that could lead
- # to an infinite recursion loop.
- linklist.append(relative_path)
-
- myto = _unicode_decode(
- _os.readlink(_unicode_encode(fpath,
- encoding=_encodings['merge'], errors='strict')),
- encoding=_encodings['merge'], errors='replace')
- if line_ending_re.search(myto) is not None:
- paths_with_newlines.append(relative_path)
-
- if unicode_error:
- break
-
- if not (unicode_error or eagain_error):
- break
-
- if unicode_errors:
- self._elog("eqawarn", "preinst",
- _merge_unicode_error(unicode_errors))
-
- if paths_with_newlines:
- msg = []
- msg.append(_("This package installs one or more files containing line ending characters:"))
- msg.append("")
- paths_with_newlines.sort()
- for f in paths_with_newlines:
- msg.append("\t/%s" % (f.replace("\n", "\\n").replace("\r", "\\r")))
- msg.append("")
- msg.append(_("package %s NOT merged") % self.mycpv)
- msg.append("")
- eerror(msg)
- return 1
-
- # If there are no files to merge, and an installed package in the same
- # slot has files, it probably means that something went wrong.
- if self.settings.get("PORTAGE_PACKAGE_EMPTY_ABORT") == "1" and \
- not filelist and not linklist and others_in_slot:
- installed_files = None
- for other_dblink in others_in_slot:
- installed_files = other_dblink.getcontents()
- if not installed_files:
- continue
- from textwrap import wrap
- wrap_width = 72
- msg = []
- d = {
- "new_cpv":self.mycpv,
- "old_cpv":other_dblink.mycpv
- }
- msg.extend(wrap(_("The '%(new_cpv)s' package will not install "
- "any files, but the currently installed '%(old_cpv)s'"
- " package has the following files: ") % d, wrap_width))
- msg.append("")
- msg.extend(sorted(installed_files))
- msg.append("")
- msg.append(_("package %s NOT merged") % self.mycpv)
- msg.append("")
- msg.extend(wrap(
- _("Manually run `emerge --unmerge =%s` if you "
- "really want to remove the above files. Set "
- "PORTAGE_PACKAGE_EMPTY_ABORT=\"0\" in "
- "/etc/portage/make.conf if you do not want to "
- "abort in cases like this.") % other_dblink.mycpv,
- wrap_width))
- eerror(msg)
- if installed_files:
- return 1
-
- # Make sure the ebuild environment is initialized and that ${T}/elog
- # exists for logging of collision-protect eerror messages.
- if myebuild is None:
- myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
- doebuild_environment(myebuild, "preinst",
- settings=self.settings, db=mydbapi)
- self.settings["REPLACING_VERSIONS"] = " ".join(
- [portage.versions.cpv_getversion(other.mycpv)
- for other in others_in_slot])
- prepare_build_dirs(settings=self.settings, cleanup=cleanup)
-
- # check for package collisions
- blockers = []
- for blocker in self._blockers or []:
- blocker = self.vartree.dbapi._dblink(blocker.cpv)
- # It may have been unmerged before lock(s)
- # were acquired.
- if blocker.exists():
- blockers.append(blocker)
-
- collisions, dirs_ro, symlink_collisions, plib_collisions = \
- self._collision_protect(srcroot, destroot,
- others_in_slot + blockers, filelist, linklist)
-
- # Check for read-only filesystems.
- ro_checker = get_ro_checker()
- rofilesystems = ro_checker(dirs_ro)
-
- if rofilesystems:
- msg = _("One or more files installed to this package are "
- "set to be installed to read-only filesystems. "
- "Please mount the following filesystems as read-write "
- "and retry.")
- msg = textwrap.wrap(msg, 70)
- msg.append("")
- for f in rofilesystems:
- msg.append("\t%s" % f)
- msg.append("")
- self._elog("eerror", "preinst", msg)
-
- msg = _("Package '%s' NOT merged due to read-only file systems.") % \
- self.settings.mycpv
- msg += _(" If necessary, refer to your elog "
- "messages for the whole content of the above message.")
- msg = textwrap.wrap(msg, 70)
- eerror(msg)
- return 1
-
- if symlink_collisions:
- # Symlink collisions need to be distinguished from other types
- # of collisions, in order to avoid confusion (see bug #409359).
- msg = _("Package '%s' has one or more collisions "
- "between symlinks and directories, which is explicitly "
- "forbidden by PMS section 13.4 (see bug #326685):") % \
- (self.settings.mycpv,)
- msg = textwrap.wrap(msg, 70)
- msg.append("")
- for f in symlink_collisions:
- msg.append("\t%s" % os.path.join(destroot,
- f.lstrip(os.path.sep)))
- msg.append("")
- self._elog("eerror", "preinst", msg)
-
- if collisions:
- collision_protect = "collision-protect" in self.settings.features
- protect_owned = "protect-owned" in self.settings.features
- msg = _("This package will overwrite one or more files that"
- " may belong to other packages (see list below).")
- if not (collision_protect or protect_owned):
- msg += _(" Add either \"collision-protect\" or"
- " \"protect-owned\" to FEATURES in"
- " make.conf if you would like the merge to abort"
- " in cases like this. See the make.conf man page for"
- " more information about these features.")
- if self.settings.get("PORTAGE_QUIET") != "1":
- msg += _(" You can use a command such as"
- " `portageq owners / <filename>` to identify the"
- " installed package that owns a file. If portageq"
- " reports that only one package owns a file then do NOT"
- " file a bug report. A bug report is only useful if it"
- " identifies at least two or more packages that are known"
- " to install the same file(s)."
- " If a collision occurs and you"
- " can not explain where the file came from then you"
- " should simply ignore the collision since there is not"
- " enough information to determine if a real problem"
- " exists. Please do NOT file a bug report at"
- " https://bugs.gentoo.org/ unless you report exactly which"
- " two packages install the same file(s). See"
- " https://wiki.gentoo.org/wiki/Knowledge_Base:Blockers"
- " for tips on how to solve the problem. And once again,"
- " please do NOT file a bug report unless you have"
- " completely understood the above message.")
-
- self.settings["EBUILD_PHASE"] = "preinst"
- from textwrap import wrap
- msg = wrap(msg, 70)
- if collision_protect:
- msg.append("")
- msg.append(_("package %s NOT merged") % self.settings.mycpv)
- msg.append("")
- msg.append(_("Detected file collision(s):"))
- msg.append("")
-
- for f in collisions:
- msg.append("\t%s" % \
- os.path.join(destroot, f.lstrip(os.path.sep)))
-
- eerror(msg)
-
- owners = None
- if collision_protect or protect_owned or symlink_collisions:
- msg = []
- msg.append("")
- msg.append(_("Searching all installed"
- " packages for file collisions..."))
- msg.append("")
- msg.append(_("Press Ctrl-C to Stop"))
- msg.append("")
- eerror(msg)
-
- if len(collisions) > 20:
- # get_owners is slow for large numbers of files, so
- # don't look them all up.
- collisions = collisions[:20]
-
- pkg_info_strs = {}
- self.lockdb()
- try:
- owners = self.vartree.dbapi._owners.get_owners(collisions)
- self.vartree.dbapi.flush_cache()
-
- for pkg in owners:
- pkg = self.vartree.dbapi._pkg_str(pkg.mycpv, None)
- pkg_info_str = "%s%s%s" % (pkg,
- _slot_separator, pkg.slot)
- if pkg.repo != _unknown_repo:
- pkg_info_str += "%s%s" % (_repo_separator,
- pkg.repo)
- pkg_info_strs[pkg] = pkg_info_str
-
- finally:
- self.unlockdb()
-
- for pkg, owned_files in owners.items():
- msg = []
- msg.append(pkg_info_strs[pkg.mycpv])
- for f in sorted(owned_files):
- msg.append("\t%s" % os.path.join(destroot,
- f.lstrip(os.path.sep)))
- msg.append("")
- eerror(msg)
-
- if not owners:
- eerror([_("None of the installed"
- " packages claim the file(s)."), ""])
-
- symlink_abort_msg = _("Package '%s' NOT merged since it has "
- "one or more collisions between symlinks and directories, "
- "which is explicitly forbidden by PMS section 13.4 "
- "(see bug #326685).")
-
- # The explanation about the collision and how to solve
- # it may not be visible via a scrollback buffer, especially
- # if the number of file collisions is large. Therefore,
- # show a summary at the end.
- abort = False
- if symlink_collisions:
- abort = True
- msg = symlink_abort_msg % (self.settings.mycpv,)
- elif collision_protect:
- abort = True
- msg = _("Package '%s' NOT merged due to file collisions.") % \
- self.settings.mycpv
- elif protect_owned and owners:
- abort = True
- msg = _("Package '%s' NOT merged due to file collisions.") % \
- self.settings.mycpv
- else:
- msg = _("Package '%s' merged despite file collisions.") % \
- self.settings.mycpv
- msg += _(" If necessary, refer to your elog "
- "messages for the whole content of the above message.")
- eerror(wrap(msg, 70))
-
- if abort:
- return 1
-
- # The merge process may move files out of the image directory,
- # which causes invalidation of the .installed flag.
- try:
- os.unlink(os.path.join(
- os.path.dirname(normalize_path(srcroot)), ".installed"))
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- del e
-
- self.dbdir = self.dbtmpdir
- self.delete()
- ensure_dirs(self.dbtmpdir)
-
- downgrade = False
- if self._installed_instance is not None and \
- vercmp(self.mycpv.version,
- self._installed_instance.mycpv.version) < 0:
- downgrade = True
-
- if self._installed_instance is not None:
- rval = self._pre_merge_backup(self._installed_instance, downgrade)
- if rval != os.EX_OK:
- showMessage(_("!!! FAILED preinst: ") +
- "quickpkg: %s\n" % rval,
- level=logging.ERROR, noiselevel=-1)
- return rval
-
- # run preinst script
- showMessage(_(">>> Merging %(cpv)s to %(destroot)s\n") % \
- {"cpv":self.mycpv, "destroot":destroot})
- phase = EbuildPhase(background=False, phase="preinst",
- scheduler=self._scheduler, settings=self.settings)
- phase.start()
- a = phase.wait()
-
- # XXX: Decide how to handle failures here.
- if a != os.EX_OK:
- showMessage(_("!!! FAILED preinst: ")+str(a)+"\n",
- level=logging.ERROR, noiselevel=-1)
- return a
-
- # copy "info" files (like SLOT, CFLAGS, etc.) into the database
- for x in os.listdir(inforoot):
- self.copyfile(inforoot+"/"+x)
-
- # write local package counter for recording
- if counter is None:
- counter = self.vartree.dbapi.counter_tick(mycpv=self.mycpv)
- with io.open(_unicode_encode(os.path.join(self.dbtmpdir, 'COUNTER'),
- encoding=_encodings['fs'], errors='strict'),
- mode='w', encoding=_encodings['repo.content'],
- errors='backslashreplace') as f:
- f.write("%s" % counter)
-
- self.updateprotect()
-
- # If we have a file containing previously-merged config file md5sums, grab it.
- self.vartree.dbapi._fs_lock()
- try:
- # This prunes any libraries from the registry that no longer
- # exist on disk, in case they have been manually removed.
- # This has to be done prior to merge, since after merge it
- # is non-trivial to distinguish these files from files
- # that have just been merged.
- plib_registry = self.vartree.dbapi._plib_registry
- if plib_registry:
- plib_registry.lock()
- try:
- plib_registry.load()
- plib_registry.store()
- finally:
- plib_registry.unlock()
-
- # Always behave like --noconfmem is enabled for downgrades
- # so that people who don't know about this option are less
- # likely to get confused when doing upgrade/downgrade cycles.
- cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file)
- if "NOCONFMEM" in self.settings or downgrade:
- cfgfiledict["IGNORE"]=1
- else:
- cfgfiledict["IGNORE"]=0
-
- rval = self._merge_contents(srcroot, destroot, cfgfiledict)
- if rval != os.EX_OK:
- return rval
- finally:
- self.vartree.dbapi._fs_unlock()
-
- # These caches are populated during collision-protect and the data
- # they contain is now invalid. It's very important to invalidate
- # the contents_inodes cache so that FEATURES=unmerge-orphans
- # doesn't unmerge anything that belongs to this package that has
- # just been merged.
- for dblnk in others_in_slot:
- dblnk._clear_contents_cache()
- self._clear_contents_cache()
-
- linkmap = self.vartree.dbapi._linkmap
- plib_registry = self.vartree.dbapi._plib_registry
- # We initialize preserve_paths to an empty set rather
- # than None here because it plays an important role
- # in prune_plib_registry logic by serving to indicate
- # that we have a replacement for a package that's
- # being unmerged.
-
- preserve_paths = set()
- needed = None
- if not (self._linkmap_broken or linkmap is None or
- plib_registry is None):
- self.vartree.dbapi._fs_lock()
- plib_registry.lock()
- try:
- plib_registry.load()
- needed = os.path.join(inforoot, linkmap._needed_aux_key)
- self._linkmap_rebuild(include_file=needed)
-
- # Preserve old libs if they are still in use
- # TODO: Handle cases where the previous instance
- # has already been uninstalled but it still has some
- # preserved libraries in the registry that we may
- # want to preserve here.
- preserve_paths = self._find_libs_to_preserve()
- finally:
- plib_registry.unlock()
- self.vartree.dbapi._fs_unlock()
-
- if preserve_paths:
- self._add_preserve_libs_to_contents(preserve_paths)
-
- # If portage is reinstalling itself, remove the old
- # version now since we want to use the temporary
- # PORTAGE_BIN_PATH that will be removed when we return.
- reinstall_self = False
- if self.myroot == "/" and \
- match_from_list(PORTAGE_PACKAGE_ATOM, [self.mycpv]):
- reinstall_self = True
-
- emerge_log = self._emerge_log
-
- # If we have any preserved libraries then autoclean
- # is forced so that preserve-libs logic doesn't have
- # to account for the additional complexity of the
- # AUTOCLEAN=no mode.
- autoclean = self.settings.get("AUTOCLEAN", "yes") == "yes" \
- or preserve_paths
-
- if autoclean:
- emerge_log(_(" >>> AUTOCLEAN: %s") % (slot_atom,))
-
- others_in_slot.append(self) # self has just been merged
- for dblnk in list(others_in_slot):
- if dblnk is self:
- continue
- if not (autoclean or dblnk.mycpv == self.mycpv or reinstall_self):
- continue
- showMessage(_(">>> Safely unmerging already-installed instance...\n"))
- emerge_log(_(" === Unmerging... (%s)") % (dblnk.mycpv,))
- others_in_slot.remove(dblnk) # dblnk will unmerge itself now
- dblnk._linkmap_broken = self._linkmap_broken
- dblnk.settings["REPLACED_BY_VERSION"] = portage.versions.cpv_getversion(self.mycpv)
- dblnk.settings.backup_changes("REPLACED_BY_VERSION")
- unmerge_rval = dblnk.unmerge(ldpath_mtimes=prev_mtimes,
- others_in_slot=others_in_slot, needed=needed,
- preserve_paths=preserve_paths)
- dblnk.settings.pop("REPLACED_BY_VERSION", None)
-
- if unmerge_rval == os.EX_OK:
- emerge_log(_(" >>> unmerge success: %s") % (dblnk.mycpv,))
- else:
- emerge_log(_(" !!! unmerge FAILURE: %s") % (dblnk.mycpv,))
-
- self.lockdb()
- try:
- # TODO: Check status and abort if necessary.
- dblnk.delete()
- finally:
- self.unlockdb()
- showMessage(_(">>> Original instance of package unmerged safely.\n"))
-
- if len(others_in_slot) > 1:
- showMessage(colorize("WARN", _("WARNING:"))
- + _(" AUTOCLEAN is disabled. This can cause serious"
- " problems due to overlapping packages.\n"),
- level=logging.WARN, noiselevel=-1)
-
- # We hold both directory locks.
- self.dbdir = self.dbpkgdir
- self.lockdb()
- try:
- self.delete()
- _movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
- self._merged_path(self.dbpkgdir, os.lstat(self.dbpkgdir))
- self.vartree.dbapi._cache_delta.recordEvent(
- "add", self.mycpv, slot, counter)
- finally:
- self.unlockdb()
-
- # Check for file collisions with blocking packages
- # and remove any colliding files from their CONTENTS
- # since they now belong to this package.
- self._clear_contents_cache()
- contents = self.getcontents()
- destroot_len = len(destroot) - 1
- self.lockdb()
- try:
- for blocker in blockers:
- self.vartree.dbapi.removeFromContents(blocker, iter(contents),
- relative_paths=False)
- finally:
- self.unlockdb()
-
- plib_registry = self.vartree.dbapi._plib_registry
- if plib_registry:
- self.vartree.dbapi._fs_lock()
- plib_registry.lock()
- try:
- plib_registry.load()
-
- if preserve_paths:
- # keep track of the libs we preserved
- plib_registry.register(self.mycpv, slot, counter,
- sorted(preserve_paths))
-
- # Unregister any preserved libs that this package has overwritten
- # and update the contents of the packages that owned them.
- plib_dict = plib_registry.getPreservedLibs()
- for cpv, paths in plib_collisions.items():
- if cpv not in plib_dict:
- continue
- has_vdb_entry = False
- if cpv != self.mycpv:
- # If we've replaced another instance with the
- # same cpv then the vdb entry no longer belongs
- # to it, so we'll have to get the slot and counter
- # from plib_registry._data instead.
- self.vartree.dbapi.lock()
- try:
- try:
- slot = self.vartree.dbapi._pkg_str(cpv, None).slot
- counter = self.vartree.dbapi.cpv_counter(cpv)
- except (KeyError, InvalidData):
- pass
- else:
- has_vdb_entry = True
- self.vartree.dbapi.removeFromContents(
- cpv, paths)
- finally:
- self.vartree.dbapi.unlock()
-
- if not has_vdb_entry:
- # It's possible for previously unmerged packages
- # to have preserved libs in the registry, so try
- # to retrieve the slot and counter from there.
- has_registry_entry = False
- for plib_cps, (plib_cpv, plib_counter, plib_paths) in \
- plib_registry._data.items():
- if plib_cpv != cpv:
- continue
- try:
- cp, slot = plib_cps.split(":", 1)
- except ValueError:
- continue
- counter = plib_counter
- has_registry_entry = True
- break
-
- if not has_registry_entry:
- continue
-
- remaining = [f for f in plib_dict[cpv] if f not in paths]
- plib_registry.register(cpv, slot, counter, remaining)
-
- plib_registry.store()
- finally:
- plib_registry.unlock()
- self.vartree.dbapi._fs_unlock()
-
- self.vartree.dbapi._add(self)
- contents = self.getcontents()
-
- #do postinst script
- self.settings["PORTAGE_UPDATE_ENV"] = \
- os.path.join(self.dbpkgdir, "environment.bz2")
- self.settings.backup_changes("PORTAGE_UPDATE_ENV")
- try:
- phase = EbuildPhase(background=False, phase="postinst",
- scheduler=self._scheduler, settings=self.settings)
- phase.start()
- a = phase.wait()
- if a == os.EX_OK:
- showMessage(_(">>> %s merged.\n") % self.mycpv)
- finally:
- self.settings.pop("PORTAGE_UPDATE_ENV", None)
-
- if a != os.EX_OK:
- # Bailing out at this point would do more harm than good, so keep
- # going regardless of the phase return code.
- self._postinst_failure = True
- self._elog("eerror", "postinst", [
- _("FAILED postinst: %s") % (a,),
- ])
-
- #update environment settings, library paths. DO NOT change symlinks.
- env_update(
- target_root=self.settings['ROOT'], prev_mtimes=prev_mtimes,
- contents=contents, env=self.settings,
- writemsg_level=self._display_merge, vardbapi=self.vartree.dbapi)
-
- # For gcc upgrades, preserved libs have to be removed after
- # the library path has been updated.
- self._prune_plib_registry()
- self._post_merge_sync()
-
- return os.EX_OK
-
- def _new_backup_path(self, p):
- """
- This works for any type of path, such as a regular file, symlink,
- or directory. The parent directory is assumed to exist.
- The returned filename is of the form p + '.backup.' + x, where
- x guarantees that the returned path does not exist yet.
- """
- os = _os_merge
-
- x = -1
- while True:
- x += 1
- backup_p = '%s.backup.%04d' % (p, x)
- try:
- os.lstat(backup_p)
- except OSError:
- break
-
- return backup_p
-
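- # Illustrative sketch (not part of the original file): with the
- # '%s.backup.%04d' pattern used above, the first unused suffix is chosen,
- # e.g. for a hypothetical path:
- #   self._new_backup_path('/etc/foo')  # -> '/etc/foo.backup.0000'
- #   (or '/etc/foo.backup.0001' if the first candidate already exists)
-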
- def _merge_contents(self, srcroot, destroot, cfgfiledict):
-
- cfgfiledict_orig = cfgfiledict.copy()
-
- # open CONTENTS file (possibly overwriting old one) for recording
- # Use atomic_ofstream for automatic coercion of raw bytes to
- # unicode, in order to prevent TypeError when writing raw bytes
- # to TextIOWrapper with python2.
- outfile = atomic_ofstream(_unicode_encode(
- os.path.join(self.dbtmpdir, 'CONTENTS'),
- encoding=_encodings['fs'], errors='strict'),
- mode='w', encoding=_encodings['repo.content'],
- errors='backslashreplace')
-
- # Don't bump mtimes on merge since some applications require
- # preservation of timestamps. This means that the unmerge phase must
- # check to see if a file belongs to an installed instance in the same
- # slot.
- mymtime = None
-
- # set umask to 0 for merging; save the old umask in prevmask, since this is a global change
- prevmask = os.umask(0)
- secondhand = []
-
- # we do a first merge; this will recurse through all files in our srcroot but also build up a
- # "second hand" of symlinks to merge later
- if self.mergeme(srcroot, destroot, outfile, secondhand,
- self.settings["EPREFIX"].lstrip(os.sep), cfgfiledict, mymtime):
- return 1
-
- # now it's time to deal with our second hand; we'll loop until we can't merge anymore. The rest are
- # broken symlinks. We'll merge them too.
- lastlen = 0
- while len(secondhand) and len(secondhand)!=lastlen:
- # clear the thirdhand. Anything from our second hand that
- # couldn't get merged will be added to thirdhand.
-
- thirdhand = []
- if self.mergeme(srcroot, destroot, outfile, thirdhand,
- secondhand, cfgfiledict, mymtime):
- return 1
-
- #swap hands
- lastlen = len(secondhand)
-
- # our thirdhand now becomes our secondhand. It's ok to throw
- # away secondhand since thirdhand contains all the stuff that
- # couldn't be merged.
- secondhand = thirdhand
-
- if len(secondhand):
- # force merge of remaining symlinks (broken or circular; oh well)
- if self.mergeme(srcroot, destroot, outfile, None,
- secondhand, cfgfiledict, mymtime):
- return 1
-
- #restore umask
- os.umask(prevmask)
-
- #if we opened it, close it
- outfile.flush()
- outfile.close()
-
- # write out our collection of md5sums
- if cfgfiledict != cfgfiledict_orig:
- cfgfiledict.pop("IGNORE", None)
- try:
- writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
- except InvalidLocation:
- self.settings._init_dirs()
- writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
-
- return os.EX_OK
-
- def mergeme(self, srcroot, destroot, outfile, secondhand, stufftomerge, cfgfiledict, thismtime):
- """
-
- This function handles actual merging of the package contents to the livefs.
- It also handles config protection.
-
- @param srcroot: Where are we copying files from (usually ${D})
- @type srcroot: String (Path)
- @param destroot: Typically ${ROOT}
- @type destroot: String (Path)
- @param outfile: File to log operations to
- @type outfile: File Object
- @param secondhand: A set of items to merge in pass two (usually
- symlinks that point to non-existing files and may get merged later)
- @type secondhand: List
- @param stufftomerge: Either a directory to merge, or a list of items.
- @type stufftomerge: String or List
- @param cfgfiledict: { File:mtime } mapping for config_protected files
- @type cfgfiledict: Dictionary
- @param thismtime: None or new mtime for merged files (expressed in seconds
- in Python <3.3 and nanoseconds in Python >=3.3)
- @type thismtime: None or Int
- @rtype: None or Boolean
- @return:
- 1. True on failure
- 2. None otherwise
-
- """
-
- showMessage = self._display_merge
- writemsg = self._display_merge
-
- os = _os_merge
- sep = os.sep
- join = os.path.join
- srcroot = normalize_path(srcroot).rstrip(sep) + sep
- destroot = normalize_path(destroot).rstrip(sep) + sep
- calc_prelink = "prelink-checksums" in self.settings.features
-
- protect_if_modified = \
- "config-protect-if-modified" in self.settings.features and \
- self._installed_instance is not None
-
- # this is supposed to merge a list of files. The argument is passed in one of two forms.
- if isinstance(stufftomerge, basestring):
- # A directory is specified. Figure out protection paths, listdir() it and process it.
- mergelist = [join(stufftomerge, child) for child in \
- os.listdir(join(srcroot, stufftomerge))]
- else:
- mergelist = stufftomerge[:]
-
- while mergelist:
-
- relative_path = mergelist.pop()
- mysrc = join(srcroot, relative_path)
- mydest = join(destroot, relative_path)
- # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
- myrealdest = join(sep, relative_path)
- # stat file once, test using S_* macros many times (faster that way)
- mystat = os.lstat(mysrc)
- mymode = mystat[stat.ST_MODE]
- mymd5 = None
- myto = None
-
- if sys.hexversion >= 0x3030000:
- mymtime = mystat.st_mtime_ns
- else:
- mymtime = mystat[stat.ST_MTIME]
-
- if stat.S_ISREG(mymode):
- mymd5 = perform_md5(mysrc, calc_prelink=calc_prelink)
- elif stat.S_ISLNK(mymode):
- # The file name of mysrc and the actual file that it points to
- # will have earlier been forcefully converted to the 'merge'
- # encoding if necessary, but the content of the symbolic link
- # may need to be forcefully converted here.
- myto = _os.readlink(_unicode_encode(mysrc,
- encoding=_encodings['merge'], errors='strict'))
- try:
- myto = _unicode_decode(myto,
- encoding=_encodings['merge'], errors='strict')
- except UnicodeDecodeError:
- myto = _unicode_decode(myto, encoding=_encodings['merge'],
- errors='replace')
- myto = _unicode_encode(myto, encoding='ascii',
- errors='backslashreplace')
- myto = _unicode_decode(myto, encoding=_encodings['merge'],
- errors='replace')
- os.unlink(mysrc)
- os.symlink(myto, mysrc)
-
- mymd5 = md5(_unicode_encode(myto)).hexdigest()
-
- protected = False
- if stat.S_ISLNK(mymode) or stat.S_ISREG(mymode):
- protected = self.isprotected(mydest)
-
- if stat.S_ISREG(mymode) and \
- mystat.st_size == 0 and \
- os.path.basename(mydest).startswith(".keep"):
- protected = False
-
- destmd5 = None
- mydest_link = None
- # handy variables; mydest is the target object on the live filesystems;
- # mysrc is the source object in the temporary install dir
- try:
- mydstat = os.lstat(mydest)
- mydmode = mydstat.st_mode
- if protected:
- if stat.S_ISLNK(mydmode):
- # Read symlink target as bytes, in case the
- # target path has a bad encoding.
- mydest_link = _os.readlink(
- _unicode_encode(mydest,
- encoding=_encodings['merge'],
- errors='strict'))
- mydest_link = _unicode_decode(mydest_link,
- encoding=_encodings['merge'],
- errors='replace')
-
- # For protection of symlinks, the md5
- # of the link target path string is used
- # for cfgfiledict (symlinks are
- # protected since bug #485598).
- destmd5 = md5(_unicode_encode(mydest_link)).hexdigest()
-
- elif stat.S_ISREG(mydmode):
- destmd5 = perform_md5(mydest,
- calc_prelink=calc_prelink)
- except (FileNotFound, OSError) as e:
- if isinstance(e, OSError) and e.errno != errno.ENOENT:
- raise
- #dest file doesn't exist
- mydstat = None
- mydmode = None
- mydest_link = None
- destmd5 = None
-
- moveme = True
- if protected:
- mydest, protected, moveme = self._protect(cfgfiledict,
- protect_if_modified, mymd5, myto, mydest,
- myrealdest, mydmode, destmd5, mydest_link)
-
- zing = "!!!"
- if not moveme:
- # confmem rejected this update
- zing = "---"
-
- if stat.S_ISLNK(mymode):
- # we are merging a symbolic link
- # Pass in the symlink target in order to bypass the
- # os.readlink() call inside abssymlink(), since that
- # call is unsafe if the merge encoding is not ascii
- # or utf_8 (see bug #382021).
- myabsto = abssymlink(mysrc, target=myto)
-
- if myabsto.startswith(srcroot):
- myabsto = myabsto[len(srcroot):]
- myabsto = myabsto.lstrip(sep)
- if self.settings and self.settings["D"]:
- if myto.startswith(self.settings["D"]):
- myto = myto[len(self.settings["D"])-1:]
- # myrealto contains the path of the real file to which this symlink points.
- # we can simply test for existence of this file to see if the target has been merged yet
- myrealto = normalize_path(os.path.join(destroot, myabsto))
- if mydmode is not None and stat.S_ISDIR(mydmode):
- if not protected:
- # we can't merge a symlink over a directory
- newdest = self._new_backup_path(mydest)
- msg = []
- msg.append("")
- msg.append(_("Installation of a symlink is blocked by a directory:"))
- msg.append(" '%s'" % mydest)
- msg.append(_("This symlink will be merged with a different name:"))
- msg.append(" '%s'" % newdest)
- msg.append("")
- self._eerror("preinst", msg)
- mydest = newdest
-
- # if secondhand is None it means we're operating in "force" mode and should not create a second hand.
- if (secondhand != None) and (not os.path.exists(myrealto)):
- # either the target directory doesn't exist yet or the target file doesn't exist -- or
- # the target is a broken symlink. We will add this file to our "second hand" and merge
- # it later.
- secondhand.append(mysrc[len(srcroot):])
- continue
- # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
- if moveme:
- zing = ">>>"
- mymtime = movefile(mysrc, mydest, newmtime=thismtime,
- sstat=mystat, mysettings=self.settings,
- encoding=_encodings['merge'])
-
- try:
- self._merged_path(mydest, os.lstat(mydest))
- except OSError:
- pass
-
- if mymtime != None:
- # Use lexists, since if the target happens to be a broken
- # symlink then that should trigger an independent warning.
- if not (os.path.lexists(myrealto) or
- os.path.lexists(join(srcroot, myabsto))):
- self._eqawarn('preinst',
- [_("QA Notice: Symbolic link /%s points to /%s which does not exist.")
- % (relative_path, myabsto)])
-
- showMessage("%s %s -> %s\n" % (zing, mydest, myto))
- if sys.hexversion >= 0x3030000:
- outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime // 1000000000)+"\n")
- else:
- outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
- else:
- showMessage(_("!!! Failed to move file.\n"),
- level=logging.ERROR, noiselevel=-1)
- showMessage("!!! %s -> %s\n" % (mydest, myto),
- level=logging.ERROR, noiselevel=-1)
- return 1
- elif stat.S_ISDIR(mymode):
- # we are merging a directory
- if mydmode != None:
- # destination exists
-
- if bsd_chflags:
- # Save then clear flags on dest.
- dflags = mydstat.st_flags
- if dflags != 0:
- bsd_chflags.lchflags(mydest, 0)
-
- if not stat.S_ISLNK(mydmode) and \
- not os.access(mydest, os.W_OK):
- pkgstuff = pkgsplit(self.pkg)
- writemsg(_("\n!!! Cannot write to '%s'.\n") % mydest, noiselevel=-1)
- writemsg(_("!!! Please check permissions and directories for broken symlinks.\n"))
- writemsg(_("!!! You may start the merge process again by using ebuild:\n"))
- writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
- writemsg(_("!!! And finish by running this: env-update\n\n"))
- return 1
-
- if stat.S_ISDIR(mydmode) or \
- (stat.S_ISLNK(mydmode) and os.path.isdir(mydest)):
- # a symlink to an existing directory will work for us; keep it:
- showMessage("--- %s/\n" % mydest)
- if bsd_chflags:
- bsd_chflags.lchflags(mydest, dflags)
- else:
- # a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
- backup_dest = self._new_backup_path(mydest)
- msg = []
- msg.append("")
- msg.append(_("Installation of a directory is blocked by a file:"))
- msg.append(" '%s'" % mydest)
- msg.append(_("This file will be renamed to a different name:"))
- msg.append(" '%s'" % backup_dest)
- msg.append("")
- self._eerror("preinst", msg)
- if movefile(mydest, backup_dest,
- mysettings=self.settings,
- encoding=_encodings['merge']) is None:
- return 1
- showMessage(_("bak %s %s.backup\n") % (mydest, mydest),
- level=logging.ERROR, noiselevel=-1)
- #now create our directory
- try:
- if self.settings.selinux_enabled():
- _selinux_merge.mkdir(mydest, mysrc)
- else:
- os.mkdir(mydest)
- except OSError as e:
- # Error handling should be equivalent to
- # portage.util.ensure_dirs() for cases
- # like bug #187518.
- if e.errno in (errno.EEXIST,):
- pass
- elif os.path.isdir(mydest):
- pass
- else:
- raise
- del e
-
- if bsd_chflags:
- bsd_chflags.lchflags(mydest, dflags)
- os.chmod(mydest, mystat[0])
- os.chown(mydest, mystat[4], mystat[5])
- showMessage(">>> %s/\n" % mydest)
- else:
- try:
- #destination doesn't exist
- if self.settings.selinux_enabled():
- _selinux_merge.mkdir(mydest, mysrc)
- else:
- os.mkdir(mydest)
- except OSError as e:
- # Error handling should be equivalent to
- # portage.util.ensure_dirs() for cases
- # like bug #187518.
- if e.errno in (errno.EEXIST,):
- pass
- elif os.path.isdir(mydest):
- pass
- else:
- raise
- del e
- os.chmod(mydest, mystat[0])
- os.chown(mydest, mystat[4], mystat[5])
- showMessage(">>> %s/\n" % mydest)
-
- try:
- self._merged_path(mydest, os.lstat(mydest))
- except OSError:
- pass
-
- outfile.write("dir "+myrealdest+"\n")
- # recurse and merge this directory
- mergelist.extend(join(relative_path, child) for child in
- os.listdir(join(srcroot, relative_path)))
-
- elif stat.S_ISREG(mymode):
- # we are merging a regular file
- if not protected and \
- mydmode is not None and stat.S_ISDIR(mydmode):
- # install of destination is blocked by an existing directory with the same name
- newdest = self._new_backup_path(mydest)
- msg = []
- msg.append("")
- msg.append(_("Installation of a regular file is blocked by a directory:"))
- msg.append(" '%s'" % mydest)
- msg.append(_("This file will be merged with a different name:"))
- msg.append(" '%s'" % newdest)
- msg.append("")
- self._eerror("preinst", msg)
- mydest = newdest
-
- # whether config protection or not, we merge the new file the
- # same way. Unless moveme=0 (blocking directory)
- if moveme:
- # Create hardlinks only for source files that already exist
- # as hardlinks (having identical st_dev and st_ino).
- hardlink_key = (mystat.st_dev, mystat.st_ino)
-
- hardlink_candidates = self._hardlink_merge_map.get(hardlink_key)
- if hardlink_candidates is None:
- hardlink_candidates = []
- self._hardlink_merge_map[hardlink_key] = hardlink_candidates
-
- mymtime = movefile(mysrc, mydest, newmtime=thismtime,
- sstat=mystat, mysettings=self.settings,
- hardlink_candidates=hardlink_candidates,
- encoding=_encodings['merge'])
- if mymtime is None:
- return 1
- hardlink_candidates.append(mydest)
- zing = ">>>"
-
- try:
- self._merged_path(mydest, os.lstat(mydest))
- except OSError:
- pass
-
- if mymtime != None:
- if sys.hexversion >= 0x3030000:
- outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime // 1000000000)+"\n")
- else:
- outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
- showMessage("%s %s\n" % (zing,mydest))
- else:
- # we are merging a fifo or device node
- zing = "!!!"
- if mydmode is None:
- # destination doesn't exist
- if movefile(mysrc, mydest, newmtime=thismtime,
- sstat=mystat, mysettings=self.settings,
- encoding=_encodings['merge']) is not None:
- zing = ">>>"
-
- try:
- self._merged_path(mydest, os.lstat(mydest))
- except OSError:
- pass
-
- else:
- return 1
- if stat.S_ISFIFO(mymode):
- outfile.write("fif %s\n" % myrealdest)
- else:
- outfile.write("dev %s\n" % myrealdest)
- showMessage(zing + " " + mydest + "\n")
-
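- # Illustrative note (not part of the original file): the CONTENTS lines
- # written by mergeme() above take one of these forms (paths, md5 and
- # mtime values below are hypothetical):
- #   dir /usr/share/doc/foo-1.0
- #   obj /usr/share/doc/foo-1.0/README.bz2 d41d8cd98f00b204e9800998ecf8427e 1531857045
- #   sym /usr/lib/libfoo.so -> libfoo.so.1 1531857045
- #   fif /run/foo
- #   dev /dev/foo
-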
- def _protect(self, cfgfiledict, protect_if_modified, src_md5,
- src_link, dest, dest_real, dest_mode, dest_md5, dest_link):
-
- move_me = True
- protected = True
- force = False
- k = False
- if self._installed_instance is not None:
- k = self._installed_instance._match_contents(dest_real)
- if k is not False:
- if dest_mode is None:
- # If the file doesn't exist, then it may
- # have been deleted or renamed by the
- # admin. Therefore, force the file to be
- # merged with a ._cfg name, so that the
- # admin will be prompted for this update
- # (see bug #523684).
- force = True
-
- elif protect_if_modified:
- data = self._installed_instance.getcontents()[k]
- if data[0] == "obj" and data[2] == dest_md5:
- protected = False
- elif data[0] == "sym" and data[2] == dest_link:
- protected = False
-
- if protected and dest_mode is not None:
- # we have a protection path; enable config file management.
- if src_md5 == dest_md5:
- protected = False
-
- elif src_md5 == cfgfiledict.get(dest_real, [None])[0]:
- # An identical update has previously been
- # merged. Skip it unless the user has chosen
- # --noconfmem.
- move_me = protected = bool(cfgfiledict["IGNORE"])
-
- if protected and \
- (dest_link is not None or src_link is not None) and \
- dest_link != src_link:
- # If either one is a symlink, and they are not
- # identical symlinks, then force config protection.
- force = True
-
- if move_me:
- # Merging a new file, so update confmem.
- cfgfiledict[dest_real] = [src_md5]
- elif dest_md5 == cfgfiledict.get(dest_real, [None])[0]:
- # A previously remembered update has been
- # accepted, so it is removed from confmem.
- del cfgfiledict[dest_real]
-
- if protected and move_me:
- dest = new_protect_filename(dest,
- newmd5=(dest_link or src_md5),
- force=force)
-
- return dest, protected, move_me
-
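- # Illustrative note (not part of the original file): when _protect()
- # returns a modified dest, new_protect_filename() typically yields a
- # sibling path such as '/etc/foo/._cfg0000_bar' (hypothetical example),
- # which tools like dispatch-conf or etc-update later present to the admin.
-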
- def _merged_path(self, path, lstatobj, exists=True):
- previous_path = self._device_path_map.get(lstatobj.st_dev)
- if previous_path is None or previous_path is False or \
- (exists and len(path) < len(previous_path)):
- if exists:
- self._device_path_map[lstatobj.st_dev] = path
- else:
- # This entry is used to indicate that we've unmerged
- # a file from this device, and later, this entry is
- # replaced by a parent directory.
- self._device_path_map[lstatobj.st_dev] = False
-
- def _post_merge_sync(self):
- """
- Call this after merge or unmerge, in order to sync relevant files to
- disk and avoid data-loss in the event of a power failure. This method
- does nothing if FEATURES=merge-sync is disabled.
- """
- if not self._device_path_map or \
- "merge-sync" not in self.settings.features:
- return
-
- returncode = None
- if platform.system() == "Linux":
-
- paths = []
- for path in self._device_path_map.values():
- if path is not False:
- paths.append(path)
- paths = tuple(paths)
-
- proc = SyncfsProcess(paths=paths,
- scheduler=(self._scheduler or
- portage._internal_caller and global_event_loop() or
- EventLoop(main=False)))
- proc.start()
- returncode = proc.wait()
-
- if returncode is None or returncode != os.EX_OK:
- try:
- proc = subprocess.Popen(["sync"])
- except EnvironmentError:
- pass
- else:
- proc.wait()
-
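- # Illustrative note (not part of the original file): _post_merge_sync()
- # only acts when the merge-sync feature is enabled, e.g. via a make.conf
- # entry such as:
- #   FEATURES="merge-sync"
-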
- @_slot_locked
- def merge(self, mergeroot, inforoot, myroot=None, myebuild=None, cleanup=0,
- mydbapi=None, prev_mtimes=None, counter=None):
- """
- @param myroot: ignored, self._eroot is used instead
- """
- myroot = None
- retval = -1
- parallel_install = "parallel-install" in self.settings.features
- if not parallel_install:
- self.lockdb()
- self.vartree.dbapi._bump_mtime(self.mycpv)
- if self._scheduler is None:
- self._scheduler = SchedulerInterface(portage._internal_caller and
- global_event_loop() or EventLoop(main=False))
- try:
- retval = self.treewalk(mergeroot, myroot, inforoot, myebuild,
- cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes,
- counter=counter)
-
- # If PORTAGE_BUILDDIR doesn't exist, then it probably means
- # fail-clean is enabled, and the success/die hooks have
- # already been called by EbuildPhase.
- if os.path.isdir(self.settings['PORTAGE_BUILDDIR']):
-
- if retval == os.EX_OK:
- phase = 'success_hooks'
- else:
- phase = 'die_hooks'
-
- ebuild_phase = MiscFunctionsProcess(
- background=False, commands=[phase],
- scheduler=self._scheduler, settings=self.settings)
- ebuild_phase.start()
- ebuild_phase.wait()
- self._elog_process()
-
- if 'noclean' not in self.settings.features and \
- (retval == os.EX_OK or \
- 'fail-clean' in self.settings.features):
- if myebuild is None:
- myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
-
- doebuild_environment(myebuild, "clean",
- settings=self.settings, db=mydbapi)
- phase = EbuildPhase(background=False, phase="clean",
- scheduler=self._scheduler, settings=self.settings)
- phase.start()
- phase.wait()
- finally:
- self.settings.pop('REPLACING_VERSIONS', None)
- if self.vartree.dbapi._linkmap is None:
- # preserve-libs is entirely disabled
- pass
- else:
- self.vartree.dbapi._linkmap._clear_cache()
- self.vartree.dbapi._bump_mtime(self.mycpv)
- if not parallel_install:
- self.unlockdb()
-
- if retval == os.EX_OK and self._postinst_failure:
- retval = portage.const.RETURNCODE_POSTINST_FAILURE
-
- return retval
-
- def getstring(self,name):
- "returns contents of a file with whitespace converted to spaces"
- if not os.path.exists(self.dbdir+"/"+name):
- return ""
- with io.open(
- _unicode_encode(os.path.join(self.dbdir, name),
- encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['repo.content'], errors='replace'
- ) as f:
- mydata = f.read().split()
- return " ".join(mydata)
-
- def copyfile(self,fname):
- shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))
-
- def getfile(self,fname):
- if not os.path.exists(self.dbdir+"/"+fname):
- return ""
- with io.open(_unicode_encode(os.path.join(self.dbdir, fname),
- encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['repo.content'], errors='replace'
- ) as f:
- return f.read()
-
- def setfile(self,fname,data):
- kwargs = {}
- if fname == 'environment.bz2' or not isinstance(data, basestring):
- kwargs['mode'] = 'wb'
- else:
- kwargs['mode'] = 'w'
- kwargs['encoding'] = _encodings['repo.content']
- write_atomic(os.path.join(self.dbdir, fname), data, **kwargs)
-
- def getelements(self,ename):
- if not os.path.exists(self.dbdir+"/"+ename):
- return []
- with io.open(_unicode_encode(
- os.path.join(self.dbdir, ename),
- encoding=_encodings['fs'], errors='strict'),
- mode='r', encoding=_encodings['repo.content'], errors='replace'
- ) as f:
- mylines = f.readlines()
- myreturn = []
- for x in mylines:
- for y in x[:-1].split():
- myreturn.append(y)
- return myreturn
-
- def setelements(self,mylist,ename):
- with io.open(_unicode_encode(
- os.path.join(self.dbdir, ename),
- encoding=_encodings['fs'], errors='strict'),
- mode='w', encoding=_encodings['repo.content'],
- errors='backslashreplace') as f:
- for x in mylist:
- f.write("%s\n" % x)
-
- def isregular(self):
- "Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"
- return os.path.exists(os.path.join(self.dbdir, "CATEGORY"))
-
- def _pre_merge_backup(self, backup_dblink, downgrade):
-
- if ("unmerge-backup" in self.settings.features or
- (downgrade and "downgrade-backup" in self.settings.features)):
- return self._quickpkg_dblink(backup_dblink, False, None)
-
- return os.EX_OK
-
- def _pre_unmerge_backup(self, background):
-
- if "unmerge-backup" in self.settings.features:
- logfile = None
- if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
- logfile = self.settings.get("PORTAGE_LOG_FILE")
- return self._quickpkg_dblink(self, background, logfile)
-
- return os.EX_OK
-
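- # Illustrative note (not part of the original file): the backup hooks
- # above are driven by FEATURES flags, e.g. in make.conf:
- #   FEATURES="unmerge-backup downgrade-backup"
- # which makes portage call quickpkg to create a binary package before
- # unmerging or downgrading an installed instance.
-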
- def _quickpkg_dblink(self, backup_dblink, background, logfile):
-
- build_time = backup_dblink.getfile('BUILD_TIME')
- try:
- build_time = long(build_time.strip())
- except ValueError:
- build_time = 0
-
- trees = QueryCommand.get_db()[self.settings["EROOT"]]
- bintree = trees["bintree"]
-
- for binpkg in reversed(
- bintree.dbapi.match('={}'.format(backup_dblink.mycpv))):
- if binpkg.build_time == build_time:
- return os.EX_OK
-
- self.lockdb()
- try:
-
- if not backup_dblink.exists():
- # It got unmerged by a concurrent process.
- return os.EX_OK
-
- # Call quickpkg for support of QUICKPKG_DEFAULT_OPTS and stuff.
- quickpkg_binary = os.path.join(self.settings["PORTAGE_BIN_PATH"],
- "quickpkg")
-
- if not os.access(quickpkg_binary, os.X_OK):
- # If not running from the source tree, use PATH.
- quickpkg_binary = find_binary("quickpkg")
- if quickpkg_binary is None:
- self._display_merge(
- _("%s: command not found") % "quickpkg",
- level=logging.ERROR, noiselevel=-1)
- return 127
-
- # Let quickpkg inherit the global vartree config's env.
- env = dict(self.vartree.settings.items())
- env["__PORTAGE_INHERIT_VARDB_LOCK"] = "1"
-
- pythonpath = [x for x in env.get('PYTHONPATH', '').split(":") if x]
- if not pythonpath or \
- not os.path.samefile(pythonpath[0], portage._pym_path):
- pythonpath.insert(0, portage._pym_path)
- env['PYTHONPATH'] = ":".join(pythonpath)
-
- quickpkg_proc = SpawnProcess(
- args=[portage._python_interpreter, quickpkg_binary,
- "=%s" % (backup_dblink.mycpv,)],
- background=background, env=env,
- scheduler=self._scheduler, logfile=logfile)
- quickpkg_proc.start()
-
- return quickpkg_proc.wait()
-
- finally:
- self.unlockdb()
-
-def merge(mycat, mypkg, pkgloc, infloc,
- myroot=None, settings=None, myebuild=None,
- mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None,
- scheduler=None, fd_pipes=None):
- """
- @param myroot: ignored, settings['EROOT'] is used instead
- """
- myroot = None
- if settings is None:
- raise TypeError("settings argument is required")
- if not os.access(settings['EROOT'], os.W_OK):
- writemsg(_("Permission denied: access('%s', W_OK)\n") % settings['EROOT'],
- noiselevel=-1)
- return errno.EACCES
- background = (settings.get('PORTAGE_BACKGROUND') == '1')
- merge_task = MergeProcess(
- mycat=mycat, mypkg=mypkg, settings=settings,
- treetype=mytree, vartree=vartree,
- scheduler=(scheduler or portage._internal_caller and
- global_event_loop() or EventLoop(main=False)),
- background=background, blockers=blockers, pkgloc=pkgloc,
- infloc=infloc, myebuild=myebuild, mydbapi=mydbapi,
- prev_mtimes=prev_mtimes, logfile=settings.get('PORTAGE_LOG_FILE'),
- fd_pipes=fd_pipes)
- merge_task.start()
- retcode = merge_task.wait()
- return retcode
-
-def unmerge(cat, pkg, myroot=None, settings=None,
- mytrimworld=None, vartree=None,
- ldpath_mtimes=None, scheduler=None):
- """
- @param myroot: ignored, settings['EROOT'] is used instead
- @param mytrimworld: ignored
- """
- myroot = None
- if settings is None:
- raise TypeError("settings argument is required")
- mylink = dblink(cat, pkg, settings=settings, treetype="vartree",
- vartree=vartree, scheduler=scheduler)
- vartree = mylink.vartree
- parallel_install = "parallel-install" in settings.features
- if not parallel_install:
- mylink.lockdb()
- try:
- if mylink.exists():
- retval = mylink.unmerge(ldpath_mtimes=ldpath_mtimes)
- if retval == os.EX_OK:
- mylink.lockdb()
- try:
- mylink.delete()
- finally:
- mylink.unlockdb()
- return retval
- return os.EX_OK
- finally:
- if vartree.dbapi._linkmap is None:
- # preserve-libs is entirely disabled
- pass
- else:
- vartree.dbapi._linkmap._clear_cache()
- if not parallel_install:
- mylink.unlockdb()
-
-def write_contents(contents, root, f):
- """
- Write contents to any file-like object. The file will be left open.
- """
- root_len = len(root) - 1
- for filename in sorted(contents):
- entry_data = contents[filename]
- entry_type = entry_data[0]
- relative_filename = filename[root_len:]
- if entry_type == "obj":
- entry_type, mtime, md5sum = entry_data
- line = "%s %s %s %s\n" % \
- (entry_type, relative_filename, md5sum, mtime)
- elif entry_type == "sym":
- entry_type, mtime, link = entry_data
- line = "%s %s -> %s %s\n" % \
- (entry_type, relative_filename, link, mtime)
- else: # dir, dev, fif
- line = "%s %s\n" % (entry_type, relative_filename)
- f.write(line)
-
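- # Illustrative usage sketch (hypothetical, not part of the original file):
- # dump a CONTENTS-style listing for an installed package represented by a
- # dblink instance 'mydblink':
- #
- #   contents = mydblink.getcontents()
- #   with io.open('/tmp/CONTENTS.copy', mode='w') as f:
- #       write_contents(contents, mydblink.settings['ROOT'], f)
-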
-def tar_contents(contents, root, tar, protect=None, onProgress=None,
- xattrs=False):
- os = _os_merge
- encoding = _encodings['merge']
-
- try:
- for x in contents:
- _unicode_encode(x,
- encoding=_encodings['merge'],
- errors='strict')
- except UnicodeEncodeError:
- # The package appears to have been merged with a
- # different value of sys.getfilesystemencoding(),
- # so fall back to utf_8 if appropriate.
- try:
- for x in contents:
- _unicode_encode(x,
- encoding=_encodings['fs'],
- errors='strict')
- except UnicodeEncodeError:
- pass
- else:
- os = portage.os
- encoding = _encodings['fs']
-
- tar.encoding = encoding
- root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
- id_strings = {}
- maxval = len(contents)
- curval = 0
- if onProgress:
- onProgress(maxval, 0)
- paths = list(contents)
- paths.sort()
- for path in paths:
- curval += 1
- try:
- lst = os.lstat(path)
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- del e
- if onProgress:
- onProgress(maxval, curval)
- continue
- contents_type = contents[path][0]
- if path.startswith(root):
- arcname = "./" + path[len(root):]
- else:
- raise ValueError("invalid root argument: '%s'" % root)
- live_path = path
- if 'dir' == contents_type and \
- not stat.S_ISDIR(lst.st_mode) and \
- os.path.isdir(live_path):
- # Even though this was a directory in the original ${D}, it exists
- # as a symlink to a directory in the live filesystem. It must be
- # recorded as a real directory in the tar file to ensure that tar
- # can properly extract its children.
- live_path = os.path.realpath(live_path)
- lst = os.lstat(live_path)
-
- # Since os.lstat() inside TarFile.gettarinfo() can trigger a
- # UnicodeEncodeError when python has something other than utf_8
- # return from sys.getfilesystemencoding() (as in bug #388773),
- # we implement the needed functionality here, using the result
- # of our successful lstat call. An alternative to this would be
- # to pass in the fileobj argument to TarFile.gettarinfo(), so
- # that it could use fstat instead of lstat. However, that would
- # have the unwanted effect of dereferencing symlinks.
-
- tarinfo = tar.tarinfo()
- tarinfo.name = arcname
- tarinfo.mode = lst.st_mode
- tarinfo.uid = lst.st_uid
- tarinfo.gid = lst.st_gid
- tarinfo.size = 0
- tarinfo.mtime = lst.st_mtime
- tarinfo.linkname = ""
- if stat.S_ISREG(lst.st_mode):
- inode = (lst.st_ino, lst.st_dev)
- if (lst.st_nlink > 1 and
- inode in tar.inodes and
- arcname != tar.inodes[inode]):
- tarinfo.type = tarfile.LNKTYPE
- tarinfo.linkname = tar.inodes[inode]
- else:
- tar.inodes[inode] = arcname
- tarinfo.type = tarfile.REGTYPE
- tarinfo.size = lst.st_size
- elif stat.S_ISDIR(lst.st_mode):
- tarinfo.type = tarfile.DIRTYPE
- elif stat.S_ISLNK(lst.st_mode):
- tarinfo.type = tarfile.SYMTYPE
- tarinfo.linkname = os.readlink(live_path)
- else:
- continue
- try:
- tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
- except KeyError:
- pass
- try:
- tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
- except KeyError:
- pass
-
- if stat.S_ISREG(lst.st_mode):
- if protect and protect(path):
- # Create an empty file as a place holder in order to avoid
- # potential collision-protect issues.
- f = tempfile.TemporaryFile()
- f.write(_unicode_encode(
- "# empty file because --include-config=n " + \
- "when `quickpkg` was used\n"))
- f.flush()
- f.seek(0)
- tarinfo.size = os.fstat(f.fileno()).st_size
- tar.addfile(tarinfo, f)
- f.close()
- else:
- path_bytes = _unicode_encode(path,
- encoding=encoding,
- errors='strict')
-
- if xattrs:
- # Compatible with GNU tar, which saves the xattrs
- # under the SCHILY.xattr namespace.
- for k in xattr.list(path_bytes):
- tarinfo.pax_headers['SCHILY.xattr.' +
- _unicode_decode(k)] = _unicode_decode(
- xattr.get(path_bytes, _unicode_encode(k)))
-
- with open(path_bytes, 'rb') as f:
- tar.addfile(tarinfo, f)
-
- else:
- tar.addfile(tarinfo)
- if onProgress:
- onProgress(maxval, curval)
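- # Illustrative usage sketch (hypothetical, not part of the original file):
- # tar_contents() expects the caller to provide a tarfile object with an
- # 'inodes' mapping for hardlink tracking (as used above), e.g.:
- #
- #   with tarfile.open('/tmp/foo-1.0.tar.bz2', 'w:bz2') as tar:
- #       tar.inodes = {}
- #       tar_contents(mydblink.getcontents(), settings['ROOT'], tar)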
diff --git a/pym/portage/dbapi/virtual.py b/pym/portage/dbapi/virtual.py
deleted file mode 100644
index 3f7e6c221..000000000
--- a/pym/portage/dbapi/virtual.py
+++ /dev/null
@@ -1,232 +0,0 @@
-# Copyright 1998-2013 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from __future__ import unicode_literals
-
-from portage.dbapi import dbapi
-from portage.dbapi.dep_expand import dep_expand
-from portage.versions import cpv_getkey, _pkg_str
-
-class fakedbapi(dbapi):
- """A fake dbapi that allows consumers to inject/remove packages to/from it.
- portage.settings is required to maintain the dbapi.
- """
- def __init__(self, settings=None, exclusive_slots=True,
- multi_instance=False):
- """
- @param exclusive_slots: When True, injecting a package with SLOT
- metadata causes an existing package in the same slot to be
- automatically removed (default is True).
- @type exclusive_slots: Boolean
- @param multi_instance: When True, multiple instances with the
- same cpv may be stored simultaneously, as long as they are
- distinguishable (default is False).
- @type multi_instance: Boolean
- """
- self._exclusive_slots = exclusive_slots
- self.cpvdict = {}
- self.cpdict = {}
- if settings is None:
- from portage import settings
- self.settings = settings
- self._match_cache = {}
- self._set_multi_instance(multi_instance)
-
- def _set_multi_instance(self, multi_instance):
- """
- Enable or disable multi_instance mode. This should be called before any
- packages are injected, so that all packages are indexed with
- the same implementation of self._instance_key.
- """
- if self.cpvdict:
- raise AssertionError("_set_multi_instance called after "
- "packages have already been added")
- self._multi_instance = multi_instance
- if multi_instance:
- self._instance_key = self._instance_key_multi_instance
- else:
- self._instance_key = self._instance_key_cpv
-
- def _instance_key_cpv(self, cpv, support_string=False):
- return cpv
-
- def _instance_key_multi_instance(self, cpv, support_string=False):
- try:
- return (cpv, cpv.build_id, cpv.file_size, cpv.build_time,
- cpv.mtime)
- except AttributeError:
- if not support_string:
- raise
-
- # Fallback for interfaces such as aux_get where API consumers
- # may pass in a plain string.
- latest = None
- for pkg in self.cp_list(cpv_getkey(cpv)):
- if pkg == cpv and (
- latest is None or
- latest.build_time < pkg.build_time):
- latest = pkg
-
- if latest is not None:
- return (latest, latest.build_id, latest.file_size,
- latest.build_time, latest.mtime)
-
- raise KeyError(cpv)
-
- def clear(self):
- """
- Remove all packages.
- """
- self._clear_cache()
- self.cpvdict.clear()
- self.cpdict.clear()
-
- def _clear_cache(self):
- if self._categories is not None:
- self._categories = None
- if self._match_cache:
- self._match_cache = {}
-
- def match(self, origdep, use_cache=1):
- atom = dep_expand(origdep, mydb=self, settings=self.settings)
- cache_key = (atom, atom.unevaluated_atom)
- result = self._match_cache.get(cache_key)
- if result is not None:
- return result[:]
- result = list(self._iter_match(atom, self.cp_list(atom.cp)))
- self._match_cache[cache_key] = result
- return result[:]
-
- def cpv_exists(self, mycpv, myrepo=None):
- try:
- return self._instance_key(mycpv,
- support_string=True) in self.cpvdict
- except KeyError:
- # _instance_key failure
- return False
-
- def cp_list(self, mycp, use_cache=1, myrepo=None):
- # NOTE: Cache can be safely shared with the match cache, since the
- # match cache uses the result from dep_expand for the cache_key.
- cache_key = (mycp, mycp)
- cachelist = self._match_cache.get(cache_key)
- if cachelist is not None:
- return cachelist[:]
- cpv_list = self.cpdict.get(mycp)
- if cpv_list is None:
- cpv_list = []
- self._cpv_sort_ascending(cpv_list)
- self._match_cache[cache_key] = cpv_list
- return cpv_list[:]
-
- def cp_all(self, sort=False):
- return sorted(self.cpdict) if sort else list(self.cpdict)
-
- def cpv_all(self):
- if self._multi_instance:
- return [x[0] for x in self.cpvdict]
- else:
- return list(self.cpvdict)
-
- def cpv_inject(self, mycpv, metadata=None):
- """Adds a cpv to the list of available packages. See the
- exclusive_slots constructor parameter for behavior with
- respect to SLOT metadata.
- @param mycpv: cpv for the package to inject
- @type mycpv: str
- @param metadata: dictionary of raw metadata for aux_get() calls
- @type metadata: dict
- """
- self._clear_cache()
-
- try:
- mycp = mycpv.cp
- except AttributeError:
- mycp = None
- try:
- myslot = mycpv.slot
- except AttributeError:
- myslot = None
-
- if mycp is None or \
- (myslot is None and metadata is not None and metadata.get('SLOT')):
- if metadata is None:
- mycpv = _pkg_str(mycpv, db=self)
- else:
- mycpv = _pkg_str(mycpv, metadata=metadata,
- settings=self.settings, db=self)
-
- mycp = mycpv.cp
- try:
- myslot = mycpv.slot
- except AttributeError:
- pass
-
- instance_key = self._instance_key(mycpv)
- self.cpvdict[instance_key] = metadata
- if not self._exclusive_slots:
- myslot = None
- if myslot and mycp in self.cpdict:
- # If necessary, remove another package in the same SLOT.
- for cpv in self.cpdict[mycp]:
- if instance_key != self._instance_key(cpv):
- try:
- other_slot = cpv.slot
- except AttributeError:
- pass
- else:
- if myslot == other_slot:
- self.cpv_remove(cpv)
- break
-
- cp_list = self.cpdict.get(mycp, [])
- cp_list = [x for x in cp_list
- if self._instance_key(x) != instance_key]
- cp_list.append(mycpv)
- self.cpdict[mycp] = cp_list
-
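- # Illustrative usage sketch (hypothetical package names, not part of the
- # original file): with the default exclusive_slots=True, injecting a
- # second instance in the same SLOT replaces the first one:
- #
- #   fake = fakedbapi(settings=portage.settings)
- #   fake.cpv_inject('dev-libs/foo-1.0', metadata={'SLOT': '0'})
- #   fake.cpv_inject('dev-libs/foo-1.1', metadata={'SLOT': '0'})
- #   fake.cp_list('dev-libs/foo')  # only dev-libs/foo-1.1 remains
-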
- def cpv_remove(self,mycpv):
- """Removes a cpv from the list of available packages."""
- self._clear_cache()
- mycp = cpv_getkey(mycpv)
- instance_key = self._instance_key(mycpv)
- self.cpvdict.pop(instance_key, None)
- cp_list = self.cpdict.get(mycp)
- if cp_list is not None:
- cp_list = [x for x in cp_list
- if self._instance_key(x) != instance_key]
- if cp_list:
- self.cpdict[mycp] = cp_list
- else:
- del self.cpdict[mycp]
-
- def aux_get(self, mycpv, wants, myrepo=None):
- metadata = self.cpvdict.get(
- self._instance_key(mycpv, support_string=True))
- if metadata is None:
- raise KeyError(mycpv)
- return [metadata.get(x, "") for x in wants]
-
- def aux_update(self, cpv, values):
- self._clear_cache()
- metadata = self.cpvdict.get(
- self._instance_key(cpv, support_string=True))
- if metadata is None:
- raise KeyError(cpv)
- metadata.update(values)
-
-class testdbapi(object):
- """A dbapi instance with completely fake functions to get by without hitting disk.
- TODO(antarus):
- This class really needs to be rewritten to have better stubs; but these work for now.
- The dbapi classes themselves need unit tests...and that will be a lot of work.
- """
-
- def __init__(self):
- self.cpvs = {}
- def f(*args, **kwargs):
- return True
- fake_api = dir(dbapi)
- for call in fake_api:
- if not hasattr(self, call):
- setattr(self, call, f)