aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
Diffstat (limited to 'lib/portage/package/ebuild/_config')
-rw-r--r--lib/portage/package/ebuild/_config/KeywordsManager.py325
-rw-r--r--lib/portage/package/ebuild/_config/LicenseManager.py237
-rw-r--r--lib/portage/package/ebuild/_config/LocationsManager.py349
-rw-r--r--lib/portage/package/ebuild/_config/MaskManager.py261
-rw-r--r--lib/portage/package/ebuild/_config/UseManager.py579
-rw-r--r--lib/portage/package/ebuild/_config/VirtualsManager.py233
-rw-r--r--lib/portage/package/ebuild/_config/__init__.py2
-rw-r--r--lib/portage/package/ebuild/_config/env_var_validation.py23
-rw-r--r--lib/portage/package/ebuild/_config/features_set.py128
-rw-r--r--lib/portage/package/ebuild/_config/helper.py64
-rw-r--r--lib/portage/package/ebuild/_config/special_env_vars.py211
-rw-r--r--lib/portage/package/ebuild/_config/unpack_dependencies.py38
12 files changed, 2450 insertions, 0 deletions
diff --git a/lib/portage/package/ebuild/_config/KeywordsManager.py b/lib/portage/package/ebuild/_config/KeywordsManager.py
new file mode 100644
index 000000000..fd0a6318d
--- /dev/null
+++ b/lib/portage/package/ebuild/_config/KeywordsManager.py
@@ -0,0 +1,325 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'KeywordsManager',
+)
+
+from _emerge.Package import Package
+from portage import os
+from portage.dep import ExtendedAtomDict, _repo_separator, _slot_separator
+from portage.localization import _
+from portage.package.ebuild._config.helper import ordered_by_atom_specificity
+from portage.util import grabdict_package, stack_lists, writemsg
+from portage.versions import _pkg_str
+
+class KeywordsManager(object):
+ """Manager class to handle keywords processing and validation"""
+
+ def __init__(self, profiles, abs_user_config, user_config=True,
+ global_accept_keywords=""):
+ self._pkeywords_list = []
+ rawpkeywords = [grabdict_package(
+ os.path.join(x.location, "package.keywords"),
+ recursive=x.portage1_directories,
+ verify_eapi=True, eapi=x.eapi, eapi_default=None,
+ allow_build_id=x.allow_build_id)
+ for x in profiles]
+ for pkeyworddict in rawpkeywords:
+ if not pkeyworddict:
+ # Omit non-existent files from the stack.
+ continue
+ cpdict = {}
+ for k, v in pkeyworddict.items():
+ cpdict.setdefault(k.cp, {})[k] = v
+ self._pkeywords_list.append(cpdict)
+ self._pkeywords_list = tuple(self._pkeywords_list)
+
+ self._p_accept_keywords = []
+ raw_p_accept_keywords = [grabdict_package(
+ os.path.join(x.location, "package.accept_keywords"),
+ recursive=x.portage1_directories,
+ verify_eapi=True, eapi=x.eapi, eapi_default=None)
+ for x in profiles]
+ for d in raw_p_accept_keywords:
+ if not d:
+ # Omit non-existent files from the stack.
+ continue
+ cpdict = {}
+ for k, v in d.items():
+ cpdict.setdefault(k.cp, {})[k] = tuple(v)
+ self._p_accept_keywords.append(cpdict)
+ self._p_accept_keywords = tuple(self._p_accept_keywords)
+
+ self.pkeywordsdict = ExtendedAtomDict(dict)
+
+ if user_config:
+ pkgdict = grabdict_package(
+ os.path.join(abs_user_config, "package.keywords"),
+ recursive=1, allow_wildcard=True, allow_repo=True,
+ verify_eapi=False, allow_build_id=True)
+
+ for k, v in grabdict_package(
+ os.path.join(abs_user_config, "package.accept_keywords"),
+ recursive=1, allow_wildcard=True, allow_repo=True,
+ verify_eapi=False, allow_build_id=True).items():
+ pkgdict.setdefault(k, []).extend(v)
+
+ accept_keywords_defaults = global_accept_keywords.split()
+ accept_keywords_defaults = tuple('~' + keyword for keyword in \
+ accept_keywords_defaults if keyword[:1] not in "~-")
+ for k, v in pkgdict.items():
+ # default to ~arch if no specific keyword is given
+ if not v:
+ v = accept_keywords_defaults
+ else:
+ v = tuple(v)
+ self.pkeywordsdict.setdefault(k.cp, {})[k] = v
+
+
+ def getKeywords(self, cpv, slot, keywords, repo):
+ try:
+ cpv.slot
+ except AttributeError:
+ pkg = _pkg_str(cpv, slot=slot, repo=repo)
+ else:
+ pkg = cpv
+ cp = pkg.cp
+ keywords = [[x for x in keywords.split() if x != "-*"]]
+ for pkeywords_dict in self._pkeywords_list:
+ cpdict = pkeywords_dict.get(cp)
+ if cpdict:
+ pkg_keywords = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_keywords:
+ keywords.extend(pkg_keywords)
+ return stack_lists(keywords, incremental=True)
+
+ def isStable(self, pkg, global_accept_keywords, backuped_accept_keywords):
+ mygroups = self.getKeywords(pkg, None, pkg._metadata["KEYWORDS"], None)
+ pgroups = global_accept_keywords.split()
+
+ unmaskgroups = self.getPKeywords(pkg, None, None,
+ global_accept_keywords)
+ pgroups.extend(unmaskgroups)
+
+ egroups = backuped_accept_keywords.split()
+
+ if unmaskgroups or egroups:
+ pgroups = self._getEgroups(egroups, pgroups)
+ else:
+ pgroups = set(pgroups)
+
+ if self._getMissingKeywords(pkg, pgroups, mygroups):
+ return False
+
+ # If replacing all keywords with unstable variants would mask the
+ # package, then it's considered stable for the purposes of
+ # use.stable.mask/force interpretation. For unstable configurations,
+ # this guarantees that the effective use.force/mask settings for a
+ # particular ebuild do not change when that ebuild is stabilized.
+ unstable = []
+ for kw in mygroups:
+ if kw[:1] != "~":
+ kw = "~" + kw
+ unstable.append(kw)
+
+ return bool(self._getMissingKeywords(pkg, pgroups, set(unstable)))
+
+ def getMissingKeywords(self,
+ cpv,
+ slot,
+ keywords,
+ repo,
+ global_accept_keywords,
+ backuped_accept_keywords):
+ """
+ Take a package and return a list of any KEYWORDS that the user may
+ need to accept for the given package. If the KEYWORDS are empty
+	and the ** keyword has not been accepted, the returned list will
+ contain ** alone (in order to distinguish from the case of "none
+ missing").
+
+ @param cpv: The package name (for package.keywords support)
+ @type cpv: String
+ @param slot: The 'SLOT' key from the raw package metadata
+ @type slot: String
+ @param keywords: The 'KEYWORDS' key from the raw package metadata
+ @type keywords: String
+ @param global_accept_keywords: The current value of ACCEPT_KEYWORDS
+ @type global_accept_keywords: String
+ @param backuped_accept_keywords: ACCEPT_KEYWORDS from the backup env
+ @type backuped_accept_keywords: String
+ @rtype: List
+ @return: A list of KEYWORDS that have not been accepted.
+ """
+
+ mygroups = self.getKeywords(cpv, slot, keywords, repo)
+ # Repoman may modify this attribute as necessary.
+ pgroups = global_accept_keywords.split()
+
+ unmaskgroups = self.getPKeywords(cpv, slot, repo,
+ global_accept_keywords)
+ pgroups.extend(unmaskgroups)
+
+ # Hack: Need to check the env directly here as otherwise stacking
+ # doesn't work properly as negative values are lost in the config
+ # object (bug #139600)
+ egroups = backuped_accept_keywords.split()
+
+ if unmaskgroups or egroups:
+ pgroups = self._getEgroups(egroups, pgroups)
+ else:
+ pgroups = set(pgroups)
+
+ return self._getMissingKeywords(cpv, pgroups, mygroups)
+
+
+ def getRawMissingKeywords(self,
+ cpv,
+ slot,
+ keywords,
+ repo,
+ global_accept_keywords):
+ """
+ Take a package and return a list of any KEYWORDS that the user may
+ need to accept for the given package. If the KEYWORDS are empty,
+ the returned list will contain ** alone (in order to distinguish
+ from the case of "none missing"). This DOES NOT apply any user config
+ package.accept_keywords acceptance.
+
+ @param cpv: The package name (for package.keywords support)
+ @type cpv: String
+ @param slot: The 'SLOT' key from the raw package metadata
+ @type slot: String
+ @param keywords: The 'KEYWORDS' key from the raw package metadata
+ @type keywords: String
+ @param global_accept_keywords: The current value of ACCEPT_KEYWORDS
+ @type global_accept_keywords: String
+ @rtype: List
+ @return: lists of KEYWORDS that have not been accepted
+ and the keywords it looked for.
+ """
+
+ mygroups = self.getKeywords(cpv, slot, keywords, repo)
+ pgroups = global_accept_keywords.split()
+ pgroups = set(pgroups)
+ return self._getMissingKeywords(cpv, pgroups, mygroups)
+
+
+ @staticmethod
+ def _getEgroups(egroups, mygroups):
+ """gets any keywords defined in the environment
+
+	@param egroups: keywords from the backup environment (ACCEPT_KEYWORDS)
+	@type egroups: List
+ @rtype: List
+ @return: list of KEYWORDS that have been accepted
+ """
+ mygroups = list(mygroups)
+ mygroups.extend(egroups)
+ inc_pgroups = set()
+ for x in mygroups:
+ if x[:1] == "-":
+ if x == "-*":
+ inc_pgroups.clear()
+ else:
+ inc_pgroups.discard(x[1:])
+ else:
+ inc_pgroups.add(x)
+ return inc_pgroups
+
+
+ @staticmethod
+ def _getMissingKeywords(cpv, pgroups, mygroups):
+ """Determines the missing keywords
+
+ @param pgroups: The pkg keywords accepted
+ @type pgroups: list
+ @param mygroups: The ebuild keywords
+ @type mygroups: list
+ """
+ match = False
+ hasstable = False
+ hastesting = False
+ for gp in mygroups:
+ if gp == "*":
+ match = True
+ break
+ elif gp == "~*":
+ hastesting = True
+ for x in pgroups:
+ if x[:1] == "~":
+ match = True
+ break
+ if match:
+ break
+ elif gp in pgroups:
+ match = True
+ break
+ elif gp.startswith("~"):
+ hastesting = True
+ elif not gp.startswith("-"):
+ hasstable = True
+ if not match and \
+ ((hastesting and "~*" in pgroups) or \
+ (hasstable and "*" in pgroups) or "**" in pgroups):
+ match = True
+ if match:
+ missing = []
+ else:
+ if not mygroups:
+ # If KEYWORDS is empty then we still have to return something
+ # in order to distinguish from the case of "none missing".
+ mygroups = ["**"]
+ missing = mygroups
+ return missing
+
+
+ def getPKeywords(self, cpv, slot, repo, global_accept_keywords):
+	"""Gets any package.keywords settings for the given
+ cpv, slot and repo
+
+ @param cpv: The package name (for package.keywords support)
+ @type cpv: String
+ @param slot: The 'SLOT' key from the raw package metadata
+ @type slot: String
+	@param repo: The repository name of the given package
+	@type repo: String
+ @param global_accept_keywords: The current value of ACCEPT_KEYWORDS
+ @type global_accept_keywords: String
+ @param backuped_accept_keywords: ACCEPT_KEYWORDS from the backup env
+ @type backuped_accept_keywords: String
+ @rtype: List
+ @return: list of KEYWORDS that have been accepted
+ """
+
+ pgroups = global_accept_keywords.split()
+ try:
+ cpv.slot
+ except AttributeError:
+ cpv = _pkg_str(cpv, slot=slot, repo=repo)
+ cp = cpv.cp
+
+ unmaskgroups = []
+ if self._p_accept_keywords:
+ accept_keywords_defaults = tuple('~' + keyword for keyword in \
+ pgroups if keyword[:1] not in "~-")
+ for d in self._p_accept_keywords:
+ cpdict = d.get(cp)
+ if cpdict:
+ pkg_accept_keywords = \
+ ordered_by_atom_specificity(cpdict, cpv)
+ if pkg_accept_keywords:
+ for x in pkg_accept_keywords:
+ if not x:
+ x = accept_keywords_defaults
+ unmaskgroups.extend(x)
+
+ pkgdict = self.pkeywordsdict.get(cp)
+ if pkgdict:
+ pkg_accept_keywords = \
+ ordered_by_atom_specificity(pkgdict, cpv)
+ if pkg_accept_keywords:
+ for x in pkg_accept_keywords:
+ unmaskgroups.extend(x)
+ return unmaskgroups
diff --git a/lib/portage/package/ebuild/_config/LicenseManager.py b/lib/portage/package/ebuild/_config/LicenseManager.py
new file mode 100644
index 000000000..1d4e08207
--- /dev/null
+++ b/lib/portage/package/ebuild/_config/LicenseManager.py
@@ -0,0 +1,237 @@
+# Copyright 2010-2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'LicenseManager',
+)
+
+from portage import os
+from portage.dep import ExtendedAtomDict, use_reduce
+from portage.exception import InvalidDependString
+from portage.localization import _
+from portage.util import grabdict, grabdict_package, writemsg
+from portage.versions import cpv_getkey, _pkg_str
+
+from portage.package.ebuild._config.helper import ordered_by_atom_specificity
+
+
+class LicenseManager(object):
+
+ def __init__(self, license_group_locations, abs_user_config, user_config=True):
+
+ self._accept_license_str = None
+ self._accept_license = None
+ self._license_groups = {}
+ self._plicensedict = ExtendedAtomDict(dict)
+ self._undef_lic_groups = set()
+
+ if user_config:
+ license_group_locations = list(license_group_locations) + [abs_user_config]
+
+ self._read_license_groups(license_group_locations)
+
+ if user_config:
+ self._read_user_config(abs_user_config)
+
+ def _read_user_config(self, abs_user_config):
+ licdict = grabdict_package(os.path.join(
+ abs_user_config, "package.license"), recursive=1, allow_wildcard=True, allow_repo=True, verify_eapi=False)
+ for k, v in licdict.items():
+ self._plicensedict.setdefault(k.cp, {})[k] = \
+ self.expandLicenseTokens(v)
+
+ def _read_license_groups(self, locations):
+ for loc in locations:
+ for k, v in grabdict(
+ os.path.join(loc, "license_groups")).items():
+ self._license_groups.setdefault(k, []).extend(v)
+
+ for k, v in self._license_groups.items():
+ self._license_groups[k] = frozenset(v)
+
+ def extract_global_changes(self, old=""):
+ ret = old
+ atom_license_map = self._plicensedict.get("*/*")
+ if atom_license_map is not None:
+ v = atom_license_map.pop("*/*", None)
+ if v is not None:
+ ret = " ".join(v)
+ if old:
+ ret = old + " " + ret
+ if not atom_license_map:
+			# No tokens left in atom_license_map, so remove it.
+ del self._plicensedict["*/*"]
+ return ret
+
+ def expandLicenseTokens(self, tokens):
+ """ Take a token from ACCEPT_LICENSE or package.license and expand it
+ if it's a group token (indicated by @) or just return it if it's not a
+ group. If a group is negated then negate all group elements."""
+ expanded_tokens = []
+ for x in tokens:
+ expanded_tokens.extend(self._expandLicenseToken(x, None))
+ return expanded_tokens
+
+ def _expandLicenseToken(self, token, traversed_groups):
+ negate = False
+ rValue = []
+ if token.startswith("-"):
+ negate = True
+ license_name = token[1:]
+ else:
+ license_name = token
+ if not license_name.startswith("@"):
+ rValue.append(token)
+ return rValue
+ group_name = license_name[1:]
+ if traversed_groups is None:
+ traversed_groups = set()
+ license_group = self._license_groups.get(group_name)
+ if group_name in traversed_groups:
+ writemsg(_("Circular license group reference"
+ " detected in '%s'\n") % group_name, noiselevel=-1)
+ rValue.append("@"+group_name)
+ elif license_group:
+ traversed_groups.add(group_name)
+ for l in license_group:
+ if l.startswith("-"):
+ writemsg(_("Skipping invalid element %s"
+ " in license group '%s'\n") % (l, group_name),
+ noiselevel=-1)
+ else:
+ rValue.extend(self._expandLicenseToken(l, traversed_groups))
+ else:
+ if self._license_groups and \
+ group_name not in self._undef_lic_groups:
+ self._undef_lic_groups.add(group_name)
+ writemsg(_("Undefined license group '%s'\n") % group_name,
+ noiselevel=-1)
+ rValue.append("@"+group_name)
+ if negate:
+ rValue = ["-" + token for token in rValue]
+ return rValue
+
+ def _getPkgAcceptLicense(self, cpv, slot, repo):
+ """
+ Get an ACCEPT_LICENSE list, accounting for package.license.
+ """
+ accept_license = self._accept_license
+ cp = cpv_getkey(cpv)
+ cpdict = self._plicensedict.get(cp)
+ if cpdict:
+ if not hasattr(cpv, "slot"):
+ cpv = _pkg_str(cpv, slot=slot, repo=repo)
+ plicence_list = ordered_by_atom_specificity(cpdict, cpv)
+ if plicence_list:
+ accept_license = list(self._accept_license)
+ for x in plicence_list:
+ accept_license.extend(x)
+ return accept_license
+
+ def get_prunned_accept_license(self, cpv, use, lic, slot, repo):
+ """
+ Generate a pruned version of ACCEPT_LICENSE, by intersection with
+ LICENSE. This is required since otherwise ACCEPT_LICENSE might be
+ too big (bigger than ARG_MAX), causing execve() calls to fail with
+ E2BIG errors as in bug #262647.
+ """
+ try:
+ licenses = set(use_reduce(lic, uselist=use, flat=True))
+ except InvalidDependString:
+ licenses = set()
+ licenses.discard('||')
+
+ accept_license = self._getPkgAcceptLicense(cpv, slot, repo)
+
+ if accept_license:
+ acceptable_licenses = set()
+ for x in accept_license:
+ if x == '*':
+ acceptable_licenses.update(licenses)
+ elif x == '-*':
+ acceptable_licenses.clear()
+ elif x[:1] == '-':
+ acceptable_licenses.discard(x[1:])
+ elif x in licenses:
+ acceptable_licenses.add(x)
+
+ licenses = acceptable_licenses
+ return ' '.join(sorted(licenses))
+
+ def getMissingLicenses(self, cpv, use, lic, slot, repo):
+ """
+ Take a LICENSE string and return a list of any licenses that the user
+ may need to accept for the given package. The returned list will not
+ contain any licenses that have already been accepted. This method
+ can throw an InvalidDependString exception.
+
+ @param cpv: The package name (for package.license support)
+ @type cpv: String
+ @param use: "USE" from the cpv's metadata
+ @type use: String
+ @param lic: "LICENSE" from the cpv's metadata
+ @type lic: String
+ @param slot: "SLOT" from the cpv's metadata
+ @type slot: String
+ @rtype: List
+ @return: A list of licenses that have not been accepted.
+ """
+
+ licenses = set(use_reduce(lic, matchall=1, flat=True))
+ licenses.discard('||')
+
+ acceptable_licenses = set()
+ for x in self._getPkgAcceptLicense(cpv, slot, repo):
+ if x == '*':
+ acceptable_licenses.update(licenses)
+ elif x == '-*':
+ acceptable_licenses.clear()
+ elif x[:1] == '-':
+ acceptable_licenses.discard(x[1:])
+ else:
+ acceptable_licenses.add(x)
+
+ license_str = lic
+ if "?" in license_str:
+ use = use.split()
+ else:
+ use = []
+
+ license_struct = use_reduce(license_str, uselist=use, opconvert=True)
+ return self._getMaskedLicenses(license_struct, acceptable_licenses)
+
+ def _getMaskedLicenses(self, license_struct, acceptable_licenses):
+ if not license_struct:
+ return []
+ if license_struct[0] == "||":
+ ret = []
+ for element in license_struct[1:]:
+ if isinstance(element, list):
+ if element:
+ tmp = self._getMaskedLicenses(element, acceptable_licenses)
+ if not tmp:
+ return []
+ ret.extend(tmp)
+ else:
+ if element in acceptable_licenses:
+ return []
+ ret.append(element)
+ # Return all masked licenses, since we don't know which combination
+ # (if any) the user will decide to unmask.
+ return ret
+
+ ret = []
+ for element in license_struct:
+ if isinstance(element, list):
+ if element:
+ ret.extend(self._getMaskedLicenses(element,
+ acceptable_licenses))
+ else:
+ if element not in acceptable_licenses:
+ ret.append(element)
+ return ret
+
+ def set_accept_license_str(self, accept_license_str):
+ if accept_license_str != self._accept_license_str:
+ self._accept_license_str = accept_license_str
+ self._accept_license = tuple(self.expandLicenseTokens(accept_license_str.split()))
diff --git a/lib/portage/package/ebuild/_config/LocationsManager.py b/lib/portage/package/ebuild/_config/LocationsManager.py
new file mode 100644
index 000000000..f7d7209ff
--- /dev/null
+++ b/lib/portage/package/ebuild/_config/LocationsManager.py
@@ -0,0 +1,349 @@
+# Copyright 2010-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = (
+ 'LocationsManager',
+)
+
+import collections
+import io
+import warnings
+
+import portage
+from portage import os, eapi_is_supported, _encodings, _unicode_encode
+from portage.const import CUSTOM_PROFILE_PATH, GLOBAL_CONFIG_PATH, \
+ PROFILE_PATH, USER_CONFIG_PATH
+from portage.eapi import eapi_allows_directories_on_profile_level_and_repository_level
+from portage.exception import DirectoryNotFound, InvalidLocation, ParseError
+from portage.localization import _
+from portage.util import ensure_dirs, grabfile, \
+ normalize_path, read_corresponding_eapi_file, shlex_split, writemsg
+from portage.util._path import exists_raise_eaccess, isdir_raise_eaccess
+from portage.repository.config import parse_layout_conf, \
+ _portage1_profiles_allow_directories
+
+
+_PORTAGE1_DIRECTORIES = frozenset([
+ 'package.mask', 'package.provided',
+ 'package.use', 'package.use.mask', 'package.use.force',
+ 'use.mask', 'use.force'])
+
+_profile_node = collections.namedtuple('_profile_node',
+ ('location', 'portage1_directories', 'user_config',
+ 'profile_formats', 'eapi', 'allow_build_id'))
+
+_allow_parent_colon = frozenset(
+ ["portage-2"])
+
+class LocationsManager(object):
+
+ def __init__(self, config_root=None, eprefix=None, config_profile_path=None, local_config=True, \
+ target_root=None, sysroot=None):
+ self.user_profile_dir = None
+ self._local_repo_conf_path = None
+ self.eprefix = eprefix
+ self.config_root = config_root
+ self.target_root = target_root
+ self.sysroot = sysroot
+ self._user_config = local_config
+
+ if self.eprefix is None:
+ self.eprefix = portage.const.EPREFIX
+ elif self.eprefix:
+ self.eprefix = normalize_path(self.eprefix)
+ if self.eprefix == os.sep:
+ self.eprefix = ""
+
+ if self.config_root is None:
+ self.config_root = portage.const.EPREFIX + os.sep
+
+ self.config_root = normalize_path(os.path.abspath(
+ self.config_root)).rstrip(os.path.sep) + os.path.sep
+
+ self._check_var_directory("PORTAGE_CONFIGROOT", self.config_root)
+ self.abs_user_config = os.path.join(self.config_root, USER_CONFIG_PATH)
+ self.config_profile_path = config_profile_path
+
+ if self.sysroot is None:
+ self.sysroot = "/"
+ else:
+ self.sysroot = normalize_path(os.path.abspath(self.sysroot or os.sep)).rstrip(os.sep) + os.sep
+
+ self.esysroot = self.sysroot.rstrip(os.sep) + self.eprefix + os.sep
+
+ # TODO: Set this via the constructor using
+ # PORTAGE_OVERRIDE_EPREFIX.
+ self.broot = portage.const.EPREFIX
+
+ def load_profiles(self, repositories, known_repository_paths):
+ known_repository_paths = set(os.path.realpath(x)
+ for x in known_repository_paths)
+
+ known_repos = []
+ for x in known_repository_paths:
+ try:
+ repo = repositories.get_repo_for_location(x)
+ except KeyError:
+ layout_data = parse_layout_conf(x)[0]
+ else:
+ layout_data = {
+ "profile-formats": repo.profile_formats,
+ "profile_eapi_when_unspecified": repo.eapi
+ }
+ # force a trailing '/' for ease of doing startswith checks
+ known_repos.append((x + '/', layout_data))
+ known_repos = tuple(known_repos)
+
+ if self.config_profile_path is None:
+ deprecated_profile_path = os.path.join(
+ self.config_root, 'etc', 'make.profile')
+ self.config_profile_path = \
+ os.path.join(self.config_root, PROFILE_PATH)
+ if isdir_raise_eaccess(self.config_profile_path):
+ self.profile_path = self.config_profile_path
+ if isdir_raise_eaccess(deprecated_profile_path) and not \
+ os.path.samefile(self.profile_path,
+ deprecated_profile_path):
+ # Don't warn if they refer to the same path, since
+ # that can be used for backward compatibility with
+ # old software.
+ writemsg("!!! %s\n" %
+ _("Found 2 make.profile dirs: "
+ "using '%s', ignoring '%s'") %
+ (self.profile_path, deprecated_profile_path),
+ noiselevel=-1)
+ else:
+ self.config_profile_path = deprecated_profile_path
+ if isdir_raise_eaccess(self.config_profile_path):
+ self.profile_path = self.config_profile_path
+ else:
+ self.profile_path = None
+ else:
+ # NOTE: repoman may pass in an empty string
+ # here, in order to create an empty profile
+ # for checking dependencies of packages with
+ # empty KEYWORDS.
+ self.profile_path = self.config_profile_path
+
+
+ # The symlink might not exist or might not be a symlink.
+ self.profiles = []
+ self.profiles_complex = []
+ if self.profile_path:
+ try:
+ self._addProfile(os.path.realpath(self.profile_path),
+ repositories, known_repos)
+ except ParseError as e:
+ if not portage._sync_mode:
+ writemsg(_("!!! Unable to parse profile: '%s'\n") % self.profile_path, noiselevel=-1)
+ writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)
+ self.profiles = []
+ self.profiles_complex = []
+
+ if self._user_config and self.profiles:
+ custom_prof = os.path.join(
+ self.config_root, CUSTOM_PROFILE_PATH)
+ if os.path.exists(custom_prof):
+ # For read_corresponding_eapi_file, specify default=None
+ # in order to allow things like wildcard atoms when
+				# there is no explicit EAPI setting.
+ self.user_profile_dir = custom_prof
+ self.profiles.append(custom_prof)
+ self.profiles_complex.append(
+ _profile_node(custom_prof, True, True,
+ ('profile-bashrcs', 'profile-set'),
+ read_corresponding_eapi_file(
+ custom_prof + os.sep, default=None),
+ True))
+ del custom_prof
+
+ self.profiles = tuple(self.profiles)
+ self.profiles_complex = tuple(self.profiles_complex)
+
+ def _check_var_directory(self, varname, var):
+ if not isdir_raise_eaccess(var):
+ writemsg(_("!!! Error: %s='%s' is not a directory. "
+ "Please correct this.\n") % (varname, var),
+ noiselevel=-1)
+ raise DirectoryNotFound(var)
+
+ def _addProfile(self, currentPath, repositories, known_repos):
+ current_abs_path = os.path.abspath(currentPath)
+ allow_directories = True
+ allow_parent_colon = True
+ repo_loc = None
+ compat_mode = False
+ current_formats = ()
+ eapi = None
+
+ intersecting_repos = [x for x in known_repos
+ if current_abs_path.startswith(x[0])]
+ if intersecting_repos:
+ # Handle nested repositories. The longest path
+ # will be the correct one.
+ repo_loc, layout_data = max(intersecting_repos,
+ key=lambda x:len(x[0]))
+ eapi = layout_data.get("profile_eapi_when_unspecified")
+
+ eapi_file = os.path.join(currentPath, "eapi")
+ eapi = eapi or "0"
+ f = None
+ try:
+ f = io.open(_unicode_encode(eapi_file,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='replace')
+ eapi = f.readline().strip()
+ except IOError:
+ pass
+ else:
+ if not eapi_is_supported(eapi):
+ raise ParseError(_(
+ "Profile contains unsupported "
+ "EAPI '%s': '%s'") % \
+ (eapi, os.path.realpath(eapi_file),))
+ finally:
+ if f is not None:
+ f.close()
+
+ if intersecting_repos:
+ allow_directories = eapi_allows_directories_on_profile_level_and_repository_level(eapi) or \
+ any(x in _portage1_profiles_allow_directories for x in layout_data['profile-formats'])
+ compat_mode = not eapi_allows_directories_on_profile_level_and_repository_level(eapi) and \
+ layout_data['profile-formats'] == ('portage-1-compat',)
+ allow_parent_colon = any(x in _allow_parent_colon
+ for x in layout_data['profile-formats'])
+ current_formats = tuple(layout_data['profile-formats'])
+
+
+ if compat_mode:
+ offenders = _PORTAGE1_DIRECTORIES.intersection(os.listdir(currentPath))
+ offenders = sorted(x for x in offenders
+ if os.path.isdir(os.path.join(currentPath, x)))
+ if offenders:
+ warnings.warn(_(
+ "\nThe selected profile is implicitly using the 'portage-1' format:\n"
+ "\tprofile = %(profile_path)s\n"
+ "But this repository is not using that format:\n"
+ "\trepo = %(repo_name)s\n"
+ "This will break in the future. Please convert these dirs to files:\n"
+ "\t%(files)s\n"
+ "Or, add this line to the repository's layout.conf:\n"
+ "\tprofile-formats = portage-1")
+ % dict(profile_path=currentPath, repo_name=repo_loc,
+ files='\n\t'.join(offenders)))
+
+ parentsFile = os.path.join(currentPath, "parent")
+ if exists_raise_eaccess(parentsFile):
+ parents = grabfile(parentsFile)
+ if not parents:
+ raise ParseError(
+ _("Empty parent file: '%s'") % parentsFile)
+ for parentPath in parents:
+ abs_parent = parentPath[:1] == os.sep
+ if not abs_parent and allow_parent_colon:
+ parentPath = self._expand_parent_colon(parentsFile,
+ parentPath, repo_loc, repositories)
+
+ # NOTE: This os.path.join() call is intended to ignore
+ # currentPath if parentPath is already absolute.
+ parentPath = normalize_path(os.path.join(
+ currentPath, parentPath))
+
+ if abs_parent or repo_loc is None or \
+ not parentPath.startswith(repo_loc):
+ # It seems that this parent may point outside
+ # of the current repo, so realpath it.
+ parentPath = os.path.realpath(parentPath)
+
+ if exists_raise_eaccess(parentPath):
+ self._addProfile(parentPath, repositories, known_repos)
+ else:
+ raise ParseError(
+ _("Parent '%s' not found: '%s'") % \
+ (parentPath, parentsFile))
+
+ self.profiles.append(currentPath)
+ self.profiles_complex.append(
+ _profile_node(currentPath, allow_directories, False,
+ current_formats, eapi, 'build-id' in current_formats))
+
+ def _expand_parent_colon(self, parentsFile, parentPath,
+ repo_loc, repositories):
+ colon = parentPath.find(":")
+ if colon == -1:
+ return parentPath
+
+ if colon == 0:
+ if repo_loc is None:
+ raise ParseError(
+ _("Parent '%s' not found: '%s'") % \
+ (parentPath, parentsFile))
+ else:
+ parentPath = normalize_path(os.path.join(
+ repo_loc, 'profiles', parentPath[colon+1:]))
+ else:
+ p_repo_name = parentPath[:colon]
+ try:
+ p_repo_loc = repositories.get_location_for_name(p_repo_name)
+ except KeyError:
+ raise ParseError(
+ _("Parent '%s' not found: '%s'") % \
+ (parentPath, parentsFile))
+ else:
+ parentPath = normalize_path(os.path.join(
+ p_repo_loc, 'profiles', parentPath[colon+1:]))
+
+ return parentPath
+
+ def set_root_override(self, root_overwrite=None):
+ # Allow ROOT setting to come from make.conf if it's not overridden
+ # by the constructor argument (from the calling environment).
+ if self.target_root is None and root_overwrite is not None:
+ self.target_root = root_overwrite
+ if not self.target_root.strip():
+ self.target_root = None
+ if self.target_root is None:
+ self.target_root = "/"
+
+ self.target_root = normalize_path(os.path.abspath(
+ self.target_root)).rstrip(os.path.sep) + os.path.sep
+
+ if self.sysroot != "/" and self.sysroot != self.target_root:
+ writemsg(_("!!! Error: SYSROOT (currently %s) must "
+ "equal / or ROOT (currently %s).\n") %
+ (self.sysroot, self.target_root),
+ noiselevel=-1)
+ raise InvalidLocation(self.sysroot)
+
+ ensure_dirs(self.target_root)
+ self._check_var_directory("ROOT", self.target_root)
+
+ self.eroot = self.target_root.rstrip(os.sep) + self.eprefix + os.sep
+
+ self.global_config_path = GLOBAL_CONFIG_PATH
+ if portage.const.EPREFIX:
+ self.global_config_path = os.path.join(portage.const.EPREFIX,
+ GLOBAL_CONFIG_PATH.lstrip(os.sep))
+
+ def set_port_dirs(self, portdir, portdir_overlay):
+ self.portdir = portdir
+ self.portdir_overlay = portdir_overlay
+ if self.portdir_overlay is None:
+ self.portdir_overlay = ""
+
+ self.overlay_profiles = []
+ for ov in shlex_split(self.portdir_overlay):
+ ov = normalize_path(ov)
+ profiles_dir = os.path.join(ov, "profiles")
+ if isdir_raise_eaccess(profiles_dir):
+ self.overlay_profiles.append(profiles_dir)
+
+ self.profile_locations = [os.path.join(portdir, "profiles")] + self.overlay_profiles
+ self.profile_and_user_locations = self.profile_locations[:]
+ if self._user_config:
+ self.profile_and_user_locations.append(self.abs_user_config)
+
+ self.profile_locations = tuple(self.profile_locations)
+ self.profile_and_user_locations = tuple(self.profile_and_user_locations)
diff --git a/lib/portage/package/ebuild/_config/MaskManager.py b/lib/portage/package/ebuild/_config/MaskManager.py
new file mode 100644
index 000000000..40cc6e0c4
--- /dev/null
+++ b/lib/portage/package/ebuild/_config/MaskManager.py
@@ -0,0 +1,261 @@
+# Copyright 2010-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'MaskManager',
+)
+
+import warnings
+
+from portage import os
+from portage.dep import ExtendedAtomDict, match_from_list
+from portage.localization import _
+from portage.util import append_repo, grabfile_package, stack_lists, writemsg
+from portage.versions import _pkg_str
+
class MaskManager(object):
	"""Stack package.mask / package.unmask entries from repositories,
	profiles and (optionally) the user configuration into per-cp atom
	dictionaries used for mask lookups."""

	def __init__(self, repositories, profiles, abs_user_config,
		user_config=True, strict_umatched_removal=False):
		# cp -> tuple of unmask atoms
		self._punmaskdict = ExtendedAtomDict(list)
		# cp -> tuple of effective mask atoms (after -atom removal)
		self._pmaskdict = ExtendedAtomDict(list)
		# Preserves atoms that are eliminated by negative
		# incrementals in user_pkgmasklines.
		self._pmaskdict_raw = ExtendedAtomDict(list)

		#Read profile/package.mask from every repo.
		#Repositories inherit masks from their parent profiles and
		#are able to remove mask from them with -atoms.
		#Such a removal affects only the current repo, but not the parent.
		#Add ::repo specs to every atom to make sure atoms only affect
		#packages from the current repo.

		# Cache the repository-wide package.mask files as a particular
		# repo may be often referenced by others as the master.
		pmask_cache = {}

		def grab_pmask(loc, repo_config):
			# Read (and memoize) the package.mask of the repo rooted at loc.
			if loc not in pmask_cache:
				path = os.path.join(loc, 'profiles', 'package.mask')
				pmask_cache[loc] = grabfile_package(path,
					recursive=repo_config.portage1_profiles,
					remember_source_file=True, verify_eapi=True,
					eapi_default=repo_config.eapi,
					allow_build_id=("build-id"
					in repo_config.profile_formats))
				if repo_config.portage1_profiles_compat and os.path.isdir(path):
					warnings.warn(_("Repository '%(repo_name)s' is implicitly using "
						"'portage-1' profile format in its profiles/package.mask, but "
						"the repository profiles are not marked as that format. This will break "
						"in the future. Please either convert the following paths "
						"to files, or add\nprofile-formats = portage-1\nto the "
						"repository's layout.conf.\n")
						% dict(repo_name=repo_config.name))

			return pmask_cache[loc]

		repo_pkgmasklines = []
		for repo in repositories.repos_with_profiles():
			lines = []
			repo_lines = grab_pmask(repo.location, repo)
			# -atoms in this repo's own file remove inherited master masks.
			removals = frozenset(line[0][1:] for line in repo_lines
				if line[0][:1] == "-")
			matched_removals = set()
			for master in repo.masters:
				master_lines = grab_pmask(master.location, master)
				for line in master_lines:
					if line[0] in removals:
						matched_removals.add(line[0])
				# Since we don't stack masters recursively, there aren't any
				# atoms earlier in the stack to be matched by negative atoms in
				# master_lines. Also, repo_lines may contain negative atoms
				# that are intended to negate atoms from a different master
				# than the one with which we are currently stacking. Therefore,
				# we disable warn_for_unmatched_removal here (see bug #386569).
				lines.append(stack_lists([master_lines, repo_lines], incremental=1,
					remember_source_file=True, warn_for_unmatched_removal=False))

			# It's safe to warn for unmatched removal if masters have not
			# been overridden by the user, which is guaranteed when
			# user_config is false (when called by repoman).
			if repo.masters:
				unmatched_removals = removals.difference(matched_removals)
				if unmatched_removals and not user_config:
					source_file = os.path.join(repo.location,
						"profiles", "package.mask")
					unmatched_removals = list(unmatched_removals)
					if len(unmatched_removals) > 3:
						writemsg(
							_("--- Unmatched removal atoms in %s: %s and %s more\n") %
							(source_file,
							", ".join("-" + x for x in unmatched_removals[:3]),
							len(unmatched_removals) - 3), noiselevel=-1)
					else:
						writemsg(
							_("--- Unmatched removal atom(s) in %s: %s\n") %
							(source_file,
							", ".join("-" + x for x in unmatched_removals)),
							noiselevel=-1)

			else:
				lines.append(stack_lists([repo_lines], incremental=1,
					remember_source_file=True, warn_for_unmatched_removal=not user_config,
					strict_warn_for_unmatched_removal=strict_umatched_removal))
			repo_pkgmasklines.extend(append_repo(stack_lists(lines), repo.name, remember_source_file=True))

		repo_pkgunmasklines = []
		for repo in repositories.repos_with_profiles():
			if not repo.portage1_profiles:
				continue
			repo_lines = grabfile_package(os.path.join(repo.location, "profiles", "package.unmask"), \
				recursive=1, remember_source_file=True,
				verify_eapi=True, eapi_default=repo.eapi,
				allow_build_id=("build-id" in repo.profile_formats))
			lines = stack_lists([repo_lines], incremental=1, \
				remember_source_file=True, warn_for_unmatched_removal=True,
				strict_warn_for_unmatched_removal=strict_umatched_removal)
			repo_pkgunmasklines.extend(append_repo(lines, repo.name, remember_source_file=True))

		#Read package.mask from the user's profile. Stack them in the end
		#to allow profiles to override masks from their parent profiles.
		profile_pkgmasklines = []
		profile_pkgunmasklines = []
		for x in profiles:
			profile_pkgmasklines.append(grabfile_package(
				os.path.join(x.location, "package.mask"),
				recursive=x.portage1_directories,
				remember_source_file=True, verify_eapi=True,
				eapi=x.eapi, eapi_default=None,
				allow_build_id=x.allow_build_id))
			if x.portage1_directories:
				profile_pkgunmasklines.append(grabfile_package(
					os.path.join(x.location, "package.unmask"),
					recursive=x.portage1_directories,
					remember_source_file=True, verify_eapi=True,
					eapi=x.eapi, eapi_default=None,
					allow_build_id=x.allow_build_id))
		profile_pkgmasklines = stack_lists(profile_pkgmasklines, incremental=1, \
			remember_source_file=True, warn_for_unmatched_removal=True,
			strict_warn_for_unmatched_removal=strict_umatched_removal)
		profile_pkgunmasklines = stack_lists(profile_pkgunmasklines, incremental=1, \
			remember_source_file=True, warn_for_unmatched_removal=True,
			strict_warn_for_unmatched_removal=strict_umatched_removal)

		#Read /etc/portage/package.mask. Don't stack it to allow the user to
		#remove mask atoms from everywhere with -atoms.
		user_pkgmasklines = []
		user_pkgunmasklines = []
		if user_config:
			user_pkgmasklines = grabfile_package(
				os.path.join(abs_user_config, "package.mask"), recursive=1, \
				allow_wildcard=True, allow_repo=True,
				remember_source_file=True, verify_eapi=False,
				allow_build_id=True)
			user_pkgunmasklines = grabfile_package(
				os.path.join(abs_user_config, "package.unmask"), recursive=1, \
				allow_wildcard=True, allow_repo=True,
				remember_source_file=True, verify_eapi=False,
				allow_build_id=True)

		#Stack everything together. At this point, only user_pkgmasklines may contain -atoms.
		#Don't warn for unmatched -atoms here, since we don't do it for any other user config file.
		raw_pkgmasklines = stack_lists([repo_pkgmasklines, profile_pkgmasklines], \
			incremental=1, remember_source_file=True, warn_for_unmatched_removal=False, ignore_repo=True)
		pkgmasklines = stack_lists([repo_pkgmasklines, profile_pkgmasklines, user_pkgmasklines], \
			incremental=1, remember_source_file=True, warn_for_unmatched_removal=False, ignore_repo=True)
		pkgunmasklines = stack_lists([repo_pkgunmasklines, profile_pkgunmasklines, user_pkgunmasklines], \
			incremental=1, remember_source_file=True, warn_for_unmatched_removal=False, ignore_repo=True)

		for x, source_file in raw_pkgmasklines:
			self._pmaskdict_raw.setdefault(x.cp, []).append(x)

		for x, source_file in pkgmasklines:
			self._pmaskdict.setdefault(x.cp, []).append(x)

		for x, source_file in pkgunmasklines:
			self._punmaskdict.setdefault(x.cp, []).append(x)

		# Freeze the value lists so lookups can safely share them.
		for d in (self._pmaskdict_raw, self._pmaskdict, self._punmaskdict):
			for k, v in d.items():
				d[k] = tuple(v)
+
	def _getMaskAtom(self, cpv, slot, repo, unmask_atoms=None):
		"""
		Take a package and return a matching package.mask atom, or None if no
		such atom exists or it has been cancelled by package.unmask.

		@param cpv: The package name
		@type cpv: String
		@param slot: The package's slot
		@type slot: String
		@param repo: The package's repository [optional]
		@type repo: String
		@param unmask_atoms: if desired pass in self._punmaskdict.get(cp)
		@type unmask_atoms: list
		@rtype: String
		@return: A matching atom string or None if one is not found.
		"""

		try:
			cpv.slot
		except AttributeError:
			# Plain cpv string: wrap it so match_from_list sees slot/repo.
			pkg = _pkg_str(cpv, slot=slot, repo=repo)
		else:
			pkg = cpv

		mask_atoms = self._pmaskdict.get(pkg.cp)
		if mask_atoms:
			pkg_list = [pkg]
			for x in mask_atoms:
				if not match_from_list(x, pkg_list):
					continue
				# A matching unmask atom cancels the mask entirely.
				if unmask_atoms:
					for y in unmask_atoms:
						if match_from_list(y, pkg_list):
							return None
				return x
		return None
+
+
	def getMaskAtom(self, cpv, slot, repo):
		"""
		Take a package and return a matching package.mask atom, or None if no
		such atom exists or it has been cancelled by package.unmask.

		@param cpv: The package name
		@type cpv: String
		@param slot: The package's slot
		@type slot: String
		@param repo: The package's repository [optional]
		@type repo: String
		@rtype: String
		@return: A matching atom string or None if one is not found.
		"""

		try:
			cpv.slot
		except AttributeError:
			# Plain cpv string: wrap it so mask matching sees slot/repo.
			pkg = _pkg_str(cpv, slot=slot, repo=repo)
		else:
			pkg = cpv

		# Pass the unmask atoms for this cp so package.unmask can cancel
		# a matching mask.
		return self._getMaskAtom(pkg, slot, repo,
			self._punmaskdict.get(pkg.cp))
+
+
	def getRawMaskAtom(self, cpv, slot, repo):
		"""
		Take a package and return a matching package.mask atom, or None if no
		such atom exists. It HAS NOT! been cancelled by any package.unmask.

		@param cpv: The package name
		@type cpv: String
		@param slot: The package's slot
		@type slot: String
		@param repo: The package's repository [optional]
		@type repo: String
		@rtype: String
		@return: A matching atom string or None if one is not found.
		"""

		# No unmask_atoms passed: report the mask even if package.unmask
		# would cancel it.
		return self._getMaskAtom(cpv, slot, repo)
diff --git a/lib/portage/package/ebuild/_config/UseManager.py b/lib/portage/package/ebuild/_config/UseManager.py
new file mode 100644
index 000000000..7302876ab
--- /dev/null
+++ b/lib/portage/package/ebuild/_config/UseManager.py
@@ -0,0 +1,579 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'UseManager',
+)
+
+from _emerge.Package import Package
+from portage import os
+from portage.dep import Atom, dep_getrepo, dep_getslot, ExtendedAtomDict, remove_slot, _get_useflag_re, _repo_separator
+from portage.eapi import eapi_has_use_aliases, eapi_supports_stable_use_forcing_and_masking
+from portage.exception import InvalidAtom
+from portage.localization import _
+from portage.util import grabfile, grabdict, grabdict_package, read_corresponding_eapi_file, stack_lists, writemsg
+from portage.versions import _pkg_str
+
+from portage.package.ebuild._config.helper import ordered_by_atom_specificity
+
class UseManager(object):
	"""Resolve USE flag masking, forcing, aliases and per-package USE
	settings from repository, profile and user configuration files."""

	def __init__(self, repositories, profiles, abs_user_config, is_stable,
		user_config=True):
		# file variable
		#--------------------------------
		# repositories
		#--------------------------------
		# use.mask _repo_usemask_dict
		# use.stable.mask _repo_usestablemask_dict
		# use.force _repo_useforce_dict
		# use.stable.force _repo_usestableforce_dict
		# use.aliases _repo_usealiases_dict
		# package.use.mask _repo_pusemask_dict
		# package.use.stable.mask _repo_pusestablemask_dict
		# package.use.force _repo_puseforce_dict
		# package.use.stable.force _repo_pusestableforce_dict
		# package.use.aliases _repo_pusealiases_dict
		#--------------------------------
		# profiles
		#--------------------------------
		# use.mask _usemask_list
		# use.stable.mask _usestablemask_list
		# use.force _useforce_list
		# use.stable.force _usestableforce_list
		# package.use.mask _pusemask_list
		# package.use.stable.mask _pusestablemask_list
		# package.use _pkgprofileuse
		# package.use.force _puseforce_list
		# package.use.stable.force _pusestableforce_list
		#--------------------------------
		# user config
		#--------------------------------
		# package.use _pusedict

		# Dynamic variables tracked by the config class
		#--------------------------------
		# profiles
		#--------------------------------
		# usemask
		# useforce
		#--------------------------------
		# user config
		#--------------------------------
		# puse

		self._user_config = user_config
		# Callable used for the stable check when repoman-style config
		# instances are in use (see _isStable).
		self._is_stable = is_stable
		self._repo_usemask_dict = self._parse_repository_files_to_dict_of_tuples("use.mask", repositories)
		self._repo_usestablemask_dict = \
			self._parse_repository_files_to_dict_of_tuples("use.stable.mask",
			repositories, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
		self._repo_useforce_dict = self._parse_repository_files_to_dict_of_tuples("use.force", repositories)
		self._repo_usestableforce_dict = \
			self._parse_repository_files_to_dict_of_tuples("use.stable.force",
			repositories, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
		self._repo_pusemask_dict = self._parse_repository_files_to_dict_of_dicts("package.use.mask", repositories)
		self._repo_pusestablemask_dict = \
			self._parse_repository_files_to_dict_of_dicts("package.use.stable.mask",
			repositories, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
		self._repo_puseforce_dict = self._parse_repository_files_to_dict_of_dicts("package.use.force", repositories)
		self._repo_pusestableforce_dict = \
			self._parse_repository_files_to_dict_of_dicts("package.use.stable.force",
			repositories, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
		self._repo_puse_dict = self._parse_repository_files_to_dict_of_dicts("package.use", repositories)

		self._usemask_list = self._parse_profile_files_to_tuple_of_tuples("use.mask", profiles)
		self._usestablemask_list = \
			self._parse_profile_files_to_tuple_of_tuples("use.stable.mask",
			profiles, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
		self._useforce_list = self._parse_profile_files_to_tuple_of_tuples("use.force", profiles)
		self._usestableforce_list = \
			self._parse_profile_files_to_tuple_of_tuples("use.stable.force",
			profiles, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
		self._pusemask_list = self._parse_profile_files_to_tuple_of_dicts("package.use.mask", profiles)
		self._pusestablemask_list = \
			self._parse_profile_files_to_tuple_of_dicts("package.use.stable.mask",
			profiles, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
		self._pkgprofileuse = self._parse_profile_files_to_tuple_of_dicts("package.use", profiles, juststrings=True)
		self._puseforce_list = self._parse_profile_files_to_tuple_of_dicts("package.use.force", profiles)
		self._pusestableforce_list = \
			self._parse_profile_files_to_tuple_of_dicts("package.use.stable.force",
			profiles, eapi_filter=eapi_supports_stable_use_forcing_and_masking)

		self._pusedict = self._parse_user_files_to_extatomdict("package.use", abs_user_config, user_config)

		self._repo_usealiases_dict = self._parse_repository_usealiases(repositories)
		self._repo_pusealiases_dict = self._parse_repository_packageusealiases(repositories)

		self.repositories = repositories
+
	def _parse_file_to_tuple(self, file_name, recursive=True,
		eapi_filter=None, eapi=None, eapi_default="0"):
		"""
		@param file_name: input file name
		@type file_name: str
		@param recursive: triggers recursion if the input file is a
		directory
		@type recursive: bool
		@param eapi_filter: a function that accepts a single eapi
		argument, and returns true if the current file type
		is supported by the given EAPI
		@type eapi_filter: callable
		@param eapi: the EAPI of the current profile node, which allows
		a call to read_corresponding_eapi_file to be skipped
		@type eapi: str
		@param eapi_default: the default EAPI which applies if the
		current profile node does not define a local EAPI
		@type eapi_default: str
		@rtype: tuple
		@return: collection of USE flags
		"""
		ret = []
		lines = grabfile(file_name, recursive=recursive)
		if eapi is None:
			eapi = read_corresponding_eapi_file(
				file_name, default=eapi_default)
		if eapi_filter is not None and not eapi_filter(eapi):
			# This file type is not valid for the file's EAPI: warn
			# (only if it has content) and treat it as empty.
			if lines:
				writemsg(_("--- EAPI '%s' does not support '%s': '%s'\n") %
					(eapi, os.path.basename(file_name), file_name),
					noiselevel=-1)
			return ()
		useflag_re = _get_useflag_re(eapi)
		for prefixed_useflag in lines:
			# A leading "-" removes the flag; validate the bare name.
			if prefixed_useflag[:1] == "-":
				useflag = prefixed_useflag[1:]
			else:
				useflag = prefixed_useflag
			if useflag_re.match(useflag) is None:
				writemsg(_("--- Invalid USE flag in '%s': '%s'\n") %
					(file_name, prefixed_useflag), noiselevel=-1)
			else:
				ret.append(prefixed_useflag)
		return tuple(ret)
+
	def _parse_file_to_dict(self, file_name, juststrings=False, recursive=True,
		eapi_filter=None, user_config=False, eapi=None, eapi_default="0",
		allow_build_id=False):
		"""
		@param file_name: input file name
		@type file_name: str
		@param juststrings: store dict values as space-delimited strings
		instead of tuples
		@type juststrings: bool
		@param recursive: triggers recursion if the input file is a
		directory
		@type recursive: bool
		@param eapi_filter: a function that accepts a single eapi
		argument, and returns true if the current file type
		is supported by the given EAPI
		@type eapi_filter: callable
		@param user_config: current file is part of the local
		configuration (not repository content)
		@type user_config: bool
		@param eapi: the EAPI of the current profile node, which allows
		a call to read_corresponding_eapi_file to be skipped
		@type eapi: str
		@param eapi_default: the default EAPI which applies if the
		current profile node does not define a local EAPI
		@type eapi_default: str
		@param allow_build_id: allow atoms to specify a particular
		build-id
		@type allow_build_id: bool
		@rtype: dict
		@return: mapping of cp -> {atom: USE flag collection}
		"""
		ret = {}
		location_dict = {}
		if eapi is None:
			eapi = read_corresponding_eapi_file(file_name,
				default=eapi_default)
		# Wildcard/::repo atom syntax is only enabled for user config
		# files without an EAPI.
		extended_syntax = eapi is None and user_config
		if extended_syntax:
			ret = ExtendedAtomDict(dict)
		else:
			ret = {}
		file_dict = grabdict_package(file_name, recursive=recursive,
			allow_wildcard=extended_syntax, allow_repo=extended_syntax,
			verify_eapi=(not extended_syntax), eapi=eapi,
			eapi_default=eapi_default, allow_build_id=allow_build_id,
			allow_use=False)
		if eapi is not None and eapi_filter is not None and not eapi_filter(eapi):
			if file_dict:
				writemsg(_("--- EAPI '%s' does not support '%s': '%s'\n") %
					(eapi, os.path.basename(file_name), file_name),
					noiselevel=-1)
			return ret
		useflag_re = _get_useflag_re(eapi)
		for k, v in file_dict.items():
			useflags = []
			use_expand_prefix = ''
			for prefixed_useflag in v:
				if extended_syntax and prefixed_useflag == "\n":
					# A newline token resets the USE_EXPAND prefix.
					use_expand_prefix = ""
					continue
				if extended_syntax and prefixed_useflag[-1] == ":":
					# "PREFIX:" starts a USE_EXPAND group for following flags.
					use_expand_prefix = prefixed_useflag[:-1].lower() + "_"
					continue

				if prefixed_useflag[:1] == "-":
					useflag = use_expand_prefix + prefixed_useflag[1:]
					prefixed_useflag = "-" + useflag
				else:
					useflag = use_expand_prefix + prefixed_useflag
					prefixed_useflag = useflag
				if useflag_re.match(useflag) is None:
					writemsg(_("--- Invalid USE flag for '%s' in '%s': '%s'\n") %
						(k, file_name, prefixed_useflag), noiselevel=-1)
				else:
					useflags.append(prefixed_useflag)
			location_dict.setdefault(k, []).extend(useflags)
		for k, v in location_dict.items():
			if juststrings:
				v = " ".join(v)
			else:
				v = tuple(v)
			ret.setdefault(k.cp, {})[k] = v
		return ret
+
	def _parse_user_files_to_extatomdict(self, file_name, location, user_config):
		"""Parse a user config package.* file into an ExtendedAtomDict
		of cp -> {atom: tuple of flags}; empty when user_config is false."""
		ret = ExtendedAtomDict(dict)
		if user_config:
			pusedict = grabdict_package(
				os.path.join(location, file_name),
				recursive=1, newlines=1, allow_wildcard=True,
				allow_repo=True, verify_eapi=False,
				allow_build_id=True, allow_use=False)
			for k, v in pusedict.items():
				l = []
				use_expand_prefix = ''
				for flag in v:
					if flag == "\n":
						# A newline token resets the USE_EXPAND prefix.
						use_expand_prefix = ""
						continue
					if flag[-1] == ":":
						# "PREFIX:" starts a USE_EXPAND group.
						use_expand_prefix = flag[:-1].lower() + "_"
						continue
					if flag[0] == "-":
						nv = "-" + use_expand_prefix + flag[1:]
					else:
						nv = use_expand_prefix + flag
					l.append(nv)
				ret.setdefault(k.cp, {})[k] = tuple(l)

		return ret
+
	def _parse_repository_files_to_dict_of_tuples(self, file_name, repositories, eapi_filter=None):
		"""Parse profiles/<file_name> of every repo with profiles into
		a dict of repo name -> tuple of flags."""
		ret = {}
		for repo in repositories.repos_with_profiles():
			ret[repo.name] = self._parse_file_to_tuple(
				os.path.join(repo.location, "profiles", file_name),
				eapi_filter=eapi_filter, eapi_default=repo.eapi)
		return ret
+
	def _parse_repository_files_to_dict_of_dicts(self, file_name, repositories, eapi_filter=None):
		"""Parse profiles/<file_name> of every repo with profiles into
		a dict of repo name -> (cp -> {atom: flags})."""
		ret = {}
		for repo in repositories.repos_with_profiles():
			ret[repo.name] = self._parse_file_to_dict(
				os.path.join(repo.location, "profiles", file_name),
				eapi_filter=eapi_filter, eapi_default=repo.eapi,
				allow_build_id=("build-id" in repo.profile_formats))
		return ret
+
	def _parse_profile_files_to_tuple_of_tuples(self, file_name, locations,
		eapi_filter=None):
		"""Parse <file_name> from each profile node into a tuple (one
		entry per node, in stacking order) of flag tuples."""
		return tuple(self._parse_file_to_tuple(
			os.path.join(profile.location, file_name),
			recursive=profile.portage1_directories,
			eapi_filter=eapi_filter, eapi=profile.eapi,
			eapi_default=None) for profile in locations)
+
	def _parse_profile_files_to_tuple_of_dicts(self, file_name, locations,
		juststrings=False, eapi_filter=None):
		"""Parse <file_name> from each profile node into a tuple (one
		entry per node, in stacking order) of cp -> {atom: flags} dicts."""
		return tuple(self._parse_file_to_dict(
			os.path.join(profile.location, file_name), juststrings,
			recursive=profile.portage1_directories, eapi_filter=eapi_filter,
			user_config=profile.user_config, eapi=profile.eapi,
			eapi_default=None, allow_build_id=profile.allow_build_id)
			for profile in locations)
+
	def _parse_repository_usealiases(self, repositories):
		"""Parse profiles/use.aliases of every repo with profiles into
		a dict of repo name -> {real flag: [aliases]}. Invalid flags and
		duplicated aliases are reported via writemsg and skipped."""
		ret = {}
		for repo in repositories.repos_with_profiles():
			file_name = os.path.join(repo.location, "profiles", "use.aliases")
			eapi = read_corresponding_eapi_file(
				file_name, default=repo.eapi)
			useflag_re = _get_useflag_re(eapi)
			raw_file_dict = grabdict(file_name, recursive=True)
			file_dict = {}
			for real_flag, aliases in raw_file_dict.items():
				if useflag_re.match(real_flag) is None:
					writemsg(_("--- Invalid real USE flag in '%s': '%s'\n") % (file_name, real_flag), noiselevel=-1)
				else:
					for alias in aliases:
						if useflag_re.match(alias) is None:
							writemsg(_("--- Invalid USE flag alias for '%s' real USE flag in '%s': '%s'\n") %
								(real_flag, file_name, alias), noiselevel=-1)
						else:
							# An alias may belong to at most one real flag.
							if any(alias in v for k, v in file_dict.items() if k != real_flag):
								writemsg(_("--- Duplicated USE flag alias in '%s': '%s'\n") %
									(file_name, alias), noiselevel=-1)
							else:
								file_dict.setdefault(real_flag, []).append(alias)
			ret[repo.name] = file_dict
		return ret
+
+ def _parse_repository_packageusealiases(self, repositories):
+ ret = {}
+ for repo in repositories.repos_with_profiles():
+ file_name = os.path.join(repo.location, "profiles", "package.use.aliases")
+ eapi = read_corresponding_eapi_file(
+ file_name, default=repo.eapi)
+ useflag_re = _get_useflag_re(eapi)
+ lines = grabfile(file_name, recursive=True)
+ file_dict = {}
+ for line in lines:
+ elements = line.split()
+ atom = elements[0]
+ try:
+ atom = Atom(atom, eapi=eapi)
+ except InvalidAtom:
+ writemsg(_("--- Invalid atom in '%s': '%s'\n") % (file_name, atom))
+ continue
+ if len(elements) == 1:
+ writemsg(_("--- Missing real USE flag for '%s' in '%s'\n") % (atom, file_name), noiselevel=-1)
+ continue
+ real_flag = elements[1]
+ if useflag_re.match(real_flag) is None:
+ writemsg(_("--- Invalid real USE flag for '%s' in '%s': '%s'\n") % (atom, file_name, real_flag), noiselevel=-1)
+ else:
+ for alias in elements[2:]:
+ if useflag_re.match(alias) is None:
+ writemsg(_("--- Invalid USE flag alias for '%s' real USE flag for '%s' in '%s': '%s'\n") %
+ (real_flag, atom, file_name, alias), noiselevel=-1)
+ else:
+ # Duplicated USE flag aliases in entries for different atoms
+ # matching the same package version are detected in getUseAliases().
+ if any(alias in v for k, v in file_dict.get(atom.cp, {}).get(atom, {}).items() if k != real_flag):
+ writemsg(_("--- Duplicated USE flag alias for '%s' in '%s': '%s'\n") %
+ (atom, file_name, alias), noiselevel=-1)
+ else:
+ file_dict.setdefault(atom.cp, {}).setdefault(atom, {}).setdefault(real_flag, []).append(alias)
+ ret[repo.name] = file_dict
+ return ret
+
	def _isStable(self, pkg):
		# Decide whether the stable variants of use.mask/use.force
		# apply to this package.
		if self._user_config:
			try:
				return pkg.stable
			except AttributeError:
				# KEYWORDS is unavailable (prior to "depend" phase)
				return False

		try:
			pkg._metadata
		except AttributeError:
			# KEYWORDS is unavailable (prior to "depend" phase)
			return False

		# Since repoman uses different config instances for
		# different profiles, we have to be careful to do the
		# stable check against the correct profile here.
		return self._is_stable(pkg)
+
	def getUseMask(self, pkg=None, stable=None):
		"""Return a frozenset of masked USE flags: the global profile
		mask when pkg is None, otherwise the stacked repo, profile and
		per-package masks that match pkg."""
		if pkg is None:
			return frozenset(stack_lists(
				self._usemask_list, incremental=True))

		slot = None
		cp = getattr(pkg, "cp", None)
		if cp is None:
			# pkg is a plain dep string: extract slot/repo and rebuild.
			slot = dep_getslot(pkg)
			repo = dep_getrepo(pkg)
			pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
			cp = pkg.cp

		if stable is None:
			stable = self._isStable(pkg)

		usemask = []

		if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
			# Masters first, then the package's own repo.
			repos = []
			try:
				repos.extend(repo.name for repo in
					self.repositories[pkg.repo].masters)
			except KeyError:
				pass
			repos.append(pkg.repo)
			for repo in repos:
				usemask.append(self._repo_usemask_dict.get(repo, {}))
				if stable:
					usemask.append(self._repo_usestablemask_dict.get(repo, {}))
				cpdict = self._repo_pusemask_dict.get(repo, {}).get(cp)
				if cpdict:
					pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
					if pkg_usemask:
						usemask.extend(pkg_usemask)
				if stable:
					cpdict = self._repo_pusestablemask_dict.get(repo, {}).get(cp)
					if cpdict:
						pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
						if pkg_usemask:
							usemask.extend(pkg_usemask)

		# Profile nodes are stacked in order on top of the repo masks.
		for i, pusemask_dict in enumerate(self._pusemask_list):
			if self._usemask_list[i]:
				usemask.append(self._usemask_list[i])
			if stable and self._usestablemask_list[i]:
				usemask.append(self._usestablemask_list[i])
			cpdict = pusemask_dict.get(cp)
			if cpdict:
				pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
				if pkg_usemask:
					usemask.extend(pkg_usemask)
			if stable:
				cpdict = self._pusestablemask_list[i].get(cp)
				if cpdict:
					pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
					if pkg_usemask:
						usemask.extend(pkg_usemask)

		return frozenset(stack_lists(usemask, incremental=True))
+
	def getUseForce(self, pkg=None, stable=None):
		"""Return a frozenset of forced USE flags: the global profile
		force list when pkg is None, otherwise the stacked repo, profile
		and per-package force entries that match pkg."""
		if pkg is None:
			return frozenset(stack_lists(
				self._useforce_list, incremental=True))

		cp = getattr(pkg, "cp", None)
		if cp is None:
			# pkg is a plain dep string: extract slot/repo and rebuild.
			slot = dep_getslot(pkg)
			repo = dep_getrepo(pkg)
			pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
			cp = pkg.cp

		if stable is None:
			stable = self._isStable(pkg)

		useforce = []

		if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
			# Masters first, then the package's own repo.
			repos = []
			try:
				repos.extend(repo.name for repo in
					self.repositories[pkg.repo].masters)
			except KeyError:
				pass
			repos.append(pkg.repo)
			for repo in repos:
				useforce.append(self._repo_useforce_dict.get(repo, {}))
				if stable:
					useforce.append(self._repo_usestableforce_dict.get(repo, {}))
				cpdict = self._repo_puseforce_dict.get(repo, {}).get(cp)
				if cpdict:
					pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
					if pkg_useforce:
						useforce.extend(pkg_useforce)
				if stable:
					cpdict = self._repo_pusestableforce_dict.get(repo, {}).get(cp)
					if cpdict:
						pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
						if pkg_useforce:
							useforce.extend(pkg_useforce)

		# Profile nodes are stacked in order on top of the repo entries.
		for i, puseforce_dict in enumerate(self._puseforce_list):
			if self._useforce_list[i]:
				useforce.append(self._useforce_list[i])
			if stable and self._usestableforce_list[i]:
				useforce.append(self._usestableforce_list[i])
			cpdict = puseforce_dict.get(cp)
			if cpdict:
				pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
				if pkg_useforce:
					useforce.extend(pkg_useforce)
			if stable:
				cpdict = self._pusestableforce_list[i].get(cp)
				if cpdict:
					pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
					if pkg_useforce:
						useforce.extend(pkg_useforce)

		return frozenset(stack_lists(useforce, incremental=True))
+
	def getUseAliases(self, pkg):
		"""Return a dict of real USE flag -> list of aliases that apply
		to the given package, gathered from the repo (and master repo)
		use.aliases and package.use.aliases files."""
		# eapi_has_use_aliases gates alias support by the package's EAPI.
		if hasattr(pkg, "eapi") and not eapi_has_use_aliases(pkg.eapi):
			return {}

		cp = getattr(pkg, "cp", None)
		if cp is None:
			# pkg is a plain dep string: extract slot/repo and rebuild.
			slot = dep_getslot(pkg)
			repo = dep_getrepo(pkg)
			pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
			cp = pkg.cp

		usealiases = {}

		if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
			repos = []
			try:
				repos.extend(repo.name for repo in
					self.repositories[pkg.repo].masters)
			except KeyError:
				pass
			repos.append(pkg.repo)
			for repo in repos:
				usealiases_dict = self._repo_usealiases_dict.get(repo, {})
				for real_flag, aliases in usealiases_dict.items():
					for alias in aliases:
						# An alias may belong to at most one real flag.
						if any(alias in v for k, v in usealiases.items() if k != real_flag):
							writemsg(_("--- Duplicated USE flag alias for '%s%s%s': '%s'\n") %
								(pkg.cpv, _repo_separator, pkg.repo, alias), noiselevel=-1)
						else:
							usealiases.setdefault(real_flag, []).append(alias)
				cp_usealiases_dict = self._repo_pusealiases_dict.get(repo, {}).get(cp)
				if cp_usealiases_dict:
					usealiases_dict_list = ordered_by_atom_specificity(cp_usealiases_dict, pkg)
					for usealiases_dict in usealiases_dict_list:
						for real_flag, aliases in usealiases_dict.items():
							for alias in aliases:
								if any(alias in v for k, v in usealiases.items() if k != real_flag):
									writemsg(_("--- Duplicated USE flag alias for '%s%s%s': '%s'\n") %
										(pkg.cpv, _repo_separator, pkg.repo, alias), noiselevel=-1)
								else:
									usealiases.setdefault(real_flag, []).append(alias)

		return usealiases
+
	def getPUSE(self, pkg):
		"""Return the user's package.use flags matching pkg as a single
		space-delimited string ("" when nothing matches)."""
		cp = getattr(pkg, "cp", None)
		if cp is None:
			# pkg is a plain dep string: extract slot/repo and rebuild.
			slot = dep_getslot(pkg)
			repo = dep_getrepo(pkg)
			pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
			cp = pkg.cp
		ret = ""
		cpdict = self._pusedict.get(cp)
		if cpdict:
			puse_matches = ordered_by_atom_specificity(cpdict, pkg)
			if puse_matches:
				puse_list = []
				for x in puse_matches:
					puse_list.extend(x)
				ret = " ".join(puse_list)
		return ret
+
	def extract_global_USE_changes(self, old=""):
		"""Pop the "*/*" entry from the user's package.use dict and
		return its flags appended to *old* (or *old* unchanged when no
		such entry exists)."""
		ret = old
		cpdict = self._pusedict.get("*/*")
		if cpdict is not None:
			v = cpdict.pop("*/*", None)
			if v is not None:
				ret = " ".join(v)
				if old:
					ret = old + " " + ret
			if not cpdict:
				#No tokens left for "*/*", remove the empty entry.
				del self._pusedict["*/*"]
		return ret
diff --git a/lib/portage/package/ebuild/_config/VirtualsManager.py b/lib/portage/package/ebuild/_config/VirtualsManager.py
new file mode 100644
index 000000000..c4d1e3635
--- /dev/null
+++ b/lib/portage/package/ebuild/_config/VirtualsManager.py
@@ -0,0 +1,233 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'VirtualsManager',
+)
+
+from copy import deepcopy
+
+from portage import os
+from portage.dep import Atom
+from portage.exception import InvalidAtom
+from portage.localization import _
+from portage.util import grabdict, stack_dictlist, writemsg
+from portage.versions import cpv_getkey
+
+class VirtualsManager(object):
+
+ def __init__(self, *args, **kwargs):
+ if kwargs.get("_copy"):
+ return
+
+ assert len(args) == 1, "VirtualsManager.__init__ takes one positional argument"
+ assert not kwargs, "unknown keyword argument(s) '%s' passed to VirtualsManager.__init__" % \
+ ", ".join(kwargs)
+
+ profiles = args[0]
+ self._virtuals = None
+ self._dirVirtuals = None
+ self._virts_p = None
+
+ # Virtuals obtained from the vartree
+ self._treeVirtuals = None
+ # Virtuals added by the depgraph via self.add_depgraph_virtuals().
+ self._depgraphVirtuals = {}
+
+ #Initialise _dirVirtuals.
+ self._read_dirVirtuals(profiles)
+
+ #We could initialise _treeVirtuals here, but some consumers want to
+ #pass their own vartree.
+
+ def _read_dirVirtuals(self, profiles):
+ """
+ Read the 'virtuals' file in all profiles.
+ """
+ virtuals_list = []
+ for x in profiles:
+ virtuals_file = os.path.join(x, "virtuals")
+ virtuals_dict = grabdict(virtuals_file)
+ atoms_dict = {}
+ for k, v in virtuals_dict.items():
+ try:
+ virt_atom = Atom(k)
+ except InvalidAtom:
+ virt_atom = None
+ else:
+ if virt_atom.blocker or \
+ str(virt_atom) != str(virt_atom.cp):
+ virt_atom = None
+ if virt_atom is None:
+ writemsg(_("--- Invalid virtuals atom in %s: %s\n") % \
+ (virtuals_file, k), noiselevel=-1)
+ continue
+ providers = []
+ for atom in v:
+ atom_orig = atom
+ if atom[:1] == '-':
+ # allow incrementals
+ atom = atom[1:]
+ try:
+ atom = Atom(atom)
+ except InvalidAtom:
+ atom = None
+ else:
+ if atom.blocker:
+ atom = None
+ if atom is None:
+ writemsg(_("--- Invalid atom in %s: %s\n") % \
+ (virtuals_file, atom_orig), noiselevel=-1)
+ else:
+ if atom_orig == str(atom):
+ # normal atom, so return as Atom instance
+ providers.append(atom)
+ else:
+ # atom has special prefix, so return as string
+ providers.append(atom_orig)
+ if providers:
+ atoms_dict[virt_atom] = providers
+ if atoms_dict:
+ virtuals_list.append(atoms_dict)
+
+ self._dirVirtuals = stack_dictlist(virtuals_list, incremental=True)
+
+ for virt in self._dirVirtuals:
+ # Preference for virtuals decreases from left to right.
+ self._dirVirtuals[virt].reverse()
+
+ def __deepcopy__(self, memo=None):
+ if memo is None:
+ memo = {}
+ result = VirtualsManager(_copy=True)
+ memo[id(self)] = result
+
+ # immutable attributes (internal policy ensures lack of mutation)
+ # _treeVirtuals is initialised by _populate_treeVirtuals().
+ # Before that it's 'None'.
+ result._treeVirtuals = self._treeVirtuals
+ memo[id(self._treeVirtuals)] = self._treeVirtuals
+ # _dirVirtuals is initialised by __init__.
+ result._dirVirtuals = self._dirVirtuals
+ memo[id(self._dirVirtuals)] = self._dirVirtuals
+
+ # mutable attributes (change when add_depgraph_virtuals() is called)
+ result._virtuals = deepcopy(self._virtuals, memo)
+ result._depgraphVirtuals = deepcopy(self._depgraphVirtuals, memo)
+ result._virts_p = deepcopy(self._virts_p, memo)
+
+ return result
+
+ def _compile_virtuals(self):
+ """Stack installed and profile virtuals. Preference for virtuals
+ decreases from left to right.
+ Order of preference:
+ 1. installed and in profile
+ 2. installed only
+ 3. profile only
+ """
+
+ assert self._treeVirtuals is not None, "_populate_treeVirtuals() must be called before " + \
+ "any query about virtuals"
+
+ # Virtuals by profile+tree preferences.
+ ptVirtuals = {}
+
+ for virt, installed_list in self._treeVirtuals.items():
+ profile_list = self._dirVirtuals.get(virt, None)
+ if not profile_list:
+ continue
+ for cp in installed_list:
+ if cp in profile_list:
+ ptVirtuals.setdefault(virt, [])
+ ptVirtuals[virt].append(cp)
+
+ virtuals = stack_dictlist([ptVirtuals, self._treeVirtuals,
+ self._dirVirtuals, self._depgraphVirtuals])
+ self._virtuals = virtuals
+ self._virts_p = None
+
+ def getvirtuals(self):
+ """
+ Computes self._virtuals if necessary and returns it.
+ self._virtuals is only computed on the first call.
+ """
+ if self._virtuals is None:
+ self._compile_virtuals()
+
+ return self._virtuals
+
+ def _populate_treeVirtuals(self, vartree):
+ """
+ Initialize _treeVirtuals from the given vartree.
+ It must not have been initialized already, otherwise
+ our assumptions about immutability don't hold.
+ """
+ assert self._treeVirtuals is None, "treeVirtuals must not be reinitialized"
+
+ self._treeVirtuals = {}
+
+ for provide, cpv_list in vartree.get_all_provides().items():
+ try:
+ provide = Atom(provide)
+ except InvalidAtom:
+ continue
+ self._treeVirtuals[provide.cp] = \
+ [Atom(cpv_getkey(cpv)) for cpv in cpv_list]
+
+ def populate_treeVirtuals_if_needed(self, vartree):
+ """
+ Initialize _treeVirtuals if it hasn't been done already.
+ This is a hack for consumers that already have a populated vartree.
+ """
+ if self._treeVirtuals is not None:
+ return
+
+ self._populate_treeVirtuals(vartree)
+
+ def add_depgraph_virtuals(self, mycpv, virts):
+ """This updates the preferences for old-style virtuals,
+ affecting the behavior of dep_expand() and dep_check()
+ calls. It can change dbapi.match() behavior since that
+ calls dep_expand(). However, dbapi instances have
+ internal match caches that are not invalidated when
+ preferences are updated here. This can potentially
+ lead to some inconsistency (relevant to bug #1343)."""
+
+ #Ensure that self._virtuals is populated.
+ if self._virtuals is None:
+ self.getvirtuals()
+
+ modified = False
+ cp = Atom(cpv_getkey(mycpv))
+ for virt in virts:
+ try:
+ virt = Atom(virt).cp
+ except InvalidAtom:
+ continue
+ providers = self._virtuals.get(virt)
+ if providers and cp in providers:
+ continue
+ providers = self._depgraphVirtuals.get(virt)
+ if providers is None:
+ providers = []
+ self._depgraphVirtuals[virt] = providers
+ if cp not in providers:
+ providers.append(cp)
+ modified = True
+
+ if modified:
+ self._compile_virtuals()
+
+ def get_virts_p(self):
+ if self._virts_p is not None:
+ return self._virts_p
+
+ virts = self.getvirtuals()
+ virts_p = {}
+ for x in virts:
+ vkeysplit = x.split("/")
+ if vkeysplit[1] not in virts_p:
+ virts_p[vkeysplit[1]] = virts[x]
+ self._virts_p = virts_p
+ return virts_p
diff --git a/lib/portage/package/ebuild/_config/__init__.py b/lib/portage/package/ebuild/_config/__init__.py
new file mode 100644
index 000000000..21a391aee
--- /dev/null
+++ b/lib/portage/package/ebuild/_config/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/package/ebuild/_config/env_var_validation.py b/lib/portage/package/ebuild/_config/env_var_validation.py
new file mode 100644
index 000000000..d3db545cb
--- /dev/null
+++ b/lib/portage/package/ebuild/_config/env_var_validation.py
@@ -0,0 +1,23 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.process import find_binary
+from portage.util import shlex_split
+
+def validate_cmd_var(v):
+ """
+ Validate an environment variable value to see if it
+ contains an executable command as the first token.
+ returns (valid, token_list) where 'valid' is boolean and 'token_list'
+ is the (possibly empty) list of tokens split by shlex.
+ """
+ invalid = False
+ v_split = shlex_split(v)
+ if not v_split:
+ invalid = True
+ elif os.path.isabs(v_split[0]):
+ invalid = not os.access(v_split[0], os.EX_OK)
+ elif find_binary(v_split[0]) is None:
+ invalid = True
+ return (not invalid, v_split)
diff --git a/lib/portage/package/ebuild/_config/features_set.py b/lib/portage/package/ebuild/_config/features_set.py
new file mode 100644
index 000000000..62236fd89
--- /dev/null
+++ b/lib/portage/package/ebuild/_config/features_set.py
@@ -0,0 +1,128 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'features_set',
+)
+
+import logging
+
+from portage.const import SUPPORTED_FEATURES
+from portage.localization import _
+from portage.output import colorize
+from portage.util import writemsg_level
+
+class features_set(object):
+ """
+ Provides relevant set operations needed for access and modification of
+ config.features. The FEATURES variable is automatically synchronized
+ upon modification.
+
+ Modifications result in a permanent override that will cause the change
+ to propagate to the incremental stacking mechanism in config.regenerate().
+ This eliminates the need to call config.backup_changes() when FEATURES
+ is modified, since any overrides are guaranteed to persist despite calls
+ to config.reset().
+ """
+
+ def __init__(self, settings):
+ self._settings = settings
+ self._features = set()
+
+ def __contains__(self, k):
+ return k in self._features
+
+ def __iter__(self):
+ return iter(self._features)
+
+ def _sync_env_var(self):
+ self._settings['FEATURES'] = ' '.join(sorted(self._features))
+
+ def add(self, k):
+ self._settings.modifying()
+ self._settings._features_overrides.append(k)
+ if k not in self._features:
+ self._features.add(k)
+ self._sync_env_var()
+
+ def update(self, values):
+ self._settings.modifying()
+ values = list(values)
+ self._settings._features_overrides.extend(values)
+ need_sync = False
+ for k in values:
+ if k in self._features:
+ continue
+ self._features.add(k)
+ need_sync = True
+ if need_sync:
+ self._sync_env_var()
+
+ def difference_update(self, values):
+ self._settings.modifying()
+ values = list(values)
+ self._settings._features_overrides.extend('-' + k for k in values)
+ remove_us = self._features.intersection(values)
+ if remove_us:
+ self._features.difference_update(values)
+ self._sync_env_var()
+
+ def remove(self, k):
+ """
+ This never raises KeyError, since it records a permanent override
+ that will prevent the given flag from ever being added again by
+ incremental stacking in config.regenerate().
+ """
+ self.discard(k)
+
+ def discard(self, k):
+ self._settings.modifying()
+ self._settings._features_overrides.append('-' + k)
+ if k in self._features:
+ self._features.remove(k)
+ self._sync_env_var()
+
+ def _validate(self):
+ """
+ Implements unknown-features-warn and unknown-features-filter.
+ """
+ if 'unknown-features-warn' in self._features:
+ unknown_features = \
+ self._features.difference(SUPPORTED_FEATURES)
+ if unknown_features:
+ unknown_features = unknown_features.difference(
+ self._settings._unknown_features)
+ if unknown_features:
+ self._settings._unknown_features.update(unknown_features)
+ writemsg_level(colorize("BAD",
+ _("FEATURES variable contains unknown value(s): %s") % \
+ ", ".join(sorted(unknown_features))) \
+ + "\n", level=logging.WARNING, noiselevel=-1)
+
+ if 'unknown-features-filter' in self._features:
+ unknown_features = \
+ self._features.difference(SUPPORTED_FEATURES)
+ if unknown_features:
+ self.difference_update(unknown_features)
+ self._prune_overrides()
+
+ def _prune_overrides(self):
+ """
+ If there are lots of invalid package.env FEATURES settings
+ then unknown-features-filter can make _features_overrides
+ grow larger and larger, so prune it. This performs incremental
+ stacking with preservation of negative values since they need
+ to persist for future config.regenerate() calls.
+ """
+ overrides_set = set(self._settings._features_overrides)
+ positive = set()
+ negative = set()
+ for x in self._settings._features_overrides:
+ if x[:1] == '-':
+ positive.discard(x[1:])
+ negative.add(x[1:])
+ else:
+ positive.add(x)
+ negative.discard(x)
+ self._settings._features_overrides[:] = \
+ list(positive) + list('-' + x for x in negative)
diff --git a/lib/portage/package/ebuild/_config/helper.py b/lib/portage/package/ebuild/_config/helper.py
new file mode 100644
index 000000000..ee0c090a0
--- /dev/null
+++ b/lib/portage/package/ebuild/_config/helper.py
@@ -0,0 +1,64 @@
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'ordered_by_atom_specificity', 'prune_incremental',
+)
+
+from _emerge.Package import Package
+from portage.dep import best_match_to_list, _repo_separator
+
+def ordered_by_atom_specificity(cpdict, pkg, repo=None):
+ """
+ Return a list of matched values from the given cpdict,
+ in ascending order by atom specificity. The rationale
+ for this order is that package.* config files are
+ typically written in ChangeLog like fashion, so it's
+ most friendly if the order that the atoms are written
+ does not matter. Therefore, settings from more specific
+ atoms override those of less specific atoms. Without
+ this behavior, settings from relatively unspecific atoms
+ would (somewhat confusingly) override the settings of
+ more specific atoms, requiring people to make adjustments
+ to the order that atoms are listed in the config file in
+ order to achieve desired results (and thus corrupting
+ the ChangeLog like ordering of the file).
+ """
+ if not hasattr(pkg, 'repo') and repo and repo != Package.UNKNOWN_REPO:
+ pkg = pkg + _repo_separator + repo
+
+ results = []
+ keys = list(cpdict)
+
+ while keys:
+ bestmatch = best_match_to_list(pkg, keys)
+ if bestmatch:
+ keys.remove(bestmatch)
+ results.append(cpdict[bestmatch])
+ else:
+ break
+
+ if results:
+ # reverse, so the most specific atoms come last
+ results.reverse()
+
+ return results
+
+def prune_incremental(split):
+ """
+ Prune off any parts of an incremental variable that are
+ made irrelevant by the latest occuring * or -*. This
+ could be more aggressive but that might be confusing
+ and the point is just to reduce noise a bit.
+ """
+ for i, x in enumerate(reversed(split)):
+ if x == '*':
+ split = split[-i-1:]
+ break
+ elif x == '-*':
+ if i == 0:
+ split = []
+ else:
+ split = split[-i:]
+ break
+ return split
diff --git a/lib/portage/package/ebuild/_config/special_env_vars.py b/lib/portage/package/ebuild/_config/special_env_vars.py
new file mode 100644
index 000000000..a308518af
--- /dev/null
+++ b/lib/portage/package/ebuild/_config/special_env_vars.py
@@ -0,0 +1,211 @@
+# Copyright 2010-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = (
+ 'case_insensitive_vars', 'default_globals', 'env_blacklist', \
+ 'environ_filter', 'environ_whitelist', 'environ_whitelist_re',
+)
+
+import re
+
+# Blacklisted variables are internal variables that are never allowed
+# to enter the config instance from the external environment or
+# configuration files.
+env_blacklist = frozenset((
+ "A", "AA", "BDEPEND", "BROOT", "CATEGORY", "DEPEND", "DESCRIPTION",
+ "DOCS", "EAPI",
+ "EBUILD_FORCE_TEST", "EBUILD_PHASE",
+ "EBUILD_PHASE_FUNC", "EBUILD_SKIP_MANIFEST",
+ "ED", "EMERGE_FROM", "EPREFIX", "EROOT",
+ "GREP_OPTIONS", "HDEPEND", "HOMEPAGE",
+ "INHERITED", "IUSE", "IUSE_EFFECTIVE",
+ "KEYWORDS", "LICENSE", "MERGE_TYPE",
+ "PDEPEND", "PF", "PKGUSE", "PORTAGE_BACKGROUND",
+ "PORTAGE_BACKGROUND_UNMERGE", "PORTAGE_BUILDDIR_LOCKED",
+ "PORTAGE_BUILT_USE", "PORTAGE_CONFIGROOT",
+ "PORTAGE_INTERNAL_CALLER", "PORTAGE_IUSE",
+ "PORTAGE_NONFATAL", "PORTAGE_PIPE_FD", "PORTAGE_REPO_NAME",
+ "PORTAGE_USE", "PROPERTIES", "RDEPEND", "REPOSITORY",
+ "REQUIRED_USE", "RESTRICT", "ROOT", "SLOT", "SRC_URI", "_"
+))
+
+environ_whitelist = []
+
+# Whitelisted variables are always allowed to enter the ebuild
+# environment. Generally, this only includes special portage
+# variables. Ebuilds can unset variables that are not whitelisted
+# and rely on them remaining unset for future phases, without them
+# leaking back in from various locations (bug #189417). It's very
+# important to set our special BASH_ENV variable in the ebuild
+# environment in order to prevent sandbox from sourcing /etc/profile
+ # in its bashrc (causing major leakage).
+environ_whitelist += [
+ "ACCEPT_LICENSE", "BASH_ENV", "BROOT", "BUILD_PREFIX", "COLUMNS", "D",
+ "DISTDIR", "DOC_SYMLINKS_DIR", "EAPI", "EBUILD",
+ "EBUILD_FORCE_TEST",
+ "EBUILD_PHASE", "EBUILD_PHASE_FUNC", "ECLASSDIR", "ECLASS_DEPTH", "ED",
+ "EMERGE_FROM", "EPREFIX", "EROOT", "ESYSROOT",
+ "FEATURES", "FILESDIR", "HOME", "MERGE_TYPE", "NOCOLOR", "PATH",
+ "PKGDIR",
+ "PKGUSE", "PKG_LOGDIR", "PKG_TMPDIR",
+ "PORTAGE_ACTUAL_DISTDIR", "PORTAGE_ARCHLIST", "PORTAGE_BASHRC_FILES",
+ "PORTAGE_BASHRC", "PM_EBUILD_HOOK_DIR",
+ "PORTAGE_BINPKG_FILE", "PORTAGE_BINPKG_TAR_OPTS",
+ "PORTAGE_BINPKG_TMPFILE",
+ "PORTAGE_BIN_PATH",
+ "PORTAGE_BUILDDIR", "PORTAGE_BUILD_GROUP", "PORTAGE_BUILD_USER",
+ "PORTAGE_BUNZIP2_COMMAND", "PORTAGE_BZIP2_COMMAND",
+ "PORTAGE_COLORMAP", "PORTAGE_COMPRESS", "PORTAGE_COMPRESSION_COMMAND",
+ "PORTAGE_COMPRESS_EXCLUDE_SUFFIXES",
+ "PORTAGE_CONFIGROOT", "PORTAGE_DEBUG", "PORTAGE_DEPCACHEDIR",
+ "PORTAGE_DOHTML_UNWARNED_SKIPPED_EXTENSIONS",
+ "PORTAGE_DOHTML_UNWARNED_SKIPPED_FILES",
+ "PORTAGE_DOHTML_WARN_ON_SKIPPED_FILES",
+ "PORTAGE_EBUILD_EXIT_FILE", "PORTAGE_FEATURES",
+ "PORTAGE_GID", "PORTAGE_GRPNAME",
+ "PORTAGE_INTERNAL_CALLER",
+ "PORTAGE_INST_GID", "PORTAGE_INST_UID",
+ "PORTAGE_IPC_DAEMON", "PORTAGE_IUSE", "PORTAGE_ECLASS_LOCATIONS",
+ "PORTAGE_LOG_FILE", "PORTAGE_OVERRIDE_EPREFIX", "PORTAGE_PIPE_FD",
+ "PORTAGE_PYM_PATH", "PORTAGE_PYTHON",
+ "PORTAGE_PYTHONPATH", "PORTAGE_QUIET",
+ "PORTAGE_REPO_NAME", "PORTAGE_REPOSITORIES", "PORTAGE_RESTRICT",
+ "PORTAGE_SIGPIPE_STATUS", "PORTAGE_SOCKS5_PROXY",
+ "PORTAGE_TMPDIR", "PORTAGE_UPDATE_ENV", "PORTAGE_USERNAME",
+ "PORTAGE_VERBOSE", "PORTAGE_WORKDIR_MODE", "PORTAGE_XATTR_EXCLUDE",
+ "PORTDIR", "PORTDIR_OVERLAY", "PREROOTPATH", "PYTHONDONTWRITEBYTECODE",
+ "REPLACING_VERSIONS", "REPLACED_BY_VERSION",
+ "ROOT", "ROOTPATH", "SYSROOT", "T", "TMP", "TMPDIR",
+ "USE_EXPAND", "USE_ORDER", "WORKDIR",
+ "XARGS", "__PORTAGE_TEST_HARDLINK_LOCKS",
+]
+
+# user config variables
+environ_whitelist += [
+ "DOC_SYMLINKS_DIR", "INSTALL_MASK", "PKG_INSTALL_MASK"
+]
+
+environ_whitelist += [
+ "A", "AA", "CATEGORY", "P", "PF", "PN", "PR", "PV", "PVR"
+]
+
+# misc variables inherited from the calling environment
+environ_whitelist += [
+ "COLORTERM", "DISPLAY", "EDITOR", "LESS",
+ "LESSOPEN", "LOGNAME", "LS_COLORS", "PAGER",
+ "TERM", "TERMCAP", "USER",
+ 'ftp_proxy', 'http_proxy', 'no_proxy',
+]
+
+# tempdir settings
+environ_whitelist += [
+ "TMPDIR", "TEMP", "TMP",
+]
+
+# localization settings
+environ_whitelist += [
+ "LANG", "LC_COLLATE", "LC_CTYPE", "LC_MESSAGES",
+ "LC_MONETARY", "LC_NUMERIC", "LC_TIME", "LC_PAPER",
+ "LC_ALL",
+]
+
+# other variables inherited from the calling environment
+environ_whitelist += [
+ "CVS_RSH", "ECHANGELOG_USER",
+ "GPG_AGENT_INFO",
+ "SSH_AGENT_PID", "SSH_AUTH_SOCK",
+ "STY", "WINDOW", "XAUTHORITY",
+]
+
+environ_whitelist = frozenset(environ_whitelist)
+
+environ_whitelist_re = re.compile(r'^(CCACHE_|DISTCC_).*')
+
+# Filter selected variables in the config.environ() method so that
+# they don't needlessly propagate down into the ebuild environment.
+environ_filter = []
+
+# Exclude anything that could be extremely long here (like SRC_URI)
+# since that could cause execve() calls to fail with E2BIG errors. For
+# example, see bug #262647.
+environ_filter += [
+ 'DEPEND', 'RDEPEND', 'PDEPEND', 'SRC_URI',
+]
+
+# misc variables inherited from the calling environment
+environ_filter += [
+ "INFOPATH", "MANPATH", "USER",
+]
+
+# variables that break bash
+environ_filter += [
+ "HISTFILE", "POSIXLY_CORRECT",
+]
+
+# portage config variables and variables set directly by portage
+environ_filter += [
+ "ACCEPT_CHOSTS", "ACCEPT_KEYWORDS", "ACCEPT_PROPERTIES",
+ "ACCEPT_RESTRICT", "AUTOCLEAN",
+ "BINPKG_COMPRESS", "BINPKG_COMPRESS_FLAGS",
+ "CLEAN_DELAY", "COLLISION_IGNORE",
+ "CONFIG_PROTECT", "CONFIG_PROTECT_MASK",
+ "DCO_SIGNED_OFF_BY",
+ "EGENCACHE_DEFAULT_OPTS", "EMERGE_DEFAULT_OPTS",
+ "EMERGE_LOG_DIR",
+ "EMERGE_WARNING_DELAY",
+ "FETCHCOMMAND", "FETCHCOMMAND_FTP",
+ "FETCHCOMMAND_HTTP", "FETCHCOMMAND_HTTPS",
+ "FETCHCOMMAND_RSYNC", "FETCHCOMMAND_SFTP",
+ "GENTOO_MIRRORS", "NOCONFMEM", "O",
+ "PORTAGE_BACKGROUND", "PORTAGE_BACKGROUND_UNMERGE",
+ "PORTAGE_BINHOST", "PORTAGE_BINPKG_FORMAT",
+ "PORTAGE_BUILDDIR_LOCKED",
+ "PORTAGE_CHECKSUM_FILTER",
+ "PORTAGE_ELOG_CLASSES",
+ "PORTAGE_ELOG_MAILFROM", "PORTAGE_ELOG_MAILSUBJECT",
+ "PORTAGE_ELOG_MAILURI", "PORTAGE_ELOG_SYSTEM",
+ "PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS", "PORTAGE_FETCH_RESUME_MIN_SIZE",
+ "PORTAGE_GPG_DIR",
+ "PORTAGE_GPG_KEY", "PORTAGE_GPG_SIGNING_COMMAND",
+ "PORTAGE_IONICE_COMMAND",
+ "PORTAGE_PACKAGE_EMPTY_ABORT",
+ "PORTAGE_REPO_DUPLICATE_WARN",
+ "PORTAGE_RO_DISTDIRS",
+ "PORTAGE_RSYNC_EXTRA_OPTS", "PORTAGE_RSYNC_OPTS",
+ "PORTAGE_RSYNC_RETRIES", "PORTAGE_SSH_OPTS", "PORTAGE_SYNC_STALE",
+ "PORTAGE_USE",
+ "PORT_LOGDIR", "PORT_LOGDIR_CLEAN",
+ "QUICKPKG_DEFAULT_OPTS", "REPOMAN_DEFAULT_OPTS",
+ "RESUMECOMMAND", "RESUMECOMMAND_FTP",
+ "RESUMECOMMAND_HTTP", "RESUMECOMMAND_HTTPS",
+ "RESUMECOMMAND_RSYNC", "RESUMECOMMAND_SFTP",
+ "UNINSTALL_IGNORE", "USE_EXPAND_HIDDEN", "USE_ORDER",
+ "__PORTAGE_HELPER"
+]
+
+# No longer supported variables
+environ_filter += [
+ "SYNC"
+]
+
+environ_filter = frozenset(environ_filter)
+
+# Variables that are not allowed to have per-repo or per-package
+# settings.
+global_only_vars = frozenset([
+ "CONFIG_PROTECT",
+])
+
+default_globals = {
+ 'ACCEPT_LICENSE': '* -@EULA',
+ 'ACCEPT_PROPERTIES': '*',
+ 'PORTAGE_BZIP2_COMMAND': 'bzip2',
+}
+
+validate_commands = ('PORTAGE_BZIP2_COMMAND', 'PORTAGE_BUNZIP2_COMMAND',)
+
+# To enhance usability, make some vars case insensitive
+# by forcing them to lower case.
+case_insensitive_vars = ('AUTOCLEAN', 'NOCOLOR',)
diff --git a/lib/portage/package/ebuild/_config/unpack_dependencies.py b/lib/portage/package/ebuild/_config/unpack_dependencies.py
new file mode 100644
index 000000000..137518949
--- /dev/null
+++ b/lib/portage/package/ebuild/_config/unpack_dependencies.py
@@ -0,0 +1,38 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os, _supported_eapis
+from portage.dep import use_reduce
+from portage.eapi import eapi_has_automatic_unpack_dependencies
+from portage.exception import InvalidDependString
+from portage.localization import _
+from portage.util import grabfile, writemsg
+
+def load_unpack_dependencies_configuration(repositories):
+ repo_dict = {}
+ for repo in repositories.repos_with_profiles():
+ for eapi in _supported_eapis:
+ if eapi_has_automatic_unpack_dependencies(eapi):
+ file_name = os.path.join(repo.location, "profiles", "unpack_dependencies", eapi)
+ lines = grabfile(file_name, recursive=True)
+ for line in lines:
+ elements = line.split()
+ suffix = elements[0].lower()
+ if len(elements) == 1:
+ writemsg(_("--- Missing unpack dependencies for '%s' suffix in '%s'\n") % (suffix, file_name))
+ depend = " ".join(elements[1:])
+ try:
+ use_reduce(depend, eapi=eapi)
+ except InvalidDependString as e:
+ writemsg(_("--- Invalid unpack dependencies for '%s' suffix in '%s': '%s'\n") % (suffix, file_name, e))
+ else:
+ repo_dict.setdefault(repo.name, {}).setdefault(eapi, {})[suffix] = depend
+
+ ret = {}
+ for repo in repositories.repos_with_profiles():
+ for repo_name in [x.name for x in repo.masters] + [repo.name]:
+ for eapi in repo_dict.get(repo_name, {}):
+ for suffix, depend in repo_dict.get(repo_name, {}).get(eapi, {}).items():
+ ret.setdefault(repo.name, {}).setdefault(eapi, {})[suffix] = depend
+
+ return ret