aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFabian Groffen <grobian@gentoo.org>2020-06-02 20:54:23 +0200
committerFabian Groffen <grobian@gentoo.org>2020-06-02 20:54:23 +0200
commite8b395c0fdfdf896fb6d3168dd1cf9a130b20796 (patch)
treef7196553dff427b731718905aa448af6079e4550
parentMerge remote-tracking branch 'overlays-gentoo-org/master' into prefix (diff)
parentEscape percent-signs in portage.package.ebuild.fetch.get_mirror_url() (diff)
downloadportage-e8b395c0.tar.gz
portage-e8b395c0.tar.bz2
portage-e8b395c0.zip
Merge remote-tracking branch 'overlays-gentoo-org/master' into prefix
Signed-off-by: Fabian Groffen <grobian@gentoo.org>
-rw-r--r--.travis.yml7
-rw-r--r--MANIFEST.in5
-rw-r--r--NEWS17
-rw-r--r--RELEASE-NOTES122
-rwxr-xr-xbin/ebuild-helpers/dosym13
-rwxr-xr-xbin/ecompress16
-rw-r--r--bin/isolated-functions.sh4
-rwxr-xr-xbin/misc-functions.sh12
-rw-r--r--bin/phase-functions.sh4
-rw-r--r--bin/phase-helpers.sh10
-rw-r--r--bin/socks5-server.py36
-rw-r--r--cnf/make.globals9
-rw-r--r--doc/api/.gitignore1
-rw-r--r--doc/api/Makefile32
-rw-r--r--doc/api/conf.py66
-rw-r--r--doc/api/index.rst18
-rw-r--r--doc/qa.docbook98
-rw-r--r--lib/_emerge/AbstractEbuildProcess.py2
-rw-r--r--lib/_emerge/AbstractPollTask.py3
-rw-r--r--lib/_emerge/AsynchronousTask.py75
-rw-r--r--lib/_emerge/CompositeTask.py7
-rw-r--r--lib/_emerge/EbuildFetcher.py12
-rw-r--r--lib/_emerge/EbuildMetadataPhase.py3
-rw-r--r--lib/_emerge/EbuildPhase.py66
-rw-r--r--lib/_emerge/FifoIpcDaemon.py3
-rw-r--r--lib/_emerge/Scheduler.py43
-rw-r--r--lib/_emerge/SequentialTaskQueue.py19
-rw-r--r--lib/_emerge/SubProcess.py15
-rw-r--r--lib/_emerge/actions.py40
-rw-r--r--lib/_emerge/create_world_atom.py11
-rw-r--r--lib/_emerge/depgraph.py65
-rw-r--r--lib/portage/_compat_upgrade/binpkg_compression.py40
-rw-r--r--lib/portage/_emirrordist/FetchTask.py9
-rw-r--r--lib/portage/_selinux.py9
-rw-r--r--lib/portage/cache/ebuild_xattr.py5
-rw-r--r--lib/portage/cache/template.py2
-rw-r--r--lib/portage/const.py1
-rw-r--r--lib/portage/data.py10
-rw-r--r--lib/portage/dbapi/cpv_expand.py4
-rw-r--r--lib/portage/dbapi/porttree.py9
-rw-r--r--lib/portage/dbapi/vartree.py9
-rw-r--r--lib/portage/dep/dep_check.py93
-rw-r--r--lib/portage/dep/soname/SonameAtom.py9
-rw-r--r--lib/portage/dispatch_conf.py9
-rw-r--r--lib/portage/emaint/modules/sync/sync.py2
-rw-r--r--lib/portage/locks.py67
-rw-r--r--lib/portage/package/ebuild/_config/KeywordsManager.py16
-rw-r--r--lib/portage/package/ebuild/_config/special_env_vars.py6
-rw-r--r--lib/portage/package/ebuild/deprecated_profile_check.py2
-rw-r--r--lib/portage/package/ebuild/doebuild.py41
-rw-r--r--lib/portage/package/ebuild/fetch.py148
-rw-r--r--lib/portage/package/ebuild/prepare_build_dirs.py21
-rw-r--r--lib/portage/process.py29
-rw-r--r--lib/portage/tests/dbapi/test_auxdb.py77
-rw-r--r--lib/portage/tests/dep/test_soname_atom_pickle.py26
-rw-r--r--lib/portage/tests/ebuild/test_doebuild_spawn.py4
-rw-r--r--lib/portage/tests/emerge/test_simple.py69
-rw-r--r--lib/portage/tests/locks/test_lock_nonblock.py16
-rw-r--r--lib/portage/tests/resolver/ResolverPlayground.py99
-rw-r--r--lib/portage/tests/resolver/test_circular_choices.py44
-rw-r--r--lib/portage/tests/resolver/test_depth.py18
-rw-r--r--lib/portage/tests/resolver/test_multirepo.py8
-rw-r--r--lib/portage/tests/resolver/test_or_choices.py572
-rw-r--r--lib/portage/tests/resolver/test_or_upgrade_installed.py70
-rw-r--r--lib/portage/tests/resolver/test_slot_operator_reverse_deps.py93
-rw-r--r--lib/portage/tests/util/futures/test_compat_coroutine.py29
-rw-r--r--lib/portage/tests/util/futures/test_done_callback_after_exit.py44
-rw-r--r--lib/portage/util/__init__.py8
-rw-r--r--lib/portage/util/_async/AsyncFunction.py5
-rw-r--r--lib/portage/util/_async/FileDigester.py5
-rw-r--r--lib/portage/util/_desktop_entry.py8
-rw-r--r--lib/portage/util/_dyn_libs/LinkageMapELF.py84
-rw-r--r--lib/portage/util/_dyn_libs/NeededEntry.py5
-rw-r--r--lib/portage/util/_dyn_libs/soname_deps_qa.py98
-rw-r--r--lib/portage/util/_eventloop/asyncio_event_loop.py31
-rw-r--r--lib/portage/util/compression_probe.py10
-rw-r--r--lib/portage/util/futures/_asyncio/__init__.py8
-rw-r--r--lib/portage/util/futures/compat_coroutine.py19
-rw-r--r--lib/portage/xml/metadata.py22
-rw-r--r--lib/portage/xpak.py5
-rw-r--r--man/emerge.16
-rw-r--r--man/make.conf.58
-rw-r--r--repoman/RELEASE-NOTES11
-rw-r--r--repoman/cnf/linechecks/linechecks.yaml1
-rw-r--r--repoman/cnf/repository/repository.yaml1
-rw-r--r--repoman/lib/repoman/_subprocess.py18
-rw-r--r--repoman/lib/repoman/gpg.py9
-rw-r--r--repoman/lib/repoman/metadata.py51
-rw-r--r--repoman/lib/repoman/modules/linechecks/deprecated/inherit.py5
-rw-r--r--repoman/lib/repoman/modules/linechecks/workaround/__init__.py6
-rw-r--r--repoman/lib/repoman/modules/linechecks/workaround/workarounds.py7
-rw-r--r--repoman/lib/repoman/modules/vcs/git/changes.py22
-rwxr-xr-xrepoman/runtests8
-rwxr-xr-xrepoman/setup.py2
-rwxr-xr-xruntests8
-rwxr-xr-xsetup.py37
-rw-r--r--tox.ini4
97 files changed, 2391 insertions, 665 deletions
diff --git a/.travis.yml b/.travis.yml
index dc8e2857c..6d3afa4ce 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,12 +1,17 @@
+dist: bionic
language: python
python:
- 2.7
- 3.6
- 3.7
- - 3.8-dev
+ - 3.8
+ - 3.9-dev
- pypy3
# command to install dependencies
+before_install:
+ # Use "dist: bionic" to get a zstd with --long support.
+ - sudo apt-get -y install zstd
install:
- pip install tox
diff --git a/MANIFEST.in b/MANIFEST.in
index 4f6cac162..c862b044f 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -7,6 +7,11 @@ include TEST-NOTES
include doc/custom.xsl
recursive-include doc *.docbook
+# sphinx sources
+include doc/api/index.rst
+include doc/api/conf.py
+include doc/api/Makefile
+
# extra conf files used in ebuild
include cnf/make.conf.example.*
diff --git a/NEWS b/NEWS
index 65cfa3e4f..4392f4c44 100644
--- a/NEWS
+++ b/NEWS
@@ -1,5 +1,22 @@
News (mainly features/major bug fixes)
+portage-2.3.100
+--------------
+* New BINPKG_COMPRESS=zstd default (does not apply to installed systems
+ using the old bzip2 default).
+
+portage-2.3.97
+--------------
+* Support for the PORTAGE_LOG_FILTER_FILE_CMD variable has been
+ temporarily removed due to bug 716636.
+
+portage-2.3.90
+--------------
+* The new PORTAGE_LOG_FILTER_FILE_CMD make.conf(5) variable specifies a
+ command that filters build log output to a log file. In order to
+ filter ANSI escape codes from build logs, ansifilter(1) is a
+ convenient setting for this variable.
+
portage-2.3.80
--------------
* The emerge --quickpkg-direct option enables use of installed packages
diff --git a/RELEASE-NOTES b/RELEASE-NOTES
index f1c748e3c..cf4a04c29 100644
--- a/RELEASE-NOTES
+++ b/RELEASE-NOTES
@@ -1,6 +1,128 @@
Release Notes; upgrade information mainly.
Features/major bugfixes are listed in NEWS
+portage-2.3.100
+==================================
+* Bug Fixes:
+ - Bug 715108 Change default BINPKG_COMPRESS to zstd
+ - Bug 719456 Add dependency on app-arch/zstd
+ - Bug 720866 Do not set PKG_CONFIG_PATH
+ - Bug 721402 Hostname UnicodeEncodeError surrogates not allowed
+ - Bug 721516 Suppress precompressed QA notice for docompress -x
+
+portage-2.3.99
+==================================
+* Bug Fixes:
+ - Bug 717140 dev-libs/libgit2 upgrade/downgrade loop triggered by
+ dev-libs/libgit2-glib dependency on <dev-libs/libgit2-1:0=[ssh]
+
+portage-2.3.98
+==================================
+* Bug Fixes:
+ - Bug 711322 always wakeup for empty merge queue
+
+portage-2.3.97
+==================================
+* Bug Fixes:
+ - Bug 709746 temporarily remove PORTAGE_LOG_FILTER_FILE_CMD support
+ - Bug 715162 infer implicit soname from file basename, for musl
+ - Bug 716636 emerge hangs in releases after 2.3.89-r1
+
+portage-2.3.96
+==================================
+* Bug Fixes:
+ - Bug 714480 DirectoryNotFound: /var/tmp/portage/category-directory
+
+portage-2.3.95
+==================================
+* Bug Fixes:
+ - Bug 713100 fix FEATURES=userpriv $HOME permissions
+ - Bug 713726 emerge --info: Filter variables for credentials
+ - Bug 713818 eqawarn: output to build log regardless of --quiet
+
+portage-2.3.94
+==================================
+* Bug Fixes:
+ - Bug 692492 secure ebuild ${D} permissions
+ - Bug 710444 omit zstd --long=31 for decompress on 32-bit arch
+ - Bug 712298 respect emerge --deep=<depth> with --update
+
+portage-2.3.93
+==================================
+* Bug Fixes:
+ - Bug 711322 schedule exit listeners via call_soon
+ - Bug 711688 BinpkgFetcher sync_timestamp KeyError regression
+
+portage-2.3.92
+==================================
+* Bug Fixes:
+ - Bug 601252 emerge --pretend --fetchonly event loop recursion
+ - Bug 709334 socks5-server.py async and await coroutine syntax
+ - Bug 709746 Rename PORTAGE_LOG_FILTER_FILE_CMD from
+ PORTAGE_LOG_FILTER_FILE
+ - Bug 711322 emerge hang after src_install
+ - Bug 711362 egencache AttributeError: 'NoneType' object has no
+ attribute 'ebuild'
+ - Bug 711400 AttributeError: 'NoneType' object has no attribute
+ 'depth'
+
+portage-2.3.91
+==================================
+* Bug Fixes:
+ - Bug 705910 remove pdb.set_trace() from exception handler
+ - Bug 711174 FEATURES=compress-build-logs EOFError regression
+ - Bug 711178 emerge --getbinpkg event loop recursion regression
+
+portage-2.3.90
+==================================
+* Bug Fixes:
+ - Bug 601252 DISTDIR NFS root_squash support
+ - Bug 709746 new PORTAGE_LOG_FILTER_FILE_CMD variable specifies a
+ command that filters build log output to a log file
+ - Bug 710076 einstalldocs: Fix test for DOCS being unset
+
+portage-2.3.89
+==================================
+* Bug Fixes:
+ - Bug 649622 depclean: ensure consistency with update actions, via
+ consistent order of dependency traversal
+
+portage-2.3.88
+==================================
+* Bug Fixes:
+ - Bug 649622 prevent unnecessary installation of virtual/w3m followed
+ by removal by depclean
+ - Bug 705736 preserve-libs: prevent unnecessary preservation of system
+ libraries which a package bundles
+ - Bug 707820 generate API documentation with sphinx-apidoc
+ - Bug 708448 support FEATURES=qa-unresolved-soname-deps so that the
+ QA warning from bug 704320 can be disabled
+ - Bug 708660 phase-helpers.sh: avoid passing an empty root value to
+ portageq when ebuild IPC is disabled
+
+portage-2.3.87
+==================================
+* Bug Fixes:
+ - Bug 691798 treat GLEP 81 acct-* categories like virtual
+ - Bug 707108 depclean: do not eliminate upgrades
+
+
+portage-2.3.86
+==================================
+* Bug Fixes:
+ - Bug 706278 Adjust || preference for slot upgrades
+ - Bug 706298 Suppress package.keywords warning for API consumers
+
+
+portage-2.3.85
+==================================
+* Bug Fixes:
+ - Bug 615594 dosym: revert deprecated prefix compat
+ - Bug 704256 emerge-webrsync: chmod 755 temp dir
+ - Bug 704320 Add QA check for unresolved soname dependencies
+ - Bug 704848 doebuild: export SANDBOX_LOG=${T}/sandbox.log
+ - Bug 705986 solve pypy / pypy-exe dependency cycle
+
portage-2.3.84
==================================
* Bug Fixes:
diff --git a/bin/ebuild-helpers/dosym b/bin/ebuild-helpers/dosym
index da15fe397..681e198c5 100755
--- a/bin/ebuild-helpers/dosym
+++ b/bin/ebuild-helpers/dosym
@@ -1,5 +1,5 @@
#!@PORTAGE_BASH@
-# Copyright 1999-2018 Gentoo Foundation
+# Copyright 1999-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
source "${PORTAGE_BIN_PATH}"/isolated-functions.sh || exit 1
@@ -20,16 +20,7 @@ fi
destdir=${2%/*}
[[ ! -d ${ED%/}/${destdir#/} ]] && dodir "${destdir}"
-target="${1}"
-# DEPRECATED HACK: when absolute, prefix with offset for Gentoo Prefix
-# (but only if ${EPREFIX} is not there already)
-# this will eventually be removed, #615594
-if [[ ${target:0:1} == "/" && ${target}/ != "${EPREFIX}"/* ]]; then
- eqawarn "dosym: prepending EPREFIX to path implicitly. If this is desired,"
- eqawarn " please fix the ebuild to use \${EPREFIX} explicitly."
- target="${EPREFIX}${target}"
-fi
-ln -snf "${target}" "${ED%/}/${2#/}"
+ln -snf "${1}" "${ED%/}/${2#/}"
ret=$?
[[ $ret -ne 0 ]] && __helpers_die "${0##*/} failed"
diff --git a/bin/ecompress b/bin/ecompress
index dfa1a0b44..2d74ed07a 100755
--- a/bin/ecompress
+++ b/bin/ecompress
@@ -19,16 +19,28 @@ while [[ $# -gt 0 ]] ; do
shift
skip_dirs=()
+ skip_files=()
for skip; do
if [[ -d ${ED%/}/${skip#/} ]]; then
skip_dirs+=( "${ED%/}/${skip#/}" )
else
rm -f "${ED%/}/${skip#/}.ecompress" || die
+ skip_files+=("${ED%/}/${skip#/}")
fi
done
if [[ ${#skip_dirs[@]} -gt 0 ]]; then
- find "${skip_dirs[@]}" -name '*.ecompress' -delete || die
+ while read -r -d ''; do
+ skip_files+=(${REPLY#.ecompress})
+ done < <(find "${skip_dirs[@]}" -name '*.ecompress' -print0 -delete || die)
+ fi
+
+ if [[ ${#skip_files[@]} -gt 0 && -s ${T}/.ecompress_had_precompressed ]]; then
+ sed_args=()
+ for f in "${skip_files[@]}"; do
+ sed_args+=(-e "s|^${f}\$||")
+ done
+ sed "${sed_args[@]}" -e '/^$/d' -i "${T}/.ecompress_had_precompressed" || die
fi
exit 0
@@ -176,7 +188,7 @@ find "${ED}" -name '*.ecompress' -delete -print0 |
___parallel_xargs -0 "${PORTAGE_BIN_PATH}"/ecompress-file
ret=${?}
-if [[ -f ${T}/.ecompress_had_precompressed ]]; then
+if [[ -s ${T}/.ecompress_had_precompressed ]]; then
eqawarn "One or more compressed files were found in docompress-ed directories."
eqawarn "Please fix the ebuild not to install compressed files (manpages,"
eqawarn "documentation) when automatic compression is used:"
diff --git a/bin/isolated-functions.sh b/bin/isolated-functions.sh
index efc377575..7840d6012 100644
--- a/bin/isolated-functions.sh
+++ b/bin/isolated-functions.sh
@@ -1,5 +1,5 @@
#!@PORTAGE_BASH@
-# Copyright 1999-2019 Gentoo Authors
+# Copyright 1999-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
source "${PORTAGE_BIN_PATH}/eapi.sh" || exit 1
@@ -270,7 +270,7 @@ eqawarn() {
__elog_base QA "$*"
[[ ${RC_ENDCOL} != "yes" && ${LAST_E_CMD} == "ebegin" ]] && echo >&2
echo -e "$@" | while read -r ; do
- __vecho " $WARN*$NORMAL $REPLY"
+ echo " $WARN*$NORMAL $REPLY" >&2
done
LAST_E_CMD="eqawarn"
return 0
diff --git a/bin/misc-functions.sh b/bin/misc-functions.sh
index d01ffe43a..d01a8edb1 100755
--- a/bin/misc-functions.sh
+++ b/bin/misc-functions.sh
@@ -216,12 +216,22 @@ install_qa_check_elf() {
if type -P scanelf > /dev/null ; then
# Save NEEDED information after removing self-contained providers
rm -f "$PORTAGE_BUILDDIR"/build-info/NEEDED{,.ELF.2}
- scanelf -qyRF '%a;%p;%S;%r;%n' "${D%/}/" | { while IFS= read -r l; do
+ # We don't use scanelf -q, since that would omit libraries like
+ # musl's /usr/lib/libc.so which do not have any DT_NEEDED or
+ # DT_SONAME settings. Since we don't use scanelf -q, we have to
+ # handle the special rpath value " - " below.
+ scanelf -yRBF '%a;%p;%S;%r;%n' "${D%/}/" | { while IFS= read -r l; do
arch=${l%%;*}; l=${l#*;}
obj="/${l%%;*}"; l=${l#*;}
soname=${l%%;*}; l=${l#*;}
rpath=${l%%;*}; l=${l#*;}; [ "${rpath}" = " - " ] && rpath=""
needed=${l%%;*}; l=${l#*;}
+
+ # Infer implicit soname from basename (bug 715162).
+ if [[ -z ${soname} && $(file "${D%/}${obj}") == *"SB shared object"* ]]; then
+ soname=${obj##*/}
+ fi
+
echo "${obj} ${needed}" >> "${PORTAGE_BUILDDIR}"/build-info/NEEDED
echo "${arch:3};${obj};${soname};${rpath};${needed}" >> "${PORTAGE_BUILDDIR}"/build-info/NEEDED.ELF.2
done }
diff --git a/bin/phase-functions.sh b/bin/phase-functions.sh
index a43e40f21..78bb5caca 100644
--- a/bin/phase-functions.sh
+++ b/bin/phase-functions.sh
@@ -1020,10 +1020,6 @@ __ebuild_main() {
[[ ${SANDBOX_WRITE/$DISTCC_DIR} = $SANDBOX_WRITE ]] && \
addwrite "$DISTCC_DIR"
- x=LIBDIR_$ABI
- [ -z "$PKG_CONFIG_PATH" -a -n "$ABI" -a -n "${!x}" ] && \
- export PKG_CONFIG_PATH=${EPREFIX}/usr/${!x}/pkgconfig
-
if has noauto $FEATURES && \
[[ ! -f $PORTAGE_BUILDDIR/.unpacked ]] ; then
echo
diff --git a/bin/phase-helpers.sh b/bin/phase-helpers.sh
index d0ab03712..c4ab51d78 100644
--- a/bin/phase-helpers.sh
+++ b/bin/phase-helpers.sh
@@ -1,5 +1,5 @@
#!@PORTAGE_BASH@
-# Copyright 1999-2019 Gentoo Authors
+# Copyright 1999-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
if ___eapi_has_DESTTREE_INSDESTTREE; then
@@ -878,7 +878,7 @@ ___best_version_and_has_version_common() {
if ___eapi_has_prefix_variables; then
case ${root_arg} in
-r) root=${ROOT%/}/${EPREFIX#/} ;;
- -d) root=${ESYSROOT} ;;
+ -d) root=${ESYSROOT:-/} ;;
-b)
# Use /${PORTAGE_OVERRIDE_EPREFIX#/} which is equivalent
# to BROOT, except BROOT is only defined in src_* phases.
@@ -888,8 +888,8 @@ ___best_version_and_has_version_common() {
esac
else
case ${root_arg} in
- -r) root=${ROOT} ;;
- -d) root=${SYSROOT} ;;
+ -r) root=${ROOT:-/} ;;
+ -d) root=${SYSROOT:-/} ;;
-b) root=/ ;;
esac
fi ;;
@@ -969,7 +969,7 @@ fi
if ___eapi_has_einstalldocs; then
einstalldocs() {
(
- if ! declare -p DOCS &>/dev/null ; then
+ if [[ $(declare -p DOCS 2>/dev/null) != *=* ]]; then
local d
for d in README* ChangeLog AUTHORS NEWS TODO CHANGES \
THANKS BUGS FAQ CREDITS CHANGELOG ; do
diff --git a/bin/socks5-server.py b/bin/socks5-server.py
index d1649ad4a..1d07c98ed 100644
--- a/bin/socks5-server.py
+++ b/bin/socks5-server.py
@@ -29,8 +29,7 @@ class Socks5Server(object):
An asynchronous SOCKSv5 server.
"""
- @asyncio.coroutine
- def handle_proxy_conn(self, reader, writer):
+ async def handle_proxy_conn(self, reader, writer):
"""
Handle incoming client connection. Perform SOCKSv5 request
exchange, open a proxied connection and start relaying.
@@ -43,7 +42,7 @@ class Socks5Server(object):
try:
# SOCKS hello
- data = yield from reader.readexactly(2)
+ data = await reader.readexactly(2)
vers, method_no = struct.unpack('!BB', data)
if vers != 0x05:
@@ -53,7 +52,7 @@ class Socks5Server(object):
return
# ...and auth method list
- data = yield from reader.readexactly(method_no)
+ data = await reader.readexactly(method_no)
for method in data:
if method == 0x00:
break
@@ -64,13 +63,13 @@ class Socks5Server(object):
# auth reply
repl = struct.pack('!BB', 0x05, method)
writer.write(repl)
- yield from writer.drain()
+ await writer.drain()
if method == 0xFF:
writer.close()
return
# request
- data = yield from reader.readexactly(4)
+ data = await reader.readexactly(4)
vers, cmd, rsv, atyp = struct.unpack('!BBBB', data)
if vers != 0x05 or rsv != 0x00:
@@ -83,31 +82,31 @@ class Socks5Server(object):
if cmd != 0x01: # CONNECT
rpl = 0x07 # command not supported
elif atyp == 0x01: # IPv4
- data = yield from reader.readexactly(4)
+ data = await reader.readexactly(4)
addr = socket.inet_ntoa(data)
elif atyp == 0x03: # domain name
- data = yield from reader.readexactly(1)
+ data = await reader.readexactly(1)
addr_len, = struct.unpack('!B', data)
- addr = yield from reader.readexactly(addr_len)
+ addr = await reader.readexactly(addr_len)
try:
addr = addr.decode('idna')
except UnicodeDecodeError:
rpl = 0x04 # host unreachable
elif atyp == 0x04: # IPv6
- data = yield from reader.readexactly(16)
+ data = await reader.readexactly(16)
addr = socket.inet_ntop(socket.AF_INET6, data)
else:
rpl = 0x08 # address type not supported
# try to connect if we can handle it
if rpl == 0x00:
- data = yield from reader.readexactly(2)
+ data = await reader.readexactly(2)
port, = struct.unpack('!H', data)
try:
# open a proxied connection
- proxied_reader, proxied_writer = yield from asyncio.open_connection(
+ proxied_reader, proxied_writer = await asyncio.open_connection(
addr, port)
except (socket.gaierror, socket.herror):
# DNS failure
@@ -150,7 +149,7 @@ class Socks5Server(object):
# reply to the request
repl = struct.pack('!BBB', 0x05, rpl, 0x00)
writer.write(repl + repl_addr)
- yield from writer.drain()
+ await writer.drain()
# close if an error occured
if rpl != 0x00:
@@ -166,7 +165,7 @@ class Socks5Server(object):
try:
try:
while True:
- data = yield from reader.read(4096)
+ data = await reader.read(4096)
if data == b'':
# client disconnected, stop relaying from
# remote host
@@ -174,7 +173,7 @@ class Socks5Server(object):
break
proxied_writer.write(data)
- yield from proxied_writer.drain()
+ await proxied_writer.drain()
except OSError:
# read or write failure
t.cancel()
@@ -193,8 +192,7 @@ class Socks5Server(object):
writer.close()
raise
- @asyncio.coroutine
- def handle_proxied_conn(self, proxied_reader, writer, parent_task):
+ async def handle_proxied_conn(self, proxied_reader, writer, parent_task):
"""
Handle the proxied connection. Relay incoming data
to the client.
@@ -208,12 +206,12 @@ class Socks5Server(object):
try:
try:
while True:
- data = yield from proxied_reader.read(4096)
+ data = await proxied_reader.read(4096)
if data == b'':
break
writer.write(data)
- yield from writer.drain()
+ await writer.drain()
finally:
parent_task.cancel()
except (OSError, asyncio.CancelledError):
diff --git a/cnf/make.globals b/cnf/make.globals
index 25678ee82..d3ba98513 100644
--- a/cnf/make.globals
+++ b/cnf/make.globals
@@ -1,4 +1,4 @@
-# Copyright 1999-2019 Gentoo Authors
+# Copyright 1999-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
# System-wide defaults for the Portage system
@@ -34,6 +34,10 @@ RPMDIR="@PORTAGE_EPREFIX@/var/cache/rpm"
# Temporary build directory
PORTAGE_TMPDIR="@PORTAGE_EPREFIX@/var/tmp"
+# The compression used for binary packages. Defaults to zstd except for
+# existing installs where bzip2 is used for backward compatibility.
+BINPKG_COMPRESS="zstd"
+
# Fetching command (3 tries, passive ftp for firewall compatibility)
FETCHCOMMAND="wget -t 3 -T 60 --passive-ftp -O \"\${DISTDIR}/\${FILE}\" \"\${URI}\""
RESUMECOMMAND="wget -c -t 3 -T 60 --passive-ftp -O \"\${DISTDIR}/\${FILE}\" \"\${URI}\""
@@ -53,7 +57,8 @@ FEATURES="assume-digests binpkg-docompress binpkg-dostrip binpkg-logs
config-protect-if-modified distlocks ebuild-locks
fixlafiles ipc-sandbox merge-sync multilib-strict
network-sandbox news parallel-fetch pid-sandbox
- preserve-libs protect-owned sandbox sfperms strict
+ preserve-libs protect-owned qa-unresolved-soname-deps
+ sandbox sfperms strict
unknown-features-warn unmerge-logs unmerge-orphans userfetch
userpriv usersandbox usersync"
diff --git a/doc/api/.gitignore b/doc/api/.gitignore
new file mode 100644
index 000000000..796b96d1c
--- /dev/null
+++ b/doc/api/.gitignore
@@ -0,0 +1 @@
+/build
diff --git a/doc/api/Makefile b/doc/api/Makefile
new file mode 100644
index 000000000..56420a497
--- /dev/null
+++ b/doc/api/Makefile
@@ -0,0 +1,32 @@
+# Makefile for Sphinx documentation
+#
+
+SPHINX_APIDOC_OPTIONS = members,private-members,undoc-members,show-inheritance,ignore-module-all,inherited-members
+export SPHINX_APIDOC_OPTIONS
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+SOURCEDIR = .
+BUILDDIR = build
+TOPDIR = ../..
+
+# Put it first so that "make" without argument is like "make help".
+help:
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+clean:
+ rm -rf $(BUILDDIR) $(SOURCEDIR)/api
+
+$(BUILDDIR)/_sources/portage.rst:
+ mkdir -p "$(BUILDDIR)/_sources"
+ cp -pPR "$(SOURCEDIR)/conf.py" "$(SOURCEDIR)/index.rst" "$(BUILDDIR)/_sources"
+ sphinx-apidoc -TPef -o "$(BUILDDIR)/_sources" $(TOPDIR)/lib/_emerge
+ sphinx-apidoc -TPef -o "$(BUILDDIR)/_sources" $(TOPDIR)/lib/portage $(TOPDIR)/lib/portage/tests
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile $(BUILDDIR)/_sources/portage.rst
+ @$(SPHINXBUILD) -M $@ "$(BUILDDIR)/_sources" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/doc/api/conf.py b/doc/api/conf.py
new file mode 100644
index 000000000..f318ca25d
--- /dev/null
+++ b/doc/api/conf.py
@@ -0,0 +1,66 @@
+# Configuration file for the Sphinx documentation builder.
+#
+# This file only contains a selection of the most common options. For a full
+# list see the documentation:
+# http://www.sphinx-doc.org/en/master/config
+
+# -- Path setup --------------------------------------------------------------
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+# import os
+# import sys
+# sys.path.insert(0, os.path.abspath('.'))
+
+import os
+from os import path as osp
+import sys
+
+if osp.isfile(osp.abspath(osp.join(osp.dirname(__file__), "../../../../.portage_not_installed"))):
+ sys.path.insert(0, osp.abspath(osp.join(osp.dirname(__file__), "../../../../lib")))
+import portage
+
+# -- Project information -----------------------------------------------------
+
+project = 'portage'
+copyright = '2020, Gentoo Authors'
+author = 'Gentoo Authors'
+
+# The full version, including alpha/beta/rc tags
+release = str(portage.VERSION)
+
+# -- General configuration ---------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'sphinx_epytext',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+# templates_path = []
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This pattern also affects html_static_path and html_extra_path.
+# exclude_patterns = []
+
+# -- Options for HTML output -------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
+html_show_sourcelink = False
+html_theme = 'sphinxdoc'
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+# html_static_path = []
+
+autodoc_default_options = dict((opt, True) for opt in
+ filter(None, os.environ.get('SPHINX_APIDOC_OPTIONS', '').split(',')))
diff --git a/doc/api/index.rst b/doc/api/index.rst
new file mode 100644
index 000000000..ffaece6c9
--- /dev/null
+++ b/doc/api/index.rst
@@ -0,0 +1,18 @@
+Portage API Documentation
+=========================
+
+Modules
+=======
+
+.. toctree::
+ :maxdepth: 1
+
+ _emerge
+ portage
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/doc/qa.docbook b/doc/qa.docbook
index 28ff6cf8e..fd5279b41 100644
--- a/doc/qa.docbook
+++ b/doc/qa.docbook
@@ -127,6 +127,104 @@
</para>
</sect1>
+ <sect1 id='qa-unresolved-soname-dependencies'>
+ <title>Unresolved soname dependencies</title>
+ <para>
+ <programlisting>
+ QA Notice: Unresolved soname dependencies
+ </programlisting>
+ </para>
+ <para>
+ This warning comes up when a library or executable has one or more
+ soname dependencies (found in its NEEDED.ELF.2 metadata) that could
+ not be resolved by usual means. If you run <command>ldd</command> on
+ files like these then it will report a "not found" error for each
+ unresolved soname dependency. In order to correct problems with
+ soname dependency resolution, use one or more of the approaches
+ described in the following sections.
+ </para>
+ <para>
+ Content of the NEEDED.ELF.2 metadata file may be useful for
+ debugging purposes. Find the NEEDED.ELF.2 file in the
+ ${D}/../build-info/ directory after the ebuild src_install phase
+ completes, or in the /var/db/pkg/*/*/ directory for an installed
+ package. Each line of the NEEDED.ELF.2 file contains semicolon
+ separated values for a single ELF file. The soname dependencies are
+ found in the DT_NEEDED column:
+ <programlisting>
+ E_MACHINE;path;DT_SONAME;DT_RUNPATH;DT_NEEDED;multilib category
+ </programlisting>
+ </para>
+ <sect2 id='qa-unresolved-soname-dependencies-resolved-bu-external-dependencies'>
+ <title>External dependencies</title>
+ <para>
+ For packages that install pre-built binaries, it may be possible to
+ resolve soname dependencies simply by adding dependencies for one
+ or more other packages that are known to provide the needed sonames.
+ </para>
+ </sect2>
+ <sect2 id='qa-unresolved-soname-dependencies-resolved-by-removal-of-unecessary-files'>
+ <title>Removal of unnecessary files</title>
+ <para>
+ For packages that install pre-built binaries, it may be possible to
+ resolve soname dependencies simply by removing unnecessary files
+ which have unresolved soname dependencies. For example, some pre-built
+ binary packages include binaries intended for irrelevant architectures
+ or operating systems, and these files can simply be removed because
+ they are unnecessary.
+ </para>
+ </sect2>
+ <sect2 id='qa-unresolved-soname-dependencies-resolved-by-addition-of-dt-runpath-entries'>
+ <title>Addition of DT_RUNPATH entries</title>
+ <para>
+ If the relevant dependencies are installed in a location that is not
+ included in the dynamic linker search path, then it's necessary for
+ files to include a DT_RUNPATH entry which refers to the appropriate
+ directory. The special $ORIGIN value can be used to create a relative
+ path reference in DT_RUNPATH, where $ORIGIN is a placeholder for the
+ directory where the file having the DT_RUNPATH entry is located.
+ </para>
+ <para>
+ For pre-built binaries, it may be necessary to fix up DT_RUNPATH using
+ <command>patchelf --set-rpath</command>. For example, use
+ <command>patchelf --set-rpath '$ORIGIN'</command> if a given binary
+ should link to libraries found in the same directory as the binary
+ itself, or use <command>patchelf --set-rpath '$ORIGIN/libs'</command>
+ if a given binary should link to libraries found in a subdirectory
+ named libs found in the same directory as the binary itself.
+ </para>
+ <para>
+ For binaries built from source, a flag like
+ <option>-Wl,-rpath,/path/of/directory/containing/libs</option> will
+ create binaries with the desired DT_RUNPATH entry.
+ </para>
+ </sect2>
+ <sect2 id='qa-unresolved-soname-dependencies-resolved-by-addition-of-dt-soname-settings'>
+ <title>Addition of DT_SONAME settings</title>
+ <para>
+ If a package installs dynamic libraries which do not set DT_SONAME,
+ then this can lead to unresolved soname dependencies.
+ For dynamic libraries built from source, a flag like
+ <option>-Wl,-soname=foo.so.1</option> will create a DT_SONAME setting.
+ For pre-built dynamic libraries, it may be necessary to fix up
+ DT_SONAME using <command>patchelf --set-soname</command>.
+ </para>
+ </sect2>
+ <sect2 id='qa-unresolved-soname-dependencies-resolved-by-adjustment-to-portage-soname-resolution-logic'>
+ <title>Adjustment to Portage soname resolution logic</title>
+ <para>
+ It may be necessary to adjust Portage soname resolution logic in
+ order to account for special circumstances. For example, Portage
+ soname resolution tolerates missing DT_SONAME for dynamic libraries
+ that a package installs in a directory that its binaries reference
+ via DT_RUNPATH. This behavior is useful for packages that have
+ internal dynamic libraries stored in a private directory. An example
+ is ebtables, as discussed in
+ <ulink url="https://bugs.gentoo.org/646190">bug 646190</ulink>.
+ </para>
+ </sect2>
+ </sect1>
+
<sect1 id='qa-abs-lib-link'>
<title>Absolute Symlink In Library Directory</title>
<para>
diff --git a/lib/_emerge/AbstractEbuildProcess.py b/lib/_emerge/AbstractEbuildProcess.py
index ddf04e9b3..1c1955cfe 100644
--- a/lib/_emerge/AbstractEbuildProcess.py
+++ b/lib/_emerge/AbstractEbuildProcess.py
@@ -401,7 +401,7 @@ class AbstractEbuildProcess(SpawnProcess):
SpawnProcess._async_wait(self)
elif self._build_dir_unlock is None:
if self.returncode is None:
- raise asyncio.InvalidStateError('Result is not ready.')
+ raise asyncio.InvalidStateError('Result is not ready for %s' % (self,))
self._async_unlock_builddir(returncode=self.returncode)
def _async_unlock_builddir(self, returncode=None):
diff --git a/lib/_emerge/AbstractPollTask.py b/lib/_emerge/AbstractPollTask.py
index 4157794c6..7e9f2b536 100644
--- a/lib/_emerge/AbstractPollTask.py
+++ b/lib/_emerge/AbstractPollTask.py
@@ -16,9 +16,6 @@ class AbstractPollTask(AsynchronousTask):
_bufsize = 4096
- def isAlive(self):
- return bool(self._registered)
-
def _read_array(self, f):
"""
NOTE: array.fromfile() is used here only for testing purposes,
diff --git a/lib/_emerge/AsynchronousTask.py b/lib/_emerge/AsynchronousTask.py
index cf6e6dc44..28beb4992 100644
--- a/lib/_emerge/AsynchronousTask.py
+++ b/lib/_emerge/AsynchronousTask.py
@@ -18,7 +18,7 @@ class AsynchronousTask(SlotObject):
"""
__slots__ = ("background", "cancelled", "returncode", "scheduler") + \
- ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
+ ("_exit_listener_handles", "_exit_listeners", "_start_listeners")
_cancelled_returncode = - signal.SIGINT
@@ -37,7 +37,7 @@ class AsynchronousTask(SlotObject):
@returns: Future, result is self.returncode
"""
waiter = self.scheduler.create_future()
- exit_listener = lambda self: waiter.set_result(self.returncode)
+ exit_listener = lambda self: waiter.cancelled() or waiter.set_result(self.returncode)
self.addExitListener(exit_listener)
waiter.add_done_callback(lambda waiter:
self.removeExitListener(exit_listener) if waiter.cancelled() else None)
@@ -79,20 +79,20 @@ class AsynchronousTask(SlotObject):
"""
if self.returncode is None:
if self.scheduler.is_running():
- raise asyncio.InvalidStateError('Result is not ready.')
+ raise asyncio.InvalidStateError('Result is not ready for %s' % (self,))
self.scheduler.run_until_complete(self.async_wait())
self._wait_hook()
return self.returncode
def _async_wait(self):
"""
- For cases where _start exits synchronously, this method is a
- convenient way to trigger an asynchronous call to self.wait()
- (in order to notify exit listeners), avoiding excessive event
- loop recursion (or stack overflow) that synchronous calling of
- exit listeners can cause. This method is thread-safe.
+ Subclasses call this method in order to invoke exit listeners when
+ self.returncode is set. Subclasses may override this method in order
+ to perform cleanup. The default implementation for this method simply
+ calls self.wait(), which will immediately raise an InvalidStateError
+ if the event loop is running and self.returncode is None.
"""
- self.scheduler.call_soon(self.wait)
+ self.wait()
def cancel(self):
"""
@@ -133,6 +133,10 @@ class AsynchronousTask(SlotObject):
self._start_listeners = []
self._start_listeners.append(f)
+ # Ensure that start listeners are always called.
+ if self.returncode is not None:
+ self._start_hook()
+
def removeStartListener(self, f):
if self._start_listeners is None:
return
@@ -144,7 +148,7 @@ class AsynchronousTask(SlotObject):
self._start_listeners = None
for f in start_listeners:
- f(self)
+ self.scheduler.call_soon(f, self)
def addExitListener(self, f):
"""
@@ -153,13 +157,20 @@ class AsynchronousTask(SlotObject):
if self._exit_listeners is None:
self._exit_listeners = []
self._exit_listeners.append(f)
+ if self.returncode is not None:
+ self._wait_hook()
def removeExitListener(self, f):
- if self._exit_listeners is None:
- if self._exit_listener_stack is not None:
- self._exit_listener_stack.remove(f)
- return
- self._exit_listeners.remove(f)
+ if self._exit_listeners is not None:
+ try:
+ self._exit_listeners.remove(f)
+ except ValueError:
+ pass
+
+ if self._exit_listener_handles is not None:
+ handle = self._exit_listener_handles.pop(f, None)
+ if handle is not None:
+ handle.cancel()
def _wait_hook(self):
"""
@@ -168,29 +179,23 @@ class AsynchronousTask(SlotObject):
used to trigger exit listeners when the returncode first
becomes available.
"""
+ # Ensure that start listeners are always called.
+ if self.returncode is not None:
+ self._start_hook()
+
if self.returncode is not None and \
self._exit_listeners is not None:
- # This prevents recursion, in case one of the
- # exit handlers triggers this method again by
- # calling wait(). Use a stack that gives
- # removeExitListener() an opportunity to consume
- # listeners from the stack, before they can get
- # called below. This is necessary because a call
- # to one exit listener may result in a call to
- # removeExitListener() for another listener on
- # the stack. That listener needs to be removed
- # from the stack since it would be inconsistent
- # to call it after it has been been passed into
- # removeExitListener().
- self._exit_listener_stack = self._exit_listeners
+ listeners = self._exit_listeners
self._exit_listeners = None
+ if self._exit_listener_handles is None:
+ self._exit_listener_handles = {}
- # Execute exit listeners in reverse order, so that
- # the last added listener is executed first. This
- # allows SequentialTaskQueue to decrement its running
- # task count as soon as one of its tasks exits, so that
- # the value is accurate when other listeners execute.
- while self._exit_listener_stack:
- self._exit_listener_stack.pop()(self)
+ for listener in listeners:
+ if listener not in self._exit_listener_handles:
+ self._exit_listener_handles[listener] = \
+ self.scheduler.call_soon(self._exit_listener_cb, listener)
+ def _exit_listener_cb(self, listener):
+ del self._exit_listener_handles[listener]
+ listener(self)
diff --git a/lib/_emerge/CompositeTask.py b/lib/_emerge/CompositeTask.py
index 1edec4a17..72da6fac1 100644
--- a/lib/_emerge/CompositeTask.py
+++ b/lib/_emerge/CompositeTask.py
@@ -10,9 +10,6 @@ class CompositeTask(AsynchronousTask):
_TASK_QUEUED = -1
- def isAlive(self):
- return self._current_task is not None
-
def _cancel(self):
if self._current_task is not None:
if self._current_task is self._TASK_QUEUED:
@@ -21,6 +18,10 @@ class CompositeTask(AsynchronousTask):
self._async_wait()
else:
self._current_task.cancel()
+ elif self.returncode is None:
+ # Assume that the task has not started yet.
+ self._was_cancelled()
+ self._async_wait()
def _poll(self):
"""
diff --git a/lib/_emerge/EbuildFetcher.py b/lib/_emerge/EbuildFetcher.py
index ad5109c28..1e40994fb 100644
--- a/lib/_emerge/EbuildFetcher.py
+++ b/lib/_emerge/EbuildFetcher.py
@@ -12,7 +12,12 @@ from portage import _unicode_encode
from portage import _unicode_decode
from portage.checksum import _hash_filter
from portage.elog.messages import eerror
-from portage.package.ebuild.fetch import _check_distfile, fetch
+from portage.package.ebuild.fetch import (
+ _check_distfile,
+ _drop_privs_userfetch,
+ _want_userfetch,
+ fetch,
+)
from portage.util._async.AsyncTaskFuture import AsyncTaskFuture
from portage.util._async.ForkProcess import ForkProcess
from portage.util._pty import _create_pty_or_pipe
@@ -234,6 +239,11 @@ class _EbuildFetcherProcess(ForkProcess):
portage.output.havecolor = self._settings.get('NOCOLOR') \
not in ('yes', 'true')
+ # For userfetch, drop privileges for the entire fetch call, in
+ # order to handle DISTDIR on NFS with root_squash for bug 601252.
+ if _want_userfetch(self._settings):
+ _drop_privs_userfetch(self._settings)
+
rval = 1
allow_missing = self._get_manifest().allow_missing or \
'digest' in self._settings.features
diff --git a/lib/_emerge/EbuildMetadataPhase.py b/lib/_emerge/EbuildMetadataPhase.py
index 4940d40b6..efe71892c 100644
--- a/lib/_emerge/EbuildMetadataPhase.py
+++ b/lib/_emerge/EbuildMetadataPhase.py
@@ -144,7 +144,8 @@ class EbuildMetadataPhase(SubProcess):
break
def _unregister(self):
- self.scheduler.remove_reader(self._files.ebuild)
+ if self._files is not None:
+ self.scheduler.remove_reader(self._files.ebuild)
SubProcess._unregister(self)
def _async_waitpid_cb(self, *args, **kwargs):
diff --git a/lib/_emerge/EbuildPhase.py b/lib/_emerge/EbuildPhase.py
index 50e3dd1f4..477e0ba97 100644
--- a/lib/_emerge/EbuildPhase.py
+++ b/lib/_emerge/EbuildPhase.py
@@ -1,6 +1,8 @@
-# Copyright 1999-2018 Gentoo Foundation
+# Copyright 1999-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
+from __future__ import unicode_literals
+
import functools
import gzip
import io
@@ -14,10 +16,17 @@ from _emerge.EbuildProcess import EbuildProcess
from _emerge.CompositeTask import CompositeTask
from _emerge.PackagePhase import PackagePhase
from _emerge.TaskSequence import TaskSequence
+from portage.package.ebuild._ipc.QueryCommand import QueryCommand
+from portage.util._dyn_libs.soname_deps_qa import (
+ _get_all_provides,
+ _get_unresolved_soname_deps,
+)
from portage.package.ebuild.prepare_build_dirs import (_prepare_workdir,
_prepare_fake_distdir, _prepare_fake_filesdir)
+from portage.util.futures.compat_coroutine import coroutine
from portage.util import writemsg
from portage.util._async.AsyncTaskFuture import AsyncTaskFuture
+from portage.util.futures.executor.fork import ForkExecutor
try:
from portage.xml.metadata import MetaDataXML
@@ -281,7 +290,7 @@ class EbuildPhase(CompositeTask):
fd, logfile = tempfile.mkstemp()
os.close(fd)
post_phase = _PostPhaseCommands(background=self.background,
- commands=post_phase_cmds, fd_pipes=self.fd_pipes,
+ commands=post_phase_cmds, elog=self._elog, fd_pipes=self.fd_pipes,
logfile=logfile, phase=self.phase, scheduler=self.scheduler,
settings=settings)
self._start_task(post_phase, self._post_phase_exit)
@@ -315,13 +324,6 @@ class EbuildPhase(CompositeTask):
self._die_hooks()
return
- if self.phase == "install":
- out = io.StringIO()
- _post_src_install_soname_symlinks(self.settings, out)
- msg = out.getvalue()
- if msg:
- self.scheduler.output(msg, log_path=log_path)
-
self._current_task = None
self.wait()
return
@@ -414,7 +416,7 @@ class EbuildPhase(CompositeTask):
class _PostPhaseCommands(CompositeTask):
- __slots__ = ("commands", "fd_pipes", "logfile", "phase", "settings")
+ __slots__ = ("commands", "elog", "fd_pipes", "logfile", "phase", "settings")
def _start(self):
if isinstance(self.commands, list):
@@ -436,4 +438,46 @@ class _PostPhaseCommands(CompositeTask):
logfile=self.logfile, phase=self.phase,
scheduler=self.scheduler, settings=self.settings, **kwargs))
- self._start_task(tasks, self._default_final_exit)
+ self._start_task(tasks, self._commands_exit)
+
+ def _commands_exit(self, task):
+
+ if self._default_exit(task) != os.EX_OK:
+ self._async_wait()
+ return
+
+ if self.phase == 'install':
+ out = io.StringIO()
+ _post_src_install_soname_symlinks(self.settings, out)
+ msg = out.getvalue()
+ if msg:
+ self.scheduler.output(msg, log_path=self.settings.get("PORTAGE_LOG_FILE"))
+
+ if 'qa-unresolved-soname-deps' in self.settings.features:
+ # This operates on REQUIRES metadata generated by the above function call.
+ future = self._soname_deps_qa()
+ # If an unexpected exception occurs, then this will raise it.
+ future.add_done_callback(lambda future: future.cancelled() or future.result())
+ self._start_task(AsyncTaskFuture(future=future), self._default_final_exit)
+ else:
+ self._default_final_exit(task)
+ else:
+ self._default_final_exit(task)
+
+ @coroutine
+ def _soname_deps_qa(self):
+
+ vardb = QueryCommand.get_db()[self.settings['EROOT']]['vartree'].dbapi
+
+ all_provides = (yield self.scheduler.run_in_executor(ForkExecutor(loop=self.scheduler), _get_all_provides, vardb))
+
+ unresolved = _get_unresolved_soname_deps(os.path.join(self.settings['PORTAGE_BUILDDIR'], 'build-info'), all_provides)
+
+ if unresolved:
+ unresolved.sort()
+ qa_msg = ["QA Notice: Unresolved soname dependencies:"]
+ qa_msg.append("")
+ qa_msg.extend("\t%s: %s" % (filename, " ".join(sorted(soname_deps)))
+ for filename, soname_deps in unresolved)
+ qa_msg.append("")
+ self.elog("eqawarn", qa_msg)
diff --git a/lib/_emerge/FifoIpcDaemon.py b/lib/_emerge/FifoIpcDaemon.py
index 0cbaa13c7..2ec69d1cb 100644
--- a/lib/_emerge/FifoIpcDaemon.py
+++ b/lib/_emerge/FifoIpcDaemon.py
@@ -70,9 +70,6 @@ class FifoIpcDaemon(AbstractPollTask):
self._files.pipe_in,
self._input_handler)
- def isAlive(self):
- return self._registered
-
def _cancel(self):
if self.returncode is None:
self.returncode = 1
diff --git a/lib/_emerge/Scheduler.py b/lib/_emerge/Scheduler.py
index 98eaf3bcc..6f4564000 100644
--- a/lib/_emerge/Scheduler.py
+++ b/lib/_emerge/Scheduler.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2019 Gentoo Authors
+# Copyright 1999-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from __future__ import division, print_function, unicode_literals
@@ -27,6 +27,7 @@ bad = create_color_func("BAD")
from portage._sets import SETPREFIX
from portage._sets.base import InternalPackageSet
from portage.util import ensure_dirs, writemsg, writemsg_level
+from portage.util.futures import asyncio
from portage.util.SlotObject import SlotObject
from portage.util._async.SchedulerInterface import SchedulerInterface
from portage.util._eventloop.EventLoop import EventLoop
@@ -241,6 +242,7 @@ class Scheduler(PollScheduler):
self._completed_tasks = set()
self._main_exit = None
self._main_loadavg_handle = None
+ self._schedule_merge_wakeup_task = None
self._failed_pkgs = []
self._failed_pkgs_all = []
@@ -1336,7 +1338,7 @@ class Scheduler(PollScheduler):
self._deallocate_config(build.settings)
elif build.returncode == os.EX_OK:
self.curval += 1
- merge = PackageMerge(merge=build)
+ merge = PackageMerge(merge=build, scheduler=self._sched_iface)
self._running_tasks[id(merge)] = merge
if not build.build_opts.buildpkgonly and \
build.pkg in self._deep_system_deps:
@@ -1345,8 +1347,8 @@ class Scheduler(PollScheduler):
self._merge_wait_queue.append(merge)
merge.addStartListener(self._system_merge_started)
else:
- merge.addExitListener(self._merge_exit)
self._task_queues.merge.add(merge)
+ merge.addExitListener(self._merge_exit)
self._status_display.merges = len(self._task_queues.merge)
else:
settings = build.settings
@@ -1440,6 +1442,9 @@ class Scheduler(PollScheduler):
if self._job_delay_timeout_id is not None:
self._job_delay_timeout_id.cancel()
self._job_delay_timeout_id = None
+ if self._schedule_merge_wakeup_task is not None:
+ self._schedule_merge_wakeup_task.cancel()
+ self._schedule_merge_wakeup_task = None
def _choose_pkg(self):
"""
@@ -1580,9 +1585,10 @@ class Scheduler(PollScheduler):
if (self._merge_wait_queue and not self._jobs and
not self._task_queues.merge):
task = self._merge_wait_queue.popleft()
- task.addExitListener(self._merge_wait_exit_handler)
+ task.scheduler = self._sched_iface
self._merge_wait_scheduled.append(task)
self._task_queues.merge.add(task)
+ task.addExitListener(self._merge_wait_exit_handler)
self._status_display.merges = len(self._task_queues.merge)
state_change += 1
@@ -1613,6 +1619,25 @@ class Scheduler(PollScheduler):
self._main_loadavg_handle = self._event_loop.call_later(
self._loadavg_latency, self._schedule)
+ # Failure to schedule *after* self._task_queues.merge becomes
+ # empty will cause the scheduler to hang as in bug 711322.
+ # Do not rely on scheduling which occurs via the _merge_exit
+ # method, since the order of callback invocation may cause
+ # self._task_queues.merge to appear non-empty when it is
+ # about to become empty.
+ if (self._task_queues.merge and (self._schedule_merge_wakeup_task is None
+ or self._schedule_merge_wakeup_task.done())):
+ self._schedule_merge_wakeup_task = asyncio.ensure_future(
+ self._task_queues.merge.wait(), loop=self._event_loop)
+ self._schedule_merge_wakeup_task.add_done_callback(
+ self._schedule_merge_wakeup)
+
+ def _schedule_merge_wakeup(self, future):
+ if not future.cancelled():
+ future.result()
+ if self._main_exit is not None and not self._main_exit.done():
+ self._schedule()
+
def _sigcont_handler(self, signum, frame):
self._sigcont_time = time.time()
@@ -1699,26 +1724,28 @@ class Scheduler(PollScheduler):
task = self._task(pkg)
if pkg.installed:
- merge = PackageMerge(merge=task)
+ merge = PackageMerge(merge=task, scheduler=self._sched_iface)
self._running_tasks[id(merge)] = merge
- merge.addExitListener(self._merge_exit)
self._task_queues.merge.addFront(merge)
+ merge.addExitListener(self._merge_exit)
elif pkg.built:
self._jobs += 1
self._previous_job_start_time = time.time()
self._status_display.running = self._jobs
self._running_tasks[id(task)] = task
- task.addExitListener(self._extract_exit)
+ task.scheduler = self._sched_iface
self._task_queues.jobs.add(task)
+ task.addExitListener(self._extract_exit)
else:
self._jobs += 1
self._previous_job_start_time = time.time()
self._status_display.running = self._jobs
self._running_tasks[id(task)] = task
- task.addExitListener(self._build_exit)
+ task.scheduler = self._sched_iface
self._task_queues.jobs.add(task)
+ task.addExitListener(self._build_exit)
return bool(state_change)
diff --git a/lib/_emerge/SequentialTaskQueue.py b/lib/_emerge/SequentialTaskQueue.py
index 80908936c..d2551b1c6 100644
--- a/lib/_emerge/SequentialTaskQueue.py
+++ b/lib/_emerge/SequentialTaskQueue.py
@@ -1,9 +1,11 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from collections import deque
import sys
+from portage.util.futures import asyncio
+from portage.util.futures.compat_coroutine import coroutine
from portage.util.SlotObject import SlotObject
class SequentialTaskQueue(SlotObject):
@@ -60,16 +62,25 @@ class SequentialTaskQueue(SlotObject):
"""
Clear the task queue and asynchronously terminate any running tasks.
"""
+ for task in self._task_queue:
+ task.cancel()
self._task_queue.clear()
+
for task in list(self.running_tasks):
task.cancel()
+ @coroutine
def wait(self):
"""
- Synchronously wait for all running tasks to exit.
+ Wait for the queue to become empty. This method is a coroutine.
"""
- while self.running_tasks:
- next(iter(self.running_tasks)).wait()
+ while self:
+ task = next(iter(self.running_tasks), None)
+ if task is None:
+ # Wait for self.running_tasks to populate.
+ yield asyncio.sleep(0)
+ else:
+ yield task.async_wait()
def __bool__(self):
return bool(self._task_queue or self.running_tasks)
diff --git a/lib/_emerge/SubProcess.py b/lib/_emerge/SubProcess.py
index 7d6b03272..e834cb7d3 100644
--- a/lib/_emerge/SubProcess.py
+++ b/lib/_emerge/SubProcess.py
@@ -1,10 +1,11 @@
-# Copyright 1999-2018 Gentoo Foundation
+# Copyright 1999-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import logging
from portage import os
from portage.util import writemsg_level
+from portage.util.futures import asyncio
from _emerge.AbstractPollTask import AbstractPollTask
import signal
import errno
@@ -23,7 +24,7 @@ class SubProcess(AbstractPollTask):
return self.returncode
def _cancel(self):
- if self.isAlive():
+ if self.isAlive() and self.pid is not None:
try:
os.kill(self.pid, signal.SIGTERM)
except OSError as e:
@@ -36,9 +37,13 @@ class SubProcess(AbstractPollTask):
elif e.errno != errno.ESRCH:
raise
- def isAlive(self):
- return self.pid is not None and \
- self.returncode is None
+ def _async_wait(self):
+ if self.returncode is None:
+ raise asyncio.InvalidStateError('Result is not ready for %s' % (self,))
+ else:
+ # This calls _unregister, so don't call it until pid status
+ # is available.
+ super(SubProcess, self)._async_wait()
def _async_waitpid(self):
"""
diff --git a/lib/_emerge/actions.py b/lib/_emerge/actions.py
index 2b7cd0ffb..d8e9d7774 100644
--- a/lib/_emerge/actions.py
+++ b/lib/_emerge/actions.py
@@ -1,8 +1,9 @@
-# Copyright 1999-2019 Gentoo Authors
+# Copyright 1999-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from __future__ import division, print_function, unicode_literals
+import collections
import errno
import logging
import operator
@@ -56,6 +57,7 @@ bad = create_color_func("BAD")
warn = create_color_func("WARN")
from portage.package.ebuild._ipc.QueryCommand import QueryCommand
from portage.package.ebuild.doebuild import _check_temp_dir
+from portage.package.ebuild.fetch import _hide_url_passwd
from portage._sets import load_default_config, SETPREFIX
from portage._sets.base import InternalPackageSet
from portage.util import cmp_sort_key, writemsg, varexpand, \
@@ -742,8 +744,20 @@ def action_depclean(settings, trees, ldpath_mtimes,
return rval
+
def calc_depclean(settings, trees, ldpath_mtimes,
myopts, action, args_set, spinner):
+ result = _calc_depclean(settings, trees, ldpath_mtimes,
+ myopts, action, args_set, spinner)
+ return result.returncode, result.cleanlist, result.ordered, result.req_pkg_count
+
+
+_depclean_result = collections.namedtuple('_depclean_result',
+ ('returncode', 'cleanlist', 'ordered', 'req_pkg_count', 'depgraph'))
+
+
+def _calc_depclean(settings, trees, ldpath_mtimes,
+ myopts, action, args_set, spinner):
allow_missing_deps = bool(args_set)
debug = '--debug' in myopts
@@ -806,7 +820,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
writemsg_level(_("!!! Aborting due to set configuration "
"errors displayed above.\n"),
level=logging.ERROR, noiselevel=-1)
- return 1, [], False, 0
+ return _depclean_result(1, [], False, 0, None)
if action == "depclean":
emergelog(xterm_titles, " >>> depclean")
@@ -921,7 +935,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
resolver.display_problems()
if not success:
- return 1, [], False, 0
+ return _depclean_result(1, [], False, 0, resolver)
def unresolved_deps():
@@ -1021,7 +1035,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
return False
if unresolved_deps():
- return 1, [], False, 0
+ return _depclean_result(1, [], False, 0, resolver)
graph = resolver._dynamic_config.digraph.copy()
required_pkgs_total = 0
@@ -1322,7 +1336,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
runtime_slot_op=True),
root=pkg.root)):
resolver.display_problems()
- return 1, [], False, 0
+ return _depclean_result(1, [], False, 0, resolver)
writemsg_level("\nCalculating dependencies ")
success = resolver._complete_graph(
@@ -1330,9 +1344,9 @@ def calc_depclean(settings, trees, ldpath_mtimes,
writemsg_level("\b\b... done!\n")
resolver.display_problems()
if not success:
- return 1, [], False, 0
+ return _depclean_result(1, [], False, 0, resolver)
if unresolved_deps():
- return 1, [], False, 0
+ return _depclean_result(1, [], False, 0, resolver)
graph = resolver._dynamic_config.digraph.copy()
required_pkgs_total = 0
@@ -1341,7 +1355,7 @@ def calc_depclean(settings, trees, ldpath_mtimes,
required_pkgs_total += 1
cleanlist = create_cleanlist()
if not cleanlist:
- return 0, [], False, required_pkgs_total
+ return _depclean_result(0, [], False, required_pkgs_total, resolver)
clean_set = set(cleanlist)
if clean_set:
@@ -1459,8 +1473,8 @@ def calc_depclean(settings, trees, ldpath_mtimes,
graph.remove(node)
cleanlist.append(node.cpv)
- return 0, cleanlist, ordered, required_pkgs_total
- return 0, [], False, required_pkgs_total
+ return _depclean_result(0, cleanlist, ordered, required_pkgs_total, resolver)
+ return _depclean_result(0, [], False, required_pkgs_total, resolver)
def action_deselect(settings, trees, opts, atoms):
enter_invalid = '--ask-enter-invalid' in opts
@@ -1887,6 +1901,9 @@ def action_info(settings, trees, myopts, myfiles):
if default is not None and \
default == v:
continue
+
+ v = _hide_url_passwd(v)
+
append('%s="%s"' % (k, v))
else:
use = set(v.split())
@@ -2070,6 +2087,9 @@ def action_sync(emerge_config, trees=DeprecationWarning,
success, msgs = syncer.auto_sync(options=options)
if return_messages:
print_results(msgs)
+ elif msgs and not success:
+ writemsg_level("".join("%s\n" % (line,) for line in msgs),
+ level=logging.ERROR, noiselevel=-1)
return os.EX_OK if success else 1
diff --git a/lib/_emerge/create_world_atom.py b/lib/_emerge/create_world_atom.py
index c5e1f58be..a7f3e30bf 100644
--- a/lib/_emerge/create_world_atom.py
+++ b/lib/_emerge/create_world_atom.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2012 Gentoo Foundation
+# Copyright 1999-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import sys
@@ -15,8 +15,8 @@ def create_world_atom(pkg, args_set, root_config, before_install=False):
"""Create a new atom for the world file if one does not exist. If the
argument atom is precise enough to identify a specific slot then a slot
atom will be returned. Atoms that are in the system set may also be stored
- in world since system atoms can only match one slot while world atoms can
- be greedy with respect to slots. Unslotted system packages will not be
+ in world since a user might want to select multiple slots of a slotted
+ package like gcc for example. Unslotted system packages will not be
stored in world."""
arg_atom = args_set.findAtomForPackage(pkg)
@@ -111,8 +111,9 @@ def create_world_atom(pkg, args_set, root_config, before_install=False):
# Both atoms would be identical, so there's nothing to add.
return None
if not slotted and not arg_atom.repo:
- # Unlike world atoms, system atoms are not greedy for slots, so they
- # can't be safely excluded from world if they are slotted.
+ # Don't exclude slotted atoms for system packages from world, since
+ # a user might want to select multiple slots of a slotted package like
+ # gcc for example.
system_atom = sets["system"].findAtomForPackage(pkg)
if system_atom:
if not system_atom.cp.startswith("virtual/"):
diff --git a/lib/_emerge/depgraph.py b/lib/_emerge/depgraph.py
index 111862c9a..13c2b658f 100644
--- a/lib/_emerge/depgraph.py
+++ b/lib/_emerge/depgraph.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2019 Gentoo Authors
+# Copyright 1999-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from __future__ import division, print_function, unicode_literals
@@ -95,6 +95,14 @@ if sys.hexversion >= 0x3000000:
else:
_unicode = unicode
+# Exposes a depgraph interface to dep_check.
+_dep_check_graph_interface = collections.namedtuple('_dep_check_graph_interface',(
+ # Indicates a removal action, like depclean or prune.
+ 'removal_action',
+ # Checks if update is desirable for a given package.
+ 'want_update_pkg',
+))
+
class _scheduler_graph_config(object):
def __init__(self, trees, pkg_cache, graph, mergelist):
self.trees = trees
@@ -510,6 +518,10 @@ class _dynamic_depgraph_config(object):
soname_deps=depgraph._frozen_config.soname_deps_enabled)
# Track missed updates caused by solved conflicts.
self._conflict_missed_update = collections.defaultdict(dict)
+ dep_check_iface = _dep_check_graph_interface(
+ removal_action="remove" in myparams,
+ want_update_pkg=depgraph._want_update_pkg,
+ )
for myroot in depgraph._frozen_config.trees:
self.sets[myroot] = _depgraph_sets()
@@ -530,7 +542,7 @@ class _dynamic_depgraph_config(object):
self._graph_trees[myroot]["vartree"] = graph_tree
self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi
self._graph_trees[myroot]["graph"] = self.digraph
- self._graph_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg
+ self._graph_trees[myroot]["graph_interface"] = dep_check_iface
self._graph_trees[myroot]["downgrade_probe"] = depgraph._downgrade_probe
def filtered_tree():
pass
@@ -558,7 +570,7 @@ class _dynamic_depgraph_config(object):
self._filtered_trees[myroot]["graph"] = self.digraph
self._filtered_trees[myroot]["vartree"] = \
depgraph._frozen_config.trees[myroot]["vartree"]
- self._filtered_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg
+ self._filtered_trees[myroot]["graph_interface"] = dep_check_iface
self._filtered_trees[myroot]["downgrade_probe"] = depgraph._downgrade_probe
dbs = []
@@ -2056,9 +2068,15 @@ class depgraph(object):
for parent, atom in self._dynamic_config._parent_atoms.get(existing_pkg, []):
if isinstance(parent, Package):
if parent in built_slot_operator_parents:
- # This parent may need to be rebuilt, so its
- # dependencies aren't necessarily relevant.
- continue
+ # This parent may need to be rebuilt, therefore
+ # discard its soname and built slot operator
+ # dependency components which are not necessarily
+ # relevant.
+ if atom.soname:
+ continue
+ elif atom.package and atom.slot_operator_built:
+ # This discards the slot/subslot component.
+ atom = atom.with_slot("=")
if replacement_parent is not None and \
(replacement_parent.slot_atom == parent.slot_atom
@@ -2777,7 +2795,7 @@ class depgraph(object):
# Traverse nested sets and add them to the stack
# if they're not already in the graph. Also, graph
# edges between parent and nested sets.
- for token in arg.pset.getNonAtoms():
+ for token in sorted(arg.pset.getNonAtoms()):
if not token.startswith(SETPREFIX):
continue
s = token[len(SETPREFIX):]
@@ -4200,7 +4218,7 @@ class depgraph(object):
if len(expanded_atoms) > 1:
number_of_virtuals = 0
for expanded_atom in expanded_atoms:
- if expanded_atom.cp.startswith("virtual/"):
+ if expanded_atom.cp.startswith(("acct-group/", "acct-user/", "virtual/")):
number_of_virtuals += 1
else:
candidate = expanded_atom
@@ -4371,7 +4389,7 @@ class depgraph(object):
args = self._dynamic_config._initial_arg_list[:]
for arg in self._expand_set_args(args, add_to_digraph=True):
- for atom in arg.pset.getAtoms():
+ for atom in sorted(arg.pset.getAtoms()):
self._spinner_update()
dep = Dependency(atom=atom, onlydeps=onlydeps,
root=myroot, parent=arg)
@@ -6359,7 +6377,11 @@ class depgraph(object):
cpv = pkg.cpv
reinstall_for_flags = None
- if not pkg.installed or \
+ if pkg.installed and parent is not None and not self._want_update_pkg(parent, pkg):
+ # Ensure that --deep=<depth> is respected even when the
+ # installed package is masked and --update is enabled.
+ pass
+ elif not pkg.installed or \
(matched_packages and not avoid_update):
# Only enforce visibility on installed packages
# if there is at least one other visible package
@@ -6941,9 +6963,18 @@ class depgraph(object):
# Removal actions may override sets with temporary
# replacements that have had atoms removed in order
# to implement --deselect behavior.
- required_set_names = set(required_sets[root])
depgraph_sets.sets.clear()
depgraph_sets.sets.update(required_sets[root])
+ if 'world' in depgraph_sets.sets:
+ # For consistent order of traversal for both update
+				# and removal (depclean) actions, sets other than
+ # world are always nested under the world set.
+ world_atoms = list(depgraph_sets.sets['world'])
+ world_atoms.extend(SETPREFIX + s for s in required_sets[root] if s != 'world')
+ depgraph_sets.sets['world'] = InternalPackageSet(initial_atoms=world_atoms)
+ required_set_names = {'world'}
+ else:
+ required_set_names = set(required_sets[root])
if "remove" not in self._dynamic_config.myparams and \
root == self._frozen_config.target_root and \
already_deep:
@@ -6953,7 +6984,7 @@ class depgraph(object):
not self._dynamic_config._dep_stack:
continue
root_config = self._frozen_config.roots[root]
- for s in required_set_names:
+ for s in sorted(required_set_names):
pset = depgraph_sets.sets.get(s)
if pset is None:
pset = root_config.sets[s]
@@ -6963,10 +6994,10 @@ class depgraph(object):
self._set_args(args)
for arg in self._expand_set_args(args, add_to_digraph=True):
- for atom in arg.pset.getAtoms():
- self._dynamic_config._dep_stack.append(
- Dependency(atom=atom, root=arg.root_config.root,
- parent=arg, depth=self._UNREACHABLE_DEPTH))
+ for atom in sorted(arg.pset.getAtoms()):
+ if not self._add_dep(Dependency(atom=atom, root=arg.root_config.root,
+ parent=arg, depth=self._UNREACHABLE_DEPTH), allow_unsatisfied=True):
+ return 0
if True:
if self._dynamic_config._ignored_deps:
@@ -9354,7 +9385,7 @@ class depgraph(object):
# added via _add_pkg() so that they are included in the
# digraph (needed at least for --tree display).
for arg in self._expand_set_args(args, add_to_digraph=True):
- for atom in arg.pset.getAtoms():
+ for atom in sorted(arg.pset.getAtoms()):
pkg, existing_node = self._select_package(
arg.root_config.root, atom)
if existing_node is None and \
diff --git a/lib/portage/_compat_upgrade/binpkg_compression.py b/lib/portage/_compat_upgrade/binpkg_compression.py
new file mode 100644
index 000000000..0f5704733
--- /dev/null
+++ b/lib/portage/_compat_upgrade/binpkg_compression.py
@@ -0,0 +1,40 @@
+# Copyright 2020 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+import re
+
+import portage
+from portage import os
+from portage.const import GLOBAL_CONFIG_PATH
+
+COMPAT_BINPKG_COMPRESS = 'bzip2'
+
+
+def main():
+ """
+ If the current installation is still configured to use the old
+ default BINPKG_COMPRESS=bzip2 setting, then patch make.globals
+ inside ${ED} to maintain backward compatibility, ensuring that
+ binary package consumers are not caught off guard. This is
+ intended to be called from the ebuild as follows:
+
+ pkg_preinst() {
+ python_setup
+ env -u BINPKG_COMPRESS
+ PYTHONPATH="${D%/}$(python_get_sitedir)${PYTHONPATH:+:${PYTHONPATH}}" \
+ "${PYTHON}" -m portage._compat_upgrade.binpkg_compression || die
+ }
+ """
+ if portage.settings.get('BINPKG_COMPRESS', COMPAT_BINPKG_COMPRESS) == COMPAT_BINPKG_COMPRESS:
+ config_path = os.path.join(os.environ['ED'], GLOBAL_CONFIG_PATH.lstrip(os.sep), 'make.globals')
+ with open(config_path) as f:
+ content = f.read()
+ compat_setting = 'BINPKG_COMPRESS="{}"'.format(COMPAT_BINPKG_COMPRESS)
+ portage.output.EOutput().einfo('Setting make.globals default {} for backward compatibility'.format(compat_setting))
+ content = re.sub('^BINPKG_COMPRESS=.*$', compat_setting, content, flags=re.MULTILINE)
+ with open(config_path, 'wt') as f:
+ f.write(content)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/portage/_emirrordist/FetchTask.py b/lib/portage/_emirrordist/FetchTask.py
index 0441fc677..322de79ba 100644
--- a/lib/portage/_emirrordist/FetchTask.py
+++ b/lib/portage/_emirrordist/FetchTask.py
@@ -444,15 +444,6 @@ class FetchTask(CompositeTask):
args = [portage.util.varexpand(x, mydict=variables)
for x in args]
- if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
- not os.path.isabs(args[0]):
- # Python 3.1 _execvp throws TypeError for non-absolute executable
- # path passed as bytes (see https://bugs.python.org/issue8513).
- fullname = portage.process.find_binary(args[0])
- if fullname is None:
- raise portage.exception.CommandNotFound(args[0])
- args[0] = fullname
-
args = [_unicode_encode(x,
encoding=_encodings['fs'], errors='strict') for x in args]
diff --git a/lib/portage/_selinux.py b/lib/portage/_selinux.py
index 985e96628..49e2e8e58 100644
--- a/lib/portage/_selinux.py
+++ b/lib/portage/_selinux.py
@@ -1,4 +1,4 @@
-# Copyright 1999-2014 Gentoo Foundation
+# Copyright 1999-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
# Don't use the unicode-wrapped os and shutil modules here since
@@ -8,12 +8,15 @@ import shutil
import sys
import warnings
+try:
+ import selinux
+except ImportError:
+ selinux = None
+
import portage
from portage import _encodings
from portage import _native_string, _unicode_decode
from portage.localization import _
-portage.proxy.lazyimport.lazyimport(globals(),
- 'selinux')
def copyfile(src, dest):
src = _native_string(src, encoding=_encodings['fs'], errors='strict')
diff --git a/lib/portage/cache/ebuild_xattr.py b/lib/portage/cache/ebuild_xattr.py
index cc6b06246..33a40fdba 100644
--- a/lib/portage/cache/ebuild_xattr.py
+++ b/lib/portage/cache/ebuild_xattr.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-# Copyright: 2009-2011 Gentoo Foundation
+# Copyright: 2009-2020 Gentoo Authors
# Author(s): Petteri Räty (betelgeuse@gentoo.org)
# License: GPL2
@@ -14,8 +14,7 @@ from portage import cpv_getkey
from portage import os
from portage import _encodings
from portage import _unicode_decode
-portage.proxy.lazyimport.lazyimport(globals(),
- 'xattr')
+from portage.util._xattr import xattr
class NoValueException(Exception):
pass
diff --git a/lib/portage/cache/template.py b/lib/portage/cache/template.py
index 8662d859f..6b4878347 100644
--- a/lib/portage/cache/template.py
+++ b/lib/portage/cache/template.py
@@ -133,7 +133,7 @@ class database(object):
d = None
if self.cleanse_keys:
d=ProtectedDict(values)
- for k, v in list(d.items()):
+ for k, v in list(item for item in d.items() if item[0] != "_eclasses_"):
if not v:
del d[k]
if "_eclasses_" in values:
diff --git a/lib/portage/const.py b/lib/portage/const.py
index 6d1035311..146808fea 100644
--- a/lib/portage/const.py
+++ b/lib/portage/const.py
@@ -198,6 +198,7 @@ SUPPORTED_FEATURES = frozenset([
"preserve-libs",
"protect-owned",
"python-trace",
+ "qa-unresolved-soname-deps",
"sandbox",
"selinux",
"sesandbox",
diff --git a/lib/portage/data.py b/lib/portage/data.py
index 25a074848..20a8d1ba7 100644
--- a/lib/portage/data.py
+++ b/lib/portage/data.py
@@ -211,16 +211,6 @@ def _get_global(k):
# SIGPIPE problems with nss_ldap.
cmd = ["id", "-G", _portage_username]
- if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000:
- # Python 3.1 _execvp throws TypeError for non-absolute executable
- # path passed as bytes (see https://bugs.python.org/issue8513).
- fullname = portage.process.find_binary(cmd[0])
- if fullname is None:
- globals()[k] = v
- _initialized_globals.add(k)
- return v
- cmd[0] = fullname
-
encoding = portage._encodings['content']
cmd = [portage._unicode_encode(x,
encoding=encoding, errors='strict') for x in cmd]
diff --git a/lib/portage/dbapi/cpv_expand.py b/lib/portage/dbapi/cpv_expand.py
index 70ee78245..ac2f6cc2e 100644
--- a/lib/portage/dbapi/cpv_expand.py
+++ b/lib/portage/dbapi/cpv_expand.py
@@ -72,9 +72,9 @@ def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
matches.append(x+"/"+myp)
if len(matches) > 1:
virtual_name_collision = False
- if len(matches) == 2:
+ if len(matches) > 1:
for x in matches:
- if not x.startswith("virtual/"):
+ if not x.startswith(("acct-group/", "acct-user/", "virtual/")):
# Assume that the non-virtual is desired. This helps
# avoid the ValueError for invalid deps that come from
# installed packages (during reverse blocker detection,
diff --git a/lib/portage/dbapi/porttree.py b/lib/portage/dbapi/porttree.py
index 4bb396a6c..08af17bcd 100644
--- a/lib/portage/dbapi/porttree.py
+++ b/lib/portage/dbapi/porttree.py
@@ -1,4 +1,4 @@
-# Copyright 1998-2019 Gentoo Authors
+# Copyright 1998-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from __future__ import unicode_literals
@@ -804,8 +804,11 @@ class portdbapi(dbapi):
(mypkg, eapi)))
return
- result.set_result(_parse_uri_map(mypkg,
- {'EAPI':eapi,'SRC_URI':myuris}, use=useflags))
+ try:
+ result.set_result(_parse_uri_map(mypkg,
+ {'EAPI':eapi,'SRC_URI':myuris}, use=useflags))
+ except Exception as e:
+ result.set_exception(e)
aux_get_future = self.async_aux_get(
mypkg, ["EAPI", "SRC_URI"], mytree=mytree, loop=loop)
diff --git a/lib/portage/dbapi/vartree.py b/lib/portage/dbapi/vartree.py
index f1d18783d..d7c8ef3de 100644
--- a/lib/portage/dbapi/vartree.py
+++ b/lib/portage/dbapi/vartree.py
@@ -23,7 +23,7 @@ portage.proxy.lazyimport.lazyimport(globals(),
'portage.locks:lockdir,unlockdir,lockfile,unlockfile',
'portage.output:bold,colorize',
'portage.package.ebuild.doebuild:doebuild_environment,' + \
- '_merge_unicode_error', '_spawn_phase',
+ '_merge_unicode_error',
'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
'portage.package.ebuild._ipc.QueryCommand:QueryCommand',
'portage.process:find_binary',
@@ -675,13 +675,6 @@ class vardbapi(dbapi):
def _aux_cache_init(self):
aux_cache = None
open_kwargs = {}
- if sys.hexversion >= 0x3000000 and sys.hexversion < 0x3020000:
- # Buffered io triggers extreme performance issues in
- # Unpickler.load() (problem observed with python-3.0.1).
- # Unfortunately, performance is still poor relative to
- # python-2.x, but buffering makes it much worse (problem
- # appears to be solved in Python >=3.2 at least).
- open_kwargs["buffering"] = 0
try:
with open(_unicode_encode(self._aux_cache_filename,
encoding=_encodings['fs'], errors='strict'),
diff --git a/lib/portage/dep/dep_check.py b/lib/portage/dep/dep_check.py
index 3a0c7bbe9..9534590bf 100644
--- a/lib/portage/dep/dep_check.py
+++ b/lib/portage/dep/dep_check.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2018 Gentoo Foundation
+# Copyright 2010-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from __future__ import unicode_literals
@@ -296,7 +296,7 @@ def dep_eval(deplist):
class _dep_choice(SlotObject):
__slots__ = ('atoms', 'slot_map', 'cp_map', 'all_available',
- 'all_installed_slots', 'new_slot_count')
+ 'all_installed_slots', 'new_slot_count', 'want_update', 'all_in_graph')
def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None,
minimize_slots=False):
@@ -331,9 +331,9 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None,
# c) contains masked installed packages
# d) is the first item
- preferred_installed = []
preferred_in_graph = []
- preferred_any_slot = []
+ preferred_installed = preferred_in_graph
+ preferred_any_slot = preferred_in_graph
preferred_non_installed = []
unsat_use_in_graph = []
unsat_use_installed = []
@@ -347,8 +347,6 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None,
# for correct ordering in cases like || ( foo[a] foo[b] ).
choice_bins = (
preferred_in_graph,
- preferred_installed,
- preferred_any_slot,
preferred_non_installed,
unsat_use_in_graph,
unsat_use_installed,
@@ -365,7 +363,7 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None,
graph_db = trees[myroot].get("graph_db")
graph = trees[myroot].get("graph")
pkg_use_enabled = trees[myroot].get("pkg_use_enabled")
- want_update_pkg = trees[myroot].get("want_update_pkg")
+ graph_interface = trees[myroot].get("graph_interface")
downgrade_probe = trees[myroot].get("downgrade_probe")
circular_dependency = trees[myroot].get("circular_dependency")
vardb = None
@@ -506,14 +504,24 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None,
if current_higher or (all_match_current and not all_match_previous):
cp_map[avail_pkg.cp] = avail_pkg
- new_slot_count = (len(slot_map) if graph_db is None else
- sum(not graph_db.match_pkgs(slot_atom) for slot_atom in slot_map
- if not slot_atom.cp.startswith("virtual/")))
+ want_update = False
+ if graph_interface is None or graph_interface.removal_action:
+ new_slot_count = len(slot_map)
+ else:
+ new_slot_count = 0
+ for slot_atom, avail_pkg in slot_map.items():
+ if parent is not None and graph_interface.want_update_pkg(parent, avail_pkg):
+ want_update = True
+ if (not slot_atom.cp.startswith("virtual/")
+ and not graph_db.match_pkgs(slot_atom)):
+ new_slot_count += 1
this_choice = _dep_choice(atoms=atoms, slot_map=slot_map,
cp_map=cp_map, all_available=all_available,
all_installed_slots=False,
- new_slot_count=new_slot_count)
+ new_slot_count=new_slot_count,
+ all_in_graph=False,
+ want_update=want_update)
if all_available:
# The "all installed" criterion is not version or slot specific.
# If any version of a package is already in the graph then we
@@ -567,11 +575,12 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None,
graph_db.match_pkgs(atom)):
all_in_graph = False
break
+ this_choice.all_in_graph = all_in_graph
+
circular_atom = None
if not (parent is None or priority is None) and \
(parent.onlydeps or
- (all_in_graph and priority.buildtime and
- not (priority.satisfied or priority.optional))):
+ (priority.buildtime and not priority.satisfied and not priority.optional)):
# Check if the atom would result in a direct circular
# dependency and try to avoid that if it seems likely
# to be unresolvable. This is only relevant for
@@ -608,27 +617,8 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None,
elif all_installed:
if all_installed_slots:
preferred_installed.append(this_choice)
- elif parent is None or want_update_pkg is None:
- preferred_any_slot.append(this_choice)
else:
- # When appropriate, prefer a slot that is not
- # installed yet for bug #478188.
- want_update = True
- for slot_atom, avail_pkg in slot_map.items():
- if avail_pkg in graph:
- continue
- # New-style virtuals have zero cost to install.
- if slot_atom.startswith("virtual/") or \
- vardb.match(slot_atom):
- continue
- if not want_update_pkg(parent, avail_pkg):
- want_update = False
- break
-
- if want_update:
- preferred_installed.append(this_choice)
- else:
- preferred_any_slot.append(this_choice)
+ preferred_any_slot.append(this_choice)
else:
preferred_non_installed.append(this_choice)
else:
@@ -677,10 +667,6 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None,
if len(choices) < 2:
continue
- sort_keys = []
- # Prefer choices with all_installed_slots for bug #480736.
- sort_keys.append(lambda x: not x.all_installed_slots)
-
if minimize_slots:
# Prefer choices having fewer new slots. When used with DNF form,
# this can eliminate unecessary packages that depclean would
@@ -695,18 +681,31 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None,
# contribute to outcomes that appear to be random. Meanwhile,
# the order specified in the ebuild is without variance, so it
# does not have this problem.
- sort_keys.append(lambda x: x.new_slot_count)
+ choices.sort(key=operator.attrgetter('new_slot_count'))
- choices.sort(key=lambda x: tuple(f(x) for f in sort_keys))
for choice_1 in choices[1:]:
cps = set(choice_1.cp_map)
for choice_2 in choices:
if choice_1 is choice_2:
# choice_1 will not be promoted, so move on
break
+ if (
+ # Prefer choices where all_installed_slots is True, except
+ # in cases where we want to upgrade to a new slot as in
+ # bug 706278. Don't compare new_slot_count here since that
+ # would aggressively override the preference order defined
+ # in the ebuild, breaking the test case for bug 645002.
+ (choice_1.all_installed_slots and
+ not choice_2.all_installed_slots and
+ not choice_2.want_update)
+ ):
+ # promote choice_1 in front of choice_2
+ choices.remove(choice_1)
+ index_2 = choices.index(choice_2)
+ choices.insert(index_2, choice_1)
+ break
+
intersecting_cps = cps.intersection(choice_2.cp_map)
- if not intersecting_cps:
- continue
has_upgrade = False
has_downgrade = False
for cp in intersecting_cps:
@@ -718,8 +717,16 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None,
has_upgrade = True
else:
has_downgrade = True
- break
- if has_upgrade and not has_downgrade:
+
+ if (
+ # Prefer upgrades.
+ (has_upgrade and not has_downgrade)
+
+ # Prefer choices where all packages have been pulled into
+ # the graph, except for choices that eliminate upgrades.
+ or (choice_1.all_in_graph and not choice_2.all_in_graph and
+ not (has_downgrade and not has_upgrade))
+ ):
# promote choice_1 in front of choice_2
choices.remove(choice_1)
index_2 = choices.index(choice_2)
diff --git a/lib/portage/dep/soname/SonameAtom.py b/lib/portage/dep/soname/SonameAtom.py
index a7dad973d..5743544aa 100644
--- a/lib/portage/dep/soname/SonameAtom.py
+++ b/lib/portage/dep/soname/SonameAtom.py
@@ -1,4 +1,4 @@
-# Copyright 2015 Gentoo Foundation
+# Copyright 2015-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from __future__ import unicode_literals
@@ -26,6 +26,13 @@ class SonameAtom(object):
raise AttributeError("SonameAtom instances are immutable",
self.__class__, name, value)
+ def __getstate__(self):
+ return dict((k, getattr(self, k)) for k in self.__slots__)
+
+ def __setstate__(self, state):
+ for k, v in state.items():
+ object.__setattr__(self, k, v)
+
def __hash__(self):
return self._hash_value
diff --git a/lib/portage/dispatch_conf.py b/lib/portage/dispatch_conf.py
index eaea59393..2fab19f1a 100644
--- a/lib/portage/dispatch_conf.py
+++ b/lib/portage/dispatch_conf.py
@@ -41,15 +41,6 @@ def diffstatusoutput(cmd, file1, file2):
# raise a UnicodeDecodeError which makes the output inaccessible.
args = shlex_split(cmd % (file1, file2))
- if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
- not os.path.isabs(args[0]):
- # Python 3.1 _execvp throws TypeError for non-absolute executable
- # path passed as bytes (see https://bugs.python.org/issue8513).
- fullname = portage.process.find_binary(args[0])
- if fullname is None:
- raise portage.exception.CommandNotFound(args[0])
- args[0] = fullname
-
args = [portage._unicode_encode(x, errors='strict') for x in args]
proc = subprocess.Popen(args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
diff --git a/lib/portage/emaint/modules/sync/sync.py b/lib/portage/emaint/modules/sync/sync.py
index e28c42336..ac37fdcfa 100644
--- a/lib/portage/emaint/modules/sync/sync.py
+++ b/lib/portage/emaint/modules/sync/sync.py
@@ -299,7 +299,7 @@ class SyncRepos(object):
msgs.append(warn(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended")
msgs.append(warn(" * ")+"that you update portage now, before any other packages are updated.")
msgs.append('')
- msgs.append(warn(" * ")+"To update portage, run 'emerge --oneshot portage' now.")
+ msgs.append(warn(" * ")+"To update portage, run 'emerge --oneshot sys-apps/portage' now.")
msgs.append('')
return msgs
diff --git a/lib/portage/locks.py b/lib/portage/locks.py
index 72ac2fc70..535698dfe 100644
--- a/lib/portage/locks.py
+++ b/lib/portage/locks.py
@@ -1,5 +1,5 @@
# portage: Lock management code
-# Copyright 2004-2019 Gentoo Authors
+# Copyright 2004-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
__all__ = ["lockdir", "unlockdir", "lockfile", "unlockfile", \
@@ -84,7 +84,29 @@ def _get_lock_fn():
return _lock_fn
-_open_fds = set()
+_open_fds = {}
+_open_inodes = {}
+
+class _lock_manager(object):
+ __slots__ = ('fd', 'inode_key')
+ def __init__(self, fd, fstat_result, path):
+ self.fd = fd
+ self.inode_key = (fstat_result.st_dev, fstat_result.st_ino)
+ if self.inode_key in _open_inodes:
+ # This means that the lock is already held by the current
+ # process, so the caller will have to try again. This case
+ # is encountered with the default fcntl.lockf function, and
+ # with the alternative fcntl.flock function TryAgain is
+ # raised earlier.
+ os.close(fd)
+ raise TryAgain(path)
+ _open_fds[fd] = self
+ _open_inodes[self.inode_key] = self
+ def close(self):
+ os.close(self.fd)
+ del _open_fds[self.fd]
+ del _open_inodes[self.inode_key]
+
def _close_fds():
"""
@@ -93,8 +115,8 @@ def _close_fds():
safely after a fork without exec, unlike the _setup_pipes close_fds
behavior.
"""
- while _open_fds:
- os.close(_open_fds.pop())
+ for fd in list(_open_fds.values()):
+ fd.close()
def lockdir(mydir, flags=0):
return lockfile(mydir, wantnewlockfile=1, flags=flags)
@@ -296,10 +318,10 @@ def _lockfile_iteration(mypath, wantnewlockfile=False, unlinkfile=False,
else:
raise
-
+ fstat_result = None
if isinstance(lockfilename, basestring) and myfd != HARDLINK_FD and unlinkfile:
try:
- removed = _lockfile_was_removed(myfd, lockfilename)
+ (removed, fstat_result) = _lockfile_was_removed(myfd, lockfilename)
except Exception:
# Do not leak the file descriptor here.
os.close(myfd)
@@ -322,7 +344,7 @@ def _lockfile_iteration(mypath, wantnewlockfile=False, unlinkfile=False,
fcntl.fcntl(myfd, fcntl.F_SETFD,
fcntl.fcntl(myfd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
- _open_fds.add(myfd)
+ _lock_manager(myfd, os.fstat(myfd) if fstat_result is None else fstat_result, mypath)
writemsg(str((lockfilename, myfd, unlinkfile)) + "\n", 1)
return (lockfilename, myfd, unlinkfile, locking_method)
@@ -341,14 +363,15 @@ def _lockfile_was_removed(lock_fd, lock_path):
@param lock_path: path of lock file
@type lock_path: str
@rtype: bool
- @return: True if lock_path exists and corresponds to lock_fd, False otherwise
+ @return: a tuple of (removed, fstat_result), where removed is True if
+ lock_path does not correspond to lock_fd, and False otherwise
"""
try:
fstat_st = os.fstat(lock_fd)
except OSError as e:
if e.errno not in (errno.ENOENT, errno.ESTALE):
_raise_exc(e)
- return True
+ return (True, None)
# Since stat is not reliable for removed files on NFS with the default
# file attribute cache behavior ('ac' mount option), create a temporary
@@ -365,7 +388,7 @@ def _lockfile_was_removed(lock_fd, lock_path):
except OSError as e:
if e.errno not in (errno.ENOENT, errno.ESTALE):
_raise_exc(e)
- return True
+ return (True, None)
hardlink_stat = os.stat(hardlink_path)
if hardlink_stat.st_ino != fstat_st.st_ino or hardlink_stat.st_dev != fstat_st.st_dev:
@@ -383,27 +406,27 @@ def _lockfile_was_removed(lock_fd, lock_path):
except OSError as e:
if e.errno not in (errno.ENOENT, errno.ESTALE):
_raise_exc(e)
- return True
+ return (True, None)
else:
if not os.path.samefile(hardlink_path, inode_test):
# This implies that inode numbers are not expected
# to match for this file system, so use a simple
# stat call to detect if lock_path has been removed.
- return not os.path.exists(lock_path)
+ return (not os.path.exists(lock_path), fstat_st)
finally:
try:
os.unlink(inode_test)
except OSError as e:
if e.errno not in (errno.ENOENT, errno.ESTALE):
_raise_exc(e)
- return True
+ return (True, None)
finally:
try:
os.unlink(hardlink_path)
except OSError as e:
if e.errno not in (errno.ENOENT, errno.ESTALE):
_raise_exc(e)
- return False
+ return (False, fstat_st)
def _fstat_nlink(fd):
@@ -442,8 +465,7 @@ def unlockfile(mytuple):
not os.path.exists(lockfilename):
writemsg(_("lockfile does not exist '%s'\n") % lockfilename, 1)
if myfd is not None:
- os.close(myfd)
- _open_fds.remove(myfd)
+ _open_fds[myfd].close()
return False
try:
@@ -453,8 +475,7 @@ def unlockfile(mytuple):
locking_method(myfd, fcntl.LOCK_UN)
except OSError:
if isinstance(lockfilename, basestring):
- os.close(myfd)
- _open_fds.remove(myfd)
+ _open_fds[myfd].close()
raise IOError(_("Failed to unlock file '%s'\n") % lockfilename)
try:
@@ -475,8 +496,7 @@ def unlockfile(mytuple):
locking_method(myfd, fcntl.LOCK_UN)
else:
writemsg(_("lockfile does not exist '%s'\n") % lockfilename, 1)
- os.close(myfd)
- _open_fds.remove(myfd)
+ _open_fds[myfd].close()
return False
except SystemExit:
raise
@@ -488,8 +508,7 @@ def unlockfile(mytuple):
# fd originally, and the caller might not like having their
# open fd closed automatically on them.
if isinstance(lockfilename, basestring):
- os.close(myfd)
- _open_fds.remove(myfd)
+ _open_fds[myfd].close()
return True
@@ -497,7 +516,7 @@ def unlockfile(mytuple):
def hardlock_name(path):
base, tail = os.path.split(path)
return os.path.join(base, ".%s.hardlock-%s-%s" %
- (tail, os.uname()[1], os.getpid()))
+ (tail, portage._decode_argv([os.uname()[1]])[0], os.getpid()))
def hardlink_is_mine(link, lock):
try:
@@ -653,7 +672,7 @@ def unhardlink_lockfile(lockfilename, unlinkfile=True):
pass
def hardlock_cleanup(path, remove_all_locks=False):
- myhost = os.uname()[1]
+ myhost = portage._decode_argv([os.uname()[1]])[0]
mydl = os.listdir(path)
results = []
diff --git a/lib/portage/package/ebuild/_config/KeywordsManager.py b/lib/portage/package/ebuild/_config/KeywordsManager.py
index fd0a6318d..48397b022 100644
--- a/lib/portage/package/ebuild/_config/KeywordsManager.py
+++ b/lib/portage/package/ebuild/_config/KeywordsManager.py
@@ -1,11 +1,14 @@
-# Copyright 2010-2014 Gentoo Foundation
+# Copyright 2010-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
__all__ = (
'KeywordsManager',
)
+import warnings
+
from _emerge.Package import Package
+import portage
from portage import os
from portage.dep import ExtendedAtomDict, _repo_separator, _slot_separator
from portage.localization import _
@@ -54,13 +57,20 @@ class KeywordsManager(object):
self.pkeywordsdict = ExtendedAtomDict(dict)
if user_config:
+ user_accept_kwrds_path = os.path.join(abs_user_config, "package.accept_keywords")
+ user_kwrds_path = os.path.join(abs_user_config, "package.keywords")
pkgdict = grabdict_package(
- os.path.join(abs_user_config, "package.keywords"),
+ user_kwrds_path,
recursive=1, allow_wildcard=True, allow_repo=True,
verify_eapi=False, allow_build_id=True)
+ if pkgdict and portage._internal_caller:
+ warnings.warn(_("%s is deprecated, use %s instead") %
+ (user_kwrds_path, user_accept_kwrds_path),
+ UserWarning)
+
for k, v in grabdict_package(
- os.path.join(abs_user_config, "package.accept_keywords"),
+ user_accept_kwrds_path,
recursive=1, allow_wildcard=True, allow_repo=True,
verify_eapi=False, allow_build_id=True).items():
pkgdict.setdefault(k, []).extend(v)
diff --git a/lib/portage/package/ebuild/_config/special_env_vars.py b/lib/portage/package/ebuild/_config/special_env_vars.py
index c8131f5b2..12d701c9a 100644
--- a/lib/portage/package/ebuild/_config/special_env_vars.py
+++ b/lib/portage/package/ebuild/_config/special_env_vars.py
@@ -28,7 +28,7 @@ env_blacklist = frozenset((
"PORTAGE_INTERNAL_CALLER", "PORTAGE_IUSE",
"PORTAGE_NONFATAL", "PORTAGE_PIPE_FD", "PORTAGE_REPO_NAME",
"PORTAGE_USE", "PROPERTIES", "RDEPEND", "REPOSITORY",
- "REQUIRED_USE", "RESTRICT", "ROOT", "SLOT", "SRC_URI", "_"
+ "REQUIRED_USE", "RESTRICT", "ROOT", "SANDBOX_LOG", "SLOT", "SRC_URI", "_"
))
environ_whitelist = []
@@ -47,7 +47,7 @@ environ_whitelist += [
"DISTDIR", "DOC_SYMLINKS_DIR", "EAPI", "EBUILD",
"EBUILD_FORCE_TEST",
"EBUILD_PHASE", "EBUILD_PHASE_FUNC", "ECLASSDIR", "ECLASS_DEPTH", "ED",
- "EMERGE_FROM", "EPREFIX", "EROOT", "ESYSROOT",
+ "EMERGE_FROM", "ENV_UNSET", "EPREFIX", "EROOT", "ESYSROOT",
"FEATURES", "FILESDIR", "HOME", "MERGE_TYPE", "NOCOLOR", "PATH",
"PKGDIR",
"PKGUSE", "PKG_LOGDIR", "PKG_TMPDIR",
@@ -78,7 +78,7 @@ environ_whitelist += [
"PORTAGE_VERBOSE", "PORTAGE_WORKDIR_MODE", "PORTAGE_XATTR_EXCLUDE",
"PORTDIR", "PORTDIR_OVERLAY", "PREROOTPATH", "PYTHONDONTWRITEBYTECODE",
"REPLACING_VERSIONS", "REPLACED_BY_VERSION",
- "ROOT", "ROOTPATH", "SYSROOT", "T", "TMP", "TMPDIR",
+ "ROOT", "ROOTPATH", "SANDBOX_LOG", "SYSROOT", "T", "TMP", "TMPDIR",
"USE_EXPAND", "USE_ORDER", "WORKDIR",
"XARGS", "__PORTAGE_TEST_HARDLINK_LOCKS",
# PREFIX LOCAL
diff --git a/lib/portage/package/ebuild/deprecated_profile_check.py b/lib/portage/package/ebuild/deprecated_profile_check.py
index fdb19b4ac..abf32a079 100644
--- a/lib/portage/package/ebuild/deprecated_profile_check.py
+++ b/lib/portage/package/ebuild/deprecated_profile_check.py
@@ -77,7 +77,7 @@ def deprecated_profile_check(settings=None):
"can migrate to the above profile.")), noiselevel=-1)
writemsg(" %s %s\n\n" % (colorize("WARN", "*"),
_("In order to update portage, "
- "run 'emerge --oneshot portage'.")),
+ "run 'emerge --oneshot sys-apps/portage'.")),
noiselevel=-1)
return True
diff --git a/lib/portage/package/ebuild/doebuild.py b/lib/portage/package/ebuild/doebuild.py
index 1a766f8ce..7d685dbe4 100644
--- a/lib/portage/package/ebuild/doebuild.py
+++ b/lib/portage/package/ebuild/doebuild.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2019 Gentoo Authors
+# Copyright 2010-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from __future__ import unicode_literals
@@ -31,7 +31,7 @@ portage.proxy.lazyimport.lazyimport(globals(),
'portage.package.ebuild.config:check_config_instance',
'portage.package.ebuild.digestcheck:digestcheck',
'portage.package.ebuild.digestgen:digestgen',
- 'portage.package.ebuild.fetch:fetch',
+ 'portage.package.ebuild.fetch:_drop_privs_userfetch,_want_userfetch,fetch',
'portage.package.ebuild.prepare_build_dirs:_prepare_fake_distdir',
'portage.package.ebuild._ipc.QueryCommand:QueryCommand',
'portage.dep._slot_operator:evaluate_slot_operator_equal_deps',
@@ -84,6 +84,7 @@ from portage.util.cpuinfo import get_cpu_count
from portage.util.lafilefixer import rewrite_lafile
from portage.util.compression_probe import _compressors
from portage.util.futures import asyncio
+from portage.util.futures.executor.fork import ForkExecutor
from portage.util.path import first_existing
from portage.util.socks5 import get_socks5_proxy
from portage.versions import _pkgsplit
@@ -376,7 +377,6 @@ def doebuild_environment(myebuild, mydo, myroot=None, settings=None,
mysettings["RPMDIR"] = os.path.realpath(mysettings["RPMDIR"])
mysettings["ECLASSDIR"] = mysettings["PORTDIR"]+"/eclass"
- mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")
mysettings["PORTAGE_BASHRC_FILES"] = "\n".join(mysettings._pbashrc)
@@ -414,6 +414,7 @@ def doebuild_environment(myebuild, mydo, myroot=None, settings=None,
mysettings["WORKDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "work")
mysettings["D"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "image") + os.sep
mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")
+ mysettings["SANDBOX_LOG"] = os.path.join(mysettings["T"], "sandbox.log")
mysettings["FILESDIR"] = os.path.join(settings["PORTAGE_BUILDDIR"], "files")
# Prefix forward compatability
@@ -1089,9 +1090,28 @@ def doebuild(myebuild, mydo, _unused=DeprecationWarning, settings=None, debug=0,
dist_digests = None
if mf is not None:
dist_digests = mf.getTypeDigests("DIST")
- if not fetch(fetchme, mysettings, listonly=listonly,
- fetchonly=fetchonly, allow_missing_digests=False,
- digests=dist_digests):
+
+ def _fetch_subprocess(fetchme, mysettings, listonly, dist_digests):
+ # For userfetch, drop privileges for the entire fetch call, in
+ # order to handle DISTDIR on NFS with root_squash for bug 601252.
+ if _want_userfetch(mysettings):
+ _drop_privs_userfetch(mysettings)
+
+ return fetch(fetchme, mysettings, listonly=listonly,
+ fetchonly=fetchonly, allow_missing_digests=False,
+ digests=dist_digests)
+
+ loop = asyncio._safe_loop()
+ if loop.is_running():
+ # Called by EbuildFetchonly for emerge --pretend --fetchonly.
+ success = fetch(fetchme, mysettings, listonly=listonly,
+ fetchonly=fetchonly, allow_missing_digests=False,
+ digests=dist_digests)
+ else:
+ success = loop.run_until_complete(
+ loop.run_in_executor(ForkExecutor(loop=loop),
+ _fetch_subprocess, fetchme, mysettings, listonly, dist_digests))
+ if not success:
# Since listonly mode is called by emerge --pretend in an
# asynchronous context, spawn_nofetch would trigger event loop
# recursion here, therefore delegate execution of pkg_nofetch
@@ -1835,9 +1855,10 @@ def _post_phase_userpriv_perms(mysettings):
if "userpriv" in mysettings.features and secpass >= 2:
""" Privileged phases may have left files that need to be made
writable to a less privileged user."""
- apply_recursive_permissions(mysettings["T"],
- uid=portage_uid, gid=portage_gid, dirmode=0o700, dirmask=0,
- filemode=0o600, filemask=0)
+ for path in (mysettings["HOME"], mysettings["T"]):
+ apply_recursive_permissions(path,
+ uid=portage_uid, gid=portage_gid, dirmode=0o700, dirmask=0,
+ filemode=0o600, filemask=0)
def _check_build_log(mysettings, out=None):
@@ -1962,7 +1983,7 @@ def _check_build_log(mysettings, out=None):
if make_jobserver_re.match(line) is not None:
make_jobserver.append(line.rstrip("\n"))
- except zlib.error as e:
+ except (EOFError, zlib.error) as e:
_eerror(["portage encountered a zlib error: '%s'" % (e,),
"while reading the log file: '%s'" % logfile])
finally:
diff --git a/lib/portage/package/ebuild/fetch.py b/lib/portage/package/ebuild/fetch.py
index 5f6c40146..11b13fe56 100644
--- a/lib/portage/package/ebuild/fetch.py
+++ b/lib/portage/package/ebuild/fetch.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2019 Gentoo Authors
+# Copyright 2010-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -26,6 +26,11 @@ try:
except ImportError:
from urlparse import urlparse
+try:
+ from urllib.parse import quote as urlquote
+except ImportError:
+ from urllib import quote as urlquote
+
import portage
portage.proxy.lazyimport.lazyimport(globals(),
'portage.package.ebuild.config:check_config_instance,config',
@@ -47,7 +52,7 @@ from portage.checksum import (get_valid_checksum_keys, perform_md5, verify_all,
from portage.const import BASH_BINARY, CUSTOM_MIRRORS_FILE, \
GLOBAL_CONFIG_PATH
from portage.const import rootgid
-from portage.data import portage_gid, portage_uid, secpass, userpriv_groups
+from portage.data import portage_gid, portage_uid, userpriv_groups
from portage.exception import FileNotFound, OperationNotPermitted, \
PortageException, TryAgain
from portage.localization import _
@@ -68,7 +73,38 @@ _userpriv_spawn_kwargs = (
)
def _hide_url_passwd(url):
- return re.sub(r'//(.+):.+@(.+)', r'//\1:*password*@\2', url)
+ return re.sub(r'//([^:\s]+):[^@\s]+@', r'//\1:*password*@', url)
+
+
+def _want_userfetch(settings):
+ """
+ Check if it's desirable to drop privileges for userfetch.
+
+ @param settings: portage config
+ @type settings: portage.package.ebuild.config.config
+ @return: True if desirable, False otherwise
+ """
+ return ('userfetch' in settings.features and
+ portage.data.secpass >= 2 and os.getuid() == 0)
+
+
+def _drop_privs_userfetch(settings):
+ """
+ Drop privileges for userfetch, and update portage.data.secpass
+ to correspond to the new privilege level.
+ """
+ spawn_kwargs = dict(_userpriv_spawn_kwargs)
+ try:
+ _ensure_distdir(settings, settings['DISTDIR'])
+ except PortageException:
+ if not os.path.isdir(settings['DISTDIR']):
+ raise
+ os.setgid(int(spawn_kwargs['gid']))
+ os.setgroups(spawn_kwargs['groups'])
+ os.setuid(int(spawn_kwargs['uid']))
+ os.umask(spawn_kwargs['umask'])
+ portage.data.secpass = 1
+
def _spawn_fetch(settings, args, **kwargs):
"""
@@ -153,6 +189,59 @@ def _userpriv_test_write_file(settings, file_path):
_userpriv_test_write_file_cache[file_path] = rval
return rval
+
+def _ensure_distdir(settings, distdir):
+ """
+ Ensure that DISTDIR exists with appropriate permissions.
+
+ @param settings: portage config
+ @type settings: portage.package.ebuild.config.config
+ @param distdir: DISTDIR path
+ @type distdir: str
+ @raise PortageException: portage.exception wrapper exception
+ """
+ global _userpriv_test_write_file_cache
+ dirmode = 0o070
+ filemode = 0o60
+ modemask = 0o2
+ dir_gid = portage_gid
+ if "FAKED_MODE" in settings:
+ # When inside fakeroot, directories with portage's gid appear
+ # to have root's gid. Therefore, use root's gid instead of
+ # portage's gid to avoid spurious permissions adjustments
+ # when inside fakeroot.
+ dir_gid = rootgid
+
+ userfetch = portage.data.secpass >= 2 and "userfetch" in settings.features
+ userpriv = portage.data.secpass >= 2 and "userpriv" in settings.features
+ write_test_file = os.path.join(distdir, ".__portage_test_write__")
+
+ try:
+ st = os.stat(distdir)
+ except OSError:
+ st = None
+
+ if st is not None and stat.S_ISDIR(st.st_mode):
+ if not (userfetch or userpriv):
+ return
+ if _userpriv_test_write_file(settings, write_test_file):
+ return
+
+ _userpriv_test_write_file_cache.pop(write_test_file, None)
+ if ensure_dirs(distdir, gid=dir_gid, mode=dirmode, mask=modemask):
+ if st is None:
+ # The directory has just been created
+ # and therefore it must be empty.
+ return
+ writemsg(_("Adjusting permissions recursively: '%s'\n") % distdir,
+ noiselevel=-1)
+ if not apply_recursive_permissions(distdir,
+ gid=dir_gid, dirmode=dirmode, dirmask=modemask,
+ filemode=filemode, filemask=modemask, onerror=_raise_exc):
+ raise OperationNotPermitted(
+ _("Failed to apply recursive permissions for the portage group."))
+
+
def _checksum_failure_temp_file(settings, distdir, basename):
"""
First try to find a duplicate temp file with the same checksum and return
@@ -437,7 +526,7 @@ def get_mirror_url(mirror_url, filename, mysettings, cache_path=None):
f.close()
return (mirror_url + "/distfiles/" +
- mirror_conf.get_best_supported_layout().get_path(filename))
+ urlquote(mirror_conf.get_best_supported_layout().get_path(filename)))
def fetch(myuris, mysettings, listonly=0, fetchonly=0,
@@ -487,9 +576,7 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
features = mysettings.features
restrict = mysettings.get("PORTAGE_RESTRICT","").split()
-
- userfetch = secpass >= 2 and "userfetch" in features
- userpriv = secpass >= 2 and "userpriv" in features
+ userfetch = portage.data.secpass >= 2 and "userfetch" in features
# 'nomirror' is bad/negative logic. You Restrict mirroring, not no-mirroring.
restrict_mirror = "mirror" in restrict or "nomirror" in restrict
@@ -729,51 +816,8 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
can_fetch = False
if can_fetch and not fetch_to_ro:
- global _userpriv_test_write_file_cache
- dirmode = 0o070
- filemode = 0o60
- modemask = 0o2
- dir_gid = portage_gid
- if "FAKED_MODE" in mysettings:
- # When inside fakeroot, directories with portage's gid appear
- # to have root's gid. Therefore, use root's gid instead of
- # portage's gid to avoid spurrious permissions adjustments
- # when inside fakeroot.
- dir_gid = rootgid
- distdir_dirs = [""]
try:
-
- for x in distdir_dirs:
- mydir = os.path.join(mysettings["DISTDIR"], x)
- write_test_file = os.path.join(
- mydir, ".__portage_test_write__")
-
- try:
- st = os.stat(mydir)
- except OSError:
- st = None
-
- if st is not None and stat.S_ISDIR(st.st_mode):
- if not (userfetch or userpriv):
- continue
- if _userpriv_test_write_file(mysettings, write_test_file):
- continue
-
- _userpriv_test_write_file_cache.pop(write_test_file, None)
- if ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
- if st is None:
- # The directory has just been created
- # and therefore it must be empty.
- continue
- writemsg(_("Adjusting permissions recursively: '%s'\n") % mydir,
- noiselevel=-1)
- def onerror(e):
- raise # bail out on the first error that occurs during recursion
- if not apply_recursive_permissions(mydir,
- gid=dir_gid, dirmode=dirmode, dirmask=modemask,
- filemode=filemode, filemask=modemask, onerror=onerror):
- raise OperationNotPermitted(
- _("Failed to apply recursive permissions for the portage group."))
+ _ensure_distdir(mysettings, mysettings["DISTDIR"])
except PortageException as e:
if not os.path.isdir(mysettings["DISTDIR"]):
writemsg("!!! %s\n" % str(e), noiselevel=-1)
@@ -875,7 +919,7 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0,
if not has_space_superuser:
has_space = False
- elif secpass < 2:
+ elif portage.data.secpass < 2:
has_space = False
elif userfetch:
has_space = False
diff --git a/lib/portage/package/ebuild/prepare_build_dirs.py b/lib/portage/package/ebuild/prepare_build_dirs.py
index c325819d1..8349d306f 100644
--- a/lib/portage/package/ebuild/prepare_build_dirs.py
+++ b/lib/portage/package/ebuild/prepare_build_dirs.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2018 Gentoo Foundation
+# Copyright 2010-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from __future__ import unicode_literals
@@ -84,7 +84,7 @@ def prepare_build_dirs(myroot=None, settings=None, cleanup=False):
except PortageException:
if not os.path.isdir(mydir):
raise
- for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
+ for dir_key in ("HOME", "PKG_LOGDIR", "T"):
ensure_dirs(mysettings[dir_key], mode=0o755)
apply_secpass_permissions(mysettings[dir_key],
uid=portage_uid, gid=portage_gid)
@@ -272,11 +272,18 @@ def _prepare_workdir(mysettings):
writemsg(_("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n") % \
(mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode).replace('o', '')
- try:
- apply_secpass_permissions(mysettings["WORKDIR"],
- uid=portage_uid, gid=portage_gid, mode=workdir_mode)
- except FileNotFound:
- pass # ebuild.sh will create it
+
+ permissions = {'mode': workdir_mode}
+ if portage.data.secpass >= 2:
+ permissions['uid'] = portage_uid
+ if portage.data.secpass >= 1:
+ permissions['gid'] = portage_gid
+
+ # Apply PORTAGE_WORKDIR_MODE to PORTAGE_BUILDDIR, since the child
+ # directory ${D} and its children may have vulnerable permissions
+ # as reported in bug 692492.
+ ensure_dirs(mysettings["PORTAGE_BUILDDIR"], **permissions)
+ ensure_dirs(mysettings["WORKDIR"], **permissions)
if mysettings.get("PORTAGE_LOGDIR", "") == "":
while "PORTAGE_LOGDIR" in mysettings:
diff --git a/lib/portage/process.py b/lib/portage/process.py
index 226a6d0fa..3e5d2e4ef 100644
--- a/lib/portage/process.py
+++ b/lib/portage/process.py
@@ -28,6 +28,13 @@ from portage.exception import CommandNotFound
from portage.util._ctypes import find_library, LoadLibrary, ctypes
try:
+ from portage.util.netlink import RtNetlink
+except ImportError:
+ if platform.system() == "Linux":
+ raise
+ RtNetlink = None
+
+try:
import resource
max_fd_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
except ImportError:
@@ -363,12 +370,14 @@ def spawn(mycommand, env=None, opt_name=None, fd_pipes=None, returnpid=False,
if unshare_net or unshare_ipc or unshare_mount or unshare_pid:
# from /usr/include/bits/sched.h
CLONE_NEWNS = 0x00020000
+ CLONE_NEWUTS = 0x04000000
CLONE_NEWIPC = 0x08000000
CLONE_NEWPID = 0x20000000
CLONE_NEWNET = 0x40000000
if unshare_net:
- unshare_flags |= CLONE_NEWNET
+ # UTS namespace to override hostname
+ unshare_flags |= CLONE_NEWNET | CLONE_NEWUTS
if unshare_ipc:
unshare_flags |= CLONE_NEWIPC
if unshare_mount:
@@ -517,8 +526,8 @@ def _configure_loopback_interface():
# Bug: https://bugs.gentoo.org/690758
# Bug: https://sourceware.org/bugzilla/show_bug.cgi?id=12377#c13
- # Avoid importing this module on systems that may not support netlink sockets.
- from portage.util.netlink import RtNetlink
+ if RtNetlink is None:
+ return
try:
with RtNetlink() as rtnl:
@@ -719,6 +728,20 @@ def _exec(binary, mycommand, opt_name, fd_pipes,
noiselevel=-1)
os._exit(1)
if unshare_net:
+ # use 'localhost' to avoid hostname resolution problems
+ try:
+ # pypy3 does not implement socket.sethostname()
+ new_hostname = b'localhost'
+ if hasattr(socket, 'sethostname'):
+ socket.sethostname(new_hostname)
+ else:
+ if libc.sethostname(new_hostname, len(new_hostname)) != 0:
+ errno_value = ctypes.get_errno()
+ raise OSError(errno_value, os.strerror(errno_value))
+ except Exception as e:
+ writemsg("Unable to set hostname: %s (for FEATURES=\"network-sandbox\")\n" % (
+ e,),
+ noiselevel=-1)
_configure_loopback_interface()
except AttributeError:
# unshare() not supported by libc
diff --git a/lib/portage/tests/dbapi/test_auxdb.py b/lib/portage/tests/dbapi/test_auxdb.py
new file mode 100644
index 000000000..85d64c15e
--- /dev/null
+++ b/lib/portage/tests/dbapi/test_auxdb.py
@@ -0,0 +1,77 @@
+# Copyright 2020 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.util.futures import asyncio
+from portage.util.futures.compat_coroutine import coroutine
+
+
+class AuxdbTestCase(TestCase):
+
+ def test_anydbm(self):
+ try:
+ from portage.cache.anydbm import database
+ except ImportError:
+ self.skipTest('dbm import failed')
+ self._test_mod('portage.cache.anydbm.database')
+
+ def test_flat_hash_md5(self):
+ self._test_mod('portage.cache.flat_hash.md5_database')
+
+ def test_volatile(self):
+ self._test_mod('portage.cache.volatile.database')
+
+ def test_sqite(self):
+ try:
+ import sqlite3
+ except ImportError:
+ self.skipTest('sqlite3 import failed')
+ self._test_mod('portage.cache.sqlite.database')
+
+ def _test_mod(self, auxdbmodule):
+ ebuilds = {
+ "cat/A-1": {
+ "EAPI": "7",
+ "MISC_CONTENT": "inherit foo",
+ },
+ "cat/B-1": {
+ "EAPI": "7",
+ "MISC_CONTENT": "inherit foo",
+ },
+ }
+
+ ebuild_inherited = frozenset(["bar", "foo"])
+ eclass_defined_phases = "prepare"
+ eclass_depend = "bar/foo"
+
+ eclasses = {
+ "foo": (
+ "inherit bar",
+ ),
+ "bar": (
+ "EXPORT_FUNCTIONS src_prepare",
+ "DEPEND=\"{}\"".format(eclass_depend),
+ "bar_src_prepare() { default; }",
+ ),
+ }
+
+ playground = ResolverPlayground(ebuilds=ebuilds, eclasses=eclasses,
+ user_config={'modules': ('portdbapi.auxdbmodule = %s' % auxdbmodule,)})
+
+ portdb = playground.trees[playground.eroot]["porttree"].dbapi
+
+ loop = asyncio._wrap_loop()
+ loop.run_until_complete(self._test_mod_async(ebuilds, ebuild_inherited, eclass_defined_phases, eclass_depend, portdb))
+
+ @coroutine
+ def _test_mod_async(self, ebuilds, ebuild_inherited, eclass_defined_phases, eclass_depend, portdb):
+
+ for cpv, metadata in ebuilds.items():
+ defined_phases, depend, eapi, inherited = yield portdb.async_aux_get(cpv, ['DEFINED_PHASES', 'DEPEND', 'EAPI', 'INHERITED'])
+ self.assertEqual(defined_phases, eclass_defined_phases)
+ self.assertEqual(depend, eclass_depend)
+ self.assertEqual(eapi, metadata['EAPI'])
+ self.assertEqual(frozenset(inherited.split()), ebuild_inherited)
diff --git a/lib/portage/tests/dep/test_soname_atom_pickle.py b/lib/portage/tests/dep/test_soname_atom_pickle.py
new file mode 100644
index 000000000..c3f339e2d
--- /dev/null
+++ b/lib/portage/tests/dep/test_soname_atom_pickle.py
@@ -0,0 +1,26 @@
+# Copyright 2020 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import sys
+
+from portage.dep.soname.SonameAtom import SonameAtom
+from portage.tests import TestCase
+from portage.util.futures import asyncio
+from portage.util.futures.executor.fork import ForkExecutor
+
+
+class TestSonameAtomPickle(TestCase):
+
+ _ALL_PROVIDES = frozenset([SonameAtom('x86_64', 'libc.so.6')])
+
+ def test_soname_atom_pickle(self):
+ loop = asyncio._wrap_loop()
+ with ForkExecutor(loop=loop) as executor:
+ result = loop.run_until_complete(loop.run_in_executor(executor, self._get_all_provides))
+ self.assertEqual(self._ALL_PROVIDES, result)
+
+ @classmethod
+ def _get_all_provides(cls):
+ return cls._ALL_PROVIDES
diff --git a/lib/portage/tests/ebuild/test_doebuild_spawn.py b/lib/portage/tests/ebuild/test_doebuild_spawn.py
index 6b344658f..8f7ba503f 100644
--- a/lib/portage/tests/ebuild/test_doebuild_spawn.py
+++ b/lib/portage/tests/ebuild/test_doebuild_spawn.py
@@ -66,9 +66,11 @@ class DoebuildSpawnTestCase(TestCase):
settings['PORTAGE_BUILDDIR'] = os.path.join(
settings['PORTAGE_TMPDIR'], cpv)
settings['PYTHONDONTWRITEBYTECODE'] = os.environ.get('PYTHONDONTWRITEBYTECODE', '')
+ settings['HOME'] = os.path.join(
+ settings['PORTAGE_BUILDDIR'], 'homedir')
settings['T'] = os.path.join(
settings['PORTAGE_BUILDDIR'], 'temp')
- for x in ('PORTAGE_BUILDDIR', 'T'):
+ for x in ('PORTAGE_BUILDDIR', 'HOME', 'T'):
os.makedirs(settings[x])
# Create a fake environment, to pretend as if the ebuild
# has been sourced already.
diff --git a/lib/portage/tests/emerge/test_simple.py b/lib/portage/tests/emerge/test_simple.py
index f5cd6f3d2..19ab72457 100644
--- a/lib/portage/tests/emerge/test_simple.py
+++ b/lib/portage/tests/emerge/test_simple.py
@@ -1,19 +1,41 @@
-# Copyright 2011-2019 Gentoo Authors
+# Copyright 2011-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import subprocess
import sys
import portage
-from portage import os
+from portage import shutil, os
from portage import _unicode_decode
from portage.const import (BASH_BINARY, PORTAGE_BASE_PATH,
PORTAGE_PYM_PATH, USER_CONFIG_PATH)
+from portage.cache.mappings import Mapping
from portage.process import find_binary
from portage.tests import TestCase
from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.tests.util.test_socks5 import AsyncHTTPServer
from portage.util import (ensure_dirs, find_updated_config_files,
shlex_split)
+from portage.util.futures import asyncio
+from portage.util.futures.compat_coroutine import coroutine
+
+
+class BinhostContentMap(Mapping):
+ def __init__(self, remote_path, local_path):
+ self._remote_path = remote_path
+ self._local_path = local_path
+
+ def __getitem__(self, request_path):
+ safe_path = os.path.normpath(request_path)
+ if not safe_path.startswith(self._remote_path + '/'):
+ raise KeyError(request_path)
+ local_path = os.path.join(self._local_path, safe_path[len(self._remote_path)+1:])
+ try:
+ with open(local_path, 'rb') as f:
+ return f.read()
+ except EnvironmentError:
+ raise KeyError(request_path)
+
class SimpleEmergeTestCase(TestCase):
@@ -201,6 +223,15 @@ call_has_and_best_version() {
playground = ResolverPlayground(
ebuilds=ebuilds, installed=installed, debug=debug)
+
+ loop = asyncio._wrap_loop()
+ loop.run_until_complete(asyncio.ensure_future(
+ self._async_test_simple(loop, playground, metadata_xml_files), loop=loop))
+
+ @coroutine
+ def _async_test_simple(self, loop, playground, metadata_xml_files):
+
+ debug = playground.debug
settings = playground.settings
eprefix = settings["EPREFIX"]
eroot = settings["EROOT"]
@@ -253,6 +284,16 @@ call_has_and_best_version() {
cross_root = os.path.join(eprefix, "cross_root")
cross_eroot = os.path.join(cross_root, eprefix.lstrip(os.sep))
+ binhost_dir = os.path.join(eprefix, "binhost")
+ binhost_address = '127.0.0.1'
+ binhost_remote_path = '/binhost'
+ binhost_server = AsyncHTTPServer(binhost_address,
+ BinhostContentMap(binhost_remote_path, binhost_dir), loop).__enter__()
+ binhost_uri = 'http://{address}:{port}{path}'.format(
+ address=binhost_address,
+ port=binhost_server.server_port,
+ path=binhost_remote_path)
+
test_commands = (
emerge_cmd + ("--usepkgonly", "--root", cross_root, "--quickpkg-direct=y", "dev-libs/A"),
env_update_cmd,
@@ -378,6 +419,18 @@ call_has_and_best_version() {
portageq_cmd + ("has_version", cross_eroot, "dev-libs/B"),
)
+ # Test binhost support if FETCHCOMMAND is available.
+ fetchcommand = portage.util.shlex_split(playground.settings['FETCHCOMMAND'])
+ fetch_bin = portage.process.find_binary(fetchcommand[0])
+ if fetch_bin is not None:
+ test_commands = test_commands + (
+ lambda: os.rename(pkgdir, binhost_dir),
+ ({"PORTAGE_BINHOST": binhost_uri},) + \
+ emerge_cmd + ("-e", "--getbinpkgonly", "dev-libs/A"),
+ lambda: shutil.rmtree(pkgdir),
+ lambda: os.rename(binhost_dir, pkgdir),
+ )
+
distdir = playground.distdir
pkgdir = playground.pkgdir
fake_bin = os.path.join(eprefix, "bin")
@@ -487,15 +540,14 @@ move dev-util/git dev-vcs/git
else:
local_env = env
- proc = subprocess.Popen(args,
- env=local_env, stdout=stdout)
+ proc = yield asyncio.create_subprocess_exec(*args,
+ env=local_env, stderr=None, stdout=stdout)
if debug:
- proc.wait()
+ yield proc.wait()
else:
- output = proc.stdout.readlines()
- proc.wait()
- proc.stdout.close()
+ output, _err = yield proc.communicate()
+ yield proc.wait()
if proc.returncode != os.EX_OK:
for line in output:
sys.stderr.write(_unicode_decode(line))
@@ -503,4 +555,5 @@ move dev-util/git dev-vcs/git
self.assertEqual(os.EX_OK, proc.returncode,
"emerge failed with args %s" % (args,))
finally:
+ binhost_server.__exit__(None, None, None)
playground.cleanup()
diff --git a/lib/portage/tests/locks/test_lock_nonblock.py b/lib/portage/tests/locks/test_lock_nonblock.py
index 2ff7b3527..02ba16ad9 100644
--- a/lib/portage/tests/locks/test_lock_nonblock.py
+++ b/lib/portage/tests/locks/test_lock_nonblock.py
@@ -1,4 +1,4 @@
-# Copyright 2011 Gentoo Foundation
+# Copyright 2011-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import tempfile
@@ -7,6 +7,7 @@ import traceback
import portage
from portage import os
from portage import shutil
+from portage.exception import TryAgain
from portage.tests import TestCase
class LockNonblockTestCase(TestCase):
@@ -60,3 +61,16 @@ class LockNonblockTestCase(TestCase):
if prev_state is not None:
os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = prev_state
+ def test_competition_with_same_process(self):
+ """
+ Test that an attempt to lock the same file multiple times in the
+ same process will behave as intended (bug 714480).
+ """
+ tempdir = tempfile.mkdtemp()
+ try:
+ path = os.path.join(tempdir, 'lock_me')
+ lock = portage.locks.lockfile(path)
+ self.assertRaises(TryAgain, portage.locks.lockfile, path, flags=os.O_NONBLOCK)
+ portage.locks.unlockfile(lock)
+ finally:
+ shutil.rmtree(tempdir)
diff --git a/lib/portage/tests/resolver/ResolverPlayground.py b/lib/portage/tests/resolver/ResolverPlayground.py
index c626bad22..0751e392e 100644
--- a/lib/portage/tests/resolver/ResolverPlayground.py
+++ b/lib/portage/tests/resolver/ResolverPlayground.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2019 Gentoo Authors
+# Copyright 2010-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import bz2
@@ -22,9 +22,10 @@ from portage.util import ensure_dirs, normalize_path
from portage.versions import catsplit
import _emerge
-from _emerge.actions import calc_depclean
+from _emerge.actions import _calc_depclean
from _emerge.Blocker import Blocker
from _emerge.create_depgraph_params import create_depgraph_params
+from _emerge.DependencyArg import DependencyArg
from _emerge.depgraph import backtrack_depgraph
from _emerge.RootConfig import RootConfig
@@ -44,7 +45,7 @@ class ResolverPlayground(object):
its work.
"""
- config_files = frozenset(("eapi", "layout.conf", "make.conf", "package.accept_keywords",
+ config_files = frozenset(("eapi", "layout.conf", "make.conf", "modules", "package.accept_keywords",
"package.keywords", "package.license", "package.mask", "package.properties",
"package.provided", "packages",
"package.unmask", "package.use", "package.use.aliases", "package.use.stable.mask",
@@ -66,7 +67,7 @@ class ResolverPlayground(object):
"""
def __init__(self, ebuilds={}, binpkgs={}, installed={}, profile={}, repo_configs={}, \
- user_config={}, sets={}, world=[], world_sets=[], distfiles={},
+ user_config={}, sets={}, world=[], world_sets=[], distfiles={}, eclasses={},
eprefix=None, targetroot=False, debug=False):
"""
ebuilds: cpv -> metadata mapping simulating available ebuilds.
@@ -111,6 +112,7 @@ class ResolverPlayground(object):
"uname",
"uniq",
"xargs",
+ "zstd",
)
# Exclude internal wrappers from PATH lookup.
orig_path = os.environ['PATH']
@@ -158,7 +160,7 @@ class ResolverPlayground(object):
self._create_ebuilds(ebuilds)
self._create_binpkgs(binpkgs)
self._create_installed(installed)
- self._create_profile(ebuilds, installed, profile, repo_configs, user_config, sets)
+ self._create_profile(ebuilds, eclasses, installed, profile, repo_configs, user_config, sets)
self._create_world(world, world_sets)
self.settings, self.trees = self._load_config()
@@ -346,7 +348,7 @@ class ResolverPlayground(object):
with open(ebuild_path, 'rb') as inputfile:
f.write(inputfile.read())
- def _create_profile(self, ebuilds, installed, profile, repo_configs, user_config, sets):
+ def _create_profile(self, ebuilds, eclasses, installed, profile, repo_configs, user_config, sets):
user_config_dir = os.path.join(self.eroot, USER_CONFIG_PATH)
@@ -404,7 +406,15 @@ class ResolverPlayground(object):
f.write("masters =\n")
#Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there)
- os.makedirs(os.path.join(repo_dir, "eclass"))
+ eclass_dir = os.path.join(repo_dir, "eclass")
+ os.makedirs(eclass_dir)
+
+ for eclass_name, eclass_content in eclasses.items():
+ with open(os.path.join(eclass_dir, "{}.eclass".format(eclass_name)), 'wt') as f:
+ if isinstance(eclass_content, basestring):
+ eclass_content = [eclass_content]
+ for line in eclass_content:
+ f.write("{}\n".format(line))
# Temporarily write empty value of masters until it becomes default.
if not repo_config or "layout.conf" not in repo_config:
@@ -556,6 +566,9 @@ class ResolverPlayground(object):
"PORTAGE_REPOSITORIES": "\n".join("[%s]\n%s" % (repo_name, "\n".join("%s = %s" % (k, v) for k, v in repo_config.items())) for repo_name, repo_config in self._repositories.items())
}
+ if self.debug:
+ env["PORTAGE_DEBUG"] = "1"
+
trees = portage.create_trees(env=env, eprefix=self.eprefix,
**create_trees_kwargs)
@@ -591,11 +604,16 @@ class ResolverPlayground(object):
_emerge.emergelog._disable = True
if action in ("depclean", "prune"):
- rval, cleanlist, ordered, req_pkg_count = \
- calc_depclean(self.settings, self.trees, None,
+ depclean_result = _calc_depclean(self.settings, self.trees, None,
options, action, InternalPackageSet(initial_atoms=atoms, allow_wildcard=True), None)
result = ResolverPlaygroundDepcleanResult(
- atoms, rval, cleanlist, ordered, req_pkg_count)
+ atoms,
+ depclean_result.returncode,
+ depclean_result.cleanlist,
+ depclean_result.ordered,
+ depclean_result.req_pkg_count,
+ depclean_result.depgraph,
+ )
else:
params = create_depgraph_params(options, action)
success, depgraph, favorites = backtrack_depgraph(
@@ -778,18 +796,46 @@ class ResolverPlaygroundTestCase(object):
return False
return True
+
+def _mergelist_str(x, depgraph):
+ if isinstance(x, DependencyArg):
+ mergelist_str = x.arg
+ elif isinstance(x, Blocker):
+ mergelist_str = x.atom
+ else:
+ repo_str = ""
+ if x.repo != "test_repo":
+ repo_str = _repo_separator + x.repo
+ build_id_str = ""
+ if (x.type_name == "binary" and
+ x.cpv.build_id is not None):
+ build_id_str = "-%s" % x.cpv.build_id
+ mergelist_str = x.cpv + build_id_str + repo_str
+ if x.built:
+ if x.operation == "merge":
+ desc = x.type_name
+ else:
+ desc = x.operation
+ mergelist_str = "[%s]%s" % (desc, mergelist_str)
+ if x.root != depgraph._frozen_config._running_root.root:
+ mergelist_str += "{targetroot}"
+ return mergelist_str
+
+
class ResolverPlaygroundResult(object):
checks = (
"success", "mergelist", "use_changes", "license_changes",
"unstable_keywords", "slot_collision_solutions",
"circular_dependency_solutions", "needed_p_mask_changes",
- "unsatisfied_deps", "forced_rebuilds", "required_use_unsatisfied"
+ "unsatisfied_deps", "forced_rebuilds", "required_use_unsatisfied",
+ "graph_order",
)
optional_checks = (
"forced_rebuilds",
"required_use_unsatisfied",
- "unsatisfied_deps"
+ "unsatisfied_deps",
+ "graph_order",
)
def __init__(self, atoms, success, mydepgraph, favorites):
@@ -808,30 +854,12 @@ class ResolverPlaygroundResult(object):
self.forced_rebuilds = None
self.required_use_unsatisfied = None
+ self.graph_order = [_mergelist_str(node, self.depgraph) for node in self.depgraph._dynamic_config.digraph]
+
if self.depgraph._dynamic_config._serialized_tasks_cache is not None:
self.mergelist = []
- host_root = self.depgraph._frozen_config._running_root.root
for x in self.depgraph._dynamic_config._serialized_tasks_cache:
- if isinstance(x, Blocker):
- self.mergelist.append(x.atom)
- else:
- repo_str = ""
- if x.repo != "test_repo":
- repo_str = _repo_separator + x.repo
- build_id_str = ""
- if (x.type_name == "binary" and
- x.cpv.build_id is not None):
- build_id_str = "-%s" % x.cpv.build_id
- mergelist_str = x.cpv + build_id_str + repo_str
- if x.built:
- if x.operation == "merge":
- desc = x.type_name
- else:
- desc = x.operation
- mergelist_str = "[%s]%s" % (desc, mergelist_str)
- if x.root != host_root:
- mergelist_str += "{targetroot}"
- self.mergelist.append(mergelist_str)
+ self.mergelist.append(_mergelist_str(x, self.depgraph))
if self.depgraph._dynamic_config._needed_use_config_changes:
self.use_changes = {}
@@ -892,14 +920,17 @@ class ResolverPlaygroundDepcleanResult(object):
checks = (
"success", "cleanlist", "ordered", "req_pkg_count",
+ "graph_order",
)
optional_checks = (
"ordered", "req_pkg_count",
+ "graph_order",
)
- def __init__(self, atoms, rval, cleanlist, ordered, req_pkg_count):
+ def __init__(self, atoms, rval, cleanlist, ordered, req_pkg_count, depgraph):
self.atoms = atoms
self.success = rval == 0
self.cleanlist = cleanlist
self.ordered = ordered
self.req_pkg_count = req_pkg_count
+ self.graph_order = [_mergelist_str(node, depgraph) for node in depgraph._dynamic_config.digraph]
diff --git a/lib/portage/tests/resolver/test_circular_choices.py b/lib/portage/tests/resolver/test_circular_choices.py
index a5c10b476..a383519fc 100644
--- a/lib/portage/tests/resolver/test_circular_choices.py
+++ b/lib/portage/tests/resolver/test_circular_choices.py
@@ -1,4 +1,4 @@
-# Copyright 2011-2019 Gentoo Authors
+# Copyright 2011-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -160,3 +160,45 @@ class VirtualCircularChoicesTestCase(TestCase):
self.assertEqual(test_case.test_success, True, test_case.fail_msg)
finally:
playground.cleanup()
+
+
+class CircularPypyExeTestCase(TestCase):
+ def testCircularPypyExe(self):
+
+ ebuilds = {
+ 'dev-python/pypy-7.3.0': {
+ 'EAPI': '7',
+ 'SLOT' : '0/73',
+ 'DEPEND': '|| ( dev-python/pypy-exe dev-python/pypy-exe-bin )'
+ },
+ 'dev-python/pypy-exe-7.3.0': {
+ 'EAPI': '7',
+ 'IUSE': 'low-memory',
+ 'SLOT' : '7.3.0',
+ 'BDEPEND': '!low-memory? ( dev-python/pypy )'
+ },
+ 'dev-python/pypy-exe-bin-7.3.0': {
+ 'EAPI': '7',
+ 'SLOT' : '7.3.0',
+ },
+ }
+
+ test_cases = (
+ # Demonstrate bug 705986, where a USE change suggestion was given
+ # even though an || preference adjustment would solve the problem
+ # by pulling in pypy-exe-bin instead of pypy-exe.
+ ResolverPlaygroundTestCase(
+ ['dev-python/pypy'],
+ mergelist=['dev-python/pypy-exe-bin-7.3.0', 'dev-python/pypy-7.3.0'],
+ success = True,
+ ),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_depth.py b/lib/portage/tests/resolver/test_depth.py
index cb1e2dd5d..ea7c803bb 100644
--- a/lib/portage/tests/resolver/test_depth.py
+++ b/lib/portage/tests/resolver/test_depth.py
@@ -1,4 +1,4 @@
-# Copyright 2011 Gentoo Foundation
+# Copyright 2011-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -9,6 +9,17 @@ class ResolverDepthTestCase(TestCase):
def testResolverDepth(self):
+ profile = {
+ "package.mask":
+ (
+ # Mask an installed package (for which an update is
+ # available) in order to test for bug 712298, where
+ # --update caused --deep=<depth> to be violated for
+ # such a package.
+ "<dev-libs/B-2",
+ ),
+ }
+
ebuilds = {
"dev-libs/A-1": {"RDEPEND" : "dev-libs/B"},
"dev-libs/A-2": {"RDEPEND" : "dev-libs/B"},
@@ -65,6 +76,9 @@ class ResolverDepthTestCase(TestCase):
world = ["dev-libs/A"]
test_cases = (
+ # Test for bug 712298, where --update caused --deep=<depth>
+ # to be violated for dependencies that were masked. In this
+ # case, the installed dev-libs/B-1 dependency is masked.
ResolverPlaygroundTestCase(
["dev-libs/A"],
options = {"--update": True, "--deep": 0},
@@ -243,7 +257,7 @@ class ResolverDepthTestCase(TestCase):
)
playground = ResolverPlayground(ebuilds=ebuilds, installed=installed,
- world=world)
+ profile=profile, world=world)
try:
for test_case in test_cases:
playground.run_TestCase(test_case)
diff --git a/lib/portage/tests/resolver/test_multirepo.py b/lib/portage/tests/resolver/test_multirepo.py
index dabec6af9..bdfcf2120 100644
--- a/lib/portage/tests/resolver/test_multirepo.py
+++ b/lib/portage/tests/resolver/test_multirepo.py
@@ -1,4 +1,4 @@
-# Copyright 2010-2014 Gentoo Foundation
+# Copyright 2010-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -246,7 +246,7 @@ class MultirepoTestCase(TestCase):
"dev-libs/B-2": { "DEPEND": "dev-libs/A[foo]", "EAPI": 2 },
"dev-libs/B-3": { "DEPEND": "dev-libs/A[-foo]", "EAPI": 2 },
- #package.keywords test
+ #package.accept_keywords test
"dev-libs/C-1": { "KEYWORDS": "~x86" },
"dev-libs/C-1::repo1": { "KEYWORDS": "~x86" },
@@ -286,7 +286,7 @@ class MultirepoTestCase(TestCase):
(
"dev-libs/A::repo1 foo",
),
- "package.keywords":
+ "package.accept_keywords":
(
"=dev-libs/C-1::test_repo",
),
@@ -332,7 +332,7 @@ class MultirepoTestCase(TestCase):
success = False,
check_repo_names = True),
- #package.keywords test
+ #package.accept_keywords test
ResolverPlaygroundTestCase(
["dev-libs/C"],
success = True,
diff --git a/lib/portage/tests/resolver/test_or_choices.py b/lib/portage/tests/resolver/test_or_choices.py
index 63e62d010..f9d9687f3 100644
--- a/lib/portage/tests/resolver/test_or_choices.py
+++ b/lib/portage/tests/resolver/test_or_choices.py
@@ -1,6 +1,8 @@
-# Copyright 2013-2015 Gentoo Foundation
+# Copyright 2013-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
+import itertools
+
from portage.tests import TestCase
from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
ResolverPlaygroundTestCase)
@@ -78,62 +80,6 @@ class OrChoicesTestCase(TestCase):
finally:
playground.cleanup()
- def testOrChoicesLibpostproc(self):
- ebuilds = {
- "media-video/ffmpeg-0.10" : {
- "EAPI": "5",
- "SLOT": "0.10"
- },
- "media-video/ffmpeg-1.2.2" : {
- "EAPI": "5",
- "SLOT": "0"
- },
- "media-libs/libpostproc-0.8.0.20121125" : {
- "EAPI": "5"
- },
- "media-plugins/gst-plugins-ffmpeg-0.10.13_p201211-r1" : {
- "EAPI": "5",
- "RDEPEND" : "|| ( media-video/ffmpeg:0 media-libs/libpostproc )"
- },
- }
-
- installed = {
- "media-video/ffmpeg-0.10" : {
- "EAPI": "5",
- "SLOT": "0.10"
- },
- "media-libs/libpostproc-0.8.0.20121125" : {
- "EAPI": "5"
- },
- "media-plugins/gst-plugins-ffmpeg-0.10.13_p201211-r1" : {
- "EAPI": "5",
- "RDEPEND" : "|| ( media-video/ffmpeg:0 media-libs/libpostproc )"
- },
- }
-
- world = ["media-plugins/gst-plugins-ffmpeg"]
-
- test_cases = (
- # Demonstrate that libpostproc is preferred
- # over ffmpeg:0 for bug #480736.
- ResolverPlaygroundTestCase(
- ["@world"],
- options = {"--update": True, "--deep": True},
- success=True,
- all_permutations = True,
- mergelist = []),
- )
-
- playground = ResolverPlayground(ebuilds=ebuilds, installed=installed,
- world=world, debug=False)
- try:
- for test_case in test_cases:
- playground.run_TestCase(test_case)
- self.assertEqual(test_case.test_success, True, test_case.fail_msg)
- finally:
- playground.cleanup()
-
-
def testInitiallyUnsatisfied(self):
ebuilds = {
@@ -340,3 +286,515 @@ class OrChoicesTestCase(TestCase):
# Disable debug so that cleanup works.
playground.debug = False
playground.cleanup()
+
+ def test_python_slot(self):
+ ebuilds = {
+
+ "dev-lang/python-3.8" : {
+ "EAPI": "7",
+ "SLOT": "3.8"
+ },
+
+ "dev-lang/python-3.7" : {
+ "EAPI": "7",
+ "SLOT": "3.7"
+ },
+
+ "dev-lang/python-3.6" : {
+ "EAPI": "7",
+ "SLOT": "3.6"
+ },
+
+ "app-misc/bar-1" : {
+ "EAPI": "7",
+ "IUSE": "python_targets_python3_6 +python_targets_python3_7",
+ "RDEPEND": "python_targets_python3_7? ( dev-lang/python:3.7 ) python_targets_python3_6? ( dev-lang/python:3.6 )"
+ },
+
+ "app-misc/foo-1" : {
+ "EAPI": "7",
+ "RDEPEND": "|| ( dev-lang/python:3.8 dev-lang/python:3.7 dev-lang/python:3.6 )"
+ },
+
+ }
+
+ installed = {
+
+ "dev-lang/python-3.7" : {
+ "EAPI": "7",
+ "SLOT": "3.7"
+ },
+
+ "app-misc/bar-1" : {
+ "EAPI": "7",
+ "IUSE": "python_targets_python3_6 +python_targets_python3_7",
+ "USE": "python_targets_python3_7",
+ "RDEPEND": "dev-lang/python:3.7"
+ },
+
+ "app-misc/foo-1" : {
+ "EAPI": "7",
+ "RDEPEND": "|| ( dev-lang/python:3.8 dev-lang/python:3.7 dev-lang/python:3.6 )"
+ },
+
+ }
+
+ world = ["app-misc/foo", "app-misc/bar"]
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ['dev-lang/python-3.8']
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
+
+ installed = {
+
+ "dev-lang/python-3.8" : {
+ "EAPI": "7",
+ "SLOT": "3.8"
+ },
+
+ "dev-lang/python-3.7" : {
+ "EAPI": "7",
+ "SLOT": "3.7"
+ },
+
+ "app-misc/bar-1" : {
+ "EAPI": "7",
+ "IUSE": "python_targets_python3_6 +python_targets_python3_7",
+ "USE": "python_targets_python3_7",
+ "RDEPEND": "dev-lang/python:3.7"
+ },
+
+ "app-misc/foo-1" : {
+ "EAPI": "7",
+ "RDEPEND": "|| ( dev-lang/python:3.8 dev-lang/python:3.7 dev-lang/python:3.6 )"
+ },
+
+ }
+
+ test_cases = (
+ # Test for bug 707108, where a new python slot was erroneously
+ # removed by emerge --depclean.
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=[],
+ ),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
+
+ def test_virtual_w3m(self):
+ ebuilds = {
+
+ 'app-text/xmlto-0.0.28-r1' : {
+ 'EAPI': '7',
+ 'RDEPEND': '|| ( virtual/w3m www-client/lynx www-client/elinks )'
+ },
+
+ 'www-client/elinks-0.13_pre_pre20180225' : {
+ 'EAPI': '7',
+ },
+
+ 'www-client/lynx-2.9.0_pre4' : {
+ 'EAPI': '7',
+ },
+
+ 'virtual/w3m-0' : {
+ 'EAPI': '7',
+ 'RDEPEND': '|| ( www-client/w3m www-client/w3mmee )'
+ },
+
+ 'www-client/w3m-0.5.3_p20190105' : {
+ 'EAPI': '7',
+ },
+
+ 'www-client/w3mmee-0.3.2_p24-r10' : {
+ 'EAPI': '7',
+ },
+
+ }
+
+ installed = {
+
+ 'app-text/xmlto-0.0.28-r1' : {
+ 'EAPI': '7',
+ 'RDEPEND': '|| ( virtual/w3m www-client/lynx www-client/elinks )'
+ },
+
+ 'www-client/elinks-0.13_pre_pre20180225' : {
+ 'EAPI': '7',
+ },
+
+ 'www-client/lynx-2.9.0_pre4' : {
+ 'EAPI': '7',
+ },
+
+ }
+
+ world = ['app-text/xmlto', 'www-client/elinks', 'www-client/lynx']
+
+ test_cases = (
+
+ # Test for bug 649622 (without www-client/w3m installed),
+ # where virtual/w3m was pulled in only to be removed by the
+ # next emerge --depclean.
+ ResolverPlaygroundTestCase(
+ ['@world'],
+ options = {'--update': True, '--deep': True},
+ success = True,
+ mergelist = []
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
+
+ installed = dict(itertools.chain(installed.items(), {
+
+ 'www-client/w3m-0.5.3_p20190105' : {
+ 'EAPI': '7',
+ },
+
+ }.items()))
+
+ test_cases = (
+
+ # Test for bug 649622 (with www-client/w3m installed),
+ # where virtual/w3m was pulled in only to be removed by the
+ # next emerge --depclean.
+ ResolverPlaygroundTestCase(
+ ['@world'],
+ options = {'--update': True, '--deep': True},
+ success = True,
+ mergelist = []
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
+
+ installed = dict(itertools.chain(installed.items(), {
+
+ 'virtual/w3m-0' : {
+ 'EAPI': '7',
+ 'RDEPEND': '|| ( www-client/w3m www-client/w3mmee )'
+ },
+
+ }.items()))
+
+ test_cases = (
+
+ # Test for bug 649622, where virtual/w3m is removed by
+ # emerge --depclean immediately after it's installed
+ # by a world update. Note that removal of virtual/w3m here
+ # is essentially indistinguishable from removal of
+ # dev-util/cmake-bootstrap in the depclean test case for
+ # bug 703440.
+ ResolverPlaygroundTestCase(
+ [],
+ options={'--depclean': True},
+ success=True,
+ cleanlist=['virtual/w3m-0', 'www-client/w3m-0.5.3_p20190105'],
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
+
+
+ test_cases = (
+
+ # Test for behavior reported in bug 649622 comment #10, where
+ # depclean removed virtual/w3m-0 even though www-client/w3m
+ # was in the world file. Since nothing is removed here, it
+ # means that we have not reproduced the behavior reported in
+ # this comment.
+ ResolverPlaygroundTestCase(
+ [],
+ options={'--depclean': True},
+ success=True,
+ cleanlist=[],
+ ),
+
+ )
+
+ world += ['www-client/w3m']
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
+
+
+ def test_virtual_w3m_realistic(self):
+ """
+ Test for bug 649622 with realistic www-client/w3m dependencies copied
+ from real ebuilds.
+ """
+ ebuilds = {
+
+ 'app-misc/neofetch-6.1.0': {
+ 'EAPI': '7',
+ 'RDEPEND': 'www-client/w3m'
+ },
+
+ 'app-text/xmlto-0.0.28-r1' : {
+ 'EAPI': '7',
+ 'RDEPEND': '|| ( virtual/w3m www-client/lynx www-client/elinks )'
+ },
+
+ 'mail-client/neomutt-20191207': {
+ 'EAPI': '7',
+ 'RDEPEND': '|| ( www-client/lynx www-client/w3m www-client/elinks )'
+ },
+
+ 'www-client/elinks-0.13_pre_pre20180225' : {
+ 'EAPI': '7',
+ },
+
+ 'www-client/lynx-2.9.0_pre4' : {
+ 'EAPI': '7',
+ },
+
+ 'virtual/w3m-0' : {
+ 'EAPI': '7',
+ 'RDEPEND': '|| ( www-client/w3m www-client/w3mmee )'
+ },
+
+ 'www-client/w3m-0.5.3_p20190105' : {
+ 'EAPI': '7',
+ },
+
+ 'www-client/w3mmee-0.3.2_p24-r10' : {
+ 'EAPI': '7',
+ },
+
+ 'x11-base/xorg-server-1.20.7' : {
+ 'EAPI': '7',
+ 'RDEPEND': '|| ( www-client/links www-client/lynx www-client/w3m ) app-text/xmlto',
+ }
+ }
+
+ installed = {
+
+ 'app-misc/neofetch-6.1.0': {
+ 'EAPI': '7',
+ 'RDEPEND': 'www-client/w3m'
+ },
+
+ 'app-text/xmlto-0.0.28-r1' : {
+ 'EAPI': '7',
+ 'RDEPEND': '|| ( virtual/w3m www-client/lynx www-client/elinks )'
+ },
+
+ 'mail-client/neomutt-20191207': {
+ 'EAPI': '7',
+ 'RDEPEND': '|| ( www-client/lynx www-client/w3m www-client/elinks )'
+ },
+
+ 'www-client/lynx-2.9.0_pre4' : {
+ 'EAPI': '7',
+ },
+
+ 'www-client/w3m-0.5.3_p20190105' : {
+ 'EAPI': '7',
+ },
+
+ 'x11-base/xorg-server-1.20.7' : {
+ 'EAPI': '7',
+ 'RDEPEND': '|| ( www-client/links www-client/lynx www-client/w3m ) app-text/xmlto',
+ }
+ }
+
+ world = ['app-misc/neofetch', 'mail-client/neomutt', 'www-client/lynx', 'x11-base/xorg-server']
+
+ test_cases = (
+
+ # Test for bug 649622 (with www-client/w3m installed via
+ # xorg-server dependency), where virtual/w3m was pulled in
+ # only to be removed by the next emerge --depclean. Note
+ # that graph_order must be deterministic in order to achieve
+ # deterministic results which are consistent between both
+ # update and removal (depclean) actions.
+ ResolverPlaygroundTestCase(
+ ['@world'],
+ options = {'--update': True, '--deep': True},
+ success = True,
+ mergelist=['virtual/w3m-0'],
+ graph_order=['@world', '@profile', '@selected', '@system', '[nomerge]app-misc/neofetch-6.1.0', '[nomerge]mail-client/neomutt-20191207', '[nomerge]www-client/lynx-2.9.0_pre4', '[nomerge]x11-base/xorg-server-1.20.7', '[nomerge]app-text/xmlto-0.0.28-r1', '[nomerge]www-client/w3m-0.5.3_p20190105', 'virtual/w3m-0'],
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
+
+
+ installed = dict(itertools.chain(installed.items(), {
+
+ 'virtual/w3m-0' : {
+ 'EAPI': '7',
+ 'RDEPEND': '|| ( www-client/w3m www-client/w3mmee )'
+ },
+
+ }.items()))
+
+ test_cases = (
+
+ # Test for bug 649622, where virtual/w3m is removed by
+ # emerge --depclean immediately after it's installed
+ # by a world update. Since virtual/w3m-0 is not removed
+ # here, this case fails to reproduce bug 649622. Note
+ # that graph_order must be deterministic in order to achieve
+ # deterministic results which are consistent between both
+ # update and removal (depclean) actions.
+ ResolverPlaygroundTestCase(
+ [],
+ options={'--depclean': True},
+ success=True,
+ cleanlist=[],
+ graph_order=['@world', '@____depclean_protected_set____', '@profile', '@selected', '@system', '[nomerge]app-misc/neofetch-6.1.0', '[nomerge]mail-client/neomutt-20191207', '[nomerge]www-client/lynx-2.9.0_pre4', '[nomerge]x11-base/xorg-server-1.20.7', '[nomerge]app-text/xmlto-0.0.28-r1', '[nomerge]www-client/w3m-0.5.3_p20190105', '[nomerge]virtual/w3m-0'],
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
+
+
+class OrChoicesLibpostprocTestCase(TestCase):
+
+ def testOrChoicesLibpostproc(self):
+ # This test case is expected to fail after the fix for bug 706278,
+ # since the "undesirable" slot upgrade which triggers a blocker conflict
+ # in this test case is practically indistinguishable from a desirable
+ # slot upgrade. This particular blocker conflict is no longer relevant,
+ # since current versions of media-libs/libpostproc are no longer
+ # compatible with any available media-video/ffmpeg slot. In order to
+ # solve this test case, some fancy backtracking (like for bug 382421)
+ # will be required.
+ self.todo = True
+
+ ebuilds = {
+ "media-video/ffmpeg-0.10" : {
+ "EAPI": "5",
+ "SLOT": "0.10"
+ },
+ "media-video/ffmpeg-1.2.2" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+ "media-libs/libpostproc-0.8.0.20121125" : {
+ "EAPI": "5"
+ },
+ "media-plugins/gst-plugins-ffmpeg-0.10.13_p201211-r1" : {
+ "EAPI": "5",
+ "RDEPEND" : "|| ( media-video/ffmpeg:0 media-libs/libpostproc )"
+ },
+ }
+
+ installed = {
+ "media-video/ffmpeg-0.10" : {
+ "EAPI": "5",
+ "SLOT": "0.10"
+ },
+ "media-libs/libpostproc-0.8.0.20121125" : {
+ "EAPI": "5"
+ },
+ "media-plugins/gst-plugins-ffmpeg-0.10.13_p201211-r1" : {
+ "EAPI": "5",
+ "RDEPEND" : "|| ( media-video/ffmpeg:0 media-libs/libpostproc )"
+ },
+ }
+
+ world = ["media-plugins/gst-plugins-ffmpeg"]
+
+ test_cases = (
+ # Demonstrate that libpostproc is preferred
+ # over ffmpeg:0 for bug #480736.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success=True,
+ all_permutations = True,
+ mergelist = []),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed,
+ world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_or_upgrade_installed.py b/lib/portage/tests/resolver/test_or_upgrade_installed.py
index 7018e08de..3889d53dc 100644
--- a/lib/portage/tests/resolver/test_or_upgrade_installed.py
+++ b/lib/portage/tests/resolver/test_or_upgrade_installed.py
@@ -158,3 +158,73 @@ class OrUpgradeInstalledTestCase(TestCase):
finally:
playground.debug = False
playground.cleanup()
+
+
+ def test_llvm_slot_operator(self):
+ ebuilds = {
+ 'media-libs/mesa-19.2.8': {
+ 'EAPI': '7',
+ 'RDEPEND': '''|| (
+ sys-devel/llvm:10
+ sys-devel/llvm:9
+ sys-devel/llvm:8
+ sys-devel/llvm:7
+ )
+ sys-devel/llvm:='''
+ },
+ 'sys-devel/llvm-10': {
+ 'EAPI': '7',
+ 'KEYWORDS': '',
+ 'SLOT': '10',
+ },
+ 'sys-devel/llvm-9': {
+ 'EAPI': '7',
+ 'SLOT': '9',
+ },
+ 'sys-devel/llvm-8': {
+ 'EAPI': '7',
+ 'SLOT': '8',
+ },
+ }
+
+ installed = {
+ 'media-libs/mesa-19.2.8': {
+ 'EAPI': '7',
+ 'RDEPEND': '''|| (
+ sys-devel/llvm:10
+ sys-devel/llvm:9
+ sys-devel/llvm:8
+ sys-devel/llvm:7
+ )
+ sys-devel/llvm:8/8='''
+ },
+ 'sys-devel/llvm-8': {
+ 'EAPI': '7',
+ 'SLOT': '8',
+ },
+ }
+
+ world = ['media-libs/mesa']
+
+ test_cases = (
+ # Demonstrate bug 706278, where there is a missed slot operator
+ # rebuild that prevents upgrade from llvm-8 to llvm-9.
+ ResolverPlaygroundTestCase(
+ ['@world'],
+ options={'--update': True, '--deep': True},
+ success=True,
+ mergelist=['sys-devel/llvm-9', 'media-libs/mesa-19.2.8'],
+ ),
+ )
+
+ playground = ResolverPlayground(debug=False,
+ ebuilds=ebuilds, installed=installed, world=world)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_slot_operator_reverse_deps.py b/lib/portage/tests/resolver/test_slot_operator_reverse_deps.py
index ce614a4dc..6641e9987 100644
--- a/lib/portage/tests/resolver/test_slot_operator_reverse_deps.py
+++ b/lib/portage/tests/resolver/test_slot_operator_reverse_deps.py
@@ -1,4 +1,4 @@
-# Copyright 2016 Gentoo Foundation
+# Copyright 2016-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
@@ -111,3 +111,94 @@ class SlotOperatorReverseDepsTestCase(TestCase):
test_case.fail_msg)
finally:
playground.cleanup()
+
+
+class SlotOperatorReverseDepsLibGit2TestCase(TestCase):
+
+ def testSlotOperatorReverseDepsLibGit2(self):
+ """
+ Test bug #717140, where the depgraph _slot_operator_update_probe
+ method ignored <dev-libs/libgit2-1:0= dependency and tried to
+ trigger an upgrade to dev-libs/libgit2-1.0.0-r1, ultimately
+ resulting in an undesirable downgrade to dev-libs/libgit2-0.28.4-r1.
+ """
+
+ ebuilds = {
+
+ "dev-libs/libgit2-0.28.4-r1" : {
+ "EAPI": "7",
+ "SLOT": "0/28",
+ },
+
+ "dev-libs/libgit2-0.99.0-r1" : {
+ "EAPI": "7",
+ "SLOT": "0/0.99",
+ },
+
+ "dev-libs/libgit2-1.0.0-r1" : {
+ "EAPI": "7",
+ "SLOT": "0/1.0",
+ },
+
+ "dev-libs/libgit2-glib-0.28.0.1" : {
+ "EAPI": "7",
+ "SLOT": "0",
+ "RDEPEND": "<dev-libs/libgit2-0.29:0= >=dev-libs/libgit2-0.26.0",
+ },
+
+ "dev-libs/libgit2-glib-0.99.0.1" : {
+ "EAPI": "7",
+ "SLOT": "0",
+ "RDEPEND": "<dev-libs/libgit2-1:0= >=dev-libs/libgit2-0.26.0",
+ },
+
+ "dev-vcs/gitg-3.32.1-r1" : {
+ "EAPI": "7",
+ "SLOT": "0",
+ "RDEPEND": "dev-libs/libgit2:= >=dev-libs/libgit2-glib-0.27 <dev-libs/libgit2-glib-1",
+ },
+ }
+
+ installed = {
+
+ "dev-libs/libgit2-0.99.0-r1" : {
+ "EAPI": "7",
+ "SLOT": "0/0.99",
+ },
+
+ "dev-libs/libgit2-glib-0.99.0.1" : {
+ "EAPI": "7",
+ "SLOT": "0",
+ "RDEPEND": "<dev-libs/libgit2-1:0/0.99= >=dev-libs/libgit2-0.26.0",
+ },
+
+ "dev-vcs/gitg-3.32.1-r1" : {
+ "EAPI": "7",
+ "SLOT": "0",
+ "RDEPEND": "dev-libs/libgit2:0/0.99= >=dev-libs/libgit2-glib-0.27 <dev-libs/libgit2-glib-1",
+ },
+
+ }
+
+ world = ["dev-vcs/gitg"]
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ #mergelist = ['dev-libs/libgit2-0.28.4-r1', 'dev-libs/libgit2-glib-0.99.0.1', 'dev-vcs/gitg-3.32.1-r1'],
+ mergelist = [],
+ ),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/util/futures/test_compat_coroutine.py b/lib/portage/tests/util/futures/test_compat_coroutine.py
index f96aa9be5..b561c0227 100644
--- a/lib/portage/tests/util/futures/test_compat_coroutine.py
+++ b/lib/portage/tests/util/futures/test_compat_coroutine.py
@@ -57,20 +57,43 @@ class CompatCoroutineTestCase(TestCase):
loop.run_until_complete(catching_coroutine(loop=loop)))
def test_cancelled_coroutine(self):
+ """
+ Verify that a coroutine can handle (and reraise) asyncio.CancelledError
+ in order to perform any necessary cleanup. Note that the
+ asyncio.CancelledError will only be thrown in the coroutine if there's
+ an opportunity (yield) before the generator raises StopIteration.
+ """
+ loop = asyncio.get_event_loop()
+ ready_for_exception = loop.create_future()
+ exception_in_coroutine = loop.create_future()
@coroutine
def cancelled_coroutine(loop=None):
loop = asyncio._wrap_loop(loop)
while True:
- yield loop.create_future()
+ task = loop.create_future()
+ try:
+ ready_for_exception.set_result(None)
+ yield task
+ except BaseException as e:
+ # Since python3.8, asyncio.CancelledError inherits
+ # from BaseException.
+ task.done() or task.cancel()
+ exception_in_coroutine.set_exception(e)
+ raise
+ else:
+ exception_in_coroutine.set_result(None)
- loop = asyncio.get_event_loop()
future = cancelled_coroutine(loop=loop)
- loop.call_soon(future.cancel)
+ loop.run_until_complete(ready_for_exception)
+ future.cancel()
self.assertRaises(asyncio.CancelledError,
loop.run_until_complete, future)
+ self.assertRaises(asyncio.CancelledError,
+ loop.run_until_complete, exception_in_coroutine)
+
def test_cancelled_future(self):
"""
When a coroutine raises CancelledError, the coroutine's
diff --git a/lib/portage/tests/util/futures/test_done_callback_after_exit.py b/lib/portage/tests/util/futures/test_done_callback_after_exit.py
new file mode 100644
index 000000000..7ac7674e6
--- /dev/null
+++ b/lib/portage/tests/util/futures/test_done_callback_after_exit.py
@@ -0,0 +1,44 @@
+# Copyright 2020 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AsynchronousTask import AsynchronousTask
+from portage.tests import TestCase
+from portage.util.futures import asyncio
+
+
+class DoneCallbackAfterExitTestCase(TestCase):
+
+ def test_done_callback_after_exit(self):
+ """
+ Test that callbacks can be registered via the Future
+ add_done_callback method even after the future is done, and
+ verify that the callbacks are called.
+ """
+ loop = asyncio._wrap_loop()
+ future = loop.create_future()
+ future.set_result(None)
+
+ for i in range(3):
+ event = loop.create_future()
+ future.add_done_callback(lambda future: event.set_result(None))
+ loop.run_until_complete(event)
+
+ def test_exit_listener_after_exit(self):
+ """
+ Test that callbacks can be registered via the AsynchronousTask
+ addExitListener method even after the task is done, and
+ verify that the callbacks are called.
+ """
+ loop = asyncio._wrap_loop()
+ task = AsynchronousTask(scheduler=loop)
+ task.start()
+ loop.run_until_complete(task.async_wait())
+
+ for i in range(3):
+ event = loop.create_future()
+ task.addStartListener(lambda task: event.set_result(None))
+ loop.run_until_complete(event)
+
+ event = loop.create_future()
+ task.addExitListener(lambda task: event.set_result(None))
+ loop.run_until_complete(event)
diff --git a/lib/portage/util/__init__.py b/lib/portage/util/__init__.py
index def9f2392..6bff97fb7 100644
--- a/lib/portage/util/__init__.py
+++ b/lib/portage/util/__init__.py
@@ -1803,14 +1803,6 @@ def find_updated_config_files(target_root, config_protect):
mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
cmd = shlex_split(mycommand)
- if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000:
- # Python 3.1 _execvp throws TypeError for non-absolute executable
- # path passed as bytes (see https://bugs.python.org/issue8513).
- fullname = portage.process.find_binary(cmd[0])
- if fullname is None:
- raise portage.exception.CommandNotFound(cmd[0])
- cmd[0] = fullname
-
cmd = [_unicode_encode(arg, encoding=encoding, errors='strict')
for arg in cmd]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
diff --git a/lib/portage/util/_async/AsyncFunction.py b/lib/portage/util/_async/AsyncFunction.py
index ad3d8333f..1dffa36cc 100644
--- a/lib/portage/util/_async/AsyncFunction.py
+++ b/lib/portage/util/_async/AsyncFunction.py
@@ -44,10 +44,11 @@ class AsyncFunction(ForkProcess):
return os.EX_OK
- def _pipe_logger_exit(self, pipe_logger):
+ def _async_waitpid(self):
# Ignore this event, since we want to ensure that we exit
# only after _async_func_reader_exit has reached EOF.
- self._pipe_logger = None
+ if self._async_func_reader is None:
+ ForkProcess._async_waitpid(self)
def _async_func_reader_exit(self, pipe_reader):
try:
diff --git a/lib/portage/util/_async/FileDigester.py b/lib/portage/util/_async/FileDigester.py
index 72f06759c..bb99b9b6d 100644
--- a/lib/portage/util/_async/FileDigester.py
+++ b/lib/portage/util/_async/FileDigester.py
@@ -52,10 +52,11 @@ class FileDigester(ForkProcess):
self.digests = digests
- def _pipe_logger_exit(self, pipe_logger):
+ def _async_waitpid(self):
# Ignore this event, since we want to ensure that we
# exit only after _digest_pipe_reader has reached EOF.
- self._pipe_logger = None
+ if self._digest_pipe_reader is None:
+ ForkProcess._async_waitpid(self)
def _digest_pipe_reader_exit(self, pipe_reader):
self._parse_digests(pipe_reader.getvalue())
diff --git a/lib/portage/util/_desktop_entry.py b/lib/portage/util/_desktop_entry.py
index 45949215a..ee6572588 100644
--- a/lib/portage/util/_desktop_entry.py
+++ b/lib/portage/util/_desktop_entry.py
@@ -42,14 +42,6 @@ _ShowIn_exemptions = (
def validate_desktop_entry(path):
args = ["desktop-file-validate", path]
- if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000:
- # Python 3.1 _execvp throws TypeError for non-absolute executable
- # path passed as bytes (see https://bugs.python.org/issue8513).
- fullname = portage.process.find_binary(args[0])
- if fullname is None:
- raise portage.exception.CommandNotFound(args[0])
- args[0] = fullname
-
args = [_unicode_encode(x, errors='strict') for x in args]
proc = subprocess.Popen(args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
diff --git a/lib/portage/util/_dyn_libs/LinkageMapELF.py b/lib/portage/util/_dyn_libs/LinkageMapELF.py
index 92a50b444..473a1243d 100644
--- a/lib/portage/util/_dyn_libs/LinkageMapELF.py
+++ b/lib/portage/util/_dyn_libs/LinkageMapELF.py
@@ -1,7 +1,9 @@
-# Copyright 1998-2019 Gentoo Authors
+# Copyright 1998-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
+import collections
import errno
+import itertools
import logging
import subprocess
import sys
@@ -14,6 +16,7 @@ from portage import _unicode_encode
from portage.cache.mappings import slot_dict_class
from portage.const import EPREFIX
from portage.dep.soname.multilib_category import compute_multilib_category
+from portage.dep.soname.SonameAtom import SonameAtom
from portage.exception import CommandNotFound, InvalidData
from portage.localization import _
from portage.util import getlibpaths
@@ -269,7 +272,10 @@ class LinkageMapELF(object):
continue
plibs.update((x, cpv) for x in items)
if plibs:
- args = [os.path.join(EPREFIX or "/", "usr/bin/scanelf"), "-qF", "%a;%F;%S;%r;%n"]
+ # We don't use scanelf -q, since that would omit libraries like
+ # musl's /usr/lib/libc.so which do not have any DT_NEEDED or
+ # DT_SONAME settings.
+ args = [os.path.join(EPREFIX or "/", "usr/bin/scanelf"), "-BF", "%a;%F;%S;%r;%n"]
args.extend(os.path.join(root, x.lstrip("." + os.sep)) \
for x in plibs)
try:
@@ -308,6 +314,22 @@ class LinkageMapELF(object):
raise
# File removed concurrently.
continue
+
+ # Infer implicit soname from basename (bug 715162).
+ if not entry.soname:
+ try:
+ proc = subprocess.Popen([b'file',
+ _unicode_encode(entry.filename,
+ encoding=_encodings['fs'], errors='strict')],
+ stdout=subprocess.PIPE)
+ out, err = proc.communicate()
+ proc.wait()
+ except EnvironmentError:
+ pass
+ else:
+ if b'SB shared object' in out:
+ entry.soname = os.path.basename(entry.filename)
+
entry.multilib_category = compute_multilib_category(elf_header)
entry.filename = entry.filename[root_len:]
owner = plibs.pop(entry.filename, None)
@@ -328,8 +350,13 @@ class LinkageMapELF(object):
# Share identical frozenset instances when available,
# in order to conserve memory.
frozensets = {}
+ owner_entries = collections.defaultdict(list)
- for owner, location, l in lines:
+ while True:
+ try:
+ owner, location, l = lines.pop()
+ except IndexError:
+ break
l = l.rstrip("\n")
if not l:
continue
@@ -352,18 +379,55 @@ class LinkageMapELF(object):
# exists, map e_machine (entry.arch) to an approximate
# multilib category. If all else fails, use e_machine, just
# as older versions of portage did.
- arch = entry.multilib_category
- if arch is None:
- arch = _approx_multilib_categories.get(
+ if entry.multilib_category is None:
+ entry.multilib_category = _approx_multilib_categories.get(
entry.arch, entry.arch)
- obj = entry.filename
- soname = entry.soname
+ entry.filename = normalize_path(entry.filename)
expand = {"ORIGIN": os.path.dirname(entry.filename)}
- path = frozenset(normalize_path(
+ entry.runpaths = frozenset(normalize_path(
varexpand(x, expand, error_leader=lambda: "%s: " % location))
for x in entry.runpaths)
- path = frozensets.setdefault(path, path)
+ entry.runpaths = frozensets.setdefault(entry.runpaths, entry.runpaths)
+ owner_entries[owner].append(entry)
+
+ # In order to account for internal library resolution which a package
+ # may implement (useful at least for handling of bundled libraries),
+ # generate implicit runpath entries for any needed sonames which are
+ # provided by the same owner package.
+ for owner, entries in owner_entries.items():
+ if owner is None:
+ continue
+
+ providers = {}
+ for entry in entries:
+ if entry.soname:
+ providers[SonameAtom(entry.multilib_category, entry.soname)] = entry
+
+ for entry in entries:
+ implicit_runpaths = []
+ for soname in entry.needed:
+ soname_atom = SonameAtom(entry.multilib_category, soname)
+ provider = providers.get(soname_atom)
+ if provider is None:
+ continue
+ provider_dir = os.path.dirname(provider.filename)
+ if provider_dir not in entry.runpaths:
+ implicit_runpaths.append(provider_dir)
+
+ if implicit_runpaths:
+ entry.runpaths = frozenset(
+ itertools.chain(entry.runpaths, implicit_runpaths))
+ entry.runpaths = frozensets.setdefault(
+ entry.runpaths, entry.runpaths)
+
+ for owner, entry in ((owner, entry)
+ for (owner, entries) in owner_entries.items()
+ for entry in entries):
+ arch = entry.multilib_category
+ obj = entry.filename
+ soname = entry.soname
+ path = entry.runpaths
needed = frozenset(entry.needed)
needed = frozensets.setdefault(needed, needed)
diff --git a/lib/portage/util/_dyn_libs/NeededEntry.py b/lib/portage/util/_dyn_libs/NeededEntry.py
index c52cfce3c..70ff99100 100644
--- a/lib/portage/util/_dyn_libs/NeededEntry.py
+++ b/lib/portage/util/_dyn_libs/NeededEntry.py
@@ -52,6 +52,11 @@ class NeededEntry(object):
del fields[cls._MIN_FIELDS:]
obj.arch, obj.filename, obj.soname, rpaths, needed = fields
+ # We don't use scanelf -q, since that would omit libraries like
+ # musl's /usr/lib/libc.so which do not have any DT_NEEDED or
+ # DT_SONAME settings. Since we don't use scanelf -q, we have to
+ # handle the special rpath value " - " below.
+ rpaths = "" if rpaths == " - " else rpaths
obj.runpaths = tuple(filter(None, rpaths.split(":")))
obj.needed = tuple(filter(None, needed.split(",")))
diff --git a/lib/portage/util/_dyn_libs/soname_deps_qa.py b/lib/portage/util/_dyn_libs/soname_deps_qa.py
new file mode 100644
index 000000000..6840bb602
--- /dev/null
+++ b/lib/portage/util/_dyn_libs/soname_deps_qa.py
@@ -0,0 +1,98 @@
+# Copyright 2020 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+
+from portage import (
+ _encodings,
+ _unicode_encode,
+ os,
+)
+from portage.dep.soname.parse import parse_soname_deps
+from portage.util._dyn_libs.NeededEntry import NeededEntry
+
+
+def _get_all_provides(vardb):
+ """
+ Get all of the sonames provided by all of the installed packages.
+	This does not bother to acquire a lock, since it's pretty safe to
+ assume that any packages merged or unmerged while this function
+ is running must be irrelevant.
+
+ @param vardb: an installed package database
+ @type vardb: vardbapi
+ @rtype: frozenset
+	@return: a frozenset of SonameAtom instances provided by all
+ installed packages
+ """
+
+ all_provides = []
+
+ for cpv in vardb.cpv_all():
+ try:
+ provides, = vardb.aux_get(cpv, ['PROVIDES'])
+ except KeyError:
+ # Since we don't hold a lock, assume this is due to a
+ # concurrent unmerge, and PROVIDES from the unmerged package
+ # are most likely negligible due to topologically sorted
+ # merge order. Also, note that it's possible for aux_get
+ # to succeed and return empty PROVIDES metadata if the file
+ # disappears (due to unmerge) before it can be read.
+ pass
+ else:
+ if provides:
+ all_provides.extend(parse_soname_deps(provides))
+
+ return frozenset(all_provides)
+
+
+def _get_unresolved_soname_deps(metadata_dir, all_provides):
+ """
+ Get files with unresolved soname dependencies.
+
+ @param metadata_dir: directory containing package metadata files
+ named REQUIRES and NEEDED.ELF.2
+ @type metadata_dir: str
+	@param all_provides: a frozenset of SonameAtom instances provided by
+ all installed packages
+ @type all_provides: frozenset
+ @rtype: list
+ @return: list of tuple(filename, tuple(unresolved sonames))
+ """
+ try:
+ with io.open(_unicode_encode(os.path.join(metadata_dir, 'REQUIRES'),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='rt', encoding=_encodings['repo.content'], errors='strict') as f:
+ requires = frozenset(parse_soname_deps(f.read()))
+ except EnvironmentError:
+ return []
+
+ unresolved_by_category = {}
+ for atom in requires:
+ if atom not in all_provides:
+ unresolved_by_category.setdefault(atom.multilib_category, set()).add(atom.soname)
+
+ needed_filename = os.path.join(metadata_dir, "NEEDED.ELF.2")
+ with io.open(_unicode_encode(needed_filename, encoding=_encodings['fs'], errors='strict'),
+ mode='rt', encoding=_encodings['repo.content'], errors='strict') as f:
+ needed = f.readlines()
+
+ unresolved_by_file = []
+ for l in needed:
+ l = l.rstrip("\n")
+ if not l:
+ continue
+ entry = NeededEntry.parse(needed_filename, l)
+ missing = unresolved_by_category.get(entry.multilib_category)
+ if not missing:
+ continue
+ # NOTE: This can contain some false positives in the case of
+ # missing DT_RPATH settings, since it's possible that a subset
+		# of package files have the desired DT_RPATH settings. However,
+ # since reported sonames are unresolved for at least some file(s),
+		# false positives of this sort should not be too annoying.
+ missing = [soname for soname in entry.needed if soname in missing]
+ if missing:
+ unresolved_by_file.append((entry.filename, tuple(missing)))
+
+ return unresolved_by_file
diff --git a/lib/portage/util/_eventloop/asyncio_event_loop.py b/lib/portage/util/_eventloop/asyncio_event_loop.py
index a11a10205..ce7e06923 100644
--- a/lib/portage/util/_eventloop/asyncio_event_loop.py
+++ b/lib/portage/util/_eventloop/asyncio_event_loop.py
@@ -1,10 +1,8 @@
-# Copyright 2018 Gentoo Foundation
+# Copyright 2018-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
import os
-import pdb
import signal
-import sys
try:
import asyncio as _real_asyncio
@@ -69,25 +67,14 @@ class AsyncioEventLoop(_AbstractEventLoop):
"""
loop.default_exception_handler(context)
if 'exception' in context:
- # If we have a tty then start the debugger, since in might
- # aid in diagnosis of the problem. If there's no tty, then
- # exit immediately.
- if all(s.isatty() for s in (sys.stdout, sys.stderr, sys.stdin)):
- # Restore default SIGINT handler, since emerge's Scheduler
- # has a SIGINT handler which delays exit until after
- # cleanup, and cleanup cannot occur here since the event
- # loop is suspended (see bug 672540).
- signal.signal(signal.SIGINT, signal.SIG_DFL)
- pdb.set_trace()
- else:
- # Normally emerge will wait for all coroutines to complete
- # after SIGTERM has been received. However, an unhandled
- # exception will prevent the interrupted coroutine from
- # completing, therefore use the default SIGTERM handler
- # in order to ensure that emerge exits immediately (though
- # uncleanly).
- signal.signal(signal.SIGTERM, signal.SIG_DFL)
- os.kill(os.getpid(), signal.SIGTERM)
+ # Normally emerge will wait for all coroutines to complete
+ # after SIGTERM has been received. However, an unhandled
+ # exception will prevent the interrupted coroutine from
+ # completing, therefore use the default SIGTERM handler
+ # in order to ensure that emerge exits immediately (though
+ # uncleanly).
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+ os.kill(os.getpid(), signal.SIGTERM)
def _create_future(self):
"""
diff --git a/lib/portage/util/compression_probe.py b/lib/portage/util/compression_probe.py
index 90880b1cd..7d595670b 100644
--- a/lib/portage/util/compression_probe.py
+++ b/lib/portage/util/compression_probe.py
@@ -1,6 +1,7 @@
-# Copyright 2015 Gentoo Foundation
+# Copyright 2015-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
+import ctypes
import errno
import re
import sys
@@ -45,7 +46,12 @@ _compressors = {
},
"zstd": {
"compress": "zstd ${BINPKG_COMPRESS_FLAGS}",
- "decompress": "zstd -d --long=31",
+ # If the compression windowLog was larger than the default of 27,
+ # then --long=windowLog needs to be passed to the decompressor.
+ # Therefore, pass a larger --long=31 value to the decompressor
+ # if the current architecture can support it, which is true when
+ # sizeof(long) is at least 8 bytes.
+ "decompress": "zstd -d" + (" --long=31" if ctypes.sizeof(ctypes.c_long) >= 8 else ""),
"package": "app-arch/zstd",
},
}
diff --git a/lib/portage/util/futures/_asyncio/__init__.py b/lib/portage/util/futures/_asyncio/__init__.py
index 7635dbb5e..f4b03891f 100644
--- a/lib/portage/util/futures/_asyncio/__init__.py
+++ b/lib/portage/util/futures/_asyncio/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2018 Gentoo Foundation
+# Copyright 2018-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
__all__ = (
@@ -139,7 +139,7 @@ def create_subprocess_exec(*args, **kwargs):
"""
loop = _wrap_loop(kwargs.pop('loop', None))
kwargs.setdefault('close_fds', _close_fds_default)
- if _asyncio_enabled and isinstance(loop, _AsyncioEventLoop):
+ if _asyncio_enabled and isinstance(loop._asyncio_wrapper, _AsyncioEventLoop):
# Use the real asyncio create_subprocess_exec (loop argument
# is deprecated since since Python 3.8).
return _real_asyncio.create_subprocess_exec(*args, **kwargs)
@@ -191,10 +191,10 @@ def ensure_future(coro_or_future, loop=None):
@return: an instance of Future
"""
loop = _wrap_loop(loop)
- if _asyncio_enabled and isinstance(loop, _AsyncioEventLoop):
+ if _asyncio_enabled and isinstance(loop._asyncio_wrapper, _AsyncioEventLoop):
# Use the real asyncio loop and ensure_future.
return _real_asyncio.ensure_future(
- coro_or_future, loop=loop._loop)
+ coro_or_future, loop=loop._asyncio_wrapper._loop)
if isinstance(coro_or_future, Future):
return coro_or_future
diff --git a/lib/portage/util/futures/compat_coroutine.py b/lib/portage/util/futures/compat_coroutine.py
index b745fd845..54fc316fe 100644
--- a/lib/portage/util/futures/compat_coroutine.py
+++ b/lib/portage/util/futures/compat_coroutine.py
@@ -87,21 +87,29 @@ class _GeneratorTask(object):
def __init__(self, generator, result, loop):
self._generator = generator
self._result = result
+ self._current_task = None
self._loop = loop
result.add_done_callback(self._cancel_callback)
loop.call_soon(self._next)
def _cancel_callback(self, result):
- if result.cancelled():
- self._generator.close()
+ if result.cancelled() and self._current_task is not None:
+ # The done callback for self._current_task invokes
+ # _next in either case here.
+ self._current_task.done() or self._current_task.cancel()
def _next(self, previous=None):
+ self._current_task = None
if self._result.cancelled():
if previous is not None:
# Consume exceptions, in order to avoid triggering
# the event loop's exception handler.
previous.cancelled() or previous.exception()
- return
+
+ # This will throw asyncio.CancelledError in the coroutine if
+ # there's an opportunity (yield) before the generator raises
+ # StopIteration.
+ previous = self._result
try:
if previous is None:
future = next(self._generator)
@@ -124,5 +132,6 @@ class _GeneratorTask(object):
if not self._result.cancelled():
self._result.set_exception(e)
else:
- future = asyncio.ensure_future(future, loop=self._loop)
- future.add_done_callback(self._next)
+ self._current_task = asyncio.ensure_future(future, loop=self._loop)
+ self._current_task.add_done_callback(self._next)
+
diff --git a/lib/portage/xml/metadata.py b/lib/portage/xml/metadata.py
index e479f2dd2..64246c828 100644
--- a/lib/portage/xml/metadata.py
+++ b/lib/portage/xml/metadata.py
@@ -34,22 +34,14 @@ __all__ = ('MetaDataXML', 'parse_metadata_use')
import sys
-if sys.hexversion < 0x2070000 or \
- (sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000):
- # Our _MetadataTreeBuilder usage is incompatible with
- # cElementTree in Python 2.6, 3.0, and 3.1:
- # File "/usr/lib/python2.6/xml/etree/ElementTree.py", line 644, in findall
- # assert self._root is not None
+try:
+ import xml.etree.cElementTree as etree
+except (SystemExit, KeyboardInterrupt):
+ raise
+except (ImportError, SystemError, RuntimeError, Exception):
+ # broken or missing xml support
+ # https://bugs.python.org/issue14988
import xml.etree.ElementTree as etree
-else:
- try:
- import xml.etree.cElementTree as etree
- except (SystemExit, KeyboardInterrupt):
- raise
- except (ImportError, SystemError, RuntimeError, Exception):
- # broken or missing xml support
- # https://bugs.python.org/issue14988
- import xml.etree.ElementTree as etree
try:
from xml.parsers.expat import ExpatError
diff --git a/lib/portage/xpak.py b/lib/portage/xpak.py
index e11f26e6c..c708190b9 100644
--- a/lib/portage/xpak.py
+++ b/lib/portage/xpak.py
@@ -1,4 +1,4 @@
-# Copyright 2001-2014 Gentoo Foundation
+# Copyright 2001-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
@@ -33,6 +33,7 @@ from portage import normalize_path
from portage import _encodings
from portage import _unicode_decode
from portage import _unicode_encode
+from portage.util.file_copy import copyfile
def addtolist(mylist, curdir):
"""(list, dir) --- Takes an array(list) and appends all files from dir down
@@ -328,7 +329,7 @@ class tbz2(object):
if break_hardlinks and self.filestat and self.filestat.st_nlink > 1:
tmp_fname = "%s.%d" % (self.file, os.getpid())
- shutil.copyfile(self.file, tmp_fname)
+ copyfile(self.file, tmp_fname)
try:
portage.util.apply_stat_permissions(self.file, self.filestat)
except portage.exception.OperationNotPermitted:
diff --git a/man/emerge.1 b/man/emerge.1
index a24a9529f..aa28ab337 100644
--- a/man/emerge.1
+++ b/man/emerge.1
@@ -319,7 +319,7 @@ finished calculating the graph.
\fB--alert\fR may be 'y' or 'n'. 'true' and 'false' mean the same thing.
Using \fB--alert\fR without an option is the same as using it with 'y'.
-Try it with 'emerge -aA portage'.
+Try it with 'emerge -aA sys-apps/portage'.
If your terminal emulator is set up to make '\\a' into a window manager
urgency hint, move your cursor to a different window to get the effect.
@@ -1294,7 +1294,7 @@ permanent, you can put them in /etc/portage/package.use instead.
If \fBemerge \-\-update @system\fR or \fBemerge \-\-update @world\fR
fails with an error message, it may be that an ebuild uses some
newer feature not present in this version of \fBemerge\fR. You
-can use \fBemerge \-\-update portage\fR to upgrade to the lastest
+can use \fBemerge \-\-update sys-apps/portage\fR to upgrade to the latest
version, which should support any necessary new features.
.SH "MASKED PACKAGES"
\fINOTE: Please use caution when using development packages. Problems
@@ -1498,7 +1498,7 @@ Contains a log of all the fetches in the previous emerge invocation.
.TP
.B
/var/log/portage/elog/summary.log
-Contains the emerge summaries. Installs \fI/etc/logrotate/elog-save-summary\fR.
+Contains the emerge summaries. Installs \fI/etc/logrotate.d/elog-save-summary\fR.
.SH "SEE ALSO"
.BR "emerge \-\-help",
.BR quickpkg (1),
diff --git a/man/make.conf.5 b/man/make.conf.5
index 683ce26c2..ab00cb7d7 100644
--- a/man/make.conf.5
+++ b/man/make.conf.5
@@ -1,4 +1,4 @@
-.TH "MAKE.CONF" "5" "Nov 2019" "Portage VERSION" "Portage"
+.TH "MAKE.CONF" "5" "May 2020" "Portage VERSION" "Portage"
.SH "NAME"
make.conf \- custom settings for Portage
.SH "SYNOPSIS"
@@ -115,7 +115,7 @@ This variable is used to determine the compression used for \fIbinary
packages\fR. Supported settings and compression algorithms are: bzip2, gzip,
lz4, lzip, lzop, xz, zstd.
.br
-Defaults to "bzip2".
+Defaults to "zstd".
.br
.I Example:
.nf
@@ -610,6 +610,10 @@ If \fIcollision\-protect\fR is enabled then it takes precedence over
Output a verbose trace of python execution to stderr when a command's
\-\-debug option is enabled.
.TP
+.B qa\-unresolved\-soname\-deps
+Trigger a QA warning when a package installs files with unresolved soname
+dependencies.
+.TP
.B sandbox
Enable sandbox\-ing when running \fBemerge\fR(1) and \fBebuild\fR(1).
On Mac OS X platforms that have /usr/bin/sandbox-exec available (10.5
diff --git a/repoman/RELEASE-NOTES b/repoman/RELEASE-NOTES
index 849cf6a42..80541fa5c 100644
--- a/repoman/RELEASE-NOTES
+++ b/repoman/RELEASE-NOTES
@@ -1,6 +1,17 @@
Release Notes; upgrade information mainly.
Features/major bugfixes are listed in NEWS
+repoman-2.3.22
+==================================
+* Bug Fixes:
+ - Bug 712106 only stage changes in index for commit mode
+
+repoman-2.3.21
+==================================
+* Bug Fixes:
+ - Bug 712106 prevent spurious copyright header updates
+
+
repoman-2.3.20
==================================
* Bug Fixes:
diff --git a/repoman/cnf/linechecks/linechecks.yaml b/repoman/cnf/linechecks/linechecks.yaml
index 410bcd9c5..2182b467a 100644
--- a/repoman/cnf/linechecks/linechecks.yaml
+++ b/repoman/cnf/linechecks/linechecks.yaml
@@ -28,7 +28,6 @@ errors:
PRESERVE_OLD_LIB: 'Ebuild calls deprecated preserve_old_lib'
BUILT_WITH_USE: 'built_with_use'
NO_OFFSET_WITH_HELPERS: 'Helper function is used with D, ROOT, ED, EROOT or EPREFIX'
- SANDBOX_ADDPREDICT: 'Ebuild calls addpredict'
USEQ_ERROR: 'Ebuild calls deprecated useq function'
HASQ_ERROR: 'Ebuild calls deprecated hasq function'
URI_HTTPS: 'Ebuild uses http:// but should use https://'
diff --git a/repoman/cnf/repository/repository.yaml b/repoman/cnf/repository/repository.yaml
index 935260424..ad00d18c1 100644
--- a/repoman/cnf/repository/repository.yaml
+++ b/repoman/cnf/repository/repository.yaml
@@ -71,6 +71,5 @@ linechecks_modules:
uselessdodoc
whitespace
blankline
- addpredict
noasneeded
diff --git a/repoman/lib/repoman/_subprocess.py b/repoman/lib/repoman/_subprocess.py
index b6c19bda3..2ca434010 100644
--- a/repoman/lib/repoman/_subprocess.py
+++ b/repoman/lib/repoman/_subprocess.py
@@ -20,15 +20,6 @@ def repoman_getstatusoutput(cmd):
"""
args = portage.util.shlex_split(cmd)
- if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
- not os.path.isabs(args[0]):
- # Python 3.1 _execvp throws TypeError for non-absolute executable
- # path passed as bytes (see https://bugs.python.org/issue8513).
- fullname = find_binary(args[0])
- if fullname is None:
- raise portage.exception.CommandNotFound(args[0])
- args[0] = fullname
-
encoding = _encodings['fs']
args = [
_unicode_encode(x, encoding=encoding, errors='strict') for x in args]
@@ -53,15 +44,6 @@ class repoman_popen(portage.proxy.objectproxy.ObjectProxy):
def __init__(self, cmd):
args = portage.util.shlex_split(cmd)
- if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
- not os.path.isabs(args[0]):
- # Python 3.1 _execvp throws TypeError for non-absolute executable
- # path passed as bytes (see https://bugs.python.org/issue8513).
- fullname = find_binary(args[0])
- if fullname is None:
- raise portage.exception.CommandNotFound(args[0])
- args[0] = fullname
-
encoding = _encodings['fs']
args = [
_unicode_encode(x, encoding=encoding, errors='strict')
diff --git a/repoman/lib/repoman/gpg.py b/repoman/lib/repoman/gpg.py
index a3c12b3c9..7dac46f41 100644
--- a/repoman/lib/repoman/gpg.py
+++ b/repoman/lib/repoman/gpg.py
@@ -50,15 +50,6 @@ def gpgsign(filename, repoman_settings, options):
# Encode unicode manually for bug #310789.
gpgcmd = portage.util.shlex_split(gpgcmd)
- if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
- not os.path.isabs(gpgcmd[0]):
- # Python 3.1 _execvp throws TypeError for non-absolute executable
- # path passed as bytes (see https://bugs.python.org/issue8513).
- fullname = find_binary(gpgcmd[0])
- if fullname is None:
- raise portage.exception.CommandNotFound(gpgcmd[0])
- gpgcmd[0] = fullname
-
gpgcmd = [
_unicode_encode(arg, encoding=_encodings['fs'], errors='strict')
for arg in gpgcmd]
diff --git a/repoman/lib/repoman/metadata.py b/repoman/lib/repoman/metadata.py
index 11ec1aaf8..4537d2ce2 100644
--- a/repoman/lib/repoman/metadata.py
+++ b/repoman/lib/repoman/metadata.py
@@ -5,21 +5,14 @@ from __future__ import print_function, unicode_literals
import errno
import logging
import sys
-import tempfile
import time
-try:
- from urllib.parse import urlparse
-except ImportError:
- from urlparse import urlparse
-
-
# import our initialized portage instance
from repoman._portage import portage
from portage import os
-from portage import shutil
from portage.output import green
+from portage.package.ebuild.fetch import fetch
if sys.hexversion >= 0x3000000:
basestring = str
@@ -64,41 +57,17 @@ def fetch_metadata_xsd(metadata_xsd, repoman_settings):
"%s the local copy of metadata.xsd "
"needs to be refetched, doing that now" % green("***"))
print()
- parsed_url = urlparse(metadata_xsd_uri)
- setting = 'FETCHCOMMAND_' + parsed_url.scheme.upper()
- fcmd = repoman_settings.get(setting)
- if not fcmd:
- fcmd = repoman_settings.get('FETCHCOMMAND')
- if not fcmd:
- logging.error("FETCHCOMMAND is unset")
- return False
-
- destdir = repoman_settings["DISTDIR"]
- fd, metadata_xsd_tmp = tempfile.mkstemp(
- prefix='metadata.xsd.', dir=destdir)
- os.close(fd)
+
+ if not fetch([metadata_xsd_uri], repoman_settings, force=1, try_mirrors=0):
+ logging.error(
+ "failed to fetch metadata.xsd from '%s'" % metadata_xsd_uri)
+ return False
try:
- if not portage.getbinpkg.file_get(
- metadata_xsd_uri, destdir, fcmd=fcmd,
- filename=os.path.basename(metadata_xsd_tmp)):
- logging.error(
- "failed to fetch metadata.xsd from '%s'" % metadata_xsd_uri)
- return False
-
- try:
- portage.util.apply_secpass_permissions(
- metadata_xsd_tmp,
- gid=portage.data.portage_gid, mode=0o664, mask=0o2)
- except portage.exception.PortageException:
- pass
-
- shutil.move(metadata_xsd_tmp, metadata_xsd)
- finally:
- try:
- os.unlink(metadata_xsd_tmp)
- except OSError:
- pass
+ portage.util.apply_secpass_permissions(metadata_xsd,
+ gid=portage.data.portage_gid, mode=0o664, mask=0o2)
+ except portage.exception.PortageException:
+ pass
return True
diff --git a/repoman/lib/repoman/modules/linechecks/deprecated/inherit.py b/repoman/lib/repoman/modules/linechecks/deprecated/inherit.py
index 9cef086da..f307d46fd 100644
--- a/repoman/lib/repoman/modules/linechecks/deprecated/inherit.py
+++ b/repoman/lib/repoman/modules/linechecks/deprecated/inherit.py
@@ -11,6 +11,8 @@ class InheritDeprecated(LineCheck):
# deprecated eclass : new eclass (False if no new eclass)
deprecated_eclasses = {
+ "autotools-multilib": "multilib-minimal",
+ "autotools-utils": False,
"base": False,
"bash-completion": "bash-completion-r1",
"boost-utils": False,
@@ -18,8 +20,10 @@ class InheritDeprecated(LineCheck):
"confutils": False,
"distutils": "distutils-r1",
"epatch": "(eapply since EAPI 6)",
+ "fdo-mime": "xdg-utils",
"games": False,
"gems": "ruby-fakegem",
+ "git-2": "git-r3",
"gpe": False,
"gst-plugins-bad": "gstreamer",
"gst-plugins-base": "gstreamer",
@@ -33,6 +37,7 @@ class InheritDeprecated(LineCheck):
"user": "GLEP 81",
"versionator": "eapi7-ver (built-in since EAPI 7)",
"x-modular": "xorg-2",
+ "xfconf": False,
}
_inherit_re = re.compile(r'^\s*inherit\s(.*)$')
diff --git a/repoman/lib/repoman/modules/linechecks/workaround/__init__.py b/repoman/lib/repoman/modules/linechecks/workaround/__init__.py
index 425085a5c..694695015 100644
--- a/repoman/lib/repoman/modules/linechecks/workaround/__init__.py
+++ b/repoman/lib/repoman/modules/linechecks/workaround/__init__.py
@@ -10,12 +10,6 @@ module_spec = {
'name': 'do',
'description': doc,
'provides':{
- 'addpredict-check': {
- 'name': "addpredict",
- 'sourcefile': "workarounds",
- 'class': "SandboxAddpredict",
- 'description': doc,
- },
'noasneeded-check': {
'name': "noasneeded",
'sourcefile': "workarounds",
diff --git a/repoman/lib/repoman/modules/linechecks/workaround/workarounds.py b/repoman/lib/repoman/modules/linechecks/workaround/workarounds.py
index 37cb54314..768a47e8e 100644
--- a/repoman/lib/repoman/modules/linechecks/workaround/workarounds.py
+++ b/repoman/lib/repoman/modules/linechecks/workaround/workarounds.py
@@ -9,10 +9,3 @@ class NoAsNeeded(LineCheck):
repoman_check_name = 'upstream.workaround'
re = re.compile(r'.*\$\(no-as-needed\)')
error = 'NO_AS_NEEDED'
-
-
-class SandboxAddpredict(LineCheck):
- """Check for calls to the addpredict function."""
- repoman_check_name = 'upstream.workaround'
- re = re.compile(r'(^|\s)addpredict\b')
- error = 'SANDBOX_ADDPREDICT'
diff --git a/repoman/lib/repoman/modules/vcs/git/changes.py b/repoman/lib/repoman/modules/vcs/git/changes.py
index 7e9ac1eb5..9819831b5 100644
--- a/repoman/lib/repoman/modules/vcs/git/changes.py
+++ b/repoman/lib/repoman/modules/vcs/git/changes.py
@@ -29,8 +29,21 @@ class Changes(ChangesBase):
'''
super(Changes, self).__init__(options, repo_settings)
- def _scan(self):
- '''VCS type scan function, looks for all detectable changes'''
+ def _scan(self, _reindex=None):
+ '''
+ VCS type scan function, looks for all detectable changes
+
+ @param _reindex: ensure that the git index reflects the state on
+ disk for files returned by git diff-index (this parameter is
+ used in recursive calls and it's not intended to be used for
+ any other reason)
+ @type _reindex: bool
+ '''
+ # Automatically reindex for commit mode, but not for other modes
+ # where the user might not want changes to be staged in the index.
+ if _reindex is None and self.options.mode == 'commit':
+ _reindex = True
+
with repoman_popen(
"git diff-index --name-only "
"--relative --diff-filter=M HEAD") as f:
@@ -51,6 +64,9 @@ class Changes(ChangesBase):
removed = f.readlines()
self.removed = ["./" + elem[:-1] for elem in removed]
del removed
+ if _reindex and (self.changed or self.new or self.removed):
+ self.update_index([], self.changed + self.new + self.removed)
+ self._scan(_reindex=False)
@property
def unadded(self):
@@ -91,7 +107,7 @@ class Changes(ChangesBase):
# of the working tree.
myfiles = mymanifests + myupdates
myfiles.sort()
- update_index_cmd = ["git", "update-index"]
+ update_index_cmd = ["git", "update-index", "--add", "--remove"]
update_index_cmd.extend(f.lstrip("./") for f in myfiles)
if self.options.pretend:
print("(%s)" % (" ".join(update_index_cmd),))
diff --git a/repoman/runtests b/repoman/runtests
index 1ef52f482..bbda4526f 100755
--- a/repoman/runtests
+++ b/repoman/runtests
@@ -24,14 +24,14 @@ import tempfile
# These are the versions we fully support and require to pass tests.
PYTHON_SUPPORTED_VERSIONS = [
'2.7',
- '3.5',
'3.6',
- '3.7'
+ '3.7',
+ '3.8'
]
# The rest are just "nice to have".
PYTHON_NICE_VERSIONS = [
- 'pypy',
- '3.8'
+ 'pypy3',
+ '3.9'
]
EPREFIX = os.environ.get('PORTAGE_OVERRIDE_EPREFIX', '/')
diff --git a/repoman/setup.py b/repoman/setup.py
index d8cf68a99..c3a7adad9 100755
--- a/repoman/setup.py
+++ b/repoman/setup.py
@@ -450,7 +450,7 @@ def get_manpages():
setup(
name = 'repoman',
- version = '2.3.20',
+ version = '2.3.22',
url = 'https://wiki.gentoo.org/wiki/Project:Portage',
author = 'Gentoo Portage Development Team',
author_email = 'dev-portage@gentoo.org',
diff --git a/runtests b/runtests
index c9603a338..15cc0bc77 100755
--- a/runtests
+++ b/runtests
@@ -24,14 +24,14 @@ import tempfile
# These are the versions we fully support and require to pass tests.
PYTHON_SUPPORTED_VERSIONS = [
'2.7',
- '3.5',
'3.6',
- '3.7'
+ '3.7',
+ '3.8'
]
# The rest are just "nice to have".
PYTHON_NICE_VERSIONS = [
- 'pypy',
- '3.8'
+ 'pypy3',
+ '3.9'
]
EPREFIX = os.environ.get('PORTAGE_OVERRIDE_EPREFIX', '/')
diff --git a/setup.py b/setup.py
index 96bdf28e9..7d8cdcd04 100755
--- a/setup.py
+++ b/setup.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-# Copyright 1998-2019 Gentoo Authors
+# Copyright 1998-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
@@ -30,7 +30,7 @@ import sys
# TODO:
-# - smarter rebuilds of docs w/ 'install_docbook' and 'install_epydoc'.
+# - smarter rebuilds of docs w/ 'install_docbook' and 'install_apidoc'.
# Dictionary of scripts. The structure is
# key = location in filesystem to install the scripts
@@ -135,8 +135,8 @@ class docbook(Command):
'-m', 'doc/custom.xsl', f, 'doc/portage.docbook'])
-class epydoc(Command):
- """ Build API docs using epydoc. """
+class apidoc(Command):
+ """ Build API docs using apidoc. """
user_options = [
]
@@ -160,14 +160,8 @@ class epydoc(Command):
pass
process_env['PYTHONPATH'] = pythonpath
- subprocess.check_call(['epydoc', '-o', 'epydoc',
- '--name', self.distribution.get_name(),
- '--url', self.distribution.get_url(),
- '-qq', '--no-frames', '--show-imports',
- '--exclude', 'portage.tests',
- '_emerge', 'portage'],
+ subprocess.check_call(['make', '-C', 'doc/api', 'html'],
env = process_env)
- os.remove('epydoc/api-objects.txt')
class install_docbook(install_data):
@@ -194,8 +188,8 @@ class install_docbook(install_data):
install_data.run(self)
-class install_epydoc(install_data):
- """ install_data for epydoc docs """
+class install_apidoc(install_data):
+ """ install_data for apidoc docs """
user_options = install_data.user_options + [
('htmldir=', None, "HTML documentation install directory"),
@@ -210,10 +204,11 @@ class install_epydoc(install_data):
install_data.finalize_options(self)
def run(self):
- if not os.path.exists('epydoc/index.html'):
- self.run_command('epydoc')
+ if not os.path.exists('doc/api/build/html/index.html'):
+ self.run_command('apidoc')
self.data_files = [
- (os.path.join(self.htmldir, 'api'), glob.glob('epydoc/*')),
+ (os.path.join(self.htmldir, 'api'), glob.glob('doc/api/build/html/*.html') + glob.glob('doc/api/build/html/*.js')),
+ (os.path.join(self.htmldir, 'api/_static'), glob.glob('doc/api/build/html/_static/*')),
]
install_data.run(self)
@@ -298,8 +293,8 @@ class x_clean(clean):
if os.path.isdir('doc/fragment'):
remove_tree('doc/fragment')
- if os.path.isdir('epydoc'):
- remove_tree('epydoc')
+ if os.path.isdir('doc/api/build'):
+ remove_tree('doc/api/build')
def clean_tests(self):
# do not remove incorrect dirs accidentally
@@ -662,7 +657,7 @@ class build_ext(_build_ext):
setup(
name = 'portage',
- version = '2.3.84',
+ version = '2.3.100',
url = 'https://wiki.gentoo.org/wiki/Project:Portage',
author = 'Gentoo Portage Development Team',
author_email = 'dev-portage@gentoo.org',
@@ -699,11 +694,11 @@ setup(
'build_tests': build_tests,
'clean': x_clean,
'docbook': docbook,
- 'epydoc': epydoc,
+ 'apidoc': apidoc,
'install': x_install,
'install_data': x_install_data,
'install_docbook': install_docbook,
- 'install_epydoc': install_epydoc,
+ 'install_apidoc': install_apidoc,
'install_lib': x_install_lib,
'install_scripts': x_install_scripts,
'install_scripts_bin': x_install_scripts_bin,
diff --git a/tox.ini b/tox.ini
index 5ba192d2e..79b5b45cb 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,12 +1,12 @@
[tox]
-envlist = py27,py36,py37,py38,pypy3
+envlist = py27,py36,py37,py38,py39,pypy3
skipsdist = True
[testenv]
deps =
pygost
pyyaml
- py27,py36,py37,py38,pypy3: lxml!=4.2.0
+ py27,py36,py37,py38,py39,pypy3: lxml!=4.2.0
py27: pyblake2
py27: pysha3
setenv =