30 files changed, 672 insertions, 236 deletions
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 000000000..d4b960dc3 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,43 @@ +name: CI + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +jobs: + build: + + runs-on: ubuntu-20.04 + strategy: + matrix: + python-version: + - '3.6' + - '3.7' + - '3.8' + - '3.9' + - '3.10.0-alpha.3' + - 'pypy-3.6' + + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Install python dependencies + run: | + set -xe + sudo apt-get install -y --no-install-recommends libxslt-dev libxml2-dev libxml2-utils zstd + python -VV + python -m site + python -m pip install --upgrade pip + python -m pip install tox tox-gh-actions + - name: Test ./setup.py install --root=/tmp/install-root + run: | + printf "[build_ext]\nportage-ext-modules=true" >> setup.cfg + ./setup.py install --root=/tmp/install-root + - name: Run tox targets for ${{ matrix.python-version }} + run: | + tox -vv diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index d2935fdab..000000000 --- a/.travis.yml +++ /dev/null @@ -1,26 +0,0 @@ -dist: bionic -language: python -python: - - 3.6 - - 3.7 - - 3.8 - - 3.9-dev - - pypy3 - -# command to install dependencies -before_install: - # Use "dist: bionic" to get a zstd with --long support. - - sudo apt-get -y install zstd -install: - - pip install tox - -script: - - printf "[build_ext]\nportage-ext-modules=true" >> setup.cfg - - ./setup.py test - - ./setup.py install --root=/tmp/install-root - - if [[ ${TRAVIS_PYTHON_VERSION/-dev/} == ?.? ]]; then - TOX_PYTHON_VERSION=${TRAVIS_PYTHON_VERSION/-dev/}; - tox -e py${TOX_PYTHON_VERSION/./}; - else - tox -e ${TRAVIS_PYTHON_VERSION}; - fi @@ -1,5 +1,11 @@ News (mainly features/major bug fixes) +portage-3.0.13 +-------------- +* FETCHCOMMAND now supports a \${DIGESTS} placeholder which expands + to a space separated list of digests. Refer to the FETCHCOMMAND + documentation in the make.conf(5) man page. + portage-3.0.6 -------------- * emerge --search now detects regular expressions automatically. This diff --git a/RELEASE-NOTES b/RELEASE-NOTES index 61c3c3d16..c2a5607cd 100644 --- a/RELEASE-NOTES +++ b/RELEASE-NOTES @@ -1,6 +1,25 @@ Release Notes; upgrade information mainly. 
Features/major bugfixes are listed in NEWS +portage-3.0.13 +================================== +* Bug Fixes: + - Bug 763339 always allow event loops to run in threads + - Bug 764764 fix virtual/dist-kernel slot operator rebuilds + - Bug 764905 wrap asyncio child watcher for thread safety + +portage-3.0.12 +================================== +* Bug Fixes: + - Bug 758740 Use default asyncio event loop in child processes + - Bug 758755 Use default asyncio event loop in API consumer threads + +portage-3.0.11 +================================== +* Bug Fixes: + - Bug 756961 handle dev-lang/rust[system-bootstrap] dependency cycle + - Bug 757306 backtracking: fix virtual choices for circular deps + portage-3.0.10 ================================== * Bug Fixes: diff --git a/bin/misc-functions.sh b/bin/misc-functions.sh index c2a16cbe0..d7009d7eb 100755 --- a/bin/misc-functions.sh +++ b/bin/misc-functions.sh @@ -194,7 +194,7 @@ install_qa_check() { fi echo "${obj} ${needed}" >> "${PORTAGE_BUILDDIR}"/build-info/NEEDED - echo "${arch:3};${obj};${soname};${rpath};${needed}" >> "${PORTAGE_BUILDDIR}"/build-info/NEEDED.ELF.2 + echo "${arch#EM_};${obj};${soname};${rpath};${needed}" >> "${PORTAGE_BUILDDIR}"/build-info/NEEDED.ELF.2 done } [ -n "${QA_SONAME_NO_SYMLINK}" ] && \ diff --git a/lib/_emerge/DepPrioritySatisfiedRange.py b/lib/_emerge/DepPrioritySatisfiedRange.py index fb0d7db4e..f546590e0 100644 --- a/lib/_emerge/DepPrioritySatisfiedRange.py +++ b/lib/_emerge/DepPrioritySatisfiedRange.py @@ -93,6 +93,7 @@ class DepPrioritySatisfiedRange: ignore_medium = _ignore_runtime ignore_medium_soft = _ignore_satisfied_buildtime_slot_op ignore_medium_post = _ignore_runtime_post + ignore_medium_post_satisifed = _ignore_satisfied_runtime_post ignore_soft = _ignore_optional diff --git a/lib/_emerge/depgraph.py b/lib/_emerge/depgraph.py index d10474ab3..2bf04406f 100644 --- a/lib/_emerge/depgraph.py +++ b/lib/_emerge/depgraph.py @@ -85,6 +85,8 @@ from _emerge.resolver.output import Display, format_unmatched_atom # Exposes a depgraph interface to dep_check. _dep_check_graph_interface = collections.namedtuple('_dep_check_graph_interface',( + # Checks if parent package will replace child. + 'will_replace_child', # Indicates a removal action, like depclean or prune. 'removal_action', # Checks if update is desirable for a given package. @@ -507,6 +509,7 @@ class _dynamic_depgraph_config: # Track missed updates caused by solved conflicts. self._conflict_missed_update = collections.defaultdict(dict) dep_check_iface = _dep_check_graph_interface( + will_replace_child=depgraph._will_replace_child, removal_action="remove" in myparams, want_update_pkg=depgraph._want_update_pkg, ) @@ -2070,6 +2073,12 @@ class depgraph: for parent, atom in self._dynamic_config._parent_atoms.get(existing_pkg, []): if isinstance(parent, Package): if parent in built_slot_operator_parents: + if hasattr(atom, '_orig_atom'): + # If atom is the result of virtual expansion, then + # derefrence it to _orig_atom so that it will be correctly + # handled as a built slot operator dependency when + # appropriate (see bug 764764). 
+ atom = atom._orig_atom # This parent may need to be rebuilt, therefore # discard its soname and built slot operator # dependency components which are not necessarily @@ -2128,6 +2137,22 @@ class depgraph: allow_repo=True) if not atom_set.findAtomForPackage(candidate_pkg, modified_use=self._pkg_use_enabled(candidate_pkg)): + if debug: + parent_atoms = [] + for other_parent, other_atom in self._dynamic_config._parent_atoms.get(existing_pkg, []): + if other_parent is parent: + parent_atoms.append(other_atom) + msg = ( + "", + "", + "check_reverse_dependencies:", + " candidate package does not match atom '%s': %s" % (atom, candidate_pkg), + " parent: %s" % parent, + " parent atoms: %s" % " ".join(parent_atoms), + "", + ) + writemsg_level("\n".join(msg), + noiselevel=-1, level=logging.DEBUG) return False return True @@ -3104,6 +3129,22 @@ class depgraph: self._frozen_config.myopts, modified_use=self._pkg_use_enabled(pkg))), level=logging.DEBUG, noiselevel=-1) + elif (pkg.installed and isinstance(myparent, Package) and + pkg.root == myparent.root and + pkg.slot_atom == myparent.slot_atom): + # If the parent package is replacing the child package then + # there's no slot conflict. Since the child will be replaced, + # do not add it to the graph. No attempt will be made to + # satisfy its dependencies, which is unsafe if it has any + # missing dependencies, as discussed in bug 199856. + if debug: + writemsg_level( + "%s%s %s\n" % ("Replace Child:".ljust(15), + pkg, pkg_use_display(pkg, + self._frozen_config.myopts, + modified_use=self._pkg_use_enabled(pkg))), + level=logging.DEBUG, noiselevel=-1) + return 1 else: if debug: @@ -5877,6 +5918,27 @@ class depgraph: (arg_atoms or update) and not self._too_deep(depth)) + def _will_replace_child(self, parent, root, atom): + """ + Check if a given parent package will replace a child package + for the given root and atom. + + @param parent: parent package + @type parent: Package + @param root: child root + @type root: str + @param atom: child atom + @type atom: Atom + @rtype: Package + @return: child package to replace, or None + """ + if parent.root != root or parent.cp != atom.cp: + return None + for child in self._iter_match_pkgs(self._frozen_config.roots[root], "installed", atom): + if parent.slot_atom == child.slot_atom: + return child + return None + def _too_deep(self, depth): """ Check if a package depth is deeper than the max allowed depth. @@ -6440,19 +6502,21 @@ class depgraph: # Calculation of USE for unbuilt ebuilds is relatively # expensive, so it is only performed lazily, after the # above visibility checks are complete. - - myarg = None - try: - for myarg, myarg_atom in self._iter_atoms_for_pkg(pkg): - if myarg.force_reinstall: - reinstall = True - break - except InvalidDependString: - if not installed: - # masked by corruption - continue - if not installed and myarg: - found_available_arg = True + effective_parent = parent or self._select_atoms_parent + if not (effective_parent and self._will_replace_child( + effective_parent, root, atom)): + myarg = None + try: + for myarg, myarg_atom in self._iter_atoms_for_pkg(pkg): + if myarg.force_reinstall: + reinstall = True + break + except InvalidDependString: + if not installed: + # masked by corruption + continue + if not installed and myarg: + found_available_arg = True if atom.package and atom.unevaluated_atom.use: #Make sure we don't miss a 'missing IUSE'. 
@@ -8010,18 +8074,18 @@ class depgraph: (selected_nodes[0],), noiselevel=-1) if selected_nodes and ignore_priority is not None: - # Try to merge ignored medium_post deps as soon as possible + # Try to merge neglected medium_post deps as soon as possible # if they're not satisfied by installed packages. for node in selected_nodes: children = set(mygraph.child_nodes(node)) - soft = children.difference( + medium_post_satisifed = children.difference( mygraph.child_nodes(node, ignore_priority = \ - DepPrioritySatisfiedRange.ignore_soft)) + DepPrioritySatisfiedRange.ignore_medium_post_satisifed)) medium_post = children.difference( mygraph.child_nodes(node, ignore_priority=DepPrioritySatisfiedRange.ignore_medium_post)) - medium_post -= soft + medium_post -= medium_post_satisifed for child in medium_post: if child in selected_nodes: continue diff --git a/lib/portage/__init__.py b/lib/portage/__init__.py index 4d4b590a8..178d724db 100644 --- a/lib/portage/__init__.py +++ b/lib/portage/__init__.py @@ -9,6 +9,7 @@ VERSION = "HEAD" # =========================================================================== try: + import asyncio import sys import errno if not hasattr(errno, 'ESTALE'): @@ -359,7 +360,8 @@ except (ImportError, OSError) as e: # END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END # =========================================================================== -_python_interpreter = os.path.realpath(sys.executable) +_python_interpreter = (sys.executable if os.environ.get("VIRTUAL_ENV") + else os.path.realpath(sys.executable)) _bin_path = PORTAGE_BIN_PATH _pym_path = PORTAGE_PYM_PATH _not_installed = os.path.isfile(os.path.join(PORTAGE_BASE_PATH, ".portage_not_installed")) @@ -373,6 +375,9 @@ class _ForkWatcher: @staticmethod def hook(_ForkWatcher): _ForkWatcher.current_pid = _os.getpid() + # Force instantiation of a new event loop policy as a workaround + # for https://bugs.python.org/issue22087. + asyncio.set_event_loop_policy(None) _ForkWatcher.hook(_ForkWatcher) diff --git a/lib/portage/dbapi/vartree.py b/lib/portage/dbapi/vartree.py index 1547d2f6d..f3d74cf82 100644 --- a/lib/portage/dbapi/vartree.py +++ b/lib/portage/dbapi/vartree.py @@ -41,7 +41,6 @@ portage.proxy.lazyimport.lazyimport(globals(), 'portage.util._dyn_libs.LinkageMapELF:LinkageMapELF@LinkageMap', 'portage.util._dyn_libs.NeededEntry:NeededEntry', 'portage.util._async.SchedulerInterface:SchedulerInterface', - 'portage.util._eventloop.EventLoop:EventLoop', 'portage.util._eventloop.global_event_loop:global_event_loop', 'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,vercmp,' + \ '_get_slot_re,_pkgsplit@pkgsplit,_pkg_str,_unknown_repo', diff --git a/lib/portage/dep/dep_check.py b/lib/portage/dep/dep_check.py index 60c8ec6d0..3bed6c348 100644 --- a/lib/portage/dep/dep_check.py +++ b/lib/portage/dep/dep_check.py @@ -356,6 +356,7 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None, # Alias the trees we'll be checking availability against parent = trees[myroot].get("parent") + virt_parent = trees[myroot].get("virt_parent") priority = trees[myroot].get("priority") graph_db = trees[myroot].get("graph_db") graph = trees[myroot].get("graph") @@ -404,9 +405,15 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None, for atom in atoms: if atom.blocker: continue + + # It's not a downgrade if parent is replacing child. 
+ replacing = (parent and graph_interface and + graph_interface.will_replace_child(parent, myroot, atom)) # Ignore USE dependencies here since we don't want USE # settings to adversely affect || preference evaluation. avail_pkg = mydbapi_match_pkgs(atom.without_use) + if not avail_pkg and replacing: + avail_pkg = [replacing] if avail_pkg: avail_pkg = avail_pkg[-1] # highest (ascending order) avail_slot = Atom("%s:%s" % (atom.cp, avail_pkg.slot)) @@ -415,7 +422,7 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None, all_use_satisfied = False break - if graph_db is not None and downgrade_probe is not None: + if not replacing and graph_db is not None and downgrade_probe is not None: slot_matches = graph_db.match_pkgs(avail_slot) if (len(slot_matches) > 1 and avail_pkg < slot_matches[-1] and @@ -462,7 +469,7 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None, avail_pkg = avail_pkg_use avail_slot = Atom("%s:%s" % (atom.cp, avail_pkg.slot)) - if downgrade_probe is not None and graph is not None: + if not replacing and downgrade_probe is not None and graph is not None: highest_in_slot = mydbapi_match_pkgs(avail_slot) highest_in_slot = (highest_in_slot[-1] if highest_in_slot else None) @@ -575,14 +582,14 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None, this_choice.all_in_graph = all_in_graph circular_atom = None - if not (parent is None or priority is None) and \ - (parent.onlydeps or - (priority.buildtime and not priority.satisfied and not priority.optional)): + if parent and parent.onlydeps: # Check if the atom would result in a direct circular - # dependency and try to avoid that if it seems likely - # to be unresolvable. This is only relevant for - # buildtime deps that aren't already satisfied by an - # installed package. + # dependency and avoid that for --onlydeps arguments + # since it can defeat the purpose of --onlydeps. + # This check should only be used for --onlydeps + # arguments, since it can interfere with circular + # dependency backtracking choices, causing the test + # case for bug 756961 to fail. 
cpv_slot_list = [parent] for atom in atoms: if atom.blocker: @@ -596,8 +603,10 @@ def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None, if match_from_list(atom, cpv_slot_list): circular_atom = atom break - else: - for circular_child in circular_dependency.get(parent, []): + if circular_atom is None and circular_dependency is not None: + for circular_child in itertools.chain( + circular_dependency.get(parent, []), + circular_dependency.get(virt_parent, [])): for atom in atoms: if not atom.blocker and atom.match(circular_child): circular_atom = atom diff --git a/lib/portage/package/ebuild/doebuild.py b/lib/portage/package/ebuild/doebuild.py index 3b1991b28..f6cee4518 100644 --- a/lib/portage/package/ebuild/doebuild.py +++ b/lib/portage/package/ebuild/doebuild.py @@ -39,7 +39,6 @@ portage.proxy.lazyimport.lazyimport(globals(), 'portage.util._dyn_libs.NeededEntry:NeededEntry', 'portage.util._dyn_libs.soname_deps:SonameDepsProcessor', 'portage.util._async.SchedulerInterface:SchedulerInterface', - 'portage.util._eventloop.EventLoop:EventLoop', 'portage.util._eventloop.global_event_loop:global_event_loop', 'portage.util.ExtractKernelVersion:ExtractKernelVersion' ) diff --git a/lib/portage/package/ebuild/fetch.py b/lib/portage/package/ebuild/fetch.py index ca031f31e..e0fecaf23 100644 --- a/lib/portage/package/ebuild/fetch.py +++ b/lib/portage/package/ebuild/fetch.py @@ -1291,13 +1291,20 @@ def fetch(myuris, mysettings, listonly=0, fetchonly=0, "FILE": os.path.basename(download_path) } + try: + variables['DIGESTS'] = " ".join(["%s:%s" % (k.lower(), v) + for k, v in mydigests[myfile].items() if k != 'size']) + except KeyError: + pass + for k in ("DISTDIR", "PORTAGE_SSH_OPTS"): v = mysettings.get(k) if v is not None: variables[k] = v - myfetch = shlex_split(locfetch) - myfetch = [varexpand(x, mydict=variables) for x in myfetch] + myfetch = varexpand(locfetch, mydict=variables) + myfetch = shlex_split(myfetch) + myret = -1 try: diff --git a/lib/portage/tests/resolver/test_circular_choices_rust.py b/lib/portage/tests/resolver/test_circular_choices_rust.py new file mode 100644 index 000000000..612f76c48 --- /dev/null +++ b/lib/portage/tests/resolver/test_circular_choices_rust.py @@ -0,0 +1,94 @@ +# Copyright 2020 Gentoo Authors +# Distributed under the terms of the GNU General Public License v2 + +from portage.tests import TestCase +from portage.tests.resolver.ResolverPlayground import ( + ResolverPlayground, + ResolverPlaygroundTestCase, +) + + +class CircularRustTestCase(TestCase): + def testCircularPypyExe(self): + + ebuilds = { + "dev-lang/rust-1.47.0-r2": { + "EAPI": "7", + "SLOT": "stable/1.47", + "BDEPEND": "|| ( =dev-lang/rust-1.46* =dev-lang/rust-bin-1.46* =dev-lang/rust-1.47* =dev-lang/rust-bin-1.47* )", + }, + "dev-lang/rust-1.46.0": { + "EAPI": "7", + "SLOT": "stable/1.46", + "BDEPEND": "|| ( =dev-lang/rust-1.45* =dev-lang/rust-bin-1.45* =dev-lang/rust-1.46* =dev-lang/rust-bin-1.46* )", + }, + "dev-lang/rust-bin-1.47.0": { + "EAPI": "7", + }, + "dev-lang/rust-bin-1.46.0": { + "EAPI": "7", + }, + } + + installed = { + "dev-lang/rust-1.46.0": { + "EAPI": "7", + "SLOT": "stable/1.46", + "BDEPEND": "|| ( =dev-lang/rust-1.45* =dev-lang/rust-bin-1.45* =dev-lang/rust-1.46* =dev-lang/rust-bin-1.46* )", + }, + } + + test_cases = ( + # Test bug 756961, where a circular dependency was reported + # when a package would replace its own builtime dependency. 
+ # This needs to be tested with and without --update, since + # that affects package selection logic significantly, + # expecially for packages given as arguments. + ResolverPlaygroundTestCase( + ["=dev-lang/rust-1.46*"], + mergelist=["dev-lang/rust-1.46.0"], + success=True, + ), + ResolverPlaygroundTestCase( + ["=dev-lang/rust-1.46*"], + options={"--update": True}, + mergelist=[], + success=True, + ), + ResolverPlaygroundTestCase( + ["=dev-lang/rust-1.46*"], + options={"--deep": True, "--update": True}, + mergelist=[], + success=True, + ), + ResolverPlaygroundTestCase( + ["dev-lang/rust"], + mergelist=["dev-lang/rust-1.47.0-r2"], + success=True, + ), + ResolverPlaygroundTestCase( + ["dev-lang/rust"], + options={"--update": True}, + mergelist=["dev-lang/rust-1.47.0-r2"], + success=True, + ), + ResolverPlaygroundTestCase( + ["@world"], + options={"--deep": True, "--update": True}, + mergelist=["dev-lang/rust-1.47.0-r2"], + success=True, + ), + ) + + world = ["dev-lang/rust"] + + playground = ResolverPlayground( + ebuilds=ebuilds, installed=installed, world=world, debug=False + ) + try: + for test_case in test_cases: + playground.run_TestCase(test_case) + self.assertEqual(test_case.test_success, True, test_case.fail_msg) + finally: + playground.debug = False + playground.cleanup() diff --git a/lib/portage/tests/resolver/test_merge_order.py b/lib/portage/tests/resolver/test_merge_order.py index f81fd2f6f..db063ed9d 100644 --- a/lib/portage/tests/resolver/test_merge_order.py +++ b/lib/portage/tests/resolver/test_merge_order.py @@ -217,12 +217,23 @@ class MergeOrderTestCase(TestCase): "IUSE" : "X +encode", "RDEPEND" : "|| ( >=media-video/ffmpeg-0.6.90_rc0-r2[X=,encode=] >=media-video/libav-0.6.90_rc[X=,encode=] )", }, + "x11-base/xorg-drivers-1.20-r2": { + "EAPI": "7", + "IUSE": "+video_cards_fbdev", + "PDEPEND": "x11-base/xorg-server video_cards_fbdev? ( x11-drivers/xf86-video-fbdev )", + }, "x11-base/xorg-server-1.14.1" : { "EAPI" : "5", "SLOT": "0/1.14.1", "DEPEND" : "media-libs/mesa", "RDEPEND" : "media-libs/mesa", + "PDEPEND": "x11-base/xorg-drivers", }, + "x11-drivers/xf86-video-fbdev-0.5.0-r1": { + "EAPI": "7", + "DEPEND": "x11-base/xorg-server", + "RDEPEND": "x11-base/xorg-server:=", + } } installed = { @@ -299,12 +310,24 @@ class MergeOrderTestCase(TestCase): "USE" : "encode", "RDEPEND" : "|| ( >=media-video/ffmpeg-0.6.90_rc0-r2[X=,encode=] >=media-video/libav-0.6.90_rc[X=,encode=] )", }, + "x11-base/xorg-drivers-1.20-r2": { + "EAPI": "7", + "IUSE": "+video_cards_fbdev", + "USE": "video_cards_fbdev", + "PDEPEND": "x11-base/xorg-server x11-drivers/xf86-video-fbdev", + }, "x11-base/xorg-server-1.14.1" : { "EAPI" : "5", "SLOT": "0/1.14.1", "DEPEND" : "media-libs/mesa", "RDEPEND" : "media-libs/mesa", + "PDEPEND": "x11-base/xorg-drivers", }, + "x11-drivers/xf86-video-fbdev-0.5.0-r1": { + "EAPI": "7", + "DEPEND": "x11-base/xorg-server", + "RDEPEND": "x11-base/xorg-server:0/1.14.1=", + } } test_cases = ( @@ -486,10 +509,10 @@ class MergeOrderTestCase(TestCase): # Both deps are already satisfied by installed packages, but # the := dep is given higher priority in merge order. 
ResolverPlaygroundTestCase( - ["media-libs/mesa", "x11-base/xorg-server"], + ["media-libs/mesa", "x11-drivers/xf86-video-fbdev", "x11-base/xorg-server"], success=True, all_permutations = True, - mergelist = ['x11-base/xorg-server-1.14.1', 'media-libs/mesa-9.1.3']), + mergelist = ['x11-base/xorg-server-1.14.1', 'media-libs/mesa-9.1.3', 'x11-drivers/xf86-video-fbdev-0.5.0-r1']), # Test prioritization of the find_smallest_cycle function, which should # minimize the use of installed packages to break cycles. If installed # packages must be used to break cycles, then it should prefer to do this diff --git a/lib/portage/tests/resolver/test_slot_operator_reverse_deps.py b/lib/portage/tests/resolver/test_slot_operator_reverse_deps.py index 6641e9987..ef884f8ca 100644 --- a/lib/portage/tests/resolver/test_slot_operator_reverse_deps.py +++ b/lib/portage/tests/resolver/test_slot_operator_reverse_deps.py @@ -1,4 +1,4 @@ -# Copyright 2016-2020 Gentoo Authors +# Copyright 2016-2021 Gentoo Authors # Distributed under the terms of the GNU General Public License v2 from portage.tests import TestCase @@ -202,3 +202,99 @@ class SlotOperatorReverseDepsLibGit2TestCase(TestCase): finally: playground.debug = False playground.cleanup() + + +class SlotOperatorReverseDepsVirtualTestCase(TestCase): + + def testSlotOperatorReverseDepsVirtual(self): + """ + Demonstrate bug #764764, where slot operator rebuilds were + not triggered for reverse deps of virtual/dist-kernel. + """ + + ebuilds = { + + "app-emulation/virtualbox-modules-6.1.16-r1": { + "EAPI": "7", + "DEPEND": "virtual/dist-kernel", + "RDEPEND": "virtual/dist-kernel:=", + }, + + "sys-kernel/gentoo-kernel-5.10.6": { + "EAPI": "7", + "SLOT": "5.10.6", + }, + + "sys-kernel/gentoo-kernel-5.10.5": { + "EAPI": "7", + "SLOT": "5.10.5", + }, + + "virtual/dist-kernel-5.10.5" : { + "EAPI": "7", + "SLOT": "0/5.10.5", + "RDEPEND": "~sys-kernel/gentoo-kernel-5.10.5", + }, + + "virtual/dist-kernel-5.10.6" : { + "EAPI": "7", + "SLOT": "0/5.10.6", + "RDEPEND": "~sys-kernel/gentoo-kernel-5.10.6" + }, + + "x11-drivers/nvidia-drivers-460.32.03" : { + "EAPI": "7", + "DEPEND": "virtual/dist-kernel", + "RDEPEND": "virtual/dist-kernel:=", + }, + + } + + installed = { + + "app-emulation/virtualbox-modules-6.1.16-r1": { + "EAPI": "7", + "DEPEND": "virtual/dist-kernel", + "RDEPEND": "virtual/dist-kernel:0/5.10.5=", + }, + + "sys-kernel/gentoo-kernel-5.10.5": { + "EAPI": "7", + "SLOT": "5.10.5", + }, + + "virtual/dist-kernel-5.10.5" : { + "EAPI": "7", + "SLOT": "0/5.10.5", + "RDEPEND": "~sys-kernel/gentoo-kernel-5.10.5" + }, + + "x11-drivers/nvidia-drivers-460.32.03" : { + "EAPI": "7", + "DEPEND": "virtual/dist-kernel", + "RDEPEND": "virtual/dist-kernel:0/5.10.5=" + }, + + } + + world = ["app-emulation/virtualbox-modules", "x11-drivers/nvidia-drivers"] + + test_cases = ( + ResolverPlaygroundTestCase( + ["@world"], + options = {"--update": True, "--deep": True}, + success = True, + mergelist = ['sys-kernel/gentoo-kernel-5.10.6', 'virtual/dist-kernel-5.10.6', 'app-emulation/virtualbox-modules-6.1.16-r1', 'x11-drivers/nvidia-drivers-460.32.03'] + ), + ) + + playground = ResolverPlayground(ebuilds=ebuilds, + installed=installed, world=world, debug=False) + try: + for test_case in test_cases: + playground.run_TestCase(test_case) + self.assertEqual(test_case.test_success, True, + test_case.fail_msg) + finally: + playground.debug = False + playground.cleanup() diff --git a/lib/portage/tests/util/futures/test_retry.py b/lib/portage/tests/util/futures/test_retry.py index ce5fb3e11..6648b1b2c 
100644 --- a/lib/portage/tests/util/futures/test_retry.py +++ b/lib/portage/tests/util/futures/test_retry.py @@ -1,15 +1,18 @@ # Copyright 2018-2020 Gentoo Authors # Distributed under the terms of the GNU General Public License v2 -from concurrent.futures import ThreadPoolExecutor +from concurrent.futures import Future, ThreadPoolExecutor +import contextlib try: import threading except ImportError: import dummy_threading as threading +import weakref import time +import portage from portage.tests import TestCase from portage.util._eventloop.global_event_loop import global_event_loop from portage.util.backoff import RandomExponentialBackoff @@ -64,99 +67,100 @@ class HangForever: A callable object that sleeps forever. """ def __call__(self): - return global_event_loop().create_future() + return asyncio.Future() class RetryTestCase(TestCase): + @contextlib.contextmanager def _wrap_coroutine_func(self, coroutine_func): """ Derived classes may override this method in order to implement alternative forms of execution. """ - return coroutine_func + yield coroutine_func def testSucceedLater(self): loop = global_event_loop() - func_coroutine = self._wrap_coroutine_func(SucceedLater(1)) - decorator = retry(try_max=9999, - delay_func=RandomExponentialBackoff(multiplier=0.1, base=2)) - decorated_func = decorator(func_coroutine, loop=loop) - result = loop.run_until_complete(decorated_func()) - self.assertEqual(result, 'success') + with self._wrap_coroutine_func(SucceedLater(1)) as func_coroutine: + decorator = retry(try_max=9999, + delay_func=RandomExponentialBackoff(multiplier=0.1, base=2)) + decorated_func = decorator(func_coroutine, loop=loop) + result = loop.run_until_complete(decorated_func()) + self.assertEqual(result, 'success') def testSucceedNever(self): loop = global_event_loop() - func_coroutine = self._wrap_coroutine_func(SucceedNever()) - decorator = retry(try_max=4, try_timeout=None, - delay_func=RandomExponentialBackoff(multiplier=0.1, base=2)) - decorated_func = decorator(func_coroutine, loop=loop) - done, pending = loop.run_until_complete(asyncio.wait([decorated_func()], loop=loop)) - self.assertEqual(len(done), 1) - self.assertTrue(isinstance(done.pop().exception().__cause__, SucceedNeverException)) + with self._wrap_coroutine_func(SucceedNever()) as func_coroutine: + decorator = retry(try_max=4, try_timeout=None, + delay_func=RandomExponentialBackoff(multiplier=0.1, base=2)) + decorated_func = decorator(func_coroutine, loop=loop) + done, pending = loop.run_until_complete(asyncio.wait([decorated_func()], loop=loop)) + self.assertEqual(len(done), 1) + self.assertTrue(isinstance(done.pop().exception().__cause__, SucceedNeverException)) def testSucceedNeverReraise(self): loop = global_event_loop() - func_coroutine = self._wrap_coroutine_func(SucceedNever()) - decorator = retry(reraise=True, try_max=4, try_timeout=None, - delay_func=RandomExponentialBackoff(multiplier=0.1, base=2)) - decorated_func = decorator(func_coroutine, loop=loop) - done, pending = loop.run_until_complete(asyncio.wait([decorated_func()], loop=loop)) - self.assertEqual(len(done), 1) - self.assertTrue(isinstance(done.pop().exception(), SucceedNeverException)) + with self._wrap_coroutine_func(SucceedNever()) as func_coroutine: + decorator = retry(reraise=True, try_max=4, try_timeout=None, + delay_func=RandomExponentialBackoff(multiplier=0.1, base=2)) + decorated_func = decorator(func_coroutine, loop=loop) + done, pending = loop.run_until_complete(asyncio.wait([decorated_func()], loop=loop)) + 
self.assertEqual(len(done), 1) + self.assertTrue(isinstance(done.pop().exception(), SucceedNeverException)) def testHangForever(self): loop = global_event_loop() - func_coroutine = self._wrap_coroutine_func(HangForever()) - decorator = retry(try_max=2, try_timeout=0.1, - delay_func=RandomExponentialBackoff(multiplier=0.1, base=2)) - decorated_func = decorator(func_coroutine, loop=loop) - done, pending = loop.run_until_complete(asyncio.wait([decorated_func()], loop=loop)) - self.assertEqual(len(done), 1) - self.assertTrue(isinstance(done.pop().exception().__cause__, asyncio.TimeoutError)) + with self._wrap_coroutine_func(HangForever()) as func_coroutine: + decorator = retry(try_max=2, try_timeout=0.1, + delay_func=RandomExponentialBackoff(multiplier=0.1, base=2)) + decorated_func = decorator(func_coroutine, loop=loop) + done, pending = loop.run_until_complete(asyncio.wait([decorated_func()], loop=loop)) + self.assertEqual(len(done), 1) + self.assertTrue(isinstance(done.pop().exception().__cause__, asyncio.TimeoutError)) def testHangForeverReraise(self): loop = global_event_loop() - func_coroutine = self._wrap_coroutine_func(HangForever()) - decorator = retry(reraise=True, try_max=2, try_timeout=0.1, - delay_func=RandomExponentialBackoff(multiplier=0.1, base=2)) - decorated_func = decorator(func_coroutine, loop=loop) - done, pending = loop.run_until_complete(asyncio.wait([decorated_func()], loop=loop)) - self.assertEqual(len(done), 1) - self.assertTrue(isinstance(done.pop().exception(), asyncio.TimeoutError)) + with self._wrap_coroutine_func(HangForever()) as func_coroutine: + decorator = retry(reraise=True, try_max=2, try_timeout=0.1, + delay_func=RandomExponentialBackoff(multiplier=0.1, base=2)) + decorated_func = decorator(func_coroutine, loop=loop) + done, pending = loop.run_until_complete(asyncio.wait([decorated_func()], loop=loop)) + self.assertEqual(len(done), 1) + self.assertTrue(isinstance(done.pop().exception(), asyncio.TimeoutError)) def testCancelRetry(self): loop = global_event_loop() - func_coroutine = self._wrap_coroutine_func(SucceedNever()) - decorator = retry(try_timeout=0.1, - delay_func=RandomExponentialBackoff(multiplier=0.1, base=2)) - decorated_func = decorator(func_coroutine, loop=loop) - future = decorated_func() - loop.call_later(0.3, future.cancel) - done, pending = loop.run_until_complete(asyncio.wait([future], loop=loop)) - self.assertEqual(len(done), 1) - self.assertTrue(done.pop().cancelled()) + with self._wrap_coroutine_func(SucceedNever()) as func_coroutine: + decorator = retry(try_timeout=0.1, + delay_func=RandomExponentialBackoff(multiplier=0.1, base=2)) + decorated_func = decorator(func_coroutine, loop=loop) + future = decorated_func() + loop.call_later(0.3, future.cancel) + done, pending = loop.run_until_complete(asyncio.wait([future], loop=loop)) + self.assertEqual(len(done), 1) + self.assertTrue(done.pop().cancelled()) def testOverallTimeoutWithException(self): loop = global_event_loop() - func_coroutine = self._wrap_coroutine_func(SucceedNever()) - decorator = retry(try_timeout=0.1, overall_timeout=0.3, - delay_func=RandomExponentialBackoff(multiplier=0.1, base=2)) - decorated_func = decorator(func_coroutine, loop=loop) - done, pending = loop.run_until_complete(asyncio.wait([decorated_func()], loop=loop)) - self.assertEqual(len(done), 1) - self.assertTrue(isinstance(done.pop().exception().__cause__, SucceedNeverException)) + with self._wrap_coroutine_func(SucceedNever()) as func_coroutine: + decorator = retry(try_timeout=0.1, overall_timeout=0.3, + 
delay_func=RandomExponentialBackoff(multiplier=0.1, base=2)) + decorated_func = decorator(func_coroutine, loop=loop) + done, pending = loop.run_until_complete(asyncio.wait([decorated_func()], loop=loop)) + self.assertEqual(len(done), 1) + self.assertTrue(isinstance(done.pop().exception().__cause__, SucceedNeverException)) def testOverallTimeoutWithTimeoutError(self): loop = global_event_loop() # results in TimeoutError because it hangs forever - func_coroutine = self._wrap_coroutine_func(HangForever()) - decorator = retry(try_timeout=0.1, overall_timeout=0.3, - delay_func=RandomExponentialBackoff(multiplier=0.1, base=2)) - decorated_func = decorator(func_coroutine, loop=loop) - done, pending = loop.run_until_complete(asyncio.wait([decorated_func()], loop=loop)) - self.assertEqual(len(done), 1) - self.assertTrue(isinstance(done.pop().exception().__cause__, asyncio.TimeoutError)) + with self._wrap_coroutine_func(HangForever()) as func_coroutine: + decorator = retry(try_timeout=0.1, overall_timeout=0.3, + delay_func=RandomExponentialBackoff(multiplier=0.1, base=2)) + decorated_func = decorator(func_coroutine, loop=loop) + done, pending = loop.run_until_complete(asyncio.wait([decorated_func()], loop=loop)) + self.assertEqual(len(done), 1) + self.assertTrue(isinstance(done.pop().exception().__cause__, asyncio.TimeoutError)) class RetryForkExecutorTestCase(RetryTestCase): @@ -184,43 +188,70 @@ class RetryForkExecutorTestCase(RetryTestCase): def tearDown(self): self._tearDownExecutor() + @contextlib.contextmanager def _wrap_coroutine_func(self, coroutine_func): parent_loop = global_event_loop() + parent_pid = portage.getpid() + pending = weakref.WeakValueDictionary() # Since ThreadPoolExecutor does not propagate cancellation of a # parent_future to the underlying coroutine, use kill_switch to # propagate task cancellation to wrapper, so that HangForever's # thread returns when retry eventually cancels parent_future. 
def wrapper(kill_switch): - loop = global_event_loop() - if loop is parent_loop: + if portage.getpid() == parent_pid: # thread in main process - result = coroutine_func() - event = threading.Event() - loop.call_soon_threadsafe(result.add_done_callback, - lambda result: event.set()) - loop.call_soon_threadsafe(kill_switch.add_done_callback, - lambda kill_switch: event.set()) - event.wait() - return result.result() + def done_callback(result): + result.cancelled() or result.exception() or result.result() + kill_switch.set() + def start_coroutine(future): + result = asyncio.ensure_future(coroutine_func(), loop=parent_loop) + pending[id(result)] = result + result.add_done_callback(done_callback) + future.set_result(result) + future = Future() + parent_loop.call_soon_threadsafe(start_coroutine, future) + kill_switch.wait() + if not future.done(): + future.cancel() + raise asyncio.CancelledError + elif not future.result().done(): + future.result().cancel() + raise asyncio.CancelledError + else: + return future.result().result() # child process + loop = global_event_loop() try: return loop.run_until_complete(coroutine_func()) finally: loop.close() def execute_wrapper(): - kill_switch = parent_loop.create_future() + kill_switch = threading.Event() parent_future = asyncio.ensure_future( parent_loop.run_in_executor(self._executor, wrapper, kill_switch), loop=parent_loop) - parent_future.add_done_callback( - lambda parent_future: None if kill_switch.done() - else kill_switch.set_result(None)) + def kill_callback(parent_future): + if not kill_switch.is_set(): + kill_switch.set() + parent_future.add_done_callback(kill_callback) return parent_future - return execute_wrapper + try: + yield execute_wrapper + finally: + while True: + try: + _, future = pending.popitem() + except KeyError: + break + try: + parent_loop.run_until_complete(future) + except (Exception, asyncio.CancelledError): + pass + future.cancelled() or future.exception() or future.result() class RetryThreadExecutorTestCase(RetryForkExecutorTestCase): diff --git a/lib/portage/util/__init__.py b/lib/portage/util/__init__.py index 0412b2b59..b0922bf09 100644 --- a/lib/portage/util/__init__.py +++ b/lib/portage/util/__init__.py @@ -11,6 +11,7 @@ __all__ = ['apply_permissions', 'apply_recursive_permissions', 'stack_dicts', 'stack_lists', 'unique_array', 'unique_everseen', 'varexpand', 'write_atomic', 'writedict', 'writemsg', 'writemsg_level', 'writemsg_stdout'] +from contextlib import AbstractContextManager from copy import deepcopy import errno import io @@ -1246,7 +1247,7 @@ def apply_secpass_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1, stat_cached=stat_cached, follow_links=follow_links) return all_applied -class atomic_ofstream(ObjectProxy): +class atomic_ofstream(AbstractContextManager, ObjectProxy): """Write a file atomically via os.rename(). 
Atomic replacement prevents interprocess interference and prevents corruption of the target file when the write is interrupted (for example, when an 'out of space' @@ -1287,6 +1288,12 @@ class atomic_ofstream(ObjectProxy): encoding=_encodings['fs'], errors='strict'), mode=mode, **kargs)) + def __exit__(self, exc_type, exc_val, exc_tb): + if exc_type is not None: + self.abort() + else: + self.close() + def _get_target(self): return object.__getattribute__(self, '_file') diff --git a/lib/portage/util/_compare_files.py b/lib/portage/util/_compare_files.py index 60d43aefa..7692797fc 100644 --- a/lib/portage/util/_compare_files.py +++ b/lib/portage/util/_compare_files.py @@ -9,7 +9,7 @@ import stat from portage import _encodings from portage import _unicode_encode -from portage.util._xattr import xattr +from portage.util._xattr import XATTRS_WORKS, xattr def compare_files(file1, file2, skipped_types=()): """ @@ -45,7 +45,7 @@ def compare_files(file1, file2, skipped_types=()): if "device_number" not in skipped_types and file1_stat.st_rdev != file2_stat.st_rdev: differences.append("device_number") - if (xattr.XATTRS_WORKS and "xattr" not in skipped_types and + if (XATTRS_WORKS and "xattr" not in skipped_types and sorted(xattr.get_all(file1, nofollow=True)) != sorted(xattr.get_all(file2, nofollow=True))): differences.append("xattr") diff --git a/lib/portage/util/_eventloop/asyncio_event_loop.py b/lib/portage/util/_eventloop/asyncio_event_loop.py index 836f1c30a..b77728088 100644 --- a/lib/portage/util/_eventloop/asyncio_event_loop.py +++ b/lib/portage/util/_eventloop/asyncio_event_loop.py @@ -6,6 +6,7 @@ import signal import asyncio as _real_asyncio from asyncio.events import AbstractEventLoop as _AbstractEventLoop +from asyncio.unix_events import AbstractChildWatcher as _AbstractChildWatcher import portage @@ -47,6 +48,7 @@ class AsyncioEventLoop(_AbstractEventLoop): self.set_debug = loop.set_debug self.get_debug = loop.get_debug self._wakeup_fd = -1 + self._child_watcher = None if portage._internal_caller: loop.set_exception_handler(self._internal_caller_exception_handler) @@ -87,7 +89,9 @@ class AsyncioEventLoop(_AbstractEventLoop): @rtype: asyncio.AbstractChildWatcher @return: the internal event loop's AbstractChildWatcher interface """ - return _real_asyncio.get_child_watcher() + if self._child_watcher is None: + self._child_watcher = _ChildWatcherThreadSafetyWrapper(self, _real_asyncio.get_child_watcher()) + return self._child_watcher @property def _asyncio_wrapper(self): @@ -121,4 +125,32 @@ class AsyncioEventLoop(_AbstractEventLoop): try: return self._loop.run_until_complete(future) finally: - self._wakeup_fd = signal.set_wakeup_fd(-1) + try: + self._wakeup_fd = signal.set_wakeup_fd(-1) + except ValueError: + # This is intended to fail when not called in the main thread. 
+ pass + + +class _ChildWatcherThreadSafetyWrapper(_AbstractChildWatcher): + def __init__(self, loop, real_watcher): + self._loop = loop + self._real_watcher = real_watcher + + def close(self): + pass + + def __enter__(self): + return self + + def __exit__(self, a, b, c): + pass + + def _child_exit(self, pid, status, callback, *args): + self._loop.call_soon_threadsafe(callback, pid, status, *args) + + def add_child_handler(self, pid, callback, *args): + self._real_watcher.add_child_handler(pid, self._child_exit, callback, *args) + + def remove_child_handler(self, pid): + return self._real_watcher.remove_child_handler(pid) diff --git a/lib/portage/util/_eventloop/global_event_loop.py b/lib/portage/util/_eventloop/global_event_loop.py index 21a1d1970..cb7a13078 100644 --- a/lib/portage/util/_eventloop/global_event_loop.py +++ b/lib/portage/util/_eventloop/global_event_loop.py @@ -1,35 +1,6 @@ -# Copyright 2012-2020 Gentoo Authors +# Copyright 2012-2021 Gentoo Authors # Distributed under the terms of the GNU General Public License v2 -import portage -from .EventLoop import EventLoop -from portage.util._eventloop.asyncio_event_loop import AsyncioEventLoop +__all__ = ('global_event_loop',) - -_MAIN_PID = portage.getpid() -_instances = {} - - -def global_event_loop(): - """ - Get a global EventLoop (or compatible object) instance which - belongs exclusively to the current process. - """ - - pid = portage.getpid() - instance = _instances.get(pid) - if instance is not None: - return instance - - constructor = AsyncioEventLoop - # If the default constructor doesn't support multiprocessing, - # then multiprocessing constructor is used in subprocesses. - if not constructor.supports_multiprocessing and pid != _MAIN_PID: - constructor = EventLoop - - # Use the _asyncio_wrapper attribute, so that unit tests can compare - # the reference to one retured from _wrap_loop(), since they should - # not close the loop if it refers to a global event loop. 
- instance = constructor()._asyncio_wrapper - _instances[pid] = instance - return instance +from portage.util.futures._asyncio import _safe_loop as global_event_loop diff --git a/lib/portage/util/env_update.py b/lib/portage/util/env_update.py index dec086cf8..5588931a8 100644 --- a/lib/portage/util/env_update.py +++ b/lib/portage/util/env_update.py @@ -342,18 +342,17 @@ def _env_update(makelinks, target_root, prev_mtimes, contents, env, #create /etc/profile.env for bash support profile_env_path = os.path.join(eroot, "etc", "profile.env") - outfile = atomic_ofstream(profile_env_path) - outfile.write(penvnotice) - - env_keys = [x for x in env if x != "LDPATH"] - env_keys.sort() - for k in env_keys: - v = env[k] - if v.startswith('$') and not v.startswith('${'): - outfile.write("export %s=$'%s'\n" % (k, v[1:])) - else: - outfile.write("export %s='%s'\n" % (k, v)) - outfile.close() + with atomic_ofstream(profile_env_path) as outfile: + outfile.write(penvnotice) + + env_keys = [x for x in env if x != "LDPATH"] + env_keys.sort() + for k in env_keys: + v = env[k] + if v.startswith('$') and not v.startswith('${'): + outfile.write("export %s=$'%s'\n" % (k, v[1:])) + else: + outfile.write("export %s='%s'\n" % (k, v)) # Create the systemd user environment configuration file # /etc/environment.d/10-gentoo-env.conf with the @@ -363,8 +362,7 @@ def _env_update(makelinks, target_root, prev_mtimes, contents, env, systemd_gentoo_env_path = os.path.join(systemd_environment_dir, "10-gentoo-env.conf") - systemd_gentoo_env = atomic_ofstream(systemd_gentoo_env_path) - try: + with atomic_ofstream(systemd_gentoo_env_path) as systemd_gentoo_env: senvnotice = notice + "\n\n" systemd_gentoo_env.write(senvnotice) @@ -384,10 +382,6 @@ def _env_update(makelinks, target_root, prev_mtimes, contents, env, line = f"{env_key}={env_key_value}\n" systemd_gentoo_env.write(line) - except: - systemd_gentoo_env.abort() - raise - systemd_gentoo_env.close() #create /etc/csh.env for (t)csh support outfile = atomic_ofstream(os.path.join(eroot, "etc", "csh.env")) diff --git a/lib/portage/util/futures/_asyncio/__init__.py b/lib/portage/util/futures/_asyncio/__init__.py index a902ad895..5590963f1 100644 --- a/lib/portage/util/futures/_asyncio/__init__.py +++ b/lib/portage/util/futures/_asyncio/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2018-2020 Gentoo Authors +# Copyright 2018-2021 Gentoo Authors # Distributed under the terms of the GNU General Public License v2 __all__ = ( @@ -21,7 +21,8 @@ __all__ = ( ) import subprocess -import sys +import types +import weakref import asyncio as _real_asyncio @@ -34,12 +35,8 @@ import portage portage.proxy.lazyimport.lazyimport(globals(), 'portage.util.futures.unix_events:_PortageEventLoopPolicy', 'portage.util.futures:compat_coroutine@_compat_coroutine', - 'portage.util._eventloop.EventLoop:EventLoop@_EventLoop', ) from portage.util._eventloop.asyncio_event_loop import AsyncioEventLoop as _AsyncioEventLoop -from portage.util._eventloop.global_event_loop import ( - global_event_loop as _global_event_loop, -) # pylint: disable=redefined-builtin from portage.util.futures.futures import ( CancelledError, @@ -238,7 +235,7 @@ def _wrap_loop(loop=None): # The default loop returned by _wrap_loop should be consistent # with global_event_loop, in order to avoid accidental registration # of callbacks with a loop that is not intended to run. 
- loop = loop or _global_event_loop() + loop = loop or _safe_loop() return (loop if hasattr(loop, '_asyncio_wrapper') else _AsyncioEventLoop(loop=loop)) @@ -246,14 +243,72 @@ def _wrap_loop(loop=None): def _safe_loop(): """ Return an event loop that's safe to use within the current context. - For portage internal callers, this returns a globally shared event - loop instance. For external API consumers, this constructs a - temporary event loop instance that's safe to use in a non-main - thread (it does not override the global SIGCHLD handler). + For portage internal callers or external API consumers calling from + the main thread, this returns a globally shared event loop instance. + + For external API consumers calling from a non-main thread, an + asyncio loop must be registered for the current thread, or else the + asyncio.get_event_loop() function will raise an error like this: + + RuntimeError: There is no current event loop in thread 'Thread-1'. + + In order to avoid this RuntimeError, a loop will be automatically + created like this: + + asyncio.set_event_loop(asyncio.new_event_loop()) + + In order to avoid a ResourceWarning, automatically created loops + are added to a WeakValueDictionary, and closed via an atexit hook + if they still exist during exit for the current pid. @rtype: asyncio.AbstractEventLoop (or compatible) @return: event loop instance """ - if portage._internal_caller: - return _global_event_loop() - return _EventLoop(main=False) + loop = _get_running_loop() + if loop is not None: + return loop + + thread_key = threading.get_ident() + with _thread_weakrefs.lock: + if _thread_weakrefs.pid != portage.getpid(): + _thread_weakrefs.pid = portage.getpid() + _thread_weakrefs.mainloop = None + _thread_weakrefs.loops = weakref.WeakValueDictionary() + try: + loop = _thread_weakrefs.loops[thread_key] + except KeyError: + try: + _real_asyncio.get_event_loop() + except RuntimeError: + _real_asyncio.set_event_loop(_real_asyncio.new_event_loop()) + loop = _thread_weakrefs.loops[thread_key] = _AsyncioEventLoop() + + if _thread_weakrefs.mainloop is None and threading.current_thread() is threading.main_thread(): + _thread_weakrefs.mainloop = loop + + return loop + + +def _get_running_loop(): + with _thread_weakrefs.lock: + if _thread_weakrefs.pid == portage.getpid(): + try: + loop = _thread_weakrefs.loops[threading.get_ident()] + except KeyError: + return None + return loop if loop.is_running() else None + + +def _thread_weakrefs_atexit(): + with _thread_weakrefs.lock: + if _thread_weakrefs.pid == portage.getpid(): + while True: + try: + thread_key, loop = _thread_weakrefs.loops.popitem() + except KeyError: + break + else: + loop.close() + +_thread_weakrefs = types.SimpleNamespace(lock=threading.Lock(), loops=None, mainloop=None, pid=None) +portage.process.atexit_register(_thread_weakrefs_atexit) diff --git a/lib/portage/util/futures/retry.py b/lib/portage/util/futures/retry.py index 4092f60d6..31cc161da 100644 --- a/lib/portage/util/futures/retry.py +++ b/lib/portage/util/futures/retry.py @@ -1,4 +1,4 @@ -# Copyright 2018 Gentoo Foundation +# Copyright 2018-2021 Gentoo Authors # Distributed under the terms of the GNU General Public License v2 __all__ = ( @@ -113,7 +113,7 @@ class _Retry: def _begin_try(self): self._tries += 1 - self._current_task = self._func() + self._current_task = asyncio.ensure_future(self._func(), loop=self._loop) self._current_task.add_done_callback(self._try_done) if self._try_timeout is not None: self._try_timeout_handle = self._loop.call_later( diff --git 
a/man/make.conf.5 b/man/make.conf.5 index 494d5a212..f6eae6f60 100644 --- a/man/make.conf.5 +++ b/man/make.conf.5 @@ -1,4 +1,4 @@ -.TH "MAKE.CONF" "5" "Sep 2020" "Portage VERSION" "Portage" +.TH "MAKE.CONF" "5" "Jan 2021" "Portage VERSION" "Portage" .SH "NAME" make.conf \- custom settings for Portage .SH "SYNOPSIS" @@ -746,6 +746,18 @@ the internet. It must contain the full path to the executable as well as the place\-holders \\${DISTDIR}, \\${FILE} and \\${URI}. The command should be written to place the fetched file at \\${DISTDIR}/\\${FILE}. Also see \fBRESUMECOMMAND\fR. +.RS +.TP +.B Optional FETCHCOMMAND Placeholders +.TS +l l l +___ +l l l. +Placeholder Meaning Example + +\\${DIGESTS} Space separated list of file digests blake2b <hexdigest> sha512 <hexdigest> +.TE +.RE .TP .B FFLAGS FCFLAGS Use these variables to set the desired optimization/CPU instruction settings diff --git a/repoman/runtests b/repoman/runtests index 3edaaf0a8..5137b5e6e 100755 --- a/repoman/runtests +++ b/repoman/runtests @@ -24,12 +24,13 @@ PYTHON_SUPPORTED_VERSIONS = [ '2.7', '3.6', '3.7', - '3.8' + '3.8', + '3.9' ] # The rest are just "nice to have". PYTHON_NICE_VERSIONS = [ 'pypy3', - '3.9' + '3.10' ] EPREFIX = os.environ.get('PORTAGE_OVERRIDE_EPREFIX', '/') @@ -23,12 +23,13 @@ import tempfile PYTHON_SUPPORTED_VERSIONS = [ '3.6', '3.7', - '3.8' + '3.8', + '3.9' ] # The rest are just "nice to have". PYTHON_NICE_VERSIONS = [ 'pypy3', - '3.9' + '3.10' ] EPREFIX = os.environ.get('PORTAGE_OVERRIDE_EPREFIX', '/') @@ -655,7 +655,7 @@ class build_ext(_build_ext): setup( name = 'portage', - version = '3.0.10', + version = '3.0.13', url = 'https://wiki.gentoo.org/wiki/Project:Portage', author = 'Gentoo Portage Development Team', author_email = 'dev-portage@gentoo.org', diff --git a/src/portage_util_file_copy_reflink_linux.c b/src/portage_util_file_copy_reflink_linux.c index 352342c06..c6affe57a 100644 --- a/src/portage_util_file_copy_reflink_linux.c +++ b/src/portage_util_file_copy_reflink_linux.c @@ -1,4 +1,4 @@ -/* Copyright 2017 Gentoo Foundation +/* Copyright 2017-2020 Gentoo Authors * Distributed under the terms of the GNU General Public License v2 */ @@ -25,7 +25,6 @@ static PyMethodDef reflink_linuxMethods[] = { {NULL, NULL, 0, NULL} }; -#if PY_MAJOR_VERSION >= 3 static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "reflink_linux", /* m_name */ @@ -45,13 +44,6 @@ PyInit_reflink_linux(void) m = PyModule_Create(&moduledef); return m; } -#else -PyMODINIT_FUNC -initreflink_linux(void) -{ - Py_InitModule("reflink_linux", reflink_linuxMethods); -} -#endif /** diff --git a/src/portage_util_libc.c b/src/portage_util_libc.c index 977b95474..2a3e624dc 100644 --- a/src/portage_util_libc.c +++ b/src/portage_util_libc.c @@ -1,4 +1,4 @@ -/* Copyright 2005-2016 Gentoo Foundation +/* Copyright 2005-2020 Gentoo Authors * Distributed under the terms of the GNU General Public License v2 */ @@ -15,7 +15,6 @@ static PyMethodDef LibcMethods[] = { {NULL, NULL, 0, NULL} }; -#if PY_MAJOR_VERSION >= 3 static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "libc", /* m_name */ @@ -35,13 +34,6 @@ PyInit_libc(void) m = PyModule_Create(&moduledef); return m; } -#else -PyMODINIT_FUNC -initlibc(void) -{ - Py_InitModule("libc", LibcMethods); -} -#endif static PyObject * @@ -1,16 +1,25 @@ [tox] -envlist = py36,py37,py38,py39,pypy3 +envlist = py36,py37,py38,py39,py310,pypy3 skipsdist = True +[gh-actions] +python = + 3.6: py36 + 3.7: py37 + 3.8: py38 + 3.9: py39 + 3.10: py310 + pypy-3.6: pypy3 + [testenv] deps = pylint pygost 
pyyaml - py36,py37,py38,py39,pypy3: lxml!=4.2.0 + py36,py37,py38,py39,py310,pypy3: lxml!=4.2.0 setenv = PYTHONPATH={toxinidir}/lib commands = bash -c 'rm -rf build && PYTHONPATH=$PWD/lib:$PWD/repoman/lib pylint *' python -b -Wd setup.py test - python -b -Wd repoman/setup.py test + bash -c 'if python -c "import lxml.etree"; then python -b -Wd repoman/setup.py test; else echo "repoman tests skipped due to lxml breakage"; fi' |
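
As a quick illustration of the new \${DIGESTS} FETCHCOMMAND placeholder introduced in the fetch.py and make.conf.5 hunks above, here is a minimal, hedged Python sketch. The digest values, fetcher name and URI are hypothetical, and a plain str.replace() loop stands in for portage.util.varexpand(); only the "name:hexdigest" joining logic mirrors the actual change.

# Hedged sketch of the ${DIGESTS} expansion added to fetch.py above.
# Assumptions: the Manifest digests, fetcher name and URI are made up,
# and str.replace() stands in for portage.util.varexpand().
import shlex

mydigests = {
    "example-1.0.tar.gz": {
        "size": 12345,
        "BLAKE2B": "aa11",  # hypothetical hexdigest (truncated)
        "SHA512": "bb22",   # hypothetical hexdigest (truncated)
    },
}

myfile = "example-1.0.tar.gz"
variables = {
    "DISTDIR": "/var/cache/distfiles",
    "FILE": myfile,
    "URI": "https://example.org/" + myfile,
}
# Mirrors the new fetch.py code: a space separated "name:hexdigest"
# list, with the "size" entry skipped.
variables["DIGESTS"] = " ".join(
    "%s:%s" % (k.lower(), v)
    for k, v in mydigests[myfile].items()
    if k != "size"
)

# The command is now expanded as a whole string and split afterwards
# (matching the reordered varexpand()/shlex_split() calls above), so
# shell-style quoting in FETCHCOMMAND decides whether the digest list
# stays a single argument.
locfetch = 'myfetcher --digests "${DIGESTS}" -o "${DISTDIR}/${FILE}" "${URI}"'
expanded = locfetch
for key, value in variables.items():
    expanded = expanded.replace("${%s}" % key, value)
print(shlex.split(expanded))

In make.conf terms this corresponds to passing \${DIGESTS} inside quotes in FETCHCOMMAND, as documented in the new placeholder table added to make.conf(5) above.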
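
The event loop changes above (bugs 758740, 758755, 764905) are easiest to see from the API consumer side. A small sketch follows, under the assumption that portage is importable and that running a trivial coroutine is representative of real asynchronous API use: each non-main thread now gets its own loop from global_event_loop() instead of hitting the RuntimeError quoted in the _safe_loop() docstring.

# Hedged sketch of the per-thread event loop behavior described in the
# _safe_loop() docstring above; asyncio.sleep() is used only as a
# stand-in for real asynchronous portage API calls.
import asyncio
import threading

from portage.util._eventloop.global_event_loop import global_event_loop


def worker(results, key):
    # Previously, asyncio in a non-main thread could raise
    # "RuntimeError: There is no current event loop in thread ...";
    # global_event_loop() now creates and registers a loop per thread.
    loop = global_event_loop()
    results[key] = loop.run_until_complete(asyncio.sleep(0, result="ok"))


results = {}
threads = [threading.Thread(target=worker, args=(results, i)) for i in range(2)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(results)  # expected: {0: 'ok', 1: 'ok'}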