Diffstat (limited to 'lib')
-rw-r--r--lib/_emerge/AbstractDepPriority.py30
-rw-r--r--lib/_emerge/AbstractEbuildProcess.py429
-rw-r--r--lib/_emerge/AbstractPollTask.py117
-rw-r--r--lib/_emerge/AsynchronousLock.py313
-rw-r--r--lib/_emerge/AsynchronousTask.py196
-rw-r--r--lib/_emerge/AtomArg.py14
-rw-r--r--lib/_emerge/Binpkg.py457
-rw-r--r--lib/_emerge/BinpkgEnvExtractor.py66
-rw-r--r--lib/_emerge/BinpkgExtractorAsync.py108
-rw-r--r--lib/_emerge/BinpkgFetcher.py240
-rw-r--r--lib/_emerge/BinpkgPrefetcher.py43
-rw-r--r--lib/_emerge/BinpkgVerifier.py122
-rw-r--r--lib/_emerge/Blocker.py15
-rw-r--r--lib/_emerge/BlockerCache.py191
-rw-r--r--lib/_emerge/BlockerDB.py126
-rw-r--r--lib/_emerge/BlockerDepPriority.py13
-rw-r--r--lib/_emerge/CompositeTask.py123
-rw-r--r--lib/_emerge/DepPriority.py56
-rw-r--r--lib/_emerge/DepPriorityNormalRange.py47
-rw-r--r--lib/_emerge/DepPrioritySatisfiedRange.py97
-rw-r--r--lib/_emerge/Dependency.py21
-rw-r--r--lib/_emerge/DependencyArg.py46
-rw-r--r--lib/_emerge/EbuildBinpkg.py53
-rw-r--r--lib/_emerge/EbuildBuild.py535
-rw-r--r--lib/_emerge/EbuildBuildDir.py161
-rw-r--r--lib/_emerge/EbuildExecuter.py84
-rw-r--r--lib/_emerge/EbuildFetcher.py373
-rw-r--r--lib/_emerge/EbuildFetchonly.py35
-rw-r--r--lib/_emerge/EbuildIpcDaemon.py119
-rw-r--r--lib/_emerge/EbuildMerge.py75
-rw-r--r--lib/_emerge/EbuildMetadataPhase.py220
-rw-r--r--lib/_emerge/EbuildPhase.py439
-rw-r--r--lib/_emerge/EbuildProcess.py27
-rw-r--r--lib/_emerge/EbuildSpawnProcess.py22
-rw-r--r--lib/_emerge/FakeVartree.py337
-rw-r--r--lib/_emerge/FifoIpcDaemon.py97
-rw-r--r--lib/_emerge/JobStatusDisplay.py303
-rw-r--r--lib/_emerge/MergeListItem.py129
-rw-r--r--lib/_emerge/MetadataRegen.py150
-rw-r--r--lib/_emerge/MiscFunctionsProcess.py51
-rw-r--r--lib/_emerge/Package.py927
-rw-r--r--lib/_emerge/PackageArg.py19
-rw-r--r--lib/_emerge/PackageMerge.py49
-rw-r--r--lib/_emerge/PackagePhase.py93
-rw-r--r--lib/_emerge/PackageUninstall.py143
-rw-r--r--lib/_emerge/PackageVirtualDbapi.py149
-rw-r--r--lib/_emerge/PipeReader.py106
-rw-r--r--lib/_emerge/PollScheduler.py187
-rw-r--r--lib/_emerge/ProgressHandler.py22
-rw-r--r--lib/_emerge/RootConfig.py41
-rw-r--r--lib/_emerge/Scheduler.py2011
-rw-r--r--lib/_emerge/SequentialTaskQueue.py81
-rw-r--r--lib/_emerge/SetArg.py14
-rw-r--r--lib/_emerge/SpawnProcess.py241
-rw-r--r--lib/_emerge/SubProcess.py84
-rw-r--r--lib/_emerge/Task.py50
-rw-r--r--lib/_emerge/TaskSequence.py61
-rw-r--r--lib/_emerge/UninstallFailure.py15
-rw-r--r--lib/_emerge/UnmergeDepPriority.py46
-rw-r--r--lib/_emerge/UseFlagDisplay.py131
-rw-r--r--lib/_emerge/UserQuery.py78
-rw-r--r--lib/_emerge/__init__.py2
-rw-r--r--lib/_emerge/_find_deep_system_runtime_deps.py38
-rw-r--r--lib/_emerge/_flush_elog_mod_echo.py15
-rw-r--r--lib/_emerge/actions.py3337
-rw-r--r--lib/_emerge/chk_updated_cfg_files.py42
-rw-r--r--lib/_emerge/clear_caches.py16
-rw-r--r--lib/_emerge/countdown.py22
-rw-r--r--lib/_emerge/create_depgraph_params.py159
-rw-r--r--lib/_emerge/create_world_atom.py128
-rw-r--r--lib/_emerge/depgraph.py10049
-rw-r--r--lib/_emerge/emergelog.py56
-rw-r--r--lib/_emerge/getloadavg.py28
-rw-r--r--lib/_emerge/help.py25
-rw-r--r--lib/_emerge/is_valid_package_atom.py23
-rw-r--r--lib/_emerge/main.py1295
-rw-r--r--lib/_emerge/post_emerge.py168
-rw-r--r--lib/_emerge/resolver/DbapiProvidesIndex.py102
-rw-r--r--lib/_emerge/resolver/__init__.py2
-rw-r--r--lib/_emerge/resolver/backtracking.py264
-rw-r--r--lib/_emerge/resolver/circular_dependency.py289
-rw-r--r--lib/_emerge/resolver/output.py1033
-rw-r--r--lib/_emerge/resolver/output_helpers.py693
-rw-r--r--lib/_emerge/resolver/package_tracker.py386
-rw-r--r--lib/_emerge/resolver/slot_collision.py1185
-rw-r--r--lib/_emerge/search.py531
-rw-r--r--lib/_emerge/show_invalid_depstring_notice.py35
-rw-r--r--lib/_emerge/stdout_spinner.py86
-rw-r--r--lib/_emerge/unmerge.py608
-rw-r--r--lib/portage/__init__.py664
-rw-r--r--lib/portage/_emirrordist/Config.py140
-rw-r--r--lib/portage/_emirrordist/DeletionIterator.py83
-rw-r--r--lib/portage/_emirrordist/DeletionTask.py129
-rw-r--r--lib/portage/_emirrordist/FetchIterator.py289
-rw-r--r--lib/portage/_emirrordist/FetchTask.py631
-rw-r--r--lib/portage/_emirrordist/MirrorDistTask.py249
-rw-r--r--lib/portage/_emirrordist/__init__.py2
-rw-r--r--lib/portage/_emirrordist/main.py442
-rw-r--r--lib/portage/_global_updates.py255
-rw-r--r--lib/portage/_legacy_globals.py78
-rw-r--r--lib/portage/_selinux.py158
-rw-r--r--lib/portage/_sets/ProfilePackageSet.py35
-rw-r--r--lib/portage/_sets/__init__.py302
-rw-r--r--lib/portage/_sets/base.py250
-rw-r--r--lib/portage/_sets/dbapi.py537
-rw-r--r--lib/portage/_sets/files.py394
-rw-r--r--lib/portage/_sets/libs.py99
-rw-r--r--lib/portage/_sets/profiles.py57
-rw-r--r--lib/portage/_sets/security.py86
-rw-r--r--lib/portage/_sets/shell.py44
-rw-r--r--lib/portage/cache/__init__.py4
-rw-r--r--lib/portage/cache/anydbm.py116
-rw-r--r--lib/portage/cache/cache_errors.py62
-rw-r--r--lib/portage/cache/ebuild_xattr.py172
-rw-r--r--lib/portage/cache/flat_hash.py166
-rw-r--r--lib/portage/cache/fs_template.py93
-rw-r--r--lib/portage/cache/index/IndexStreamIterator.py27
-rw-r--r--lib/portage/cache/index/__init__.py2
-rw-r--r--lib/portage/cache/index/pkg_desc_index.py60
-rw-r--r--lib/portage/cache/mappings.py485
-rw-r--r--lib/portage/cache/metadata.py158
-rw-r--r--lib/portage/cache/sql_template.py301
-rw-r--r--lib/portage/cache/sqlite.py285
-rw-r--r--lib/portage/cache/template.py373
-rw-r--r--lib/portage/cache/volatile.py30
-rw-r--r--lib/portage/checksum.py583
-rw-r--r--lib/portage/const.py265
-rw-r--r--lib/portage/cvstree.py315
-rw-r--r--lib/portage/data.py322
-rw-r--r--lib/portage/dbapi/DummyTree.py16
-rw-r--r--lib/portage/dbapi/IndexedPortdb.py171
-rw-r--r--lib/portage/dbapi/IndexedVardb.py114
-rw-r--r--lib/portage/dbapi/_ContentsCaseSensitivityManager.py93
-rw-r--r--lib/portage/dbapi/_MergeProcess.py287
-rw-r--r--lib/portage/dbapi/_SyncfsProcess.py53
-rw-r--r--lib/portage/dbapi/_VdbMetadataDelta.py176
-rw-r--r--lib/portage/dbapi/__init__.py443
-rw-r--r--lib/portage/dbapi/_expand_new_virt.py81
-rw-r--r--lib/portage/dbapi/_similar_name_search.py57
-rw-r--r--lib/portage/dbapi/bintree.py1710
-rw-r--r--lib/portage/dbapi/cpv_expand.py108
-rw-r--r--lib/portage/dbapi/dep_expand.py58
-rw-r--r--lib/portage/dbapi/porttree.py1526
-rw-r--r--lib/portage/dbapi/vartree.py5559
-rw-r--r--lib/portage/dbapi/virtual.py232
-rw-r--r--lib/portage/debug.py120
-rw-r--r--lib/portage/dep/__init__.py2874
-rw-r--r--lib/portage/dep/_dnf.py90
-rw-r--r--lib/portage/dep/_slot_operator.py122
-rw-r--r--lib/portage/dep/dep_check.py961
-rw-r--r--lib/portage/dep/soname/SonameAtom.py72
-rw-r--r--lib/portage/dep/soname/__init__.py2
-rw-r--r--lib/portage/dep/soname/multilib_category.py116
-rw-r--r--lib/portage/dep/soname/parse.py47
-rw-r--r--lib/portage/dispatch_conf.py397
-rw-r--r--lib/portage/eapi.py194
-rw-r--r--lib/portage/eclass_cache.py187
-rw-r--r--lib/portage/elog/__init__.py191
-rw-r--r--lib/portage/elog/filtering.py15
-rw-r--r--lib/portage/elog/messages.py190
-rw-r--r--lib/portage/elog/mod_custom.py19
-rw-r--r--lib/portage/elog/mod_echo.py69
-rw-r--r--lib/portage/elog/mod_mail.py43
-rw-r--r--lib/portage/elog/mod_mail_summary.py89
-rw-r--r--lib/portage/elog/mod_save.py84
-rw-r--r--lib/portage/elog/mod_save_summary.py92
-rw-r--r--lib/portage/elog/mod_syslog.py37
-rw-r--r--lib/portage/emaint/__init__.py5
-rw-r--r--lib/portage/emaint/defaults.py25
-rw-r--r--lib/portage/emaint/main.py246
-rw-r--r--lib/portage/emaint/modules/__init__.py5
-rw-r--r--lib/portage/emaint/modules/binhost/__init__.py21
-rw-r--r--lib/portage/emaint/modules/binhost/binhost.py183
-rw-r--r--lib/portage/emaint/modules/config/__init__.py21
-rw-r--r--lib/portage/emaint/modules/config/config.py81
-rw-r--r--lib/portage/emaint/modules/logs/__init__.py46
-rw-r--r--lib/portage/emaint/modules/logs/logs.py110
-rw-r--r--lib/portage/emaint/modules/merges/__init__.py32
-rw-r--r--lib/portage/emaint/modules/merges/merges.py291
-rw-r--r--lib/portage/emaint/modules/move/__init__.py32
-rw-r--r--lib/portage/emaint/modules/move/move.py188
-rw-r--r--lib/portage/emaint/modules/resume/__init__.py21
-rw-r--r--lib/portage/emaint/modules/resume/resume.py59
-rw-r--r--lib/portage/emaint/modules/sync/__init__.py56
-rw-r--r--lib/portage/emaint/modules/sync/sync.py462
-rw-r--r--lib/portage/emaint/modules/world/__init__.py21
-rw-r--r--lib/portage/emaint/modules/world/world.py93
-rw-r--r--lib/portage/env/__init__.py3
-rw-r--r--lib/portage/env/config.py105
-rw-r--r--lib/portage/env/loaders.py327
-rw-r--r--lib/portage/env/validators.py20
-rw-r--r--lib/portage/exception.py211
-rw-r--r--lib/portage/getbinpkg.py934
-rw-r--r--lib/portage/glsa.py726
-rw-r--r--lib/portage/localization.py46
-rw-r--r--lib/portage/locks.py607
-rw-r--r--lib/portage/mail.py177
-rw-r--r--lib/portage/manifest.py729
-rw-r--r--lib/portage/metadata.py208
-rw-r--r--lib/portage/module.py240
-rw-r--r--lib/portage/news.py452
-rw-r--r--lib/portage/output.py844
-rw-r--r--lib/portage/package/__init__.py2
-rw-r--r--lib/portage/package/ebuild/__init__.py2
-rw-r--r--lib/portage/package/ebuild/_config/KeywordsManager.py325
-rw-r--r--lib/portage/package/ebuild/_config/LicenseManager.py237
-rw-r--r--lib/portage/package/ebuild/_config/LocationsManager.py349
-rw-r--r--lib/portage/package/ebuild/_config/MaskManager.py261
-rw-r--r--lib/portage/package/ebuild/_config/UseManager.py579
-rw-r--r--lib/portage/package/ebuild/_config/VirtualsManager.py233
-rw-r--r--lib/portage/package/ebuild/_config/__init__.py2
-rw-r--r--lib/portage/package/ebuild/_config/env_var_validation.py23
-rw-r--r--lib/portage/package/ebuild/_config/features_set.py128
-rw-r--r--lib/portage/package/ebuild/_config/helper.py64
-rw-r--r--lib/portage/package/ebuild/_config/special_env_vars.py211
-rw-r--r--lib/portage/package/ebuild/_config/unpack_dependencies.py38
-rw-r--r--lib/portage/package/ebuild/_ipc/ExitCommand.py27
-rw-r--r--lib/portage/package/ebuild/_ipc/IpcCommand.py9
-rw-r--r--lib/portage/package/ebuild/_ipc/QueryCommand.py140
-rw-r--r--lib/portage/package/ebuild/_ipc/__init__.py2
-rw-r--r--lib/portage/package/ebuild/_metadata_invalid.py41
-rw-r--r--lib/portage/package/ebuild/_parallel_manifest/ManifestProcess.py43
-rw-r--r--lib/portage/package/ebuild/_parallel_manifest/ManifestScheduler.py88
-rw-r--r--lib/portage/package/ebuild/_parallel_manifest/ManifestTask.py208
-rw-r--r--lib/portage/package/ebuild/_parallel_manifest/__init__.py2
-rw-r--r--lib/portage/package/ebuild/_spawn_nofetch.py125
-rw-r--r--lib/portage/package/ebuild/config.py2875
-rw-r--r--lib/portage/package/ebuild/deprecated_profile_check.py83
-rw-r--r--lib/portage/package/ebuild/digestcheck.py155
-rw-r--r--lib/portage/package/ebuild/digestgen.py208
-rw-r--r--lib/portage/package/ebuild/doebuild.py2539
-rw-r--r--lib/portage/package/ebuild/fetch.py1174
-rw-r--r--lib/portage/package/ebuild/getmaskingreason.py126
-rw-r--r--lib/portage/package/ebuild/getmaskingstatus.py192
-rw-r--r--lib/portage/package/ebuild/prepare_build_dirs.py443
-rw-r--r--lib/portage/package/ebuild/profile_iuse.py32
-rw-r--r--lib/portage/process.py689
-rw-r--r--lib/portage/progress.py61
-rw-r--r--lib/portage/proxy/__init__.py2
-rw-r--r--lib/portage/proxy/lazyimport.py222
-rw-r--r--lib/portage/proxy/objectproxy.py98
-rw-r--r--lib/portage/repository/__init__.py2
-rw-r--r--lib/portage/repository/config.py1177
-rw-r--r--lib/portage/sync/__init__.py52
-rw-r--r--lib/portage/sync/config_checks.py72
-rw-r--r--lib/portage/sync/controller.py397
-rw-r--r--lib/portage/sync/getaddrinfo_validate.py29
-rw-r--r--lib/portage/sync/modules/__init__.py0
-rw-r--r--lib/portage/sync/modules/cvs/__init__.py47
-rw-r--r--lib/portage/sync/modules/cvs/cvs.py67
-rw-r--r--lib/portage/sync/modules/git/__init__.py65
-rw-r--r--lib/portage/sync/modules/git/git.py286
-rw-r--r--lib/portage/sync/modules/rsync/__init__.py37
-rw-r--r--lib/portage/sync/modules/rsync/rsync.py782
-rw-r--r--lib/portage/sync/modules/svn/__init__.py33
-rw-r--r--lib/portage/sync/modules/svn/svn.py89
-rw-r--r--lib/portage/sync/modules/webrsync/__init__.py51
-rw-r--r--lib/portage/sync/modules/webrsync/webrsync.py70
-rw-r--r--lib/portage/sync/old_tree_timestamp.py101
-rw-r--r--lib/portage/sync/syncbase.py263
-rw-r--r--lib/portage/tests/__init__.py353
-rw-r--r--lib/portage/tests/bin/__init__.py0
-rw-r--r--lib/portage/tests/bin/__test__.py0
-rw-r--r--lib/portage/tests/bin/setup_env.py87
-rw-r--r--lib/portage/tests/bin/test_dobin.py16
-rw-r--r--lib/portage/tests/bin/test_dodir.py18
-rw-r--r--lib/portage/tests/bin/test_doins.py355
-rw-r--r--lib/portage/tests/bin/test_eapi7_ver_funcs.py240
-rw-r--r--lib/portage/tests/bin/test_filter_bash_env.py115
-rw-r--r--lib/portage/tests/dbapi/__init__.py2
-rw-r--r--lib/portage/tests/dbapi/__test__.py0
-rw-r--r--lib/portage/tests/dbapi/test_fakedbapi.py92
-rw-r--r--lib/portage/tests/dbapi/test_portdb_cache.py184
-rw-r--r--lib/portage/tests/dep/__init__.py3
-rw-r--r--lib/portage/tests/dep/__test__.py0
-rw-r--r--lib/portage/tests/dep/testAtom.py341
-rw-r--r--lib/portage/tests/dep/testCheckRequiredUse.py234
-rw-r--r--lib/portage/tests/dep/testExtendedAtomDict.py18
-rw-r--r--lib/portage/tests/dep/testExtractAffectingUSE.py75
-rw-r--r--lib/portage/tests/dep/testStandalone.py37
-rw-r--r--lib/portage/tests/dep/test_best_match_to_list.py63
-rw-r--r--lib/portage/tests/dep/test_dep_getcpv.py37
-rw-r--r--lib/portage/tests/dep/test_dep_getrepo.py29
-rw-r--r--lib/portage/tests/dep/test_dep_getslot.py28
-rw-r--r--lib/portage/tests/dep/test_dep_getusedeps.py35
-rw-r--r--lib/portage/tests/dep/test_dnf_convert.py48
-rw-r--r--lib/portage/tests/dep/test_get_operator.py37
-rw-r--r--lib/portage/tests/dep/test_get_required_use_flags.py44
-rw-r--r--lib/portage/tests/dep/test_isjustname.py24
-rw-r--r--lib/portage/tests/dep/test_isvalidatom.py162
-rw-r--r--lib/portage/tests/dep/test_match_from_list.py146
-rw-r--r--lib/portage/tests/dep/test_overlap_dnf.py28
-rw-r--r--lib/portage/tests/dep/test_paren_reduce.py69
-rw-r--r--lib/portage/tests/dep/test_use_reduce.py626
-rw-r--r--lib/portage/tests/ebuild/__init__.py2
-rw-r--r--lib/portage/tests/ebuild/__test__.py0
-rw-r--r--lib/portage/tests/ebuild/test_array_fromfile_eof.py47
-rw-r--r--lib/portage/tests/ebuild/test_config.py346
-rw-r--r--lib/portage/tests/ebuild/test_doebuild_fd_pipes.py138
-rw-r--r--lib/portage/tests/ebuild/test_doebuild_spawn.py106
-rw-r--r--lib/portage/tests/ebuild/test_ipc_daemon.py162
-rw-r--r--lib/portage/tests/ebuild/test_spawn.py57
-rw-r--r--lib/portage/tests/ebuild/test_use_expand_incremental.py132
-rw-r--r--lib/portage/tests/emerge/__init__.py2
-rw-r--r--lib/portage/tests/emerge/__test__.py0
-rw-r--r--lib/portage/tests/emerge/test_config_protect.py293
-rw-r--r--lib/portage/tests/emerge/test_emerge_blocker_file_collision.py168
-rw-r--r--lib/portage/tests/emerge/test_emerge_slot_abi.py179
-rw-r--r--lib/portage/tests/emerge/test_global_updates.py41
-rw-r--r--lib/portage/tests/emerge/test_simple.py505
-rw-r--r--lib/portage/tests/env/__init__.py4
-rw-r--r--lib/portage/tests/env/__test__.py0
-rw-r--r--lib/portage/tests/env/config/__init__.py4
-rw-r--r--lib/portage/tests/env/config/__test__.py0
-rw-r--r--lib/portage/tests/env/config/test_PackageKeywordsFile.py40
-rw-r--r--lib/portage/tests/env/config/test_PackageMaskFile.py29
-rw-r--r--lib/portage/tests/env/config/test_PackageUseFile.py37
-rw-r--r--lib/portage/tests/env/config/test_PortageModulesFile.py38
-rw-r--r--lib/portage/tests/glsa/__init__.py2
-rw-r--r--lib/portage/tests/glsa/__test__.py0
-rw-r--r--lib/portage/tests/glsa/test_security_set.py144
-rw-r--r--lib/portage/tests/lafilefixer/__init__.py0
-rw-r--r--lib/portage/tests/lafilefixer/__test__.py0
-rw-r--r--lib/portage/tests/lafilefixer/test_lafilefixer.py145
-rw-r--r--lib/portage/tests/lazyimport/__init__.py0
-rw-r--r--lib/portage/tests/lazyimport/__test__.py0
-rw-r--r--lib/portage/tests/lazyimport/test_lazy_import_portage_baseline.py81
-rw-r--r--lib/portage/tests/lazyimport/test_preload_portage_submodules.py16
-rw-r--r--lib/portage/tests/lint/__init__.py0
-rw-r--r--lib/portage/tests/lint/__test__.py0
-rw-r--r--lib/portage/tests/lint/metadata.py11
-rw-r--r--lib/portage/tests/lint/test_bash_syntax.py54
-rw-r--r--lib/portage/tests/lint/test_compile_modules.py67
-rw-r--r--lib/portage/tests/lint/test_import_modules.py44
-rw-r--r--lib/portage/tests/locks/__init__.py2
-rw-r--r--lib/portage/tests/locks/__test__.py0
-rw-r--r--lib/portage/tests/locks/test_asynchronous_lock.py181
-rw-r--r--lib/portage/tests/locks/test_lock_nonblock.py62
-rw-r--r--lib/portage/tests/news/__init__.py3
-rw-r--r--lib/portage/tests/news/__test__.py0
-rw-r--r--lib/portage/tests/news/test_NewsItem.py96
-rw-r--r--lib/portage/tests/process/__init__.py2
-rw-r--r--lib/portage/tests/process/__test__.py0
-rw-r--r--lib/portage/tests/process/test_PopenProcess.py85
-rw-r--r--lib/portage/tests/process/test_PopenProcessBlockingIO.py63
-rw-r--r--lib/portage/tests/process/test_poll.py111
-rw-r--r--lib/portage/tests/resolver/ResolverPlayground.py842
-rw-r--r--lib/portage/tests/resolver/__init__.py2
-rw-r--r--lib/portage/tests/resolver/__test__.py0
-rw-r--r--lib/portage/tests/resolver/binpkg_multi_instance/__init__.py2
-rw-r--r--lib/portage/tests/resolver/binpkg_multi_instance/__test__.py2
-rw-r--r--lib/portage/tests/resolver/binpkg_multi_instance/test_build_id_profile_format.py134
-rw-r--r--lib/portage/tests/resolver/binpkg_multi_instance/test_rebuilt_binaries.py101
-rw-r--r--lib/portage/tests/resolver/soname/__init__.py2
-rw-r--r--lib/portage/tests/resolver/soname/__test__.py2
-rw-r--r--lib/portage/tests/resolver/soname/test_autounmask.py103
-rw-r--r--lib/portage/tests/resolver/soname/test_depclean.py61
-rw-r--r--lib/portage/tests/resolver/soname/test_downgrade.py240
-rw-r--r--lib/portage/tests/resolver/soname/test_or_choices.py92
-rw-r--r--lib/portage/tests/resolver/soname/test_reinstall.py87
-rw-r--r--lib/portage/tests/resolver/soname/test_skip_update.py86
-rw-r--r--lib/portage/tests/resolver/soname/test_slot_conflict_reinstall.py357
-rw-r--r--lib/portage/tests/resolver/soname/test_slot_conflict_update.py117
-rw-r--r--lib/portage/tests/resolver/soname/test_soname_provided.py78
-rw-r--r--lib/portage/tests/resolver/soname/test_unsatisfiable.py71
-rw-r--r--lib/portage/tests/resolver/soname/test_unsatisfied.py87
-rw-r--r--lib/portage/tests/resolver/test_autounmask.py599
-rw-r--r--lib/portage/tests/resolver/test_autounmask_binpkg_use.py64
-rw-r--r--lib/portage/tests/resolver/test_autounmask_keep_keywords.py72
-rw-r--r--lib/portage/tests/resolver/test_autounmask_multilib_use.py85
-rw-r--r--lib/portage/tests/resolver/test_autounmask_parent.py43
-rw-r--r--lib/portage/tests/resolver/test_autounmask_use_backtrack.py86
-rw-r--r--lib/portage/tests/resolver/test_autounmask_use_breakage.py103
-rw-r--r--lib/portage/tests/resolver/test_backtracking.py179
-rw-r--r--lib/portage/tests/resolver/test_bdeps.py215
-rw-r--r--lib/portage/tests/resolver/test_binary_pkg_ebuild_visibility.py144
-rw-r--r--lib/portage/tests/resolver/test_blocker.py48
-rw-r--r--lib/portage/tests/resolver/test_changed_deps.py121
-rw-r--r--lib/portage/tests/resolver/test_circular_choices.py61
-rw-r--r--lib/portage/tests/resolver/test_circular_dependencies.py84
-rw-r--r--lib/portage/tests/resolver/test_complete_graph.py148
-rw-r--r--lib/portage/tests/resolver/test_complete_if_new_subslot_without_revbump.py74
-rw-r--r--lib/portage/tests/resolver/test_depclean.py291
-rw-r--r--lib/portage/tests/resolver/test_depclean_order.py57
-rw-r--r--lib/portage/tests/resolver/test_depclean_slot_unavailable.py78
-rw-r--r--lib/portage/tests/resolver/test_depth.py252
-rw-r--r--lib/portage/tests/resolver/test_disjunctive_depend_order.py87
-rw-r--r--lib/portage/tests/resolver/test_eapi.py122
-rw-r--r--lib/portage/tests/resolver/test_features_test_use.py68
-rw-r--r--lib/portage/tests/resolver/test_imagemagick_graphicsmagick.py104
-rw-r--r--lib/portage/tests/resolver/test_keywords.py356
-rw-r--r--lib/portage/tests/resolver/test_merge_order.py478
-rw-r--r--lib/portage/tests/resolver/test_missing_iuse_and_evaluated_atoms.py31
-rw-r--r--lib/portage/tests/resolver/test_multirepo.py398
-rw-r--r--lib/portage/tests/resolver/test_multislot.py54
-rw-r--r--lib/portage/tests/resolver/test_old_dep_chain_display.py35
-rw-r--r--lib/portage/tests/resolver/test_onlydeps.py34
-rw-r--r--lib/portage/tests/resolver/test_onlydeps_circular.py51
-rw-r--r--lib/portage/tests/resolver/test_onlydeps_minimal.py48
-rw-r--r--lib/portage/tests/resolver/test_or_choices.py342
-rw-r--r--lib/portage/tests/resolver/test_or_downgrade_installed.py97
-rw-r--r--lib/portage/tests/resolver/test_or_upgrade_installed.py160
-rw-r--r--lib/portage/tests/resolver/test_output.py88
-rw-r--r--lib/portage/tests/resolver/test_package_tracker.py261
-rw-r--r--lib/portage/tests/resolver/test_profile_default_eapi.py126
-rw-r--r--lib/portage/tests/resolver/test_profile_package_set.py123
-rw-r--r--lib/portage/tests/resolver/test_rebuild.py143
-rw-r--r--lib/portage/tests/resolver/test_regular_slot_change_without_revbump.py59
-rw-r--r--lib/portage/tests/resolver/test_required_use.py134
-rw-r--r--lib/portage/tests/resolver/test_runtime_cycle_merge_order.py72
-rw-r--r--lib/portage/tests/resolver/test_simple.py74
-rw-r--r--lib/portage/tests/resolver/test_slot_abi.py457
-rw-r--r--lib/portage/tests/resolver/test_slot_abi_downgrade.py225
-rw-r--r--lib/portage/tests/resolver/test_slot_change_without_revbump.py88
-rw-r--r--lib/portage/tests/resolver/test_slot_collisions.py263
-rw-r--r--lib/portage/tests/resolver/test_slot_conflict_force_rebuild.py84
-rw-r--r--lib/portage/tests/resolver/test_slot_conflict_mask_update.py41
-rw-r--r--lib/portage/tests/resolver/test_slot_conflict_rebuild.py455
-rw-r--r--lib/portage/tests/resolver/test_slot_conflict_unsatisfied_deep_deps.py176
-rw-r--r--lib/portage/tests/resolver/test_slot_conflict_update.py98
-rw-r--r--lib/portage/tests/resolver/test_slot_operator_autounmask.py120
-rw-r--r--lib/portage/tests/resolver/test_slot_operator_complete_graph.py141
-rw-r--r--lib/portage/tests/resolver/test_slot_operator_exclusive_slots.py148
-rw-r--r--lib/portage/tests/resolver/test_slot_operator_rebuild.py121
-rw-r--r--lib/portage/tests/resolver/test_slot_operator_required_use.py72
-rw-r--r--lib/portage/tests/resolver/test_slot_operator_reverse_deps.py113
-rw-r--r--lib/portage/tests/resolver/test_slot_operator_runtime_pkg_mask.py136
-rw-r--r--lib/portage/tests/resolver/test_slot_operator_unsatisfied.py70
-rw-r--r--lib/portage/tests/resolver/test_slot_operator_unsolved.py88
-rw-r--r--lib/portage/tests/resolver/test_slot_operator_update_probe_parent_downgrade.py68
-rw-r--r--lib/portage/tests/resolver/test_solve_non_slot_operator_slot_conflicts.py75
-rw-r--r--lib/portage/tests/resolver/test_targetroot.py85
-rw-r--r--lib/portage/tests/resolver/test_unpack_dependencies.py65
-rw-r--r--lib/portage/tests/resolver/test_use_aliases.py131
-rw-r--r--lib/portage/tests/resolver/test_use_dep_defaults.py40
-rw-r--r--lib/portage/tests/resolver/test_useflags.py78
-rw-r--r--lib/portage/tests/resolver/test_virtual_minimize_children.py287
-rw-r--r--lib/portage/tests/resolver/test_virtual_slot.py217
-rw-r--r--lib/portage/tests/resolver/test_with_test_deps.py44
-rwxr-xr-xlib/portage/tests/runTests.py65
-rw-r--r--lib/portage/tests/sets/__init__.py0
-rw-r--r--lib/portage/tests/sets/__test__.py0
-rw-r--r--lib/portage/tests/sets/base/__init__.py0
-rw-r--r--lib/portage/tests/sets/base/__test__.py0
-rw-r--r--lib/portage/tests/sets/base/testInternalPackageSet.py61
-rw-r--r--lib/portage/tests/sets/files/__init__.py0
-rw-r--r--lib/portage/tests/sets/files/__test__.py0
-rw-r--r--lib/portage/tests/sets/files/testConfigFileSet.py32
-rw-r--r--lib/portage/tests/sets/files/testStaticFileSet.py27
-rw-r--r--lib/portage/tests/sets/shell/__init__.py0
-rw-r--r--lib/portage/tests/sets/shell/__test__.py0
-rw-r--r--lib/portage/tests/sets/shell/testShell.py28
-rw-r--r--lib/portage/tests/sync/__init__.py2
-rw-r--r--lib/portage/tests/sync/__test__.py0
-rw-r--r--lib/portage/tests/sync/test_sync_local.py271
-rw-r--r--lib/portage/tests/unicode/__init__.py2
-rw-r--r--lib/portage/tests/unicode/__test__.py0
-rw-r--r--lib/portage/tests/unicode/test_string_format.py108
-rw-r--r--lib/portage/tests/update/__init__.py2
-rw-r--r--lib/portage/tests/update/__test__.py0
-rw-r--r--lib/portage/tests/update/test_move_ent.py109
-rw-r--r--lib/portage/tests/update/test_move_slot_ent.py154
-rw-r--r--lib/portage/tests/update/test_update_dbentry.py277
-rw-r--r--lib/portage/tests/util/__init__.py4
-rw-r--r--lib/portage/tests/util/__test__.py0
-rw-r--r--lib/portage/tests/util/dyn_libs/__init__.py0
-rw-r--r--lib/portage/tests/util/dyn_libs/__test__.py0
-rw-r--r--lib/portage/tests/util/dyn_libs/test_soname_deps.py34
-rw-r--r--lib/portage/tests/util/eventloop/__init__.py0
-rw-r--r--lib/portage/tests/util/eventloop/__test__.py0
-rw-r--r--lib/portage/tests/util/eventloop/test_call_soon_fifo.py30
-rw-r--r--lib/portage/tests/util/file_copy/__init__.py0
-rw-r--r--lib/portage/tests/util/file_copy/__test__.py0
-rw-r--r--lib/portage/tests/util/file_copy/test_copyfile.py71
-rw-r--r--lib/portage/tests/util/futures/__init__.py0
-rw-r--r--lib/portage/tests/util/futures/__test__.py0
-rw-r--r--lib/portage/tests/util/futures/asyncio/__init__.py0
-rw-r--r--lib/portage/tests/util/futures/asyncio/__test__.py0
-rw-r--r--lib/portage/tests/util/futures/asyncio/test_child_watcher.py50
-rw-r--r--lib/portage/tests/util/futures/asyncio/test_event_loop_in_fork.py65
-rw-r--r--lib/portage/tests/util/futures/asyncio/test_pipe_closed.py151
-rw-r--r--lib/portage/tests/util/futures/asyncio/test_policy_wrapper_recursion.py29
-rw-r--r--lib/portage/tests/util/futures/asyncio/test_run_until_complete.py34
-rw-r--r--lib/portage/tests/util/futures/asyncio/test_subprocess_exec.py236
-rw-r--r--lib/portage/tests/util/futures/asyncio/test_wakeup_fd_sigchld.py76
-rw-r--r--lib/portage/tests/util/futures/test_compat_coroutine.py159
-rw-r--r--lib/portage/tests/util/futures/test_done_callback.py35
-rw-r--r--lib/portage/tests/util/futures/test_iter_completed.py86
-rw-r--r--lib/portage/tests/util/futures/test_retry.py234
-rw-r--r--lib/portage/tests/util/test_checksum.py106
-rw-r--r--lib/portage/tests/util/test_digraph.py241
-rw-r--r--lib/portage/tests/util/test_getconfig.py76
-rw-r--r--lib/portage/tests/util/test_grabdict.py11
-rw-r--r--lib/portage/tests/util/test_install_mask.py129
-rw-r--r--lib/portage/tests/util/test_normalizedPath.py14
-rw-r--r--lib/portage/tests/util/test_stackDictList.py19
-rw-r--r--lib/portage/tests/util/test_stackDicts.py33
-rw-r--r--lib/portage/tests/util/test_stackLists.py21
-rw-r--r--lib/portage/tests/util/test_uniqueArray.py26
-rw-r--r--lib/portage/tests/util/test_varExpand.py92
-rw-r--r--lib/portage/tests/util/test_whirlpool.py16
-rw-r--r--lib/portage/tests/util/test_xattr.py178
-rw-r--r--lib/portage/tests/versions/__init__.py3
-rw-r--r--lib/portage/tests/versions/__test__.py0
-rw-r--r--lib/portage/tests/versions/test_cpv_sort_key.py17
-rw-r--r--lib/portage/tests/versions/test_vercmp.py81
-rw-r--r--lib/portage/tests/xpak/__init__.py3
-rw-r--r--lib/portage/tests/xpak/__test__.py0
-rw-r--r--lib/portage/tests/xpak/test_decodeint.py16
-rw-r--r--lib/portage/update.py427
-rw-r--r--lib/portage/util/ExtractKernelVersion.py78
-rw-r--r--lib/portage/util/SlotObject.py57
-rw-r--r--lib/portage/util/_ShelveUnicodeWrapper.py45
-rw-r--r--lib/portage/util/__init__.py1854
-rw-r--r--lib/portage/util/_async/AsyncFunction.py73
-rw-r--r--lib/portage/util/_async/AsyncScheduler.py103
-rw-r--r--lib/portage/util/_async/AsyncTaskFuture.py31
-rw-r--r--lib/portage/util/_async/FileCopier.py17
-rw-r--r--lib/portage/util/_async/FileDigester.py76
-rw-r--r--lib/portage/util/_async/ForkProcess.py75
-rw-r--r--lib/portage/util/_async/PipeLogger.py149
-rw-r--r--lib/portage/util/_async/PipeReaderBlockingIO.py83
-rw-r--r--lib/portage/util/_async/PopenProcess.py33
-rw-r--r--lib/portage/util/_async/SchedulerInterface.py101
-rw-r--r--lib/portage/util/_async/TaskScheduler.py20
-rw-r--r--lib/portage/util/_async/__init__.py2
-rw-r--r--lib/portage/util/_async/run_main_scheduler.py41
-rw-r--r--lib/portage/util/_ctypes.py47
-rw-r--r--lib/portage/util/_desktop_entry.py87
-rw-r--r--lib/portage/util/_dyn_libs/LinkageMapELF.py875
-rw-r--r--lib/portage/util/_dyn_libs/NeededEntry.py82
-rw-r--r--lib/portage/util/_dyn_libs/PreservedLibsRegistry.py255
-rw-r--r--lib/portage/util/_dyn_libs/__init__.py2
-rw-r--r--lib/portage/util/_dyn_libs/display_preserved_libs.py98
-rw-r--r--lib/portage/util/_dyn_libs/soname_deps.py168
-rw-r--r--lib/portage/util/_eventloop/EventLoop.py1184
-rw-r--r--lib/portage/util/_eventloop/PollConstants.py18
-rw-r--r--lib/portage/util/_eventloop/PollSelectAdapter.py76
-rw-r--r--lib/portage/util/_eventloop/__init__.py2
-rw-r--r--lib/portage/util/_eventloop/asyncio_event_loop.py137
-rw-r--r--lib/portage/util/_eventloop/global_event_loop.py40
-rw-r--r--lib/portage/util/_get_vm_info.py80
-rw-r--r--lib/portage/util/_info_files.py138
-rw-r--r--lib/portage/util/_path.py27
-rw-r--r--lib/portage/util/_pty.py78
-rw-r--r--lib/portage/util/_urlopen.py104
-rw-r--r--lib/portage/util/_xattr.py228
-rw-r--r--lib/portage/util/backoff.py53
-rw-r--r--lib/portage/util/changelog.py69
-rw-r--r--lib/portage/util/compression_probe.py111
-rw-r--r--lib/portage/util/configparser.py76
-rw-r--r--lib/portage/util/cpuinfo.py18
-rw-r--r--lib/portage/util/digraph.py390
-rw-r--r--lib/portage/util/elf/__init__.py2
-rw-r--r--lib/portage/util/elf/constants.py46
-rw-r--r--lib/portage/util/elf/header.py65
-rw-r--r--lib/portage/util/endian/__init__.py2
-rw-r--r--lib/portage/util/endian/decode.py48
-rw-r--r--lib/portage/util/env_update.py365
-rw-r--r--lib/portage/util/file_copy/__init__.py36
-rw-r--r--lib/portage/util/formatter.py69
-rw-r--r--lib/portage/util/futures/__init__.py8
-rw-r--r--lib/portage/util/futures/_asyncio/__init__.py185
-rw-r--r--lib/portage/util/futures/_asyncio/tasks.py105
-rw-r--r--lib/portage/util/futures/compat_coroutine.py112
-rw-r--r--lib/portage/util/futures/events.py191
-rw-r--r--lib/portage/util/futures/executor/__init__.py0
-rw-r--r--lib/portage/util/futures/executor/fork.py136
-rw-r--r--lib/portage/util/futures/extendedfutures.py73
-rw-r--r--lib/portage/util/futures/futures.py199
-rw-r--r--lib/portage/util/futures/iter_completed.py183
-rw-r--r--lib/portage/util/futures/retry.py182
-rw-r--r--lib/portage/util/futures/transports.py90
-rw-r--r--lib/portage/util/futures/unix_events.py705
-rw-r--r--lib/portage/util/install_mask.py128
-rw-r--r--lib/portage/util/iterators/MultiIterGroupBy.py94
-rw-r--r--lib/portage/util/iterators/__init__.py2
-rw-r--r--lib/portage/util/lafilefixer.py185
-rw-r--r--lib/portage/util/listdir.py139
-rw-r--r--lib/portage/util/locale.py144
-rw-r--r--lib/portage/util/monotonic.py34
-rw-r--r--lib/portage/util/movefile.py369
-rw-r--r--lib/portage/util/mtimedb.py128
-rw-r--r--lib/portage/util/path.py48
-rw-r--r--lib/portage/util/socks5.py81
-rw-r--r--lib/portage/util/whirlpool.py796
-rw-r--r--lib/portage/util/writeable_check.py130
-rw-r--r--lib/portage/versions.py588
-rw-r--r--lib/portage/xml/__init__.py2
-rw-r--r--lib/portage/xml/metadata.py505
-rw-r--r--lib/portage/xpak.py499
591 files changed, 117710 insertions, 0 deletions
diff --git a/lib/_emerge/AbstractDepPriority.py b/lib/_emerge/AbstractDepPriority.py
new file mode 100644
index 000000000..1fcd04345
--- /dev/null
+++ b/lib/_emerge/AbstractDepPriority.py
@@ -0,0 +1,30 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import copy
+from portage.util.SlotObject import SlotObject
+
+class AbstractDepPriority(SlotObject):
+ __slots__ = ("buildtime", "buildtime_slot_op",
+ "runtime", "runtime_post", "runtime_slot_op")
+
+ def __lt__(self, other):
+ return self.__int__() < other
+
+ def __le__(self, other):
+ return self.__int__() <= other
+
+ def __eq__(self, other):
+ return self.__int__() == other
+
+ def __ne__(self, other):
+ return self.__int__() != other
+
+ def __gt__(self, other):
+ return self.__int__() > other
+
+ def __ge__(self, other):
+ return self.__int__() >= other
+
+ def copy(self):
+ return copy.copy(self)
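
AbstractDepPriority implements all six rich comparison operators by delegating to __int__(), which concrete subclasses such as DepPriority supply. The following standalone sketch (not part of this commit; the subclass and its sort keys are hypothetical) shows how such a subclass plugs in, assuming the lib/ directory from this commit is importable:

    from _emerge.AbstractDepPriority import AbstractDepPriority

    class DemoPriority(AbstractDepPriority):
        """Hypothetical subclass: __int__() turns the slot flags into a sort key."""

        def __int__(self):
            # Lower value sorts first; buildtime deps are treated as "hardest" here.
            if self.buildtime:
                return 0
            if self.runtime:
                return 1
            return 2

    hard = DemoPriority(buildtime=True)
    soft = DemoPriority(runtime_post=True)
    assert hard < soft            # comparison operators delegate to __int__()
    assert hard.copy() == hard    # copy() returns an equal shallow copy
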
diff --git a/lib/_emerge/AbstractEbuildProcess.py b/lib/_emerge/AbstractEbuildProcess.py
new file mode 100644
index 000000000..bda0bd83f
--- /dev/null
+++ b/lib/_emerge/AbstractEbuildProcess.py
@@ -0,0 +1,429 @@
+# Copyright 1999-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import functools
+import io
+import platform
+import stat
+import subprocess
+import tempfile
+import textwrap
+from _emerge.SpawnProcess import SpawnProcess
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.EbuildIpcDaemon import EbuildIpcDaemon
+import portage
+from portage.elog import messages as elog_messages
+from portage.localization import _
+from portage.package.ebuild._ipc.ExitCommand import ExitCommand
+from portage.package.ebuild._ipc.QueryCommand import QueryCommand
+from portage import shutil, os
+from portage.util.futures import asyncio
+from portage.util._pty import _create_pty_or_pipe
+from portage.util import apply_secpass_permissions
+
+class AbstractEbuildProcess(SpawnProcess):
+
+ __slots__ = ('phase', 'settings',) + \
+ ('_build_dir', '_build_dir_unlock', '_ipc_daemon',
+ '_exit_command', '_exit_timeout_id', '_start_future')
+
+ _phases_without_builddir = ('clean', 'cleanrm', 'depend', 'help',)
+ _phases_interactive_whitelist = ('config',)
+ _phases_without_cgroup = ('preinst', 'postinst', 'prerm', 'postrm', 'config')
+
+ # Number of seconds to allow natural exit of the ebuild
+ # process after it has called the exit command via IPC. It
+ # doesn't hurt to be generous here since the scheduler
+ # continues to process events during this period, and it can
+ # return long before the timeout expires.
+ _exit_timeout = 10 # seconds
+
+ # The EbuildIpcDaemon support is well tested, but this variable
+ # is left so we can temporarily disable it if any issues arise.
+ _enable_ipc_daemon = True
+
+ def __init__(self, **kwargs):
+ SpawnProcess.__init__(self, **kwargs)
+ if self.phase is None:
+ phase = self.settings.get("EBUILD_PHASE")
+ if not phase:
+ phase = 'other'
+ self.phase = phase
+
+ def _start(self):
+
+ need_builddir = self.phase not in self._phases_without_builddir
+
+ # This can happen if the pre-clean phase triggers
+ # die_hooks for some reason, and PORTAGE_BUILDDIR
+ # doesn't exist yet.
+ if need_builddir and \
+ not os.path.isdir(self.settings['PORTAGE_BUILDDIR']):
+ msg = _("The ebuild phase '%s' has been aborted "
+ "since PORTAGE_BUILDDIR does not exist: '%s'") % \
+ (self.phase, self.settings['PORTAGE_BUILDDIR'])
+ self._eerror(textwrap.wrap(msg, 72))
+ self.returncode = 1
+ self._async_wait()
+ return
+
+ # Check if the cgroup hierarchy is in place. If it's not, mount it.
+ if (os.geteuid() == 0 and platform.system() == 'Linux'
+ and 'cgroup' in self.settings.features
+ and self.phase not in self._phases_without_cgroup):
+ cgroup_root = '/sys/fs/cgroup'
+ cgroup_portage = os.path.join(cgroup_root, 'portage')
+
+ try:
+ # cgroup tmpfs
+ if not os.path.ismount(cgroup_root):
+ # we expect /sys/fs to be there already
+ if not os.path.isdir(cgroup_root):
+ os.mkdir(cgroup_root, 0o755)
+ subprocess.check_call(['mount', '-t', 'tmpfs',
+ '-o', 'rw,nosuid,nodev,noexec,mode=0755',
+ 'tmpfs', cgroup_root])
+
+ # portage subsystem
+ if not os.path.ismount(cgroup_portage):
+ if not os.path.isdir(cgroup_portage):
+ os.mkdir(cgroup_portage, 0o755)
+ subprocess.check_call(['mount', '-t', 'cgroup',
+ '-o', 'rw,nosuid,nodev,noexec,none,name=portage',
+ 'tmpfs', cgroup_portage])
+ with open(os.path.join(
+ cgroup_portage, 'release_agent'), 'w') as f:
+ f.write(os.path.join(self.settings['PORTAGE_BIN_PATH'],
+ 'cgroup-release-agent'))
+ with open(os.path.join(
+ cgroup_portage, 'notify_on_release'), 'w') as f:
+ f.write('1')
+ else:
+ # Update release_agent if the path it points to no longer
+ # exists, which happens when it was written while portage was
+ # updating itself and referred to a temporary location.
+ release_agent = os.path.join(
+ cgroup_portage, 'release_agent')
+ try:
+ with open(release_agent) as f:
+ release_agent_path = f.readline().rstrip('\n')
+ except EnvironmentError:
+ release_agent_path = None
+
+ if (release_agent_path is None or
+ not os.path.exists(release_agent_path)):
+ with open(release_agent, 'w') as f:
+ f.write(os.path.join(
+ self.settings['PORTAGE_BIN_PATH'],
+ 'cgroup-release-agent'))
+
+ cgroup_path = tempfile.mkdtemp(dir=cgroup_portage,
+ prefix='%s:%s.' % (self.settings["CATEGORY"],
+ self.settings["PF"]))
+ except (subprocess.CalledProcessError, OSError):
+ pass
+ else:
+ self.cgroup = cgroup_path
+
+ if self.background:
+ # Automatically prevent color codes from showing up in logs,
+ # since we're not displaying to a terminal anyway.
+ self.settings['NOCOLOR'] = 'true'
+
+ start_ipc_daemon = False
+ if self._enable_ipc_daemon:
+ self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None)
+ if self.phase not in self._phases_without_builddir:
+ start_ipc_daemon = True
+ if 'PORTAGE_BUILDDIR_LOCKED' not in self.settings:
+ self._build_dir = EbuildBuildDir(
+ scheduler=self.scheduler, settings=self.settings)
+ self._start_future = self._build_dir.async_lock()
+ self._start_future.add_done_callback(
+ functools.partial(self._start_post_builddir_lock,
+ start_ipc_daemon=start_ipc_daemon))
+ return
+ else:
+ self.settings.pop('PORTAGE_IPC_DAEMON', None)
+ else:
+ # Since the IPC daemon is disabled, use a simple tempfile based
+ # approach to detect unexpected exit like in bug #190128.
+ self.settings.pop('PORTAGE_IPC_DAEMON', None)
+ if self.phase not in self._phases_without_builddir:
+ exit_file = os.path.join(
+ self.settings['PORTAGE_BUILDDIR'],
+ '.exit_status')
+ self.settings['PORTAGE_EBUILD_EXIT_FILE'] = exit_file
+ try:
+ os.unlink(exit_file)
+ except OSError:
+ if os.path.exists(exit_file):
+ # make sure it doesn't exist
+ raise
+ else:
+ self.settings.pop('PORTAGE_EBUILD_EXIT_FILE', None)
+
+ self._start_post_builddir_lock(start_ipc_daemon=start_ipc_daemon)
+
+ def _start_post_builddir_lock(self, lock_future=None, start_ipc_daemon=False):
+ if lock_future is not None:
+ if lock_future is not self._start_future:
+ raise AssertionError('lock_future is not self._start_future')
+ self._start_future = None
+ if lock_future.cancelled():
+ self._build_dir = None
+ self.cancelled = True
+ self._was_cancelled()
+ self._async_wait()
+ return
+
+ lock_future.result()
+
+ if start_ipc_daemon:
+ self.settings['PORTAGE_IPC_DAEMON'] = "1"
+ self._start_ipc_daemon()
+
+ if self.fd_pipes is None:
+ self.fd_pipes = {}
+ null_fd = None
+ if 0 not in self.fd_pipes and \
+ self.phase not in self._phases_interactive_whitelist and \
+ "interactive" not in self.settings.get("PROPERTIES", "").split():
+ null_fd = os.open('/dev/null', os.O_RDONLY)
+ self.fd_pipes[0] = null_fd
+
+ try:
+ SpawnProcess._start(self)
+ finally:
+ if null_fd is not None:
+ os.close(null_fd)
+
+ def _init_ipc_fifos(self):
+
+ input_fifo = os.path.join(
+ self.settings['PORTAGE_BUILDDIR'], '.ipc_in')
+ output_fifo = os.path.join(
+ self.settings['PORTAGE_BUILDDIR'], '.ipc_out')
+
+ for p in (input_fifo, output_fifo):
+
+ st = None
+ try:
+ st = os.lstat(p)
+ except OSError:
+ os.mkfifo(p)
+ else:
+ if not stat.S_ISFIFO(st.st_mode):
+ st = None
+ try:
+ os.unlink(p)
+ except OSError:
+ pass
+ os.mkfifo(p)
+
+ apply_secpass_permissions(p,
+ uid=os.getuid(),
+ gid=portage.data.portage_gid,
+ mode=0o770, stat_cached=st)
+
+ return (input_fifo, output_fifo)
+
+ def _start_ipc_daemon(self):
+ self._exit_command = ExitCommand()
+ self._exit_command.reply_hook = self._exit_command_callback
+ query_command = QueryCommand(self.settings, self.phase)
+ commands = {
+ 'available_eclasses' : query_command,
+ 'best_version' : query_command,
+ 'eclass_path' : query_command,
+ 'exit' : self._exit_command,
+ 'has_version' : query_command,
+ 'license_path' : query_command,
+ 'master_repositories' : query_command,
+ 'repository_path' : query_command,
+ }
+ input_fifo, output_fifo = self._init_ipc_fifos()
+ self._ipc_daemon = EbuildIpcDaemon(commands=commands,
+ input_fifo=input_fifo,
+ output_fifo=output_fifo,
+ scheduler=self.scheduler)
+ self._ipc_daemon.start()
+
+ def _exit_command_callback(self):
+ if self._registered:
+ # Let the process exit naturally, if possible.
+ self._exit_timeout_id = \
+ self.scheduler.call_later(self._exit_timeout,
+ self._exit_command_timeout_cb)
+
+ def _exit_command_timeout_cb(self):
+ if self._registered:
+ # If it doesn't exit naturally in a reasonable amount
+ # of time, kill it (solves bug #278895). We try to avoid
+ # this when possible since it makes sandbox complain about
+ # being killed by a signal.
+ self.cancel()
+ self._exit_timeout_id = \
+ self.scheduler.call_later(self._cancel_timeout,
+ self._cancel_timeout_cb)
+ else:
+ self._exit_timeout_id = None
+
+ def _cancel_timeout_cb(self):
+ self._exit_timeout_id = None
+ self._async_waitpid()
+
+ def _orphan_process_warn(self):
+ phase = self.phase
+
+ msg = _("The ebuild phase '%s' with pid %s appears "
+ "to have left an orphan process running in the "
+ "background.") % (phase, self.pid)
+
+ self._eerror(textwrap.wrap(msg, 72))
+
+ def _pipe(self, fd_pipes):
+ stdout_pipe = None
+ if not self.background:
+ stdout_pipe = fd_pipes.get(1)
+ got_pty, master_fd, slave_fd = \
+ _create_pty_or_pipe(copy_term_size=stdout_pipe)
+ return (master_fd, slave_fd)
+
+ def _can_log(self, slave_fd):
+ # With sesandbox, logging works through a pty but not through a
+ # normal pipe. So, disable logging if ptys are broken.
+ # See Bug #162404.
+ # TODO: Add support for logging via named pipe (fifo) with
+ # sesandbox, since EbuildIpcDaemon uses a fifo and it's known
+ # to be compatible with sesandbox.
+ return not ('sesandbox' in self.settings.features \
+ and self.settings.selinux_enabled()) or os.isatty(slave_fd)
+
+ def _killed_by_signal(self, signum):
+ msg = _("The ebuild phase '%s' has been "
+ "killed by signal %s.") % (self.phase, signum)
+ self._eerror(textwrap.wrap(msg, 72))
+
+ def _unexpected_exit(self):
+
+ phase = self.phase
+
+ msg = _("The ebuild phase '%s' has exited "
+ "unexpectedly. This type of behavior "
+ "is known to be triggered "
+ "by things such as failed variable "
+ "assignments (bug #190128) or bad substitution "
+ "errors (bug #200313). Normally, before exiting, bash should "
+ "have displayed an error message above. If bash did not "
+ "produce an error message above, it's possible "
+ "that the ebuild has called `exit` when it "
+ "should have called `die` instead. This behavior may also "
+ "be triggered by a corrupt bash binary or a hardware "
+ "problem such as memory or cpu malfunction. If the problem is not "
+ "reproducible or it appears to occur randomly, then it is likely "
+ "to be triggered by a hardware problem. "
+ "If you suspect a hardware problem then you should "
+ "try some basic hardware diagnostics such as memtest. "
+ "Please do not report this as a bug unless it is consistently "
+ "reproducible and you are sure that your bash binary and hardware "
+ "are functioning properly.") % phase
+
+ self._eerror(textwrap.wrap(msg, 72))
+
+ def _eerror(self, lines):
+ self._elog('eerror', lines)
+
+ def _elog(self, elog_funcname, lines):
+ out = io.StringIO()
+ phase = self.phase
+ elog_func = getattr(elog_messages, elog_funcname)
+ global_havecolor = portage.output.havecolor
+ try:
+ portage.output.havecolor = \
+ self.settings.get('NOCOLOR', 'false').lower() in ('no', 'false')
+ for line in lines:
+ elog_func(line, phase=phase, key=self.settings.mycpv, out=out)
+ finally:
+ portage.output.havecolor = global_havecolor
+ msg = out.getvalue()
+ if msg:
+ log_path = None
+ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+ self.scheduler.output(msg, log_path=log_path)
+
+ def _async_waitpid_cb(self, *args, **kwargs):
+ """
+ Override _async_waitpid_cb to perform cleanup that is
+ not necessarily idempotent.
+ """
+ SpawnProcess._async_waitpid_cb(self, *args, **kwargs)
+
+ if self._exit_timeout_id is not None:
+ self._exit_timeout_id.cancel()
+ self._exit_timeout_id = None
+
+ if self._ipc_daemon is not None:
+ self._ipc_daemon.cancel()
+ if self._exit_command.exitcode is not None:
+ self.returncode = self._exit_command.exitcode
+ else:
+ if self.returncode < 0:
+ if not self.cancelled:
+ self._killed_by_signal(-self.returncode)
+ else:
+ self.returncode = 1
+ if not self.cancelled:
+ self._unexpected_exit()
+
+ elif not self.cancelled:
+ exit_file = self.settings.get('PORTAGE_EBUILD_EXIT_FILE')
+ if exit_file and not os.path.exists(exit_file):
+ if self.returncode < 0:
+ if not self.cancelled:
+ self._killed_by_signal(-self.returncode)
+ else:
+ self.returncode = 1
+ if not self.cancelled:
+ self._unexpected_exit()
+
+ def _async_wait(self):
+ """
+ Override _async_wait to asynchronously unlock self._build_dir
+ when necessary.
+ """
+ if self._build_dir is None:
+ SpawnProcess._async_wait(self)
+ elif self._build_dir_unlock is None:
+ if self.returncode is None:
+ raise asyncio.InvalidStateError('Result is not ready.')
+ self._async_unlock_builddir(returncode=self.returncode)
+
+ def _async_unlock_builddir(self, returncode=None):
+ """
+ Release the lock asynchronously, and if a returncode parameter
+ is given then set self.returncode and notify exit listeners.
+ """
+ if self._build_dir_unlock is not None:
+ raise AssertionError('unlock already in progress')
+ if returncode is not None:
+ # The returncode will be set after unlock is complete.
+ self.returncode = None
+ self._build_dir_unlock = self._build_dir.async_unlock()
+ # Unlock only once.
+ self._build_dir = None
+ self._build_dir_unlock.add_done_callback(
+ functools.partial(self._unlock_builddir_exit, returncode=returncode))
+
+ def _unlock_builddir_exit(self, unlock_future, returncode=None):
+ # Normally, async_unlock should not raise an exception here.
+ unlock_future.cancelled() or unlock_future.result()
+ if returncode is not None:
+ if unlock_future.cancelled():
+ self.cancelled = True
+ self._was_cancelled()
+ else:
+ self.returncode = returncode
+ SpawnProcess._async_wait(self)
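
The exit-command handling above is a two-stage pattern: once the ebuild reports completion over IPC, the scheduler waits _exit_timeout seconds for the process to exit on its own, and only then cancels it. A rough standalone analogue using plain asyncio (hypothetical helper, not portage code) looks like this:

    import asyncio

    GRACE_PERIOD = 10  # seconds, mirroring _exit_timeout above

    async def reap_with_grace_period(cmd):
        # Launch the child, then give it a bounded window to exit naturally.
        proc = await asyncio.create_subprocess_exec(*cmd)
        try:
            return await asyncio.wait_for(proc.wait(), timeout=GRACE_PERIOD)
        except asyncio.TimeoutError:
            # Roughly what _exit_command_timeout_cb() achieves via self.cancel().
            proc.terminate()
            return await proc.wait()

    if __name__ == "__main__":
        print(asyncio.run(reap_with_grace_period(["true"])))
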
diff --git a/lib/_emerge/AbstractPollTask.py b/lib/_emerge/AbstractPollTask.py
new file mode 100644
index 000000000..4157794c6
--- /dev/null
+++ b/lib/_emerge/AbstractPollTask.py
@@ -0,0 +1,117 @@
+# Copyright 1999-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import array
+import errno
+import logging
+import os
+
+from portage.util import writemsg_level
+from portage.util.futures import asyncio
+from _emerge.AsynchronousTask import AsynchronousTask
+
+class AbstractPollTask(AsynchronousTask):
+
+ __slots__ = ("_registered",)
+
+ _bufsize = 4096
+
+ def isAlive(self):
+ return bool(self._registered)
+
+ def _read_array(self, f):
+ """
+ NOTE: array.fromfile() is used here only for testing purposes,
+ because it has bugs in all known versions of Python (including
+ Python 2.7 and Python 3.2). See PipeReaderArrayTestCase.
+
+ A benchmark that copies bytes from /dev/zero to /dev/null shows
+ that arrays give a 15% performance improvement for Python 2.7.14.
+ However, arrays significantly *decrease* performance for Python 3.
+ """
+ buf = array.array('B')
+ try:
+ buf.fromfile(f, self._bufsize)
+ except EOFError:
+ pass
+ except TypeError:
+ # Python 3.2:
+ # TypeError: read() didn't return bytes
+ pass
+ except IOError as e:
+ # EIO happens with pty on Linux after the
+ # slave end of the pty has been closed.
+ if e.errno == errno.EIO:
+ # EOF: return empty string of bytes
+ pass
+ elif e.errno == errno.EAGAIN:
+ # EAGAIN: return None
+ buf = None
+ else:
+ raise
+
+ if buf is not None:
+ try:
+ # Python >=3.2
+ buf = buf.tobytes()
+ except AttributeError:
+ buf = buf.tostring()
+
+ return buf
+
+ def _read_buf(self, fd):
+ """
+ Read self._bufsize into a string of bytes, handling EAGAIN and
+ EIO. This will only call os.read() once, so the caller should
+ call this method in a loop until either None or an empty string
+ of bytes is returned. An empty string of bytes indicates EOF.
+ None indicates EAGAIN.
+
+ NOTE: os.read() will be called regardless of the event flags,
+ since otherwise data may be lost (see bug #531724).
+
+ @param fd: file descriptor (non-blocking mode required)
+ @type fd: int
+ @rtype: bytes or None
+ @return: A string of bytes, or None
+ """
+ # NOTE: array.fromfile() is no longer used here because it has
+ # bugs in all known versions of Python (including Python 2.7
+ # and Python 3.2).
+ buf = None
+ try:
+ buf = os.read(fd, self._bufsize)
+ except OSError as e:
+ # EIO happens with pty on Linux after the
+ # slave end of the pty has been closed.
+ if e.errno == errno.EIO:
+ # EOF: return empty string of bytes
+ buf = b''
+ elif e.errno == errno.EAGAIN:
+ # EAGAIN: return None
+ buf = None
+ else:
+ raise
+
+ return buf
+
+ def _async_wait(self):
+ self._unregister()
+ super(AbstractPollTask, self)._async_wait()
+
+ def _unregister(self):
+ self._registered = False
+
+ def _wait_loop(self, timeout=None):
+ loop = self.scheduler
+ tasks = [self.async_wait()]
+ if timeout is not None:
+ tasks.append(asyncio.ensure_future(
+ asyncio.sleep(timeout, loop=loop), loop=loop))
+ try:
+ loop.run_until_complete(asyncio.ensure_future(
+ asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED,
+ loop=loop), loop=loop))
+ finally:
+ for task in tasks:
+ task.cancel()
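
_read_buf() establishes the convention used by every reader built on this class: for a non-blocking descriptor, b'' signals EOF and None signals EAGAIN (no data yet). A minimal self-contained sketch of a consumer written against that convention (illustrative only, not portage code):

    import errno
    import os

    BUFSIZE = 4096  # mirrors AbstractPollTask._bufsize

    def read_buf(fd):
        """Non-blocking read: b'' means EOF, None means try again later."""
        try:
            return os.read(fd, BUFSIZE)
        except OSError as e:
            if e.errno == errno.EIO:     # pty slave closed on Linux: treat as EOF
                return b""
            if e.errno == errno.EAGAIN:  # nothing available right now
                return None
            raise

    def drain(fd, on_data):
        """Call from an event-loop read callback; return False once EOF is seen."""
        while True:
            buf = read_buf(fd)
            if buf is None:   # EAGAIN: keep the fd registered and wait
                return True
            if not buf:       # EOF: caller should unregister and close the fd
                return False
            on_data(buf)
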
diff --git a/lib/_emerge/AsynchronousLock.py b/lib/_emerge/AsynchronousLock.py
new file mode 100644
index 000000000..aed1bcb15
--- /dev/null
+++ b/lib/_emerge/AsynchronousLock.py
@@ -0,0 +1,313 @@
+# Copyright 2010-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import fcntl
+import logging
+import sys
+
+try:
+ import dummy_threading
+except ImportError:
+ dummy_threading = None
+
+try:
+ import threading
+except ImportError:
+ threading = dummy_threading
+
+import portage
+from portage import os
+from portage.exception import TryAgain
+from portage.localization import _
+from portage.locks import lockfile, unlockfile
+from portage.util import writemsg_level
+from _emerge.AbstractPollTask import AbstractPollTask
+from _emerge.AsynchronousTask import AsynchronousTask
+from _emerge.SpawnProcess import SpawnProcess
+
+class AsynchronousLock(AsynchronousTask):
+ """
+ This uses the portage.locks module to acquire a lock asynchronously,
+ using either a thread (if available) or a subprocess.
+
+ The default behavior is to use a process instead of a thread, since
+ there is currently no way to interrupt a thread that is waiting for
+ a lock (notably, SIGINT doesn't work because python delivers all
+ signals to the main thread).
+ """
+
+ __slots__ = ('path',) + \
+ ('_imp', '_force_async', '_force_dummy', '_force_process', \
+ '_force_thread', '_unlock_future')
+
+ _use_process_by_default = True
+
+ def _start(self):
+
+ if not self._force_async:
+ try:
+ self._imp = lockfile(self.path,
+ wantnewlockfile=True, flags=os.O_NONBLOCK)
+ except TryAgain:
+ pass
+ else:
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ if self._force_process or \
+ (not self._force_thread and \
+ (self._use_process_by_default or threading is dummy_threading)):
+ self._imp = _LockProcess(path=self.path, scheduler=self.scheduler)
+ else:
+ self._imp = _LockThread(path=self.path,
+ scheduler=self.scheduler,
+ _force_dummy=self._force_dummy)
+
+ self._imp.addExitListener(self._imp_exit)
+ self._imp.start()
+
+ def _imp_exit(self, imp):
+ # call exit listeners
+ self.returncode = imp.returncode
+ self._async_wait()
+
+ def _cancel(self):
+ if isinstance(self._imp, AsynchronousTask):
+ self._imp.cancel()
+
+ def _poll(self):
+ if isinstance(self._imp, AsynchronousTask):
+ self._imp.poll()
+ return self.returncode
+
+ def async_unlock(self):
+ """
+ Release the lock asynchronously. Release notification is available
+ via the add_done_callback method of the returned Future instance.
+
+ @returns: Future, result is None
+ """
+ if self._imp is None:
+ raise AssertionError('not locked')
+ if self._unlock_future is not None:
+ raise AssertionError("already unlocked")
+ if isinstance(self._imp, (_LockProcess, _LockThread)):
+ unlock_future = self._imp.async_unlock()
+ else:
+ unlockfile(self._imp)
+ unlock_future = self.scheduler.create_future()
+ self.scheduler.call_soon(unlock_future.set_result, None)
+ self._imp = None
+ self._unlock_future = unlock_future
+ return unlock_future
+
+
+class _LockThread(AbstractPollTask):
+ """
+ This uses the portage.locks module to acquire a lock asynchronously,
+ using a background thread. After the lock is acquired, the thread
+ writes to a pipe in order to notify a poll loop running in the main
+ thread.
+
+ If the threading module is unavailable then the dummy_threading
+ module will be used, and the lock will be acquired synchronously
+ (before the start() method returns).
+ """
+
+ __slots__ = ('path',) + \
+ ('_force_dummy', '_lock_obj', '_thread', '_unlock_future')
+
+ def _start(self):
+ self._registered = True
+ threading_mod = threading
+ if self._force_dummy:
+ threading_mod = dummy_threading
+ self._thread = threading_mod.Thread(target=self._run_lock)
+ self._thread.daemon = True
+ self._thread.start()
+
+ def _run_lock(self):
+ self._lock_obj = lockfile(self.path, wantnewlockfile=True)
+ # Thread-safe callback to EventLoop
+ self.scheduler.call_soon_threadsafe(self._run_lock_cb)
+
+ def _run_lock_cb(self):
+ self._unregister()
+ self.returncode = os.EX_OK
+ self._async_wait()
+
+ def _cancel(self):
+ # There's currently no way to force thread termination.
+ pass
+
+ def _unlock(self):
+ if self._lock_obj is None:
+ raise AssertionError('not locked')
+ if self.returncode is None:
+ raise AssertionError('lock not acquired yet')
+ if self._unlock_future is not None:
+ raise AssertionError("already unlocked")
+ self._unlock_future = self.scheduler.create_future()
+ unlockfile(self._lock_obj)
+ self._lock_obj = None
+
+ def async_unlock(self):
+ """
+ Release the lock asynchronously. Release notification is available
+ via the add_done_callback method of the returned Future instance.
+
+ @returns: Future, result is None
+ """
+ self._unlock()
+ self.scheduler.call_soon(self._unlock_future.set_result, None)
+ return self._unlock_future
+
+ def _unregister(self):
+ self._registered = False
+
+ if self._thread is not None:
+ self._thread.join()
+ self._thread = None
+
+class _LockProcess(AbstractPollTask):
+ """
+ This uses the portage.locks module to acquire a lock asynchronously,
+ using a subprocess. After the lock is acquired, the process
+ writes to a pipe in order to notify a poll loop running in the main
+ process. The unlock() method notifies the subprocess to release the
+ lock and exit.
+ """
+
+ __slots__ = ('path',) + \
+ ('_acquired', '_kill_test', '_proc', '_files', '_unlock_future')
+
+ def _start(self):
+ in_pr, in_pw = os.pipe()
+ out_pr, out_pw = os.pipe()
+ self._files = {}
+ self._files['pipe_in'] = in_pr
+ self._files['pipe_out'] = out_pw
+
+ fcntl.fcntl(in_pr, fcntl.F_SETFL,
+ fcntl.fcntl(in_pr, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(in_pr, fcntl.F_SETFD,
+ fcntl.fcntl(in_pr, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self.scheduler.add_reader(in_pr, self._output_handler)
+ self._registered = True
+ self._proc = SpawnProcess(
+ args=[portage._python_interpreter,
+ os.path.join(portage._bin_path, 'lock-helper.py'), self.path],
+ env=dict(os.environ, PORTAGE_PYM_PATH=portage._pym_path),
+ fd_pipes={0:out_pr, 1:in_pw, 2:sys.__stderr__.fileno()},
+ scheduler=self.scheduler)
+ self._proc.addExitListener(self._proc_exit)
+ self._proc.start()
+ os.close(out_pr)
+ os.close(in_pw)
+
+ def _proc_exit(self, proc):
+
+ if self._files is not None:
+ # Close pipe_out if it's still open, since it's useless
+ # after the process has exited. This helps to avoid
+ # "ResourceWarning: unclosed file" since Python 3.2.
+ try:
+ pipe_out = self._files.pop('pipe_out')
+ except KeyError:
+ pass
+ else:
+ os.close(pipe_out)
+
+ if proc.returncode != os.EX_OK:
+ # Typically, this will happen due to the
+ # process being killed by a signal.
+
+ if not self._acquired:
+ # If the lock hasn't been acquired yet, the
+ # caller can check the returncode and handle
+ # this failure appropriately.
+ if not (self.cancelled or self._kill_test):
+ writemsg_level("_LockProcess: %s\n" % \
+ _("failed to acquire lock on '%s'") % (self.path,),
+ level=logging.ERROR, noiselevel=-1)
+ self._unregister()
+ self.returncode = proc.returncode
+ self._async_wait()
+ return
+
+ if not self.cancelled and \
+ self._unlock_future is None:
+ # We don't want lost locks going unnoticed, so it's
+ # only safe to ignore if either the cancel() or
+ # async_unlock() methods have been previously called.
+ raise AssertionError("lock process failed with returncode %s" \
+ % (proc.returncode,))
+
+ if self._unlock_future is not None:
+ self._unlock_future.set_result(None)
+
+ def _cancel(self):
+ if self._proc is not None:
+ self._proc.cancel()
+
+ def _poll(self):
+ if self._proc is not None:
+ self._proc.poll()
+ return self.returncode
+
+ def _output_handler(self):
+ buf = self._read_buf(self._files['pipe_in'])
+ if buf:
+ self._acquired = True
+ self._unregister()
+ self.returncode = os.EX_OK
+ self._async_wait()
+
+ return True
+
+ def _unregister(self):
+ self._registered = False
+
+ if self._files is not None:
+ try:
+ pipe_in = self._files.pop('pipe_in')
+ except KeyError:
+ pass
+ else:
+ self.scheduler.remove_reader(pipe_in)
+ os.close(pipe_in)
+
+ def _unlock(self):
+ if self._proc is None:
+ raise AssertionError('not locked')
+ if not self._acquired:
+ raise AssertionError('lock not acquired yet')
+ if self.returncode != os.EX_OK:
+ raise AssertionError("lock process failed with returncode %s" \
+ % (self.returncode,))
+ if self._unlock_future is not None:
+ raise AssertionError("already unlocked")
+ self._unlock_future = self.scheduler.create_future()
+ os.write(self._files['pipe_out'], b'\0')
+ os.close(self._files['pipe_out'])
+ self._files = None
+
+ def async_unlock(self):
+ """
+ Release the lock asynchronously. Release notification is available
+ via the add_done_callback method of the returned Future instance.
+
+ @returns: Future, result is None
+ """
+ self._unlock()
+ return self._unlock_future
diff --git a/lib/_emerge/AsynchronousTask.py b/lib/_emerge/AsynchronousTask.py
new file mode 100644
index 000000000..cf6e6dc44
--- /dev/null
+++ b/lib/_emerge/AsynchronousTask.py
@@ -0,0 +1,196 @@
+# Copyright 1999-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import signal
+
+from portage import os
+from portage.util.futures import asyncio
+from portage.util.SlotObject import SlotObject
+
+class AsynchronousTask(SlotObject):
+ """
+ Subclasses override _wait() and _poll() so that calls
+ to public methods can be wrapped for implementing
+ hooks such as exit listener notification.
+
+ Subclasses should call self._async_wait() to notify exit listeners after
+ the task is complete and self.returncode has been set.
+ """
+
+ __slots__ = ("background", "cancelled", "returncode", "scheduler") + \
+ ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
+
+ _cancelled_returncode = -signal.SIGINT
+
+ def start(self):
+ """
+ Start an asynchronous task and then return as soon as possible.
+ """
+ self._start_hook()
+ self._start()
+
+ def async_wait(self):
+ """
+ Wait for returncode asynchronously. Notification is available
+ via the add_done_callback method of the returned Future instance.
+
+ @returns: Future, result is self.returncode
+ """
+ waiter = self.scheduler.create_future()
+ exit_listener = lambda self: waiter.set_result(self.returncode)
+ self.addExitListener(exit_listener)
+ waiter.add_done_callback(lambda waiter:
+ self.removeExitListener(exit_listener) if waiter.cancelled() else None)
+ if self.returncode is not None:
+ # If the returncode is not None, it means the exit event has already
+ # happened, so use _async_wait() to guarantee that the exit_listener
+ # is called. This does not do any harm because a given exit listener
+ # is never called more than once.
+ self._async_wait()
+ return waiter
+
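+ # Usage sketch (not part of the API): waiting for a task without blocking
+ # the event loop. 'task' is a hypothetical name; any started
+ # AsynchronousTask with a scheduler behaves the same way.
+ #
+ #     task.start()
+ #     future = task.async_wait()
+ #     future.add_done_callback(
+ #         lambda f: print("task exited with returncode", f.result()))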
+ def _start(self):
+ self.returncode = os.EX_OK
+ self._async_wait()
+
+ def isAlive(self):
+ return self.returncode is None
+
+ def poll(self):
+ if self.returncode is not None:
+ return self.returncode
+ self._poll()
+ self._wait_hook()
+ return self.returncode
+
+ def _poll(self):
+ return self.returncode
+
+ def wait(self):
+ """
+ Wait for the returncode attribute to become ready, and return
+ it. If the returncode is not ready and the event loop is already
+ running, then the async_wait() method should be used instead of
+ wait(), because wait() will raise asyncio.InvalidStateError in
+ this case.
+
+ @rtype: int
+ @returns: the value of self.returncode
+ """
+ if self.returncode is None:
+ if self.scheduler.is_running():
+ raise asyncio.InvalidStateError('Result is not ready.')
+ self.scheduler.run_until_complete(self.async_wait())
+ self._wait_hook()
+ return self.returncode
+
+ def _async_wait(self):
+ """
+ For cases where _start exits synchronously, this method is a
+ convenient way to trigger an asynchronous call to self.wait()
+ (in order to notify exit listeners), avoiding excessive event
+ loop recursion (or stack overflow) that synchronous calling of
+ exit listeners can cause. This method is thread-safe.
+ """
+ self.scheduler.call_soon(self.wait)
+
+ def cancel(self):
+ """
+ Cancel the task, but do not wait for exit status. If asynchronous exit
+ notification is desired, then use addExitListener to add a listener
+ before calling this method.
+ NOTE: Synchronous waiting for status is not supported, since it would
+ be vulnerable to hitting the recursion limit when a large number of
+ tasks need to be terminated simultaneously, like in bug #402335.
+ """
+ if not self.cancelled:
+ self.cancelled = True
+ self._cancel()
+
+ def _cancel(self):
+ """
+ Subclasses should implement this, as a template method
+ to be called by AsynchronousTask.cancel().
+ """
+ pass
+
+ def _was_cancelled(self):
+ """
+ If cancelled, set returncode if necessary and return True.
+ Otherwise, return False.
+ """
+ if self.cancelled:
+ if self.returncode is None:
+ self.returncode = self._cancelled_returncode
+ return True
+ return False
+
+ def addStartListener(self, f):
+ """
+ The function will be called with one argument, a reference to self.
+ """
+ if self._start_listeners is None:
+ self._start_listeners = []
+ self._start_listeners.append(f)
+
+ def removeStartListener(self, f):
+ if self._start_listeners is None:
+ return
+ self._start_listeners.remove(f)
+
+ def _start_hook(self):
+ if self._start_listeners is not None:
+ start_listeners = self._start_listeners
+ self._start_listeners = None
+
+ for f in start_listeners:
+ f(self)
+
+ def addExitListener(self, f):
+ """
+ The function will be called with one argument, a reference to self.
+ """
+ if self._exit_listeners is None:
+ self._exit_listeners = []
+ self._exit_listeners.append(f)
+
+ def removeExitListener(self, f):
+ if self._exit_listeners is None:
+ if self._exit_listener_stack is not None:
+ self._exit_listener_stack.remove(f)
+ return
+ self._exit_listeners.remove(f)
+
+ def _wait_hook(self):
+ """
+ Call this method after the task completes, just before returning
+ the returncode from wait() or poll(). This hook is
+ used to trigger exit listeners when the returncode first
+ becomes available.
+ """
+ if self.returncode is not None and \
+ self._exit_listeners is not None:
+
+ # This prevents recursion, in case one of the
+ # exit handlers triggers this method again by
+ # calling wait(). Use a stack that gives
+ # removeExitListener() an opportunity to consume
+ # listeners from the stack, before they can get
+ # called below. This is necessary because a call
+ # to one exit listener may result in a call to
+ # removeExitListener() for another listener on
+ # the stack. That listener needs to be removed
+ # from the stack since it would be inconsistent
+ # to call it after it has been passed into
+ # removeExitListener().
+ self._exit_listener_stack = self._exit_listeners
+ self._exit_listeners = None
+
+ # Execute exit listeners in reverse order, so that
+ # the last added listener is executed first. This
+ # allows SequentialTaskQueue to decrement its running
+ # task count as soon as one of its tasks exits, so that
+ # the value is accurate when other listeners execute.
+ while self._exit_listener_stack:
+ self._exit_listener_stack.pop()(self)
+
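+ # Minimal subclass sketch, illustrating the contract described in the class
+ # docstring: _start() does the work (here synchronously), sets returncode,
+ # and calls _async_wait() so exit listeners fire through the event loop.
+ # 'GreetingTask' is hypothetical and not part of portage.
+ #
+ #     class GreetingTask(AsynchronousTask):
+ #         __slots__ = ("name",)
+ #
+ #         def _start(self):
+ #             print("hello", self.name)
+ #             self.returncode = os.EX_OK
+ #             self._async_wait()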
diff --git a/lib/_emerge/AtomArg.py b/lib/_emerge/AtomArg.py
new file mode 100644
index 000000000..343d7aaab
--- /dev/null
+++ b/lib/_emerge/AtomArg.py
@@ -0,0 +1,14 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage._sets.base import InternalPackageSet
+from _emerge.DependencyArg import DependencyArg
+
+class AtomArg(DependencyArg):
+
+ __slots__ = ('atom', 'pset')
+
+ def __init__(self, atom=None, **kwargs):
+ DependencyArg.__init__(self, **kwargs)
+ self.atom = atom
+ self.pset = InternalPackageSet(initial_atoms=(self.atom,), allow_repo=True)
diff --git a/lib/_emerge/Binpkg.py b/lib/_emerge/Binpkg.py
new file mode 100644
index 000000000..7791ec236
--- /dev/null
+++ b/lib/_emerge/Binpkg.py
@@ -0,0 +1,457 @@
+# Copyright 1999-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import functools
+
+import _emerge.emergelog
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.BinpkgFetcher import BinpkgFetcher
+from _emerge.BinpkgEnvExtractor import BinpkgEnvExtractor
+from _emerge.BinpkgExtractorAsync import BinpkgExtractorAsync
+from _emerge.CompositeTask import CompositeTask
+from _emerge.BinpkgVerifier import BinpkgVerifier
+from _emerge.EbuildMerge import EbuildMerge
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.SpawnProcess import SpawnProcess
+from portage.eapi import eapi_exports_replace_vars
+from portage.util import ensure_dirs
+from portage.util._async.AsyncTaskFuture import AsyncTaskFuture
+import portage
+from portage import os
+from portage import shutil
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+import io
+import logging
+
+class Binpkg(CompositeTask):
+
+ __slots__ = ("find_blockers",
+ "ldpath_mtimes", "logger", "opts",
+ "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
+ ("_bintree", "_build_dir", "_build_prefix",
+ "_ebuild_path", "_fetched_pkg",
+ "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
+
+ def _writemsg_level(self, msg, level=0, noiselevel=0):
+ self.scheduler.output(msg, level=level, noiselevel=noiselevel,
+ log_path=self.settings.get("PORTAGE_LOG_FILE"))
+
+ def _start(self):
+
+ pkg = self.pkg
+ settings = self.settings
+ settings.setcpv(pkg)
+ self._tree = "bintree"
+ self._bintree = self.pkg.root_config.trees[self._tree]
+ self._verify = not self.opts.pretend
+
+ # Use realpath like doebuild_environment() does, since we assert
+ # that this path is literally identical to PORTAGE_BUILDDIR.
+ dir_path = os.path.join(os.path.realpath(settings["PORTAGE_TMPDIR"]),
+ "portage", pkg.category, pkg.pf)
+ self._image_dir = os.path.join(dir_path, "image")
+ self._infloc = os.path.join(dir_path, "build-info")
+ self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
+ settings["EBUILD"] = self._ebuild_path
+ portage.doebuild_environment(self._ebuild_path, 'setup',
+ settings=self.settings, db=self._bintree.dbapi)
+ if dir_path != self.settings['PORTAGE_BUILDDIR']:
+ raise AssertionError("'%s' != '%s'" % \
+ (dir_path, self.settings['PORTAGE_BUILDDIR']))
+ self._build_dir = EbuildBuildDir(
+ scheduler=self.scheduler, settings=settings)
+ settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
+ settings.configdict["pkg"]["MERGE_TYPE"] = "binary"
+
+ if eapi_exports_replace_vars(settings["EAPI"]):
+ vardb = self.pkg.root_config.trees["vartree"].dbapi
+ settings["REPLACING_VERSIONS"] = " ".join(
+ set(portage.versions.cpv_getversion(x) \
+ for x in vardb.match(self.pkg.slot_atom) + \
+ vardb.match('='+self.pkg.cpv)))
+
+ # The prefetcher has already completed or it
+ # could be running now. If it's running now,
+ # wait for it to complete since it holds
+ # a lock on the file being fetched. The
+ # portage.locks functions are only designed
+ # to work between separate processes. Since
+ # the lock is held by the current process,
+ # use the scheduler and fetcher methods to
+ # synchronize with the fetcher.
+ prefetcher = self.prefetcher
+ if prefetcher is None:
+ pass
+ elif prefetcher.isAlive() and \
+ prefetcher.poll() is None:
+
+ if not self.background:
+ fetch_log = os.path.join(
+ _emerge.emergelog._emerge_log_dir, 'emerge-fetch.log')
+ msg = (
+ 'Fetching in the background:',
+ prefetcher.pkg_path,
+ 'To view fetch progress, run in another terminal:',
+ 'tail -f %s' % fetch_log,
+ )
+ out = portage.output.EOutput()
+ for l in msg:
+ out.einfo(l)
+
+ self._current_task = prefetcher
+ prefetcher.addExitListener(self._prefetch_exit)
+ return
+
+ self._prefetch_exit(prefetcher)
+
+ def _prefetch_exit(self, prefetcher):
+
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ if not (self.opts.pretend or self.opts.fetchonly):
+ self._start_task(
+ AsyncTaskFuture(future=self._build_dir.async_lock()),
+ self._start_fetcher)
+ else:
+ self._start_fetcher()
+
+ def _start_fetcher(self, lock_task=None):
+ if lock_task is not None:
+ self._assert_current(lock_task)
+ if lock_task.cancelled:
+ self._default_final_exit(lock_task)
+ return
+
+ lock_task.future.result()
+ # Initialize PORTAGE_LOG_FILE (clean_log won't work without it).
+ portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
+ # If necessary, discard old log so that we don't
+ # append to it.
+ self._build_dir.clean_log()
+
+ pkg = self.pkg
+ pkg_count = self.pkg_count
+ fetcher = BinpkgFetcher(background=self.background,
+ logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
+ pretend=self.opts.pretend, scheduler=self.scheduler)
+
+ if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
+ msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv,
+ fetcher.pkg_path)
+ short_msg = "emerge: (%s of %s) %s Fetch" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ self.logger.log(msg, short_msg=short_msg)
+
+ # Allow the Scheduler's fetch queue to control the
+ # number of concurrent fetchers.
+ fetcher.addExitListener(self._fetcher_exit)
+ self._task_queued(fetcher)
+ self.scheduler.fetch.schedule(fetcher)
+ return
+
+ self._fetcher_exit(fetcher)
+
+ def _fetcher_exit(self, fetcher):
+
+ # The fetcher only has a returncode when
+ # --getbinpkg is enabled.
+ if fetcher.returncode is not None:
+ self._fetched_pkg = fetcher.pkg_path
+ if self._default_exit(fetcher) != os.EX_OK:
+ self._async_unlock_builddir(returncode=self.returncode)
+ return
+
+ if self.opts.pretend:
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ verifier = None
+ if self._verify:
+ if self._fetched_pkg:
+ path = self._fetched_pkg
+ else:
+ path = self.pkg.root_config.trees["bintree"].getname(
+ self.pkg.cpv)
+ logfile = self.settings.get("PORTAGE_LOG_FILE")
+ verifier = BinpkgVerifier(background=self.background,
+ logfile=logfile, pkg=self.pkg, scheduler=self.scheduler,
+ _pkg_path=path)
+ self._start_task(verifier, self._verifier_exit)
+ return
+
+ self._verifier_exit(verifier)
+
+ def _verifier_exit(self, verifier):
+ if verifier is not None and \
+ self._default_exit(verifier) != os.EX_OK:
+ self._async_unlock_builddir(returncode=self.returncode)
+ return
+
+ logger = self.logger
+ pkg = self.pkg
+ pkg_count = self.pkg_count
+
+ if self._fetched_pkg:
+ pkg_path = self._bintree.getname(
+ self._bintree.inject(pkg.cpv,
+ filename=self._fetched_pkg),
+ allocate_new=False)
+ else:
+ pkg_path = self.pkg.root_config.trees["bintree"].getname(
+ self.pkg.cpv)
+
+ # This gives bashrc users an opportunity to do various things
+ # such as remove binary packages after they're installed.
+ self.settings["PORTAGE_BINPKG_FILE"] = pkg_path
+ self._pkg_path = pkg_path
+
+ logfile = self.settings.get("PORTAGE_LOG_FILE")
+ if logfile is not None and os.path.isfile(logfile):
+ # Remove fetch log after successful fetch.
+ try:
+ os.unlink(logfile)
+ except OSError:
+ pass
+
+ if self.opts.fetchonly:
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ msg = " === (%s of %s) Merging Binary (%s::%s)" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
+ short_msg = "emerge: (%s of %s) %s Merge Binary" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ logger.log(msg, short_msg=short_msg)
+
+ phase = "clean"
+ settings = self.settings
+ ebuild_phase = EbuildPhase(background=self.background,
+ phase=phase, scheduler=self.scheduler,
+ settings=settings)
+
+ self._start_task(ebuild_phase, self._clean_exit)
+
+ def _clean_exit(self, clean_phase):
+ if self._default_exit(clean_phase) != os.EX_OK:
+ self._async_unlock_builddir(returncode=self.returncode)
+ return
+
+ dir_path = self.settings['PORTAGE_BUILDDIR']
+
+ infloc = self._infloc
+ pkg = self.pkg
+ pkg_path = self._pkg_path
+
+ dir_mode = 0o755
+ for mydir in (dir_path, self._image_dir, infloc):
+ portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
+ gid=portage.data.portage_gid, mode=dir_mode)
+
+ # This initializes PORTAGE_LOG_FILE.
+ portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
+ self._writemsg_level(">>> Extracting info\n")
+
+ pkg_xpak = portage.xpak.tbz2(self._pkg_path)
+ check_missing_metadata = ("CATEGORY", "PF")
+ missing_metadata = set()
+ for k in check_missing_metadata:
+ v = pkg_xpak.getfile(_unicode_encode(k,
+ encoding=_encodings['repo.content']))
+ if not v:
+ missing_metadata.add(k)
+
+ pkg_xpak.unpackinfo(infloc)
+ for k in missing_metadata:
+ if k == "CATEGORY":
+ v = pkg.category
+ elif k == "PF":
+ v = pkg.pf
+ else:
+ continue
+
+ f = io.open(_unicode_encode(os.path.join(infloc, k),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['content'],
+ errors='backslashreplace')
+ try:
+ f.write(_unicode_decode(v + "\n"))
+ finally:
+ f.close()
+
+ # Store the md5sum in the vdb.
+ f = io.open(_unicode_encode(os.path.join(infloc, 'BINPKGMD5'),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['content'], errors='strict')
+ try:
+ f.write(_unicode_decode(
+ str(portage.checksum.perform_md5(pkg_path)) + "\n"))
+ finally:
+ f.close()
+
+ env_extractor = BinpkgEnvExtractor(background=self.background,
+ scheduler=self.scheduler, settings=self.settings)
+
+ self._start_task(env_extractor, self._env_extractor_exit)
+
+ def _env_extractor_exit(self, env_extractor):
+ if self._default_exit(env_extractor) != os.EX_OK:
+ self._async_unlock_builddir(returncode=self.returncode)
+ return
+
+ setup_phase = EbuildPhase(background=self.background,
+ phase="setup", scheduler=self.scheduler,
+ settings=self.settings)
+
+ setup_phase.addExitListener(self._setup_exit)
+ self._task_queued(setup_phase)
+ self.scheduler.scheduleSetup(setup_phase)
+
+ def _setup_exit(self, setup_phase):
+ if self._default_exit(setup_phase) != os.EX_OK:
+ self._async_unlock_builddir(returncode=self.returncode)
+ return
+
+ extractor = BinpkgExtractorAsync(background=self.background,
+ env=self.settings.environ(),
+ features=self.settings.features,
+ image_dir=self._image_dir,
+ pkg=self.pkg, pkg_path=self._pkg_path,
+ logfile=self.settings.get("PORTAGE_LOG_FILE"),
+ scheduler=self.scheduler)
+ self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
+ self._start_task(extractor, self._extractor_exit)
+
+ def _extractor_exit(self, extractor):
+ if self._default_exit(extractor) != os.EX_OK:
+ self._writemsg_level("!!! Error Extracting '%s'\n" % \
+ self._pkg_path, noiselevel=-1, level=logging.ERROR)
+ self._async_unlock_builddir(returncode=self.returncode)
+ return
+
+ try:
+ with io.open(_unicode_encode(os.path.join(self._infloc, "EPREFIX"),
+ encoding=_encodings['fs'], errors='strict'), mode='r',
+ encoding=_encodings['repo.content'], errors='replace') as f:
+ self._build_prefix = f.read().rstrip('\n')
+ except IOError:
+ self._build_prefix = ""
+
+ if self._build_prefix == self.settings["EPREFIX"]:
+ ensure_dirs(self.settings["ED"])
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ env = self.settings.environ()
+ env["PYTHONPATH"] = self.settings["PORTAGE_PYTHONPATH"]
+ chpathtool = SpawnProcess(
+ args=[portage._python_interpreter,
+ os.path.join(self.settings["PORTAGE_BIN_PATH"], "chpathtool.py"),
+ self.settings["D"], self._build_prefix, self.settings["EPREFIX"]],
+ background=self.background, env=env,
+ scheduler=self.scheduler,
+ logfile=self.settings.get('PORTAGE_LOG_FILE'))
+ self._writemsg_level(">>> Adjusting Prefix to %s\n" % self.settings["EPREFIX"])
+ self._start_task(chpathtool, self._chpathtool_exit)
+
+ def _chpathtool_exit(self, chpathtool):
+ if self._final_exit(chpathtool) != os.EX_OK:
+ self._writemsg_level("!!! Error Adjusting Prefix to %s\n" %
+ (self.settings["EPREFIX"],),
+ noiselevel=-1, level=logging.ERROR)
+ self._async_unlock_builddir(returncode=self.returncode)
+ return
+
+ # We want to install in "our" prefix, not the binary one
+ with io.open(_unicode_encode(os.path.join(self._infloc, "EPREFIX"),
+ encoding=_encodings['fs'], errors='strict'), mode='w',
+ encoding=_encodings['repo.content'], errors='strict') as f:
+ f.write(self.settings["EPREFIX"] + "\n")
+
+ # Move the files to the correct location for merge.
+ image_tmp_dir = os.path.join(
+ self.settings["PORTAGE_BUILDDIR"], "image_tmp")
+ build_d = os.path.join(self.settings["D"],
+ self._build_prefix.lstrip(os.sep))
+ if not os.path.isdir(build_d):
+ # Assume this is a virtual package or something.
+ shutil.rmtree(self._image_dir)
+ ensure_dirs(self.settings["ED"])
+ else:
+ os.rename(build_d, image_tmp_dir)
+ shutil.rmtree(self._image_dir)
+ ensure_dirs(os.path.dirname(self.settings["ED"].rstrip(os.sep)))
+ os.rename(image_tmp_dir, self.settings["ED"])
+
+ self.wait()
+
+ def _async_unlock_builddir(self, returncode=None):
+ """
+ Release the lock asynchronously, and if a returncode parameter
+ is given then set self.returncode and notify exit listeners.
+ """
+ if self.opts.pretend or self.opts.fetchonly:
+ if returncode is not None:
+ self.returncode = returncode
+ self._async_wait()
+ return
+ if returncode is not None:
+ # The returncode will be set after unlock is complete.
+ self.returncode = None
+ portage.elog.elog_process(self.pkg.cpv, self.settings)
+ self._start_task(
+ AsyncTaskFuture(future=self._build_dir.async_unlock()),
+ functools.partial(self._unlock_builddir_exit, returncode=returncode))
+
+ def _unlock_builddir_exit(self, unlock_task, returncode=None):
+ self._assert_current(unlock_task)
+ if unlock_task.cancelled and returncode is not None:
+ self._default_final_exit(unlock_task)
+ return
+
+ # Normally, async_unlock should not raise an exception here.
+ unlock_task.future.cancelled() or unlock_task.future.result()
+ if returncode is not None:
+ self.returncode = returncode
+ self._async_wait()
+
+ def create_install_task(self):
+ task = EbuildMerge(exit_hook=self._install_exit,
+ find_blockers=self.find_blockers,
+ ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
+ pkg=self.pkg, pkg_count=self.pkg_count,
+ pkg_path=self._pkg_path, scheduler=self.scheduler,
+ settings=self.settings, tree=self._tree,
+ world_atom=self.world_atom)
+ return task
+
+ def _install_exit(self, task):
+ """
+ @returns: Future, result is the returncode from an
+ EbuildBuildDir.async_unlock() task
+ """
+ self.settings.pop("PORTAGE_BINPKG_FILE", None)
+ if task.returncode == os.EX_OK and \
+ 'binpkg-logs' not in self.settings.features and \
+ self.settings.get("PORTAGE_LOG_FILE"):
+ try:
+ os.unlink(self.settings["PORTAGE_LOG_FILE"])
+ except OSError:
+ pass
+ self._async_unlock_builddir()
+ if self._current_task is None:
+ result = self.scheduler.create_future()
+ self.scheduler.call_soon(result.set_result, os.EX_OK)
+ else:
+ result = self._current_task.async_wait()
+ return result
diff --git a/lib/_emerge/BinpkgEnvExtractor.py b/lib/_emerge/BinpkgEnvExtractor.py
new file mode 100644
index 000000000..5ba14955d
--- /dev/null
+++ b/lib/_emerge/BinpkgEnvExtractor.py
@@ -0,0 +1,66 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+
+from _emerge.CompositeTask import CompositeTask
+from _emerge.SpawnProcess import SpawnProcess
+from portage import os, _shell_quote, _unicode_encode
+from portage.const import BASH_BINARY
+
+class BinpkgEnvExtractor(CompositeTask):
+ """
+ Extract environment.bz2 for a binary or installed package.
+ """
+ __slots__ = ('settings',)
+
+ def saved_env_exists(self):
+ return os.path.exists(self._get_saved_env_path())
+
+ def dest_env_exists(self):
+ return os.path.exists(self._get_dest_env_path())
+
+ def _get_saved_env_path(self):
+ return os.path.join(os.path.dirname(self.settings['EBUILD']),
+ "environment.bz2")
+
+ def _get_dest_env_path(self):
+ return os.path.join(self.settings["T"], "environment")
+
+ def _start(self):
+ saved_env_path = self._get_saved_env_path()
+ dest_env_path = self._get_dest_env_path()
+ shell_cmd = "${PORTAGE_BUNZIP2_COMMAND:-${PORTAGE_BZIP2_COMMAND} -d} -c -- %s > %s" % \
+ (_shell_quote(saved_env_path),
+ _shell_quote(dest_env_path))
+ extractor_proc = SpawnProcess(
+ args=[BASH_BINARY, "-c", shell_cmd],
+ background=self.background,
+ env=self.settings.environ(),
+ scheduler=self.scheduler,
+ logfile=self.settings.get('PORTAGE_LOG_FILE'))
+
+ self._start_task(extractor_proc, self._extractor_exit)
+
+ def _remove_dest_env(self):
+ try:
+ os.unlink(self._get_dest_env_path())
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+ def _extractor_exit(self, extractor_proc):
+
+ if self._default_exit(extractor_proc) != os.EX_OK:
+ self._remove_dest_env()
+ self.wait()
+ return
+
+ # This is a signal to ebuild.sh, so that it knows to filter
+ # out things like SANDBOX_{DENY,PREDICT,READ,WRITE} that
+ # would be preserved between normal phases.
+ open(_unicode_encode(self._get_dest_env_path() + '.raw'), 'wb').close()
+
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
diff --git a/lib/_emerge/BinpkgExtractorAsync.py b/lib/_emerge/BinpkgExtractorAsync.py
new file mode 100644
index 000000000..3733bdeb5
--- /dev/null
+++ b/lib/_emerge/BinpkgExtractorAsync.py
@@ -0,0 +1,108 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+
+from _emerge.SpawnProcess import SpawnProcess
+import portage
+from portage.localization import _
+from portage.util.compression_probe import (
+ compression_probe,
+ _compressors,
+)
+from portage.process import find_binary
+from portage.util import (
+ shlex_split,
+ varexpand,
+)
+import signal
+import subprocess
+import tarfile
+
+
+class BinpkgExtractorAsync(SpawnProcess):
+
+ __slots__ = ("features", "image_dir", "pkg", "pkg_path")
+
+ _shell_binary = portage.const.BASH_BINARY
+
+ def _start(self):
+ tar_options = ""
+ if "xattr" in self.features:
+ process = subprocess.Popen(["tar", "--help"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ output = process.communicate()[0]
+ if b"--xattrs" in output:
+ tar_options = ["--xattrs", "--xattrs-include='*'"]
+ for x in portage.util.shlex_split(self.env.get("PORTAGE_XATTR_EXCLUDE", "")):
+ tar_options.append(portage._shell_quote("--xattrs-exclude=%s" % x))
+ tar_options = " ".join(tar_options)
+
+ decomp = _compressors.get(compression_probe(self.pkg_path))
+ if decomp is not None:
+ decomp_cmd = decomp.get("decompress")
+ elif tarfile.is_tarfile(portage._unicode_encode(self.pkg_path,
+ encoding=portage._encodings['fs'], errors='strict')):
+ decomp_cmd = 'cat'
+ decomp = {
+ 'compress': 'cat',
+ 'package': 'sys-apps/coreutils',
+ }
+ else:
+ decomp_cmd = None
+ if decomp_cmd is None:
+ self.scheduler.output("!!! %s\n" %
+ _("File compression header unrecognized: %s") %
+ self.pkg_path, log_path=self.logfile,
+ background=self.background, level=logging.ERROR)
+ self.returncode = 1
+ self._async_wait()
+ return
+
+ try:
+ decompression_binary = shlex_split(varexpand(decomp_cmd, mydict=self.env))[0]
+ except IndexError:
+ decompression_binary = ""
+
+ if find_binary(decompression_binary) is None:
+ # Try alternative command if it exists
+ if decomp.get("decompress_alt"):
+ decomp_cmd = decomp.get("decompress_alt")
+ try:
+ decompression_binary = shlex_split(varexpand(decomp_cmd, mydict=self.env))[0]
+ except IndexError:
+ decompression_binary = ""
+
+ if find_binary(decompression_binary) is None:
+ missing_package = decomp.get("package")
+ self.scheduler.output("!!! %s\n" %
+ _("File compression unsupported %s.\n Command was: %s.\n Maybe missing package: %s") %
+ (self.pkg_path, varexpand(decomp_cmd, mydict=self.env), missing_package), log_path=self.logfile,
+ background=self.background, level=logging.ERROR)
+ self.returncode = 1
+ self._async_wait()
+ return
+
+ pkg_xpak = portage.xpak.tbz2(self.pkg_path)
+ pkg_xpak.scan()
+
+ # SIGPIPE handling (128 + SIGPIPE) should be compatible with
+ # assert_sigpipe_ok() that's used by the ebuild unpack() helper.
+ self.args = [self._shell_binary, "-c",
+ ("cmd0=(head -c %d -- %s) cmd1=(%s) cmd2=(tar -xp %s -C %s -f -); " + \
+ '"${cmd0[@]}" | "${cmd1[@]}" | "${cmd2[@]}"; ' + \
+ "p=(${PIPESTATUS[@]}) ; for i in {0..2}; do " + \
+ "if [[ ${p[$i]} != 0 && ${p[$i]} != %d ]] ; then " + \
+ "echo command $(eval \"echo \\\"'\\${cmd$i[*]}'\\\"\") " + \
+ "failed with status ${p[$i]} ; exit ${p[$i]} ; fi ; done; " + \
+ "if [ ${p[$i]} != 0 ] ; then " + \
+ "echo command $(eval \"echo \\\"'\\${cmd$i[*]}'\\\"\") " + \
+ "failed with status ${p[$i]} ; exit ${p[$i]} ; fi ; " + \
+ "exit 0 ;") % \
+ (pkg_xpak.filestat.st_size - pkg_xpak.xpaksize,
+ portage._shell_quote(self.pkg_path),
+ decomp_cmd,
+ tar_options,
+ portage._shell_quote(self.image_dir),
+ 128 + signal.SIGPIPE)]
+
+ SpawnProcess._start(self)
diff --git a/lib/_emerge/BinpkgFetcher.py b/lib/_emerge/BinpkgFetcher.py
new file mode 100644
index 000000000..36d027de3
--- /dev/null
+++ b/lib/_emerge/BinpkgFetcher.py
@@ -0,0 +1,240 @@
+# Copyright 1999-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import functools
+
+from _emerge.AsynchronousLock import AsynchronousLock
+from _emerge.CompositeTask import CompositeTask
+from _emerge.SpawnProcess import SpawnProcess
+try:
+ from urllib.parse import urlparse as urllib_parse_urlparse
+except ImportError:
+ from urlparse import urlparse as urllib_parse_urlparse
+import stat
+import sys
+import portage
+from portage import os
+from portage.util._async.AsyncTaskFuture import AsyncTaskFuture
+from portage.util._pty import _create_pty_or_pipe
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+
+class BinpkgFetcher(CompositeTask):
+
+ __slots__ = ("pkg", "pretend", "logfile", "pkg_path")
+
+ def __init__(self, **kwargs):
+ CompositeTask.__init__(self, **kwargs)
+ pkg = self.pkg
+ self.pkg_path = pkg.root_config.trees["bintree"].getname(
+ pkg.cpv) + ".partial"
+
+ def _start(self):
+ fetcher = _BinpkgFetcherProcess(background=self.background,
+ logfile=self.logfile, pkg=self.pkg, pkg_path=self.pkg_path,
+ pretend=self.pretend, scheduler=self.scheduler)
+
+ if not self.pretend:
+ portage.util.ensure_dirs(os.path.dirname(self.pkg_path))
+ if "distlocks" in self.pkg.root_config.settings.features:
+ self._start_task(
+ AsyncTaskFuture(future=fetcher.async_lock()),
+ functools.partial(self._start_locked, fetcher))
+ return
+
+ self._start_task(fetcher, self._fetcher_exit)
+
+ def _start_locked(self, fetcher, lock_task):
+ self._assert_current(lock_task)
+ if lock_task.cancelled:
+ self._default_final_exit(lock_task)
+ return
+
+ lock_task.future.result()
+ self._start_task(fetcher, self._fetcher_exit)
+
+ def _fetcher_exit(self, fetcher):
+ self._assert_current(fetcher)
+ if not self.pretend and fetcher.returncode == os.EX_OK:
+ fetcher.sync_timestamp()
+ if fetcher.locked:
+ self._start_task(
+ AsyncTaskFuture(future=fetcher.async_unlock()),
+ functools.partial(self._fetcher_exit_unlocked, fetcher))
+ else:
+ self._fetcher_exit_unlocked(fetcher)
+
+ def _fetcher_exit_unlocked(self, fetcher, unlock_task=None):
+ if unlock_task is not None:
+ self._assert_current(unlock_task)
+ if unlock_task.cancelled:
+ self._default_final_exit(unlock_task)
+ return
+
+ unlock_task.future.result()
+
+ self._current_task = None
+ self.returncode = fetcher.returncode
+ self._async_wait()
+
+
+class _BinpkgFetcherProcess(SpawnProcess):
+
+ __slots__ = ("pkg", "pretend", "locked", "pkg_path", "_lock_obj")
+
+ def _start(self):
+ pkg = self.pkg
+ pretend = self.pretend
+ bintree = pkg.root_config.trees["bintree"]
+ settings = bintree.settings
+ pkg_path = self.pkg_path
+
+ exists = os.path.exists(pkg_path)
+ resume = exists and os.path.basename(pkg_path) in bintree.invalids
+ if not (pretend or resume):
+ # Remove existing file or broken symlink.
+ try:
+ os.unlink(pkg_path)
+ except OSError:
+ pass
+
+ # urljoin doesn't work correctly with
+ # unrecognized protocols like sftp
+ if bintree._remote_has_index:
+ instance_key = bintree.dbapi._instance_key(pkg.cpv)
+ rel_uri = bintree._remotepkgs[instance_key].get("PATH")
+ if not rel_uri:
+ rel_uri = pkg.cpv + ".tbz2"
+ remote_base_uri = bintree._remotepkgs[
+ instance_key]["BASE_URI"]
+ uri = remote_base_uri.rstrip("/") + "/" + rel_uri.lstrip("/")
+ else:
+ uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
+ "/" + pkg.pf + ".tbz2"
+
+ if pretend:
+ portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ protocol = urllib_parse_urlparse(uri)[0]
+ fcmd_prefix = "FETCHCOMMAND"
+ if resume:
+ fcmd_prefix = "RESUMECOMMAND"
+ fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
+ if not fcmd:
+ fcmd = settings.get(fcmd_prefix)
+
+ fcmd_vars = {
+ "DISTDIR" : os.path.dirname(pkg_path),
+ "URI" : uri,
+ "FILE" : os.path.basename(pkg_path)
+ }
+
+ for k in ("PORTAGE_SSH_OPTS",):
+ v = settings.get(k)
+ if v is not None:
+ fcmd_vars[k] = v
+
+ fetch_env = dict(settings.items())
+ fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
+ for x in portage.util.shlex_split(fcmd)]
+
+ if self.fd_pipes is None:
+ self.fd_pipes = {}
+ fd_pipes = self.fd_pipes
+
+ # Redirect all output to stdout since some fetchers like
+ # wget pollute stderr (if portage detects a problem then it
+ # can send its own message to stderr).
+ fd_pipes.setdefault(0, portage._get_stdin().fileno())
+ fd_pipes.setdefault(1, sys.__stdout__.fileno())
+ fd_pipes.setdefault(2, sys.__stdout__.fileno())
+
+ self.args = fetch_args
+ self.env = fetch_env
+ if settings.selinux_enabled():
+ self._selinux_type = settings["PORTAGE_FETCH_T"]
+ SpawnProcess._start(self)
+
+ def _pipe(self, fd_pipes):
+ """When appropriate, use a pty so that fetcher progress bars,
+ like wget has, will work properly."""
+ if self.background or not sys.__stdout__.isatty():
+ # When the output only goes to a log file,
+ # there's no point in creating a pty.
+ return os.pipe()
+ stdout_pipe = None
+ if not self.background:
+ stdout_pipe = fd_pipes.get(1)
+ got_pty, master_fd, slave_fd = \
+ _create_pty_or_pipe(copy_term_size=stdout_pipe)
+ return (master_fd, slave_fd)
+
+ def sync_timestamp(self):
+ # If possible, update the mtime to match the remote package if
+ # the fetcher didn't already do it automatically.
+ bintree = self.pkg.root_config.trees["bintree"]
+ if bintree._remote_has_index:
+ remote_mtime = bintree._remotepkgs[
+ bintree.dbapi._instance_key(
+ self.pkg.cpv)].get("_mtime_")
+ if remote_mtime is not None:
+ try:
+ remote_mtime = long(remote_mtime)
+ except ValueError:
+ pass
+ else:
+ try:
+ local_mtime = os.stat(self.pkg_path)[stat.ST_MTIME]
+ except OSError:
+ pass
+ else:
+ if remote_mtime != local_mtime:
+ try:
+ os.utime(self.pkg_path,
+ (remote_mtime, remote_mtime))
+ except OSError:
+ pass
+
+ def async_lock(self):
+ """
+ This raises an AlreadyLocked exception if async_lock() is called
+ while a lock is already held. In order to avoid this, call
+ async_unlock() or check whether the "locked" attribute is True
+ or False before calling async_lock().
+ """
+ if self._lock_obj is not None:
+ raise self.AlreadyLocked((self._lock_obj,))
+
+ result = self.scheduler.create_future()
+
+ def acquired_lock(async_lock):
+ if async_lock.wait() == os.EX_OK:
+ self.locked = True
+ result.set_result(None)
+ else:
+ result.set_exception(AssertionError(
+ "AsynchronousLock failed with returncode %s"
+ % (async_lock.returncode,)))
+
+ self._lock_obj = AsynchronousLock(path=self.pkg_path,
+ scheduler=self.scheduler)
+ self._lock_obj.addExitListener(acquired_lock)
+ self._lock_obj.start()
+ return result
+
+ class AlreadyLocked(portage.exception.PortageException):
+ pass
+
+ def async_unlock(self):
+ if self._lock_obj is None:
+ raise AssertionError('already unlocked')
+ result = self._lock_obj.async_unlock()
+ self._lock_obj = None
+ self.locked = False
+ return result
+
diff --git a/lib/_emerge/BinpkgPrefetcher.py b/lib/_emerge/BinpkgPrefetcher.py
new file mode 100644
index 000000000..7ca897049
--- /dev/null
+++ b/lib/_emerge/BinpkgPrefetcher.py
@@ -0,0 +1,43 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.BinpkgFetcher import BinpkgFetcher
+from _emerge.CompositeTask import CompositeTask
+from _emerge.BinpkgVerifier import BinpkgVerifier
+from portage import os
+
+class BinpkgPrefetcher(CompositeTask):
+
+ __slots__ = ("pkg",) + \
+ ("pkg_path", "_bintree",)
+
+ def _start(self):
+ self._bintree = self.pkg.root_config.trees["bintree"]
+ fetcher = BinpkgFetcher(background=self.background,
+ logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
+ scheduler=self.scheduler)
+ self.pkg_path = fetcher.pkg_path
+ self._start_task(fetcher, self._fetcher_exit)
+
+ def _fetcher_exit(self, fetcher):
+
+ if self._default_exit(fetcher) != os.EX_OK:
+ self.wait()
+ return
+
+ verifier = BinpkgVerifier(background=self.background,
+ logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
+ scheduler=self.scheduler, _pkg_path=self.pkg_path)
+ self._start_task(verifier, self._verifier_exit)
+
+ def _verifier_exit(self, verifier):
+ if self._default_exit(verifier) != os.EX_OK:
+ self.wait()
+ return
+
+ self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
+
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
+
diff --git a/lib/_emerge/BinpkgVerifier.py b/lib/_emerge/BinpkgVerifier.py
new file mode 100644
index 000000000..7a6d15e80
--- /dev/null
+++ b/lib/_emerge/BinpkgVerifier.py
@@ -0,0 +1,122 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import io
+import sys
+
+from _emerge.CompositeTask import CompositeTask
+import portage
+from portage import os
+from portage.checksum import (_apply_hash_filter,
+ _filter_unaccelarated_hashes, _hash_filter)
+from portage.output import EOutput
+from portage.util._async.FileDigester import FileDigester
+from portage.package.ebuild.fetch import _checksum_failure_temp_file
+
+class BinpkgVerifier(CompositeTask):
+ __slots__ = ("logfile", "pkg", "_digests", "_pkg_path")
+
+ def _start(self):
+
+ bintree = self.pkg.root_config.trees["bintree"]
+ digests = bintree._get_digests(self.pkg)
+ if "size" not in digests:
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ digests = _filter_unaccelarated_hashes(digests)
+ hash_filter = _hash_filter(
+ bintree.settings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if not hash_filter.transparent:
+ digests = _apply_hash_filter(digests, hash_filter)
+
+ self._digests = digests
+
+ try:
+ size = os.stat(self._pkg_path).st_size
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ self.scheduler.output(("!!! Fetching Binary failed "
+ "for '%s'\n") % self.pkg.cpv, log_path=self.logfile,
+ background=self.background)
+ self.returncode = 1
+ self._async_wait()
+ return
+ else:
+ if size != digests["size"]:
+ self._digest_exception("size", size, digests["size"])
+ self.returncode = 1
+ self._async_wait()
+ return
+
+ self._start_task(FileDigester(file_path=self._pkg_path,
+ hash_names=(k for k in digests if k != "size"),
+ background=self.background, logfile=self.logfile,
+ scheduler=self.scheduler),
+ self._digester_exit)
+
+ def _digester_exit(self, digester):
+
+ if self._default_exit(digester) != os.EX_OK:
+ self.wait()
+ return
+
+ for hash_name in digester.hash_names:
+ if digester.digests[hash_name] != self._digests[hash_name]:
+ self._digest_exception(hash_name,
+ digester.digests[hash_name], self._digests[hash_name])
+ self.returncode = 1
+ self.wait()
+ return
+
+ if self.pkg.root_config.settings.get("PORTAGE_QUIET") != "1":
+ self._display_success()
+
+ self.returncode = os.EX_OK
+ self.wait()
+
+ def _display_success(self):
+ stdout_orig = sys.stdout
+ stderr_orig = sys.stderr
+ global_havecolor = portage.output.havecolor
+ out = io.StringIO()
+ try:
+ sys.stdout = out
+ sys.stderr = out
+ if portage.output.havecolor:
+ portage.output.havecolor = not self.background
+
+ path = self._pkg_path
+ if path.endswith(".partial"):
+ path = path[:-len(".partial")]
+ eout = EOutput()
+ eout.ebegin("%s %s ;-)" % (os.path.basename(path),
+ " ".join(sorted(self._digests))))
+ eout.eend(0)
+
+ finally:
+ sys.stdout = stdout_orig
+ sys.stderr = stderr_orig
+ portage.output.havecolor = global_havecolor
+
+ self.scheduler.output(out.getvalue(), log_path=self.logfile,
+ background=self.background)
+
+ def _digest_exception(self, name, value, expected):
+
+ head, tail = os.path.split(self._pkg_path)
+ temp_filename = _checksum_failure_temp_file(head, tail)
+
+ self.scheduler.output((
+ "\n!!! Digest verification failed:\n"
+ "!!! %s\n"
+ "!!! Reason: Failed on %s verification\n"
+ "!!! Got: %s\n"
+ "!!! Expected: %s\n"
+ "File renamed to '%s'\n") %
+ (self._pkg_path, name, value, expected, temp_filename),
+ log_path=self.logfile,
+ background=self.background)
diff --git a/lib/_emerge/Blocker.py b/lib/_emerge/Blocker.py
new file mode 100644
index 000000000..93046069d
--- /dev/null
+++ b/lib/_emerge/Blocker.py
@@ -0,0 +1,15 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.Task import Task
+
+class Blocker(Task):
+
+ __hash__ = Task.__hash__
+ __slots__ = ("root", "atom", "cp", "eapi", "priority", "satisfied")
+
+ def __init__(self, **kwargs):
+ Task.__init__(self, **kwargs)
+ self.cp = self.atom.cp
+ self._hash_key = ("blocks", self.root, self.atom, self.eapi)
+ self._hash_value = hash(self._hash_key)
diff --git a/lib/_emerge/BlockerCache.py b/lib/_emerge/BlockerCache.py
new file mode 100644
index 000000000..53342d6d6
--- /dev/null
+++ b/lib/_emerge/BlockerCache.py
@@ -0,0 +1,191 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import sys
+from portage.util import writemsg
+from portage.data import secpass
+import portage
+from portage import os
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ long = int
+ _unicode = str
+else:
+ _unicode = unicode
+
+class BlockerCache(portage.cache.mappings.MutableMapping):
+ """This caches blockers of installed packages so that dep_check does not
+ have to be done for every single installed package on every invocation of
+ emerge. The cache is invalidated whenever it is detected that something
+ has changed that might alter the results of dep_check() calls:
+ 1) the set of installed packages (including COUNTER) has changed
+ """
+
+ # Number of uncached packages to trigger cache update, since
+ # it's wasteful to update it for every vdb change.
+ _cache_threshold = 5
+
+ class BlockerData(object):
+
+ __slots__ = ("__weakref__", "atoms", "counter")
+
+ def __init__(self, counter, atoms):
+ self.counter = counter
+ self.atoms = atoms
+
+ def __init__(self, myroot, vardb):
+ """ myroot is ignored in favour of EROOT """
+ self._vardb = vardb
+ self._cache_filename = os.path.join(vardb.settings['EROOT'],
+ portage.CACHE_PATH, "vdb_blockers.pickle")
+ self._cache_version = "1"
+ self._cache_data = None
+ self._modified = set()
+ self._load()
+
+ def _load(self):
+ try:
+ f = open(self._cache_filename, mode='rb')
+ mypickle = pickle.Unpickler(f)
+ try:
+ mypickle.find_global = None
+ except AttributeError:
+ # TODO: If py3k, override Unpickler.find_class().
+ pass
+ self._cache_data = mypickle.load()
+ f.close()
+ del f
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except Exception as e:
+ if isinstance(e, EnvironmentError) and \
+ getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES):
+ pass
+ else:
+ writemsg("!!! Error loading '%s': %s\n" % \
+ (self._cache_filename, str(e)), noiselevel=-1)
+ del e
+
+ cache_valid = self._cache_data and \
+ isinstance(self._cache_data, dict) and \
+ self._cache_data.get("version") == self._cache_version and \
+ isinstance(self._cache_data.get("blockers"), dict)
+ if cache_valid:
+ # Validate all the atoms and counters so that
+ # corruption is detected as soon as possible.
+ invalid_items = set()
+ for k, v in self._cache_data["blockers"].items():
+ if not isinstance(k, basestring):
+ invalid_items.add(k)
+ continue
+ try:
+ if portage.catpkgsplit(k) is None:
+ invalid_items.add(k)
+ continue
+ except portage.exception.InvalidData:
+ invalid_items.add(k)
+ continue
+ if not isinstance(v, tuple) or \
+ len(v) != 2:
+ invalid_items.add(k)
+ continue
+ counter, atoms = v
+ if not isinstance(counter, (int, long)):
+ invalid_items.add(k)
+ continue
+ if not isinstance(atoms, (list, tuple)):
+ invalid_items.add(k)
+ continue
+ invalid_atom = False
+ for atom in atoms:
+ if not isinstance(atom, basestring):
+ invalid_atom = True
+ break
+ if atom[:1] != "!" or \
+ not portage.isvalidatom(
+ atom, allow_blockers=True):
+ invalid_atom = True
+ break
+ if invalid_atom:
+ invalid_items.add(k)
+ continue
+
+ for k in invalid_items:
+ del self._cache_data["blockers"][k]
+ if not self._cache_data["blockers"]:
+ cache_valid = False
+
+ if not cache_valid:
+ self._cache_data = {"version":self._cache_version}
+ self._cache_data["blockers"] = {}
+ self._modified.clear()
+
+ def flush(self):
+ """If the current user has permission and the internal blocker cache has
+ been updated, save it to disk and mark it unmodified. This is called
+ by emerge after it has processed blockers for all installed packages.
+ Currently, the cache is only written if the user has superuser
+ privileges (since that's required to obtain a lock), but all users
+ have read access and benefit from faster blocker lookups (as long as
+ the entire cache is still valid). The cache is stored as a pickled
+ dict object with the following format:
+
+ {
+ version : "1",
+ "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
+ }
+ """
+ if len(self._modified) >= self._cache_threshold and \
+ secpass >= 2:
+ try:
+ f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
+ pickle.dump(self._cache_data, f, protocol=2)
+ f.close()
+ portage.util.apply_secpass_permissions(
+ self._cache_filename, gid=portage.portage_gid, mode=0o644)
+ except (IOError, OSError):
+ pass
+ self._modified.clear()
+
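+ # Illustrative (hypothetical) cache contents matching the format documented
+ # in flush() above; keys are installed cpvs, values are (COUNTER, blockers):
+ #
+ #     {
+ #         "version": "1",
+ #         "blockers": {
+ #             "dev-libs/foo-1.0": (1234, ("!dev-libs/foo-compat",)),
+ #         },
+ #     }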
+ def __setitem__(self, cpv, blocker_data):
+ """
+ Update the cache and mark it as modified for a future call to
+ self.flush().
+
+ @param cpv: Package for which to cache blockers.
+ @type cpv: String
+ @param blocker_data: An object with counter and atoms attributes.
+ @type blocker_data: BlockerData
+ """
+ self._cache_data["blockers"][_unicode(cpv)] = (blocker_data.counter,
+ tuple(_unicode(x) for x in blocker_data.atoms))
+ self._modified.add(cpv)
+
+ def __iter__(self):
+ if self._cache_data is None:
+ # triggered by python-trace
+ return iter([])
+ return iter(self._cache_data["blockers"])
+
+ def __len__(self):
+ """This needs to be implemented in order to avoid
+ infinite recursion in some cases."""
+ return len(self._cache_data["blockers"])
+
+ def __delitem__(self, cpv):
+ del self._cache_data["blockers"][cpv]
+
+ def __getitem__(self, cpv):
+ """
+ @rtype: BlockerData
+ @return: An object with counter and atoms attributes.
+ """
+ return self.BlockerData(*self._cache_data["blockers"][cpv])
+
diff --git a/lib/_emerge/BlockerDB.py b/lib/_emerge/BlockerDB.py
new file mode 100644
index 000000000..5b3b01c37
--- /dev/null
+++ b/lib/_emerge/BlockerDB.py
@@ -0,0 +1,126 @@
+# Copyright 1999-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+import portage
+from portage import os
+from portage import digraph
+from portage._sets.base import InternalPackageSet
+from portage.dep import Atom
+
+from _emerge.BlockerCache import BlockerCache
+from _emerge.Package import Package
+from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+class BlockerDB(object):
+
+ def __init__(self, fake_vartree):
+ root_config = fake_vartree._root_config
+ self._root_config = root_config
+ self._vartree = root_config.trees["vartree"]
+ self._portdb = root_config.trees["porttree"].dbapi
+
+ self._dep_check_trees = None
+ self._fake_vartree = fake_vartree
+ self._dep_check_trees = {
+ self._vartree.settings["EROOT"] : {
+ "porttree" : fake_vartree,
+ "vartree" : fake_vartree,
+ }}
+
+ def findInstalledBlockers(self, new_pkg):
+ """
+ Search for installed run-time blockers in the root where
+ new_pkg is planned to be installed. This ignores build-time
+ blockers, since new_pkg is assumed to be built already.
+ """
+ blocker_cache = BlockerCache(None,
+ self._vartree.dbapi)
+ dep_keys = Package._runtime_keys
+ settings = self._vartree.settings
+ stale_cache = set(blocker_cache)
+ fake_vartree = self._fake_vartree
+ dep_check_trees = self._dep_check_trees
+ vardb = fake_vartree.dbapi
+ installed_pkgs = list(vardb)
+
+ for inst_pkg in installed_pkgs:
+ stale_cache.discard(inst_pkg.cpv)
+ cached_blockers = blocker_cache.get(inst_pkg.cpv)
+ if cached_blockers is not None and \
+ cached_blockers.counter != inst_pkg.counter:
+ cached_blockers = None
+ if cached_blockers is not None:
+ blocker_atoms = cached_blockers.atoms
+ else:
+ # Use aux_get() to trigger FakeVartree global
+ # updates on *DEPEND when appropriate.
+ depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
+ success, atoms = portage.dep_check(depstr,
+ vardb, settings, myuse=inst_pkg.use.enabled,
+ trees=dep_check_trees, myroot=inst_pkg.root)
+ if not success:
+ pkg_location = os.path.join(inst_pkg.root,
+ portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
+ portage.writemsg("!!! %s/*DEPEND: %s\n" % \
+ (pkg_location, atoms), noiselevel=-1)
+ continue
+
+ blocker_atoms = [atom for atom in atoms \
+ if atom.startswith("!")]
+ blocker_atoms.sort()
+ blocker_cache[inst_pkg.cpv] = \
+ blocker_cache.BlockerData(inst_pkg.counter, blocker_atoms)
+ for cpv in stale_cache:
+ del blocker_cache[cpv]
+ blocker_cache.flush()
+
+ blocker_parents = digraph()
+ blocker_atoms = []
+ for pkg in installed_pkgs:
+ for blocker_atom in blocker_cache[pkg.cpv].atoms:
+ blocker_atom = blocker_atom.lstrip("!")
+ blocker_atoms.append(blocker_atom)
+ blocker_parents.add(blocker_atom, pkg)
+
+ blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
+ blocking_pkgs = set()
+ for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
+ blocking_pkgs.update(blocker_parents.parent_nodes(atom))
+
+ # Check for blockers in the other direction.
+ depstr = " ".join(new_pkg._metadata[k] for k in dep_keys)
+ success, atoms = portage.dep_check(depstr,
+ vardb, settings, myuse=new_pkg.use.enabled,
+ trees=dep_check_trees, myroot=new_pkg.root)
+ if not success:
+ # We should never get this far with invalid deps.
+ show_invalid_depstring_notice(new_pkg, atoms)
+ assert False
+
+ blocker_atoms = [atom.lstrip("!") for atom in atoms \
+ if atom[:1] == "!"]
+ if blocker_atoms:
+ blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
+ for inst_pkg in installed_pkgs:
+ try:
+ next(blocker_atoms.iterAtomsForPackage(inst_pkg))
+ except (portage.exception.InvalidDependString, StopIteration):
+ continue
+ blocking_pkgs.add(inst_pkg)
+
+ return blocking_pkgs
+
+ def discardBlocker(self, pkg):
+ """Discard a package from the list of potential blockers.
+ This will match any package(s) with identical cpv or cp:slot."""
+ for cpv_match in self._fake_vartree.dbapi.match_pkgs(Atom("=%s" % (pkg.cpv,))):
+ if cpv_match.cp == pkg.cp:
+ self._fake_vartree.cpv_discard(cpv_match)
+ for slot_match in self._fake_vartree.dbapi.match_pkgs(pkg.slot_atom):
+ if slot_match.cp == pkg.cp:
+ self._fake_vartree.cpv_discard(slot_match)
diff --git a/lib/_emerge/BlockerDepPriority.py b/lib/_emerge/BlockerDepPriority.py
new file mode 100644
index 000000000..1004a3717
--- /dev/null
+++ b/lib/_emerge/BlockerDepPriority.py
@@ -0,0 +1,13 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DepPriority import DepPriority
+class BlockerDepPriority(DepPriority):
+ __slots__ = ()
+ def __int__(self):
+ return 0
+
+ def __str__(self):
+ return 'blocker'
+
+BlockerDepPriority.instance = BlockerDepPriority()
diff --git a/lib/_emerge/CompositeTask.py b/lib/_emerge/CompositeTask.py
new file mode 100644
index 000000000..1edec4a17
--- /dev/null
+++ b/lib/_emerge/CompositeTask.py
@@ -0,0 +1,123 @@
+# Copyright 1999-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AsynchronousTask import AsynchronousTask
+from portage import os
+
+class CompositeTask(AsynchronousTask):
+
+ __slots__ = ("_current_task",)
+
+ _TASK_QUEUED = -1
+
+ def isAlive(self):
+ return self._current_task is not None
+
+ def _cancel(self):
+ if self._current_task is not None:
+ if self._current_task is self._TASK_QUEUED:
+ self.returncode = 1
+ self._current_task = None
+ self._async_wait()
+ else:
+ self._current_task.cancel()
+
+ def _poll(self):
+ """
+ This does a loop calling self._current_task.poll()
+ repeatedly as long as the value of self._current_task
+ keeps changing. It calls poll() a maximum of one time
+ for a given self._current_task instance. This is useful
+ since calling poll() on a task can trigger advance to
+ the next task, which could eventually lead to the returncode
+ being set in cases when polling only a single task would
+ not have the same effect.
+ """
+
+ prev = None
+ while True:
+ task = self._current_task
+ if task is None or \
+ task is self._TASK_QUEUED or \
+ task is prev:
+ # don't poll the same task more than once
+ break
+ task.poll()
+ prev = task
+
+ return self.returncode
+
+ def _assert_current(self, task):
+ """
+ Raises an AssertionError if the given task is not the
+ same one as self._current_task. This can be useful
+ for detecting bugs.
+ """
+ if task is not self._current_task:
+ raise AssertionError("Unrecognized task: %s" % (task,))
+
+ def _default_exit(self, task):
+ """
+ Calls _assert_current() on the given task and then sets the
+ composite returncode attribute if task.returncode != os.EX_OK.
+ If the task failed then self._current_task will be set to None.
+ Subclasses can use this as a generic task exit callback.
+
+ @rtype: int
+ @return: The task.returncode attribute.
+ """
+ self._assert_current(task)
+ if task.returncode != os.EX_OK:
+ self.returncode = task.returncode
+ self.cancelled = task.cancelled
+ self._current_task = None
+ return task.returncode
+
+ def _final_exit(self, task):
+ """
+ Assumes that task is the final task of this composite task.
+ Calls _default_exit() and sets self.returncode to the task's
+ returncode and sets self._current_task to None.
+ """
+ self._default_exit(task)
+ self._current_task = None
+ self.returncode = task.returncode
+ return self.returncode
+
+ def _default_final_exit(self, task):
+ """
+ This calls _final_exit() and then wait().
+
+ Subclasses can use this as a generic final task exit callback.
+
+ """
+ self._final_exit(task)
+ return self.wait()
+
+ def _start_task(self, task, exit_handler):
+ """
+ Register exit handler for the given task, set it
+ as self._current_task, and call task.start().
+
+ Subclasses can use this as a generic way to start
+ a task.
+
+ """
+ try:
+ task.scheduler = self.scheduler
+ except AttributeError:
+ pass
+ task.addExitListener(exit_handler)
+ self._current_task = task
+ task.start()
+
+ def _task_queued(self, task):
+ task.addStartListener(self._task_queued_start_handler)
+ self._current_task = self._TASK_QUEUED
+
+ def _task_queued_start_handler(self, task):
+ self._current_task = task
+
+ def _task_queued_wait(self):
+ return self._current_task is not self._TASK_QUEUED or \
+ self.cancelled or self.returncode is not None
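+
+ # Sketch of the typical usage pattern (hypothetical subclass, not part of
+ # portage): chain tasks by passing an exit handler to _start_task(), and
+ # finish with _default_final_exit() so the composite returncode is set.
+ #
+ #     class TwoStepTask(CompositeTask):
+ #         __slots__ = ("first", "second")
+ #
+ #         def _start(self):
+ #             self._start_task(self.first, self._first_exit)
+ #
+ #         def _first_exit(self, first):
+ #             if self._default_exit(first) != os.EX_OK:
+ #                 self.wait()
+ #                 return
+ #             self._start_task(self.second, self._default_final_exit)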
diff --git a/lib/_emerge/DepPriority.py b/lib/_emerge/DepPriority.py
new file mode 100644
index 000000000..34fdb481c
--- /dev/null
+++ b/lib/_emerge/DepPriority.py
@@ -0,0 +1,56 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AbstractDepPriority import AbstractDepPriority
+class DepPriority(AbstractDepPriority):
+
+ __slots__ = ("satisfied", "optional", "ignored")
+
+ def __int__(self):
+ """
+ Note: These priorities are only used for measuring hardness
+ in the circular dependency display via digraph.debug_print(),
+ and nothing more. For actual merge order calculations, the
+ measures defined by the DepPriorityNormalRange and
+ DepPrioritySatisfiedRange classes are used.
+
+ Attributes Hardness
+
+ buildtime_slot_op 0
+ buildtime -1
+ runtime -2
+ runtime_post -3
+ optional -4
+ (none of the above) -5
+
+ """
+
+ if self.optional:
+ return -4
+ if self.buildtime_slot_op:
+ return 0
+ if self.buildtime:
+ return -1
+ if self.runtime:
+ return -2
+ if self.runtime_post:
+ return -3
+ return -5
+
+ def __str__(self):
+ if self.ignored:
+ return "ignored"
+ if self.optional:
+ return "optional"
+ if self.buildtime_slot_op:
+ return "buildtime_slot_op"
+ if self.buildtime:
+ return "buildtime"
+ if self.runtime_slot_op:
+ return "runtime_slot_op"
+ if self.runtime:
+ return "runtime"
+ if self.runtime_post:
+ return "runtime_post"
+ return "soft"
+
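
The hardness table in __int__() maps directly onto these integers; a quick
illustration (not part of this patch):

from _emerge.DepPriority import DepPriority

# Hardness decreases from buildtime_slot_op (0) down to "soft" (-5),
# matching the table in DepPriority.__int__() above.
for prio in (DepPriority(buildtime_slot_op=True),
		DepPriority(buildtime=True),
		DepPriority(runtime=True),
		DepPriority(runtime_post=True),
		DepPriority(optional=True),
		DepPriority()):
	print("%-18s %d" % (prio, int(prio)))
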
diff --git a/lib/_emerge/DepPriorityNormalRange.py b/lib/_emerge/DepPriorityNormalRange.py
new file mode 100644
index 000000000..86395549f
--- /dev/null
+++ b/lib/_emerge/DepPriorityNormalRange.py
@@ -0,0 +1,47 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DepPriority import DepPriority
+class DepPriorityNormalRange(object):
+ """
+ DepPriority properties Index Category
+
+ buildtime HARD
+ runtime 3 MEDIUM
+ runtime_post 2 MEDIUM_SOFT
+ optional 1 SOFT
+ (none of the above) 0 NONE
+ """
+ MEDIUM = 3
+ MEDIUM_SOFT = 2
+ SOFT = 1
+ NONE = 0
+
+ @classmethod
+ def _ignore_optional(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.optional)
+
+ @classmethod
+ def _ignore_runtime_post(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.optional or priority.runtime_post)
+
+ @classmethod
+ def _ignore_runtime(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.optional or not priority.buildtime)
+
+ ignore_medium = _ignore_runtime
+ ignore_medium_soft = _ignore_runtime_post
+ ignore_soft = _ignore_optional
+
+DepPriorityNormalRange.ignore_priority = (
+ None,
+ DepPriorityNormalRange._ignore_optional,
+ DepPriorityNormalRange._ignore_runtime_post,
+ DepPriorityNormalRange._ignore_runtime
+)
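
For merge-order calculations (see the DepPriority.__int__ docstring above),
the ignore_priority tuple is consumed as a sequence of progressively more
aggressive relaxation levels. A small illustration (not part of this patch):

from _emerge.DepPriority import DepPriority
from _emerge.DepPriorityNormalRange import DepPriorityNormalRange

runtime_dep = DepPriority(runtime=True)
buildtime_dep = DepPriority(buildtime=True)

# Index 0 ignores nothing; higher indices ignore softer and softer edges.
for level, ignore in enumerate(DepPriorityNormalRange.ignore_priority):
	if ignore is None:
		print(level, "ignore nothing")
		continue
	print(level, "runtime ignorable:", ignore(runtime_dep),
		"buildtime ignorable:", ignore(buildtime_dep))
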
diff --git a/lib/_emerge/DepPrioritySatisfiedRange.py b/lib/_emerge/DepPrioritySatisfiedRange.py
new file mode 100644
index 000000000..391f5409b
--- /dev/null
+++ b/lib/_emerge/DepPrioritySatisfiedRange.py
@@ -0,0 +1,97 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DepPriority import DepPriority
+class DepPrioritySatisfiedRange(object):
+ """
+ DepPriority Index Category
+
+ not satisfied and buildtime HARD
+ not satisfied and runtime 7 MEDIUM
+ not satisfied and runtime_post 6 MEDIUM_SOFT
+ satisfied and buildtime_slot_op 5 SOFT
+ satisfied and buildtime 4 SOFT
+ satisfied and runtime 3 SOFT
+ satisfied and runtime_post 2 SOFT
+ optional 1 SOFT
+ (none of the above) 0 NONE
+ """
+ MEDIUM = 7
+ MEDIUM_SOFT = 6
+ SOFT = 5
+ NONE = 0
+
+ @classmethod
+ def _ignore_optional(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.optional)
+
+ @classmethod
+ def _ignore_satisfied_runtime_post(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ if priority.optional:
+ return True
+ if not priority.satisfied:
+ return False
+ return bool(priority.runtime_post)
+
+ @classmethod
+ def _ignore_satisfied_runtime(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ if priority.optional:
+ return True
+ if not priority.satisfied:
+ return False
+ return not priority.buildtime
+
+ @classmethod
+ def _ignore_satisfied_buildtime(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ if priority.optional:
+ return True
+ if priority.buildtime_slot_op:
+ return False
+ return bool(priority.satisfied)
+
+ @classmethod
+ def _ignore_satisfied_buildtime_slot_op(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.optional or \
+ priority.satisfied)
+
+ @classmethod
+ def _ignore_runtime_post(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.optional or \
+ priority.satisfied or \
+ priority.runtime_post)
+
+ @classmethod
+ def _ignore_runtime(cls, priority):
+ if priority.__class__ is not DepPriority:
+ return False
+ return bool(priority.satisfied or \
+ priority.optional or \
+ not priority.buildtime)
+
+ ignore_medium = _ignore_runtime
+ ignore_medium_soft = _ignore_runtime_post
+ ignore_soft = _ignore_satisfied_buildtime
+
+
+DepPrioritySatisfiedRange.ignore_priority = (
+ None,
+ DepPrioritySatisfiedRange._ignore_optional,
+ DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
+ DepPrioritySatisfiedRange._ignore_satisfied_runtime,
+ DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
+ DepPrioritySatisfiedRange._ignore_satisfied_buildtime_slot_op,
+ DepPrioritySatisfiedRange._ignore_runtime_post,
+ DepPrioritySatisfiedRange._ignore_runtime
+)
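
Compared to DepPriorityNormalRange, the extra levels exist because a
dependency that is already satisfied by an installed package can be relaxed
much earlier than an unsatisfied one. Illustration (not part of this patch):

from _emerge.DepPriority import DepPriority
from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange

satisfied = DepPriority(buildtime=True, satisfied=True)
unsatisfied = DepPriority(buildtime=True)

# ignore_soft is _ignore_satisfied_buildtime: an installed build-time
# dependency may be ignored, an unsatisfied one remains a hard edge.
print(DepPrioritySatisfiedRange.ignore_soft(satisfied))    # True
print(DepPrioritySatisfiedRange.ignore_soft(unsatisfied))  # False
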
diff --git a/lib/_emerge/Dependency.py b/lib/_emerge/Dependency.py
new file mode 100644
index 000000000..2ec860f83
--- /dev/null
+++ b/lib/_emerge/Dependency.py
@@ -0,0 +1,21 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.util.SlotObject import SlotObject
+from _emerge.DepPriority import DepPriority
+
+class Dependency(SlotObject):
+ __slots__ = ("atom", "blocker", "child", "depth",
+ "parent", "onlydeps", "priority", "root", "want_update",
+ "collapsed_parent", "collapsed_priority")
+ def __init__(self, **kwargs):
+ SlotObject.__init__(self, **kwargs)
+ if self.priority is None:
+ self.priority = DepPriority()
+ if self.depth is None:
+ self.depth = 0
+ if self.collapsed_parent is None:
+ self.collapsed_parent = self.parent
+ if self.collapsed_priority is None:
+ self.collapsed_priority = self.priority
+
diff --git a/lib/_emerge/DependencyArg.py b/lib/_emerge/DependencyArg.py
new file mode 100644
index 000000000..29a0072c4
--- /dev/null
+++ b/lib/_emerge/DependencyArg.py
@@ -0,0 +1,46 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import sys
+
+from portage import _encodings, _unicode_encode
+
+class DependencyArg(object):
+
+ __slots__ = ('arg', 'force_reinstall', 'internal', 'reset_depth', 'root_config')
+
+ def __init__(self, arg=None, force_reinstall=False, internal=False,
+ reset_depth=True, root_config=None):
+ """
+ Use reset_depth=False for special arguments that should not interact
+ with depth calculations (see the emerge --deep=DEPTH option).
+ """
+ self.arg = arg
+ self.force_reinstall = force_reinstall
+ self.internal = internal
+ self.reset_depth = reset_depth
+ self.root_config = root_config
+
+ def __eq__(self, other):
+ if self.__class__ is not other.__class__:
+ return False
+ return self.arg == other.arg and \
+ self.root_config.root == other.root_config.root
+
+ def __hash__(self):
+ return hash((self.arg, self.root_config.root))
+
+ def __str__(self):
+ # Use unicode_literals format string for python-2.x safety,
+ # ensuring that self.arg.__unicode__() is used
+ # when necessary.
+ return "%s" % (self.arg,)
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(), encoding=_encodings['content'])
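
Equality and hashing only look at the argument string and the target root,
so instances can be used as set members and dict keys. A small illustration
(not part of this patch; the stand-in root_config object is hypothetical,
real ones are _emerge.RootConfig instances):

from _emerge.DependencyArg import DependencyArg

class FakeRootConfig(object):
	# Stand-in exposing just the attribute DependencyArg actually uses.
	def __init__(self, root):
		self.root = root

a = DependencyArg(arg="www-client/firefox", root_config=FakeRootConfig("/"))
b = DependencyArg(arg="www-client/firefox", root_config=FakeRootConfig("/"))

print(a == b)       # True: same arg, same root
print(len({a, b}))  # 1: hashing collapses them as well
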
diff --git a/lib/_emerge/EbuildBinpkg.py b/lib/_emerge/EbuildBinpkg.py
new file mode 100644
index 000000000..6e098eb8a
--- /dev/null
+++ b/lib/_emerge/EbuildBinpkg.py
@@ -0,0 +1,53 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.CompositeTask import CompositeTask
+from _emerge.EbuildPhase import EbuildPhase
+from portage import os
+
+class EbuildBinpkg(CompositeTask):
+ """
+ This assumes that src_install() has successfully completed.
+ """
+ __slots__ = ('pkg', 'settings') + \
+ ('_binpkg_tmpfile', '_binpkg_info')
+
+ def _start(self):
+ pkg = self.pkg
+ root_config = pkg.root_config
+ bintree = root_config.trees["bintree"]
+ binpkg_tmpfile = os.path.join(bintree.pkgdir,
+ pkg.cpv + ".tbz2." + str(os.getpid()))
+ bintree._ensure_dir(os.path.dirname(binpkg_tmpfile))
+
+ self._binpkg_tmpfile = binpkg_tmpfile
+ self.settings["PORTAGE_BINPKG_TMPFILE"] = self._binpkg_tmpfile
+
+ package_phase = EbuildPhase(background=self.background,
+ phase='package', scheduler=self.scheduler,
+ settings=self.settings)
+
+ self._start_task(package_phase, self._package_phase_exit)
+
+ def _package_phase_exit(self, package_phase):
+
+ self.settings.pop("PORTAGE_BINPKG_TMPFILE", None)
+ if self._default_exit(package_phase) != os.EX_OK:
+ try:
+ os.unlink(self._binpkg_tmpfile)
+ except OSError:
+ pass
+ self.wait()
+ return
+
+ pkg = self.pkg
+ bintree = pkg.root_config.trees["bintree"]
+ self._binpkg_info = bintree.inject(pkg.cpv,
+ filename=self._binpkg_tmpfile)
+
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
+
+ def get_binpkg_info(self):
+ return self._binpkg_info
diff --git a/lib/_emerge/EbuildBuild.py b/lib/_emerge/EbuildBuild.py
new file mode 100644
index 000000000..ab5a4da74
--- /dev/null
+++ b/lib/_emerge/EbuildBuild.py
@@ -0,0 +1,535 @@
+# Copyright 1999-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import functools
+import io
+
+import _emerge.emergelog
+from _emerge.AsynchronousTask import AsynchronousTask
+from _emerge.EbuildExecuter import EbuildExecuter
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.EbuildBinpkg import EbuildBinpkg
+from _emerge.EbuildFetcher import EbuildFetcher
+from _emerge.CompositeTask import CompositeTask
+from _emerge.EbuildMerge import EbuildMerge
+from _emerge.EbuildFetchonly import EbuildFetchonly
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+from _emerge.TaskSequence import TaskSequence
+
+import portage
+from portage import _encodings, _unicode_decode, _unicode_encode, os
+from portage.package.ebuild.digestcheck import digestcheck
+from portage.package.ebuild.doebuild import _check_temp_dir
+from portage.package.ebuild._spawn_nofetch import SpawnNofetchWithoutBuilddir
+from portage.util._async.AsyncTaskFuture import AsyncTaskFuture
+
+
+class EbuildBuild(CompositeTask):
+
+ __slots__ = ("args_set", "config_pool", "find_blockers",
+ "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
+ "prefetcher", "settings", "world_atom") + \
+ ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
+
+ def _start(self):
+ if not self.opts.fetchonly:
+ rval = _check_temp_dir(self.settings)
+ if rval != os.EX_OK:
+ self.returncode = rval
+ self._current_task = None
+ self._async_wait()
+ return
+
+ # First get the SRC_URI metadata (it's not cached in self.pkg.metadata
+ # because some packages have an extremely large SRC_URI value).
+ self._start_task(
+ AsyncTaskFuture(
+ future=self.pkg.root_config.trees["porttree"].dbapi.\
+ async_aux_get(self.pkg.cpv, ["SRC_URI"], myrepo=self.pkg.repo,
+ loop=self.scheduler)),
+ self._start_with_metadata)
+
+ def _start_with_metadata(self, aux_get_task):
+ self._assert_current(aux_get_task)
+ if aux_get_task.cancelled:
+ self._default_final_exit(aux_get_task)
+ return
+
+ pkg = self.pkg
+ settings = self.settings
+ root_config = pkg.root_config
+ tree = "porttree"
+ self._tree = tree
+ portdb = root_config.trees[tree].dbapi
+ settings.setcpv(pkg)
+ settings.configdict["pkg"]["SRC_URI"], = aux_get_task.future.result()
+ settings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
+ if self.opts.buildpkgonly:
+ settings.configdict["pkg"]["MERGE_TYPE"] = "buildonly"
+ else:
+ settings.configdict["pkg"]["MERGE_TYPE"] = "source"
+ ebuild_path = portdb.findname(pkg.cpv, myrepo=pkg.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % pkg.cpv)
+ self._ebuild_path = ebuild_path
+ portage.doebuild_environment(ebuild_path, 'setup',
+ settings=self.settings, db=portdb)
+
+ # Check the manifest here since with --keep-going mode it's
+ # currently possible to get this far with a broken manifest.
+ if not self._check_manifest():
+ self.returncode = 1
+ self._current_task = None
+ self._async_wait()
+ return
+
+ prefetcher = self.prefetcher
+ if prefetcher is None:
+ pass
+ elif prefetcher.isAlive() and \
+ prefetcher.poll() is None:
+
+ if not self.background:
+ fetch_log = os.path.join(
+ _emerge.emergelog._emerge_log_dir, 'emerge-fetch.log')
+ msg = (
+ 'Fetching files in the background.',
+ 'To view fetch progress, run in another terminal:',
+ 'tail -f %s' % fetch_log,
+ )
+ out = portage.output.EOutput()
+ for l in msg:
+ out.einfo(l)
+
+ self._current_task = prefetcher
+ prefetcher.addExitListener(self._prefetch_exit)
+ return
+
+ self._prefetch_exit(prefetcher)
+
+ def _check_manifest(self):
+ success = True
+
+ settings = self.settings
+ if 'strict' in settings.features and \
+ 'digest' not in settings.features:
+ settings['O'] = os.path.dirname(self._ebuild_path)
+ quiet_setting = settings.get('PORTAGE_QUIET')
+ settings['PORTAGE_QUIET'] = '1'
+ try:
+ success = digestcheck([], settings, strict=True)
+ finally:
+ if quiet_setting:
+ settings['PORTAGE_QUIET'] = quiet_setting
+ else:
+ del settings['PORTAGE_QUIET']
+
+ return success
+
+ def _prefetch_exit(self, prefetcher):
+
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ opts = self.opts
+ pkg = self.pkg
+ settings = self.settings
+
+ if opts.fetchonly:
+ if opts.pretend:
+ fetcher = EbuildFetchonly(
+ fetch_all=opts.fetch_all_uri,
+ pkg=pkg, pretend=opts.pretend,
+ settings=settings)
+ retval = fetcher.execute()
+ if retval == os.EX_OK:
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self._async_wait()
+ else:
+ # For pretend mode, the convention is to execute
+ # pkg_nofetch and return a successful exitcode.
+ self._start_task(SpawnNofetchWithoutBuilddir(
+ background=self.background,
+ portdb=self.pkg.root_config.trees[self._tree].dbapi,
+ ebuild_path=self._ebuild_path,
+ scheduler=self.scheduler,
+ settings=self.settings),
+ self._default_final_exit)
+ return
+ else:
+ fetcher = EbuildFetcher(
+ config_pool=self.config_pool,
+ ebuild_path=self._ebuild_path,
+ fetchall=self.opts.fetch_all_uri,
+ fetchonly=self.opts.fetchonly,
+ background=False,
+ logfile=None,
+ pkg=self.pkg,
+ scheduler=self.scheduler)
+ self._start_task(fetcher, self._fetchonly_exit)
+ return
+
+ self._build_dir = EbuildBuildDir(
+ scheduler=self.scheduler, settings=settings)
+ self._start_task(
+ AsyncTaskFuture(future=self._build_dir.async_lock()),
+ self._start_pre_clean)
+
+ def _start_pre_clean(self, lock_task):
+ self._assert_current(lock_task)
+ if lock_task.cancelled:
+ self._default_final_exit(lock_task)
+ return
+
+ lock_task.future.result()
+ # Cleaning needs to happen before fetch, since the build dir
+ # is used for log handling.
+ msg = " === (%s of %s) Cleaning (%s::%s)" % \
+ (self.pkg_count.curval, self.pkg_count.maxval,
+ self.pkg.cpv, self._ebuild_path)
+ short_msg = "emerge: (%s of %s) %s Clean" % \
+ (self.pkg_count.curval, self.pkg_count.maxval, self.pkg.cpv)
+ self.logger.log(msg, short_msg=short_msg)
+
+ pre_clean_phase = EbuildPhase(background=self.background,
+ phase='clean', scheduler=self.scheduler, settings=self.settings)
+ self._start_task(pre_clean_phase, self._pre_clean_exit)
+
+ def _fetchonly_exit(self, fetcher):
+ self._final_exit(fetcher)
+ if self.returncode != os.EX_OK:
+ self.returncode = None
+ portdb = self.pkg.root_config.trees[self._tree].dbapi
+ self._start_task(SpawnNofetchWithoutBuilddir(
+ background=self.background,
+ portdb=portdb,
+ ebuild_path=self._ebuild_path,
+ scheduler=self.scheduler,
+ settings=self.settings),
+ self._nofetch_without_builddir_exit)
+ return
+
+ self.wait()
+
+ def _nofetch_without_builddir_exit(self, nofetch):
+ self._final_exit(nofetch)
+ self.returncode = 1
+ self.wait()
+
+ def _pre_clean_exit(self, pre_clean_phase):
+ if self._default_exit(pre_clean_phase) != os.EX_OK:
+ self._async_unlock_builddir(returncode=self.returncode)
+ return
+
+ # for log handling
+ portage.prepare_build_dirs(self.pkg.root, self.settings, 1)
+
+ fetcher = EbuildFetcher(config_pool=self.config_pool,
+ ebuild_path=self._ebuild_path,
+ fetchall=self.opts.fetch_all_uri,
+ fetchonly=self.opts.fetchonly,
+ background=self.background,
+ logfile=self.settings.get('PORTAGE_LOG_FILE'),
+ pkg=self.pkg, scheduler=self.scheduler)
+
+ self._start_task(AsyncTaskFuture(
+ future=fetcher.async_already_fetched(self.settings)),
+ functools.partial(self._start_fetch, fetcher))
+
+ def _start_fetch(self, fetcher, already_fetched_task):
+ self._assert_current(already_fetched_task)
+ if already_fetched_task.cancelled:
+ self._default_final_exit(already_fetched_task)
+ return
+
+ try:
+ already_fetched = already_fetched_task.future.result()
+ except portage.exception.InvalidDependString as e:
+ msg_lines = []
+ msg = "Fetch failed for '%s' due to invalid SRC_URI: %s" % \
+ (self.pkg.cpv, e)
+ msg_lines.append(msg)
+ fetcher._eerror(msg_lines)
+ portage.elog.elog_process(self.pkg.cpv, self.settings)
+ self._async_unlock_builddir(returncode=1)
+ return
+
+ if already_fetched:
+ # This case is optimized to skip the fetch queue.
+ fetcher = None
+ self._fetch_exit(fetcher)
+ return
+
+ # Allow the Scheduler's fetch queue to control the
+ # number of concurrent fetchers.
+ fetcher.addExitListener(self._fetch_exit)
+ self._task_queued(fetcher)
+ self.scheduler.fetch.schedule(fetcher)
+
+ def _fetch_exit(self, fetcher):
+
+ if fetcher is not None and \
+ self._default_exit(fetcher) != os.EX_OK:
+ self._fetch_failed()
+ return
+
+ # discard successful fetch log
+ self._build_dir.clean_log()
+ pkg = self.pkg
+ logger = self.logger
+ opts = self.opts
+ pkg_count = self.pkg_count
+ scheduler = self.scheduler
+ settings = self.settings
+ features = settings.features
+ ebuild_path = self._ebuild_path
+ system_set = pkg.root_config.sets["system"]
+
+ #buildsyspkg: Check if we need to _force_ binary package creation
+ self._issyspkg = "buildsyspkg" in features and \
+ system_set.findAtomForPackage(pkg) and \
+ "buildpkg" not in features and \
+ opts.buildpkg != 'n'
+
+ if ("buildpkg" in features or self._issyspkg) \
+ and not self.opts.buildpkg_exclude.findAtomForPackage(pkg):
+
+ self._buildpkg = True
+
+ msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
+ short_msg = "emerge: (%s of %s) %s Compile" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ logger.log(msg, short_msg=short_msg)
+
+ else:
+ msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
+ short_msg = "emerge: (%s of %s) %s Compile" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ logger.log(msg, short_msg=short_msg)
+
+ build = EbuildExecuter(background=self.background, pkg=pkg,
+ scheduler=scheduler, settings=settings)
+ self._start_task(build, self._build_exit)
+
+ def _fetch_failed(self):
+ # We only call the pkg_nofetch phase if either RESTRICT=fetch
+ # is set or the package has explicitly overridden the default
+ # pkg_nofetch implementation. This allows specialized messages
+ # to be displayed for problematic packages even though they do
+ # not set RESTRICT=fetch (bug #336499).
+
+ if 'fetch' not in self.pkg.restrict and \
+ 'nofetch' not in self.pkg.defined_phases:
+ self._async_unlock_builddir(returncode=self.returncode)
+ return
+
+ self.returncode = None
+ nofetch_phase = EbuildPhase(background=self.background,
+ phase='nofetch', scheduler=self.scheduler, settings=self.settings)
+ self._start_task(nofetch_phase, self._nofetch_exit)
+
+ def _nofetch_exit(self, nofetch_phase):
+ self._final_exit(nofetch_phase)
+ self._async_unlock_builddir(returncode=1)
+
+ def _async_unlock_builddir(self, returncode=None):
+ """
+ Release the lock asynchronously, and if a returncode parameter
+ is given then set self.returncode and notify exit listeners.
+ """
+ if returncode is not None:
+ # The returncode will be set after unlock is complete.
+ self.returncode = None
+ portage.elog.elog_process(self.pkg.cpv, self.settings)
+ self._start_task(
+ AsyncTaskFuture(future=self._build_dir.async_unlock()),
+ functools.partial(self._unlock_builddir_exit, returncode=returncode))
+
+ def _unlock_builddir_exit(self, unlock_task, returncode=None):
+ self._assert_current(unlock_task)
+ if unlock_task.cancelled and returncode is not None:
+ self._default_final_exit(unlock_task)
+ return
+
+ # Normally, async_unlock should not raise an exception here.
+ unlock_task.future.cancelled() or unlock_task.future.result()
+ if returncode is not None:
+ self.returncode = returncode
+ self._async_wait()
+
+ def _build_exit(self, build):
+ if self._default_exit(build) != os.EX_OK:
+ self._async_unlock_builddir(returncode=self.returncode)
+ return
+
+ buildpkg = self._buildpkg
+
+ if not buildpkg:
+ self._final_exit(build)
+ self.wait()
+ return
+
+ if self._issyspkg:
+ msg = ">>> This is a system package, " + \
+ "let's pack a rescue tarball.\n"
+ self.scheduler.output(msg,
+ log_path=self.settings.get("PORTAGE_LOG_FILE"))
+
+ binpkg_tasks = TaskSequence()
+ requested_binpkg_formats = self.settings.get("PORTAGE_BINPKG_FORMAT", "tar").split()
+ for pkg_fmt in portage.const.SUPPORTED_BINPKG_FORMATS:
+ if pkg_fmt in requested_binpkg_formats:
+ if pkg_fmt == "rpm":
+ binpkg_tasks.add(EbuildPhase(background=self.background,
+ phase="rpm", scheduler=self.scheduler,
+ settings=self.settings))
+ else:
+ task = EbuildBinpkg(
+ background=self.background,
+ pkg=self.pkg, scheduler=self.scheduler,
+ settings=self.settings)
+ binpkg_tasks.add(task)
+ # Guarantee that _record_binpkg_info is called
+ # immediately after EbuildBinpkg. Note that
+ # task.addExitListener does not provide the
+ # necessary guarantee (see bug 578204).
+ binpkg_tasks.add(self._RecordBinpkgInfo(
+ ebuild_binpkg=task, ebuild_build=self))
+
+ if binpkg_tasks:
+ self._start_task(binpkg_tasks, self._buildpkg_exit)
+ return
+
+ self._final_exit(build)
+ self.wait()
+
+ class _RecordBinpkgInfo(AsynchronousTask):
+ """
+ This class wraps the EbuildBuild _record_binpkg_info method
+ with an AsynchronousTask interface, so that it can be
+ scheduled as a member of a TaskSequence.
+ """
+
+ __slots__ = ('ebuild_binpkg', 'ebuild_build',)
+
+ def _start(self):
+ self.ebuild_build._record_binpkg_info(self.ebuild_binpkg)
+ AsynchronousTask._start(self)
+
+ def _buildpkg_exit(self, packager):
+ """
+ Release the build dir lock when there is a failure or
+ when in buildpkgonly mode. Otherwise, the lock will
+ be released when merge() is called.
+ """
+
+ if self._default_exit(packager) != os.EX_OK:
+ self._async_unlock_builddir(returncode=self.returncode)
+ return
+
+ if self.opts.buildpkgonly:
+ phase = 'success_hooks'
+ success_hooks = MiscFunctionsProcess(
+ background=self.background,
+ commands=[phase], phase=phase,
+ scheduler=self.scheduler, settings=self.settings)
+ self._start_task(success_hooks,
+ self._buildpkgonly_success_hook_exit)
+ return
+
+ # Continue holding the builddir lock until
+ # after the package has been installed.
+ self._current_task = None
+ self.returncode = packager.returncode
+ self.wait()
+
+ def _record_binpkg_info(self, task):
+ if task.returncode != os.EX_OK:
+ return
+
+ # Save info about the created binary package, so that
+ # identifying information can be passed to the install
+ # task, to be recorded in the installed package database.
+ pkg = task.get_binpkg_info()
+ infoloc = os.path.join(self.settings["PORTAGE_BUILDDIR"],
+ "build-info")
+ info = {
+ "BINPKGMD5": "%s\n" % pkg._metadata["MD5"],
+ }
+ if pkg.build_id is not None:
+ info["BUILD_ID"] = "%s\n" % pkg.build_id
+ for k, v in info.items():
+ with io.open(_unicode_encode(os.path.join(infoloc, k),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='strict') as f:
+ f.write(v)
+
+ def _buildpkgonly_success_hook_exit(self, success_hooks):
+ self._default_exit(success_hooks)
+ self.returncode = None
+ # Need to call "clean" phase for buildpkgonly mode
+ portage.elog.elog_process(self.pkg.cpv, self.settings)
+ phase = 'clean'
+ clean_phase = EbuildPhase(background=self.background,
+ phase=phase, scheduler=self.scheduler, settings=self.settings)
+ self._start_task(clean_phase, self._clean_exit)
+
+ def _clean_exit(self, clean_phase):
+ if self._final_exit(clean_phase) != os.EX_OK or \
+ self.opts.buildpkgonly:
+ self._async_unlock_builddir(returncode=self.returncode)
+ else:
+ self.wait()
+
+ def create_install_task(self):
+ """
+ Install the package and then clean up and release locks.
+ Only call this after the build has completed successfully
+ and neither fetchonly nor buildpkgonly mode are enabled.
+ """
+
+ ldpath_mtimes = self.ldpath_mtimes
+ logger = self.logger
+ pkg = self.pkg
+ pkg_count = self.pkg_count
+ settings = self.settings
+ world_atom = self.world_atom
+ ebuild_path = self._ebuild_path
+ tree = self._tree
+
+ task = EbuildMerge(exit_hook=self._install_exit,
+ find_blockers=self.find_blockers,
+ ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
+ pkg_count=pkg_count, pkg_path=ebuild_path,
+ scheduler=self.scheduler,
+ settings=settings, tree=tree, world_atom=world_atom)
+
+ msg = " === (%s of %s) Merging (%s::%s)" % \
+ (pkg_count.curval, pkg_count.maxval,
+ pkg.cpv, ebuild_path)
+ short_msg = "emerge: (%s of %s) %s Merge" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ logger.log(msg, short_msg=short_msg)
+
+ return task
+
+ def _install_exit(self, task):
+ """
+ @returns: Future, result is the returncode from an
+ EbuildBuildDir.async_unlock() task
+ """
+ self._async_unlock_builddir()
+ if self._current_task is None:
+ result = self.scheduler.create_future()
+ self.scheduler.call_soon(result.set_result, os.EX_OK)
+ else:
+ result = self._current_task.async_wait()
+ return result
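
The binary-package step in _build_exit() honours PORTAGE_BINPKG_FORMAT by
intersecting it with the formats portage supports; a minimal illustration
(not part of this patch):

import portage

requested = "tar rpm".split()  # e.g. PORTAGE_BINPKG_FORMAT="tar rpm"
for pkg_fmt in portage.const.SUPPORTED_BINPKG_FORMATS:
	if pkg_fmt in requested:
		print("will run the", pkg_fmt, "packaging step")
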
diff --git a/lib/_emerge/EbuildBuildDir.py b/lib/_emerge/EbuildBuildDir.py
new file mode 100644
index 000000000..477113db8
--- /dev/null
+++ b/lib/_emerge/EbuildBuildDir.py
@@ -0,0 +1,161 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import functools
+
+from _emerge.AsynchronousLock import AsynchronousLock
+
+import portage
+from portage import os
+from portage.exception import PortageException
+from portage.util.SlotObject import SlotObject
+
+class EbuildBuildDir(SlotObject):
+
+ __slots__ = ("scheduler", "settings",
+ "locked", "_catdir", "_lock_obj")
+
+ def __init__(self, **kwargs):
+ SlotObject.__init__(self, **kwargs)
+ self.locked = False
+
+ def _assert_lock(self, async_lock):
+ if async_lock.returncode != os.EX_OK:
+ # TODO: create a better way to propagate this error to the caller
+ raise AssertionError("AsynchronousLock failed with returncode %s" \
+ % (async_lock.returncode,))
+
+ def clean_log(self):
+ """Discard existing log. The log will not be be discarded
+ in cases when it would not make sense, like when FEATURES=keepwork
+ is enabled."""
+ settings = self.settings
+ if 'keepwork' in settings.features:
+ return
+ log_file = settings.get('PORTAGE_LOG_FILE')
+ if log_file is not None and os.path.isfile(log_file):
+ try:
+ os.unlink(log_file)
+ except OSError:
+ pass
+
+ def async_lock(self):
+ """
+ Acquire the lock asynchronously. Notification is available
+ via the add_done_callback method of the returned Future instance.
+
+ This raises an AlreadyLocked exception if async_lock() is called
+ while a lock is already held. In order to avoid this, call
+ async_unlock() or check whether the "locked" attribute is True
+ or False before calling async_lock().
+
+ @returns: Future, result is None
+ """
+ if self._lock_obj is not None:
+ raise self.AlreadyLocked((self._lock_obj,))
+
+ dir_path = self.settings.get('PORTAGE_BUILDDIR')
+ if not dir_path:
+ raise AssertionError('PORTAGE_BUILDDIR is unset')
+ catdir = os.path.dirname(dir_path)
+ self._catdir = catdir
+ catdir_lock = AsynchronousLock(path=catdir, scheduler=self.scheduler)
+ builddir_lock = AsynchronousLock(path=dir_path, scheduler=self.scheduler)
+ result = self.scheduler.create_future()
+
+ def catdir_locked(catdir_lock):
+ try:
+ self._assert_lock(catdir_lock)
+ except AssertionError as e:
+ result.set_exception(e)
+ return
+
+ try:
+ portage.util.ensure_dirs(catdir,
+ gid=portage.portage_gid,
+ mode=0o70, mask=0)
+ except PortageException as e:
+ if not os.path.isdir(catdir):
+ result.set_exception(e)
+ return
+
+ builddir_lock.addExitListener(builddir_locked)
+ builddir_lock.start()
+
+ def builddir_locked(builddir_lock):
+ try:
+ self._assert_lock(builddir_lock)
+ except AssertionError as e:
+ catdir_lock.async_unlock().add_done_callback(
+ functools.partial(catdir_unlocked, exception=e))
+ return
+
+ self._lock_obj = builddir_lock
+ self.locked = True
+ self.settings['PORTAGE_BUILDDIR_LOCKED'] = '1'
+ catdir_lock.async_unlock().add_done_callback(catdir_unlocked)
+
+ def catdir_unlocked(future, exception=None):
+ if not (exception is None and future.exception() is None):
+ result.set_exception(exception or future.exception())
+ else:
+ result.set_result(None)
+
+ try:
+ portage.util.ensure_dirs(os.path.dirname(catdir),
+ gid=portage.portage_gid,
+ mode=0o70, mask=0)
+ except PortageException:
+ if not os.path.isdir(os.path.dirname(catdir)):
+ raise
+
+ catdir_lock.addExitListener(catdir_locked)
+ catdir_lock.start()
+ return result
+
+ def async_unlock(self):
+ """
+ Release the lock asynchronously. Release notification is available
+ via the add_done_callback method of the returned Future instance.
+
+ @returns: Future, result is None
+ """
+ result = self.scheduler.create_future()
+
+ def builddir_unlocked(future):
+ if future.exception() is not None:
+ result.set_exception(future.exception())
+ else:
+ self._lock_obj = None
+ self.locked = False
+ self.settings.pop('PORTAGE_BUILDDIR_LOCKED', None)
+ catdir_lock = AsynchronousLock(
+ path=self._catdir, scheduler=self.scheduler)
+ catdir_lock.addExitListener(catdir_locked)
+ catdir_lock.start()
+
+ def catdir_locked(catdir_lock):
+ if catdir_lock.wait() != os.EX_OK:
+ result.set_result(None)
+ else:
+ try:
+ os.rmdir(self._catdir)
+ except OSError:
+ pass
+ catdir_lock.async_unlock().add_done_callback(catdir_unlocked)
+
+ def catdir_unlocked(future):
+ if future.exception() is None:
+ result.set_result(None)
+ else:
+ result.set_exception(future.exception())
+
+ if self._lock_obj is None:
+ self.scheduler.call_soon(result.set_result, None)
+ else:
+ self._lock_obj.async_unlock().add_done_callback(builddir_unlocked)
+ return result
+
+ class AlreadyLocked(portage.exception.PortageException):
+ pass
+
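
A minimal usage sketch of the future-based lock API (not part of this patch).
It assumes an existing portage config, here called "settings", on which
doebuild_environment() has already set PORTAGE_BUILDDIR, and uses portage's
global event loop as the scheduler:

from _emerge.EbuildBuildDir import EbuildBuildDir
from portage.util._eventloop.global_event_loop import global_event_loop

loop = global_event_loop()
build_dir = EbuildBuildDir(scheduler=loop, settings=settings)

# async_lock() raises AlreadyLocked if called while a lock is held; other
# failures (e.g. a failed AsynchronousLock) surface through the Future.
loop.run_until_complete(build_dir.async_lock())
try:
	pass  # ... work inside ${PORTAGE_BUILDDIR} ...
finally:
	loop.run_until_complete(build_dir.async_unlock())
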
diff --git a/lib/_emerge/EbuildExecuter.py b/lib/_emerge/EbuildExecuter.py
new file mode 100644
index 000000000..d387b42be
--- /dev/null
+++ b/lib/_emerge/EbuildExecuter.py
@@ -0,0 +1,84 @@
+# Copyright 1999-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.TaskSequence import TaskSequence
+from _emerge.CompositeTask import CompositeTask
+import portage
+from portage import os
+from portage.eapi import eapi_has_src_prepare_and_src_configure, \
+ eapi_exports_replace_vars
+
+class EbuildExecuter(CompositeTask):
+
+ __slots__ = ("pkg", "settings")
+
+ _phases = ("prepare", "configure", "compile", "test", "install")
+
+ _live_eclasses = portage.const.LIVE_ECLASSES
+
+ def _start(self):
+ pkg = self.pkg
+ scheduler = self.scheduler
+ settings = self.settings
+ cleanup = 0
+ portage.prepare_build_dirs(pkg.root, settings, cleanup)
+
+ if eapi_exports_replace_vars(settings['EAPI']):
+ vardb = pkg.root_config.trees['vartree'].dbapi
+ settings["REPLACING_VERSIONS"] = " ".join(
+ set(portage.versions.cpv_getversion(match) \
+ for match in vardb.match(pkg.slot_atom) + \
+ vardb.match('='+pkg.cpv)))
+
+ setup_phase = EbuildPhase(background=self.background,
+ phase="setup", scheduler=scheduler,
+ settings=settings)
+
+ setup_phase.addExitListener(self._setup_exit)
+ self._task_queued(setup_phase)
+ self.scheduler.scheduleSetup(setup_phase)
+
+ def _setup_exit(self, setup_phase):
+
+ if self._default_exit(setup_phase) != os.EX_OK:
+ self.wait()
+ return
+
+ unpack_phase = EbuildPhase(background=self.background,
+ phase="unpack", scheduler=self.scheduler,
+ settings=self.settings)
+
+ if self._live_eclasses.intersection(self.pkg.inherited):
+ # Serialize $DISTDIR access for live ebuilds since
+ # otherwise they can interfere with each other.
+
+ unpack_phase.addExitListener(self._unpack_exit)
+ self._task_queued(unpack_phase)
+ self.scheduler.scheduleUnpack(unpack_phase)
+
+ else:
+ self._start_task(unpack_phase, self._unpack_exit)
+
+ def _unpack_exit(self, unpack_phase):
+
+ if self._default_exit(unpack_phase) != os.EX_OK:
+ self.wait()
+ return
+
+ ebuild_phases = TaskSequence(scheduler=self.scheduler)
+
+ pkg = self.pkg
+ phases = self._phases
+ eapi = pkg.eapi
+ if not eapi_has_src_prepare_and_src_configure(eapi):
+ # skip src_prepare and src_configure
+ phases = phases[2:]
+
+ for phase in phases:
+ ebuild_phases.add(EbuildPhase(background=self.background,
+ phase=phase, scheduler=self.scheduler,
+ settings=self.settings))
+
+ self._start_task(ebuild_phases, self._default_final_exit)
+
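
The phase trimming in _unpack_exit() relies on the portage.eapi helper
imported above; a quick check of which EAPIs keep src_prepare/src_configure
(not part of this patch):

from portage.eapi import eapi_has_src_prepare_and_src_configure

# EAPIs 0 and 1 predate src_prepare/src_configure, so EbuildExecuter drops
# those two entries from its phase sequence for them.
for eapi in ("0", "1", "2", "6"):
	print(eapi, eapi_has_src_prepare_and_src_configure(eapi))
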
diff --git a/lib/_emerge/EbuildFetcher.py b/lib/_emerge/EbuildFetcher.py
new file mode 100644
index 000000000..ad5109c28
--- /dev/null
+++ b/lib/_emerge/EbuildFetcher.py
@@ -0,0 +1,373 @@
+# Copyright 1999-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import copy
+import io
+import sys
+
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage import _unicode_decode
+from portage.checksum import _hash_filter
+from portage.elog.messages import eerror
+from portage.package.ebuild.fetch import _check_distfile, fetch
+from portage.util._async.AsyncTaskFuture import AsyncTaskFuture
+from portage.util._async.ForkProcess import ForkProcess
+from portage.util._pty import _create_pty_or_pipe
+from _emerge.CompositeTask import CompositeTask
+
+
+class EbuildFetcher(CompositeTask):
+
+ __slots__ = ("config_pool", "ebuild_path", "fetchonly", "fetchall",
+ "logfile", "pkg", "prefetch", "_fetcher_proc")
+
+ def __init__(self, **kwargs):
+ CompositeTask.__init__(self, **kwargs)
+ self._fetcher_proc = _EbuildFetcherProcess(**kwargs)
+
+ def async_already_fetched(self, settings):
+ """
+ Returns True if all files already exist locally and have correct
+ digests, otherwise returns False. When returning True, appropriate
+ digest checking messages are produced for display and/or logging.
+ When returning False, no messages are produced, since we assume
+ that a fetcher process will later be executed in order to produce
+ such messages. This will raise InvalidDependString if SRC_URI is
+ invalid.
+ """
+ return self._fetcher_proc.async_already_fetched(settings)
+
+ def _start(self):
+ self._start_task(
+ AsyncTaskFuture(future=self._fetcher_proc._async_uri_map()),
+ self._start_fetch)
+
+ def _start_fetch(self, uri_map_task):
+ self._assert_current(uri_map_task)
+ if uri_map_task.cancelled:
+ self._default_final_exit(uri_map_task)
+ return
+
+ try:
+ uri_map = uri_map_task.future.result()
+ except portage.exception.InvalidDependString as e:
+ msg_lines = []
+ msg = "Fetch failed for '%s' due to invalid SRC_URI: %s" % \
+ (self.pkg.cpv, e)
+ msg_lines.append(msg)
+ self._fetcher_proc._eerror(msg_lines)
+ self._current_task = None
+ self.returncode = 1
+ self._async_wait()
+ return
+
+ # First get the SRC_URI metadata (it's not cached in self.pkg.metadata
+ # because some packages have an extremely large SRC_URI value).
+ self._start_task(
+ AsyncTaskFuture(
+ future=self.pkg.root_config.trees["porttree"].dbapi.\
+ async_aux_get(self.pkg.cpv, ["SRC_URI"], myrepo=self.pkg.repo,
+ loop=self.scheduler)),
+ self._start_with_metadata)
+
+ def _start_with_metadata(self, aux_get_task):
+ self._assert_current(aux_get_task)
+ if aux_get_task.cancelled:
+ self._default_final_exit(aux_get_task)
+ return
+
+ self._fetcher_proc.src_uri, = aux_get_task.future.result()
+ self._start_task(self._fetcher_proc, self._default_final_exit)
+
+
+class _EbuildFetcherProcess(ForkProcess):
+
+ __slots__ = ("config_pool", "ebuild_path", "fetchonly", "fetchall",
+ "pkg", "prefetch", "src_uri", "_digests", "_manifest",
+ "_settings", "_uri_map")
+
+ def async_already_fetched(self, settings):
+ result = self.scheduler.create_future()
+
+ def uri_map_done(uri_map_future):
+ if uri_map_future.cancelled():
+ result.cancel()
+ return
+
+ if uri_map_future.exception() is not None or result.cancelled():
+ if not result.cancelled():
+ result.set_exception(uri_map_future.exception())
+ return
+
+ uri_map = uri_map_future.result()
+ if uri_map:
+ result.set_result(
+ self._check_already_fetched(settings, uri_map))
+ else:
+ result.set_result(True)
+
+ uri_map_future = self._async_uri_map()
+ result.add_done_callback(lambda result:
+ uri_map_future.cancel() if result.cancelled() else None)
+ uri_map_future.add_done_callback(uri_map_done)
+ return result
+
+ def _check_already_fetched(self, settings, uri_map):
+ digests = self._get_digests()
+ distdir = settings["DISTDIR"]
+ allow_missing = self._get_manifest().allow_missing
+
+ for filename in uri_map:
+ # Use stat rather than lstat since fetch() creates
+ # symlinks when PORTAGE_RO_DISTDIRS is used.
+ try:
+ st = os.stat(os.path.join(distdir, filename))
+ except OSError:
+ return False
+ if st.st_size == 0:
+ return False
+ expected_size = digests.get(filename, {}).get('size')
+ if expected_size is None:
+ continue
+ if st.st_size != expected_size:
+ return False
+
+ hash_filter = _hash_filter(settings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if hash_filter.transparent:
+ hash_filter = None
+ stdout_orig = sys.stdout
+ stderr_orig = sys.stderr
+ global_havecolor = portage.output.havecolor
+ out = io.StringIO()
+ eout = portage.output.EOutput()
+ eout.quiet = settings.get("PORTAGE_QUIET") == "1"
+ success = True
+ try:
+ sys.stdout = out
+ sys.stderr = out
+ if portage.output.havecolor:
+ portage.output.havecolor = not self.background
+
+ for filename in uri_map:
+ mydigests = digests.get(filename)
+ if mydigests is None:
+ if not allow_missing:
+ success = False
+ break
+ continue
+ ok, st = _check_distfile(os.path.join(distdir, filename),
+ mydigests, eout, show_errors=False, hash_filter=hash_filter)
+ if not ok:
+ success = False
+ break
+ except portage.exception.FileNotFound:
+ # A file disappeared unexpectedly.
+ return False
+ finally:
+ sys.stdout = stdout_orig
+ sys.stderr = stderr_orig
+ portage.output.havecolor = global_havecolor
+
+ if success:
+ # When returning unsuccessfully, no messages are produced, since
+ # we assume that a fetcher process will later be executed in order
+ # to produce such messages.
+ msg = out.getvalue()
+ if msg:
+ self.scheduler.output(msg, log_path=self.logfile)
+
+ return success
+
+ def _start(self):
+
+ root_config = self.pkg.root_config
+ portdb = root_config.trees["porttree"].dbapi
+ ebuild_path = self._get_ebuild_path()
+ # This is initialized by an earlier _async_uri_map call.
+ uri_map = self._uri_map
+
+ if not uri_map:
+ # Nothing to fetch.
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ settings = self.config_pool.allocate()
+ settings.setcpv(self.pkg)
+ settings.configdict["pkg"]["SRC_URI"] = self.src_uri
+ portage.doebuild_environment(ebuild_path, 'fetch',
+ settings=settings, db=portdb)
+
+ if self.prefetch and \
+ self._prefetch_size_ok(uri_map, settings, ebuild_path):
+ self.config_pool.deallocate(settings)
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ nocolor = settings.get("NOCOLOR")
+
+ if self.prefetch:
+ settings["PORTAGE_PARALLEL_FETCHONLY"] = "1"
+
+ if self.background:
+ nocolor = "true"
+
+ if nocolor is not None:
+ settings["NOCOLOR"] = nocolor
+
+ self._settings = settings
+ ForkProcess._start(self)
+
+ # Free settings now since it's no longer needed in
+ # this process (the subprocess has a private copy).
+ self.config_pool.deallocate(settings)
+ settings = None
+ self._settings = None
+
+ def _run(self):
+ # Force consistent color output, in case we are capturing fetch
+ # output through a normal pipe due to unavailability of ptys.
+ portage.output.havecolor = self._settings.get('NOCOLOR') \
+ not in ('yes', 'true')
+
+ rval = 1
+ allow_missing = self._get_manifest().allow_missing or \
+ 'digest' in self._settings.features
+ if fetch(self._uri_map, self._settings, fetchonly=self.fetchonly,
+ digests=copy.deepcopy(self._get_digests()),
+ allow_missing_digests=allow_missing):
+ rval = os.EX_OK
+ return rval
+
+ def _get_ebuild_path(self):
+ if self.ebuild_path is not None:
+ return self.ebuild_path
+ portdb = self.pkg.root_config.trees["porttree"].dbapi
+ self.ebuild_path = portdb.findname(self.pkg.cpv, myrepo=self.pkg.repo)
+ if self.ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % self.pkg.cpv)
+ return self.ebuild_path
+
+ def _get_manifest(self):
+ if self._manifest is None:
+ pkgdir = os.path.dirname(self._get_ebuild_path())
+ self._manifest = self.pkg.root_config.settings.repositories.get_repo_for_location(
+ os.path.dirname(os.path.dirname(pkgdir))).load_manifest(pkgdir, None)
+ return self._manifest
+
+ def _get_digests(self):
+ if self._digests is None:
+ self._digests = self._get_manifest().getTypeDigests("DIST")
+ return self._digests
+
+ def _async_uri_map(self):
+ """
+ This calls the portdbapi.async_fetch_map method and returns the
+ resulting Future (may contain InvalidDependString exception).
+ """
+ if self._uri_map is not None:
+ result = self.scheduler.create_future()
+ result.set_result(self._uri_map)
+ return result
+
+ pkgdir = os.path.dirname(self._get_ebuild_path())
+ mytree = os.path.dirname(os.path.dirname(pkgdir))
+ use = None
+ if not self.fetchall:
+ use = self.pkg.use.enabled
+ portdb = self.pkg.root_config.trees["porttree"].dbapi
+
+ def cache_result(result):
+ try:
+ self._uri_map = result.result()
+ except Exception:
+ # The caller handles this when it retrieves the result.
+ pass
+
+ result = portdb.async_fetch_map(self.pkg.cpv,
+ useflags=use, mytree=mytree, loop=self.scheduler)
+ result.add_done_callback(cache_result)
+ return result
+
+ def _prefetch_size_ok(self, uri_map, settings, ebuild_path):
+ distdir = settings["DISTDIR"]
+
+ sizes = {}
+ for filename in uri_map:
+ # Use stat rather than lstat since portage.fetch() creates
+ # symlinks when PORTAGE_RO_DISTDIRS is used.
+ try:
+ st = os.stat(os.path.join(distdir, filename))
+ except OSError:
+ return False
+ if st.st_size == 0:
+ return False
+ sizes[filename] = st.st_size
+
+ digests = self._get_digests()
+ for filename, actual_size in sizes.items():
+ size = digests.get(filename, {}).get('size')
+ if size is None:
+ continue
+ if size != actual_size:
+ return False
+
+ # All files are present and sizes are ok. In this case the normal
+ # fetch code will be skipped, so we need to generate equivalent
+ # output here.
+ if self.logfile is not None:
+ f = io.open(_unicode_encode(self.logfile,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='a', encoding=_encodings['content'],
+ errors='backslashreplace')
+ for filename in uri_map:
+ f.write(_unicode_decode((' * %s size ;-) ...' % \
+ filename).ljust(73) + '[ ok ]\n'))
+ f.close()
+
+ return True
+
+ def _pipe(self, fd_pipes):
+ """When appropriate, use a pty so that fetcher progress bars,
+ like wget has, will work properly."""
+ if self.background or not sys.stdout.isatty():
+ # When the output only goes to a log file,
+ # there's no point in creating a pty.
+ return os.pipe()
+ stdout_pipe = None
+ if not self.background:
+ stdout_pipe = fd_pipes.get(1)
+ got_pty, master_fd, slave_fd = \
+ _create_pty_or_pipe(copy_term_size=stdout_pipe)
+ return (master_fd, slave_fd)
+
+ def _eerror(self, lines):
+ out = io.StringIO()
+ for line in lines:
+ eerror(line, phase="unpack", key=self.pkg.cpv, out=out)
+ msg = out.getvalue()
+ if msg:
+ self.scheduler.output(msg, log_path=self.logfile)
+
+ def _async_waitpid_cb(self, *args, **kwargs):
+ """
+ Override _async_waitpid_cb to perform cleanup that is
+ not necessarily idempotent.
+ """
+ ForkProcess._async_waitpid_cb(self, *args, **kwargs)
+ # Collect elog messages that might have been
+ # created by the pkg_nofetch phase.
+ # Skip elog messages for prefetch, in order to avoid duplicates.
+ if not self.prefetch and self.returncode != os.EX_OK:
+ msg_lines = []
+ msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
+ if self.logfile is not None:
+ msg += ", Log file:"
+ msg_lines.append(msg)
+ if self.logfile is not None:
+ msg_lines.append(" '%s'" % (self.logfile,))
+ self._eerror(msg_lines)
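
A usage sketch for the already-fetched short cut (not part of this patch).
Here "config_pool", "pkg" and "settings" are assumed to come from an existing
depgraph/Scheduler context, as in EbuildBuild._pre_clean_exit() above:

from _emerge.EbuildFetcher import EbuildFetcher
from portage.util._eventloop.global_event_loop import global_event_loop

loop = global_event_loop()
fetcher = EbuildFetcher(config_pool=config_pool, pkg=pkg,
	fetchonly=False, fetchall=False, background=False,
	logfile=None, scheduler=loop)

# async_already_fetched() resolves to True when every distfile is already
# present with a matching size/digest, so the fetch queue can be skipped.
if loop.run_until_complete(fetcher.async_already_fetched(settings)):
	print("distfiles already present, skipping fetch")
else:
	fetcher.start()
	loop.run_until_complete(fetcher.async_wait())
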
diff --git a/lib/_emerge/EbuildFetchonly.py b/lib/_emerge/EbuildFetchonly.py
new file mode 100644
index 000000000..eec2ad208
--- /dev/null
+++ b/lib/_emerge/EbuildFetchonly.py
@@ -0,0 +1,35 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.elog.messages import eerror
+from portage.util.SlotObject import SlotObject
+
+class EbuildFetchonly(SlotObject):
+
+ __slots__ = ("fetch_all", "pkg", "pretend", "settings")
+
+ def execute(self):
+ settings = self.settings
+ pkg = self.pkg
+ portdb = pkg.root_config.trees["porttree"].dbapi
+ ebuild_path = portdb.findname(pkg.cpv, myrepo=pkg.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % pkg.cpv)
+ settings.setcpv(pkg)
+ debug = settings.get("PORTAGE_DEBUG") == "1"
+
+ rval = portage.doebuild(ebuild_path, "fetch",
+ settings=settings, debug=debug,
+ listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
+ mydbapi=portdb, tree="porttree")
+
+ # For pretend mode, this error message is suppressed,
+ # and the unsuccessful return value is used to trigger
+ # a call to the pkg_nofetch phase.
+ if rval != os.EX_OK and not self.pretend:
+ msg = "Fetch failed for '%s'" % (pkg.cpv,)
+ eerror(msg, phase="unpack", key=pkg.cpv)
+
+ return rval
diff --git a/lib/_emerge/EbuildIpcDaemon.py b/lib/_emerge/EbuildIpcDaemon.py
new file mode 100644
index 000000000..d0dbe18bd
--- /dev/null
+++ b/lib/_emerge/EbuildIpcDaemon.py
@@ -0,0 +1,119 @@
+# Copyright 2010-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+import pickle
+from portage import os
+from portage.exception import TryAgain
+from portage.localization import _
+from portage.locks import lockfile, unlockfile
+from portage.util import writemsg_level
+from _emerge.FifoIpcDaemon import FifoIpcDaemon
+
+class EbuildIpcDaemon(FifoIpcDaemon):
+ """
+ This class serves as an IPC daemon, which ebuild processes can use
+ to communicate with portage's main python process.
+
+ Here are a few possible uses:
+
+ 1) Robust subshell/subprocess die support. This allows the ebuild
+ environment to reliably die without having to rely on signal IPC.
+
+ 2) Delegation of portageq calls to the main python process, eliminating
+ performance and userpriv permission issues.
+
+ 3) Reliable ebuild termination in cases when the ebuild has accidentally
+ left orphan processes running in the background (as in bug #278895).
+
+ 4) Detect cases in which bash has exited unexpectedly (as in bug #190128).
+ """
+
+ __slots__ = ('commands',)
+
+ def _input_handler(self):
+ # Read the whole pickle in a single atomic read() call.
+ data = self._read_buf(self._files.pipe_in)
+ if data is None:
+ pass # EAGAIN
+ elif data:
+ try:
+ obj = pickle.loads(data)
+ except SystemExit:
+ raise
+ except Exception:
+ # The pickle module can raise practically
+ # any exception when given corrupt data.
+ pass
+ else:
+
+ self._reopen_input()
+
+ cmd_key = obj[0]
+ cmd_handler = self.commands[cmd_key]
+ reply = cmd_handler(obj)
+ try:
+ self._send_reply(reply)
+ except OSError as e:
+ if e.errno == errno.ENXIO:
+ # This happens if the client side has been killed.
+ pass
+ else:
+ raise
+
+ # Allow the command to execute hooks after its reply
+ # has been sent. This hook is used by the 'exit'
+ # command to kill the ebuild process. For some
+ # reason, the ebuild-ipc helper hangs up the
+ # ebuild process if it is waiting for a reply
+ # when we try to kill the ebuild process.
+ reply_hook = getattr(cmd_handler,
+ 'reply_hook', None)
+ if reply_hook is not None:
+ reply_hook()
+
+ else: # EIO/POLLHUP
+ # This can be triggered due to a race condition which happens when
+ # the previous _reopen_input() call occurs before the writer has
+ # closed the pipe (see bug #401919). It's not safe to re-open
+ # without a lock here, since it's possible that another writer will
+ # write something to the pipe just before we close it, and in that
+ # case the write will be lost. Therefore, try for a non-blocking
+ # lock, and only re-open the pipe if the lock is acquired.
+ lock_filename = os.path.join(
+ os.path.dirname(self.input_fifo), '.ipc_lock')
+ try:
+ lock_obj = lockfile(lock_filename, unlinkfile=True,
+ flags=os.O_NONBLOCK)
+ except TryAgain:
+ # We'll try again when another IO_HUP event arrives.
+ pass
+ else:
+ try:
+ self._reopen_input()
+ finally:
+ unlockfile(lock_obj)
+
+ def _send_reply(self, reply):
+ # File streams are in unbuffered mode since we do atomic
+ # read and write of whole pickles. Use non-blocking mode so
+ # we don't hang if the client is killed before we can send
+ # the reply. We rely on the client opening the other side
+ # of this fifo before it sends its request, since otherwise
+ # we'd have a race condition with this open call raising
+ # ENXIO if the client hasn't opened the fifo yet.
+ try:
+ output_fd = os.open(self.output_fifo,
+ os.O_WRONLY | os.O_NONBLOCK)
+ try:
+ os.write(output_fd, pickle.dumps(reply))
+ finally:
+ os.close(output_fd)
+ except OSError as e:
+ # This probably means that the client has been killed,
+ # which causes open to fail with ENXIO.
+ writemsg_level(
+ "!!! EbuildIpcDaemon %s: %s\n" % \
+ (_('failed to send reply'), e),
+ level=logging.ERROR, noiselevel=-1)
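
A sketch of how a command table is wired up (not part of this patch). In the
real code AbstractEbuildProcess supplies QueryCommand/ExitCommand handlers
and creates the fifos inside the build directory; the handler, reply shape
and fifo paths below are illustrative assumptions:

from _emerge.EbuildIpcDaemon import EbuildIpcDaemon
from portage.util._eventloop.global_event_loop import global_event_loop

def ping_command(argv):
	# argv is the unpickled argument list sent by the ebuild-ipc helper.
	# The (stdout, stderr, returncode) reply shape is an assumption here.
	return ("pong\n", "", 0)

# The fifos are assumed to exist already (e.g. created with os.mkfifo).
daemon = EbuildIpcDaemon(
	commands={"ping": ping_command},
	input_fifo="/path/to/.ipc_in",
	output_fifo="/path/to/.ipc_out",
	scheduler=global_event_loop())
daemon.start()
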
diff --git a/lib/_emerge/EbuildMerge.py b/lib/_emerge/EbuildMerge.py
new file mode 100644
index 000000000..bedea902d
--- /dev/null
+++ b/lib/_emerge/EbuildMerge.py
@@ -0,0 +1,75 @@
+# Copyright 1999-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import functools
+
+from _emerge.CompositeTask import CompositeTask
+from portage import os
+from portage.dbapi._MergeProcess import MergeProcess
+from portage.util._async.AsyncTaskFuture import AsyncTaskFuture
+
+class EbuildMerge(CompositeTask):
+
+ __slots__ = ("exit_hook", "find_blockers", "logger", "ldpath_mtimes",
+ "pkg", "pkg_count", "pkg_path", "postinst_failure", "pretend",
+ "settings", "tree", "world_atom")
+
+ def _start(self):
+ root_config = self.pkg.root_config
+ settings = self.settings
+ mycat = settings["CATEGORY"]
+ mypkg = settings["PF"]
+ pkgloc = settings["D"]
+ infloc = os.path.join(settings["PORTAGE_BUILDDIR"], "build-info")
+ myebuild = settings["EBUILD"]
+ mydbapi = root_config.trees[self.tree].dbapi
+ vartree = root_config.trees["vartree"]
+ background = (settings.get('PORTAGE_BACKGROUND') == '1')
+ logfile = settings.get('PORTAGE_LOG_FILE')
+
+ merge_task = MergeProcess(
+ mycat=mycat, mypkg=mypkg, settings=settings,
+ treetype=self.tree, vartree=vartree, scheduler=self.scheduler,
+ background=background, blockers=self.find_blockers, pkgloc=pkgloc,
+ infloc=infloc, myebuild=myebuild, mydbapi=mydbapi,
+ prev_mtimes=self.ldpath_mtimes, logfile=logfile)
+
+ self._start_task(merge_task, self._merge_exit)
+
+ def _merge_exit(self, merge_task):
+ if self._final_exit(merge_task) != os.EX_OK:
+ self._start_exit_hook(self.returncode)
+ return
+
+ self.postinst_failure = merge_task.postinst_failure
+ pkg = self.pkg
+ self.world_atom(pkg)
+ pkg_count = self.pkg_count
+ pkg_path = self.pkg_path
+ logger = self.logger
+ if "noclean" not in self.settings.features:
+ short_msg = "emerge: (%s of %s) %s Clean Post" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv)
+ logger.log((" === (%s of %s) " + \
+ "Post-Build Cleaning (%s::%s)") % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
+ short_msg=short_msg)
+ logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
+
+ self._start_exit_hook(self.returncode)
+
+ def _start_exit_hook(self, returncode):
+ """
+ Start the exit hook, and set returncode after it completes.
+ """
+ # The returncode will be set after exit hook is complete.
+ self.returncode = None
+ self._start_task(
+ AsyncTaskFuture(future=self.exit_hook(self)),
+ functools.partial(self._exit_hook_exit, returncode))
+
+ def _exit_hook_exit(self, returncode, task):
+ self._assert_current(task)
+ self.returncode = returncode
+ self._async_wait()
diff --git a/lib/_emerge/EbuildMetadataPhase.py b/lib/_emerge/EbuildMetadataPhase.py
new file mode 100644
index 000000000..4940d40b6
--- /dev/null
+++ b/lib/_emerge/EbuildMetadataPhase.py
@@ -0,0 +1,220 @@
+# Copyright 1999-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.SubProcess import SubProcess
+import sys
+from portage.cache.mappings import slot_dict_class
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.package.ebuild._metadata_invalid:eapi_invalid',
+)
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.dep import extract_unpack_dependencies
+from portage.eapi import eapi_has_automatic_unpack_dependencies
+
+import fcntl
+import io
+
+class EbuildMetadataPhase(SubProcess):
+
+ """
+ Asynchronous interface for the ebuild "depend" phase which is
+ used to extract metadata from the ebuild.
+ """
+
+ __slots__ = ("cpv", "eapi_supported", "ebuild_hash", "fd_pipes",
+ "metadata", "portdb", "repo_path", "settings", "write_auxdb") + \
+ ("_eapi", "_eapi_lineno", "_raw_metadata",)
+
+ _file_names = ("ebuild",)
+ _files_dict = slot_dict_class(_file_names, prefix="")
+
+ def _start(self):
+ ebuild_path = self.ebuild_hash.location
+
+ with io.open(_unicode_encode(ebuild_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ self._eapi, self._eapi_lineno = portage._parse_eapi_ebuild_head(f)
+
+ parsed_eapi = self._eapi
+ if parsed_eapi is None:
+ parsed_eapi = "0"
+
+ if not parsed_eapi:
+ # An empty EAPI setting is invalid.
+ self._eapi_invalid(None)
+ self.returncode = 1
+ self._async_wait()
+ return
+
+ self.eapi_supported = portage.eapi_is_supported(parsed_eapi)
+ if not self.eapi_supported:
+ self.metadata = {"EAPI": parsed_eapi}
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ settings = self.settings
+ settings.setcpv(self.cpv)
+ settings.configdict['pkg']['EAPI'] = parsed_eapi
+
+ debug = settings.get("PORTAGE_DEBUG") == "1"
+ master_fd = None
+ slave_fd = None
+ fd_pipes = None
+ if self.fd_pipes is not None:
+ fd_pipes = self.fd_pipes.copy()
+ else:
+ fd_pipes = {}
+
+ null_input = open('/dev/null', 'rb')
+ fd_pipes.setdefault(0, null_input.fileno())
+ fd_pipes.setdefault(1, sys.__stdout__.fileno())
+ fd_pipes.setdefault(2, sys.__stderr__.fileno())
+
+ # flush any pending output
+ stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
+ for fd in fd_pipes.values():
+ if fd in stdout_filenos:
+ sys.__stdout__.flush()
+ sys.__stderr__.flush()
+ break
+
+ self._files = self._files_dict()
+ files = self._files
+
+ master_fd, slave_fd = os.pipe()
+
+ fcntl.fcntl(master_fd, fcntl.F_SETFL,
+ fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(master_fd, fcntl.F_SETFD,
+ fcntl.fcntl(master_fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ fd_pipes[slave_fd] = slave_fd
+ settings["PORTAGE_PIPE_FD"] = str(slave_fd)
+
+ self._raw_metadata = []
+ files.ebuild = master_fd
+ self.scheduler.add_reader(files.ebuild, self._output_handler)
+ self._registered = True
+
+ retval = portage.doebuild(ebuild_path, "depend",
+ settings=settings, debug=debug,
+ mydbapi=self.portdb, tree="porttree",
+ fd_pipes=fd_pipes, returnpid=True)
+ settings.pop("PORTAGE_PIPE_FD", None)
+
+ os.close(slave_fd)
+ null_input.close()
+
+ if isinstance(retval, int):
+ # doebuild failed before spawning
+ self.returncode = retval
+ self._async_wait()
+ return
+
+ self.pid = retval[0]
+
+ def _output_handler(self):
+ while True:
+ buf = self._read_buf(self._files.ebuild)
+ if buf is None:
+ break # EAGAIN
+ elif buf:
+ self._raw_metadata.append(buf)
+ else: # EIO/POLLHUP
+ if self.pid is None:
+ self._unregister()
+ self._async_wait()
+ else:
+ self._async_waitpid()
+ break
+
+ def _unregister(self):
+ self.scheduler.remove_reader(self._files.ebuild)
+ SubProcess._unregister(self)
+
+ def _async_waitpid_cb(self, *args, **kwargs):
+ """
+ Override _async_waitpid_cb to perform cleanup that is
+ not necessarily idempotent.
+ """
+ SubProcess._async_waitpid_cb(self, *args, **kwargs)
+ # self._raw_metadata is None when _start returns
+ # early due to an unsupported EAPI
+ if self.returncode == os.EX_OK and \
+ self._raw_metadata is not None:
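+ # The depend phase emits one value per line, in
+ # portage.auxdbkeys order, so a successful run yields exactly
+ # len(portage.auxdbkeys) lines.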
+ metadata_lines = _unicode_decode(b''.join(self._raw_metadata),
+ encoding=_encodings['repo.content'],
+ errors='replace').splitlines()
+ metadata_valid = True
+ if len(portage.auxdbkeys) != len(metadata_lines):
+ # Don't trust bash's returncode if the
+ # number of lines is incorrect.
+ metadata_valid = False
+ else:
+ metadata = dict(zip(portage.auxdbkeys, metadata_lines))
+ parsed_eapi = self._eapi
+ if parsed_eapi is None:
+ parsed_eapi = "0"
+ self.eapi_supported = \
+ portage.eapi_is_supported(metadata["EAPI"])
+ if (not metadata["EAPI"] or self.eapi_supported) and \
+ metadata["EAPI"] != parsed_eapi:
+ self._eapi_invalid(metadata)
+ metadata_valid = False
+
+ if metadata_valid:
+ # Since we're supposed to be able to efficiently obtain the
+ # EAPI from _parse_eapi_ebuild_head, we don't write cache
+ # entries for unsupported EAPIs.
+ if self.eapi_supported:
+
+ if metadata.get("INHERITED", False):
+ metadata["_eclasses_"] = \
+ self.portdb.repositories.get_repo_for_location(
+ self.repo_path).eclass_db.get_eclass_data(
+ metadata["INHERITED"].split())
+ else:
+ metadata["_eclasses_"] = {}
+ metadata.pop("INHERITED", None)
+
+ if eapi_has_automatic_unpack_dependencies(metadata["EAPI"]):
+ repo = self.portdb.repositories.get_name_for_location(self.repo_path)
+ unpackers = self.settings.unpack_dependencies.get(repo, {}).get(metadata["EAPI"], {})
+ unpack_dependencies = extract_unpack_dependencies(metadata["SRC_URI"], unpackers)
+ if unpack_dependencies:
+ metadata["DEPEND"] += (" " if metadata["DEPEND"] else "") + unpack_dependencies
+
+ # If called by egencache, this cache write is
+ # undesirable when metadata-transfer is disabled.
+ if self.write_auxdb is not False:
+ self.portdb._write_cache(self.cpv,
+ self.repo_path, metadata, self.ebuild_hash)
+ else:
+ metadata = {"EAPI": metadata["EAPI"]}
+ self.metadata = metadata
+ else:
+ self.returncode = 1
+
+ def _eapi_invalid(self, metadata):
+ repo_name = self.portdb.getRepositoryName(self.repo_path)
+ if metadata is not None:
+ eapi_var = metadata["EAPI"]
+ else:
+ eapi_var = None
+ eapi_invalid(self, self.cpv, repo_name, self.settings,
+ eapi_var, self._eapi, self._eapi_lineno)
diff --git a/lib/_emerge/EbuildPhase.py b/lib/_emerge/EbuildPhase.py
new file mode 100644
index 000000000..4104cefa7
--- /dev/null
+++ b/lib/_emerge/EbuildPhase.py
@@ -0,0 +1,439 @@
+# Copyright 1999-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import functools
+import gzip
+import io
+import sys
+import tempfile
+
+from _emerge.AsynchronousLock import AsynchronousLock
+from _emerge.BinpkgEnvExtractor import BinpkgEnvExtractor
+from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+from _emerge.EbuildProcess import EbuildProcess
+from _emerge.CompositeTask import CompositeTask
+from _emerge.PackagePhase import PackagePhase
+from _emerge.TaskSequence import TaskSequence
+from portage.package.ebuild.prepare_build_dirs import (_prepare_workdir,
+ _prepare_fake_distdir, _prepare_fake_filesdir)
+from portage.util import writemsg
+from portage.util._async.AsyncTaskFuture import AsyncTaskFuture
+
+try:
+ from portage.xml.metadata import MetaDataXML
+except (SystemExit, KeyboardInterrupt):
+ raise
+except (ImportError, SystemError, RuntimeError, Exception):
+ # broken or missing xml support
+ # https://bugs.python.org/issue14988
+ MetaDataXML = None
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.elog:messages@elog_messages',
+ 'portage.package.ebuild.doebuild:_check_build_log,' + \
+ '_post_phase_cmds,_post_phase_userpriv_perms,' + \
+ '_post_src_install_soname_symlinks,' + \
+ '_post_src_install_uid_fix,_postinst_bsdflags,' + \
+ '_post_src_install_write_metadata,' + \
+ '_preinst_bsdflags'
+)
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+
+class EbuildPhase(CompositeTask):
+
+ __slots__ = ("actionmap", "fd_pipes", "phase", "settings") + \
+ ("_ebuild_lock",)
+
+ # FEATURES displayed prior to setup phase
+ _features_display = (
+ "ccache", "compressdebug", "distcc", "distcc-pump", "fakeroot",
+ "installsources", "keeptemp", "keepwork", "network-sandbox",
+ "network-sandbox-proxy", "nostrip", "preserve-libs", "sandbox",
+ "selinux", "sesandbox", "splitdebug", "suidctl", "test",
+ "userpriv", "usersandbox"
+ )
+
+ # Locked phases
+ _locked_phases = ("setup", "preinst", "postinst", "prerm", "postrm")
+
+ def _start(self):
+
+ need_builddir = self.phase not in EbuildProcess._phases_without_builddir
+
+ if need_builddir:
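+ # Completion markers use the past tense of the phase name,
+ # e.g. ".unpacked" for unpack and ".compiled" for compile.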
+ phase_completed_file = os.path.join(
+ self.settings['PORTAGE_BUILDDIR'],
+ ".%sed" % self.phase.rstrip('e'))
+ if not os.path.exists(phase_completed_file):
+ # If the phase is really going to run then we want
+ # to eliminate any stale elog messages that may
+ # exist from a previous run.
+ try:
+ os.unlink(os.path.join(self.settings['T'],
+ 'logging', self.phase))
+ except OSError:
+ pass
+
+ if self.phase in ('nofetch', 'pretend', 'setup'):
+
+ use = self.settings.get('PORTAGE_BUILT_USE')
+ if use is None:
+ use = self.settings['PORTAGE_USE']
+
+ maint_str = ""
+ upstr_str = ""
+ metadata_xml_path = os.path.join(os.path.dirname(self.settings['EBUILD']), "metadata.xml")
+ if MetaDataXML is not None and os.path.isfile(metadata_xml_path):
+ herds_path = os.path.join(self.settings['PORTDIR'],
+ 'metadata/herds.xml')
+ try:
+ metadata_xml = MetaDataXML(metadata_xml_path, herds_path)
+ maint_str = metadata_xml.format_maintainer_string()
+ upstr_str = metadata_xml.format_upstream_string()
+ except SyntaxError:
+ maint_str = "<invalid metadata.xml>"
+
+ msg = []
+ msg.append("Package: %s" % self.settings.mycpv)
+ if self.settings.get('PORTAGE_REPO_NAME'):
+ msg.append("Repository: %s" % self.settings['PORTAGE_REPO_NAME'])
+ if maint_str:
+ msg.append("Maintainer: %s" % maint_str)
+ if upstr_str:
+ msg.append("Upstream: %s" % upstr_str)
+
+ msg.append("USE: %s" % use)
+ relevant_features = []
+ enabled_features = self.settings.features
+ for x in self._features_display:
+ if x in enabled_features:
+ relevant_features.append(x)
+ if relevant_features:
+ msg.append("FEATURES: %s" % " ".join(relevant_features))
+
+ # Force background=True for this header since it's intended
+ # for the log and it doesn't necessarily need to be visible
+ # elsewhere.
+ self._elog('einfo', msg, background=True)
+
+ if self.phase == 'package':
+ if 'PORTAGE_BINPKG_TMPFILE' not in self.settings:
+ self.settings['PORTAGE_BINPKG_TMPFILE'] = \
+ os.path.join(self.settings['PKGDIR'],
+ self.settings['CATEGORY'], self.settings['PF']) + '.tbz2'
+
+ if self.phase in ("pretend", "prerm"):
+ env_extractor = BinpkgEnvExtractor(background=self.background,
+ scheduler=self.scheduler, settings=self.settings)
+ if env_extractor.saved_env_exists():
+ self._start_task(env_extractor, self._env_extractor_exit)
+ return
+ # If the environment.bz2 doesn't exist, then ebuild.sh will
+ # source the ebuild as a fallback.
+
+ self._start_lock()
+
+ def _env_extractor_exit(self, env_extractor):
+ if self._default_exit(env_extractor) != os.EX_OK:
+ self.wait()
+ return
+
+ self._start_lock()
+
+ def _start_lock(self):
+ if (self.phase in self._locked_phases and
+ "ebuild-locks" in self.settings.features):
+ eroot = self.settings["EROOT"]
+ lock_path = os.path.join(eroot, portage.VDB_PATH + "-ebuild")
+ if os.access(os.path.dirname(lock_path), os.W_OK):
+ self._ebuild_lock = AsynchronousLock(path=lock_path,
+ scheduler=self.scheduler)
+ self._start_task(self._ebuild_lock, self._lock_exit)
+ return
+
+ self._start_ebuild()
+
+ def _lock_exit(self, ebuild_lock):
+ if self._default_exit(ebuild_lock) != os.EX_OK:
+ self.wait()
+ return
+ self._start_ebuild()
+
+ def _get_log_path(self):
+ # Don't open the log file during the clean phase since the
+ # open file can result in an nfs lock on $T/build.log which
+ # prevents the clean phase from removing $T.
+ logfile = None
+ if self.phase not in ("clean", "cleanrm") and \
+ self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ logfile = self.settings.get("PORTAGE_LOG_FILE")
+ return logfile
+
+ def _start_ebuild(self):
+ if self.phase == "package":
+ self._start_task(PackagePhase(actionmap=self.actionmap,
+ background=self.background, fd_pipes=self.fd_pipes,
+ logfile=self._get_log_path(), scheduler=self.scheduler,
+ settings=self.settings), self._ebuild_exit)
+ return
+
+ if self.phase == "unpack":
+ alist = self.settings.configdict["pkg"].get("A", "").split()
+ _prepare_fake_distdir(self.settings, alist)
+ _prepare_fake_filesdir(self.settings)
+
+ fd_pipes = self.fd_pipes
+ if fd_pipes is None:
+ if not self.background and self.phase == 'nofetch':
+ # All the pkg_nofetch output goes to stderr since
+ # it's considered to be an error message.
+ fd_pipes = {1 : sys.__stderr__.fileno()}
+
+ ebuild_process = EbuildProcess(actionmap=self.actionmap,
+ background=self.background, fd_pipes=fd_pipes,
+ logfile=self._get_log_path(), phase=self.phase,
+ scheduler=self.scheduler, settings=self.settings)
+
+ self._start_task(ebuild_process, self._ebuild_exit)
+
+ def _ebuild_exit(self, ebuild_process):
+ self._assert_current(ebuild_process)
+ if self._ebuild_lock is None:
+ self._ebuild_exit_unlocked(ebuild_process)
+ else:
+ self._start_task(
+ AsyncTaskFuture(future=self._ebuild_lock.async_unlock()),
+ functools.partial(self._ebuild_exit_unlocked, ebuild_process))
+
+ def _ebuild_exit_unlocked(self, ebuild_process, unlock_task=None):
+ if unlock_task is not None:
+ self._assert_current(unlock_task)
+ if unlock_task.cancelled:
+ self._default_final_exit(unlock_task)
+ return
+
+ # Normally, async_unlock should not raise an exception here.
+ unlock_task.future.result()
+
+ fail = False
+ if ebuild_process.returncode != os.EX_OK:
+ self.returncode = ebuild_process.returncode
+ if self.phase == "test" and \
+ "test-fail-continue" in self.settings.features:
+ # mark test phase as complete (bug #452030)
+ try:
+ open(_unicode_encode(os.path.join(
+ self.settings["PORTAGE_BUILDDIR"], ".tested"),
+ encoding=_encodings['fs'], errors='strict'),
+ 'wb').close()
+ except OSError:
+ pass
+ else:
+ fail = True
+
+ if not fail:
+ self.returncode = None
+
+ logfile = self._get_log_path()
+
+ if self.phase == "install":
+ out = io.StringIO()
+ _check_build_log(self.settings, out=out)
+ msg = out.getvalue()
+ self.scheduler.output(msg, log_path=logfile)
+
+ if fail:
+ self._die_hooks()
+ return
+
+ settings = self.settings
+ _post_phase_userpriv_perms(settings)
+
+ if self.phase == "unpack":
+ # Bump WORKDIR timestamp, in case tar gave it a timestamp
+ # that will interfere with distfiles / WORKDIR timestamp
+ # comparisons as reported in bug #332217. Also, fix
+ # ownership since tar can change that too.
+ os.utime(settings["WORKDIR"], None)
+ _prepare_workdir(settings)
+ elif self.phase == "install":
+ out = io.StringIO()
+ _post_src_install_write_metadata(settings)
+ _post_src_install_uid_fix(settings, out)
+ msg = out.getvalue()
+ if msg:
+ self.scheduler.output(msg, log_path=logfile)
+ elif self.phase == "preinst":
+ _preinst_bsdflags(settings)
+ elif self.phase == "postinst":
+ _postinst_bsdflags(settings)
+
+ post_phase_cmds = _post_phase_cmds.get(self.phase)
+ if post_phase_cmds is not None:
+ if logfile is not None and self.phase in ("install",):
+ # Log to a temporary file, since the code we are running
+ # reads PORTAGE_LOG_FILE for QA checks, and we want to
+ # avoid annoying "gzip: unexpected end of file" messages
+ # when FEATURES=compress-build-logs is enabled.
+ fd, logfile = tempfile.mkstemp()
+ os.close(fd)
+ post_phase = _PostPhaseCommands(background=self.background,
+ commands=post_phase_cmds, fd_pipes=self.fd_pipes,
+ logfile=logfile, phase=self.phase, scheduler=self.scheduler,
+ settings=settings)
+ self._start_task(post_phase, self._post_phase_exit)
+ return
+
+ # this point is not reachable if there was a failure and
+ # we returned for die_hooks above, so returncode must
+ # indicate success (especially if ebuild_process.returncode
+ # is unsuccessful and test-fail-continue came into play)
+ self.returncode = os.EX_OK
+ self._current_task = None
+ self.wait()
+
+ def _post_phase_exit(self, post_phase):
+
+ self._assert_current(post_phase)
+
+ log_path = None
+ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+
+ if post_phase.logfile is not None and \
+ post_phase.logfile != log_path:
+ # We were logging to a temp file (see above), so append
+ # temp file to main log and remove temp file.
+ self._append_temp_log(post_phase.logfile, log_path)
+
+ if self._final_exit(post_phase) != os.EX_OK:
+ writemsg("!!! post %s failed; exiting.\n" % self.phase,
+ noiselevel=-1)
+ self._die_hooks()
+ return
+
+ if self.phase == "install":
+ out = io.StringIO()
+ _post_src_install_soname_symlinks(self.settings, out)
+ msg = out.getvalue()
+ if msg:
+ self.scheduler.output(msg, log_path=log_path)
+
+ self._current_task = None
+ self.wait()
+ return
+
+ def _append_temp_log(self, temp_log, log_path):
+
+ temp_file = open(_unicode_encode(temp_log,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+
+ log_file, log_file_real = self._open_log(log_path)
+
+ for line in temp_file:
+ log_file.write(line)
+
+ temp_file.close()
+ log_file.close()
+ if log_file_real is not log_file:
+ log_file_real.close()
+ os.unlink(temp_log)
+
+ def _open_log(self, log_path):
+
+ f = open(_unicode_encode(log_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='ab')
+ f_real = f
+
+ if log_path.endswith('.gz'):
+ f = gzip.GzipFile(filename='', mode='ab', fileobj=f)
+
+ return (f, f_real)
+
+ def _die_hooks(self):
+ self.returncode = None
+ phase = 'die_hooks'
+ die_hooks = MiscFunctionsProcess(background=self.background,
+ commands=[phase], phase=phase, logfile=self._get_log_path(),
+ fd_pipes=self.fd_pipes, scheduler=self.scheduler,
+ settings=self.settings)
+ self._start_task(die_hooks, self._die_hooks_exit)
+
+ def _die_hooks_exit(self, die_hooks):
+ if self.phase != 'clean' and \
+ 'noclean' not in self.settings.features and \
+ 'fail-clean' in self.settings.features:
+ self._default_exit(die_hooks)
+ self._fail_clean()
+ return
+ self._final_exit(die_hooks)
+ self.returncode = 1
+ self.wait()
+
+ def _fail_clean(self):
+ self.returncode = None
+ portage.elog.elog_process(self.settings.mycpv, self.settings)
+ phase = "clean"
+ clean_phase = EbuildPhase(background=self.background,
+ fd_pipes=self.fd_pipes, phase=phase, scheduler=self.scheduler,
+ settings=self.settings)
+ self._start_task(clean_phase, self._fail_clean_exit)
+ return
+
+ def _fail_clean_exit(self, clean_phase):
+ self._final_exit(clean_phase)
+ self.returncode = 1
+ self.wait()
+
+ def _elog(self, elog_funcname, lines, background=None):
+ if background is None:
+ background = self.background
+ out = io.StringIO()
+ phase = self.phase
+ elog_func = getattr(elog_messages, elog_funcname)
+ global_havecolor = portage.output.havecolor
+ try:
+ portage.output.havecolor = \
+ self.settings.get('NOCOLOR', 'false').lower() in ('no', 'false')
+ for line in lines:
+ elog_func(line, phase=phase, key=self.settings.mycpv, out=out)
+ finally:
+ portage.output.havecolor = global_havecolor
+ msg = out.getvalue()
+ if msg:
+ log_path = None
+ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+ self.scheduler.output(msg, log_path=log_path,
+ background=background)
+
+
+class _PostPhaseCommands(CompositeTask):
+
+ __slots__ = ("commands", "fd_pipes", "logfile", "phase", "settings")
+
+ def _start(self):
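+ # self.commands is either a plain list of misc-functions.sh commands
+ # or an iterable of (kwargs, commands) pairs; normalize to the latter
+ # form before filtering and scheduling.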
+ if isinstance(self.commands, list):
+ cmds = [({}, self.commands)]
+ else:
+ cmds = list(self.commands)
+
+ if 'selinux' not in self.settings.features:
+ cmds = [(kwargs, commands) for kwargs, commands in
+ cmds if not kwargs.get('selinux_only')]
+
+ tasks = TaskSequence()
+ for kwargs, commands in cmds:
+ # Select args intended for MiscFunctionsProcess.
+ kwargs = dict((k, v) for k, v in kwargs.items()
+ if k in ('ld_preload_sandbox',))
+ tasks.add(MiscFunctionsProcess(background=self.background,
+ commands=commands, fd_pipes=self.fd_pipes,
+ logfile=self.logfile, phase=self.phase,
+ scheduler=self.scheduler, settings=self.settings, **kwargs))
+
+ self._start_task(tasks, self._default_final_exit)
diff --git a/lib/_emerge/EbuildProcess.py b/lib/_emerge/EbuildProcess.py
new file mode 100644
index 000000000..333ad7bd0
--- /dev/null
+++ b/lib/_emerge/EbuildProcess.py
@@ -0,0 +1,27 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AbstractEbuildProcess import AbstractEbuildProcess
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.package.ebuild.doebuild:_doebuild_spawn,_spawn_actionmap'
+)
+
+class EbuildProcess(AbstractEbuildProcess):
+
+ __slots__ = ('actionmap',)
+
+ def _spawn(self, args, **kwargs):
+
+ actionmap = self.actionmap
+ if actionmap is None:
+ actionmap = _spawn_actionmap(self.settings)
+
+ if self._dummy_pipe_fd is not None:
+ self.settings["PORTAGE_PIPE_FD"] = str(self._dummy_pipe_fd)
+
+ try:
+ return _doebuild_spawn(self.phase, self.settings,
+ actionmap=actionmap, **kwargs)
+ finally:
+ self.settings.pop("PORTAGE_PIPE_FD", None)
diff --git a/lib/_emerge/EbuildSpawnProcess.py b/lib/_emerge/EbuildSpawnProcess.py
new file mode 100644
index 000000000..26d26fc77
--- /dev/null
+++ b/lib/_emerge/EbuildSpawnProcess.py
@@ -0,0 +1,22 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AbstractEbuildProcess import AbstractEbuildProcess
+
+class EbuildSpawnProcess(AbstractEbuildProcess):
+ """
+ Used by doebuild.spawn() to manage the spawned process.
+ """
+ _spawn_kwarg_names = AbstractEbuildProcess._spawn_kwarg_names + \
+ ('fakeroot_state',)
+
+ __slots__ = ('fakeroot_state', 'spawn_func')
+
+ def _spawn(self, args, **kwargs):
+
+ env = self.settings.environ()
+
+ if self._dummy_pipe_fd is not None:
+ env["PORTAGE_PIPE_FD"] = str(self._dummy_pipe_fd)
+
+ return self.spawn_func(args, env=env, **kwargs)
diff --git a/lib/_emerge/FakeVartree.py b/lib/_emerge/FakeVartree.py
new file mode 100644
index 000000000..3f82e97e9
--- /dev/null
+++ b/lib/_emerge/FakeVartree.py
@@ -0,0 +1,337 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import sys
+import warnings
+
+import portage
+from portage import os
+from _emerge.Package import Package
+from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
+from portage.const import VDB_PATH
+from portage.dbapi.vartree import vartree
+from portage.dep._slot_operator import find_built_slot_operator_atoms
+from portage.eapi import _get_eapi_attrs
+from portage.exception import InvalidData, InvalidDependString
+from portage.update import grab_updates, parse_updates, update_dbentries
+from portage.versions import _pkg_str
+from _emerge.resolver.DbapiProvidesIndex import PackageDbapiProvidesIndex
+
+if sys.hexversion >= 0x3000000:
+ long = int
+ _unicode = str
+else:
+ _unicode = unicode
+
+class FakeVardbGetPath(object):
+ """
+ Implements the vardbapi.getpath() method which is used in error handling
+ code for the Package class and vartree.get_provide().
+ """
+ def __init__(self, vardb):
+ self.settings = vardb.settings
+
+ def __call__(self, cpv, filename=None):
+ path = os.path.join(self.settings['EROOT'], VDB_PATH, cpv)
+ if filename is not None:
+ path = os.path.join(path, filename)
+ return path
+
+class _DynamicDepsNotApplicable(Exception):
+ pass
+
+class FakeVartree(vartree):
+ """This is implements an in-memory copy of a vartree instance that provides
+ all the interfaces required for use by the depgraph. The vardb is locked
+ during the constructor call just long enough to read a copy of the
+ installed package information. This allows the depgraph to do its
+ dependency calculations without holding a lock on the vardb. It also
+ allows things like vardb global updates to be done in memory so that the
+ user doesn't necessarily need write access to the vardb in cases where
+ global updates are necessary (updates are performed when necessary if there
+ is not a matching ebuild in the tree). Instances of this class are not
+ populated until the sync() method is called."""
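+ # A minimal usage sketch (the depgraph builds one of these per root and
+ # calls sync() before doing its dependency calculations):
+ #
+ #     fake_vartree = FakeVartree(root_config, dynamic_deps=True)
+ #     fake_vartree.sync()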
+ def __init__(self, root_config, pkg_cache=None, pkg_root_config=None,
+ dynamic_deps=False, ignore_built_slot_operator_deps=False,
+ soname_deps=False):
+ self._root_config = root_config
+ self._dynamic_deps = dynamic_deps
+ self._ignore_built_slot_operator_deps = ignore_built_slot_operator_deps
+ if pkg_root_config is None:
+ pkg_root_config = self._root_config
+ self._pkg_root_config = pkg_root_config
+ if pkg_cache is None:
+ pkg_cache = {}
+ real_vartree = root_config.trees["vartree"]
+ self._real_vardb = real_vartree.dbapi
+ portdb = root_config.trees["porttree"].dbapi
+ self.settings = real_vartree.settings
+ mykeys = list(real_vartree.dbapi._aux_cache_keys)
+ if "_mtime_" not in mykeys:
+ mykeys.append("_mtime_")
+ self._db_keys = mykeys
+ self._pkg_cache = pkg_cache
+ self.dbapi = PackageVirtualDbapi(real_vartree.settings)
+ if soname_deps:
+ self.dbapi = PackageDbapiProvidesIndex(self.dbapi)
+ self.dbapi.getpath = FakeVardbGetPath(self.dbapi)
+ self.dbapi._aux_cache_keys = set(self._db_keys)
+
+ # Initialize variables needed for lazy cache pulls of the live ebuild
+ # metadata. This ensures that the vardb lock is released ASAP, without
+ # being delayed in case cache generation is triggered.
+ self._aux_get = self.dbapi.aux_get
+ self._match = self.dbapi.match
+ if dynamic_deps:
+ self.dbapi.aux_get = self._aux_get_wrapper
+ self.dbapi.match = self._match_wrapper
+ self._aux_get_history = set()
+ self._portdb_keys = Package._dep_keys + ("EAPI", "KEYWORDS")
+ self._portdb = portdb
+ self._global_updates = None
+
+ @property
+ def root(self):
+ warnings.warn("The root attribute of "
+ "_emerge.FakeVartree.FakeVartree"
+ " is deprecated. Use "
+ "settings['ROOT'] instead.",
+ DeprecationWarning, stacklevel=3)
+ return self.settings['ROOT']
+
+ def _match_wrapper(self, cpv, use_cache=1):
+ """
+ Make sure the metadata in Package instances gets updated for any
+ cpv that is returned from a match() call, since the metadata can
+ be accessed directly from the Package instance instead of via
+ aux_get().
+ """
+ matches = self._match(cpv, use_cache=use_cache)
+ for cpv in matches:
+ if cpv in self._aux_get_history:
+ continue
+ self._aux_get_wrapper(cpv, [])
+ return matches
+
+ def _aux_get_wrapper(self, cpv, wants, myrepo=None):
+ if cpv in self._aux_get_history:
+ return self._aux_get(cpv, wants)
+ self._aux_get_history.add(cpv)
+
+ # This raises a KeyError to the caller if appropriate.
+ pkg = self.dbapi._cpv_map[cpv]
+
+ try:
+ live_metadata = dict(zip(self._portdb_keys,
+ self._portdb.aux_get(cpv, self._portdb_keys,
+ myrepo=pkg.repo)))
+ except (KeyError, portage.exception.PortageException):
+ live_metadata = None
+
+ self._apply_dynamic_deps(pkg, live_metadata)
+
+ return self._aux_get(cpv, wants)
+
+ def _apply_dynamic_deps(self, pkg, live_metadata):
+
+ try:
+ if live_metadata is None:
+ raise _DynamicDepsNotApplicable()
+ # Use the metadata from the installed instance if the EAPI
+ # of either instance is unsupported, since if the installed
+ # instance has an unsupported or corrupt EAPI then we don't
+ # want to attempt to do complex operations such as execute
+ # pkg_config, pkg_prerm or pkg_postrm phases. If both EAPIs
+ # are supported then go ahead and use the live_metadata, in
+ # order to respect dep updates without revision bump or EAPI
+ # bump, as in bug #368725.
+ if not (portage.eapi_is_supported(live_metadata["EAPI"]) and \
+ portage.eapi_is_supported(pkg.eapi)):
+ raise _DynamicDepsNotApplicable()
+
+ # preserve built slot/sub-slot := operator deps
+ built_slot_operator_atoms = None
+ if not self._ignore_built_slot_operator_deps and \
+ _get_eapi_attrs(pkg.eapi).slot_operator:
+ try:
+ built_slot_operator_atoms = \
+ find_built_slot_operator_atoms(pkg)
+ except InvalidDependString:
+ pass
+
+ if built_slot_operator_atoms:
+ live_eapi_attrs = _get_eapi_attrs(live_metadata["EAPI"])
+ if not live_eapi_attrs.slot_operator:
+ raise _DynamicDepsNotApplicable()
+ for k, v in built_slot_operator_atoms.items():
+ live_metadata[k] += (" " +
+ " ".join(_unicode(atom) for atom in v))
+
+ self.dbapi.aux_update(pkg.cpv, live_metadata)
+ except _DynamicDepsNotApplicable:
+ if self._global_updates is None:
+ self._global_updates = \
+ grab_global_updates(self._portdb)
+
+ # Bypass _aux_get_wrapper, since calling that
+ # here would trigger infinite recursion.
+ aux_keys = Package._dep_keys + self.dbapi._pkg_str_aux_keys
+ aux_dict = dict(zip(aux_keys, self._aux_get(pkg.cpv, aux_keys)))
+ perform_global_updates(
+ pkg.cpv, aux_dict, self.dbapi, self._global_updates)
+
+ def dynamic_deps_preload(self, pkg, metadata):
+ if metadata is not None:
+ metadata = dict((k, metadata.get(k, ''))
+ for k in self._portdb_keys)
+ self._apply_dynamic_deps(pkg, metadata)
+ self._aux_get_history.add(pkg.cpv)
+
+ def cpv_discard(self, pkg):
+ """
+ Discard a package from the fake vardb if it exists.
+ """
+ old_pkg = self.dbapi.get(pkg)
+ if old_pkg is not None:
+ self.dbapi.cpv_remove(old_pkg)
+ self._pkg_cache.pop(old_pkg, None)
+ self._aux_get_history.discard(old_pkg.cpv)
+
+ def sync(self, acquire_lock=1):
+ """
+ Call this method to synchronize state with the real vardb
+ after one or more packages may have been installed or
+ uninstalled.
+ """
+ locked = False
+ try:
+ if acquire_lock and os.access(self._real_vardb._dbroot, os.W_OK):
+ self._real_vardb.lock()
+ locked = True
+ self._sync()
+ finally:
+ if locked:
+ self._real_vardb.unlock()
+
+ # Populate the old-style virtuals using the cached values.
+ # Skip the aux_get wrapper here, to avoid unwanted
+ # cache generation.
+ try:
+ self.dbapi.aux_get = self._aux_get
+ self.settings._populate_treeVirtuals_if_needed(self)
+ finally:
+ if self._dynamic_deps:
+ self.dbapi.aux_get = self._aux_get_wrapper
+
+ def _sync(self):
+
+ real_vardb = self._root_config.trees["vartree"].dbapi
+ current_cpv_set = frozenset(real_vardb.cpv_all())
+ pkg_vardb = self.dbapi
+
+ # Remove any packages that have been uninstalled.
+ for pkg in list(pkg_vardb):
+ if pkg.cpv not in current_cpv_set:
+ self.cpv_discard(pkg)
+
+ # Validate counters and timestamps.
+ slot_counters = {}
+ root_config = self._pkg_root_config
+ validation_keys = ["COUNTER", "_mtime_"]
+ for cpv in current_cpv_set:
+
+ pkg_hash_key = Package._gen_hash_key(cpv=cpv,
+ installed=True, root_config=root_config,
+ type_name="installed")
+ pkg = pkg_vardb.get(pkg_hash_key)
+ if pkg is not None:
+ counter, mtime = real_vardb.aux_get(cpv, validation_keys)
+ try:
+ counter = long(counter)
+ except ValueError:
+ counter = 0
+
+ if counter != pkg.counter or \
+ mtime != pkg.mtime:
+ self.cpv_discard(pkg)
+ pkg = None
+
+ if pkg is None:
+ pkg = self._pkg(cpv)
+
+ other_counter = slot_counters.get(pkg.slot_atom)
+ if other_counter is not None:
+ if other_counter > pkg.counter:
+ continue
+
+ slot_counters[pkg.slot_atom] = pkg.counter
+ pkg_vardb.cpv_inject(pkg)
+
+ real_vardb.flush_cache()
+
+ def _pkg(self, cpv):
+ """
+ The RootConfig instance that will become the Package.root_config
+ attribute can be overridden by the FakeVartree pkg_root_config
+ constructor argument, since we want to be consistent with the
+ depgraph._pkg() method which uses a specially optimized
+ RootConfig that has a FakeVartree instead of a real vartree.
+ """
+ pkg = Package(cpv=cpv, built=True, installed=True,
+ metadata=zip(self._db_keys,
+ self._real_vardb.aux_get(cpv, self._db_keys)),
+ root_config=self._pkg_root_config,
+ type_name="installed")
+
+ self._pkg_cache[pkg] = pkg
+ return pkg
+
+def grab_global_updates(portdb):
+ retupdates = {}
+
+ for repo_name in portdb.getRepositories():
+ repo = portdb.getRepositoryPath(repo_name)
+ updpath = os.path.join(repo, "profiles", "updates")
+ if not os.path.isdir(updpath):
+ continue
+
+ try:
+ rawupdates = grab_updates(updpath)
+ except portage.exception.DirectoryNotFound:
+ rawupdates = []
+ upd_commands = []
+ for mykey, mystat, mycontent in rawupdates:
+ commands, errors = parse_updates(mycontent)
+ upd_commands.extend(commands)
+ retupdates[repo_name] = upd_commands
+
+ master_repo = portdb.repositories.mainRepo()
+ if master_repo is not None:
+ master_repo = master_repo.name
+ if master_repo in retupdates:
+ retupdates['DEFAULT'] = retupdates[master_repo]
+
+ return retupdates
+
+def perform_global_updates(mycpv, aux_dict, mydb, myupdates):
+ try:
+ pkg = _pkg_str(mycpv, metadata=aux_dict, settings=mydb.settings)
+ except InvalidData:
+ return
+ aux_dict = dict((k, aux_dict[k]) for k in Package._dep_keys)
+ try:
+ mycommands = myupdates[pkg.repo]
+ except KeyError:
+ try:
+ mycommands = myupdates['DEFAULT']
+ except KeyError:
+ return
+
+ if not mycommands:
+ return
+
+ updates = update_dbentries(mycommands, aux_dict, parent=pkg)
+ if updates:
+ mydb.aux_update(mycpv, updates)
diff --git a/lib/_emerge/FifoIpcDaemon.py b/lib/_emerge/FifoIpcDaemon.py
new file mode 100644
index 000000000..0cbaa13c7
--- /dev/null
+++ b/lib/_emerge/FifoIpcDaemon.py
@@ -0,0 +1,97 @@
+# Copyright 2010-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+try:
+ import fcntl
+except ImportError:
+ # http://bugs.jython.org/issue1074
+ fcntl = None
+
+from portage import os
+from _emerge.AbstractPollTask import AbstractPollTask
+from portage.cache.mappings import slot_dict_class
+
+class FifoIpcDaemon(AbstractPollTask):
+
+ __slots__ = ("input_fifo", "output_fifo", "_files")
+
+ _file_names = ("pipe_in",)
+ _files_dict = slot_dict_class(_file_names, prefix="")
+
+ def _start(self):
+ self._files = self._files_dict()
+
+ # File streams are in unbuffered mode since we do atomic
+ # read and write of whole pickles.
+ self._files.pipe_in = \
+ os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000 and fcntl is not None:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(self._files.pipe_in, fcntl.F_SETFD,
+ fcntl.fcntl(self._files.pipe_in,
+ fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self.scheduler.add_reader(
+ self._files.pipe_in,
+ self._input_handler)
+
+ self._registered = True
+
+ def _reopen_input(self):
+ """
+ Re-open the input stream, in order to suppress
+ POLLHUP events (bug #339976).
+ """
+ self.scheduler.remove_reader(self._files.pipe_in)
+ os.close(self._files.pipe_in)
+ self._files.pipe_in = \
+ os.open(self.input_fifo, os.O_RDONLY|os.O_NONBLOCK)
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000 and fcntl is not None:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(self._files.pipe_in, fcntl.F_SETFD,
+ fcntl.fcntl(self._files.pipe_in,
+ fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self.scheduler.add_reader(
+ self._files.pipe_in,
+ self._input_handler)
+
+ def isAlive(self):
+ return self._registered
+
+ def _cancel(self):
+ if self.returncode is None:
+ self.returncode = 1
+ self._unregister()
+ # notify exit listeners
+ self._async_wait()
+
+ def _input_handler(self):
+ raise NotImplementedError(self)
+
+ def _unregister(self):
+ """
+ Unregister from the scheduler and close open files.
+ """
+
+ self._registered = False
+
+ if self._files is not None:
+ for f in self._files.values():
+ self.scheduler.remove_reader(f)
+ os.close(f)
+ self._files = None
diff --git a/lib/_emerge/JobStatusDisplay.py b/lib/_emerge/JobStatusDisplay.py
new file mode 100644
index 000000000..b8e142af9
--- /dev/null
+++ b/lib/_emerge/JobStatusDisplay.py
@@ -0,0 +1,303 @@
+# Copyright 1999-2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import io
+import sys
+import time
+
+import portage
+import portage.util.formatter as formatter
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage.output import xtermTitle
+
+from _emerge.getloadavg import getloadavg
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+class JobStatusDisplay(object):
+
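+ # Minimal usage sketch: curval, running and failed are bound properties,
+ # so assigning to them triggers a redraw when something has changed.
+ #
+ #     status = JobStatusDisplay(quiet=False)
+ #     status.maxval = 10
+ #     status.curval = 3
+ #     status.display()
+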
+ _bound_properties = ("curval", "failed", "running")
+
+ # Don't update the display unless at least this much
+ # time has passed, in units of seconds.
+ _min_display_latency = 2
+
+ _default_term_codes = {
+ 'cr' : '\r',
+ 'el' : '\x1b[K',
+ 'nel' : '\n',
+ }
+
+ _termcap_name_map = {
+ 'carriage_return' : 'cr',
+ 'clr_eol' : 'el',
+ 'newline' : 'nel',
+ }
+
+ def __init__(self, quiet=False, xterm_titles=True):
+ object.__setattr__(self, "quiet", quiet)
+ object.__setattr__(self, "xterm_titles", xterm_titles)
+ object.__setattr__(self, "maxval", 0)
+ object.__setattr__(self, "merges", 0)
+ object.__setattr__(self, "_changed", False)
+ object.__setattr__(self, "_displayed", False)
+ object.__setattr__(self, "_last_display_time", 0)
+
+ self.reset()
+
+ isatty = os.environ.get('TERM') != 'dumb' and \
+ hasattr(self.out, 'isatty') and \
+ self.out.isatty()
+ object.__setattr__(self, "_isatty", isatty)
+ if not isatty or not self._init_term():
+ term_codes = {}
+ for k, capname in self._termcap_name_map.items():
+ term_codes[k] = self._default_term_codes[capname]
+ object.__setattr__(self, "_term_codes", term_codes)
+ encoding = sys.getdefaultencoding()
+ for k, v in self._term_codes.items():
+ if not isinstance(v, basestring):
+ self._term_codes[k] = v.decode(encoding, 'replace')
+
+ if self._isatty:
+ width = portage.output.get_term_size()[1]
+ else:
+ width = 80
+ self._set_width(width)
+
+ def _set_width(self, width):
+ if width == getattr(self, 'width', None):
+ return
+ if width <= 0 or width > 80:
+ width = 80
+ object.__setattr__(self, "width", width)
+ object.__setattr__(self, "_jobs_column_width", width - 32)
+
+ @property
+ def out(self):
+ """Use a lazy reference to sys.stdout, in case the API consumer has
+ temporarily overridden stdout."""
+ return sys.stdout
+
+ def _write(self, s):
+ # avoid potential UnicodeEncodeError
+ s = _unicode_encode(s,
+ encoding=_encodings['stdio'], errors='backslashreplace')
+ out = self.out
+ if sys.hexversion >= 0x3000000:
+ out = out.buffer
+ out.write(s)
+ out.flush()
+
+ def _init_term(self):
+ """
+ Initialize term control codes.
+ @rtype: bool
+ @return: True if term codes were successfully initialized,
+ False otherwise.
+ """
+
+ term_type = os.environ.get("TERM", "").strip()
+ if not term_type:
+ return False
+ tigetstr = None
+
+ try:
+ import curses
+ try:
+ curses.setupterm(term_type, self.out.fileno())
+ tigetstr = curses.tigetstr
+ except curses.error:
+ pass
+ except ImportError:
+ pass
+
+ if tigetstr is None:
+ return False
+
+ term_codes = {}
+ for k, capname in self._termcap_name_map.items():
+ # Use _native_string for PyPy compat (bug #470258).
+ code = tigetstr(portage._native_string(capname))
+ if code is None:
+ code = self._default_term_codes[capname]
+ term_codes[k] = code
+ object.__setattr__(self, "_term_codes", term_codes)
+ return True
+
+ def _format_msg(self, msg):
+ return ">>> %s" % msg
+
+ def _erase(self):
+ self._write(
+ self._term_codes['carriage_return'] + \
+ self._term_codes['clr_eol'])
+ self._displayed = False
+
+ def _display(self, line):
+ self._write(line)
+ self._displayed = True
+
+ def _update(self, msg):
+
+ if not self._isatty:
+ self._write(self._format_msg(msg) + self._term_codes['newline'])
+ self._displayed = True
+ return
+
+ if self._displayed:
+ self._erase()
+
+ self._display(self._format_msg(msg))
+
+ def displayMessage(self, msg):
+
+ was_displayed = self._displayed
+
+ if self._isatty and self._displayed:
+ self._erase()
+
+ self._write(self._format_msg(msg) + self._term_codes['newline'])
+ self._displayed = False
+
+ if was_displayed:
+ self._changed = True
+ self.display()
+
+ def reset(self):
+ self.maxval = 0
+ self.merges = 0
+ for name in self._bound_properties:
+ object.__setattr__(self, name, 0)
+
+ if self._displayed:
+ self._write(self._term_codes['newline'])
+ self._displayed = False
+
+ def __setattr__(self, name, value):
+ old_value = getattr(self, name)
+ if value == old_value:
+ return
+ object.__setattr__(self, name, value)
+ if name in self._bound_properties:
+ self._property_change(name, old_value, value)
+
+ def _property_change(self, name, old_value, new_value):
+ self._changed = True
+ self.display()
+
+ def _load_avg_str(self):
+ try:
+ avg = getloadavg()
+ except OSError:
+ return 'unknown'
+
+ max_avg = max(avg)
+
+ if max_avg < 10:
+ digits = 2
+ elif max_avg < 100:
+ digits = 1
+ else:
+ digits = 0
+
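+ # e.g. "0.53, 0.41, 0.38" when all load averages are below 10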
+ return ", ".join(("%%.%df" % digits ) % x for x in avg)
+
+ def display(self):
+ """
+ Display status on stdout, but only if something has
+ changed since the last call. This always returns True,
+ for continuous scheduling via timeout_add.
+ """
+
+ if self.quiet:
+ return True
+
+ current_time = time.time()
+ time_delta = current_time - self._last_display_time
+ if self._displayed and \
+ not self._changed:
+ if not self._isatty:
+ return True
+ if time_delta < self._min_display_latency:
+ return True
+
+ self._last_display_time = current_time
+ self._changed = False
+ self._display_status()
+ return True
+
+ def _display_status(self):
+ # Don't use len(self._completed_tasks) here since that also
+ # can include uninstall tasks.
+ curval_str = "%s" % (self.curval,)
+ maxval_str = "%s" % (self.maxval,)
+ running_str = "%s" % (self.running,)
+ failed_str = "%s" % (self.failed,)
+ load_avg_str = self._load_avg_str()
+
+ color_output = io.StringIO()
+ plain_output = io.StringIO()
+ style_file = portage.output.ConsoleStyleFile(color_output)
+ style_file.write_listener = plain_output
+ style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
+ style_writer.style_listener = style_file.new_styles
+ f = formatter.AbstractFormatter(style_writer)
+
+ number_style = "INFORM"
+ f.add_literal_data("Jobs: ")
+ f.push_style(number_style)
+ f.add_literal_data(curval_str)
+ f.pop_style()
+ f.add_literal_data(" of ")
+ f.push_style(number_style)
+ f.add_literal_data(maxval_str)
+ f.pop_style()
+ f.add_literal_data(" complete")
+
+ if self.running:
+ f.add_literal_data(", ")
+ f.push_style(number_style)
+ f.add_literal_data(running_str)
+ f.pop_style()
+ f.add_literal_data(" running")
+
+ if self.failed:
+ f.add_literal_data(", ")
+ f.push_style(number_style)
+ f.add_literal_data(failed_str)
+ f.pop_style()
+ f.add_literal_data(" failed")
+
+ padding = self._jobs_column_width - len(plain_output.getvalue())
+ if padding > 0:
+ f.add_literal_data(padding * " ")
+
+ f.add_literal_data("Load avg: ")
+ f.add_literal_data(load_avg_str)
+
+ # Truncate to fit width, to avoid making the terminal scroll if the
+ # line overflows (happens when the load average is large).
+ plain_output = plain_output.getvalue()
+ if self._isatty and len(plain_output) > self.width:
+ # Use plain_output here since it's easier to truncate
+ # properly than the color output which contains console
+ # color codes.
+ self._update(plain_output[:self.width])
+ else:
+ self._update(color_output.getvalue())
+
+ if self.xterm_titles:
+ # If the HOSTNAME variable is exported, include it
+ # in the xterm title, just like emergelog() does.
+ # See bug #390699.
+ title_str = " ".join(plain_output.split())
+ hostname = os.environ.get("HOSTNAME")
+ if hostname is not None:
+ title_str = "%s: %s" % (hostname, title_str)
+ xtermTitle(title_str)
diff --git a/lib/_emerge/MergeListItem.py b/lib/_emerge/MergeListItem.py
new file mode 100644
index 000000000..938f8014a
--- /dev/null
+++ b/lib/_emerge/MergeListItem.py
@@ -0,0 +1,129 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.dep import _repo_separator
+from portage.output import colorize
+
+from _emerge.AsynchronousTask import AsynchronousTask
+from _emerge.Binpkg import Binpkg
+from _emerge.CompositeTask import CompositeTask
+from _emerge.EbuildBuild import EbuildBuild
+from _emerge.PackageUninstall import PackageUninstall
+
+class MergeListItem(CompositeTask):
+
+ """
+ TODO: For parallel scheduling, everything here needs asynchronous
+ execution support (start, poll, and wait methods).
+ """
+
+ __slots__ = ("args_set",
+ "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
+ "find_blockers", "logger", "mtimedb", "pkg",
+ "pkg_count", "pkg_to_replace", "prefetcher",
+ "settings", "statusMessage", "world_atom") + \
+ ("_install_task",)
+
+ def _start(self):
+
+ pkg = self.pkg
+ build_opts = self.build_opts
+
+ if pkg.installed:
+ # uninstall, executed by self.merge()
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ args_set = self.args_set
+ find_blockers = self.find_blockers
+ logger = self.logger
+ mtimedb = self.mtimedb
+ pkg_count = self.pkg_count
+ scheduler = self.scheduler
+ settings = self.settings
+ world_atom = self.world_atom
+ ldpath_mtimes = mtimedb["ldpath"]
+
+ action_desc = "Emerging"
+ preposition = "for"
+ pkg_color = "PKG_MERGE"
+ if pkg.type_name == "binary":
+ pkg_color = "PKG_BINARY_MERGE"
+ action_desc += " binary"
+
+ if build_opts.fetchonly:
+ action_desc = "Fetching"
+
+ msg = "%s (%s of %s) %s" % \
+ (action_desc,
+ colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
+ colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
+ colorize(pkg_color, pkg.cpv + _repo_separator + pkg.repo))
+
+ if pkg.root_config.settings["ROOT"] != "/":
+ msg += " %s %s" % (preposition, pkg.root)
+
+ if not build_opts.pretend:
+ self.statusMessage(msg)
+ logger.log(" >>> emerge (%s of %s) %s to %s" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
+
+ if pkg.type_name == "ebuild":
+
+ build = EbuildBuild(args_set=args_set,
+ background=self.background,
+ config_pool=self.config_pool,
+ find_blockers=find_blockers,
+ ldpath_mtimes=ldpath_mtimes, logger=logger,
+ opts=build_opts, pkg=pkg, pkg_count=pkg_count,
+ prefetcher=self.prefetcher, scheduler=scheduler,
+ settings=settings, world_atom=world_atom)
+
+ self._install_task = build
+ self._start_task(build, self._default_final_exit)
+ return
+
+ elif pkg.type_name == "binary":
+
+ binpkg = Binpkg(background=self.background,
+ find_blockers=find_blockers,
+ ldpath_mtimes=ldpath_mtimes, logger=logger,
+ opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
+ prefetcher=self.prefetcher, settings=settings,
+ scheduler=scheduler, world_atom=world_atom)
+
+ self._install_task = binpkg
+ self._start_task(binpkg, self._default_final_exit)
+ return
+
+ def create_install_task(self):
+
+ pkg = self.pkg
+ build_opts = self.build_opts
+ mtimedb = self.mtimedb
+ scheduler = self.scheduler
+ settings = self.settings
+ world_atom = self.world_atom
+ ldpath_mtimes = mtimedb["ldpath"]
+
+ if pkg.installed:
+ if not (build_opts.buildpkgonly or \
+ build_opts.fetchonly or build_opts.pretend):
+
+ task = PackageUninstall(background=self.background,
+ ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
+ pkg=pkg, scheduler=scheduler, settings=settings,
+ world_atom=world_atom)
+
+ else:
+ task = AsynchronousTask()
+
+ elif build_opts.fetchonly or \
+ build_opts.buildpkgonly:
+ task = AsynchronousTask()
+ else:
+ task = self._install_task.create_install_task()
+
+ return task
diff --git a/lib/_emerge/MetadataRegen.py b/lib/_emerge/MetadataRegen.py
new file mode 100644
index 000000000..8eb110a46
--- /dev/null
+++ b/lib/_emerge/MetadataRegen.py
@@ -0,0 +1,150 @@
+# Copyright 1999-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.dep import _repo_separator
+from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
+from portage.cache.cache_errors import CacheError
+from portage.util._async.AsyncScheduler import AsyncScheduler
+
+class MetadataRegen(AsyncScheduler):
+
+ def __init__(self, portdb, cp_iter=None, consumer=None,
+ write_auxdb=True, **kwargs):
+ AsyncScheduler.__init__(self, **kwargs)
+ self._portdb = portdb
+ self._write_auxdb = write_auxdb
+ self._global_cleanse = False
+ if cp_iter is None:
+ cp_iter = self._iter_every_cp()
+ # We can globally cleanse stale cache only if we
+ # iterate over every single cp.
+ self._global_cleanse = True
+ self._cp_iter = cp_iter
+ self._consumer = consumer
+
+ self._valid_pkgs = set()
+ self._cp_set = set()
+ self._process_iter = self._iter_metadata_processes()
+ self._running_tasks = set()
+
+ def _next_task(self):
+ return next(self._process_iter)
+
+ def _iter_every_cp(self):
+ # List categories individually, in order to start yielding quicker,
+ # and in order to reduce latency in case of a signal interrupt.
+ cp_all = self._portdb.cp_all
+ for category in sorted(self._portdb.categories):
+ for cp in cp_all(categories=(category,)):
+ yield cp
+
+ def _iter_metadata_processes(self):
+ portdb = self._portdb
+ valid_pkgs = self._valid_pkgs
+ cp_set = self._cp_set
+ consumer = self._consumer
+
+ portage.writemsg_stdout("Regenerating cache entries...\n")
+ for cp in self._cp_iter:
+ if self._terminated.is_set():
+ break
+ cp_set.add(cp)
+ portage.writemsg_stdout("Processing %s\n" % cp)
+ # We iterate over portdb.porttrees, since it's common to
+ # tweak this attribute in order to adjust repo selection.
+ for mytree in portdb.porttrees:
+ repo = portdb.repositories.get_repo_for_location(mytree)
+ cpv_list = portdb.cp_list(cp, mytree=[repo.location])
+ for cpv in cpv_list:
+ if self._terminated.is_set():
+ break
+ valid_pkgs.add(cpv)
+ ebuild_path, repo_path = portdb.findname2(cpv, myrepo=repo.name)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s%s%s'" % (cpv, _repo_separator, repo.name))
+ metadata, ebuild_hash = portdb._pull_valid_cache(
+ cpv, ebuild_path, repo_path)
+ if metadata is not None:
+ if consumer is not None:
+ consumer(cpv, repo_path, metadata, ebuild_hash, True)
+ continue
+
+ yield EbuildMetadataPhase(cpv=cpv,
+ ebuild_hash=ebuild_hash,
+ portdb=portdb, repo_path=repo_path,
+ settings=portdb.doebuild_settings,
+ write_auxdb=self._write_auxdb)
+
+ def _cleanup(self):
+ super(MetadataRegen, self)._cleanup()
+
+ portdb = self._portdb
+ dead_nodes = {}
+
+ if self._terminated.is_set():
+ portdb.flush_cache()
+ return
+
+ if self._global_cleanse:
+ for mytree in portdb.porttrees:
+ try:
+ dead_nodes[mytree] = set(portdb.auxdb[mytree])
+ except CacheError as e:
+ portage.writemsg("Error listing cache entries for " + \
+ "'%s': %s, continuing...\n" % (mytree, e),
+ noiselevel=-1)
+ del e
+ dead_nodes = None
+ break
+ else:
+ cp_set = self._cp_set
+ cpv_getkey = portage.cpv_getkey
+ for mytree in portdb.porttrees:
+ try:
+ dead_nodes[mytree] = set(cpv for cpv in \
+ portdb.auxdb[mytree] \
+ if cpv_getkey(cpv) in cp_set)
+ except CacheError as e:
+ portage.writemsg("Error listing cache entries for " + \
+ "'%s': %s, continuing...\n" % (mytree, e),
+ noiselevel=-1)
+ del e
+ dead_nodes = None
+ break
+
+ if dead_nodes:
+ for y in self._valid_pkgs:
+ for mytree in portdb.porttrees:
+ if portdb.findname2(y, mytree=mytree)[0]:
+ dead_nodes[mytree].discard(y)
+
+ for mytree, nodes in dead_nodes.items():
+ auxdb = portdb.auxdb[mytree]
+ for y in nodes:
+ try:
+ del auxdb[y]
+ except (KeyError, CacheError):
+ pass
+
+ portdb.flush_cache()
+
+ def _task_exit(self, metadata_process):
+
+ if metadata_process.returncode != os.EX_OK:
+ self._valid_pkgs.discard(metadata_process.cpv)
+ if not self._terminated_tasks:
+ portage.writemsg("Error processing %s, continuing...\n" % \
+ (metadata_process.cpv,), noiselevel=-1)
+
+ if self._consumer is not None:
+ # On failure, still notify the consumer (in this case the metadata
+ # argument is None).
+ self._consumer(metadata_process.cpv,
+ metadata_process.repo_path,
+ metadata_process.metadata,
+ metadata_process.ebuild_hash,
+ metadata_process.eapi_supported)
+
+ AsyncScheduler._task_exit(self, metadata_process)
diff --git a/lib/_emerge/MiscFunctionsProcess.py b/lib/_emerge/MiscFunctionsProcess.py
new file mode 100644
index 000000000..89fd22635
--- /dev/null
+++ b/lib/_emerge/MiscFunctionsProcess.py
@@ -0,0 +1,51 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AbstractEbuildProcess import AbstractEbuildProcess
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.package.ebuild.doebuild:spawn'
+)
+from portage import os
+
+class MiscFunctionsProcess(AbstractEbuildProcess):
+ """
+ Spawns misc-functions.sh with an existing ebuild environment.
+ """
+
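+ # Illustrative construction, mirroring EbuildPhase._die_hooks earlier in
+ # this commit:
+ #
+ #     proc = MiscFunctionsProcess(background=False, commands=['die_hooks'],
+ #         phase='die_hooks', scheduler=scheduler, settings=settings)
+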
+ __slots__ = ('commands', 'ld_preload_sandbox')
+
+ def _start(self):
+ settings = self.settings
+ portage_bin_path = settings["PORTAGE_BIN_PATH"]
+ misc_sh_binary = os.path.join(portage_bin_path,
+ os.path.basename(portage.const.MISC_SH_BINARY))
+
+ self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
+ if self.logfile is None and \
+ self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ self.logfile = settings.get("PORTAGE_LOG_FILE")
+
+ AbstractEbuildProcess._start(self)
+
+ def _spawn(self, args, **kwargs):
+ # If self.ld_preload_sandbox is None, default to free=False,
+ # in alignment with the spawn(free=False) default.
+ kwargs.setdefault('free', False if self.ld_preload_sandbox is None
+ else not self.ld_preload_sandbox)
+
+ if self._dummy_pipe_fd is not None:
+ self.settings["PORTAGE_PIPE_FD"] = str(self._dummy_pipe_fd)
+
+ if "fakeroot" in self.settings.features:
+ kwargs["fakeroot"] = True
+
+ # Temporarily unset EBUILD_PHASE so that bashrc code doesn't
+ # think this is a real phase.
+ phase_backup = self.settings.pop("EBUILD_PHASE", None)
+ try:
+ return spawn(" ".join(args), self.settings, **kwargs)
+ finally:
+ if phase_backup is not None:
+ self.settings["EBUILD_PHASE"] = phase_backup
+ self.settings.pop("PORTAGE_PIPE_FD", None)
diff --git a/lib/_emerge/Package.py b/lib/_emerge/Package.py
new file mode 100644
index 000000000..5f34f3d27
--- /dev/null
+++ b/lib/_emerge/Package.py
@@ -0,0 +1,927 @@
+# Copyright 1999-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import functools
+import sys
+from itertools import chain
+import warnings
+
+import portage
+from portage import _encodings, _unicode_decode, _unicode_encode
+from portage.cache.mappings import slot_dict_class
+from portage.const import EBUILD_PHASES
+from portage.dep import Atom, check_required_use, use_reduce, \
+ paren_enclose, _slot_separator, _repo_separator
+from portage.dep.soname.parse import parse_soname_deps
+from portage.versions import _pkg_str, _unknown_repo
+from portage.eapi import _get_eapi_attrs, eapi_has_use_aliases
+from portage.exception import InvalidData, InvalidDependString
+from portage.localization import _
+from _emerge.Task import Task
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ long = int
+ _unicode = str
+else:
+ _unicode = unicode
+
+class Package(Task):
+
+ __hash__ = Task.__hash__
+ __slots__ = ("built", "cpv", "depth",
+ "installed", "onlydeps", "operation",
+ "root_config", "type_name",
+ "category", "counter", "cp", "cpv_split",
+ "inherited", "iuse", "mtime",
+ "pf", "root", "slot", "sub_slot", "slot_atom", "version") + \
+ ("_invalid", "_masks", "_metadata", "_provided_cps",
+ "_raw_metadata", "_provides", "_requires", "_use",
+ "_validated_atoms", "_visible")
+
+ metadata_keys = [
+ "BDEPEND",
+ "BUILD_ID", "BUILD_TIME", "CHOST", "COUNTER", "DEFINED_PHASES",
+ "DEPEND", "EAPI", "HDEPEND", "INHERITED", "IUSE", "KEYWORDS",
+ "LICENSE", "MD5", "PDEPEND", "PROVIDES",
+ "RDEPEND", "repository", "REQUIRED_USE",
+ "PROPERTIES", "REQUIRES", "RESTRICT", "SIZE",
+ "SLOT", "USE", "_mtime_"]
+
+ _dep_keys = ('BDEPEND', 'DEPEND', 'HDEPEND', 'PDEPEND', 'RDEPEND')
+ _buildtime_keys = ('BDEPEND', 'DEPEND', 'HDEPEND')
+ _runtime_keys = ('PDEPEND', 'RDEPEND')
+ _use_conditional_misc_keys = ('LICENSE', 'PROPERTIES', 'RESTRICT')
+ UNKNOWN_REPO = _unknown_repo
+
+ def __init__(self, **kwargs):
+ metadata = _PackageMetadataWrapperBase(kwargs.pop('metadata'))
+ Task.__init__(self, **kwargs)
+ # The SlotObject constructor assigns self.root_config from keyword args;
+ # it is an instance of the _emerge.RootConfig.RootConfig class.
+ self.root = self.root_config.root
+ self._raw_metadata = metadata
+ self._metadata = _PackageMetadataWrapper(self, metadata)
+ if not self.built:
+ self._metadata['CHOST'] = self.root_config.settings.get('CHOST', '')
+ eapi_attrs = _get_eapi_attrs(self.eapi)
+
+ try:
+ db = self.cpv._db
+ except AttributeError:
+ if self.built:
+ # For independence from the source ebuild repository and
+ # profile implicit IUSE state, require the _db attribute
+ # for built packages.
+ raise
+ db = self.root_config.trees['porttree'].dbapi
+
+ self.cpv = _pkg_str(self.cpv, metadata=self._metadata,
+ settings=self.root_config.settings, db=db)
+ if hasattr(self.cpv, 'slot_invalid'):
+ self._invalid_metadata('SLOT.invalid',
+ "SLOT: invalid value: '%s'" % self._metadata["SLOT"])
+ self.cpv_split = self.cpv.cpv_split
+ self.category, self.pf = portage.catsplit(self.cpv)
+ self.cp = self.cpv.cp
+ self.version = self.cpv.version
+ self.slot = self.cpv.slot
+ self.sub_slot = self.cpv.sub_slot
+ self.slot_atom = Atom("%s%s%s" % (self.cp, _slot_separator, self.slot))
+ # sync metadata with validated repo (may be UNKNOWN_REPO)
+ self._metadata['repository'] = self.cpv.repo
+
+ if self.root_config.settings.local_config:
+ implicit_match = db._iuse_implicit_cnstr(self.cpv, self._metadata)
+ else:
+ implicit_match = db._repoman_iuse_implicit_cnstr(self.cpv, self._metadata)
+ usealiases = self.root_config.settings._use_manager.getUseAliases(self)
+ self.iuse = self._iuse(self, self._metadata["IUSE"].split(),
+ implicit_match, usealiases, self.eapi)
+
+ if (self.iuse.enabled or self.iuse.disabled) and \
+ not eapi_attrs.iuse_defaults:
+ if not self.installed:
+ self._invalid_metadata('EAPI.incompatible',
+ "IUSE contains defaults, but EAPI doesn't allow them")
+ if self.inherited is None:
+ self.inherited = frozenset()
+
+ if self.operation is None:
+ if self.onlydeps or self.installed:
+ self.operation = "nomerge"
+ else:
+ self.operation = "merge"
+
+ self._hash_key = Package._gen_hash_key(cpv=self.cpv,
+ installed=self.installed, onlydeps=self.onlydeps,
+ operation=self.operation, repo_name=self.cpv.repo,
+ root_config=self.root_config,
+ type_name=self.type_name)
+ self._hash_value = hash(self._hash_key)
+
+ @property
+ def eapi(self):
+ return self._metadata["EAPI"]
+
+ @property
+ def build_id(self):
+ return self.cpv.build_id
+
+ @property
+ def build_time(self):
+ if not self.built:
+ raise AttributeError('build_time')
+ return self.cpv.build_time
+
+ @property
+ def defined_phases(self):
+ return self._metadata.defined_phases
+
+ @property
+ def properties(self):
+ return self._metadata.properties
+
+ @property
+ def provided_cps(self):
+ return (self.cp,)
+
+ @property
+ def restrict(self):
+ return self._metadata.restrict
+
+ @property
+ def metadata(self):
+ warnings.warn("_emerge.Package.Package.metadata is deprecated",
+ DeprecationWarning, stacklevel=3)
+ return self._metadata
+
+ # These are calculated on-demand, so that they are calculated
+ # after FakeVartree applies its metadata tweaks.
+ @property
+ def invalid(self):
+ if self._invalid is None:
+ self._validate_deps()
+ if self._invalid is None:
+ self._invalid = False
+ return self._invalid
+
+ @property
+ def masks(self):
+ if self._masks is None:
+ self._masks = self._eval_masks()
+ return self._masks
+
+ @property
+ def visible(self):
+ if self._visible is None:
+ self._visible = self._eval_visiblity(self.masks)
+ return self._visible
+
+ @property
+ def validated_atoms(self):
+ """
+ Returns *all* validated atoms from the deps, regardless
+ of USE conditionals, with USE conditionals inside
+ atoms left unevaluated.
+ """
+ if self._validated_atoms is None:
+ self._validate_deps()
+ return self._validated_atoms
+
+ @property
+ def stable(self):
+ return self.cpv.stable
+
+ @property
+ def provides(self):
+ self.invalid
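+		# Accessing self.invalid triggers _validate_deps(), which populates
+		# self._provides and self._requires for built packages.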
+ return self._provides
+
+ @property
+ def requires(self):
+ self.invalid
+ return self._requires
+
+ @classmethod
+ def _gen_hash_key(cls, cpv=None, installed=None, onlydeps=None,
+ operation=None, repo_name=None, root_config=None,
+ type_name=None, **kwargs):
+
+ if operation is None:
+ if installed or onlydeps:
+ operation = "nomerge"
+ else:
+ operation = "merge"
+
+ root = None
+ if root_config is not None:
+ root = root_config.root
+ else:
+ raise TypeError("root_config argument is required")
+
+ elements = [type_name, root, _unicode(cpv), operation]
+
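+		# Illustrative final key shapes (one tuple per type_name):
+		#   ebuild:    (type_name, root, cpv, operation, repo_name)
+		#   binary:    (type_name, root, cpv, operation, build_id, file_size, build_time, mtime)
+		#   installed: (type_name, root, cpv, operation, type_name)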
+ if type_name is None:
+ raise TypeError("type_name argument is required")
+ elif type_name == "ebuild":
+ if repo_name is None:
+ raise AssertionError(
+ "Package._gen_hash_key() " + \
+ "called without 'repo_name' argument")
+ elements.append(repo_name)
+ elif type_name == "binary":
+ # Including a variety of fingerprints in the hash makes
+ # it possible to simultaneously consider multiple similar
+ # packages. Note that digests are not included here, since
+ # they are relatively expensive to compute, and they may
+ # not necessarily be available.
+ elements.extend([cpv.build_id, cpv.file_size,
+ cpv.build_time, cpv.mtime])
+ else:
+ # For installed (and binary) packages we don't care for the repo
+ # when it comes to hashing, because there can only be one cpv.
+ # So overwrite the repo_key with type_name.
+ elements.append(type_name)
+
+ return tuple(elements)
+
+ def _validate_deps(self):
+ """
+ Validate deps. This does not trigger USE calculation since that
+ is expensive for ebuilds and therefore we want to avoid doing
+ it unnecessarily (like for masked packages).
+ """
+ eapi = self.eapi
+ dep_eapi = eapi
+ dep_valid_flag = self.iuse.is_valid_flag
+ if self.installed:
+ # Ignore EAPI.incompatible and conditionals missing
+ # from IUSE for installed packages since these issues
+ # aren't relevant now (re-evaluate when new EAPIs are
+ # deployed).
+ dep_eapi = None
+ dep_valid_flag = None
+
+ validated_atoms = []
+ for k in self._dep_keys:
+ v = self._metadata.get(k)
+ if not v:
+ continue
+ try:
+ atoms = use_reduce(v, eapi=dep_eapi,
+ matchall=True, is_valid_flag=dep_valid_flag,
+ token_class=Atom, flat=True)
+ except InvalidDependString as e:
+ self._metadata_exception(k, e)
+ else:
+ validated_atoms.extend(atoms)
+ if not self.built:
+ for atom in atoms:
+ if not isinstance(atom, Atom):
+ continue
+ if atom.slot_operator_built:
+ e = InvalidDependString(
+ _("Improper context for slot-operator "
+ "\"built\" atom syntax: %s") %
+ (atom.unevaluated_atom,))
+ self._metadata_exception(k, e)
+
+ self._validated_atoms = tuple(set(atom for atom in
+ validated_atoms if isinstance(atom, Atom)))
+
+ for k in self._use_conditional_misc_keys:
+ v = self._metadata.get(k)
+ if not v:
+ continue
+ try:
+ use_reduce(v, eapi=dep_eapi, matchall=True,
+ is_valid_flag=dep_valid_flag)
+ except InvalidDependString as e:
+ self._metadata_exception(k, e)
+
+ k = 'REQUIRED_USE'
+ v = self._metadata.get(k)
+ if v and not self.built:
+ if not _get_eapi_attrs(eapi).required_use:
+ self._invalid_metadata('EAPI.incompatible',
+ "REQUIRED_USE set, but EAPI='%s' doesn't allow it" % eapi)
+ else:
+ try:
+ check_required_use(v, (),
+ self.iuse.is_valid_flag, eapi=eapi)
+ except InvalidDependString as e:
+ self._invalid_metadata(k + ".syntax", "%s: %s" % (k, e))
+
+ k = 'SRC_URI'
+ v = self._metadata.get(k)
+ if v:
+ try:
+ use_reduce(v, is_src_uri=True, eapi=eapi, matchall=True,
+ is_valid_flag=self.iuse.is_valid_flag)
+ except InvalidDependString as e:
+ if not self.installed:
+ self._metadata_exception(k, e)
+
+ if self.built:
+ k = 'PROVIDES'
+ try:
+ self._provides = frozenset(
+ parse_soname_deps(self._metadata[k]))
+ except InvalidData as e:
+ self._invalid_metadata(k + ".syntax", "%s: %s" % (k, e))
+
+ k = 'REQUIRES'
+ try:
+ self._requires = frozenset(
+ parse_soname_deps(self._metadata[k]))
+ except InvalidData as e:
+ self._invalid_metadata(k + ".syntax", "%s: %s" % (k, e))
+
+ def copy(self):
+ return Package(built=self.built, cpv=self.cpv, depth=self.depth,
+ installed=self.installed, metadata=self._raw_metadata,
+ onlydeps=self.onlydeps, operation=self.operation,
+ root_config=self.root_config, type_name=self.type_name)
+
+ def _eval_masks(self):
+ masks = {}
+ settings = self.root_config.settings
+
+ if self.invalid is not False:
+ masks['invalid'] = self.invalid
+
+ if not settings._accept_chost(self.cpv, self._metadata):
+ masks['CHOST'] = self._metadata['CHOST']
+
+ eapi = self.eapi
+ if not portage.eapi_is_supported(eapi):
+ masks['EAPI.unsupported'] = eapi
+ if portage._eapi_is_deprecated(eapi):
+ masks['EAPI.deprecated'] = eapi
+
+ missing_keywords = settings._getMissingKeywords(
+ self.cpv, self._metadata)
+ if missing_keywords:
+ masks['KEYWORDS'] = missing_keywords
+
+ try:
+ missing_properties = settings._getMissingProperties(
+ self.cpv, self._metadata)
+ if missing_properties:
+ masks['PROPERTIES'] = missing_properties
+ except InvalidDependString:
+ # already recorded as 'invalid'
+ pass
+
+ try:
+ missing_restricts = settings._getMissingRestrict(
+ self.cpv, self._metadata)
+ if missing_restricts:
+ masks['RESTRICT'] = missing_restricts
+ except InvalidDependString:
+ # already recorded as 'invalid'
+ pass
+
+ mask_atom = settings._getMaskAtom(self.cpv, self._metadata)
+ if mask_atom is not None:
+ masks['package.mask'] = mask_atom
+
+ try:
+ missing_licenses = settings._getMissingLicenses(
+ self.cpv, self._metadata)
+ if missing_licenses:
+ masks['LICENSE'] = missing_licenses
+ except InvalidDependString:
+ # already recorded as 'invalid'
+ pass
+
+ if not masks:
+ masks = False
+
+ return masks
+
+ def _eval_visiblity(self, masks):
+
+ if masks is not False:
+
+ if 'EAPI.unsupported' in masks:
+ return False
+
+ if 'invalid' in masks:
+ return False
+
+ if not self.installed and ( \
+ 'CHOST' in masks or \
+ 'EAPI.deprecated' in masks or \
+ 'KEYWORDS' in masks or \
+ 'PROPERTIES' in masks or \
+ 'RESTRICT' in masks):
+ return False
+
+ if 'package.mask' in masks or \
+ 'LICENSE' in masks:
+ return False
+
+ return True
+
+ def get_keyword_mask(self):
+ """returns None, 'missing', or 'unstable'."""
+
+ missing = self.root_config.settings._getRawMissingKeywords(
+ self.cpv, self._metadata)
+
+ if not missing:
+ return None
+
+ if '**' in missing:
+ return 'missing'
+
+ global_accept_keywords = frozenset(
+ self.root_config.settings.get("ACCEPT_KEYWORDS", "").split())
+
+ for keyword in missing:
+ if keyword.lstrip("~") in global_accept_keywords:
+ return 'unstable'
+
+ return 'missing'
+
+ def isHardMasked(self):
+		"""Returns True if the cpv is matched by an atom in the
+		expanded pmaskdict[cp] list (i.e. hard masked via package.mask)."""
+ pmask = self.root_config.settings._getRawMaskAtom(
+ self.cpv, self._metadata)
+ return pmask is not None
+
+ def _metadata_exception(self, k, e):
+
+ if k.endswith('DEPEND'):
+ qacat = 'dependency.syntax'
+ else:
+ qacat = k + ".syntax"
+
+ # For unicode safety with python-2.x we need to avoid
+ # using the string format operator with a non-unicode
+ # format string, since that will result in the
+ # PortageException.__str__() method being invoked,
+ # followed by unsafe decoding that may result in a
+ # UnicodeDecodeError. Therefore, use unicode_literals
+ # to ensure that format strings are unicode, so that
+ # PortageException.__unicode__() is used when necessary
+ # in python-2.x.
+ if not self.installed:
+ categorized_error = False
+ if e.errors:
+ for error in e.errors:
+ if getattr(error, 'category', None) is None:
+ continue
+ categorized_error = True
+ self._invalid_metadata(error.category,
+ "%s: %s" % (k, error))
+
+ if not categorized_error:
+				self._invalid_metadata(qacat, "%s: %s" % (k, e))
+ else:
+ # For installed packages, show the path of the file
+ # containing the invalid metadata, since the user may
+ # want to fix the deps by hand.
+ vardb = self.root_config.trees['vartree'].dbapi
+ path = vardb.getpath(self.cpv, filename=k)
+ self._invalid_metadata(qacat, "%s: %s in '%s'" % (k, e, path))
+
+ def _invalid_metadata(self, msg_type, msg):
+ if self._invalid is None:
+ self._invalid = {}
+ msgs = self._invalid.get(msg_type)
+ if msgs is None:
+ msgs = []
+ self._invalid[msg_type] = msgs
+ msgs.append(msg)
+
+ def __str__(self):
+ if self.operation == "merge":
+ if self.type_name == "binary":
+ cpv_color = "PKG_BINARY_MERGE"
+ else:
+ cpv_color = "PKG_MERGE"
+ elif self.operation == "uninstall":
+ cpv_color = "PKG_UNINSTALL"
+ else:
+ cpv_color = "PKG_NOMERGE"
+
+ build_id_str = ""
+ if isinstance(self.cpv.build_id, long) and self.cpv.build_id > 0:
+ build_id_str = "-%s" % self.cpv.build_id
+
+ s = "(%s, %s" \
+ % (portage.output.colorize(cpv_color, self.cpv +
+ build_id_str + _slot_separator + self.slot + "/" +
+ self.sub_slot + _repo_separator + self.repo),
+ self.type_name)
+
+ if self.type_name == "installed":
+ if self.root_config.settings['ROOT'] != "/":
+ s += " in '%s'" % self.root_config.settings['ROOT']
+ if self.operation == "uninstall":
+ s += " scheduled for uninstall"
+ else:
+ if self.operation == "merge":
+ s += " scheduled for merge"
+ if self.root_config.settings['ROOT'] != "/":
+ s += " to '%s'" % self.root_config.settings['ROOT']
+ s += ")"
+ return s
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'])
+
+ class _use_class(object):
+
+ __slots__ = ("enabled", "_expand", "_expand_hidden",
+ "_force", "_pkg", "_mask")
+
+ # Share identical frozenset instances when available.
+ _frozensets = {}
+
+ def __init__(self, pkg, enabled_flags):
+ self._pkg = pkg
+ self._expand = None
+ self._expand_hidden = None
+ self._force = None
+ self._mask = None
+ if eapi_has_use_aliases(pkg.eapi):
+ for enabled_flag in enabled_flags:
+ enabled_flags.extend(pkg.iuse.alias_mapping.get(enabled_flag, []))
+ self.enabled = frozenset(enabled_flags)
+ if pkg.built:
+ # Use IUSE to validate USE settings for built packages,
+ # in case the package manager that built this package
+ # failed to do that for some reason (or in case of
+ # data corruption).
+ missing_iuse = pkg.iuse.get_missing_iuse(self.enabled)
+ if missing_iuse:
+ self.enabled = self.enabled.difference(missing_iuse)
+
+ def _init_force_mask(self):
+ pkgsettings = self._pkg._get_pkgsettings()
+ frozensets = self._frozensets
+ s = frozenset(
+ pkgsettings.get("USE_EXPAND", "").lower().split())
+ self._expand = frozensets.setdefault(s, s)
+ s = frozenset(
+ pkgsettings.get("USE_EXPAND_HIDDEN", "").lower().split())
+ self._expand_hidden = frozensets.setdefault(s, s)
+ s = pkgsettings.useforce
+ self._force = frozensets.setdefault(s, s)
+ s = pkgsettings.usemask
+ self._mask = frozensets.setdefault(s, s)
+
+ @property
+ def expand(self):
+ if self._expand is None:
+ self._init_force_mask()
+ return self._expand
+
+ @property
+ def expand_hidden(self):
+ if self._expand_hidden is None:
+ self._init_force_mask()
+ return self._expand_hidden
+
+ @property
+ def force(self):
+ if self._force is None:
+ self._init_force_mask()
+ return self._force
+
+ @property
+ def mask(self):
+ if self._mask is None:
+ self._init_force_mask()
+ return self._mask
+
+ @property
+ def repo(self):
+ return self._metadata['repository']
+
+ @property
+ def repo_priority(self):
+ repo_info = self.root_config.settings.repositories.prepos.get(self.repo)
+ if repo_info is None:
+ return None
+ return repo_info.priority
+
+ @property
+ def use(self):
+ if self._use is None:
+ self._init_use()
+ return self._use
+
+ def _get_pkgsettings(self):
+ pkgsettings = self.root_config.trees[
+ 'porttree'].dbapi.doebuild_settings
+ pkgsettings.setcpv(self)
+ return pkgsettings
+
+ def _init_use(self):
+ if self.built:
+ # Use IUSE to validate USE settings for built packages,
+ # in case the package manager that built this package
+ # failed to do that for some reason (or in case of
+ # data corruption). The enabled flags must be consistent
+ # with implicit IUSE, in order to avoid potential
+ # inconsistencies in USE dep matching (see bug #453400).
+ use_str = self._metadata['USE']
+ is_valid_flag = self.iuse.is_valid_flag
+ enabled_flags = [x for x in use_str.split() if is_valid_flag(x)]
+ use_str = " ".join(enabled_flags)
+ self._use = self._use_class(
+ self, enabled_flags)
+ else:
+ try:
+ use_str = _PackageMetadataWrapperBase.__getitem__(
+ self._metadata, 'USE')
+ except KeyError:
+ use_str = None
+ calculated_use = False
+ if not use_str:
+ use_str = self._get_pkgsettings()["PORTAGE_USE"]
+ calculated_use = True
+ self._use = self._use_class(
+ self, use_str.split())
+ # Initialize these now, since USE access has just triggered
+ # setcpv, and we want to cache the result of the force/mask
+ # calculations that were done.
+ if calculated_use:
+ self._use._init_force_mask()
+
+ _PackageMetadataWrapperBase.__setitem__(
+ self._metadata, 'USE', use_str)
+
+ return use_str
+
+ class _iuse(object):
+
+ __slots__ = ("__weakref__", "_iuse_implicit_match", "_pkg", "alias_mapping",
+ "all", "all_aliases", "enabled", "disabled", "tokens")
+
+ def __init__(self, pkg, tokens, iuse_implicit_match, aliases, eapi):
+ self._pkg = pkg
+ self.tokens = tuple(tokens)
+ self._iuse_implicit_match = iuse_implicit_match
+ enabled = []
+ disabled = []
+ other = []
+ enabled_aliases = []
+ disabled_aliases = []
+ other_aliases = []
+ aliases_supported = eapi_has_use_aliases(eapi)
+ self.alias_mapping = {}
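+			# Hypothetical example: tokens ("+ssl", "-gtk", "doc") yield
+			# enabled={"ssl"}, disabled={"gtk"}, all={"ssl", "gtk", "doc"}.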
+ for x in tokens:
+ prefix = x[:1]
+ if prefix == "+":
+ enabled.append(x[1:])
+ if aliases_supported:
+ self.alias_mapping[x[1:]] = aliases.get(x[1:], [])
+ enabled_aliases.extend(self.alias_mapping[x[1:]])
+ elif prefix == "-":
+ disabled.append(x[1:])
+ if aliases_supported:
+ self.alias_mapping[x[1:]] = aliases.get(x[1:], [])
+ disabled_aliases.extend(self.alias_mapping[x[1:]])
+ else:
+ other.append(x)
+ if aliases_supported:
+ self.alias_mapping[x] = aliases.get(x, [])
+ other_aliases.extend(self.alias_mapping[x])
+ self.enabled = frozenset(chain(enabled, enabled_aliases))
+ self.disabled = frozenset(chain(disabled, disabled_aliases))
+ self.all = frozenset(chain(enabled, disabled, other))
+ self.all_aliases = frozenset(chain(enabled_aliases, disabled_aliases, other_aliases))
+
+ def is_valid_flag(self, flags):
+ """
+ @return: True if all flags are valid USE values which may
+ be specified in USE dependencies, False otherwise.
+ """
+ if isinstance(flags, basestring):
+ flags = [flags]
+
+ for flag in flags:
+ if not flag in self.all and not flag in self.all_aliases and \
+ not self._iuse_implicit_match(flag):
+ return False
+ return True
+
+ def get_missing_iuse(self, flags):
+ """
+ @return: A list of flags missing from IUSE.
+ """
+ if isinstance(flags, basestring):
+ flags = [flags]
+ missing_iuse = []
+ for flag in flags:
+ if not flag in self.all and not flag in self.all_aliases and \
+ not self._iuse_implicit_match(flag):
+ missing_iuse.append(flag)
+ return missing_iuse
+
+ def get_real_flag(self, flag):
+ """
+ Returns the flag's name within the scope of this package
+ (accounting for aliases), or None if the flag is unknown.
+ """
+ if flag in self.all:
+ return flag
+ elif flag in self.all_aliases:
+ for k, v in self.alias_mapping.items():
+ if flag in v:
+ return k
+
+ if self._iuse_implicit_match(flag):
+ return flag
+
+ return None
+
+ def __len__(self):
+ return 4
+
+ def __iter__(self):
+ """
+ This is used to generate mtimedb resume mergelist entries, so we
+ limit it to 4 items for backward compatibility.
+ """
+ return iter(self._hash_key[:4])
+
+ def __lt__(self, other):
+ if other.cp != self.cp:
+ return self.cp < other.cp
+ result = portage.vercmp(self.version, other.version)
+ if result < 0:
+ return True
+ if result == 0 and self.built and other.built:
+ return self.build_time < other.build_time
+ return False
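+	# Ordering sketch (hypothetical cpvs): foo-1.0 sorts before foo-1.1, and
+	# for equal versions of built packages the older build_time sorts first.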
+
+ def __le__(self, other):
+ if other.cp != self.cp:
+ return self.cp <= other.cp
+ result = portage.vercmp(self.version, other.version)
+ if result <= 0:
+ return True
+ if result == 0 and self.built and other.built:
+ return self.build_time <= other.build_time
+ return False
+
+ def __gt__(self, other):
+ if other.cp != self.cp:
+ return self.cp > other.cp
+ result = portage.vercmp(self.version, other.version)
+ if result > 0:
+ return True
+ if result == 0 and self.built and other.built:
+ return self.build_time > other.build_time
+ return False
+
+ def __ge__(self, other):
+ if other.cp != self.cp:
+ return self.cp >= other.cp
+ result = portage.vercmp(self.version, other.version)
+ if result >= 0:
+ return True
+ if result == 0 and self.built and other.built:
+ return self.build_time >= other.build_time
+ return False
+
+ def with_use(self, use):
+ """
+		Return a Package instance with the specified USE flags. The
+ current instance may be returned if it has identical USE flags.
+ @param use: a set of USE flags
+ @type use: frozenset
+ @return: A package with the specified USE flags
+ @rtype: Package
+ """
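+		# Hypothetical example: pkg.with_use(frozenset(["ssl"])) returns a copy
+		# whose USE metadata is "ssl"; the current instance is only returned
+		# when the argument is the same object as self.use.enabled.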
+ if use is not self.use.enabled:
+ pkg = self.copy()
+ pkg._metadata["USE"] = " ".join(use)
+ else:
+ pkg = self
+ return pkg
+
+_all_metadata_keys = set(x for x in portage.auxdbkeys \
+ if not x.startswith("UNUSED_"))
+_all_metadata_keys.update(Package.metadata_keys)
+_all_metadata_keys = frozenset(_all_metadata_keys)
+
+_PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
+
+class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
+ """
+ Detect metadata updates and synchronize Package attributes.
+ """
+
+ __slots__ = ("_pkg",)
+ _wrapped_keys = frozenset(
+ ["COUNTER", "INHERITED", "USE", "_mtime_"])
+ _use_conditional_keys = frozenset(
+ ['LICENSE', 'PROPERTIES', 'RESTRICT',])
+
+ def __init__(self, pkg, metadata):
+ _PackageMetadataWrapperBase.__init__(self)
+ self._pkg = pkg
+ if not pkg.built:
+ # USE is lazy, but we want it to show up in self.keys().
+ _PackageMetadataWrapperBase.__setitem__(self, 'USE', '')
+
+ self.update(metadata)
+
+ def __getitem__(self, k):
+ v = _PackageMetadataWrapperBase.__getitem__(self, k)
+ if k in self._use_conditional_keys:
+ if self._pkg.root_config.settings.local_config and '?' in v:
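+				# Evaluate USE conditionals against the enabled flags; e.g. a
+				# hypothetical LICENSE="foo? ( GPL-2 )" reduces to "GPL-2" when
+				# "foo" is enabled, and the evaluated value is cached below.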
+ try:
+ v = paren_enclose(use_reduce(v, uselist=self._pkg.use.enabled, \
+ is_valid_flag=self._pkg.iuse.is_valid_flag))
+ except InvalidDependString:
+ # This error should already have been registered via
+ # self._pkg._invalid_metadata().
+ pass
+ else:
+ self[k] = v
+
+ elif k == 'USE' and not self._pkg.built:
+ if not v:
+ # This is lazy because it's expensive.
+ v = self._pkg._init_use()
+
+ return v
+
+ def __setitem__(self, k, v):
+ _PackageMetadataWrapperBase.__setitem__(self, k, v)
+ if k in self._wrapped_keys:
+ getattr(self, "_set_" + k.lower())(k, v)
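+			# e.g. self['COUNTER'] = '10' dispatches to
+			# self._set_counter('COUNTER', '10'), which keeps the owning
+			# Package's counter attribute in sync.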
+
+ def _set_inherited(self, k, v):
+ if isinstance(v, basestring):
+ v = frozenset(v.split())
+ self._pkg.inherited = v
+
+ def _set_counter(self, k, v):
+ if isinstance(v, basestring):
+ try:
+ v = long(v.strip())
+ except ValueError:
+ v = 0
+ self._pkg.counter = v
+
+ def _set_use(self, k, v):
+ # Force regeneration of _use attribute
+ self._pkg._use = None
+ # Use raw metadata to restore USE conditional values
+ # to unevaluated state
+ raw_metadata = self._pkg._raw_metadata
+ for x in self._use_conditional_keys:
+ try:
+ self[x] = raw_metadata[x]
+ except KeyError:
+ pass
+
+ def _set__mtime_(self, k, v):
+ if isinstance(v, basestring):
+ try:
+ v = long(v.strip())
+ except ValueError:
+ v = 0
+ self._pkg.mtime = v
+
+ @property
+ def properties(self):
+ return self['PROPERTIES'].split()
+
+ @property
+ def restrict(self):
+ return self['RESTRICT'].split()
+
+ @property
+ def defined_phases(self):
+ """
+ Returns tokens from DEFINED_PHASES metadata if it is defined,
+ otherwise returns a tuple containing all possible phases. This
+ makes it easy to do containment checks to see if it's safe to
+ skip execution of a given phase.
+ """
+ s = self['DEFINED_PHASES']
+ if s:
+ return s.split()
+ return EBUILD_PHASES
diff --git a/lib/_emerge/PackageArg.py b/lib/_emerge/PackageArg.py
new file mode 100644
index 000000000..ebfe4b21b
--- /dev/null
+++ b/lib/_emerge/PackageArg.py
@@ -0,0 +1,19 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DependencyArg import DependencyArg
+from _emerge.Package import Package
+import portage
+from portage._sets.base import InternalPackageSet
+from portage.dep import _repo_separator
+
+class PackageArg(DependencyArg):
+ def __init__(self, package=None, **kwargs):
+ DependencyArg.__init__(self, **kwargs)
+ self.package = package
+ atom = "=" + package.cpv
+ if package.repo != Package.UNKNOWN_REPO:
+ atom += _repo_separator + package.repo
+ self.atom = portage.dep.Atom(atom, allow_repo=True)
+ self.pset = InternalPackageSet(initial_atoms=(self.atom,),
+ allow_repo=True)
diff --git a/lib/_emerge/PackageMerge.py b/lib/_emerge/PackageMerge.py
new file mode 100644
index 000000000..1e7b58ba3
--- /dev/null
+++ b/lib/_emerge/PackageMerge.py
@@ -0,0 +1,49 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.CompositeTask import CompositeTask
+from portage.dep import _repo_separator
+from portage.output import colorize
+class PackageMerge(CompositeTask):
+ __slots__ = ("merge", "postinst_failure")
+
+ def _start(self):
+
+ self.scheduler = self.merge.scheduler
+ pkg = self.merge.pkg
+ pkg_count = self.merge.pkg_count
+ pkg_color = "PKG_MERGE"
+ if pkg.type_name == "binary":
+ pkg_color = "PKG_BINARY_MERGE"
+
+ if pkg.installed:
+ action_desc = "Uninstalling"
+ preposition = "from"
+ counter_str = ""
+ else:
+ action_desc = "Installing"
+ preposition = "to"
+ counter_str = "(%s of %s) " % \
+ (colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
+ colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))
+
+ msg = "%s %s%s" % \
+ (action_desc,
+ counter_str,
+ colorize(pkg_color, pkg.cpv + _repo_separator + pkg.repo))
+
+ if pkg.root_config.settings["ROOT"] != "/":
+ msg += " %s %s" % (preposition, pkg.root)
+
+ if not self.merge.build_opts.fetchonly and \
+ not self.merge.build_opts.pretend and \
+ not self.merge.build_opts.buildpkgonly:
+ self.merge.statusMessage(msg)
+
+ task = self.merge.create_install_task()
+ self._start_task(task, self._install_exit)
+
+ def _install_exit(self, task):
+ self.postinst_failure = getattr(task, 'postinst_failure', None)
+ self._final_exit(task)
+ self.wait()
diff --git a/lib/_emerge/PackagePhase.py b/lib/_emerge/PackagePhase.py
new file mode 100644
index 000000000..107e2ca36
--- /dev/null
+++ b/lib/_emerge/PackagePhase.py
@@ -0,0 +1,93 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+
+from _emerge.CompositeTask import CompositeTask
+from _emerge.EbuildProcess import EbuildProcess
+from _emerge.SpawnProcess import SpawnProcess
+
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage.util._async.AsyncFunction import AsyncFunction
+from portage.util.install_mask import install_mask_dir, InstallMask
+
+
+class PackagePhase(CompositeTask):
+ """
+ Invokes the package phase and handles PKG_INSTALL_MASK.
+ """
+
+ __slots__ = ("actionmap", "fd_pipes", "logfile", "settings",
+ "_pkg_install_mask", "_proot")
+
+ _shell_binary = portage.const.BASH_BINARY
+
+ def _start(self):
+ try:
+ with io.open(_unicode_encode(
+ os.path.join(self.settings["PORTAGE_BUILDDIR"],
+ "build-info", "PKG_INSTALL_MASK"),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ self._pkg_install_mask = InstallMask(f.read())
+ except EnvironmentError:
+ self._pkg_install_mask = None
+ if self._pkg_install_mask:
+ self._proot = os.path.join(self.settings['T'], 'packaging')
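+			# Copy ${D} into this private packaging root, hardlinking
+			# ("cp -l") when the local cp supports it, so that
+			# PKG_INSTALL_MASK can prune files without modifying ${D} itself.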
+ self._start_task(SpawnProcess(
+ args=[self._shell_binary, '-e', '-c', ('rm -rf {PROOT}; '
+ 'cp -pPR $(cp --help | grep -q -- "^[[:space:]]*-l," && echo -l)'
+ ' "${{D}}" {PROOT}').format(PROOT=portage._shell_quote(self._proot))],
+ background=self.background, env=self.settings.environ(),
+ scheduler=self.scheduler, logfile=self.logfile),
+ self._copy_proot_exit)
+ else:
+ self._proot = self.settings['D']
+ self._start_package_phase()
+
+ def _copy_proot_exit(self, proc):
+ if self._default_exit(proc) != os.EX_OK:
+ self.wait()
+ else:
+ self._start_task(AsyncFunction(
+ target=install_mask_dir,
+ args=(os.path.join(self._proot,
+ self.settings['EPREFIX'].lstrip(os.sep)),
+ self._pkg_install_mask)),
+ self._pkg_install_mask_exit)
+
+ def _pkg_install_mask_exit(self, proc):
+ if self._default_exit(proc) != os.EX_OK:
+ self.wait()
+ else:
+ self._start_package_phase()
+
+ def _start_package_phase(self):
+ ebuild_process = EbuildProcess(actionmap=self.actionmap,
+ background=self.background, fd_pipes=self.fd_pipes,
+ logfile=self.logfile, phase="package",
+ scheduler=self.scheduler, settings=self.settings)
+
+ if self._pkg_install_mask:
+ d_orig = self.settings["D"]
+ try:
+ self.settings["D"] = self._proot
+ self._start_task(ebuild_process, self._pkg_install_mask_cleanup)
+ finally:
+ self.settings["D"] = d_orig
+ else:
+ self._start_task(ebuild_process, self._default_final_exit)
+
+ def _pkg_install_mask_cleanup(self, proc):
+ if self._default_exit(proc) != os.EX_OK:
+ self.wait()
+ else:
+ self._start_task(SpawnProcess(
+ args=['rm', '-rf', self._proot],
+ background=self.background, env=self.settings.environ(),
+ scheduler=self.scheduler, logfile=self.logfile),
+ self._default_final_exit)
diff --git a/lib/_emerge/PackageUninstall.py b/lib/_emerge/PackageUninstall.py
new file mode 100644
index 000000000..43210b4bc
--- /dev/null
+++ b/lib/_emerge/PackageUninstall.py
@@ -0,0 +1,143 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import functools
+import logging
+import portage
+from portage import os
+from portage.dbapi._MergeProcess import MergeProcess
+from portage.exception import UnsupportedAPIException
+from portage.util._async.AsyncTaskFuture import AsyncTaskFuture
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.emergelog import emergelog
+from _emerge.CompositeTask import CompositeTask
+from _emerge.unmerge import _unmerge_display
+
+class PackageUninstall(CompositeTask):
+ """
+ Uninstall a package asynchronously in a subprocess. When
+ both parallel-install and ebuild-locks FEATURES are enabled,
+ it is essential for the ebuild-locks code to execute in a
+ subprocess, since the portage.locks module does not behave
+ as desired if we try to lock the same file multiple times
+ concurrently from the same process for ebuild-locks phases
+ such as pkg_setup, pkg_prerm, and pkg_postrm.
+ """
+
+ __slots__ = ("world_atom", "ldpath_mtimes", "opts",
+ "pkg", "settings", "_builddir_lock")
+
+ def _start(self):
+
+ vardb = self.pkg.root_config.trees["vartree"].dbapi
+ dbdir = vardb.getpath(self.pkg.cpv)
+ if not os.path.exists(dbdir):
+ # Apparently the package got uninstalled
+ # already, so we can safely return early.
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ self.settings.setcpv(self.pkg)
+ cat, pf = portage.catsplit(self.pkg.cpv)
+ myebuildpath = os.path.join(dbdir, pf + ".ebuild")
+
+ try:
+ portage.doebuild_environment(myebuildpath, "prerm",
+ settings=self.settings, db=vardb)
+ except UnsupportedAPIException:
+ # This is safe to ignore since this function is
+ # guaranteed to set PORTAGE_BUILDDIR even though
+ # it raises UnsupportedAPIException. The error
+ # will be logged when it prevents the pkg_prerm
+ # and pkg_postrm phases from executing.
+ pass
+
+ self._builddir_lock = EbuildBuildDir(
+ scheduler=self.scheduler, settings=self.settings)
+ self._start_task(
+ AsyncTaskFuture(future=self._builddir_lock.async_lock()),
+ self._start_unmerge)
+
+ def _start_unmerge(self, lock_task):
+ self._assert_current(lock_task)
+ if lock_task.cancelled:
+ self._default_final_exit(lock_task)
+ return
+
+ lock_task.future.result()
+ portage.prepare_build_dirs(
+ settings=self.settings, cleanup=True)
+
+ # Output only gets logged if it comes after prepare_build_dirs()
+ # which initializes PORTAGE_LOG_FILE.
+ retval, pkgmap = _unmerge_display(self.pkg.root_config,
+ self.opts, "unmerge", [self.pkg.cpv], clean_delay=0,
+ writemsg_level=self._writemsg_level)
+
+ if retval != os.EX_OK:
+ self._async_unlock_builddir(returncode=retval)
+ return
+
+ self._writemsg_level(">>> Unmerging %s...\n" % (self.pkg.cpv,),
+ noiselevel=-1)
+ self._emergelog("=== Unmerging... (%s)" % (self.pkg.cpv,))
+
+ cat, pf = portage.catsplit(self.pkg.cpv)
+ unmerge_task = MergeProcess(
+ mycat=cat, mypkg=pf, settings=self.settings,
+ treetype="vartree", vartree=self.pkg.root_config.trees["vartree"],
+ scheduler=self.scheduler, background=self.background,
+ mydbapi=self.pkg.root_config.trees["vartree"].dbapi,
+ prev_mtimes=self.ldpath_mtimes,
+ logfile=self.settings.get("PORTAGE_LOG_FILE"), unmerge=True)
+
+ self._start_task(unmerge_task, self._unmerge_exit)
+
+ def _unmerge_exit(self, unmerge_task):
+ if self._final_exit(unmerge_task) != os.EX_OK:
+ self._emergelog(" !!! unmerge FAILURE: %s" % (self.pkg.cpv,))
+ else:
+ self._emergelog(" >>> unmerge success: %s" % (self.pkg.cpv,))
+ self.world_atom(self.pkg)
+ self._async_unlock_builddir(returncode=self.returncode)
+
+ def _async_unlock_builddir(self, returncode=None):
+ """
+ Release the lock asynchronously, and if a returncode parameter
+ is given then set self.returncode and notify exit listeners.
+ """
+ if returncode is not None:
+ # The returncode will be set after unlock is complete.
+ self.returncode = None
+ self._start_task(
+ AsyncTaskFuture(future=self._builddir_lock.async_unlock()),
+ functools.partial(self._unlock_builddir_exit, returncode=returncode))
+
+ def _unlock_builddir_exit(self, unlock_task, returncode=None):
+ self._assert_current(unlock_task)
+ if unlock_task.cancelled and returncode is not None:
+ self._default_final_exit(unlock_task)
+ return
+
+ # Normally, async_unlock should not raise an exception here.
+ unlock_task.future.cancelled() or unlock_task.future.result()
+ if returncode is not None:
+ self.returncode = returncode
+ self._async_wait()
+
+ def _emergelog(self, msg):
+ emergelog("notitles" not in self.settings.features, msg)
+
+ def _writemsg_level(self, msg, level=0, noiselevel=0):
+
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+ background = self.background
+
+ if log_path is None:
+ if not (background and level < logging.WARNING):
+ portage.util.writemsg_level(msg,
+ level=level, noiselevel=noiselevel)
+ else:
+ self.scheduler.output(msg, log_path=log_path,
+ level=level, noiselevel=noiselevel)
diff --git a/lib/_emerge/PackageVirtualDbapi.py b/lib/_emerge/PackageVirtualDbapi.py
new file mode 100644
index 000000000..26293dd98
--- /dev/null
+++ b/lib/_emerge/PackageVirtualDbapi.py
@@ -0,0 +1,149 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from portage.dbapi import dbapi
+from portage.dbapi.dep_expand import dep_expand
+
+class PackageVirtualDbapi(dbapi):
+ """
+ A dbapi-like interface class that represents the state of the installed
+ package database as new packages are installed, replacing any packages
+ that previously existed in the same slot. The main difference between
+ this class and fakedbapi is that this one uses Package instances
+ internally (passed in via cpv_inject() and cpv_remove() calls).
+ """
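+	# Hypothetical usage sketch:
+	#     graph_db = PackageVirtualDbapi(settings)
+	#     graph_db.cpv_inject(pkg)        # replaces any same-slot package
+	#     graph_db.match("dev-libs/foo")  # -> list of matching cpvs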
+ def __init__(self, settings):
+ dbapi.__init__(self)
+ self.settings = settings
+ self._match_cache = {}
+ self._cp_map = {}
+ self._cpv_map = {}
+
+ def clear(self):
+ """
+ Remove all packages.
+ """
+ if self._cpv_map:
+ self._clear_cache()
+ self._cp_map.clear()
+ self._cpv_map.clear()
+
+ def copy(self):
+ obj = PackageVirtualDbapi(self.settings)
+ obj._match_cache = self._match_cache.copy()
+ obj._cp_map = self._cp_map.copy()
+ for k, v in obj._cp_map.items():
+ obj._cp_map[k] = v[:]
+ obj._cpv_map = self._cpv_map.copy()
+ return obj
+
+ def __bool__(self):
+ return bool(self._cpv_map)
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+ def __iter__(self):
+ return iter(self._cpv_map.values())
+
+ def __contains__(self, item):
+ existing = self._cpv_map.get(item.cpv)
+ if existing is not None and \
+ existing == item:
+ return True
+ return False
+
+ def get(self, item, default=None):
+ cpv = getattr(item, "cpv", None)
+ if cpv is None:
+ if len(item) != 5:
+ return default
+ type_name, root, cpv, operation, repo_key = item
+
+ existing = self._cpv_map.get(cpv)
+ if existing is not None and \
+ existing == item:
+ return existing
+ return default
+
+ def match_pkgs(self, atom):
+ return [self._cpv_map[cpv] for cpv in self.match(atom)]
+
+ def _clear_cache(self):
+ if self._categories is not None:
+ self._categories = None
+ if self._match_cache:
+ self._match_cache = {}
+
+ def match(self, origdep, use_cache=1):
+ atom = dep_expand(origdep, mydb=self, settings=self.settings)
+ cache_key = (atom, atom.unevaluated_atom)
+ result = self._match_cache.get(cache_key)
+ if result is not None:
+ return result[:]
+ result = list(self._iter_match(atom, self.cp_list(atom.cp)))
+ self._match_cache[cache_key] = result
+ return result[:]
+
+ def cpv_exists(self, cpv, myrepo=None):
+ return cpv in self._cpv_map
+
+ def cp_list(self, mycp, use_cache=1):
+ # NOTE: Cache can be safely shared with the match cache, since the
+ # match cache uses the result from dep_expand for the cache_key.
+ cache_key = (mycp, mycp)
+ cachelist = self._match_cache.get(cache_key)
+ if cachelist is not None:
+ return cachelist[:]
+ cpv_list = self._cp_map.get(mycp)
+ if cpv_list is None:
+ cpv_list = []
+ else:
+ cpv_list = [pkg.cpv for pkg in cpv_list]
+ self._cpv_sort_ascending(cpv_list)
+ self._match_cache[cache_key] = cpv_list
+ return cpv_list[:]
+
+ def cp_all(self, sort=False):
+ return sorted(self._cp_map) if sort else list(self._cp_map)
+
+ def cpv_all(self):
+ return list(self._cpv_map)
+
+ def cpv_inject(self, pkg):
+ cp_list = self._cp_map.get(pkg.cp)
+ if cp_list is None:
+ cp_list = []
+ self._cp_map[pkg.cp] = cp_list
+ e_pkg = self._cpv_map.get(pkg.cpv)
+ if e_pkg is not None:
+ if e_pkg == pkg:
+ return
+ self.cpv_remove(e_pkg)
+ for e_pkg in cp_list:
+ if e_pkg.slot_atom == pkg.slot_atom:
+ if e_pkg == pkg:
+ return
+ self.cpv_remove(e_pkg)
+ break
+ cp_list.append(pkg)
+ self._cpv_map[pkg.cpv] = pkg
+ self._clear_cache()
+
+ def cpv_remove(self, pkg):
+ old_pkg = self._cpv_map.get(pkg.cpv)
+ if old_pkg != pkg:
+ raise KeyError(pkg)
+ self._cp_map[pkg.cp].remove(pkg)
+ del self._cpv_map[pkg.cpv]
+ self._clear_cache()
+
+ def aux_get(self, cpv, wants, myrepo=None):
+ metadata = self._cpv_map[cpv]._metadata
+ return [metadata.get(x, "") for x in wants]
+
+ def aux_update(self, cpv, values):
+ self._cpv_map[cpv]._metadata.update(values)
+ self._clear_cache()
+
diff --git a/lib/_emerge/PipeReader.py b/lib/_emerge/PipeReader.py
new file mode 100644
index 000000000..1aa5ee3bf
--- /dev/null
+++ b/lib/_emerge/PipeReader.py
@@ -0,0 +1,106 @@
+# Copyright 1999-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import fcntl
+import sys
+
+from portage import os
+from _emerge.AbstractPollTask import AbstractPollTask
+
+class PipeReader(AbstractPollTask):
+
+ """
+ Reads output from one or more files and saves it in memory,
+ for retrieval via the getvalue() method. This is driven by
+ the scheduler's poll() loop, so it runs entirely within the
+ current process.
+ """
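+	# Hypothetical usage sketch (scheduler being an event loop interface):
+	#     pr, pw = os.pipe()
+	#     reader = PipeReader(input_files={"out": pr}, scheduler=scheduler)
+	#     reader.start()
+	#     # once the write end is closed and the task completes,
+	#     # reader.getvalue() returns the accumulated bytes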
+
+ __slots__ = ("input_files",) + \
+ ("_read_data", "_use_array")
+
+ def _start(self):
+ self._read_data = []
+
+ for f in self.input_files.values():
+ fd = f if isinstance(f, int) else f.fileno()
+ fcntl.fcntl(fd, fcntl.F_SETFL,
+ fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(fd, fcntl.F_SETFD,
+ fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ if self._use_array:
+ self.scheduler.add_reader(fd, self._array_output_handler, f)
+ else:
+ self.scheduler.add_reader(fd, self._output_handler, fd)
+
+ self._registered = True
+
+ def _cancel(self):
+ self._unregister()
+ if self.returncode is None:
+ self.returncode = self._cancelled_returncode
+
+ def getvalue(self):
+ """Retrieve the entire contents"""
+ return b''.join(self._read_data)
+
+ def close(self):
+ """Free the memory buffer."""
+ self._read_data = None
+
+ def _output_handler(self, fd):
+
+ while True:
+ data = self._read_buf(fd)
+ if data is None:
+ break
+ if data:
+ self._read_data.append(data)
+ else:
+ self._unregister()
+ self.returncode = self.returncode or os.EX_OK
+ self._async_wait()
+ break
+
+ def _array_output_handler(self, f):
+
+ while True:
+ data = self._read_array(f)
+ if data is None:
+ break
+ if data:
+ self._read_data.append(data)
+ else:
+ self._unregister()
+ self.returncode = self.returncode or os.EX_OK
+ self._async_wait()
+ break
+
+ return True
+
+ def _unregister(self):
+ """
+ Unregister from the scheduler and close open files.
+ """
+
+ self._registered = False
+
+ if self.input_files is not None:
+ for f in self.input_files.values():
+ if isinstance(f, int):
+ self.scheduler.remove_reader(f)
+ os.close(f)
+ else:
+ self.scheduler.remove_reader(f.fileno())
+ f.close()
+ self.input_files = None
+
diff --git a/lib/_emerge/PollScheduler.py b/lib/_emerge/PollScheduler.py
new file mode 100644
index 000000000..569879b36
--- /dev/null
+++ b/lib/_emerge/PollScheduler.py
@@ -0,0 +1,187 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+import portage
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.EventLoop import EventLoop
+from portage.util._eventloop.global_event_loop import global_event_loop
+
+from _emerge.getloadavg import getloadavg
+
+class PollScheduler(object):
+
+	# max time between loadavg checks (seconds)
+ _loadavg_latency = None
+
+ def __init__(self, main=False, event_loop=None):
+ """
+ @param main: If True then use global_event_loop(), otherwise use
+ a local EventLoop instance (default is False, for safe use in
+ a non-main thread)
+ @type main: bool
+ """
+ self._term_rlock = threading.RLock()
+ self._terminated = threading.Event()
+ self._terminated_tasks = False
+ self._term_check_handle = None
+ self._max_jobs = 1
+ self._max_load = None
+ self._scheduling = False
+ self._background = False
+ if event_loop is not None:
+ self._event_loop = event_loop
+ elif main:
+ self._event_loop = global_event_loop()
+ else:
+ self._event_loop = (portage._internal_caller and
+ global_event_loop() or EventLoop(main=False))
+ self._sched_iface = SchedulerInterface(self._event_loop,
+ is_background=self._is_background)
+
+ def _is_background(self):
+ return self._background
+
+ def _cleanup(self):
+ """
+		Clean up any callbacks that have been registered with the global
+ event loop.
+ """
+ # The self._term_check_handle attribute requires locking
+		# since it's modified by the thread-safe terminate method.
+ with self._term_rlock:
+ if self._term_check_handle not in (None, False):
+ self._term_check_handle.cancel()
+ # This prevents the terminate method from scheduling
+ # any more callbacks (since _cleanup must eliminate all
+ # callbacks in order to ensure complete cleanup).
+ self._term_check_handle = False
+
+ def terminate(self):
+ """
+ Schedules asynchronous, graceful termination of the scheduler
+ at the earliest opportunity.
+
+ This method is thread-safe (and safe for signal handlers).
+ """
+ with self._term_rlock:
+ if self._term_check_handle is None:
+ self._terminated.set()
+ self._term_check_handle = self._event_loop.call_soon_threadsafe(
+ self._termination_check, True)
+
+ def _termination_check(self, retry=False):
+ """
+ Calls _terminate_tasks() if appropriate. It's guaranteed not to
+ call it while _schedule_tasks() is being called. This method must
+ only be called via the event loop thread.
+
+ @param retry: If True then reschedule if scheduling state prevents
+ immediate termination.
+ @type retry: bool
+ """
+ if self._terminated.is_set() and \
+ not self._terminated_tasks:
+ if not self._scheduling:
+ self._scheduling = True
+ try:
+ self._terminated_tasks = True
+ self._terminate_tasks()
+ finally:
+ self._scheduling = False
+
+ elif retry:
+ with self._term_rlock:
+ self._term_check_handle = self._event_loop.call_soon(
+ self._termination_check, True)
+
+ def _terminate_tasks(self):
+ """
+ Send signals to terminate all tasks. This is called once
+ from _keep_scheduling() or _is_work_scheduled() in the event
+ dispatching thread. It will not be called while the _schedule_tasks()
+ implementation is running, in order to avoid potential
+ interference. All tasks should be cleaned up at the earliest
+ opportunity, but not necessarily before this method returns.
+ Typically, this method will send kill signals and return without
+ waiting for exit status. This allows basic cleanup to occur, such as
+ flushing of buffered output to logs.
+ """
+ raise NotImplementedError()
+
+ def _keep_scheduling(self):
+ """
+ @rtype: bool
+ @return: True if there may be remaining tasks to schedule,
+ False otherwise.
+ """
+ return False
+
+ def _schedule_tasks(self):
+ """
+ This is called from inside the _schedule() method, which
+ guarantees the following:
+
+ 1) It will not be called recursively.
+ 2) _terminate_tasks() will not be called while it is running.
+ 3) The state of the boolean _terminated_tasks variable will
+ not change while it is running.
+
+ Unless this method is used to perform user interface updates,
+ or something like that, the first thing it should do is check
+ the state of _terminated_tasks and if that is True then it
+ should return immediately (since there's no need to
+ schedule anything after _terminate_tasks() has been called).
+ """
+ pass
+
+ def _schedule(self):
+ """
+ Calls _schedule_tasks() and automatically returns early from
+ any recursive calls to this method that the _schedule_tasks()
+ call might trigger. This makes _schedule() safe to call from
+ inside exit listeners. This method always returns True, so that
+ it may be scheduled continuously via EventLoop.timeout_add().
+ """
+ if self._scheduling:
+ return True
+ self._scheduling = True
+ try:
+ self._schedule_tasks()
+ finally:
+ self._scheduling = False
+ return True
+
+ def _is_work_scheduled(self):
+ return bool(self._running_job_count())
+
+ def _running_job_count(self):
+ raise NotImplementedError(self)
+
+ def _can_add_job(self):
+ if self._terminated_tasks:
+ return False
+
+ max_jobs = self._max_jobs
+ max_load = self._max_load
+
+ if self._max_jobs is not True and \
+ self._running_job_count() >= self._max_jobs:
+ return False
+
+ if max_load is not None and \
+ (max_jobs is True or max_jobs > 1) and \
+ self._running_job_count() >= 1:
+ try:
+ avg1, avg5, avg15 = getloadavg()
+ except OSError:
+ return False
+
+ if avg1 >= max_load:
+ return False
+
+ return True
diff --git a/lib/_emerge/ProgressHandler.py b/lib/_emerge/ProgressHandler.py
new file mode 100644
index 000000000..f5afe6d87
--- /dev/null
+++ b/lib/_emerge/ProgressHandler.py
@@ -0,0 +1,22 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import time
+class ProgressHandler(object):
+ def __init__(self):
+ self.curval = 0
+ self.maxval = 0
+ self._last_update = 0
+ self.min_latency = 0.2
+
+ def onProgress(self, maxval, curval):
+ self.maxval = maxval
+ self.curval = curval
+ cur_time = time.time()
+ if cur_time - self._last_update >= self.min_latency:
+ self._last_update = cur_time
+ self.display()
+
+ def display(self):
+ raise NotImplementedError(self)
+
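+# Hypothetical subclass sketch:
+#     class TextProgress(ProgressHandler):
+#         def display(self):
+#             print("%s of %s" % (self.curval, self.maxval))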
diff --git a/lib/_emerge/RootConfig.py b/lib/_emerge/RootConfig.py
new file mode 100644
index 000000000..3648d01d7
--- /dev/null
+++ b/lib/_emerge/RootConfig.py
@@ -0,0 +1,41 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+class RootConfig(object):
+ """This is used internally by depgraph to track information about a
+ particular $ROOT."""
+ __slots__ = ("mtimedb", "root", "setconfig", "sets", "settings", "trees")
+
+ pkg_tree_map = {
+ "ebuild" : "porttree",
+ "binary" : "bintree",
+ "installed" : "vartree"
+ }
+
+ tree_pkg_map = {}
+ for k, v in pkg_tree_map.items():
+ tree_pkg_map[v] = k
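+	# tree_pkg_map is the inverse mapping, e.g. "porttree" -> "ebuild".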
+
+ def __init__(self, settings, trees, setconfig):
+ self.trees = trees
+ self.settings = settings
+ self.root = self.settings['EROOT']
+ self.setconfig = setconfig
+ if setconfig is None:
+ self.sets = {}
+ else:
+ self.sets = self.setconfig.getSets()
+
+ def update(self, other):
+ """
+ Shallow copy all attributes from another instance.
+ """
+ for k in self.__slots__:
+ try:
+ setattr(self, k, getattr(other, k))
+ except AttributeError:
+ # mtimedb is currently not a required attribute
+ try:
+ delattr(self, k)
+ except AttributeError:
+ pass
diff --git a/lib/_emerge/Scheduler.py b/lib/_emerge/Scheduler.py
new file mode 100644
index 000000000..422308184
--- /dev/null
+++ b/lib/_emerge/Scheduler.py
@@ -0,0 +1,2011 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division, print_function, unicode_literals
+
+from collections import deque
+import gc
+import gzip
+import logging
+import signal
+import sys
+import textwrap
+import time
+import warnings
+import weakref
+import zlib
+
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage.cache.mappings import slot_dict_class
+from portage.elog.messages import eerror
+from portage.localization import _
+from portage.output import colorize, create_color_func, red
+bad = create_color_func("BAD")
+from portage._sets import SETPREFIX
+from portage._sets.base import InternalPackageSet
+from portage.util import ensure_dirs, writemsg, writemsg_level
+from portage.util.SlotObject import SlotObject
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.EventLoop import EventLoop
+from portage.package.ebuild.digestcheck import digestcheck
+from portage.package.ebuild.digestgen import digestgen
+from portage.package.ebuild.doebuild import (_check_temp_dir,
+ _prepare_self_update)
+from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
+
+import _emerge
+from _emerge.BinpkgFetcher import BinpkgFetcher
+from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
+from _emerge.BinpkgVerifier import BinpkgVerifier
+from _emerge.Blocker import Blocker
+from _emerge.BlockerDB import BlockerDB
+from _emerge.clear_caches import clear_caches
+from _emerge.create_depgraph_params import create_depgraph_params
+from _emerge.create_world_atom import create_world_atom
+from _emerge.DepPriority import DepPriority
+from _emerge.depgraph import depgraph, resume_depgraph
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.EbuildFetcher import EbuildFetcher
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.emergelog import emergelog
+from _emerge.FakeVartree import FakeVartree
+from _emerge.getloadavg import getloadavg
+from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
+from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
+from _emerge.JobStatusDisplay import JobStatusDisplay
+from _emerge.MergeListItem import MergeListItem
+from _emerge.Package import Package
+from _emerge.PackageMerge import PackageMerge
+from _emerge.PollScheduler import PollScheduler
+from _emerge.SequentialTaskQueue import SequentialTaskQueue
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+# enums
+FAILURE = 1
+
+
+class Scheduler(PollScheduler):
+
+ # max time between loadavg checks (seconds)
+ _loadavg_latency = 30
+
+ # max time between display status updates (seconds)
+ _max_display_latency = 3
+
+ _opts_ignore_blockers = \
+ frozenset(["--buildpkgonly",
+ "--fetchonly", "--fetch-all-uri",
+ "--nodeps", "--pretend"])
+
+ _opts_no_background = \
+ frozenset(["--pretend",
+ "--fetchonly", "--fetch-all-uri"])
+
+ _opts_no_self_update = frozenset(["--buildpkgonly",
+ "--fetchonly", "--fetch-all-uri", "--pretend"])
+
+ class _iface_class(SchedulerInterface):
+ __slots__ = ("fetch",
+ "scheduleSetup", "scheduleUnpack")
+
+ class _fetch_iface_class(SlotObject):
+ __slots__ = ("log_file", "schedule")
+
+ _task_queues_class = slot_dict_class(
+ ("merge", "jobs", "ebuild_locks", "fetch", "unpack"), prefix="")
+
+ class _build_opts_class(SlotObject):
+ __slots__ = ("buildpkg", "buildpkg_exclude", "buildpkgonly",
+ "fetch_all_uri", "fetchonly", "pretend")
+
+ class _binpkg_opts_class(SlotObject):
+ __slots__ = ("fetchonly", "getbinpkg", "pretend")
+
+ class _pkg_count_class(SlotObject):
+ __slots__ = ("curval", "maxval")
+
+ class _emerge_log_class(SlotObject):
+ __slots__ = ("xterm_titles",)
+
+ def log(self, *pargs, **kwargs):
+ if not self.xterm_titles:
+ # Avoid interference with the scheduler's status display.
+ kwargs.pop("short_msg", None)
+ emergelog(self.xterm_titles, *pargs, **kwargs)
+
+ class _failed_pkg(SlotObject):
+ __slots__ = ("build_dir", "build_log", "pkg",
+ "postinst_failure", "returncode")
+
+ class _ConfigPool(object):
+ """Interface for a task to temporarily allocate a config
+ instance from a pool. This allows a task to be constructed
+ long before the config instance actually becomes needed, like
+ when prefetchers are constructed for the whole merge list."""
+ __slots__ = ("_root", "_allocate", "_deallocate")
+ def __init__(self, root, allocate, deallocate):
+ self._root = root
+ self._allocate = allocate
+ self._deallocate = deallocate
+ def allocate(self):
+ return self._allocate(self._root)
+ def deallocate(self, settings):
+ self._deallocate(settings)
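+		# Hypothetical usage sketch (the allocate/deallocate callables are
+		# assumed to be provided by the enclosing Scheduler):
+		#     pool = Scheduler._ConfigPool(root, allocate_cb, deallocate_cb)
+		#     settings = pool.allocate()
+		#     try:
+		#         ...  # use the config instance
+		#     finally:
+		#         pool.deallocate(settings)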
+
+ class _unknown_internal_error(portage.exception.PortageException):
+ """
+ Used internally to terminate scheduling. The specific reason for
+ the failure should have been dumped to stderr.
+ """
+ def __init__(self, value=""):
+ portage.exception.PortageException.__init__(self, value)
+
+ def __init__(self, settings, trees, mtimedb, myopts,
+ spinner, mergelist=None, favorites=None, graph_config=None):
+ PollScheduler.__init__(self, main=True)
+
+ if mergelist is not None:
+ warnings.warn("The mergelist parameter of the " + \
+ "_emerge.Scheduler constructor is now unused. Use " + \
+ "the graph_config parameter instead.",
+ DeprecationWarning, stacklevel=2)
+
+ self.settings = settings
+ self.target_root = settings["EROOT"]
+ self.trees = trees
+ self.myopts = myopts
+ self._spinner = spinner
+ self._mtimedb = mtimedb
+ self._favorites = favorites
+ self._args_set = InternalPackageSet(favorites, allow_repo=True)
+ self._build_opts = self._build_opts_class()
+
+ for k in self._build_opts.__slots__:
+ setattr(self._build_opts, k, myopts.get("--" + k.replace("_", "-")))
+ self._build_opts.buildpkg_exclude = InternalPackageSet( \
+ initial_atoms=" ".join(myopts.get("--buildpkg-exclude", [])).split(), \
+ allow_wildcard=True, allow_repo=True)
+ if "mirror" in self.settings.features:
+ self._build_opts.fetch_all_uri = True
+
+ self._binpkg_opts = self._binpkg_opts_class()
+ for k in self._binpkg_opts.__slots__:
+ setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
+
+ self.curval = 0
+ self._logger = self._emerge_log_class()
+ self._task_queues = self._task_queues_class()
+ for k in self._task_queues.allowed_keys:
+ setattr(self._task_queues, k,
+ SequentialTaskQueue())
+
+ # Holds merges that will wait to be executed when no builds are
+ # executing. This is useful for system packages since dependencies
+ # on system packages are frequently unspecified. For example, see
+ # bug #256616.
+ self._merge_wait_queue = deque()
+		# Holds merges that have been transferred from the merge_wait_queue to
+ # the actual merge queue. They are removed from this list upon
+ # completion. Other packages can start building only when this list is
+ # empty.
+ self._merge_wait_scheduled = []
+
+ # Holds system packages and their deep runtime dependencies. Before
+ # being merged, these packages go to merge_wait_queue, to be merged
+ # when no other packages are building.
+ self._deep_system_deps = set()
+
+ # Holds packages to merge which will satisfy currently unsatisfied
+ # deep runtime dependencies of system packages. If this is not empty
+ # then no parallel builds will be spawned until it is empty. This
+ # minimizes the possibility that a build will fail due to the system
+ # being in a fragile state. For example, see bug #259954.
+ self._unsatisfied_system_deps = set()
+
+ self._status_display = JobStatusDisplay(
+ xterm_titles=('notitles' not in settings.features))
+ self._max_load = myopts.get("--load-average")
+ max_jobs = myopts.get("--jobs")
+ if max_jobs is None:
+ max_jobs = 1
+ self._set_max_jobs(max_jobs)
+ self._running_root = trees[trees._running_eroot]["root_config"]
+ self.edebug = 0
+ if settings.get("PORTAGE_DEBUG", "") == "1":
+ self.edebug = 1
+ self.pkgsettings = {}
+ self._config_pool = {}
+ for root in self.trees:
+ self._config_pool[root] = []
+
+ self._fetch_log = os.path.join(_emerge.emergelog._emerge_log_dir,
+ 'emerge-fetch.log')
+ fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
+ schedule=self._schedule_fetch)
+ self._sched_iface = self._iface_class(
+ self._event_loop,
+ is_background=self._is_background,
+ fetch=fetch_iface,
+ scheduleSetup=self._schedule_setup,
+ scheduleUnpack=self._schedule_unpack)
+
+ self._prefetchers = weakref.WeakValueDictionary()
+ self._pkg_queue = []
+ self._jobs = 0
+ self._running_tasks = {}
+ self._completed_tasks = set()
+ self._main_exit = None
+ self._main_loadavg_handle = None
+
+ self._failed_pkgs = []
+ self._failed_pkgs_all = []
+ self._failed_pkgs_die_msgs = []
+ self._post_mod_echo_msgs = []
+ self._parallel_fetch = False
+ self._init_graph(graph_config)
+ merge_count = len([x for x in self._mergelist \
+ if isinstance(x, Package) and x.operation == "merge"])
+ self._pkg_count = self._pkg_count_class(
+ curval=0, maxval=merge_count)
+ self._status_display.maxval = self._pkg_count.maxval
+
+ # The load average takes some time to respond when new
+ # jobs are added, so we need to limit the rate of adding
+ # new jobs.
+ self._job_delay_max = 5
+ self._previous_job_start_time = None
+ self._job_delay_timeout_id = None
+
+		# The load average takes some time to respond after
+ # a SIGSTOP/SIGCONT cycle, so delay scheduling for some
+ # time after SIGCONT is received.
+ self._sigcont_delay = 5
+ self._sigcont_time = None
+
+ # This is used to memoize the _choose_pkg() result when
+ # no packages can be chosen until one of the existing
+ # jobs completes.
+ self._choose_pkg_return_early = False
+
+ features = self.settings.features
+ if "parallel-fetch" in features and \
+ not ("--pretend" in self.myopts or \
+ "--fetch-all-uri" in self.myopts or \
+ "--fetchonly" in self.myopts):
+ if "distlocks" not in features:
+ portage.writemsg(red("!!!")+"\n", noiselevel=-1)
+ portage.writemsg(red("!!!")+" parallel-fetching " + \
+ "requires the distlocks feature enabled"+"\n",
+ noiselevel=-1)
+ portage.writemsg(red("!!!")+" you have it disabled, " + \
+ "thus parallel-fetching is being disabled"+"\n",
+ noiselevel=-1)
+ portage.writemsg(red("!!!")+"\n", noiselevel=-1)
+ elif merge_count > 1:
+ self._parallel_fetch = True
+
+ if self._parallel_fetch:
+ # clear out existing fetch log if it exists
+ try:
+ open(self._fetch_log, 'w').close()
+ except EnvironmentError:
+ pass
+
+ self._running_portage = None
+ portage_match = self._running_root.trees["vartree"].dbapi.match(
+ portage.const.PORTAGE_PACKAGE_ATOM)
+ if portage_match:
+ cpv = portage_match.pop()
+ self._running_portage = self._pkg(cpv, "installed",
+ self._running_root, installed=True)
+
+ def _handle_self_update(self):
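+ """
+ If portage itself is scheduled to be merged to the running root,
+ validate PORTAGE_TMPDIR and run _prepare_self_update() before any
+ build jobs are spawned.
+ """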
+
+ if self._opts_no_self_update.intersection(self.myopts):
+ return os.EX_OK
+
+ for x in self._mergelist:
+ if not isinstance(x, Package):
+ continue
+ if x.operation != "merge":
+ continue
+ if x.root != self._running_root.root:
+ continue
+ if not portage.dep.match_from_list(
+ portage.const.PORTAGE_PACKAGE_ATOM, [x]):
+ continue
+ rval = _check_temp_dir(self.settings)
+ if rval != os.EX_OK:
+ return rval
+ _prepare_self_update(self.settings)
+ break
+
+ return os.EX_OK
+
+ def _terminate_tasks(self):
+ self._status_display.quiet = True
+ for task in list(self._running_tasks.values()):
+ if task.isAlive():
+ # This task should keep the main loop running until
+ # it has had an opportunity to clean up after itself.
+ # Rely on its exit hook to remove it from
+ # self._running_tasks when it has finished cleaning up.
+ task.cancel()
+ else:
+ # This task has been waiting to be started in one of
+ # self._task_queues which are all cleared below. It
+ # will never be started, so purge it from
+ # self._running_tasks so that it won't keep the main
+ # loop running.
+ del self._running_tasks[id(task)]
+
+ for q in self._task_queues.values():
+ q.clear()
+
+ def _init_graph(self, graph_config):
+ """
+ Initialize the structures used for dependency calculations
+ involving currently installed packages.
+ """
+ self._set_graph_config(graph_config)
+ self._blocker_db = {}
+ depgraph_params = create_depgraph_params(self.myopts, None)
+ dynamic_deps = "dynamic_deps" in depgraph_params
+ ignore_built_slot_operator_deps = self.myopts.get(
+ "--ignore-built-slot-operator-deps", "n") == "y"
+ for root in self.trees:
+ if graph_config is None:
+ fake_vartree = FakeVartree(self.trees[root]["root_config"],
+ pkg_cache=self._pkg_cache, dynamic_deps=dynamic_deps,
+ ignore_built_slot_operator_deps=ignore_built_slot_operator_deps)
+ fake_vartree.sync()
+ else:
+ fake_vartree = graph_config.trees[root]['vartree']
+ self._blocker_db[root] = BlockerDB(fake_vartree)
+
+ def _destroy_graph(self):
+ """
+ Use this to free memory at the beginning of _calc_resume_list().
+ After _calc_resume_list(), the _init_graph() method
+ must be called in order to re-generate the structures that
+ this method destroys.
+ """
+ self._blocker_db = None
+ self._set_graph_config(None)
+ gc.collect()
+
+ def _set_max_jobs(self, max_jobs):
+ self._max_jobs = max_jobs
+ self._task_queues.jobs.max_jobs = max_jobs
+ if "parallel-install" in self.settings.features:
+ self._task_queues.merge.max_jobs = max_jobs
+
+ def _background_mode(self):
+ """
+ Check if background mode is enabled and adjust states as necessary.
+
+ @rtype: bool
+ @return: True if background mode is enabled, False otherwise.
+ """
+ background = (self._max_jobs is True or \
+ self._max_jobs > 1 or "--quiet" in self.myopts \
+ or self.myopts.get("--quiet-build") == "y") and \
+ not bool(self._opts_no_background.intersection(self.myopts))
+
+ if background:
+ interactive_tasks = self._get_interactive_tasks()
+ if interactive_tasks:
+ background = False
+ writemsg_level(">>> Sending package output to stdio due " + \
+ "to interactive package(s):\n",
+ level=logging.INFO, noiselevel=-1)
+ msg = [""]
+ for pkg in interactive_tasks:
+ pkg_str = " " + colorize("INFORM", str(pkg.cpv))
+ if pkg.root_config.settings["ROOT"] != "/":
+ pkg_str += " for " + pkg.root
+ msg.append(pkg_str)
+ msg.append("")
+ writemsg_level("".join("%s\n" % (l,) for l in msg),
+ level=logging.INFO, noiselevel=-1)
+ if self._max_jobs is True or self._max_jobs > 1:
+ self._set_max_jobs(1)
+ writemsg_level(">>> Setting --jobs=1 due " + \
+ "to the above interactive package(s)\n",
+ level=logging.INFO, noiselevel=-1)
+ writemsg_level(">>> In order to temporarily mask " + \
+ "interactive updates, you may\n" + \
+ ">>> specify --accept-properties=-interactive\n",
+ level=logging.INFO, noiselevel=-1)
+ self._status_display.quiet = \
+ not background or \
+ ("--quiet" in self.myopts and \
+ "--verbose" not in self.myopts)
+
+ self._logger.xterm_titles = \
+ "notitles" not in self.settings.features and \
+ self._status_display.quiet
+
+ return background
+
+ def _get_interactive_tasks(self):
+ interactive_tasks = []
+ for task in self._mergelist:
+ if not (isinstance(task, Package) and \
+ task.operation == "merge"):
+ continue
+ if 'interactive' in task.properties:
+ interactive_tasks.append(task)
+ return interactive_tasks
+
+ def _set_graph_config(self, graph_config):
+
+ if graph_config is None:
+ self._graph_config = None
+ self._pkg_cache = {}
+ self._digraph = None
+ self._mergelist = []
+ self._world_atoms = None
+ self._deep_system_deps.clear()
+ return
+
+ self._graph_config = graph_config
+ self._pkg_cache = graph_config.pkg_cache
+ self._digraph = graph_config.graph
+ self._mergelist = graph_config.mergelist
+
+ # Generate world atoms while the event loop is not running,
+ # since otherwise portdbapi match calls in the create_world_atom
+ # function could trigger event loop recursion.
+ self._world_atoms = {}
+ for pkg in self._mergelist:
+ if getattr(pkg, 'operation', None) != 'merge':
+ continue
+ atom = create_world_atom(pkg, self._args_set,
+ pkg.root_config, before_install=True)
+ if atom is not None:
+ self._world_atoms[pkg] = atom
+
+ if "--nodeps" in self.myopts or \
+ (self._max_jobs is not True and self._max_jobs < 2):
+ # save some memory
+ self._digraph = None
+ graph_config.graph = None
+ graph_config.pkg_cache.clear()
+ self._deep_system_deps.clear()
+ for pkg in self._mergelist:
+ self._pkg_cache[pkg] = pkg
+ return
+
+ self._find_system_deps()
+ self._prune_digraph()
+ self._prevent_builddir_collisions()
+ if '--debug' in self.myopts:
+ writemsg("\nscheduler digraph:\n\n", noiselevel=-1)
+ self._digraph.debug_print()
+ writemsg("\n", noiselevel=-1)
+
+ def _find_system_deps(self):
+ """
+ Find system packages and their deep runtime dependencies. Before being
+ merged, these packages go to merge_wait_queue, to be merged when no
+ other packages are building.
+ NOTE: This can only find deep system deps if the system set has been
+ added to the graph and traversed deeply (the depgraph "complete"
+ parameter will do this, triggered by the emerge --complete-graph option).
+ """
+ deep_system_deps = self._deep_system_deps
+ deep_system_deps.clear()
+ deep_system_deps.update(
+ _find_deep_system_runtime_deps(self._digraph))
+ deep_system_deps.difference_update([pkg for pkg in \
+ deep_system_deps if pkg.operation != "merge"])
+
+ def _prune_digraph(self):
+ """
+ Prune any root nodes that are irrelevant.
+ """
+
+ graph = self._digraph
+ completed_tasks = self._completed_tasks
+ removed_nodes = set()
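+ # Repeat until no more irrelevant root nodes are found, since
+ # removing one layer of root nodes can expose new root nodes.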
+ while True:
+ for node in graph.root_nodes():
+ if not isinstance(node, Package) or \
+ (node.installed and node.operation == "nomerge") or \
+ node.onlydeps or \
+ node in completed_tasks:
+ removed_nodes.add(node)
+ if removed_nodes:
+ graph.difference_update(removed_nodes)
+ if not removed_nodes:
+ break
+ removed_nodes.clear()
+
+ def _prevent_builddir_collisions(self):
+ """
+ When building stages, sometimes the same exact cpv needs to be merged
+ to both $ROOTs. Add edges to the digraph in order to avoid collisions
+ in the builddir. Currently, normal file locks would be inappropriate
+ for this purpose since emerge holds all of its build dir locks from
+ the main process.
+ """
+ cpv_map = {}
+ for pkg in self._mergelist:
+ if not isinstance(pkg, Package):
+ # a satisfied blocker
+ continue
+ if pkg.installed:
+ continue
+ if pkg.cpv not in cpv_map:
+ cpv_map[pkg.cpv] = [pkg]
+ continue
+ for earlier_pkg in cpv_map[pkg.cpv]:
+ self._digraph.add(earlier_pkg, pkg,
+ priority=DepPriority(buildtime=True))
+ cpv_map[pkg.cpv].append(pkg)
+
+ class _pkg_failure(portage.exception.PortageException):
+ """
+ An instance of this class is raised by unmerge() when
+ an uninstallation fails.
+ """
+ status = 1
+ def __init__(self, *pargs):
+ portage.exception.PortageException.__init__(self, pargs)
+ if pargs:
+ self.status = pargs[0]
+
+ def _schedule_fetch(self, fetcher):
+ """
+ Schedule a fetcher, in order to control the number of concurrent
+ fetchers. If self._max_jobs is greater than 1 then the fetch
+ queue is bypassed and the fetcher is started immediately,
+ otherwise it is added to the front of the parallel-fetch queue.
+ NOTE: The parallel-fetch queue is currently used to serialize
+ access to the parallel-fetch log, so changes in the log handling
+ would be required before it would be possible to enable
+ concurrent fetching within the parallel-fetch queue.
+ """
+ if self._max_jobs > 1:
+ fetcher.start()
+ else:
+ self._task_queues.fetch.addFront(fetcher)
+
+ def _schedule_setup(self, setup_phase):
+ """
+ Schedule a setup phase on the merge queue, in order to
+ serialize unsandboxed access to the live filesystem.
+ """
+ if self._task_queues.merge.max_jobs > 1 and \
+ "ebuild-locks" in self.settings.features:
+ # Use a separate queue for ebuild-locks when the merge
+ # queue allows more than 1 job (due to parallel-install),
+ # since the portage.locks module does not behave as desired
+ # if we try to lock the same file multiple times
+ # concurrently from the same process.
+ self._task_queues.ebuild_locks.add(setup_phase)
+ else:
+ self._task_queues.merge.add(setup_phase)
+ self._schedule()
+
+ def _schedule_unpack(self, unpack_phase):
+ """
+ Schedule an unpack phase on the unpack queue, in order
+ to serialize $DISTDIR access for live ebuilds.
+ """
+ self._task_queues.unpack.add(unpack_phase)
+
+ def _find_blockers(self, new_pkg):
+ """
+ Returns a callable.
+ """
+ def get_blockers():
+ return self._find_blockers_impl(new_pkg)
+ return get_blockers
+
+ def _find_blockers_impl(self, new_pkg):
+ if self._opts_ignore_blockers.intersection(self.myopts):
+ return None
+
+ blocker_db = self._blocker_db[new_pkg.root]
+
+ blocked_pkgs = []
+ for blocking_pkg in blocker_db.findInstalledBlockers(new_pkg):
+ if new_pkg.slot_atom == blocking_pkg.slot_atom:
+ continue
+ if new_pkg.cpv == blocking_pkg.cpv:
+ continue
+ blocked_pkgs.append(blocking_pkg)
+
+ return blocked_pkgs
+
+ def _generate_digests(self):
+ """
+ Generate digests if necessary for --digests or FEATURES=digest.
+ In order to avoid interference, this must be done before parallel
+ tasks are started.
+ """
+
+ digest = '--digest' in self.myopts
+ if not digest:
+ for pkgsettings in self.pkgsettings.values():
+ if pkgsettings.mycpv is not None:
+ # ensure that we are using global features
+ # settings rather than those from package.env
+ pkgsettings.reset()
+ if 'digest' in pkgsettings.features:
+ digest = True
+ break
+
+ if not digest:
+ return os.EX_OK
+
+ for x in self._mergelist:
+ if not isinstance(x, Package) or \
+ x.type_name != 'ebuild' or \
+ x.operation != 'merge':
+ continue
+ pkgsettings = self.pkgsettings[x.root]
+ if pkgsettings.mycpv is not None:
+ # ensure that we are using global features
+ # settings rather than those from package.env
+ pkgsettings.reset()
+ if '--digest' not in self.myopts and \
+ 'digest' not in pkgsettings.features:
+ continue
+ portdb = x.root_config.trees['porttree'].dbapi
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % x.cpv)
+ pkgsettings['O'] = os.path.dirname(ebuild_path)
+ if not digestgen(mysettings=pkgsettings, myportdb=portdb):
+ writemsg_level(
+ "!!! Unable to generate manifest for '%s'.\n" \
+ % x.cpv, level=logging.ERROR, noiselevel=-1)
+ return FAILURE
+
+ return os.EX_OK
+
+ def _check_manifests(self):
+ # Verify all the manifests now so that the user is notified of failure
+ # as soon as possible.
+ if "strict" not in self.settings.features or \
+ "--fetchonly" in self.myopts or \
+ "--fetch-all-uri" in self.myopts:
+ return os.EX_OK
+
+ shown_verifying_msg = False
+ quiet_settings = {}
+ for myroot, pkgsettings in self.pkgsettings.items():
+ quiet_config = portage.config(clone=pkgsettings)
+ quiet_config["PORTAGE_QUIET"] = "1"
+ quiet_config.backup_changes("PORTAGE_QUIET")
+ quiet_settings[myroot] = quiet_config
+ del quiet_config
+
+ failures = 0
+
+ for x in self._mergelist:
+ if not isinstance(x, Package) or \
+ x.type_name != "ebuild":
+ continue
+
+ if x.operation == "uninstall":
+ continue
+
+ if not shown_verifying_msg:
+ shown_verifying_msg = True
+ self._status_msg("Verifying ebuild manifests")
+
+ root_config = x.root_config
+ portdb = root_config.trees["porttree"].dbapi
+ quiet_config = quiet_settings[root_config.root]
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % x.cpv)
+ quiet_config["O"] = os.path.dirname(ebuild_path)
+ if not digestcheck([], quiet_config, strict=True):
+ failures |= 1
+
+ if failures:
+ return FAILURE
+ return os.EX_OK
+
+ def _add_prefetchers(self):
+
+ if not self._parallel_fetch:
+ return
+
+ if self._parallel_fetch:
+
+ prefetchers = self._prefetchers
+
+ for pkg in self._mergelist:
+ # mergelist can contain solved Blocker instances
+ if not isinstance(pkg, Package) or pkg.operation == "uninstall":
+ continue
+ prefetcher = self._create_prefetcher(pkg)
+ if prefetcher is not None:
+ # This will start the first prefetcher immediately, so that
+ # self._task() won't discard it. This avoids a case where
+ # the first prefetcher is discarded, causing the second
+ # prefetcher to occupy the fetch queue before the first
+ # fetcher has an opportunity to execute.
+ prefetchers[pkg] = prefetcher
+ self._task_queues.fetch.add(prefetcher)
+
+ def _create_prefetcher(self, pkg):
+ """
+ @return: a prefetcher, or None if not applicable
+ """
+ prefetcher = None
+
+ if not isinstance(pkg, Package):
+ pass
+
+ elif pkg.type_name == "ebuild":
+
+ prefetcher = EbuildFetcher(background=True,
+ config_pool=self._ConfigPool(pkg.root,
+ self._allocate_config, self._deallocate_config),
+ fetchonly=1, fetchall=self._build_opts.fetch_all_uri,
+ logfile=self._fetch_log,
+ pkg=pkg, prefetch=True, scheduler=self._sched_iface)
+
+ elif pkg.type_name == "binary" and \
+ "--getbinpkg" in self.myopts and \
+ pkg.root_config.trees["bintree"].isremote(pkg.cpv):
+
+ prefetcher = BinpkgPrefetcher(background=True,
+ pkg=pkg, scheduler=self._sched_iface)
+
+ return prefetcher
+
+ def _run_pkg_pretend(self):
+ """
+ Since pkg_pretend output may be important, this method sends all
+ output directly to stdout (regardless of options like --quiet or
+ --jobs).
+ """
+
+ failures = 0
+
+ # Use a local EventLoop instance here, since we don't
+ # want tasks here to trigger the usual Scheduler callbacks
+ # that handle job scheduling and status display.
+ sched_iface = SchedulerInterface(EventLoop(main=False))
+
+ for x in self._mergelist:
+ if not isinstance(x, Package):
+ continue
+
+ if x.operation == "uninstall":
+ continue
+
+ if x.eapi in ("0", "1", "2", "3"):
+ continue
+
+ if "pretend" not in x.defined_phases:
+ continue
+
+ out_str =">>> Running pre-merge checks for " + colorize("INFORM", x.cpv) + "\n"
+ portage.util.writemsg_stdout(out_str, noiselevel=-1)
+
+ root_config = x.root_config
+ settings = self.pkgsettings[root_config.root]
+ settings.setcpv(x)
+
+ # setcpv/package.env allows for per-package PORTAGE_TMPDIR so we
+ # have to validate it for each package
+ rval = _check_temp_dir(settings)
+ if rval != os.EX_OK:
+ return rval
+
+ build_dir_path = os.path.join(
+ os.path.realpath(settings["PORTAGE_TMPDIR"]),
+ "portage", x.category, x.pf)
+ existing_builddir = os.path.isdir(build_dir_path)
+ settings["PORTAGE_BUILDDIR"] = build_dir_path
+ build_dir = EbuildBuildDir(scheduler=sched_iface,
+ settings=settings)
+ sched_iface.run_until_complete(build_dir.async_lock())
+ current_task = None
+
+ try:
+
+ # Clean up the existing build dir, in case pkg_pretend
+ # checks for available space (bug #390711).
+ if existing_builddir:
+ if x.built:
+ tree = "bintree"
+ infloc = os.path.join(build_dir_path, "build-info")
+ ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
+ else:
+ tree = "porttree"
+ portdb = root_config.trees["porttree"].dbapi
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+ if ebuild_path is None:
+ raise AssertionError(
+ "ebuild not found for '%s'" % x.cpv)
+ portage.package.ebuild.doebuild.doebuild_environment(
+ ebuild_path, "clean", settings=settings,
+ db=self.trees[settings['EROOT']][tree].dbapi)
+ clean_phase = EbuildPhase(background=False,
+ phase='clean', scheduler=sched_iface, settings=settings)
+ current_task = clean_phase
+ clean_phase.start()
+ clean_phase.wait()
+
+ if x.built:
+ tree = "bintree"
+ bintree = root_config.trees["bintree"].dbapi.bintree
+ fetched = False
+
+ # Display fetch on stdout, so that it's always clear what
+ # is consuming time here.
+ if bintree.isremote(x.cpv):
+ fetcher = BinpkgFetcher(pkg=x,
+ scheduler=sched_iface)
+ fetcher.start()
+ if fetcher.wait() != os.EX_OK:
+ failures += 1
+ continue
+ fetched = fetcher.pkg_path
+
+ if fetched is False:
+ filename = bintree.getname(x.cpv)
+ else:
+ filename = fetched
+ verifier = BinpkgVerifier(pkg=x,
+ scheduler=sched_iface, _pkg_path=filename)
+ current_task = verifier
+ verifier.start()
+ if verifier.wait() != os.EX_OK:
+ failures += 1
+ continue
+
+ if fetched:
+ bintree.inject(x.cpv, filename=fetched)
+ tbz2_file = bintree.getname(x.cpv)
+ infloc = os.path.join(build_dir_path, "build-info")
+ ensure_dirs(infloc)
+ portage.xpak.tbz2(tbz2_file).unpackinfo(infloc)
+ ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
+ settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
+ settings.configdict["pkg"]["MERGE_TYPE"] = "binary"
+
+ else:
+ tree = "porttree"
+ portdb = root_config.trees["porttree"].dbapi
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % x.cpv)
+ settings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
+ if self._build_opts.buildpkgonly:
+ settings.configdict["pkg"]["MERGE_TYPE"] = "buildonly"
+ else:
+ settings.configdict["pkg"]["MERGE_TYPE"] = "source"
+
+ portage.package.ebuild.doebuild.doebuild_environment(ebuild_path,
+ "pretend", settings=settings,
+ db=self.trees[settings['EROOT']][tree].dbapi)
+
+ prepare_build_dirs(root_config.root, settings, cleanup=0)
+
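+ # REPLACING_VERSIONS lists the versions of any installed packages
+ # in the same slot (plus an exact cpv match), as defined for
+ # pkg_* phases in EAPI 4 and later.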
+ vardb = root_config.trees['vartree'].dbapi
+ settings["REPLACING_VERSIONS"] = " ".join(
+ set(portage.versions.cpv_getversion(match) \
+ for match in vardb.match(x.slot_atom) + \
+ vardb.match('='+x.cpv)))
+ pretend_phase = EbuildPhase(
+ phase="pretend", scheduler=sched_iface,
+ settings=settings)
+
+ current_task = pretend_phase
+ pretend_phase.start()
+ ret = pretend_phase.wait()
+ if ret != os.EX_OK:
+ failures += 1
+ portage.elog.elog_process(x.cpv, settings)
+ finally:
+
+ if current_task is not None:
+ if current_task.isAlive():
+ current_task.cancel()
+ current_task.wait()
+ if current_task.returncode == os.EX_OK:
+ clean_phase = EbuildPhase(background=False,
+ phase='clean', scheduler=sched_iface,
+ settings=settings)
+ clean_phase.start()
+ clean_phase.wait()
+
+ sched_iface.run_until_complete(build_dir.async_unlock())
+
+ if failures:
+ return FAILURE
+ return os.EX_OK
+
+ def merge(self):
+ if "--resume" in self.myopts:
+ # We're resuming.
+ portage.writemsg_stdout(
+ colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
+ self._logger.log(" *** Resuming merge...")
+
+ self._save_resume_list()
+
+ try:
+ self._background = self._background_mode()
+ except self._unknown_internal_error:
+ return FAILURE
+
+ rval = self._handle_self_update()
+ if rval != os.EX_OK:
+ return rval
+
+ for root in self.trees:
+ root_config = self.trees[root]["root_config"]
+
+ # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
+ # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
+ # for ensuring sane $PWD (bug #239560) and storing elog messages.
+ tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
+ if not tmpdir or not os.path.isdir(tmpdir):
+ msg = (
+ 'The directory specified in your PORTAGE_TMPDIR variable does not exist:',
+ tmpdir,
+ 'Please create this directory or correct your PORTAGE_TMPDIR setting.',
+ )
+ out = portage.output.EOutput()
+ for l in msg:
+ out.eerror(l)
+ return FAILURE
+
+ if self._background:
+ root_config.settings.unlock()
+ root_config.settings["PORTAGE_BACKGROUND"] = "1"
+ root_config.settings.backup_changes("PORTAGE_BACKGROUND")
+ root_config.settings.lock()
+
+ self.pkgsettings[root] = portage.config(
+ clone=root_config.settings)
+
+ keep_going = "--keep-going" in self.myopts
+ fetchonly = self._build_opts.fetchonly
+ mtimedb = self._mtimedb
+ failed_pkgs = self._failed_pkgs
+
+ rval = self._generate_digests()
+ if rval != os.EX_OK:
+ return rval
+
+ # TODO: Immediately recalculate deps here if --keep-going
+ # is enabled and corrupt manifests are detected.
+ rval = self._check_manifests()
+ if rval != os.EX_OK and not keep_going:
+ return rval
+
+ if not fetchonly:
+ rval = self._run_pkg_pretend()
+ if rval != os.EX_OK:
+ return rval
+
+ while True:
+
+ received_signal = []
+
+ def sighandler(signum, frame):
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % \
+ {"signal":signum})
+ self.terminate()
+ received_signal.append(128 + signum)
+
+ earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
+ earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)
+ earlier_sigcont_handler = \
+ signal.signal(signal.SIGCONT, self._sigcont_handler)
+ signal.siginterrupt(signal.SIGCONT, False)
+
+ try:
+ rval = self._merge()
+ finally:
+ # Restore previous handlers
+ if earlier_sigint_handler is not None:
+ signal.signal(signal.SIGINT, earlier_sigint_handler)
+ else:
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ if earlier_sigterm_handler is not None:
+ signal.signal(signal.SIGTERM, earlier_sigterm_handler)
+ else:
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+ if earlier_sigcont_handler is not None:
+ signal.signal(signal.SIGCONT, earlier_sigcont_handler)
+ else:
+ signal.signal(signal.SIGCONT, signal.SIG_DFL)
+
+ self._termination_check()
+ if received_signal:
+ sys.exit(received_signal[0])
+
+ if rval == os.EX_OK or fetchonly or not keep_going:
+ break
+ if "resume" not in mtimedb:
+ break
+ mergelist = self._mtimedb["resume"].get("mergelist")
+ if not mergelist:
+ break
+
+ if not failed_pkgs:
+ break
+
+ for failed_pkg in failed_pkgs:
+ mergelist.remove(list(failed_pkg.pkg))
+
+ self._failed_pkgs_all.extend(failed_pkgs)
+ del failed_pkgs[:]
+
+ if not mergelist:
+ break
+
+ if not self._calc_resume_list():
+ break
+
+ clear_caches(self.trees)
+ if not self._mergelist:
+ break
+
+ self._save_resume_list()
+ self._pkg_count.curval = 0
+ self._pkg_count.maxval = len([x for x in self._mergelist \
+ if isinstance(x, Package) and x.operation == "merge"])
+ self._status_display.maxval = self._pkg_count.maxval
+
+ # Clean up any callbacks that have been registered with the global
+ # event loop by calls to the terminate method.
+ self._cleanup()
+
+ self._logger.log(" *** Finished. Cleaning up...")
+
+ if failed_pkgs:
+ self._failed_pkgs_all.extend(failed_pkgs)
+ del failed_pkgs[:]
+
+ printer = portage.output.EOutput()
+ background = self._background
+ failure_log_shown = False
+ if background and len(self._failed_pkgs_all) == 1 and \
+ self.myopts.get('--quiet-fail', 'n') != 'y':
+ # If only one package failed then just show its
+ # whole log for easy viewing.
+ failed_pkg = self._failed_pkgs_all[-1]
+ log_file = None
+ log_file_real = None
+
+ log_path = self._locate_failure_log(failed_pkg)
+ if log_path is not None:
+ try:
+ log_file = open(_unicode_encode(log_path,
+ encoding=_encodings['fs'], errors='strict'), mode='rb')
+ except IOError:
+ pass
+ else:
+ if log_path.endswith('.gz'):
+ log_file_real = log_file
+ log_file = gzip.GzipFile(filename='',
+ mode='rb', fileobj=log_file)
+
+ if log_file is not None:
+ try:
+ for line in log_file:
+ writemsg_level(line, noiselevel=-1)
+ except zlib.error as e:
+ writemsg_level("%s\n" % (e,), level=logging.ERROR,
+ noiselevel=-1)
+ finally:
+ log_file.close()
+ if log_file_real is not None:
+ log_file_real.close()
+ failure_log_shown = True
+
+ # Dump mod_echo output now since it tends to flood the terminal.
+ # This prevents more important output, generated later, from being
+ # swept away by the mod_echo output.
+ mod_echo_output = _flush_elog_mod_echo()
+
+ if background and not failure_log_shown and \
+ self._failed_pkgs_all and \
+ self._failed_pkgs_die_msgs and \
+ not mod_echo_output:
+
+ for mysettings, key, logentries in self._failed_pkgs_die_msgs:
+ root_msg = ""
+ if mysettings["ROOT"] != "/":
+ root_msg = " merged to %s" % mysettings["ROOT"]
+ print()
+ printer.einfo("Error messages for package %s%s:" % \
+ (colorize("INFORM", key), root_msg))
+ print()
+ for phase in portage.const.EBUILD_PHASES:
+ if phase not in logentries:
+ continue
+ for msgtype, msgcontent in logentries[phase]:
+ if isinstance(msgcontent, basestring):
+ msgcontent = [msgcontent]
+ for line in msgcontent:
+ printer.eerror(line.strip("\n"))
+
+ if self._post_mod_echo_msgs:
+ for msg in self._post_mod_echo_msgs:
+ msg()
+
+ if len(self._failed_pkgs_all) > 1 or \
+ (self._failed_pkgs_all and keep_going):
+ if len(self._failed_pkgs_all) > 1:
+ msg = "The following %d packages have " % \
+ len(self._failed_pkgs_all) + \
+ "failed to build, install, or execute postinst:"
+ else:
+ msg = "The following package has " + \
+ "failed to build, install, or execute postinst:"
+
+ printer.eerror("")
+ for line in textwrap.wrap(msg, 72):
+ printer.eerror(line)
+ printer.eerror("")
+ for failed_pkg in self._failed_pkgs_all:
+ # Use unicode_literals to force unicode format string so
+ # that Package.__unicode__() is called in python2.
+ msg = " %s" % (failed_pkg.pkg,)
+ if failed_pkg.postinst_failure:
+ msg += " (postinst failed)"
+ log_path = self._locate_failure_log(failed_pkg)
+ if log_path is not None:
+ msg += ", Log file:"
+ printer.eerror(msg)
+ if log_path is not None:
+ printer.eerror(" '%s'" % colorize('INFORM', log_path))
+ printer.eerror("")
+
+ if self._failed_pkgs_all:
+ return FAILURE
+ return os.EX_OK
+
+ def _elog_listener(self, mysettings, key, logentries, fulltext):
+ errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
+ if errors:
+ self._failed_pkgs_die_msgs.append(
+ (mysettings, key, errors))
+
+ def _locate_failure_log(self, failed_pkg):
+
+ log_paths = [failed_pkg.build_log]
+
+ for log_path in log_paths:
+ if not log_path:
+ continue
+
+ try:
+ log_size = os.stat(log_path).st_size
+ except OSError:
+ continue
+
+ if log_size == 0:
+ continue
+
+ return log_path
+
+ return None
+
+ def _add_packages(self):
+ pkg_queue = self._pkg_queue
+ for pkg in self._mergelist:
+ if isinstance(pkg, Package):
+ pkg_queue.append(pkg)
+ elif isinstance(pkg, Blocker):
+ pass
+
+ def _system_merge_started(self, merge):
+ """
+ Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
+ In general, this keeps track of installed system packages with
+ unsatisfied RDEPEND or PDEPEND (circular dependencies). It can be
+ a fragile situation, so we don't execute any unrelated builds until
+ the circular dependencies are built and installed.
+ """
+ graph = self._digraph
+ if graph is None:
+ return
+ pkg = merge.merge.pkg
+
+ # Skip this if $ROOT != / since it shouldn't matter if there
+ # are unsatisfied system runtime deps in this case.
+ if pkg.root_config.settings["ROOT"] != "/":
+ return
+
+ completed_tasks = self._completed_tasks
+ unsatisfied = self._unsatisfied_system_deps
+
+ def ignore_non_runtime_or_satisfied(priority):
+ """
+ Ignore non-runtime and satisfied runtime priorities.
+ """
+ if isinstance(priority, DepPriority) and \
+ not priority.satisfied and \
+ (priority.runtime or priority.runtime_post):
+ return False
+ return True
+
+ # When checking for unsatisfied runtime deps, only check
+ # direct deps since indirect deps are checked when the
+ # corresponding parent is merged.
+ for child in graph.child_nodes(pkg,
+ ignore_priority=ignore_non_runtime_or_satisfied):
+ if not isinstance(child, Package) or \
+ child.operation == 'uninstall':
+ continue
+ if child is pkg:
+ continue
+ if child.operation == 'merge' and \
+ child not in completed_tasks:
+ unsatisfied.add(child)
+
+ def _merge_wait_exit_handler(self, task):
+ self._merge_wait_scheduled.remove(task)
+ self._merge_exit(task)
+
+ def _merge_exit(self, merge):
+ self._running_tasks.pop(id(merge), None)
+ self._do_merge_exit(merge)
+ self._deallocate_config(merge.merge.settings)
+ if merge.returncode == os.EX_OK and \
+ not merge.merge.pkg.installed:
+ self._status_display.curval += 1
+ self._status_display.merges = len(self._task_queues.merge)
+ self._schedule()
+
+ def _do_merge_exit(self, merge):
+ pkg = merge.merge.pkg
+ if merge.returncode != os.EX_OK:
+ settings = merge.merge.settings
+ build_dir = settings.get("PORTAGE_BUILDDIR")
+ build_log = settings.get("PORTAGE_LOG_FILE")
+
+ self._failed_pkgs.append(self._failed_pkg(
+ build_dir=build_dir, build_log=build_log,
+ pkg=pkg,
+ returncode=merge.returncode))
+ if not self._terminated_tasks:
+ self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
+ self._status_display.failed = len(self._failed_pkgs)
+ return
+
+ if merge.postinst_failure:
+ # Append directly to _failed_pkgs_all for non-critical errors.
+ self._failed_pkgs_all.append(self._failed_pkg(
+ build_dir=merge.merge.settings.get("PORTAGE_BUILDDIR"),
+ build_log=merge.merge.settings.get("PORTAGE_LOG_FILE"),
+ pkg=pkg,
+ postinst_failure=True,
+ returncode=merge.returncode))
+ self._failed_pkg_msg(self._failed_pkgs_all[-1],
+ "execute postinst for", "for")
+
+ self._task_complete(pkg)
+ pkg_to_replace = merge.merge.pkg_to_replace
+ if pkg_to_replace is not None:
+ # When a package is replaced, mark its uninstall
+ # task complete (if any).
+ if self._digraph is not None and \
+ pkg_to_replace in self._digraph:
+ try:
+ self._pkg_queue.remove(pkg_to_replace)
+ except ValueError:
+ pass
+ self._task_complete(pkg_to_replace)
+ else:
+ self._pkg_cache.pop(pkg_to_replace, None)
+
+ if pkg.installed:
+ return
+
+ # Call mtimedb.commit() after each merge so that
+ # --resume still works after being interrupted
+ # by reboot, sigkill or similar.
+ mtimedb = self._mtimedb
+ mtimedb["resume"]["mergelist"].remove(list(pkg))
+ if not mtimedb["resume"]["mergelist"]:
+ del mtimedb["resume"]
+ mtimedb.commit()
+
+ def _build_exit(self, build):
+ self._running_tasks.pop(id(build), None)
+ if build.returncode == os.EX_OK and self._terminated_tasks:
+ # We've been interrupted, so we won't
+ # add this to the merge queue.
+ self.curval += 1
+ self._deallocate_config(build.settings)
+ elif build.returncode == os.EX_OK:
+ self.curval += 1
+ merge = PackageMerge(merge=build)
+ self._running_tasks[id(merge)] = merge
+ if not build.build_opts.buildpkgonly and \
+ build.pkg in self._deep_system_deps:
+ # Since dependencies on system packages are frequently
+ # unspecified, merge them only when no builds are executing.
+ self._merge_wait_queue.append(merge)
+ merge.addStartListener(self._system_merge_started)
+ else:
+ merge.addExitListener(self._merge_exit)
+ self._task_queues.merge.add(merge)
+ self._status_display.merges = len(self._task_queues.merge)
+ else:
+ settings = build.settings
+ build_dir = settings.get("PORTAGE_BUILDDIR")
+ build_log = settings.get("PORTAGE_LOG_FILE")
+
+ self._failed_pkgs.append(self._failed_pkg(
+ build_dir=build_dir, build_log=build_log,
+ pkg=build.pkg,
+ returncode=build.returncode))
+ if not self._terminated_tasks:
+ self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
+ self._status_display.failed = len(self._failed_pkgs)
+ self._deallocate_config(build.settings)
+ self._jobs -= 1
+ self._status_display.running = self._jobs
+ self._schedule()
+
+ def _extract_exit(self, build):
+ self._build_exit(build)
+
+ def _task_complete(self, pkg):
+ self._completed_tasks.add(pkg)
+ self._unsatisfied_system_deps.discard(pkg)
+ self._choose_pkg_return_early = False
+ blocker_db = self._blocker_db[pkg.root]
+ blocker_db.discardBlocker(pkg)
+
+ def _main_loop(self):
+ self._main_exit = self._event_loop.create_future()
+
+ if self._max_load is not None and \
+ self._loadavg_latency is not None and \
+ (self._max_jobs is True or self._max_jobs > 1):
+ # We have to schedule periodically, in case the load
+ # average has changed since the last call.
+ self._main_loadavg_handle = self._event_loop.call_later(
+ self._loadavg_latency, self._schedule)
+
+ self._schedule()
+ self._event_loop.run_until_complete(self._main_exit)
+
+ def _merge(self):
+
+ if self._opts_no_background.intersection(self.myopts):
+ self._set_max_jobs(1)
+
+ self._add_prefetchers()
+ self._add_packages()
+ failed_pkgs = self._failed_pkgs
+ portage.locks._quiet = self._background
+ portage.elog.add_listener(self._elog_listener)
+
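+ # Periodically refresh the status display; the callback re-arms
+ # itself via call_later until its handle is cancelled in the
+ # finally block below.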
+ def display_callback():
+ self._status_display.display()
+ display_callback.handle = self._event_loop.call_later(
+ self._max_display_latency, display_callback)
+ display_callback.handle = None
+
+ if self._status_display._isatty and not self._status_display.quiet:
+ display_callback()
+ rval = os.EX_OK
+
+ try:
+ self._main_loop()
+ finally:
+ self._main_loop_cleanup()
+ portage.locks._quiet = False
+ portage.elog.remove_listener(self._elog_listener)
+ if display_callback.handle is not None:
+ display_callback.handle.cancel()
+ if failed_pkgs:
+ rval = failed_pkgs[-1].returncode
+
+ return rval
+
+ def _main_loop_cleanup(self):
+ del self._pkg_queue[:]
+ self._completed_tasks.clear()
+ self._deep_system_deps.clear()
+ self._unsatisfied_system_deps.clear()
+ self._choose_pkg_return_early = False
+ self._status_display.reset()
+ self._digraph = None
+ self._task_queues.fetch.clear()
+ self._prefetchers.clear()
+ self._main_exit = None
+ if self._main_loadavg_handle is not None:
+ self._main_loadavg_handle.cancel()
+ self._main_loadavg_handle = None
+
+ def _choose_pkg(self):
+ """
+ Choose a task that has all its dependencies satisfied. This is used
+ for parallel build scheduling, and ensures that we don't build
+ anything with deep dependencies that have yet to be merged.
+ """
+
+ if self._choose_pkg_return_early:
+ return None
+
+ if self._digraph is None:
+ if self._is_work_scheduled() and \
+ not ("--nodeps" in self.myopts and \
+ (self._max_jobs is True or self._max_jobs > 1)):
+ self._choose_pkg_return_early = True
+ return None
+ return self._pkg_queue.pop(0)
+
+ if not self._is_work_scheduled():
+ return self._pkg_queue.pop(0)
+
+ self._prune_digraph()
+
+ chosen_pkg = None
+
+ # Prefer uninstall operations when available.
+ graph = self._digraph
+ for pkg in self._pkg_queue:
+ if pkg.operation == 'uninstall' and \
+ not graph.child_nodes(pkg):
+ chosen_pkg = pkg
+ break
+
+ if chosen_pkg is None:
+ later = set(self._pkg_queue)
+ for pkg in self._pkg_queue:
+ later.remove(pkg)
+ if not self._dependent_on_scheduled_merges(pkg, later):
+ chosen_pkg = pkg
+ break
+
+ if chosen_pkg is not None:
+ self._pkg_queue.remove(chosen_pkg)
+
+ if chosen_pkg is None:
+ # There's no point in searching for a package to
+ # choose until at least one of the existing jobs
+ # completes.
+ self._choose_pkg_return_early = True
+
+ return chosen_pkg
+
+ def _dependent_on_scheduled_merges(self, pkg, later):
+ """
+ Traverse the subgraph of the given package's deep dependencies
+ to see if it contains any scheduled merges.
+ @param pkg: a package to check dependencies for
+ @type pkg: Package
+ @param later: packages for which dependence should be ignored
+ since they will be merged later than pkg anyway and therefore
+ delaying the merge of pkg will not result in a more optimal
+ merge order
+ @type later: set
+ @rtype: bool
+ @return: True if the package is dependent, False otherwise.
+ """
+
+ graph = self._digraph
+ completed_tasks = self._completed_tasks
+
+ dependent = False
+ traversed_nodes = set([pkg])
+ direct_deps = graph.child_nodes(pkg)
+ node_stack = direct_deps
+ direct_deps = frozenset(direct_deps)
+ while node_stack:
+ node = node_stack.pop()
+ if node in traversed_nodes:
+ continue
+ traversed_nodes.add(node)
+ if not ((node.installed and node.operation == "nomerge") or \
+ (node.operation == "uninstall" and \
+ node not in direct_deps) or \
+ node in completed_tasks or \
+ node in later):
+ dependent = True
+ break
+
+ # Don't traverse children of uninstall nodes since
+ # those aren't dependencies in the usual sense.
+ if node.operation != "uninstall":
+ node_stack.extend(graph.child_nodes(node))
+
+ return dependent
+
+ def _allocate_config(self, root):
+ """
+ Allocate a unique config instance for a task in order
+ to prevent interference between parallel tasks.
+ """
+ if self._config_pool[root]:
+ temp_settings = self._config_pool[root].pop()
+ else:
+ temp_settings = portage.config(clone=self.pkgsettings[root])
+ # Since config.setcpv() isn't guaranteed to call config.reset() (for
+ # performance reasons), call it here to make sure all settings from the
+ # previous package get flushed out (such as PORTAGE_LOG_FILE).
+ temp_settings.reload()
+ temp_settings.reset()
+ return temp_settings
+
+ def _deallocate_config(self, settings):
+ self._config_pool[settings['EROOT']].append(settings)
+
+ def _keep_scheduling(self):
+ return bool(not self._terminated.is_set() and self._pkg_queue and \
+ not (self._failed_pkgs and not self._build_opts.fetchonly))
+
+ def _is_work_scheduled(self):
+ return bool(self._running_tasks)
+
+ def _running_job_count(self):
+ return self._jobs
+
+ def _schedule_tasks(self):
+
+ while True:
+
+ state_change = 0
+
+ # When the number of jobs and merges drops to zero,
+ # process a single merge from _merge_wait_queue if
+ # it's not empty. We only process one since these are
+ # special packages and we want to ensure that
+ # parallel-install does not cause more than one of
+ # them to install at the same time.
+ if (self._merge_wait_queue and not self._jobs and
+ not self._task_queues.merge):
+ task = self._merge_wait_queue.popleft()
+ task.addExitListener(self._merge_wait_exit_handler)
+ self._merge_wait_scheduled.append(task)
+ self._task_queues.merge.add(task)
+ self._status_display.merges = len(self._task_queues.merge)
+ state_change += 1
+
+ if self._schedule_tasks_imp():
+ state_change += 1
+
+ self._status_display.display()
+
+ # Cancel prefetchers if they're the only reason
+ # the main poll loop is still running.
+ if self._failed_pkgs and not self._build_opts.fetchonly and \
+ not self._is_work_scheduled() and \
+ self._task_queues.fetch:
+ # Since this happens asynchronously, it doesn't count in
+ # state_change (counting it triggers an infinite loop).
+ self._task_queues.fetch.clear()
+
+ if not (state_change or \
+ (self._merge_wait_queue and not self._jobs and
+ not self._task_queues.merge)):
+ break
+
+ if not (self._is_work_scheduled() or
+ self._keep_scheduling() or self._main_exit.done()):
+ self._main_exit.set_result(None)
+ elif self._main_loadavg_handle is not None:
+ self._main_loadavg_handle.cancel()
+ self._main_loadavg_handle = self._event_loop.call_later(
+ self._loadavg_latency, self._schedule)
+
+ def _sigcont_handler(self, signum, frame):
+ self._sigcont_time = time.time()
+
+ def _job_delay(self):
+ """
+ @rtype: bool
+ @return: True if job scheduling should be delayed, False otherwise.
+ """
+
+ if self._jobs and self._max_load is not None:
+
+ current_time = time.time()
+
+ if self._sigcont_time is not None:
+
+ elapsed_seconds = current_time - self._sigcont_time
+ # elapsed_seconds < 0 means the system clock has been adjusted
+ if elapsed_seconds > 0 and \
+ elapsed_seconds < self._sigcont_delay:
+
+ if self._job_delay_timeout_id is not None:
+ self._job_delay_timeout_id.cancel()
+
+ self._job_delay_timeout_id = self._event_loop.call_later(
+ self._sigcont_delay - elapsed_seconds,
+ self._schedule)
+ return True
+
+ # Only set this to None after the delay has expired,
+ # since this method may be called again before the
+ # delay has expired.
+ self._sigcont_time = None
+
+ try:
+ avg1, avg5, avg15 = getloadavg()
+ except OSError:
+ return False
+
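+ # Scale the delay with the 1 minute load average relative to
+ # --load-average, capped at _job_delay_max. For example, with
+ # --load-average=4 and avg1=2 the delay is 5 * 2 / 4 = 2.5
+ # seconds, while avg1 >= 4 yields the full 5 second maximum.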
+ delay = self._job_delay_max * avg1 / self._max_load
+ if delay > self._job_delay_max:
+ delay = self._job_delay_max
+ elapsed_seconds = current_time - self._previous_job_start_time
+ # elapsed_seconds < 0 means the system clock has been adjusted
+ if elapsed_seconds > 0 and elapsed_seconds < delay:
+
+ if self._job_delay_timeout_id is not None:
+ self._job_delay_timeout_id.cancel()
+
+ self._job_delay_timeout_id = self._event_loop.call_later(
+ delay - elapsed_seconds, self._schedule)
+ return True
+
+ return False
+
+ def _schedule_tasks_imp(self):
+ """
+ @rtype: bool
+ @return: True if state changed, False otherwise.
+ """
+
+ state_change = 0
+
+ while True:
+
+ if not self._keep_scheduling():
+ return bool(state_change)
+
+ if self._choose_pkg_return_early or \
+ self._merge_wait_scheduled or \
+ (self._jobs and self._unsatisfied_system_deps) or \
+ not self._can_add_job() or \
+ self._job_delay():
+ return bool(state_change)
+
+ pkg = self._choose_pkg()
+ if pkg is None:
+ return bool(state_change)
+
+ state_change += 1
+
+ if not pkg.installed:
+ self._pkg_count.curval += 1
+
+ task = self._task(pkg)
+
+ if pkg.installed:
+ merge = PackageMerge(merge=task)
+ self._running_tasks[id(merge)] = merge
+ merge.addExitListener(self._merge_exit)
+ self._task_queues.merge.addFront(merge)
+
+ elif pkg.built:
+ self._jobs += 1
+ self._previous_job_start_time = time.time()
+ self._status_display.running = self._jobs
+ self._running_tasks[id(task)] = task
+ task.addExitListener(self._extract_exit)
+ self._task_queues.jobs.add(task)
+
+ else:
+ self._jobs += 1
+ self._previous_job_start_time = time.time()
+ self._status_display.running = self._jobs
+ self._running_tasks[id(task)] = task
+ task.addExitListener(self._build_exit)
+ self._task_queues.jobs.add(task)
+
+ return bool(state_change)
+
+ def _task(self, pkg):
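+ """
+ Create a MergeListItem task for the given package, resolving any
+ installed package that it will replace in the same slot and reusing
+ a live prefetcher when one exists.
+ """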
+
+ pkg_to_replace = None
+ if pkg.operation != "uninstall":
+ vardb = pkg.root_config.trees["vartree"].dbapi
+ previous_cpv = [x for x in vardb.match(pkg.slot_atom) \
+ if portage.cpv_getkey(x) == pkg.cp]
+ if not previous_cpv and vardb.cpv_exists(pkg.cpv):
+ # same cpv, different SLOT
+ previous_cpv = [pkg.cpv]
+ if previous_cpv:
+ previous_cpv = previous_cpv.pop()
+ pkg_to_replace = self._pkg(previous_cpv,
+ "installed", pkg.root_config, installed=True,
+ operation="uninstall")
+
+ try:
+ prefetcher = self._prefetchers.pop(pkg, None)
+ except KeyError:
+ # KeyError observed with PyPy 1.8, despite None given as default.
+ # Note that PyPy 1.8 has the same WeakValueDictionary code as
+ # CPython 2.7, so it may be possible for CPython to raise KeyError
+ # here as well.
+ prefetcher = None
+ if prefetcher is not None and not prefetcher.isAlive():
+ try:
+ self._task_queues.fetch._task_queue.remove(prefetcher)
+ except ValueError:
+ pass
+ prefetcher = None
+
+ task = MergeListItem(args_set=self._args_set,
+ background=self._background, binpkg_opts=self._binpkg_opts,
+ build_opts=self._build_opts,
+ config_pool=self._ConfigPool(pkg.root,
+ self._allocate_config, self._deallocate_config),
+ emerge_opts=self.myopts,
+ find_blockers=self._find_blockers(pkg), logger=self._logger,
+ mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
+ pkg_to_replace=pkg_to_replace,
+ prefetcher=prefetcher,
+ scheduler=self._sched_iface,
+ settings=self._allocate_config(pkg.root),
+ statusMessage=self._status_msg,
+ world_atom=self._world_atom)
+
+ return task
+
+ def _failed_pkg_msg(self, failed_pkg, action, preposition):
+ pkg = failed_pkg.pkg
+ msg = "%s to %s %s" % \
+ (bad("Failed"), action, colorize("INFORM", pkg.cpv))
+ if pkg.root_config.settings["ROOT"] != "/":
+ msg += " %s %s" % (preposition, pkg.root)
+
+ log_path = self._locate_failure_log(failed_pkg)
+ if log_path is not None:
+ msg += ", Log file:"
+ self._status_msg(msg)
+
+ if log_path is not None:
+ self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
+
+ def _status_msg(self, msg):
+ """
+ Display a brief status message (no newlines) in the status display.
+ This is called by tasks to provide feedback to the user. It
+ delegates responsibility for generating \r and \n control characters
+ to the status display, which guarantees that lines are created or
+ erased when necessary and appropriate.
+
+ @type msg: str
+ @param msg: a brief status message (no newlines allowed)
+ """
+ if not self._background:
+ writemsg_level("\n")
+ self._status_display.displayMessage(msg)
+
+ def _save_resume_list(self):
+ """
+ Do this before verifying the ebuild Manifests since it might
+ be possible for the user to use --resume --skipfirst to get past
+ a non-essential package with a broken digest.
+ """
+ mtimedb = self._mtimedb
+
+ mtimedb["resume"] = {}
+ # Stored as a dict starting with portage-2.1.6_rc1, and supported
+ # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
+ # a list type for options.
+ mtimedb["resume"]["myopts"] = self.myopts.copy()
+
+ # Convert Atom instances to plain str.
+ mtimedb["resume"]["favorites"] = [str(x) for x in self._favorites]
+ mtimedb["resume"]["mergelist"] = [list(x) \
+ for x in self._mergelist \
+ if isinstance(x, Package) and x.operation == "merge"]
+
+ mtimedb.commit()
+
+ def _calc_resume_list(self):
+ """
+ Use the current resume list to calculate a new one,
+ dropping any packages with unsatisfied deps.
+ @rtype: bool
+ @return: True if successful, False otherwise.
+ """
+ print(colorize("GOOD", "*** Resuming merge..."))
+
+ # free some memory before creating
+ # the resume depgraph
+ self._destroy_graph()
+
+ myparams = create_depgraph_params(self.myopts, None)
+ success = False
+ e = None
+ try:
+ success, mydepgraph, dropped_tasks = resume_depgraph(
+ self.settings, self.trees, self._mtimedb, self.myopts,
+ myparams, self._spinner)
+ except depgraph.UnsatisfiedResumeDep as exc:
+ # rename variable to avoid python-3.0 error:
+ # SyntaxError: can not delete variable 'e' referenced in nested
+ # scope
+ e = exc
+ mydepgraph = e.depgraph
+ dropped_tasks = {}
+
+ if e is not None:
+ def unsatisfied_resume_dep_msg():
+ mydepgraph.display_problems()
+ out = portage.output.EOutput()
+ out.eerror("One or more packages are either masked or " + \
+ "have missing dependencies:")
+ out.eerror("")
+ indent = " "
+ show_parents = set()
+ for dep in e.value:
+ if dep.parent in show_parents:
+ continue
+ show_parents.add(dep.parent)
+ if dep.atom is None:
+ out.eerror(indent + "Masked package:")
+ out.eerror(2 * indent + str(dep.parent))
+ out.eerror("")
+ else:
+ out.eerror(indent + str(dep.atom) + " pulled in by:")
+ out.eerror(2 * indent + str(dep.parent))
+ out.eerror("")
+ msg = "The resume list contains packages " + \
+ "that are either masked or have " + \
+ "unsatisfied dependencies. " + \
+ "Please restart/continue " + \
+ "the operation manually, or use --skipfirst " + \
+ "to skip the first package in the list and " + \
+ "any other packages that may be " + \
+ "masked or have missing dependencies."
+ for line in textwrap.wrap(msg, 72):
+ out.eerror(line)
+ self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
+ return False
+
+ if success and self._show_list():
+ mydepgraph.display(mydepgraph.altlist(), favorites=self._favorites)
+
+ if not success:
+ self._post_mod_echo_msgs.append(mydepgraph.display_problems)
+ return False
+ mydepgraph.display_problems()
+ self._init_graph(mydepgraph.schedulerGraph())
+
+ msg_width = 75
+ for task, atoms in dropped_tasks.items():
+ if not (isinstance(task, Package) and task.operation == "merge"):
+ continue
+ pkg = task
+ msg = "emerge --keep-going:" + \
+ " %s" % (pkg.cpv,)
+ if pkg.root_config.settings["ROOT"] != "/":
+ msg += " for %s" % (pkg.root,)
+ if not atoms:
+ msg += " dropped because it is masked or unavailable"
+ else:
+ msg += " dropped because it requires %s" % ", ".join(atoms)
+ for line in textwrap.wrap(msg, msg_width):
+ eerror(line, phase="other", key=pkg.cpv)
+ settings = self.pkgsettings[pkg.root]
+ # Ensure that log collection from $T is disabled inside
+ # elog_process(), since any logs that might exist are
+ # not valid here.
+ settings.pop("T", None)
+ portage.elog.elog_process(pkg.cpv, settings)
+ self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
+
+ return True
+
+ def _show_list(self):
+ myopts = self.myopts
+ if "--quiet" not in myopts and \
+ ("--ask" in myopts or "--tree" in myopts or \
+ "--verbose" in myopts):
+ return True
+ return False
+
+ def _world_atom(self, pkg):
+ """
+ Add the package to or remove it from the world file, but only if
+ it's supposed to be added or removed. Otherwise, do nothing.
+ """
+
+ if set(("--buildpkgonly", "--fetchonly",
+ "--fetch-all-uri",
+ "--oneshot", "--onlydeps",
+ "--pretend")).intersection(self.myopts):
+ return
+
+ if pkg.root != self.target_root:
+ return
+
+ args_set = self._args_set
+ if not args_set.findAtomForPackage(pkg):
+ return
+
+ logger = self._logger
+ pkg_count = self._pkg_count
+ root_config = pkg.root_config
+ world_set = root_config.sets["selected"]
+ world_locked = False
+ atom = None
+
+ if pkg.operation != "uninstall":
+ atom = self._world_atoms.get(pkg)
+
+ try:
+
+ if hasattr(world_set, "lock"):
+ world_set.lock()
+ world_locked = True
+
+ if hasattr(world_set, "load"):
+ world_set.load() # maybe it's changed on disk
+
+ if pkg.operation == "uninstall":
+ if hasattr(world_set, "cleanPackage"):
+ world_set.cleanPackage(pkg.root_config.trees["vartree"].dbapi,
+ pkg.cpv)
+ if hasattr(world_set, "remove"):
+ for s in pkg.root_config.setconfig.active:
+ world_set.remove(SETPREFIX+s)
+ else:
+ if atom is not None:
+ if hasattr(world_set, "add"):
+ self._status_msg(('Recording %s in "world" ' + \
+ 'favorites file...') % atom)
+ logger.log(" === (%s of %s) Updating world file (%s)" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv))
+ world_set.add(atom)
+ else:
+ writemsg_level('\n!!! Unable to record %s in "world"\n' % \
+ (atom,), level=logging.WARN, noiselevel=-1)
+ finally:
+ if world_locked:
+ world_set.unlock()
+
+ def _pkg(self, cpv, type_name, root_config, installed=False,
+ operation=None, myrepo=None):
+ """
+ Get a package instance from the cache, or create a new
+ one if necessary. Raises KeyError from aux_get if it
+ fails for some reason (package does not exist or is
+ corrupt).
+ """
+
+ # Reuse existing instance when available.
+ pkg = self._pkg_cache.get(Package._gen_hash_key(cpv=cpv,
+ type_name=type_name, repo_name=myrepo, root_config=root_config,
+ installed=installed, operation=operation))
+
+ if pkg is not None:
+ return pkg
+
+ tree_type = depgraph.pkg_tree_map[type_name]
+ db = root_config.trees[tree_type].dbapi
+ db_keys = list(self.trees[root_config.root][
+ tree_type].dbapi._aux_cache_keys)
+ metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
+ pkg = Package(built=(type_name != "ebuild"),
+ cpv=cpv, installed=installed, metadata=metadata,
+ root_config=root_config, type_name=type_name)
+ self._pkg_cache[pkg] = pkg
+ return pkg
diff --git a/lib/_emerge/SequentialTaskQueue.py b/lib/_emerge/SequentialTaskQueue.py
new file mode 100644
index 000000000..80908936c
--- /dev/null
+++ b/lib/_emerge/SequentialTaskQueue.py
@@ -0,0 +1,81 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from collections import deque
+import sys
+
+from portage.util.SlotObject import SlotObject
+
+class SequentialTaskQueue(SlotObject):
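+ """
+ Run queued tasks in FIFO order, starting at most max_jobs tasks
+ concurrently (unlimited when max_jobs is True, 1 by default).
+ """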
+
+ __slots__ = ("max_jobs", "running_tasks") + \
+ ("_scheduling", "_task_queue")
+
+ def __init__(self, **kwargs):
+ SlotObject.__init__(self, **kwargs)
+ self._task_queue = deque()
+ self.running_tasks = set()
+ if self.max_jobs is None:
+ self.max_jobs = 1
+
+ def add(self, task):
+ self._task_queue.append(task)
+ self.schedule()
+
+ def addFront(self, task):
+ self._task_queue.appendleft(task)
+ self.schedule()
+
+ def schedule(self):
+
+ if self._scheduling:
+ # Ignore any recursive schedule() calls triggered via
+ # self._task_exit().
+ return
+
+ self._scheduling = True
+ try:
+ while self._task_queue and (self.max_jobs is True or
+ len(self.running_tasks) < self.max_jobs):
+ task = self._task_queue.popleft()
+ cancelled = getattr(task, "cancelled", None)
+ if not cancelled:
+ self.running_tasks.add(task)
+ task.addExitListener(self._task_exit)
+ task.start()
+ finally:
+ self._scheduling = False
+
+ def _task_exit(self, task):
+ """
+ Since we can always rely on exit listeners being called, the set of
+ running tasks is always pruned automatically and there is never any need
+ to actively prune it.
+ """
+ self.running_tasks.remove(task)
+ if self._task_queue:
+ self.schedule()
+
+ def clear(self):
+ """
+ Clear the task queue and asynchronously terminate any running tasks.
+ """
+ self._task_queue.clear()
+ for task in list(self.running_tasks):
+ task.cancel()
+
+ def wait(self):
+ """
+ Synchronously wait for all running tasks to exit.
+ """
+ while self.running_tasks:
+ next(iter(self.running_tasks)).wait()
+
+ def __bool__(self):
+ return bool(self._task_queue or self.running_tasks)
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+ def __len__(self):
+ return len(self._task_queue) + len(self.running_tasks)
diff --git a/lib/_emerge/SetArg.py b/lib/_emerge/SetArg.py
new file mode 100644
index 000000000..5c8297547
--- /dev/null
+++ b/lib/_emerge/SetArg.py
@@ -0,0 +1,14 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DependencyArg import DependencyArg
+from portage._sets import SETPREFIX
+class SetArg(DependencyArg):
+
+ __slots__ = ('name', 'pset')
+
+ def __init__(self, pset=None, **kwargs):
+ DependencyArg.__init__(self, **kwargs)
+ self.pset = pset
+ self.name = self.arg[len(SETPREFIX):]
+
diff --git a/lib/_emerge/SpawnProcess.py b/lib/_emerge/SpawnProcess.py
new file mode 100644
index 000000000..cd535d143
--- /dev/null
+++ b/lib/_emerge/SpawnProcess.py
@@ -0,0 +1,241 @@
+# Copyright 2008-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+try:
+ import fcntl
+except ImportError:
+ # http://bugs.jython.org/issue1074
+ fcntl = None
+
+import errno
+import logging
+import signal
+import sys
+
+from _emerge.SubProcess import SubProcess
+import portage
+from portage import os
+from portage.const import BASH_BINARY
+from portage.localization import _
+from portage.output import EOutput
+from portage.util import writemsg_level
+from portage.util._async.PipeLogger import PipeLogger
+
+class SpawnProcess(SubProcess):
+
+ """
+ Constructor keyword args are passed into portage.process.spawn().
+ The required "args" keyword argument will be passed as the first
+ spawn() argument.
+ """
+
+ _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
+ "uid", "gid", "groups", "umask", "logfile",
+ "path_lookup", "pre_exec", "close_fds", "cgroup",
+ "unshare_ipc", "unshare_net")
+
+ __slots__ = ("args",) + \
+ _spawn_kwarg_names + ("_pipe_logger", "_selinux_type",)
+
+ # Max number of attempts to kill the processes listed in cgroup.procs,
+ # given that processes may fork before they can be killed.
+ _CGROUP_CLEANUP_RETRY_MAX = 8
+
+ def _start(self):
+
+ if self.fd_pipes is None:
+ self.fd_pipes = {}
+ else:
+ self.fd_pipes = self.fd_pipes.copy()
+ fd_pipes = self.fd_pipes
+
+ master_fd, slave_fd = self._pipe(fd_pipes)
+
+ can_log = self._can_log(slave_fd)
+ if can_log:
+ log_file_path = self.logfile
+ else:
+ log_file_path = None
+
+ null_input = None
+ if not self.background or 0 in fd_pipes:
+ # Subclasses such as AbstractEbuildProcess may have already passed
+ # in a null file descriptor in fd_pipes, so use that when given.
+ pass
+ else:
+ # TODO: Use job control functions like tcsetpgrp() to control
+ # access to stdin. Until then, use /dev/null so that any
+ # attempts to read from stdin will immediately return EOF
+ # instead of blocking indefinitely.
+ null_input = os.open('/dev/null', os.O_RDWR)
+ fd_pipes[0] = null_input
+
+ fd_pipes.setdefault(0, portage._get_stdin().fileno())
+ fd_pipes.setdefault(1, sys.__stdout__.fileno())
+ fd_pipes.setdefault(2, sys.__stderr__.fileno())
+
+ # flush any pending output
+ stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
+ for fd in fd_pipes.values():
+ if fd in stdout_filenos:
+ sys.__stdout__.flush()
+ sys.__stderr__.flush()
+ break
+
+ fd_pipes_orig = fd_pipes.copy()
+
+ if log_file_path is not None or self.background:
+ fd_pipes[1] = slave_fd
+ fd_pipes[2] = slave_fd
+
+ else:
+ # Create a dummy pipe that PipeLogger uses to efficiently
+ # monitor for process exit by listening for the EOF event.
+ # Re-use of the allocated fd number for the key in fd_pipes
+ # guarantees that the keys will not collide for similarly
+ # allocated pipes which are used by callers such as
+ # FileDigester and MergeProcess. See the _setup_pipes
+ # docstring for more benefits of this allocation approach.
+ self._dummy_pipe_fd = slave_fd
+ fd_pipes[slave_fd] = slave_fd
+
+ kwargs = {}
+ for k in self._spawn_kwarg_names:
+ v = getattr(self, k)
+ if v is not None:
+ kwargs[k] = v
+
+ kwargs["fd_pipes"] = fd_pipes
+ kwargs["returnpid"] = True
+ kwargs.pop("logfile", None)
+
+ retval = self._spawn(self.args, **kwargs)
+
+ os.close(slave_fd)
+ if null_input is not None:
+ os.close(null_input)
+
+ if isinstance(retval, int):
+ # spawn failed
+ self.returncode = retval
+ self._async_wait()
+ return
+
+ self.pid = retval[0]
+
+ stdout_fd = None
+ if can_log and not self.background:
+ stdout_fd = os.dup(fd_pipes_orig[1])
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000 and fcntl is not None:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(stdout_fd, fcntl.F_SETFD,
+ fcntl.fcntl(stdout_fd,
+ fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._pipe_logger = PipeLogger(background=self.background,
+ scheduler=self.scheduler, input_fd=master_fd,
+ log_file_path=log_file_path,
+ stdout_fd=stdout_fd)
+ self._pipe_logger.addExitListener(self._pipe_logger_exit)
+ self._pipe_logger.start()
+ self._registered = True
+
+ def _can_log(self, slave_fd):
+ return True
+
+ def _pipe(self, fd_pipes):
+ """
+ @type fd_pipes: dict
+ @param fd_pipes: pipes from which to copy terminal size if desired.
+ """
+ return os.pipe()
+
+ def _spawn(self, args, **kwargs):
+ spawn_func = portage.process.spawn
+
+ if self._selinux_type is not None:
+ spawn_func = portage.selinux.spawn_wrapper(spawn_func,
+ self._selinux_type)
+ # bash is an allowed entrypoint, while most binaries are not
+ if args[0] != BASH_BINARY:
+ args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args
+
+ return spawn_func(args, **kwargs)
+
+ def _pipe_logger_exit(self, pipe_logger):
+ self._pipe_logger = None
+ self._async_waitpid()
+
+ def _unregister(self):
+ SubProcess._unregister(self)
+ if self.cgroup is not None:
+ self._cgroup_cleanup()
+ self.cgroup = None
+ if self._pipe_logger is not None:
+ self._pipe_logger.cancel()
+ self._pipe_logger = None
+
+ def _cancel(self):
+ SubProcess._cancel(self)
+ self._cgroup_cleanup()
+
+ def _cgroup_cleanup(self):
+ if self.cgroup:
+ def get_pids(cgroup):
+ try:
+ with open(os.path.join(cgroup, 'cgroup.procs'), 'r') as f:
+ return [int(p) for p in f.read().split()]
+ except EnvironmentError:
+ # removed by cgroup-release-agent
+ return []
+
+ def kill_all(pids, sig):
+ for p in pids:
+ try:
+ os.kill(p, sig)
+ except OSError as e:
+ if e.errno == errno.EPERM:
+ # Reported with hardened kernel (bug #358211).
+ writemsg_level(
+ "!!! kill: (%i) - Operation not permitted\n" %
+ (p,), level=logging.ERROR,
+ noiselevel=-1)
+ elif e.errno != errno.ESRCH:
+ raise
+
+ # step 1: kill all orphans (loop in case of new forks)
+ remaining = self._CGROUP_CLEANUP_RETRY_MAX
+ while remaining:
+ remaining -= 1
+ pids = get_pids(self.cgroup)
+ if pids:
+ kill_all(pids, signal.SIGKILL)
+ else:
+ break
+
+ if pids:
+ msg = []
+ msg.append(
+ _("Failed to kill pid(s) in '%(cgroup)s': %(pids)s") % dict(
+ cgroup=os.path.join(self.cgroup, 'cgroup.procs'),
+ pids=' '.join(str(pid) for pid in pids)))
+
+ self._elog('eerror', msg)
+
+ # step 2: remove the cgroup
+ try:
+ os.rmdir(self.cgroup)
+ except OSError:
+ # it may be removed already, or busy
+ # we can't do anything good about it
+ pass
+
+ def _elog(self, elog_funcname, lines):
+ elog_func = getattr(EOutput(), elog_funcname)
+ for line in lines:
+ elog_func(line)
diff --git a/lib/_emerge/SubProcess.py b/lib/_emerge/SubProcess.py
new file mode 100644
index 000000000..7d6b03272
--- /dev/null
+++ b/lib/_emerge/SubProcess.py
@@ -0,0 +1,84 @@
+# Copyright 1999-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+
+from portage import os
+from portage.util import writemsg_level
+from _emerge.AbstractPollTask import AbstractPollTask
+import signal
+import errno
+
+class SubProcess(AbstractPollTask):
+
+ __slots__ = ("pid",) + \
+ ("_dummy_pipe_fd", "_files", "_waitpid_id")
+
+ # This is how much time we allow for waitpid to succeed after
+ # we've sent a kill signal to our subprocess.
+ _cancel_timeout = 1 # seconds
+
+ def _poll(self):
+ # Simply rely on _async_waitpid_cb to set the returncode.
+ return self.returncode
+
+ def _cancel(self):
+ if self.isAlive():
+ try:
+ os.kill(self.pid, signal.SIGTERM)
+ except OSError as e:
+ if e.errno == errno.EPERM:
+ # Reported with hardened kernel (bug #358211).
+ writemsg_level(
+ "!!! kill: (%i) - Operation not permitted\n" %
+ (self.pid,), level=logging.ERROR,
+ noiselevel=-1)
+ elif e.errno != errno.ESRCH:
+ raise
+
+ def isAlive(self):
+ return self.pid is not None and \
+ self.returncode is None
+
+ def _async_waitpid(self):
+ """
+ Wait for exit status of self.pid asynchronously, and then
+ set the returncode and notify exit listeners. This is
+ preferred over _waitpid_loop, since the synchronous nature
+ of _waitpid_loop can cause event loop recursion.
+ """
+ if self.returncode is not None:
+ self._async_wait()
+ elif self._waitpid_id is None:
+ self._waitpid_id = self.pid
+ self.scheduler._asyncio_child_watcher.\
+ add_child_handler(self.pid, self._async_waitpid_cb)
+
+ def _async_waitpid_cb(self, pid, returncode):
+ if pid != self.pid:
+ raise AssertionError("expected pid %s, got %s" % (self.pid, pid))
+ self.returncode = returncode
+ self._async_wait()
+
+ def _orphan_process_warn(self):
+ pass
+
+ def _unregister(self):
+ """
+ Unregister from the scheduler and close open files.
+ """
+
+ self._registered = False
+
+ if self._waitpid_id is not None:
+ self.scheduler._asyncio_child_watcher.\
+ remove_child_handler(self._waitpid_id)
+ self._waitpid_id = None
+
+ if self._files is not None:
+ for f in self._files.values():
+ if isinstance(f, int):
+ os.close(f)
+ else:
+ f.close()
+ self._files = None
diff --git a/lib/_emerge/Task.py b/lib/_emerge/Task.py
new file mode 100644
index 000000000..250d45802
--- /dev/null
+++ b/lib/_emerge/Task.py
@@ -0,0 +1,50 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.util.SlotObject import SlotObject
+
+class Task(SlotObject):
+ __slots__ = ("_hash_key", "_hash_value")
+
+ def __eq__(self, other):
+ try:
+ return self._hash_key == other._hash_key
+ except AttributeError:
+ # depgraph._pkg() generates _hash_key
+ # for lookups here, so handle that
+ return self._hash_key == other
+
+ def __ne__(self, other):
+ try:
+ return self._hash_key != other._hash_key
+ except AttributeError:
+ return True
+
+ def __hash__(self):
+ return self._hash_value
+
+ def __len__(self):
+ return len(self._hash_key)
+
+ def __getitem__(self, key):
+ return self._hash_key[key]
+
+ def __iter__(self):
+ return iter(self._hash_key)
+
+ def __contains__(self, key):
+ return key in self._hash_key
+
+ def __str__(self):
+ """
+ Emulate tuple.__repr__, but don't show 'foo' as u'foo' for unicode
+ strings.
+ """
+ return "(%s)" % ", ".join(("'%s'" % x for x in self._hash_key))
+
+ def __repr__(self):
+ if self._hash_key is None:
+ # triggered by python-trace
+ return SlotObject.__repr__(self)
+ return "<%s (%s)>" % (self.__class__.__name__,
+ ", ".join(("'%s'" % x for x in self._hash_key)))
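+
+# A hedged sketch of the hashing contract (values are hypothetical):
+# subclasses such as Package assign _hash_key a tuple like
+# (type_name, root, cpv, operation) and cache its hash in _hash_value,
+# so a Task compares and hashes like that tuple:
+#
+#     task._hash_key = ("ebuild", "/", "sys-apps/sed-4.5", "merge")
+#     task._hash_value = hash(task._hash_key)
+#     task == ("ebuild", "/", "sys-apps/sed-4.5", "merge")    # True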
diff --git a/lib/_emerge/TaskSequence.py b/lib/_emerge/TaskSequence.py
new file mode 100644
index 000000000..1f2ba94c2
--- /dev/null
+++ b/lib/_emerge/TaskSequence.py
@@ -0,0 +1,61 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from collections import deque
+
+from portage import os
+from _emerge.CompositeTask import CompositeTask
+from _emerge.AsynchronousTask import AsynchronousTask
+
+class TaskSequence(CompositeTask):
+ """
+ A collection of tasks that executes sequentially. Each task
+ must have a addExitListener() method that can be used as
+ must have an addExitListener() method that can be used as
+ """
+
+ __slots__ = ("_task_queue",)
+
+ def __init__(self, **kwargs):
+ AsynchronousTask.__init__(self, **kwargs)
+ self._task_queue = deque()
+
+ def add(self, task):
+ self._task_queue.append(task)
+
+ def _start(self):
+ self._start_next_task()
+
+ def _cancel(self):
+ self._task_queue.clear()
+ CompositeTask._cancel(self)
+
+ def _start_next_task(self):
+ try:
+ task = self._task_queue.popleft()
+ except IndexError:
+ self._current_task = None
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ self._start_task(task, self._task_exit_handler)
+
+ def _task_exit_handler(self, task):
+ if self._default_exit(task) != os.EX_OK:
+ self.wait()
+ elif self._task_queue:
+ self._start_next_task()
+ else:
+ self._final_exit(task)
+ self.wait()
+
+ def __bool__(self):
+ return bool(self._task_queue)
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+ def __len__(self):
+ return len(self._task_queue)
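+
+# A hedged usage sketch (the scheduler and queued tasks are hypothetical;
+# any AsynchronousTask subclasses would do):
+#
+#     seq = TaskSequence(scheduler=global_event_loop())
+#     seq.add(first_task)
+#     seq.add(second_task)
+#     seq.start()    # second_task is started only after first_task
+#                    # exits successfully (os.EX_OK)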
diff --git a/lib/_emerge/UninstallFailure.py b/lib/_emerge/UninstallFailure.py
new file mode 100644
index 000000000..e4f28347a
--- /dev/null
+++ b/lib/_emerge/UninstallFailure.py
@@ -0,0 +1,15 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+
+class UninstallFailure(portage.exception.PortageException):
+ """
+ An instance of this class is raised by unmerge() when
+ an uninstallation fails.
+ """
+ status = 1
+ def __init__(self, *pargs):
+ portage.exception.PortageException.__init__(self, pargs)
+ if pargs:
+ self.status = pargs[0]
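+
+# A hedged sketch: status defaults to 1 and is overridden by the first
+# positional argument, so callers can propagate the failing exit status:
+#
+#     try:
+#         ...  # unmerge
+#     except UninstallFailure as e:
+#         return e.status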
diff --git a/lib/_emerge/UnmergeDepPriority.py b/lib/_emerge/UnmergeDepPriority.py
new file mode 100644
index 000000000..ec44a67a1
--- /dev/null
+++ b/lib/_emerge/UnmergeDepPriority.py
@@ -0,0 +1,46 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.AbstractDepPriority import AbstractDepPriority
+class UnmergeDepPriority(AbstractDepPriority):
+ __slots__ = ("ignored", "optional", "satisfied",)
+ """
+ Combination of properties Priority Category
+
+ runtime_slot_op 0 HARD
+ runtime -1 HARD
+ runtime_post -2 HARD
+ buildtime -3 SOFT
+ (none of the above) -3 SOFT
+ """
+
+ MAX = 0
+ SOFT = -3
+ MIN = -3
+
+ def __init__(self, **kwargs):
+ AbstractDepPriority.__init__(self, **kwargs)
+ if self.buildtime:
+ self.optional = True
+
+ def __int__(self):
+ if self.runtime_slot_op:
+ return 0
+ if self.runtime:
+ return -1
+ if self.runtime_post:
+ return -2
+ if self.buildtime:
+ return -3
+ return -3
+
+ def __str__(self):
+ if self.ignored:
+ return "ignored"
+ if self.runtime_slot_op:
+ return "hard slot op"
+ myvalue = self.__int__()
+ if myvalue > self.SOFT:
+ return "hard"
+ return "soft"
+
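+# A hedged sketch of how the constructor keywords map onto the table above
+# (the keywords are AbstractDepPriority slots):
+#
+#     int(UnmergeDepPriority(runtime_slot_op=True))   # 0  -> "hard slot op"
+#     int(UnmergeDepPriority(runtime=True))           # -1 -> "hard"
+#     int(UnmergeDepPriority(buildtime=True))         # -3 -> "soft", optional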
diff --git a/lib/_emerge/UseFlagDisplay.py b/lib/_emerge/UseFlagDisplay.py
new file mode 100644
index 000000000..12820e9d1
--- /dev/null
+++ b/lib/_emerge/UseFlagDisplay.py
@@ -0,0 +1,131 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import collections
+from itertools import chain
+import sys
+
+from portage import _encodings, _unicode_encode
+from portage.output import red
+from portage.util import cmp_sort_key
+from portage.output import blue
+
+class UseFlagDisplay(object):
+
+ __slots__ = ('name', 'enabled', 'forced')
+
+ def __init__(self, name, enabled, forced):
+ self.name = name
+ self.enabled = enabled
+ self.forced = forced
+
+ def __str__(self):
+ s = self.name
+ if self.enabled:
+ s = red(s)
+ else:
+ s = '-' + s
+ s = blue(s)
+ if self.forced:
+ s = '(%s)' % s
+ return s
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'])
+
+ def _cmp_combined(a, b):
+ """
+ Sort by name, combining enabled and disabled flags.
+ """
+ return (a.name > b.name) - (a.name < b.name)
+
+ sort_combined = cmp_sort_key(_cmp_combined)
+ del _cmp_combined
+
+ def _cmp_separated(a, b):
+ """
+ Sort by name, separating enabled flags from disabled flags.
+ """
+ enabled_diff = b.enabled - a.enabled
+ if enabled_diff:
+ return enabled_diff
+ return (a.name > b.name) - (a.name < b.name)
+
+ sort_separated = cmp_sort_key(_cmp_separated)
+ del _cmp_separated
+
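+# A hedged sketch of the rendering (colour codes omitted; the flag name is
+# hypothetical):
+#
+#     str(UseFlagDisplay('ssl', enabled=True, forced=False))    # red 'ssl'
+#     str(UseFlagDisplay('ssl', enabled=False, forced=True))    # blue '(-ssl)'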
+
+_flag_info = collections.namedtuple('_flag_info', ('flag', 'display'))
+
+
+def pkg_use_display(pkg, opts, modified_use=None):
+ settings = pkg.root_config.settings
+ use_expand = pkg.use.expand
+ use_expand_hidden = pkg.use.expand_hidden
+ alphabetical_use = '--alphabetical' in opts
+ forced_flags = set(chain(pkg.use.force,
+ pkg.use.mask))
+ if modified_use is None:
+ use = set(pkg.use.enabled)
+ else:
+ use = set(modified_use)
+ use.discard(settings.get('ARCH'))
+ use_expand_flags = set()
+ use_enabled = {}
+ use_disabled = {}
+ for varname in use_expand:
+ flag_prefix = varname.lower() + "_"
+ for f in use:
+ if f.startswith(flag_prefix):
+ use_expand_flags.add(f)
+ use_enabled.setdefault(
+ varname.upper(), []).append(
+ _flag_info(f, f[len(flag_prefix):]))
+
+ for f in pkg.iuse.all:
+ if f.startswith(flag_prefix):
+ use_expand_flags.add(f)
+ if f not in use:
+ use_disabled.setdefault(
+ varname.upper(), []).append(
+ _flag_info(f, f[len(flag_prefix):]))
+
+ var_order = set(use_enabled)
+ var_order.update(use_disabled)
+ var_order = sorted(var_order)
+ var_order.insert(0, 'USE')
+ use.difference_update(use_expand_flags)
+ use_enabled['USE'] = list(_flag_info(f, f) for f in use)
+ use_disabled['USE'] = []
+
+ for f in pkg.iuse.all:
+ if f not in use and \
+ f not in use_expand_flags:
+ use_disabled['USE'].append(_flag_info(f, f))
+
+ flag_displays = []
+ for varname in var_order:
+ if varname.lower() in use_expand_hidden:
+ continue
+ flags = []
+ for f in use_enabled.get(varname, []):
+ flags.append(UseFlagDisplay(f.display, True, f.flag in forced_flags))
+ for f in use_disabled.get(varname, []):
+ flags.append(UseFlagDisplay(f.display, False, f.flag in forced_flags))
+ if alphabetical_use:
+ flags.sort(key=UseFlagDisplay.sort_combined)
+ else:
+ flags.sort(key=UseFlagDisplay.sort_separated)
+ # Use unicode_literals to force unicode format string so
+ # that UseFlagDisplay.__unicode__() is called in python2.
+ flag_displays.append('%s="%s"' % (varname,
+ ' '.join("%s" % (f,) for f in flags)))
+
+ return ' '.join(flag_displays)
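+
+# The result is one quoted group per USE_EXPAND variable plus USE itself,
+# for example (a hedged sketch with hypothetical flags):
+#
+#     USE="ssl -static" PYTHON_TARGETS="python3_6 -python2_7"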
diff --git a/lib/_emerge/UserQuery.py b/lib/_emerge/UserQuery.py
new file mode 100644
index 000000000..e20bbc6c3
--- /dev/null
+++ b/lib/_emerge/UserQuery.py
@@ -0,0 +1,78 @@
+# Copyright 1999-2016 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function, unicode_literals
+
+import signal
+import sys
+
+from portage import _unicode_decode
+from portage.output import bold, create_color_func
+
+
+class UserQuery(object):
+ """The UserQuery class is used to prompt the user with a set of responses,
+ as well as accepting and handling the responses."""
+
+ def __init__(self, myopts):
+ self.myopts = myopts
+
+ def query(self, prompt, enter_invalid, responses=None, colours=None):
+ """Display a prompt and a set of responses, then wait for user input
+ and check it against the responses. The first match is returned.
+
+ An empty response will match the first value in the list of responses,
+ unless enter_invalid is True. The input buffer is *not* cleared prior
+ to the prompt!
+
+ prompt: The String to display as a prompt.
+ responses: a List of Strings with the acceptable responses.
+ colours: a List of Functions taking and returning a String, used to
+ process the responses for display. Typically these will be functions
+ like red() but could be e.g. lambda x: "DisplayString".
+
+ If responses is omitted, it defaults to ["Yes", "No"], [green, red].
+ If only colours is omitted, it defaults to [bold, ...].
+
+ Returns a member of the List responses. (If called without optional
+ arguments, it returns "Yes" or "No".)
+
+ KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
+ printed."""
+ if responses is None:
+ responses = ["Yes", "No"]
+ colours = [
+ create_color_func("PROMPT_CHOICE_DEFAULT"),
+ create_color_func("PROMPT_CHOICE_OTHER")
+ ]
+ elif colours is None:
+ colours=[bold]
+ colours=(colours*len(responses))[:len(responses)]
+ responses = [_unicode_decode(x) for x in responses]
+ if "--alert" in self.myopts:
+ prompt = '\a' + prompt
+ print(bold(prompt), end=' ')
+ try:
+ while True:
+ if sys.hexversion >= 0x3000000:
+ try:
+ response = input("[%s] " %
+ "/".join([colours[i](responses[i])
+ for i in range(len(responses))]))
+ except UnicodeDecodeError as e:
+ response = _unicode_decode(e.object).rstrip('\n')
+ else:
+ response=raw_input("["+"/".join([colours[i](responses[i])
+ for i in range(len(responses))])+"] ")
+ response = _unicode_decode(response)
+ if response or not enter_invalid:
+ for key in responses:
+ # An empty response will match the
+ # first value in responses.
+ if response.upper()==key[:len(response)].upper():
+ return key
+ print("Sorry, response '%s' not understood." % response,
+ end=' ')
+ except (EOFError, KeyboardInterrupt):
+ print("Interrupted.")
+ sys.exit(128 + signal.SIGINT)
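+
+# A hedged usage sketch (the options dict and prompt are hypothetical):
+#
+#     uq = UserQuery(myopts)
+#     if uq.query("Would you like to continue?", enter_invalid=False) == "No":
+#         sys.exit(128 + signal.SIGINT)
+#
+# With the default responses, an empty input selects "Yes" unless
+# enter_invalid is True, in which case the prompt is repeated.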
diff --git a/lib/_emerge/__init__.py b/lib/_emerge/__init__.py
new file mode 100644
index 000000000..f98c56457
--- /dev/null
+++ b/lib/_emerge/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/_emerge/_find_deep_system_runtime_deps.py b/lib/_emerge/_find_deep_system_runtime_deps.py
new file mode 100644
index 000000000..ca09d83ac
--- /dev/null
+++ b/lib/_emerge/_find_deep_system_runtime_deps.py
@@ -0,0 +1,38 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.DepPriority import DepPriority
+from _emerge.Package import Package
+
+def _find_deep_system_runtime_deps(graph):
+ deep_system_deps = set()
+ node_stack = []
+ for node in graph:
+ if not isinstance(node, Package) or \
+ node.operation == 'uninstall':
+ continue
+ if node.root_config.sets['system'].findAtomForPackage(node):
+ node_stack.append(node)
+
+ def ignore_priority(priority):
+ """
+ Ignore non-runtime priorities.
+ """
+ if isinstance(priority, DepPriority) and \
+ (priority.runtime or priority.runtime_post):
+ return False
+ return True
+
+ while node_stack:
+ node = node_stack.pop()
+ if node in deep_system_deps:
+ continue
+ deep_system_deps.add(node)
+ for child in graph.child_nodes(node, ignore_priority=ignore_priority):
+ if not isinstance(child, Package) or \
+ child.operation == 'uninstall':
+ continue
+ node_stack.append(child)
+
+ return deep_system_deps
+
diff --git a/lib/_emerge/_flush_elog_mod_echo.py b/lib/_emerge/_flush_elog_mod_echo.py
new file mode 100644
index 000000000..9ac65b8ae
--- /dev/null
+++ b/lib/_emerge/_flush_elog_mod_echo.py
@@ -0,0 +1,15 @@
+# Copyright 1999-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.elog import mod_echo
+
+def _flush_elog_mod_echo():
+ """
+ Dump the mod_echo output now so that our other
+ notifications are shown last.
+ @rtype: bool
+ @return: True if messages were shown, False otherwise.
+ """
+ messages_shown = bool(mod_echo._items)
+ mod_echo.finalize()
+ return messages_shown
diff --git a/lib/_emerge/actions.py b/lib/_emerge/actions.py
new file mode 100644
index 000000000..f7232341d
--- /dev/null
+++ b/lib/_emerge/actions.py
@@ -0,0 +1,3337 @@
+# Copyright 1999-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division, print_function, unicode_literals
+
+import errno
+import logging
+import operator
+import platform
+import pwd
+import random
+import re
+import signal
+import socket
+import stat
+import subprocess
+import sys
+import tempfile
+import textwrap
+import time
+import warnings
+from itertools import chain
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.dbapi._similar_name_search:similar_name_search',
+ 'portage.debug',
+ 'portage.news:count_unread_news,display_news_notifications',
+ 'portage.util._get_vm_info:get_vm_info',
+ 'portage.util.locale:check_locale',
+ 'portage.emaint.modules.sync.sync:SyncRepos',
+ '_emerge.chk_updated_cfg_files:chk_updated_cfg_files',
+ '_emerge.help:help@emerge_help',
+ '_emerge.post_emerge:display_news_notification,post_emerge',
+ '_emerge.stdout_spinner:stdout_spinner',
+)
+
+from portage import os
+from portage import shutil
+from portage import eapi_is_supported, _encodings, _unicode_decode
+from portage.cache.cache_errors import CacheError
+from portage.const import GLOBAL_CONFIG_PATH, VCS_DIRS, _DEPCLEAN_LIB_CHECK_DEFAULT
+from portage.const import SUPPORTED_BINPKG_FORMATS, TIMESTAMP_FORMAT
+from portage.dbapi.dep_expand import dep_expand
+from portage.dbapi._expand_new_virt import expand_new_virt
+from portage.dbapi.IndexedPortdb import IndexedPortdb
+from portage.dbapi.IndexedVardb import IndexedVardb
+from portage.dep import Atom, _repo_separator, _slot_separator
+from portage.eclass_cache import hashed_path
+from portage.exception import InvalidAtom, InvalidData, ParseError
+from portage.output import blue, colorize, create_color_func, darkgreen, \
+ red, xtermTitle, xtermTitleReset, yellow
+good = create_color_func("GOOD")
+bad = create_color_func("BAD")
+warn = create_color_func("WARN")
+from portage.package.ebuild._ipc.QueryCommand import QueryCommand
+from portage.package.ebuild.doebuild import _check_temp_dir
+from portage._sets import load_default_config, SETPREFIX
+from portage._sets.base import InternalPackageSet
+from portage.util import cmp_sort_key, writemsg, varexpand, \
+ writemsg_level, writemsg_stdout
+from portage.util.digraph import digraph
+from portage.util.SlotObject import SlotObject
+from portage.util._async.run_main_scheduler import run_main_scheduler
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.global_event_loop import global_event_loop
+from portage._global_updates import _global_updates
+from portage.sync.old_tree_timestamp import old_tree_timestamp_warn
+from portage.localization import _
+from portage.metadata import action_metadata
+from portage.emaint.main import print_results
+
+from _emerge.clear_caches import clear_caches
+from _emerge.countdown import countdown
+from _emerge.create_depgraph_params import create_depgraph_params
+from _emerge.Dependency import Dependency
+from _emerge.depgraph import backtrack_depgraph, depgraph, resume_depgraph
+from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+from _emerge.emergelog import emergelog
+from _emerge.is_valid_package_atom import is_valid_package_atom
+from _emerge.main import profile_check
+from _emerge.MetadataRegen import MetadataRegen
+from _emerge.Package import Package
+from _emerge.ProgressHandler import ProgressHandler
+from _emerge.RootConfig import RootConfig
+from _emerge.Scheduler import Scheduler
+from _emerge.search import search
+from _emerge.SetArg import SetArg
+from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+from _emerge.unmerge import unmerge
+from _emerge.UnmergeDepPriority import UnmergeDepPriority
+from _emerge.UseFlagDisplay import pkg_use_display
+from _emerge.UserQuery import UserQuery
+
+if sys.hexversion >= 0x3000000:
+ long = int
+ _unicode = str
+else:
+ _unicode = unicode
+
+def action_build(emerge_config, trees=DeprecationWarning,
+ mtimedb=DeprecationWarning, myopts=DeprecationWarning,
+ myaction=DeprecationWarning, myfiles=DeprecationWarning, spinner=None):
+
+ if not isinstance(emerge_config, _emerge_config):
+ warnings.warn("_emerge.actions.action_build() now expects "
+ "an _emerge_config instance as the first parameter",
+ DeprecationWarning, stacklevel=2)
+ emerge_config = load_emerge_config(
+ action=myaction, args=myfiles, trees=trees, opts=myopts)
+ adjust_configs(emerge_config.opts, emerge_config.trees)
+
+ settings, trees, mtimedb = emerge_config
+ myopts = emerge_config.opts
+ myaction = emerge_config.action
+ myfiles = emerge_config.args
+
+ if '--usepkgonly' not in myopts:
+ old_tree_timestamp_warn(settings['PORTDIR'], settings)
+
+ # It's best for config updates in /etc/portage to be processed
+ # before we get here, so warn if they're not (bug #267103).
+ chk_updated_cfg_files(settings['EROOT'], ['/etc/portage'])
+
+ # validate the state of the resume data
+ # so that we can make assumptions later.
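+ # A hedged sketch of the expected shape (the entries are hypothetical):
+ #
+ #     mtimedb["resume"] = {
+ #         "mergelist": [["ebuild", "/", "sys-apps/sed-4.5", "merge"], ...],
+ #         "myopts": ["--deep", "--update"],
+ #         "favorites": ["sys-apps/sed"],
+ #     }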
+ for k in ("resume", "resume_backup"):
+ if k not in mtimedb:
+ continue
+ resume_data = mtimedb[k]
+ if not isinstance(resume_data, dict):
+ del mtimedb[k]
+ continue
+ mergelist = resume_data.get("mergelist")
+ if not isinstance(mergelist, list):
+ del mtimedb[k]
+ continue
+ for x in mergelist:
+ if not (isinstance(x, list) and len(x) == 4):
+ continue
+ pkg_type, pkg_root, pkg_key, pkg_action = x
+ if pkg_root not in trees:
+ # Current $ROOT setting differs,
+ # so the list must be stale.
+ mergelist = None
+ break
+ if not mergelist:
+ del mtimedb[k]
+ continue
+ resume_opts = resume_data.get("myopts")
+ if not isinstance(resume_opts, (dict, list)):
+ del mtimedb[k]
+ continue
+ favorites = resume_data.get("favorites")
+ if not isinstance(favorites, list):
+ del mtimedb[k]
+ continue
+
+ resume = False
+ if "--resume" in myopts and \
+ ("resume" in mtimedb or
+ "resume_backup" in mtimedb):
+ resume = True
+ if "resume" not in mtimedb:
+ mtimedb["resume"] = mtimedb["resume_backup"]
+ del mtimedb["resume_backup"]
+ mtimedb.commit()
+ # "myopts" is a list for backward compatibility.
+ resume_opts = mtimedb["resume"].get("myopts", [])
+ if isinstance(resume_opts, list):
+ resume_opts = dict((k,True) for k in resume_opts)
+ for opt in ("--ask", "--color", "--skipfirst", "--tree"):
+ resume_opts.pop(opt, None)
+
+ # Current options always override resume_opts.
+ resume_opts.update(myopts)
+ myopts.clear()
+ myopts.update(resume_opts)
+
+ if "--debug" in myopts:
+ writemsg_level("myopts %s\n" % (myopts,))
+
+ # Adjust config according to options of the command being resumed.
+ for myroot in trees:
+ mysettings = trees[myroot]["vartree"].settings
+ mysettings.unlock()
+ adjust_config(myopts, mysettings)
+ mysettings.lock()
+ del myroot, mysettings
+
+ ldpath_mtimes = mtimedb["ldpath"]
+ favorites=[]
+ buildpkgonly = "--buildpkgonly" in myopts
+ pretend = "--pretend" in myopts
+ fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
+ ask = "--ask" in myopts
+ enter_invalid = '--ask-enter-invalid' in myopts
+ nodeps = "--nodeps" in myopts
+ oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
+ tree = "--tree" in myopts
+ if nodeps and tree:
+ tree = False
+ del myopts["--tree"]
+ portage.writemsg(colorize("WARN", " * ") + \
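+ # A hedged usage sketch (the command and scheduler are hypothetical;
+ # kwargs named in _spawn_kwarg_names are forwarded to spawn()):
+ #
+ #     proc = SpawnProcess(args=[BASH_BINARY, "-c", "echo hello"],
+ #         background=False, scheduler=global_event_loop())
+ #     proc.start()
+ #     proc.wait()    # exit status, also stored in proc.returncode
+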
+ "--tree is broken with --nodeps. Disabling...\n")
+ debug = "--debug" in myopts
+ verbose = "--verbose" in myopts
+ quiet = "--quiet" in myopts
+ myparams = create_depgraph_params(myopts, myaction)
+ mergelist_shown = False
+
+ if pretend or fetchonly:
+ # make the mtimedb readonly
+ mtimedb.filename = None
+ if '--digest' in myopts or 'digest' in settings.features:
+ if '--digest' in myopts:
+ msg = "The --digest option"
+ else:
+ msg = "The FEATURES=digest setting"
+
+ msg += " can prevent corruption from being" + \
+ " noticed. The `repoman manifest` command is the preferred" + \
+ " way to generate manifests and it is capable of doing an" + \
+ " entire repository or category at once."
+ prefix = bad(" * ")
+ writemsg(prefix + "\n")
+ for line in textwrap.wrap(msg, 72):
+ writemsg("%s%s\n" % (prefix, line))
+ writemsg(prefix + "\n")
+
+ if resume:
+ favorites = mtimedb["resume"].get("favorites")
+ if not isinstance(favorites, list):
+ favorites = []
+
+ resume_data = mtimedb["resume"]
+ mergelist = resume_data["mergelist"]
+ if mergelist and "--skipfirst" in myopts:
+ for i, task in enumerate(mergelist):
+ if isinstance(task, list) and \
+ task and task[-1] == "merge":
+ del mergelist[i]
+ break
+
+ success = False
+ mydepgraph = None
+ try:
+ success, mydepgraph, dropped_tasks = resume_depgraph(
+ settings, trees, mtimedb, myopts, myparams, spinner)
+ except (portage.exception.PackageNotFound,
+ depgraph.UnsatisfiedResumeDep) as e:
+ if isinstance(e, depgraph.UnsatisfiedResumeDep):
+ mydepgraph = e.depgraph
+
+ from portage.output import EOutput
+ out = EOutput()
+
+ resume_data = mtimedb["resume"]
+ mergelist = resume_data.get("mergelist")
+ if not isinstance(mergelist, list):
+ mergelist = []
+ if mergelist and (debug or (verbose and not quiet)):
+ out.eerror("Invalid resume list:")
+ out.eerror("")
+ indent = " "
+ for task in mergelist:
+ if isinstance(task, list):
+ out.eerror(indent + str(tuple(task)))
+ out.eerror("")
+
+ if isinstance(e, depgraph.UnsatisfiedResumeDep):
+ out.eerror("One or more packages are either masked or " + \
+ "have missing dependencies:")
+ out.eerror("")
+ indent = " "
+ for dep in e.value:
+ if dep.atom is None:
+ out.eerror(indent + "Masked package:")
+ out.eerror(2 * indent + str(dep.parent))
+ out.eerror("")
+ else:
+ out.eerror(indent + str(dep.atom) + " pulled in by:")
+ out.eerror(2 * indent + str(dep.parent))
+ out.eerror("")
+ msg = "The resume list contains packages " + \
+ "that are either masked or have " + \
+ "unsatisfied dependencies. " + \
+ "Please restart/continue " + \
+ "the operation manually, or use --skipfirst " + \
+ "to skip the first package in the list and " + \
+ "any other packages that may be " + \
+ "masked or have missing dependencies."
+ for line in textwrap.wrap(msg, 72):
+ out.eerror(line)
+ elif isinstance(e, portage.exception.PackageNotFound):
+ out.eerror("An expected package is " + \
+ "not available: %s" % str(e))
+ out.eerror("")
+ msg = "The resume list contains one or more " + \
+ "packages that are no longer " + \
+ "available. Please restart/continue " + \
+ "the operation manually."
+ for line in textwrap.wrap(msg, 72):
+ out.eerror(line)
+
+ if success:
+ if dropped_tasks:
+ portage.writemsg("!!! One or more packages have been " + \
+ "dropped due to\n" + \
+ "!!! masking or unsatisfied dependencies:\n\n",
+ noiselevel=-1)
+ for task, atoms in dropped_tasks.items():
+ if not atoms:
+ writemsg(" %s is masked or unavailable\n" %
+ (task,), noiselevel=-1)
+ else:
+ writemsg(" %s requires %s\n" %
+ (task, ", ".join(atoms)), noiselevel=-1)
+
+ portage.writemsg("\n", noiselevel=-1)
+ del dropped_tasks
+ else:
+ if mydepgraph is not None:
+ mydepgraph.display_problems()
+ if not (ask or pretend):
+ # delete the current list and also the backup
+ # since it's probably stale too.
+ for k in ("resume", "resume_backup"):
+ mtimedb.pop(k, None)
+ mtimedb.commit()
+
+ return 1
+ else:
+ if ("--resume" in myopts):
+ print(darkgreen("emerge: It seems we have nothing to resume..."))
+ return os.EX_OK
+
+ try:
+ success, mydepgraph, favorites = backtrack_depgraph(
+ settings, trees, myopts, myparams, myaction, myfiles, spinner)
+ except portage.exception.PackageSetNotFound as e:
+ root_config = trees[settings['EROOT']]['root_config']
+ display_missing_pkg_set(root_config, e.value)
+ return 1
+
+ if success and mydepgraph.need_config_reload():
+ load_emerge_config(emerge_config=emerge_config)
+ adjust_configs(emerge_config.opts, emerge_config.trees)
+ settings, trees, mtimedb = emerge_config
+
+ # After config reload, the freshly instantiated binarytree
+ # instances need to load remote metadata if --getbinpkg
+ # is enabled. Use getbinpkg_refresh=False to use cached
+ # metadata, since the cache is already fresh.
+ if "--getbinpkg" in emerge_config.opts:
+ for root_trees in emerge_config.trees.values():
+ try:
+ root_trees["bintree"].populate(
+ getbinpkgs=True,
+ getbinpkg_refresh=False)
+ except ParseError as e:
+ writemsg("\n\n!!!%s.\nSee make.conf(5) for more info.\n"
+ % e, noiselevel=-1)
+ return 1
+
+ if "--autounmask-only" in myopts:
+ mydepgraph.display_problems()
+ return 0
+
+ if not success:
+ mydepgraph.display_problems()
+ return 1
+
+ mergecount = None
+ if "--pretend" not in myopts and \
+ ("--ask" in myopts or "--tree" in myopts or \
+ "--verbose" in myopts) and \
+ not ("--quiet" in myopts and "--ask" not in myopts):
+ if "--resume" in myopts:
+ mymergelist = mydepgraph.altlist()
+ if len(mymergelist) == 0:
+ print(colorize("INFORM", "emerge: It seems we have nothing to resume..."))
+ return os.EX_OK
+ favorites = mtimedb["resume"]["favorites"]
+ retval = mydepgraph.display(
+ mydepgraph.altlist(),
+ favorites=favorites)
+ mydepgraph.display_problems()
+ mergelist_shown = True
+ if retval != os.EX_OK:
+ return retval
+ prompt="Would you like to resume merging these packages?"
+ else:
+ retval = mydepgraph.display(
+ mydepgraph.altlist(),
+ favorites=favorites)
+ mydepgraph.display_problems()
+ mergelist_shown = True
+ if retval != os.EX_OK:
+ return retval
+ mergecount=0
+ for x in mydepgraph.altlist():
+ if isinstance(x, Package) and x.operation == "merge":
+ mergecount += 1
+
+ prompt = None
+ if mergecount==0:
+ sets = trees[settings['EROOT']]['root_config'].sets
+ world_candidates = None
+ if "selective" in myparams and \
+ not oneshot and favorites:
+ # Sets that are not world candidates are filtered
+ # out here since the favorites list needs to be
+ # complete for depgraph.loadResumeCommand() to
+ # operate correctly.
+ world_candidates = [x for x in favorites \
+ if not (x.startswith(SETPREFIX) and \
+ not sets[x[1:]].world_candidate)]
+
+ if "selective" in myparams and \
+ not oneshot and world_candidates:
+ # Prompt later, inside saveNomergeFavorites.
+ prompt = None
+ else:
+ print()
+ print("Nothing to merge; quitting.")
+ print()
+ return os.EX_OK
+ elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
+ prompt="Would you like to fetch the source files for these packages?"
+ else:
+ prompt="Would you like to merge these packages?"
+ print()
+ uq = UserQuery(myopts)
+ if prompt is not None and "--ask" in myopts and \
+ uq.query(prompt, enter_invalid) == "No":
+ print()
+ print("Quitting.")
+ print()
+ return 128 + signal.SIGINT
+ # Don't ask again (e.g. when auto-cleaning packages after merge)
+ if mergecount != 0:
+ myopts.pop("--ask", None)
+
+ if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
+ if ("--resume" in myopts):
+ mymergelist = mydepgraph.altlist()
+ if len(mymergelist) == 0:
+ print(colorize("INFORM", "emerge: It seems we have nothing to resume..."))
+ return os.EX_OK
+ favorites = mtimedb["resume"]["favorites"]
+ retval = mydepgraph.display(
+ mydepgraph.altlist(),
+ favorites=favorites)
+ mydepgraph.display_problems()
+ mergelist_shown = True
+ if retval != os.EX_OK:
+ return retval
+ else:
+ retval = mydepgraph.display(
+ mydepgraph.altlist(),
+ favorites=favorites)
+ mydepgraph.display_problems()
+ mergelist_shown = True
+ if retval != os.EX_OK:
+ return retval
+
+ else:
+
+ if not mergelist_shown:
+ # If we haven't already shown the merge list above, at
+ # least show warnings about missed updates and such.
+ mydepgraph.display_problems()
+
+
+ need_write_vardb = not Scheduler. \
+ _opts_no_self_update.intersection(myopts)
+
+ need_write_bindb = not any(x in myopts for x in
+ ("--fetchonly", "--fetch-all-uri",
+ "--pretend", "--usepkgonly")) and \
+ (any("buildpkg" in trees[eroot]["root_config"].
+ settings.features for eroot in trees) or
+ any("buildsyspkg" in trees[eroot]["root_config"].
+ settings.features for eroot in trees))
+
+ if need_write_bindb or need_write_vardb:
+
+ eroots = set()
+ ebuild_eroots = set()
+ for x in mydepgraph.altlist():
+ if isinstance(x, Package) and x.operation == "merge":
+ eroots.add(x.root)
+ if x.type_name == "ebuild":
+ ebuild_eroots.add(x.root)
+
+ for eroot in eroots:
+ if need_write_vardb and \
+ not trees[eroot]["vartree"].dbapi.writable:
+ writemsg_level("!!! %s\n" %
+ _("Read-only file system: %s") %
+ trees[eroot]["vartree"].dbapi._dbroot,
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ if need_write_bindb and eroot in ebuild_eroots and \
+ ("buildpkg" in trees[eroot]["root_config"].
+ settings.features or
+ "buildsyspkg" in trees[eroot]["root_config"].
+ settings.features) and \
+ not trees[eroot]["bintree"].dbapi.writable:
+ writemsg_level("!!! %s\n" %
+ _("Read-only file system: %s") %
+ trees[eroot]["bintree"].pkgdir,
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ if ("--resume" in myopts):
+ favorites=mtimedb["resume"]["favorites"]
+
+ else:
+ if "resume" in mtimedb and \
+ "mergelist" in mtimedb["resume"] and \
+ len(mtimedb["resume"]["mergelist"]) > 1:
+ mtimedb["resume_backup"] = mtimedb["resume"]
+ del mtimedb["resume"]
+ mtimedb.commit()
+
+ mydepgraph.saveNomergeFavorites()
+
+ if mergecount == 0:
+ retval = os.EX_OK
+ else:
+ mergetask = Scheduler(settings, trees, mtimedb, myopts,
+ spinner, favorites=favorites,
+ graph_config=mydepgraph.schedulerGraph())
+
+ del mydepgraph
+ clear_caches(trees)
+
+ retval = mergetask.merge()
+
+ if retval == os.EX_OK and \
+ not (buildpkgonly or fetchonly or pretend):
+ if "yes" == settings.get("AUTOCLEAN"):
+ portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
+ unmerge(trees[settings['EROOT']]['root_config'],
+ myopts, "clean", [],
+ ldpath_mtimes, autoclean=1)
+ else:
+ portage.writemsg_stdout(colorize("WARN", "WARNING:")
+ + " AUTOCLEAN is disabled. This can cause serious"
+ + " problems due to overlapping packages.\n")
+
+ return retval
+
+def action_config(settings, trees, myopts, myfiles):
+ enter_invalid = '--ask-enter-invalid' in myopts
+ uq = UserQuery(myopts)
+ if len(myfiles) != 1:
+ print(red("!!! config can only take a single package atom at this time\n"))
+ sys.exit(1)
+ if not is_valid_package_atom(myfiles[0], allow_repo=True):
+ portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
+ noiselevel=-1)
+ portage.writemsg("!!! Please check ebuild(5) for full details.\n")
+ portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
+ sys.exit(1)
+ print()
+ try:
+ pkgs = trees[settings['EROOT']]['vartree'].dbapi.match(myfiles[0])
+ except portage.exception.AmbiguousPackageName as e:
+ # Multiple matches thrown from cpv_expand
+ pkgs = e.args[0]
+ if len(pkgs) == 0:
+ print("No packages found.\n")
+ sys.exit(0)
+ elif len(pkgs) > 1:
+ if "--ask" in myopts:
+ options = []
+ print("Please select a package to configure:")
+ idx = 0
+ for pkg in pkgs:
+ idx += 1
+ options.append(str(idx))
+ print(options[-1]+") "+pkg)
+ print("X) Cancel")
+ options.append("X")
+ idx = uq.query("Selection?", enter_invalid, responses=options)
+ if idx == "X":
+ sys.exit(128 + signal.SIGINT)
+ pkg = pkgs[int(idx)-1]
+ else:
+ print("The following packages are available:")
+ for pkg in pkgs:
+ print("* "+pkg)
+ print("\nPlease use a specific atom or the --ask option.")
+ sys.exit(1)
+ else:
+ pkg = pkgs[0]
+
+ print()
+ if "--ask" in myopts:
+ if uq.query("Ready to configure %s?" % pkg, enter_invalid) == "No":
+ sys.exit(128 + signal.SIGINT)
+ else:
+ print("Configuring pkg...")
+ print()
+ ebuildpath = trees[settings['EROOT']]['vartree'].dbapi.findname(pkg)
+ mysettings = portage.config(clone=settings)
+ vardb = trees[mysettings['EROOT']]['vartree'].dbapi
+ debug = mysettings.get("PORTAGE_DEBUG") == "1"
+ retval = portage.doebuild(ebuildpath, "config", settings=mysettings,
+ debug=(settings.get("PORTAGE_DEBUG", "") == "1"), cleanup=True,
+ mydbapi = trees[settings['EROOT']]['vartree'].dbapi, tree="vartree")
+ if retval == os.EX_OK:
+ portage.doebuild(ebuildpath, "clean", settings=mysettings,
+ debug=debug, mydbapi=vardb, tree="vartree")
+ print()
+ return retval
+
+def action_depclean(settings, trees, ldpath_mtimes,
+ myopts, action, myfiles, spinner, scheduler=None):
+ # Kill packages that aren't explicitly merged or are required as a
+ # dependency of another package. World file is explicit.
+
+ # Global depclean or prune operations are not very safe when there are
+ # missing dependencies since it's unknown how badly incomplete
+ # the dependency graph is, and we might accidentally remove packages
+ # that should have been pulled into the graph. On the other hand, it's
+ # relatively safe to ignore missing deps when only asked to remove
+ # specific packages.
+
+ msg = []
+ if "preserve-libs" not in settings.features and \
+ myopts.get("--depclean-lib-check", _DEPCLEAN_LIB_CHECK_DEFAULT) == "n":
+ msg.append("Depclean may break link level dependencies. Thus, it is\n")
+ msg.append("recommended to use a tool such as " + good("`revdep-rebuild`") + " (from\n")
+ msg.append("app-portage/gentoolkit) in order to detect such breakage.\n")
+ msg.append("\n")
+ msg.append("Always study the list of packages to be cleaned for any obvious\n")
+ msg.append("mistakes. Packages that are part of the world set will always\n")
+ msg.append("be kept. They can be manually added to this set with\n")
+ msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
+ msg.append("package.provided (see portage(5)) will be removed by\n")
+ msg.append("depclean, even if they are part of the world set.\n")
+ msg.append("\n")
+ msg.append("As a safety measure, depclean will not remove any packages\n")
+ msg.append("unless *all* required dependencies have been resolved. As a\n")
+ msg.append("consequence of this, it often becomes necessary to run \n")
+ msg.append("%s" % good("`emerge --update --newuse --deep @world`")
+ + " prior to depclean.\n")
+
+ if action == "depclean" and "--quiet" not in myopts and not myfiles:
+ portage.writemsg_stdout("\n")
+ for x in msg:
+ portage.writemsg_stdout(colorize("WARN", " * ") + x)
+
+ root_config = trees[settings['EROOT']]['root_config']
+ vardb = root_config.trees['vartree'].dbapi
+
+ args_set = InternalPackageSet(allow_repo=True)
+ if myfiles:
+ args_set.update(myfiles)
+ matched_packages = False
+ for x in args_set:
+ if vardb.match(x):
+ matched_packages = True
+ else:
+ writemsg_level("--- Couldn't find '%s' to %s.\n" % \
+ (x.replace("null/", ""), action),
+ level=logging.WARN, noiselevel=-1)
+ if not matched_packages:
+ writemsg_level(">>> No packages selected for removal by %s\n" % \
+ action)
+ return 0
+
+ # The calculation is done in a separate function so that depgraph
+ # references go out of scope and the corresponding memory
+ # is freed before we call unmerge().
+ rval, cleanlist, ordered, req_pkg_count = \
+ calc_depclean(settings, trees, ldpath_mtimes,
+ myopts, action, args_set, spinner)
+
+ clear_caches(trees)
+
+ if rval != os.EX_OK:
+ return rval
+
+ if cleanlist:
+ rval = unmerge(root_config, myopts, "unmerge",
+ cleanlist, ldpath_mtimes, ordered=ordered,
+ scheduler=scheduler)
+
+ if action == "prune":
+ return rval
+
+ if not cleanlist and "--quiet" in myopts:
+ return rval
+
+ set_atoms = {}
+ for k in ("profile", "system", "selected"):
+ try:
+ set_atoms[k] = root_config.setconfig.getSetAtoms(k)
+ except portage.exception.PackageSetNotFound:
+ # A nested set could not be resolved, so ignore nested sets.
+ set_atoms[k] = root_config.sets[k].getAtoms()
+
+ print("Packages installed: " + str(len(vardb.cpv_all())))
+ print("Packages in world: %d" % len(set_atoms["selected"]))
+ print("Packages in system: %d" % len(set_atoms["system"]))
+ if set_atoms["profile"]:
+ print("Packages in profile: %d" % len(set_atoms["profile"]))
+ print("Required packages: "+str(req_pkg_count))
+ if "--pretend" in myopts:
+ print("Number to remove: "+str(len(cleanlist)))
+ else:
+ print("Number removed: "+str(len(cleanlist)))
+
+ return rval
+
+def calc_depclean(settings, trees, ldpath_mtimes,
+ myopts, action, args_set, spinner):
+ allow_missing_deps = bool(args_set)
+
+ debug = '--debug' in myopts
+ xterm_titles = "notitles" not in settings.features
+ root_len = len(settings["ROOT"])
+ eroot = settings['EROOT']
+ root_config = trees[eroot]["root_config"]
+ psets = root_config.setconfig.psets
+ deselect = myopts.get('--deselect') != 'n'
+ required_sets = {}
+ required_sets['world'] = psets['world']
+
+ # When removing packages, a temporary version of the world 'selected'
+ # set may be used which excludes packages that are intended to be
+ # eligible for removal.
+ selected_set = psets['selected']
+ required_sets['selected'] = selected_set
+ protected_set = InternalPackageSet()
+ protected_set_name = '____depclean_protected_set____'
+ required_sets[protected_set_name] = protected_set
+
+ set_error = False
+ set_atoms = {}
+ for k in ("profile", "system", "selected"):
+ try:
+ set_atoms[k] = root_config.setconfig.getSetAtoms(k)
+ except portage.exception.PackageSetNotFound as e:
+ # A nested set could not be resolved, so ignore nested sets.
+ set_atoms[k] = root_config.sets[k].getAtoms()
+ writemsg_level(_("!!! The set '%s' "
+ "contains a non-existent set named '%s'.\n") %
+ (k, e), level=logging.ERROR, noiselevel=-1)
+ set_error = True
+
+ # Support @profile as an alternative to @system.
+ if not (set_atoms["system"] or set_atoms["profile"]):
+ writemsg_level(_("!!! You have no system list.\n"),
+ level=logging.WARNING, noiselevel=-1)
+
+ if not set_atoms["selected"]:
+ writemsg_level(_("!!! You have no world file.\n"),
+ level=logging.WARNING, noiselevel=-1)
+
+ # Suppress world file warnings unless @world is completely empty,
+ # since having an empty world file can be a valid state.
+ try:
+ world_atoms = bool(root_config.setconfig.getSetAtoms('world'))
+ except portage.exception.PackageSetNotFound as e:
+ writemsg_level(_("!!! The set '%s' "
+ "contains a non-existent set named '%s'.\n") %
+ ("world", e), level=logging.ERROR, noiselevel=-1)
+ set_error = True
+ else:
+ if not world_atoms:
+ writemsg_level(_("!!! Your @world set is empty.\n"),
+ level=logging.ERROR, noiselevel=-1)
+ set_error = True
+
+ if set_error:
+ writemsg_level(_("!!! Aborting due to set configuration "
+ "errors displayed above.\n"),
+ level=logging.ERROR, noiselevel=-1)
+ return 1, [], False, 0
+
+ if action == "depclean":
+ emergelog(xterm_titles, " >>> depclean")
+
+ writemsg_level("\nCalculating dependencies ")
+ resolver_params = create_depgraph_params(myopts, "remove")
+ resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
+ resolver._load_vdb()
+ vardb = resolver._frozen_config.trees[eroot]["vartree"].dbapi
+ real_vardb = trees[eroot]["vartree"].dbapi
+
+ if action == "depclean":
+
+ if args_set:
+
+ if deselect:
+ # Start with an empty set.
+ selected_set = InternalPackageSet()
+ required_sets['selected'] = selected_set
+ # Pull in any sets nested within the selected set.
+ selected_set.update(psets['selected'].getNonAtoms())
+
+ # Pull in everything that's installed but not matched
+ # by an argument atom since we don't want to clean any
+ # package if something depends on it.
+ for pkg in vardb:
+ if spinner:
+ spinner.update()
+
+ try:
+ if args_set.findAtomForPackage(pkg) is None:
+ protected_set.add("=" + pkg.cpv)
+ continue
+ except portage.exception.InvalidDependString as e:
+ show_invalid_depstring_notice(pkg, _unicode(e))
+ del e
+ protected_set.add("=" + pkg.cpv)
+ continue
+
+ elif action == "prune":
+
+ if deselect:
+ # Start with an empty set.
+ selected_set = InternalPackageSet()
+ required_sets['selected'] = selected_set
+ # Pull in any sets nested within the selected set.
+ selected_set.update(psets['selected'].getNonAtoms())
+
+ # Pull in everything that's installed since we don't
+ # want to prune a package if something depends on it.
+ protected_set.update(vardb.cp_all())
+
+ if not args_set:
+
+ # Try to prune everything that's slotted.
+ for cp in vardb.cp_all():
+ if len(vardb.cp_list(cp)) > 1:
+ args_set.add(cp)
+
+ # Remove atoms from world that match installed packages
+ # that are also matched by argument atoms, but do not remove
+ # them if they match the highest installed version.
+ for pkg in vardb:
+ if spinner is not None:
+ spinner.update()
+ pkgs_for_cp = vardb.match_pkgs(Atom(pkg.cp))
+ if not pkgs_for_cp or pkg not in pkgs_for_cp:
+ raise AssertionError("package expected in matches: " + \
+ "cp = %s, cpv = %s matches = %s" % \
+ (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
+
+ highest_version = pkgs_for_cp[-1]
+ if pkg == highest_version:
+ # pkg is the highest version
+ protected_set.add("=" + pkg.cpv)
+ continue
+
+ if len(pkgs_for_cp) <= 1:
+ raise AssertionError("more packages expected: " + \
+ "cp = %s, cpv = %s matches = %s" % \
+ (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
+
+ try:
+ if args_set.findAtomForPackage(pkg) is None:
+ protected_set.add("=" + pkg.cpv)
+ continue
+ except portage.exception.InvalidDependString as e:
+ show_invalid_depstring_notice(pkg, _unicode(e))
+ del e
+ protected_set.add("=" + pkg.cpv)
+ continue
+
+ if resolver._frozen_config.excluded_pkgs:
+ excluded_set = resolver._frozen_config.excluded_pkgs
+ required_sets['__excluded__'] = InternalPackageSet()
+
+ for pkg in vardb:
+ if spinner:
+ spinner.update()
+
+ try:
+ if excluded_set.findAtomForPackage(pkg):
+ required_sets['__excluded__'].add("=" + pkg.cpv)
+ except portage.exception.InvalidDependString as e:
+ show_invalid_depstring_notice(pkg, _unicode(e))
+ del e
+ required_sets['__excluded__'].add("=" + pkg.cpv)
+
+ success = resolver._complete_graph(required_sets={eroot:required_sets})
+ writemsg_level("\b\b... done!\n")
+
+ resolver.display_problems()
+
+ if not success:
+ return 1, [], False, 0
+
+ def unresolved_deps():
+
+ soname_deps = set()
+ unresolvable = set()
+ for dep in resolver._dynamic_config._initially_unsatisfied_deps:
+ if isinstance(dep.parent, Package) and \
+ (dep.priority > UnmergeDepPriority.SOFT):
+ if dep.atom.soname:
+ soname_deps.add((dep.atom, dep.parent.cpv))
+ else:
+ unresolvable.add((dep.atom, dep.parent.cpv))
+
+ if soname_deps:
+ # Generally, broken soname dependencies can safely be
+ # suppressed by a REQUIRES_EXCLUDE setting in the ebuild,
+ # so they should only trigger a warning message.
+ prefix = warn(" * ")
+ msg = []
+ msg.append("Broken soname dependencies found:")
+ msg.append("")
+ for atom, parent in soname_deps:
+ msg.append(" %s required by:" % (atom,))
+ msg.append(" %s" % (parent,))
+ msg.append("")
+
+ writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
+ level=logging.WARNING, noiselevel=-1)
+
+ if not unresolvable:
+ return False
+
+ if unresolvable and not allow_missing_deps:
+
+ if "--debug" in myopts:
+ writemsg("\ndigraph:\n\n", noiselevel=-1)
+ resolver._dynamic_config.digraph.debug_print()
+ writemsg("\n", noiselevel=-1)
+
+ prefix = bad(" * ")
+ msg = []
+ msg.append("Dependencies could not be completely resolved due to")
+ msg.append("the following required packages not being installed:")
+ msg.append("")
+ for atom, parent in unresolvable:
+ # For readability, we want to display the atom with USE
+ # conditionals evaluated whenever possible. However,
+ # there is a very special case where the atom does not
+ # match because the unevaluated form contains one or
+ # more flags for which the target package has missing
+ # IUSE, but due to conditionals those flags are only
+ # visible in the unevaluated form of the atom. In this
+ # case, we must display the unevaluated atom, so that
+ # the user can see the conditional USE deps that would
+ # otherwise be invisible. Use Atom(_unicode(atom)) to
+ # test for a package where this case would matter. This
+ # is not necessarily the same as atom.without_use,
+ # since Atom(_unicode(atom)) may still contain some
+ # USE dependencies that remain after evaluation of
+ # conditionals.
+ if atom.package and atom != atom.unevaluated_atom and \
+ vardb.match(Atom(_unicode(atom))):
+ msg.append(" %s (%s) pulled in by:" %
+ (atom.unevaluated_atom, atom))
+ else:
+ msg.append(" %s pulled in by:" % (atom,))
+ msg.append(" %s" % (parent,))
+ msg.append("")
+ msg.extend(textwrap.wrap(
+ "Have you forgotten to do a complete update prior " + \
+ "to depclean? The most comprehensive command for this " + \
+ "purpose is as follows:", 65
+ ))
+ msg.append("")
+ msg.append(" " + \
+ good("emerge --update --newuse --deep --with-bdeps=y @world"))
+ msg.append("")
+ msg.extend(textwrap.wrap(
+ "Note that the --with-bdeps=y option is not required in " + \
+ "many situations. Refer to the emerge manual page " + \
+ "(run `man emerge`) for more information about " + \
+ "--with-bdeps.", 65
+ ))
+ msg.append("")
+ msg.extend(textwrap.wrap(
+ "Also, note that it may be necessary to manually uninstall " + \
+ "packages that no longer exist in the portage tree, since " + \
+ "it may not be possible to satisfy their dependencies.", 65
+ ))
+ if action == "prune":
+ msg.append("")
+ msg.append("If you would like to ignore " + \
+ "dependencies then use %s." % good("--nodeps"))
+ writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return True
+ return False
+
+ if unresolved_deps():
+ return 1, [], False, 0
+
+ graph = resolver._dynamic_config.digraph.copy()
+ required_pkgs_total = 0
+ for node in graph:
+ if isinstance(node, Package):
+ required_pkgs_total += 1
+
+ def show_parents(child_node):
+ parent_atoms = \
+ resolver._dynamic_config._parent_atoms.get(child_node, [])
+
+ # Never display the special internal protected_set.
+ parent_atoms = [parent_atom for parent_atom in parent_atoms
+ if not (isinstance(parent_atom[0], SetArg) and
+ parent_atom[0].name == protected_set_name)]
+
+ if not parent_atoms:
+ # With --prune, the highest version can be pulled in without any
+ # real parent since all installed packages are pulled in. In that
+ # case there's nothing to show here.
+ return
+ parent_atom_dict = {}
+ for parent, atom in parent_atoms:
+ parent_atom_dict.setdefault(parent, []).append(atom)
+
+ parent_strs = []
+ for parent, atoms in parent_atom_dict.items():
+ # Display package atoms and soname
+ # atoms in separate groups.
+ atoms = sorted(atoms, reverse=True,
+ key=operator.attrgetter('package'))
+ parent_strs.append("%s requires %s" %
+ (getattr(parent, "cpv", parent),
+ ", ".join(_unicode(atom) for atom in atoms)))
+ parent_strs.sort()
+ msg = []
+ msg.append(" %s pulled in by:\n" % (child_node.cpv,))
+ for parent_str in parent_strs:
+ msg.append(" %s\n" % (parent_str,))
+ msg.append("\n")
+ portage.writemsg_stdout("".join(msg), noiselevel=-1)
+
+ def cmp_pkg_cpv(pkg1, pkg2):
+ """Sort Package instances by cpv."""
+ if pkg1.cpv > pkg2.cpv:
+ return 1
+ elif pkg1.cpv == pkg2.cpv:
+ return 0
+ else:
+ return -1
+
+ def create_cleanlist():
+
+ if "--debug" in myopts:
+ writemsg("\ndigraph:\n\n", noiselevel=-1)
+ graph.debug_print()
+ writemsg("\n", noiselevel=-1)
+
+ pkgs_to_remove = []
+
+ if action == "depclean":
+ if args_set:
+
+ for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
+ arg_atom = None
+ try:
+ arg_atom = args_set.findAtomForPackage(pkg)
+ except portage.exception.InvalidDependString:
+ # this error has already been displayed by now
+ continue
+
+ if arg_atom:
+ if pkg not in graph:
+ pkgs_to_remove.append(pkg)
+ elif "--verbose" in myopts:
+ show_parents(pkg)
+
+ else:
+ for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
+ if pkg not in graph:
+ pkgs_to_remove.append(pkg)
+ elif "--verbose" in myopts:
+ show_parents(pkg)
+
+ elif action == "prune":
+
+ for atom in args_set:
+ for pkg in vardb.match_pkgs(atom):
+ if pkg not in graph:
+ pkgs_to_remove.append(pkg)
+ elif "--verbose" in myopts:
+ show_parents(pkg)
+
+ if not pkgs_to_remove:
+ writemsg_level(
+ ">>> No packages selected for removal by %s\n" % action)
+ if "--verbose" not in myopts:
+ writemsg_level(
+ ">>> To see reverse dependencies, use %s\n" % \
+ good("--verbose"))
+ if action == "prune":
+ writemsg_level(
+ ">>> To ignore dependencies, use %s\n" % \
+ good("--nodeps"))
+
+ return pkgs_to_remove
+
+ cleanlist = create_cleanlist()
+ clean_set = set(cleanlist)
+
+ depclean_lib_check = cleanlist and real_vardb._linkmap is not None and \
+ myopts.get("--depclean-lib-check", _DEPCLEAN_LIB_CHECK_DEFAULT) != "n"
+ preserve_libs = "preserve-libs" in settings.features
+ preserve_libs_restrict = False
+
+ if depclean_lib_check and preserve_libs:
+ for pkg in cleanlist:
+ if "preserve-libs" in pkg.restrict:
+ preserve_libs_restrict = True
+ break
+
+ if depclean_lib_check and \
+ (preserve_libs_restrict or not preserve_libs):
+
+ # Check if any of these packages are the sole providers of libraries
+ # with consumers that have not been selected for removal. If so, these
+ # packages and any dependencies need to be added to the graph.
+ linkmap = real_vardb._linkmap
+ consumer_cache = {}
+ provider_cache = {}
+ consumer_map = {}
+
+ writemsg_level(">>> Checking for lib consumers...\n")
+
+ for pkg in cleanlist:
+
+ if preserve_libs and "preserve-libs" not in pkg.restrict:
+ # Any needed libraries will be preserved
+ # when this package is unmerged, so there's
+ # no need to account for it here.
+ continue
+
+ pkg_dblink = real_vardb._dblink(pkg.cpv)
+ consumers = {}
+
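+ # Record, for each library object installed by this package, the files
+ # that still link against it; entries unknown to the linkmap raise
+ # KeyError and are skipped.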
+ for lib in pkg_dblink.getcontents():
+ lib = lib[root_len:]
+ lib_key = linkmap._obj_key(lib)
+ lib_consumers = consumer_cache.get(lib_key)
+ if lib_consumers is None:
+ try:
+ lib_consumers = linkmap.findConsumers(lib_key)
+ except KeyError:
+ continue
+ consumer_cache[lib_key] = lib_consumers
+ if lib_consumers:
+ consumers[lib_key] = lib_consumers
+
+ if not consumers:
+ continue
+
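+ # Discard consumers that are installed by the package being removed
+ # itself, since they will be unmerged along with the library.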
+ for lib, lib_consumers in list(consumers.items()):
+ for consumer_file in list(lib_consumers):
+ if pkg_dblink.isowner(consumer_file):
+ lib_consumers.remove(consumer_file)
+ if not lib_consumers:
+ del consumers[lib]
+
+ if not consumers:
+ continue
+
+ for lib, lib_consumers in consumers.items():
+
+ soname = linkmap.getSoname(lib)
+
+ consumer_providers = []
+ for lib_consumer in lib_consumers:
+ providers = provider_cache.get(lib_consumer)
+ if providers is None:
+ providers = linkmap.findProviders(lib_consumer)
+ provider_cache[lib_consumer] = providers
+ if soname not in providers:
+ # Why does this happen?
+ continue
+ consumer_providers.append(
+ (lib_consumer, providers[soname]))
+
+ consumers[lib] = consumer_providers
+
+ consumer_map[pkg] = consumers
+
+ if consumer_map:
+
+ search_files = set()
+ for consumers in consumer_map.values():
+ for lib, consumer_providers in consumers.items():
+ for lib_consumer, providers in consumer_providers:
+ search_files.add(lib_consumer)
+ search_files.update(providers)
+
+ writemsg_level(">>> Assigning files to packages...\n")
+ file_owners = {}
+ for f in search_files:
+ owner_set = set()
+ for owner in linkmap.getOwners(f):
+ owner_dblink = real_vardb._dblink(owner)
+ if owner_dblink.exists():
+ owner_set.add(owner_dblink)
+ if owner_set:
+ file_owners[f] = owner_set
+
+ for pkg, consumers in list(consumer_map.items()):
+ for lib, consumer_providers in list(consumers.items()):
+ lib_consumers = set()
+
+ for lib_consumer, providers in consumer_providers:
+ owner_set = file_owners.get(lib_consumer)
+ provider_dblinks = set()
+ provider_pkgs = set()
+
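+ # When the soname has multiple providers, check whether any provider
+ # package will remain installed after depclean; if so, this consumer
+ # will not break and the provider need not be preserved.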
+ if len(providers) > 1:
+ for provider in providers:
+ provider_set = file_owners.get(provider)
+ if provider_set is not None:
+ provider_dblinks.update(provider_set)
+
+ if len(provider_dblinks) > 1:
+ for provider_dblink in provider_dblinks:
+ provider_pkg = resolver._pkg(
+ provider_dblink.mycpv, "installed",
+ root_config, installed=True)
+ if provider_pkg not in clean_set:
+ provider_pkgs.add(provider_pkg)
+
+ if provider_pkgs:
+ continue
+
+ if owner_set is not None:
+ lib_consumers.update(owner_set)
+
+ for consumer_dblink in list(lib_consumers):
+ if resolver._pkg(consumer_dblink.mycpv, "installed",
+ root_config, installed=True) in clean_set:
+ lib_consumers.remove(consumer_dblink)
+ continue
+
+ if lib_consumers:
+ consumers[lib] = lib_consumers
+ else:
+ del consumers[lib]
+ if not consumers:
+ del consumer_map[pkg]
+
+ if consumer_map:
+ # TODO: Implement a package set for rebuilding consumer packages.
+
+ msg = "In order to avoid breakage of link level " + \
+ "dependencies, one or more packages will not be removed. " + \
+ "This can be solved by rebuilding " + \
+ "the packages that pulled them in."
+
+ prefix = bad(" * ")
+ writemsg_level("".join(prefix + "%s\n" % line for \
+ line in textwrap.wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
+
+ msg = []
+ for pkg in sorted(consumer_map, key=cmp_sort_key(cmp_pkg_cpv)):
+ consumers = consumer_map[pkg]
+ consumer_libs = {}
+ for lib, lib_consumers in consumers.items():
+ for consumer in lib_consumers:
+ consumer_libs.setdefault(
+ consumer.mycpv, set()).add(linkmap.getSoname(lib))
+ unique_consumers = set(chain(*consumers.values()))
+ unique_consumers = sorted(consumer.mycpv \
+ for consumer in unique_consumers)
+ msg.append("")
+ msg.append(" %s pulled in by:" % (pkg.cpv,))
+ for consumer in unique_consumers:
+ libs = consumer_libs[consumer]
+ msg.append(" %s needs %s" % \
+ (consumer, ', '.join(sorted(libs))))
+ msg.append("")
+ writemsg_level("".join(prefix + "%s\n" % line for line in msg),
+ level=logging.WARNING, noiselevel=-1)
+
+ # Add lib providers to the graph as children of lib consumers,
+ # and also add any dependencies pulled in by the provider.
+ writemsg_level(">>> Adding lib providers to graph...\n")
+
+ for pkg, consumers in consumer_map.items():
+ for consumer_dblink in set(chain(*consumers.values())):
+ consumer_pkg = resolver._pkg(consumer_dblink.mycpv,
+ "installed", root_config, installed=True)
+ if not resolver._add_pkg(pkg,
+ Dependency(parent=consumer_pkg,
+ priority=UnmergeDepPriority(runtime=True,
+ runtime_slot_op=True),
+ root=pkg.root)):
+ resolver.display_problems()
+ return 1, [], False, 0
+
+ writemsg_level("\nCalculating dependencies ")
+ success = resolver._complete_graph(
+ required_sets={eroot:required_sets})
+ writemsg_level("\b\b... done!\n")
+ resolver.display_problems()
+ if not success:
+ return 1, [], False, 0
+ if unresolved_deps():
+ return 1, [], False, 0
+
+ graph = resolver._dynamic_config.digraph.copy()
+ required_pkgs_total = 0
+ for node in graph:
+ if isinstance(node, Package):
+ required_pkgs_total += 1
+ cleanlist = create_cleanlist()
+ if not cleanlist:
+ return 0, [], False, required_pkgs_total
+ clean_set = set(cleanlist)
+
+ if clean_set:
+ writemsg_level(">>> Calculating removal order...\n")
+ # Use a topological sort to create an unmerge order such that
+ # each package is unmerged before its dependencies. This is
+ # necessary to avoid breaking things that may need to run
+ # during pkg_prerm or pkg_postrm phases.
+
+ # Create a new graph to account for dependencies between the
+ # packages being unmerged.
+ graph = digraph()
+ del cleanlist[:]
+
+ runtime = UnmergeDepPriority(runtime=True)
+ runtime_post = UnmergeDepPriority(runtime_post=True)
+ buildtime = UnmergeDepPriority(buildtime=True)
+ priority_map = {
+ "RDEPEND": runtime,
+ "PDEPEND": runtime_post,
+ "BDEPEND": buildtime,
+ "HDEPEND": buildtime,
+ "DEPEND": buildtime,
+ }
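+ # Build-time edges get the lowest (soft) priority, so they are the
+ # first to be ignored below if circular dependencies must be broken.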
+
+ for node in clean_set:
+ graph.add(node, None)
+ for dep_type in Package._dep_keys:
+ depstr = node._metadata[dep_type]
+ if not depstr:
+ continue
+ priority = priority_map[dep_type]
+
+ if debug:
+ writemsg_level("\nParent: %s\n"
+ % (node,), noiselevel=-1, level=logging.DEBUG)
+ writemsg_level( "Depstring: %s\n"
+ % (depstr,), noiselevel=-1, level=logging.DEBUG)
+ writemsg_level( "Priority: %s\n"
+ % (priority,), noiselevel=-1, level=logging.DEBUG)
+
+ try:
+ atoms = resolver._select_atoms(eroot, depstr,
+ myuse=node.use.enabled, parent=node,
+ priority=priority)[node]
+ except portage.exception.InvalidDependString:
+ # Ignore invalid deps of packages that will
+ # be uninstalled anyway.
+ continue
+
+ if debug:
+ writemsg_level("Candidates: [%s]\n" % \
+ ', '.join("'%s'" % (x,) for x in atoms),
+ noiselevel=-1, level=logging.DEBUG)
+
+ for atom in atoms:
+ if not isinstance(atom, portage.dep.Atom):
+ # Ignore invalid atoms returned from dep_check().
+ continue
+ if atom.blocker:
+ continue
+ matches = vardb.match_pkgs(atom)
+ if not matches:
+ continue
+ for child_node in matches:
+ if child_node in clean_set:
+
+ mypriority = priority.copy()
+ if atom.slot_operator_built:
+ if mypriority.buildtime:
+ mypriority.buildtime_slot_op = True
+ if mypriority.runtime:
+ mypriority.runtime_slot_op = True
+
+ graph.add(child_node, node, priority=mypriority)
+
+ if debug:
+ writemsg_level("\nunmerge digraph:\n\n",
+ noiselevel=-1, level=logging.DEBUG)
+ graph.debug_print()
+ writemsg_level("\n", noiselevel=-1, level=logging.DEBUG)
+
+ ordered = True
+ if len(graph.order) == len(graph.root_nodes()):
+ # If there are no dependencies between packages
+ # let unmerge() group them by cat/pn.
+ ordered = False
+ cleanlist = [pkg.cpv for pkg in graph.order]
+ else:
+ # Order nodes from lowest to highest overall reference count for
+ # optimal root node selection (this can help minimize issues
+ # with unaccounted implicit dependencies).
+ node_refcounts = {}
+ for node in graph.order:
+ node_refcounts[node] = len(graph.parent_nodes(node))
+ def cmp_reference_count(node1, node2):
+ return node_refcounts[node1] - node_refcounts[node2]
+ graph.order.sort(key=cmp_sort_key(cmp_reference_count))
+
+ ignore_priority_range = [None]
+ ignore_priority_range.extend(
+ range(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
+ while graph:
+ for ignore_priority in ignore_priority_range:
+ nodes = graph.root_nodes(ignore_priority=ignore_priority)
+ if nodes:
+ break
+ if not nodes:
+ raise AssertionError("no root nodes")
+ if ignore_priority is not None:
+ # Some deps have been dropped due to circular dependencies,
+ # so only pop one node in order to minimize the number that
+ # are dropped.
+ del nodes[1:]
+ for node in nodes:
+ graph.remove(node)
+ cleanlist.append(node.cpv)
+
+ return 0, cleanlist, ordered, required_pkgs_total
+ return 0, [], False, required_pkgs_total
+
+def action_deselect(settings, trees, opts, atoms):
+ enter_invalid = '--ask-enter-invalid' in opts
+ root_config = trees[settings['EROOT']]['root_config']
+ world_set = root_config.sets['selected']
+ if not hasattr(world_set, 'update'):
+ writemsg_level("World @selected set does not appear to be mutable.\n",
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ pretend = '--pretend' in opts
+ locked = False
+ if not pretend and hasattr(world_set, 'lock'):
+ world_set.lock()
+ locked = True
+ try:
+ world_set.load()
+ world_atoms = world_set.getAtoms()
+ vardb = root_config.trees["vartree"].dbapi
+ expanded_atoms = set(atoms)
+
+ for atom in atoms:
+ if not atom.startswith(SETPREFIX):
+ if atom.cp.startswith("null/"):
+ # try to expand category from world set
+ null_cat, pn = portage.catsplit(atom.cp)
+ for world_atom in world_atoms:
+ cat, world_pn = portage.catsplit(world_atom.cp)
+ if pn == world_pn:
+ expanded_atoms.add(
+ Atom(atom.replace("null", cat, 1),
+ allow_repo=True, allow_wildcard=True))
+
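+ # Also expand the argument to cp:slot atoms for every installed match,
+ # so that slotted world entries are discarded as well.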
+ for cpv in vardb.match(atom):
+ pkg = vardb._pkg_str(cpv, None)
+ expanded_atoms.add(Atom("%s:%s" % (pkg.cp, pkg.slot)))
+
+ discard_atoms = set()
+ for atom in world_set:
+ for arg_atom in expanded_atoms:
+ if arg_atom.startswith(SETPREFIX):
+ if atom.startswith(SETPREFIX) and \
+ arg_atom == atom:
+ discard_atoms.add(atom)
+ break
+ else:
+ if not atom.startswith(SETPREFIX) and \
+ arg_atom.intersects(atom) and \
+ not (arg_atom.slot and not atom.slot) and \
+ not (arg_atom.repo and not atom.repo):
+ discard_atoms.add(atom)
+ break
+ if discard_atoms:
+ for atom in sorted(discard_atoms):
+
+ if pretend:
+ action_desc = "Would remove"
+ else:
+ action_desc = "Removing"
+
+ if atom.startswith(SETPREFIX):
+ filename = "world_sets"
+ else:
+ filename = "world"
+
+ writemsg_stdout(
+ ">>> %s %s from \"%s\" favorites file...\n" %
+ (action_desc, colorize("INFORM", _unicode(atom)),
+ filename), noiselevel=-1)
+
+ if '--ask' in opts:
+ prompt = "Would you like to remove these " + \
+ "packages from your world favorites?"
+ uq = UserQuery(opts)
+ if uq.query(prompt, enter_invalid) == 'No':
+ return 128 + signal.SIGINT
+
+ remaining = set(world_set)
+ remaining.difference_update(discard_atoms)
+ if not pretend:
+ world_set.replace(remaining)
+ else:
+ print(">>> No matching atoms found in \"world\" favorites file...")
+ finally:
+ if locked:
+ world_set.unlock()
+ return os.EX_OK
+
+class _info_pkgs_ver(object):
+ def __init__(self, ver, repo_suffix, provide_suffix):
+ self.ver = ver
+ self.repo_suffix = repo_suffix
+ self.provide_suffix = provide_suffix
+
+ def __lt__(self, other):
+ return portage.versions.vercmp(self.ver, other.ver) < 0
+
+ def toString(self):
+ """
+ This may return unicode if repo_name contains unicode.
+ Don't use __str__ and str() since unicode triggers compatibility
+ issues between python 2.x and 3.x.
+ """
+ return self.ver + self.repo_suffix + self.provide_suffix
+
+def action_info(settings, trees, myopts, myfiles):
+
+ # See if we can find any packages installed matching the strings
+ # passed on the command line
+ mypkgs = []
+ eroot = settings['EROOT']
+ vardb = trees[eroot]["vartree"].dbapi
+ portdb = trees[eroot]['porttree'].dbapi
+ bindb = trees[eroot]["bintree"].dbapi
+ repos = portdb.settings.repositories
+ for x in myfiles:
+ any_match = False
+ cp_exists = bool(vardb.match(x.cp))
+ installed_match = vardb.match(x)
+ for installed in installed_match:
+ mypkgs.append((installed, "installed"))
+ any_match = True
+
+ if any_match:
+ continue
+
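+ # Nothing installed matched, so fall back to ebuild and (with --usepkg)
+ # binary packages that define a pkg_info phase.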
+ for db, pkg_type in ((portdb, "ebuild"), (bindb, "binary")):
+ if pkg_type == "binary" and "--usepkg" not in myopts:
+ continue
+
+ # Use match instead of cp_list, to account for old-style virtuals.
+ if not cp_exists and db.match(x.cp):
+ cp_exists = True
+ # Search for masked packages too.
+ if not cp_exists and hasattr(db, "xmatch") and \
+ db.xmatch("match-all", x.cp):
+ cp_exists = True
+
+ matches = db.match(x)
+ matches.reverse()
+ for match in matches:
+ if pkg_type == "binary":
+ if db.bintree.isremote(match):
+ continue
+ auxkeys = ["EAPI", "DEFINED_PHASES"]
+ metadata = dict(zip(auxkeys, db.aux_get(match, auxkeys)))
+ if metadata["EAPI"] not in ("0", "1", "2", "3") and \
+ "info" in metadata["DEFINED_PHASES"].split():
+ mypkgs.append((match, pkg_type))
+ break
+
+ if not cp_exists:
+ xinfo = '"%s"' % x.unevaluated_atom
+ # Discard null/ from failed cpv_expand category expansion.
+ xinfo = xinfo.replace("null/", "")
+ if settings["ROOT"] != "/":
+ xinfo = "%s for %s" % (xinfo, eroot)
+ writemsg("\nemerge: there are no ebuilds to satisfy %s.\n" %
+ colorize("INFORM", xinfo), noiselevel=-1)
+
+ if myopts.get("--misspell-suggestions", "y") != "n":
+
+ writemsg("\nemerge: searching for similar names..."
+ , noiselevel=-1)
+
+ search_index = myopts.get("--search-index", "y") != "n"
+ dbs = [IndexedVardb(vardb) if search_index else vardb]
+ #if "--usepkgonly" not in myopts:
+ dbs.append(IndexedPortdb(portdb) if search_index else portdb)
+ if "--usepkg" in myopts:
+ dbs.append(bindb)
+
+ matches = similar_name_search(dbs, x)
+
+ if len(matches) == 1:
+ writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
+ , noiselevel=-1)
+ elif len(matches) > 1:
+ writemsg(
+ "\nemerge: Maybe you meant any of these: %s?\n" % \
+ (", ".join(matches),), noiselevel=-1)
+ else:
+ # Generally, this would only happen if
+ # all dbapis are empty.
+ writemsg(" nothing similar found.\n"
+ , noiselevel=-1)
+
+ return 1
+
+ output_buffer = []
+ append = output_buffer.append
+ root_config = trees[settings['EROOT']]['root_config']
+ chost = settings.get("CHOST")
+
+ append(getportageversion(settings["PORTDIR"], None,
+ settings.profile_path, chost,
+ trees[settings['EROOT']]["vartree"].dbapi))
+
+ header_width = 65
+ header_title = "System Settings"
+ if myfiles:
+ append(header_width * "=")
+ append(header_title.rjust(int(header_width/2 + len(header_title)/2)))
+ append(header_width * "=")
+ append("System uname: %s" % (platform.platform(aliased=1),))
+
+ vm_info = get_vm_info()
+ if "ram.total" in vm_info:
+ line = "%-9s %10d total" % ("KiB Mem:", vm_info["ram.total"] // 1024)
+ if "ram.free" in vm_info:
+ line += ",%10d free" % (vm_info["ram.free"] // 1024,)
+ append(line)
+ if "swap.total" in vm_info:
+ line = "%-9s %10d total" % ("KiB Swap:", vm_info["swap.total"] // 1024)
+ if "swap.free" in vm_info:
+ line += ",%10d free" % (vm_info["swap.free"] // 1024,)
+ append(line)
+
+ for repo in repos:
+ last_sync = portage.grabfile(os.path.join(repo.location, "metadata", "timestamp.chk"))
+ head_commit = None
+ if last_sync:
+ append("Timestamp of repository %s: %s" % (repo.name, last_sync[0]))
+ if repo.sync_type:
+ sync = portage.sync.module_controller.get_class(repo.sync_type)()
+ options = { 'repo': repo }
+ try:
+ head_commit = sync.retrieve_head(options=options)
+ except NotImplementedError:
+ head_commit = (1, False)
+ if head_commit and head_commit[0] == os.EX_OK:
+ append("Head commit of repository %s: %s" % (repo.name, head_commit[1]))
+
+ # Searching contents for the /bin/sh provider is somewhat
+ # slow. Therefore, use the basename of the symlink target
+ # to locate the package. If this fails, then only the
+ # basename of the symlink target will be displayed. So,
+ # typical output is something like "sh bash 4.2_p53". Since
+ # realpath is used to resolve symlinks recursively, this
+ # approach is also able to handle multiple levels of symlinks
+ # such as /bin/sh -> bb -> busybox. Note that we do not parse
+ # the output of "/bin/sh --version" because many shells
+ # do not have a --version option.
+ basename = os.path.basename(os.path.realpath(os.path.join(
+ os.sep, portage.const.EPREFIX, "bin", "sh")))
+ try:
+ Atom("null/%s" % basename)
+ except InvalidAtom:
+ matches = None
+ else:
+ try:
+ # Try a match against the basename, which should work for
+ # busybox and most shells.
+ matches = (trees[trees._running_eroot]["vartree"].dbapi.
+ match(basename))
+ except portage.exception.AmbiguousPackageName:
+ # If the name is ambiguous, then restrict our match
+ # to the app-shells category.
+ matches = (trees[trees._running_eroot]["vartree"].dbapi.
+ match("app-shells/%s" % basename))
+
+ if matches:
+ pkg = matches[-1]
+ name = pkg.cp
+ version = pkg.version
+ # Omit app-shells category from the output.
+ if name.startswith("app-shells/"):
+ name = name[len("app-shells/"):]
+ sh_str = "%s %s" % (name, version)
+ else:
+ sh_str = basename
+
+ append("sh %s" % sh_str)
+
+ ld_names = []
+ if chost:
+ ld_names.append(chost + "-ld")
+ ld_names.append("ld")
+ for name in ld_names:
+ try:
+ proc = subprocess.Popen([name, "--version"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ pass
+ else:
+ output = _unicode_decode(proc.communicate()[0]).splitlines()
+ if proc.wait() == os.EX_OK and output:
+ append("ld %s" % (output[0]))
+ break
+
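+ # Report the distcc version and whether the distcc feature is enabled.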
+ try:
+ proc = subprocess.Popen(["distcc", "--version"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ output = (1, None)
+ else:
+ output = _unicode_decode(proc.communicate()[0]).rstrip("\n")
+ output = (proc.wait(), output)
+ if output[0] == os.EX_OK:
+ distcc_str = output[1].split("\n", 1)[0]
+ if "distcc" in settings.features:
+ distcc_str += " [enabled]"
+ else:
+ distcc_str += " [disabled]"
+ append(distcc_str)
+
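+ # Report the ccache version and whether the ccache feature is enabled.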
+ try:
+ proc = subprocess.Popen(["ccache", "-V"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ output = (1, None)
+ else:
+ output = _unicode_decode(proc.communicate()[0]).rstrip("\n")
+ output = (proc.wait(), output)
+ if output[0] == os.EX_OK:
+ ccache_str = output[1].split("\n", 1)[0]
+ if "ccache" in settings.features:
+ ccache_str += " [enabled]"
+ else:
+ ccache_str += " [disabled]"
+ append(ccache_str)
+
+ myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
+ "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
+ myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
+ atoms = []
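+ # Expand new-style virtuals so that the actual providers are reported.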
+ for x in myvars:
+ try:
+ x = Atom(x)
+ except InvalidAtom:
+ append("%-20s %s" % (x+":", "[NOT VALID]"))
+ else:
+ for atom in expand_new_virt(vardb, x):
+ if not atom.blocker:
+ atoms.append((x, atom))
+
+ myvars = sorted(set(atoms))
+
+ cp_map = {}
+ cp_max_len = 0
+
+ for orig_atom, x in myvars:
+ pkg_matches = vardb.match(x)
+
+ versions = []
+ for cpv in pkg_matches:
+ matched_cp = portage.versions.cpv_getkey(cpv)
+ ver = portage.versions.cpv_getversion(cpv)
+ ver_map = cp_map.setdefault(matched_cp, {})
+ prev_match = ver_map.get(ver)
+ if prev_match is not None:
+ if prev_match.provide_suffix:
+ # prefer duplicate matches that include
+ # additional virtual provider info
+ continue
+
+ if len(matched_cp) > cp_max_len:
+ cp_max_len = len(matched_cp)
+ repo = vardb.aux_get(cpv, ["repository"])[0]
+ if repo:
+ repo_suffix = _repo_separator + repo
+ else:
+ repo_suffix = _repo_separator + "<unknown repository>"
+
+ if matched_cp == orig_atom.cp:
+ provide_suffix = ""
+ else:
+ provide_suffix = " (%s)" % (orig_atom,)
+
+ ver_map[ver] = _info_pkgs_ver(ver, repo_suffix, provide_suffix)
+
+ for cp in sorted(cp_map):
+ versions = sorted(cp_map[cp].values())
+ versions = ", ".join(ver.toString() for ver in versions)
+ append("%s %s" % \
+ ((cp + ":").ljust(cp_max_len + 1), versions))
+
+ append("Repositories:\n")
+ for repo in repos:
+ append(repo.info_string())
+
+ installed_sets = sorted(s for s in
+ root_config.sets['selected'].getNonAtoms() if s.startswith(SETPREFIX))
+ if installed_sets:
+ sets_line = "Installed sets: "
+ sets_line += ", ".join(installed_sets)
+ append(sets_line)
+
+ if "--verbose" in myopts:
+ myvars = list(settings)
+ else:
+ myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
+ 'DISTDIR', 'ENV_UNSET', 'PKGDIR', 'PORTAGE_TMPDIR',
+ 'PORTAGE_BINHOST', 'PORTAGE_BUNZIP2_COMMAND',
+ 'PORTAGE_BZIP2_COMMAND',
+ 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
+ 'ACCEPT_KEYWORDS', 'ACCEPT_LICENSE', 'FEATURES',
+ 'EMERGE_DEFAULT_OPTS']
+
+ myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
+
+ myvars_ignore_defaults = {
+ 'PORTAGE_BZIP2_COMMAND' : 'bzip2',
+ }
+
+ skipped_vars = ['PORTAGE_REPOSITORIES']
+ # Deprecated variables
+ skipped_vars.extend(('PORTDIR', 'PORTDIR_OVERLAY', 'SYNC'))
+
+ myvars = set(myvars)
+ myvars.difference_update(skipped_vars)
+ myvars = sorted(myvars)
+
+ use_expand = settings.get('USE_EXPAND', '').split()
+ use_expand.sort()
+ unset_vars = []
+
+ for k in myvars:
+ v = settings.get(k)
+ if v is not None:
+ if k != "USE":
+ default = myvars_ignore_defaults.get(k)
+ if default is not None and \
+ default == v:
+ continue
+ append('%s="%s"' % (k, v))
+ else:
+ use = set(v.split())
+ for varname in use_expand:
+ flag_prefix = varname.lower() + "_"
+ for f in list(use):
+ if f.startswith(flag_prefix):
+ use.remove(f)
+ use = list(use)
+ use.sort()
+ use = ['USE="%s"' % " ".join(use)]
+ for varname in use_expand:
+ myval = settings.get(varname)
+ if myval:
+ use.append('%s="%s"' % (varname, myval))
+ append(" ".join(use))
+ else:
+ unset_vars.append(k)
+ if unset_vars:
+ append("Unset: "+", ".join(unset_vars))
+ append("")
+ append("")
+ writemsg_stdout("\n".join(output_buffer),
+ noiselevel=-1)
+ del output_buffer[:]
+
+ # If some packages were found...
+ if mypkgs:
+ # Get our global settings (we only print stuff if it varies from
+ # the current config)
+ mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
+ auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
+ auxkeys.append('DEFINED_PHASES')
+ pkgsettings = portage.config(clone=settings)
+
+ # Loop through each package
+ # Only print settings if they differ from global settings
+ header_title = "Package Settings"
+ append(header_width * "=")
+ append(header_title.rjust(int(header_width/2 + len(header_title)/2)))
+ append(header_width * "=")
+ append("")
+ writemsg_stdout("\n".join(output_buffer),
+ noiselevel=-1)
+ del output_buffer[:]
+
+ out = portage.output.EOutput()
+ for mypkg in mypkgs:
+ cpv = mypkg[0]
+ pkg_type = mypkg[1]
+ # Get all package specific variables
+ if pkg_type == "installed":
+ metadata = dict(zip(auxkeys, vardb.aux_get(cpv, auxkeys)))
+ elif pkg_type == "ebuild":
+ metadata = dict(zip(auxkeys, portdb.aux_get(cpv, auxkeys)))
+ elif pkg_type == "binary":
+ metadata = dict(zip(auxkeys, bindb.aux_get(cpv, auxkeys)))
+
+ pkg = Package(built=(pkg_type!="ebuild"), cpv=cpv,
+ installed=(pkg_type=="installed"), metadata=zip(Package.metadata_keys,
+ (metadata.get(x, '') for x in Package.metadata_keys)),
+ root_config=root_config, type_name=pkg_type)
+
+ if pkg_type == "installed":
+ append("\n%s was built with the following:" % \
+ colorize("INFORM", str(pkg.cpv + _repo_separator + pkg.repo)))
+ elif pkg_type == "ebuild":
+ append("\n%s would be built with the following:" % \
+ colorize("INFORM", str(pkg.cpv + _repo_separator + pkg.repo)))
+ elif pkg_type == "binary":
+ append("\n%s (non-installed binary) was built with the following:" % \
+ colorize("INFORM", str(pkg.cpv + _repo_separator + pkg.repo)))
+
+ append('%s' % pkg_use_display(pkg, myopts))
+ if pkg_type == "installed":
+ for myvar in mydesiredvars:
+ if metadata[myvar].split() != settings.get(myvar, '').split():
+ append("%s=\"%s\"" % (myvar, metadata[myvar]))
+ append("")
+ append("")
+ writemsg_stdout("\n".join(output_buffer),
+ noiselevel=-1)
+ del output_buffer[:]
+
+ if metadata['DEFINED_PHASES']:
+ if 'info' not in metadata['DEFINED_PHASES'].split():
+ continue
+
+ writemsg_stdout(">>> Attempting to run pkg_info() for '%s'\n"
+ % pkg.cpv, noiselevel=-1)
+
+ if pkg_type == "installed":
+ ebuildpath = vardb.findname(pkg.cpv)
+ elif pkg_type == "ebuild":
+ ebuildpath = portdb.findname(pkg.cpv, myrepo=pkg.repo)
+ elif pkg_type == "binary":
+ tbz2_file = bindb.bintree.getname(pkg.cpv)
+ ebuild_file_name = pkg.cpv.split("/")[1] + ".ebuild"
+ ebuild_file_contents = portage.xpak.tbz2(tbz2_file).getfile(ebuild_file_name)
+ tmpdir = tempfile.mkdtemp()
+ ebuildpath = os.path.join(tmpdir, ebuild_file_name)
+ # getfile() returns raw bytes, so write the ebuild in binary mode.
+ with open(ebuildpath, 'wb') as ebuild_file:
+ ebuild_file.write(ebuild_file_contents)
+
+ if not ebuildpath or not os.path.exists(ebuildpath):
+ out.ewarn("No ebuild found for '%s'" % pkg.cpv)
+ continue
+
+ if pkg_type == "installed":
+ portage.doebuild(ebuildpath, "info", settings=pkgsettings,
+ debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
+ mydbapi=trees[settings['EROOT']]["vartree"].dbapi,
+ tree="vartree")
+ elif pkg_type == "ebuild":
+ portage.doebuild(ebuildpath, "info", settings=pkgsettings,
+ debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
+ mydbapi=trees[settings['EROOT']]['porttree'].dbapi,
+ tree="porttree")
+ elif pkg_type == "binary":
+ portage.doebuild(ebuildpath, "info", settings=pkgsettings,
+ debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
+ mydbapi=trees[settings['EROOT']]["bintree"].dbapi,
+ tree="bintree")
+ shutil.rmtree(tmpdir)
+
+def action_regen(settings, portdb, max_jobs, max_load):
+ xterm_titles = "notitles" not in settings.features
+ emergelog(xterm_titles, " === regen")
+ #regenerate cache entries
+ sys.stdout.flush()
+
+ regen = MetadataRegen(portdb, max_jobs=max_jobs,
+ max_load=max_load, main=True)
+
+ signum = run_main_scheduler(regen)
+ if signum is not None:
+ sys.exit(128 + signum)
+
+ portage.writemsg_stdout("done!\n")
+ return regen.returncode
+
+def action_search(root_config, myopts, myfiles, spinner):
+ if not myfiles:
+ print("emerge: no search terms provided.")
+ else:
+ searchinstance = search(root_config,
+ spinner, "--searchdesc" in myopts,
+ "--quiet" not in myopts, "--usepkg" in myopts,
+ "--usepkgonly" in myopts,
+ search_index=myopts.get("--search-index", "y") != "n",
+ search_similarity=myopts.get("--search-similarity"),
+ fuzzy=myopts.get("--fuzzy-search") != "n",
+ )
+ for mysearch in myfiles:
+ try:
+ searchinstance.execute(mysearch)
+ except re.error as comment:
+ print("\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment ))
+ sys.exit(1)
+ searchinstance.output()
+
+def action_sync(emerge_config, trees=DeprecationWarning,
+ mtimedb=DeprecationWarning, opts=DeprecationWarning,
+ action=DeprecationWarning):
+
+ if not isinstance(emerge_config, _emerge_config):
+ warnings.warn("_emerge.actions.action_sync() now expects "
+ "an _emerge_config instance as the first parameter",
+ DeprecationWarning, stacklevel=2)
+ emerge_config = load_emerge_config(
+ action=action, args=[], trees=trees, opts=opts)
+
+ syncer = SyncRepos(emerge_config)
+ return_messages = "--quiet" not in emerge_config.opts
+ options = {'return-messages' : return_messages}
+ if emerge_config.args:
+ options['repo'] = emerge_config.args
+ success, msgs = syncer.repo(options=options)
+ else:
+ success, msgs = syncer.auto_sync(options=options)
+ if return_messages:
+ print_results(msgs)
+
+ return os.EX_OK if success else 1
+
+
+def action_uninstall(settings, trees, ldpath_mtimes,
+ opts, action, files, spinner):
+ # For backward compat, some actions do not require leading '='.
+ ignore_missing_eq = action in ('clean', 'rage-clean', 'unmerge')
+ root = settings['ROOT']
+ eroot = settings['EROOT']
+ vardb = trees[settings['EROOT']]['vartree'].dbapi
+ valid_atoms = []
+ lookup_owners = []
+
+ # Ensure atoms are valid before calling unmerge().
+ # For backward compat, leading '=' is not required.
+ for x in files:
+ if is_valid_package_atom(x, allow_repo=True) or \
+ (ignore_missing_eq and is_valid_package_atom('=' + x)):
+
+ try:
+ atom = dep_expand(x, mydb=vardb, settings=settings)
+ except portage.exception.AmbiguousPackageName as e:
+ msg = "The short ebuild name \"" + x + \
+ "\" is ambiguous. Please specify " + \
+ "one of the following " + \
+ "fully-qualified ebuild names instead:"
+ for line in textwrap.wrap(msg, 70):
+ writemsg_level("!!! %s\n" % (line,),
+ level=logging.ERROR, noiselevel=-1)
+ for i in e.args[0]:
+ writemsg_level(" %s\n" % colorize("INFORM", i),
+ level=logging.ERROR, noiselevel=-1)
+ writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
+ return 1
+ else:
+ if atom.use and atom.use.conditional:
+ writemsg_level(
+ ("\n\n!!! '%s' contains a conditional " + \
+ "which is not allowed.\n") % (x,),
+ level=logging.ERROR, noiselevel=-1)
+ writemsg_level(
+ "!!! Please check ebuild(5) for full details.\n",
+ level=logging.ERROR)
+ return 1
+ valid_atoms.append(atom)
+
+ elif x.startswith(os.sep):
+ if not x.startswith(eroot):
+ writemsg_level(("!!! '%s' does not start with" + \
+ " $EROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
+ return 1
+ # Queue these up since it's most efficient to handle
+ # multiple files in a single iter_owners() call.
+ lookup_owners.append(x)
+
+ elif x.startswith(SETPREFIX) and action == "deselect":
+ valid_atoms.append(x)
+
+ elif "*" in x:
+ try:
+ ext_atom = Atom(x, allow_repo=True, allow_wildcard=True)
+ except InvalidAtom:
+ msg = []
+ msg.append("'%s' is not a valid package atom." % (x,))
+ msg.append("Please check ebuild(5) for full details.")
+ writemsg_level("".join("!!! %s\n" % line for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
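+ # Convert every installed package matched by the wildcard into a
+ # concrete atom, preserving any slot or repo restriction.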
+ for cpv in vardb.cpv_all():
+ if portage.match_from_list(ext_atom, [cpv]):
+ require_metadata = False
+ atom = portage.cpv_getkey(cpv)
+ if ext_atom.operator == '=*':
+ atom = "=" + atom + "-" + \
+ portage.versions.cpv_getversion(cpv)
+ if ext_atom.slot:
+ atom += _slot_separator + ext_atom.slot
+ require_metadata = True
+ if ext_atom.repo:
+ atom += _repo_separator + ext_atom.repo
+ require_metadata = True
+
+ atom = Atom(atom, allow_repo=True)
+ if require_metadata:
+ try:
+ cpv = vardb._pkg_str(cpv, ext_atom.repo)
+ except (KeyError, InvalidData):
+ continue
+ if not portage.match_from_list(atom, [cpv]):
+ continue
+
+ valid_atoms.append(atom)
+
+ else:
+ msg = []
+ msg.append("'%s' is not a valid package atom." % (x,))
+ msg.append("Please check ebuild(5) for full details.")
+ writemsg_level("".join("!!! %s\n" % line for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
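+ # Map the given file paths to the packages that own them.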
+ if lookup_owners:
+ relative_paths = []
+ search_for_multiple = False
+ if len(lookup_owners) > 1:
+ search_for_multiple = True
+
+ for x in lookup_owners:
+ if not search_for_multiple and os.path.isdir(x):
+ search_for_multiple = True
+ relative_paths.append(x[len(root)-1:])
+
+ owners = set()
+ for pkg, relative_path in \
+ vardb._owners.iter_owners(relative_paths):
+ owners.add(pkg.mycpv)
+ if not search_for_multiple:
+ break
+
+ if owners:
+ for cpv in owners:
+ pkg = vardb._pkg_str(cpv, None)
+ atom = '%s:%s' % (pkg.cp, pkg.slot)
+ valid_atoms.append(portage.dep.Atom(atom))
+ else:
+ writemsg_level(("!!! '%s' is not claimed " + \
+ "by any package.\n") % lookup_owners[0],
+ level=logging.WARNING, noiselevel=-1)
+
+ if files and not valid_atoms:
+ return 1
+
+ if action == 'unmerge' and \
+ '--quiet' not in opts and \
+ '--quiet-unmerge-warn' not in opts:
+ msg = "This action can remove important packages! " + \
+ "In order to be safer, use " + \
+ "`emerge -pv --depclean <atom>` to check for " + \
+ "reverse dependencies before removing packages."
+ out = portage.output.EOutput()
+ for line in textwrap.wrap(msg, 72):
+ out.ewarn(line)
+
+ if action == 'deselect':
+ return action_deselect(settings, trees, opts, valid_atoms)
+
+ # Use the same logic as the Scheduler class to trigger redirection
+ # of ebuild pkg_prerm/postrm phase output to logs as appropriate
+ # for options such as --jobs, --quiet and --quiet-build.
+ max_jobs = opts.get("--jobs", 1)
+ background = (max_jobs is True or max_jobs > 1 or
+ "--quiet" in opts or opts.get("--quiet-build") == "y")
+ sched_iface = SchedulerInterface(global_event_loop(),
+ is_background=lambda: background)
+
+ if background:
+ settings.unlock()
+ settings["PORTAGE_BACKGROUND"] = "1"
+ settings.backup_changes("PORTAGE_BACKGROUND")
+ settings.lock()
+
+ if action in ('clean', 'rage-clean', 'unmerge') or \
+ (action == 'prune' and "--nodeps" in opts):
+ # When given a list of atoms, unmerge them in the order given.
+ ordered = action in ('rage-clean', 'unmerge')
+ rval = unmerge(trees[settings['EROOT']]['root_config'], opts, action,
+ valid_atoms, ldpath_mtimes, ordered=ordered,
+ scheduler=sched_iface)
+ else:
+ rval = action_depclean(settings, trees, ldpath_mtimes,
+ opts, action, valid_atoms, spinner,
+ scheduler=sched_iface)
+
+ return rval
+
+def adjust_configs(myopts, trees):
+ for myroot, mytrees in trees.items():
+ mysettings = trees[myroot]["vartree"].settings
+ mysettings.unlock()
+
+ # For --usepkgonly mode, propagate settings from the binary package
+ # database, so that it's possible to operate without dependence on
+ # a local ebuild repository and profile.
+ if ('--usepkgonly' in myopts and
+ mytrees['bintree']._propagate_config(mysettings)):
+ # Also propagate changes to the portdbapi doebuild_settings
+ # attribute which is used by Package instances for USE
+ # calculations (in support of --binpkg-respect-use).
+ mytrees['porttree'].dbapi.doebuild_settings = \
+ portage.config(clone=mysettings)
+
+ adjust_config(myopts, mysettings)
+ mysettings.lock()
+
+def adjust_config(myopts, settings):
+ """Make emerge specific adjustments to the config."""
+
+ # Kill noauto as it will break merges otherwise.
+ if "noauto" in settings.features:
+ settings.features.remove('noauto')
+
+ fail_clean = myopts.get('--fail-clean')
+ if fail_clean is not None:
+ if fail_clean is True and \
+ 'fail-clean' not in settings.features:
+ settings.features.add('fail-clean')
+ elif fail_clean == 'n' and \
+ 'fail-clean' in settings.features:
+ settings.features.remove('fail-clean')
+
+ CLEAN_DELAY = 5
+ try:
+ CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
+ except ValueError as e:
+ portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
+ settings["CLEAN_DELAY"], noiselevel=-1)
+ settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
+ settings.backup_changes("CLEAN_DELAY")
+
+ EMERGE_WARNING_DELAY = 10
+ try:
+ EMERGE_WARNING_DELAY = int(settings.get(
+ "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
+ except ValueError as e:
+ portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
+ settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
+ settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
+ settings.backup_changes("EMERGE_WARNING_DELAY")
+
+ buildpkg = myopts.get("--buildpkg")
+ if buildpkg is True:
+ settings.features.add("buildpkg")
+ elif buildpkg == 'n':
+ settings.features.discard("buildpkg")
+
+ if "--quiet" in myopts:
+ settings["PORTAGE_QUIET"]="1"
+ settings.backup_changes("PORTAGE_QUIET")
+
+ if "--verbose" in myopts:
+ settings["PORTAGE_VERBOSE"] = "1"
+ settings.backup_changes("PORTAGE_VERBOSE")
+
+ # Set so that configs will be merged regardless of remembered status
+ if ("--noconfmem" in myopts):
+ settings["NOCONFMEM"]="1"
+ settings.backup_changes("NOCONFMEM")
+
+ # Set various debug markers... They should be merged somehow.
+ PORTAGE_DEBUG = 0
+ try:
+ PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
+ if PORTAGE_DEBUG not in (0, 1):
+ portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
+ PORTAGE_DEBUG, noiselevel=-1)
+ portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
+ noiselevel=-1)
+ PORTAGE_DEBUG = 0
+ except ValueError as e:
+ portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
+ settings["PORTAGE_DEBUG"], noiselevel=-1)
+ del e
+ if "--debug" in myopts:
+ PORTAGE_DEBUG = 1
+ settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
+ settings.backup_changes("PORTAGE_DEBUG")
+
+ if settings.get("NOCOLOR") not in ("yes","true"):
+ portage.output.havecolor = 1
+
+ # The explicit --color < y | n > option overrides the NOCOLOR environment
+ # variable and stdout auto-detection.
+ if "--color" in myopts:
+ if "y" == myopts["--color"]:
+ portage.output.havecolor = 1
+ settings["NOCOLOR"] = "false"
+ else:
+ portage.output.havecolor = 0
+ settings["NOCOLOR"] = "true"
+ settings.backup_changes("NOCOLOR")
+ elif settings.get('TERM') == 'dumb' or \
+ not sys.stdout.isatty():
+ portage.output.havecolor = 0
+ settings["NOCOLOR"] = "true"
+ settings.backup_changes("NOCOLOR")
+
+ if "--pkg-format" in myopts:
+ settings["PORTAGE_BINPKG_FORMAT"] = myopts["--pkg-format"]
+ settings.backup_changes("PORTAGE_BINPKG_FORMAT")
+
+def display_missing_pkg_set(root_config, set_name):
+
+ msg = []
+ msg.append(("emerge: There are no sets to satisfy '%s'. " + \
+ "The following sets exist:") % \
+ colorize("INFORM", set_name))
+ msg.append("")
+
+ for s in sorted(root_config.sets):
+ msg.append(" %s" % s)
+ msg.append("")
+
+ writemsg_level("".join("%s\n" % l for l in msg),
+ level=logging.ERROR, noiselevel=-1)
+
+def relative_profile_path(portdir, abs_profile):
+ realpath = os.path.realpath(abs_profile)
+ basepath = os.path.realpath(os.path.join(portdir, "profiles"))
+ if realpath.startswith(basepath):
+ profilever = realpath[1 + len(basepath):]
+ else:
+ profilever = None
+ return profilever
+
+def getportageversion(portdir, _unused, profile, chost, vardb):
+ pythonver = 'python %d.%d.%d-%s-%d' % sys.version_info[:]
+ profilever = None
+ repositories = vardb.settings.repositories
+ if profile:
+ profilever = relative_profile_path(portdir, profile)
+ if profilever is None:
+ try:
+ for parent in portage.grabfile(
+ os.path.join(profile, 'parent')):
+ profilever = relative_profile_path(portdir,
+ os.path.join(profile, parent))
+ if profilever is not None:
+ break
+ colon = parent.find(":")
+ if colon != -1:
+ p_repo_name = parent[:colon]
+ try:
+ p_repo_loc = \
+ repositories.get_location_for_name(p_repo_name)
+ except KeyError:
+ pass
+ else:
+ profilever = relative_profile_path(p_repo_loc,
+ os.path.join(p_repo_loc, 'profiles',
+ parent[colon+1:]))
+ if profilever is not None:
+ break
+ except portage.exception.PortageException:
+ pass
+
+ if profilever is None:
+ try:
+ profilever = "!" + os.readlink(profile)
+ except OSError:
+ pass
+
+ if profilever is None:
+ profilever = "unavailable"
+
+ libcver = []
+ libclist = set()
+ for atom in expand_new_virt(vardb, portage.const.LIBC_PACKAGE_ATOM):
+ if not atom.blocker:
+ libclist.update(vardb.match(atom))
+ if libclist:
+ for cpv in sorted(libclist):
+ libc_split = portage.catpkgsplit(cpv)[1:]
+ if libc_split[-1] == "r0":
+ libc_split = libc_split[:-1]
+ libcver.append("-".join(libc_split))
+ else:
+ libcver = ["unavailable"]
+
+ gccver = getgccversion(chost)
+ unameout=platform.release()+" "+platform.machine()
+
+ return "Portage %s (%s, %s, %s, %s, %s)" % \
+ (portage.VERSION, pythonver, profilever, gccver, ",".join(libcver), unameout)
+
+
+class _emerge_config(SlotObject):
+
+ __slots__ = ('action', 'args', 'opts',
+ 'running_config', 'target_config', 'trees')
+
+ # Support unpack as tuple, for load_emerge_config backward compatibility.
+ def __iter__(self):
+ yield self.target_config.settings
+ yield self.trees
+ yield self.target_config.mtimedb
+
+ def __getitem__(self, index):
+ return list(self)[index]
+
+ def __len__(self):
+ return 3
+
+def load_emerge_config(emerge_config=None, env=None, **kargs):
+
+ if emerge_config is None:
+ emerge_config = _emerge_config(**kargs)
+
+ env = os.environ if env is None else env
+ kwargs = {'env': env}
+ for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT"),
+ ("sysroot", "SYSROOT"), ("eprefix", "EPREFIX")):
+ v = env.get(envvar)
+ if v and v.strip():
+ kwargs[k] = v
+ emerge_config.trees = portage.create_trees(trees=emerge_config.trees,
+ **kwargs)
+
+ for root_trees in emerge_config.trees.values():
+ settings = root_trees["vartree"].settings
+ settings._init_dirs()
+ setconfig = load_default_config(settings, root_trees)
+ root_config = RootConfig(settings, root_trees, setconfig)
+ if "root_config" in root_trees:
+ # Propagate changes to the existing instance,
+ # which may be referenced by a depgraph.
+ root_trees["root_config"].update(root_config)
+ else:
+ root_trees["root_config"] = root_config
+
+ target_eroot = emerge_config.trees._target_eroot
+ emerge_config.target_config = \
+ emerge_config.trees[target_eroot]['root_config']
+ emerge_config.target_config.mtimedb = portage.MtimeDB(
+ os.path.join(target_eroot, portage.CACHE_PATH, "mtimedb"))
+ emerge_config.running_config = emerge_config.trees[
+ emerge_config.trees._running_eroot]['root_config']
+ QueryCommand._db = emerge_config.trees
+
+ return emerge_config
+
+def getgccversion(chost=None):
+ """
+ rtype: C{str}
+ return: the current in-use gcc version
+ """
+
+ gcc_ver_command = ['gcc', '-dumpversion']
+ gcc_ver_prefix = 'gcc-'
+
+ gcc_not_found_error = red(
+ "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
+ "!!! to update the environment of this terminal and possibly\n" +
+ "!!! other terminals also.\n"
+ )
+
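+ # Prefer the gcc-config selection for CHOST, then fall back to the
+ # CHOST-prefixed gcc and finally a plain gcc invocation.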
+ if chost:
+ try:
+ proc = subprocess.Popen(["gcc-config", "-c"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ myoutput = None
+ mystatus = 1
+ else:
+ myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
+ mystatus = proc.wait()
+ if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
+ return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
+
+ try:
+ proc = subprocess.Popen(
+ [chost + "-" + gcc_ver_command[0]] + gcc_ver_command[1:],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ myoutput = None
+ mystatus = 1
+ else:
+ myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
+ mystatus = proc.wait()
+ if mystatus == os.EX_OK:
+ return gcc_ver_prefix + myoutput
+
+ try:
+ proc = subprocess.Popen(gcc_ver_command,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ myoutput = None
+ mystatus = 1
+ else:
+ myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
+ mystatus = proc.wait()
+ if mystatus == os.EX_OK:
+ return gcc_ver_prefix + myoutput
+
+ portage.writemsg(gcc_not_found_error, noiselevel=-1)
+ return "[unavailable]"
+
+# Warn about features that may confuse users and
+# lead them to report invalid bugs.
+_emerge_features_warn = frozenset(['keeptemp', 'keepwork'])
+
+def validate_ebuild_environment(trees):
+ features_warn = set()
+ for myroot in trees:
+ settings = trees[myroot]["vartree"].settings
+ settings.validate()
+ features_warn.update(
+ _emerge_features_warn.intersection(settings.features))
+
+ if features_warn:
+ msg = "WARNING: The FEATURES variable contains one " + \
+ "or more values that should be disabled under " + \
+ "normal circumstances: %s" % " ".join(features_warn)
+ out = portage.output.EOutput()
+ for line in textwrap.wrap(msg, 65):
+ out.ewarn(line)
+
+ check_locale()
+
+def check_procfs():
+ procfs_path = '/proc'
+ if platform.system() not in ("Linux",) or \
+ os.path.ismount(procfs_path):
+ return os.EX_OK
+ msg = "It seems that %s is not mounted. You have been warned." % procfs_path
+ writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+def config_protect_check(trees):
+ for root, root_trees in trees.items():
+ settings = root_trees["root_config"].settings
+ if not settings.get("CONFIG_PROTECT"):
+ msg = "!!! CONFIG_PROTECT is empty"
+ if settings["ROOT"] != "/":
+ msg += " for '%s'" % root
+ msg += "\n"
+ writemsg_level(msg, level=logging.WARN, noiselevel=-1)
+
+def apply_priorities(settings):
+ ionice(settings)
+ nice(settings)
+
+def nice(settings):
+ try:
+ os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
+ except (OSError, ValueError) as e:
+ out = portage.output.EOutput()
+ out.eerror("Failed to change nice value to '%s'" % \
+ settings.get("PORTAGE_NICENESS", "0"))
+ out.eerror("%s\n" % str(e))
+
+def ionice(settings):
+
+ ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
+ if ionice_cmd:
+ ionice_cmd = portage.util.shlex_split(ionice_cmd)
+ if not ionice_cmd:
+ return
+
+ variables = {"PID" : str(os.getpid())}
+ cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
+
+ try:
+ rval = portage.process.spawn(cmd, env=os.environ)
+ except portage.exception.CommandNotFound:
+ # The OS kernel probably doesn't support ionice,
+ # so return silently.
+ return
+
+ if rval != os.EX_OK:
+ out = portage.output.EOutput()
+ out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
+ out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
+
+def setconfig_fallback(root_config):
+ setconfig = root_config.setconfig
+ setconfig._create_default_config()
+ setconfig._parse(update=True)
+ root_config.sets = setconfig.getSets()
+
+def get_missing_sets(root_config):
+ # emerge requires existence of "world", "selected", and "system"
+ missing_sets = []
+
+ for s in ("selected", "system", "world",):
+ if s not in root_config.sets:
+ missing_sets.append(s)
+
+ return missing_sets
+
+def missing_sets_warning(root_config, missing_sets):
+ if len(missing_sets) > 2:
+ missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
+ missing_sets_str += ', and "%s"' % missing_sets[-1]
+ elif len(missing_sets) == 2:
+ missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
+ else:
+ missing_sets_str = '"%s"' % missing_sets[-1]
+ msg = ["emerge: incomplete set configuration, " + \
+ "missing set(s): %s" % missing_sets_str]
+ if root_config.sets:
+ msg.append(" sets defined: %s" % ", ".join(root_config.sets))
+ global_config_path = portage.const.GLOBAL_CONFIG_PATH
+ if portage.const.EPREFIX:
+ global_config_path = os.path.join(portage.const.EPREFIX,
+ portage.const.GLOBAL_CONFIG_PATH.lstrip(os.sep))
+ msg.append(" This usually means that '%s'" % \
+ (os.path.join(global_config_path, "sets/portage.conf"),))
+ msg.append(" is missing or corrupt.")
+ msg.append(" Falling back to default world and system set configuration!!!")
+ for line in msg:
+ writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
+
+def ensure_required_sets(trees):
+ warning_shown = False
+ for root_trees in trees.values():
+ missing_sets = get_missing_sets(root_trees["root_config"])
+ if missing_sets and not warning_shown:
+ warning_shown = True
+ missing_sets_warning(root_trees["root_config"], missing_sets)
+ if missing_sets:
+ setconfig_fallback(root_trees["root_config"])
+
+def expand_set_arguments(myfiles, myaction, root_config):
+ retval = os.EX_OK
+ setconfig = root_config.setconfig
+
+ sets = setconfig.getSets()
+
+ # In order to know exactly which atoms/sets should be added to the
+ # world file, the depgraph performs set expansion later. It will get
+ # confused about where the atoms came from if it's not allowed to
+ # expand them itself.
+ do_not_expand = myaction is None
+ newargs = []
+ for a in myfiles:
+ if a in ("system", "world"):
+ newargs.append(SETPREFIX+a)
+ else:
+ newargs.append(a)
+ myfiles = newargs
+ del newargs
+ newargs = []
+
+ # separators for set arguments
+ ARG_START = "{"
+ ARG_END = "}"
+
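+ # Parse optional "{key=value,...}" argument blocks appended to set
+ # names, feed the options to setconfig, and strip them from the name.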
+ for i in range(0, len(myfiles)):
+ if myfiles[i].startswith(SETPREFIX):
+ start = 0
+ end = 0
+ x = myfiles[i][len(SETPREFIX):]
+ newset = ""
+ while x:
+ start = x.find(ARG_START)
+ end = x.find(ARG_END)
+ if start > 0 and start < end:
+ namepart = x[:start]
+ argpart = x[start+1:end]
+
+ # TODO: implement proper quoting
+ args = argpart.split(",")
+ options = {}
+ for a in args:
+ if "=" in a:
+ k, v = a.split("=", 1)
+ options[k] = v
+ else:
+ options[a] = "True"
+ setconfig.update(namepart, options)
+ newset += (x[:start-len(namepart)]+namepart)
+ x = x[end+len(ARG_END):]
+ else:
+ newset += x
+ x = ""
+ myfiles[i] = SETPREFIX+newset
+
+ sets = setconfig.getSets()
+
+ # display errors that occurred while loading the SetConfig instance
+ for e in setconfig.errors:
+ print(colorize("BAD", "Error during set creation: %s" % e))
+
+ unmerge_actions = ("unmerge", "prune", "clean", "depclean", "rage-clean")
+
+ for a in myfiles:
+ if a.startswith(SETPREFIX):
+ s = a[len(SETPREFIX):]
+ if s not in sets:
+ display_missing_pkg_set(root_config, s)
+ return (None, 1)
+ if s == "installed":
+ msg = ("The @installed set is not recommended when "
+ "updating packages because it will often "
+ "introduce unsolved blocker conflicts. Please "
+ "refer to bug #387059 for details.")
+ out = portage.output.EOutput()
+ for line in textwrap.wrap(msg, 57):
+ out.ewarn(line)
+ setconfig.active.append(s)
+
+ if do_not_expand:
+ # Loading sets can be slow, so skip it here, in order
+ # to allow the depgraph to indicate progress with the
+ # spinner while sets are loading (bug #461412).
+ newargs.append(a)
+ continue
+
+ try:
+ set_atoms = setconfig.getSetAtoms(s)
+ except portage.exception.PackageSetNotFound as e:
+ writemsg_level(("emerge: the given set '%s' " + \
+ "contains a non-existent set named '%s'.\n") % \
+ (s, e), level=logging.ERROR, noiselevel=-1)
+ if s in ('world', 'selected') and \
+ SETPREFIX + e.value in sets['selected']:
+ writemsg_level(("Use `emerge --deselect %s%s` to "
+ "remove this set from world_sets.\n") %
+ (SETPREFIX, e,), level=logging.ERROR,
+ noiselevel=-1)
+ return (None, 1)
+ if myaction in unmerge_actions and \
+ not sets[s].supportsOperation("unmerge"):
+ writemsg_level("emerge: the given set '%s' does " % s + \
+ "not support unmerge operations\n",
+ level=logging.ERROR, noiselevel=-1)
+ retval = 1
+ elif not set_atoms:
+ writemsg_level("emerge: '%s' is an empty set\n" % s,
+ level=logging.INFO, noiselevel=-1)
+ else:
+ newargs.extend(set_atoms)
+ for error_msg in sets[s].errors:
+ writemsg_level("%s\n" % (error_msg,),
+ level=logging.ERROR, noiselevel=-1)
+ else:
+ newargs.append(a)
+ return (newargs, retval)
+
+def repo_name_check(trees):
+ missing_repo_names = set()
+ for root_trees in trees.values():
+ porttree = root_trees.get("porttree")
+ if porttree:
+ portdb = porttree.dbapi
+ missing_repo_names.update(portdb.getMissingRepoNames())
+
+ # Skip warnings about missing repo_name entries for
+ # /usr/local/portage (see bug #248603).
+ try:
+ missing_repo_names.remove('/usr/local/portage')
+ except KeyError:
+ pass
+
+ if missing_repo_names:
+ msg = []
+ msg.append("WARNING: One or more repositories " + \
+ "have missing repo_name entries:")
+ msg.append("")
+ for p in missing_repo_names:
+ msg.append("\t%s/profiles/repo_name" % (p,))
+ msg.append("")
+ msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
+ "should be a plain text file containing a unique " + \
+ "name for the repository on the first line.", 70))
+ msg.append("\n")
+ writemsg_level("".join("%s\n" % l for l in msg),
+ level=logging.WARNING, noiselevel=-1)
+
+ return bool(missing_repo_names)
+
+def repo_name_duplicate_check(trees):
+ ignored_repos = {}
+ for root, root_trees in trees.items():
+ if 'porttree' in root_trees:
+ portdb = root_trees['porttree'].dbapi
+ if portdb.settings.get('PORTAGE_REPO_DUPLICATE_WARN') != '0':
+ for repo_name, paths in portdb.getIgnoredRepos():
+ k = (root, repo_name, portdb.getRepositoryPath(repo_name))
+ ignored_repos.setdefault(k, []).extend(paths)
+
+ if ignored_repos:
+ msg = []
+ msg.append('WARNING: One or more repositories ' + \
+ 'have been ignored due to duplicate')
+ msg.append(' profiles/repo_name entries:')
+ msg.append('')
+ for k in sorted(ignored_repos):
+ msg.append(' %s overrides' % ", ".join(k))
+ for path in ignored_repos[k]:
+ msg.append(' %s' % (path,))
+ msg.append('')
+ msg.extend(' ' + x for x in textwrap.wrap(
+ "All profiles/repo_name entries must be unique in order " + \
+ "to avoid having duplicates ignored. " + \
+ "Set PORTAGE_REPO_DUPLICATE_WARN=\"0\" in " + \
+ "/etc/portage/make.conf if you would like to disable this warning."))
+ msg.append("\n")
+ writemsg_level(''.join('%s\n' % l for l in msg),
+ level=logging.WARNING, noiselevel=-1)
+
+ return bool(ignored_repos)
+
+def run_action(emerge_config):
+
+ # skip global updates prior to sync, since it's called after sync
+ if emerge_config.action not in ('help', 'info', 'sync', 'version') and \
+ emerge_config.opts.get('--package-moves') != 'n' and \
+ _global_updates(emerge_config.trees,
+ emerge_config.target_config.mtimedb["updates"],
+ quiet=("--quiet" in emerge_config.opts)):
+ emerge_config.target_config.mtimedb.commit()
+ # Reload the whole config from scratch.
+ load_emerge_config(emerge_config=emerge_config)
+
+ xterm_titles = "notitles" not in \
+ emerge_config.target_config.settings.features
+ if xterm_titles:
+ xtermTitle("emerge")
+
+ if "--digest" in emerge_config.opts:
+ os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
+ # Reload the whole config from scratch so that the portdbapi internal
+ # config is updated with new FEATURES.
+ load_emerge_config(emerge_config=emerge_config)
+
+ # NOTE: adjust_configs() can map options to FEATURES, so any relevant
+ # options adjustments should be made prior to calling adjust_configs().
+ if "--buildpkgonly" in emerge_config.opts:
+ emerge_config.opts["--buildpkg"] = True
+
+ if "getbinpkg" in emerge_config.target_config.settings.features:
+ emerge_config.opts["--getbinpkg"] = True
+
+ if "--getbinpkgonly" in emerge_config.opts:
+ emerge_config.opts["--getbinpkg"] = True
+
+ if "--getbinpkgonly" in emerge_config.opts:
+ emerge_config.opts["--usepkgonly"] = True
+
+ if "--getbinpkg" in emerge_config.opts:
+ emerge_config.opts["--usepkg"] = True
+
+ if "--usepkgonly" in emerge_config.opts:
+ emerge_config.opts["--usepkg"] = True
+
+ if "--buildpkgonly" in emerge_config.opts:
+ # --buildpkgonly will not merge anything, so
+ # it cancels all binary package options.
+ for opt in ("--getbinpkg", "--getbinpkgonly",
+ "--usepkg", "--usepkgonly"):
+ emerge_config.opts.pop(opt, None)
+
+ # Populate the bintree with current --getbinpkg setting.
+ # This needs to happen before:
+ # * expand_set_arguments, in case any sets use the bintree
+ # * adjust_configs and profile_check, in order to propagate settings
+ # implicit IUSE and USE_EXPAND settings from the binhost(s)
+ if (emerge_config.action in ('search', None) and
+ '--usepkg' in emerge_config.opts):
+ for mytrees in emerge_config.trees.values():
+ try:
+ mytrees['bintree'].populate(
+ getbinpkgs='--getbinpkg' in emerge_config.opts)
+ except ParseError as e:
+ writemsg('\n\n!!!%s.\nSee make.conf(5) for more info.\n'
+ % (e,), noiselevel=-1)
+ return 1
+
+ adjust_configs(emerge_config.opts, emerge_config.trees)
+
+ if profile_check(emerge_config.trees, emerge_config.action) != os.EX_OK:
+ return 1
+
+ apply_priorities(emerge_config.target_config.settings)
+
+ if ("--autounmask-continue" in emerge_config.opts and
+ emerge_config.opts.get("--autounmask") == "n"):
+ writemsg_level(
+ " %s --autounmask-continue has been disabled by --autounmask=n\n" %
+ warn("*"), level=logging.WARNING, noiselevel=-1)
+
+ for fmt in emerge_config.target_config.settings.get("PORTAGE_BINPKG_FORMAT", "").split():
+ if fmt not in portage.const.SUPPORTED_BINPKG_FORMATS:
+ if "--pkg-format" in emerge_config.opts:
+ problematic="--pkg-format"
+ else:
+ problematic="PORTAGE_BINPKG_FORMAT"
+
+ writemsg_level(("emerge: %s is not set correctly. Format " + \
+ "'%s' is not supported.\n") % (problematic, fmt),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ if emerge_config.action == 'version':
+ writemsg_stdout(getportageversion(
+ emerge_config.target_config.settings["PORTDIR"],
+ None,
+ emerge_config.target_config.settings.profile_path,
+ emerge_config.target_config.settings.get("CHOST"),
+ emerge_config.target_config.trees['vartree'].dbapi) + '\n',
+ noiselevel=-1)
+ return 0
+ elif emerge_config.action == 'help':
+ emerge_help()
+ return 0
+
+ spinner = stdout_spinner()
+ if "candy" in emerge_config.target_config.settings.features:
+ spinner.update = spinner.update_scroll
+
+ if "--quiet" not in emerge_config.opts:
+ portage.deprecated_profile_check(
+ settings=emerge_config.target_config.settings)
+ repo_name_check(emerge_config.trees)
+ repo_name_duplicate_check(emerge_config.trees)
+ config_protect_check(emerge_config.trees)
+ check_procfs()
+
+ for mytrees in emerge_config.trees.values():
+ mydb = mytrees["porttree"].dbapi
+ # Freeze the portdbapi for performance (memoize all xmatch results).
+ mydb.freeze()
+
+ del mytrees, mydb
+
+ for x in emerge_config.args:
+ if x.endswith((".ebuild", ".tbz2")) and \
+ os.path.exists(os.path.abspath(x)):
+ print(colorize("BAD", "\n*** emerging by path is broken "
+ "and may not always work!!!\n"))
+ break
+
+ if emerge_config.action == "list-sets":
+ writemsg_stdout("".join("%s\n" % s for s in
+ sorted(emerge_config.target_config.sets)))
+ return os.EX_OK
+ elif emerge_config.action == "check-news":
+ news_counts = count_unread_news(
+ emerge_config.target_config.trees["porttree"].dbapi,
+ emerge_config.target_config.trees["vartree"].dbapi)
+ if any(news_counts.values()):
+ display_news_notifications(news_counts)
+ elif "--quiet" not in emerge_config.opts:
+ print("", colorize("GOOD", "*"), "No news items were found.")
+ return os.EX_OK
+
+ ensure_required_sets(emerge_config.trees)
+
+ if emerge_config.action is None and \
+ "--resume" in emerge_config.opts and emerge_config.args:
+ writemsg("emerge: unexpected argument(s) for --resume: %s\n" %
+ " ".join(emerge_config.args), noiselevel=-1)
+ return 1
+
+ # only expand sets for actions taking package arguments
+ oldargs = emerge_config.args[:]
+ if emerge_config.action in ("clean", "config", "depclean",
+ "info", "prune", "unmerge", "rage-clean", None):
+ newargs, retval = expand_set_arguments(
+ emerge_config.args, emerge_config.action,
+ emerge_config.target_config)
+ if retval != os.EX_OK:
+ return retval
+
+ # Need to handle empty sets specially, otherwise emerge will react
+ # with the help message for empty argument lists
+ if oldargs and not newargs:
+ print("emerge: no targets left after set expansion")
+ return 0
+
+ emerge_config.args = newargs
+
+ if "--tree" in emerge_config.opts and \
+ "--columns" in emerge_config.opts:
+ print("emerge: can't specify both of \"--tree\" and \"--columns\".")
+ return 1
+
+ if '--emptytree' in emerge_config.opts and \
+ '--noreplace' in emerge_config.opts:
+ writemsg_level("emerge: can't specify both of " + \
+ "\"--emptytree\" and \"--noreplace\".\n",
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ if ("--quiet" in emerge_config.opts):
+ spinner.update = spinner.update_quiet
+ portage.util.noiselimit = -1
+
+ if "--fetch-all-uri" in emerge_config.opts:
+ emerge_config.opts["--fetchonly"] = True
+
+ if "--skipfirst" in emerge_config.opts and \
+ "--resume" not in emerge_config.opts:
+ emerge_config.opts["--resume"] = True
+
+ # Allow -p to remove --ask
+ if "--pretend" in emerge_config.opts:
+ emerge_config.opts.pop("--ask", None)
+
+ # forbid --ask when not in a terminal
+ # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
+ if ("--ask" in emerge_config.opts) and (not sys.stdin.isatty()):
+ portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
+ noiselevel=-1)
+ return 1
+
+ if emerge_config.target_config.settings.get("PORTAGE_DEBUG", "") == "1":
+ spinner.update = spinner.update_quiet
+ portage.util.noiselimit = 0
+ if "python-trace" in emerge_config.target_config.settings.features:
+ portage.debug.set_trace(True)
+
+ if not ("--quiet" in emerge_config.opts):
+ if '--nospinner' in emerge_config.opts or \
+ emerge_config.target_config.settings.get('TERM') == 'dumb' or \
+ not sys.stdout.isatty():
+ spinner.update = spinner.update_basic
+
+ if "--debug" in emerge_config.opts:
+ print("myaction", emerge_config.action)
+ print("myopts", emerge_config.opts)
+
+ if not emerge_config.action and not emerge_config.args and \
+ "--resume" not in emerge_config.opts:
+ emerge_help()
+ return 1
+
+ pretend = "--pretend" in emerge_config.opts
+ fetchonly = "--fetchonly" in emerge_config.opts or \
+ "--fetch-all-uri" in emerge_config.opts
+ buildpkgonly = "--buildpkgonly" in emerge_config.opts
+
+ # Check whether the current user has the access (root or portage group) that the requested action requires.
+ if portage.data.secpass < 2:
+ # We've already allowed "--version" and "--help" above.
+ if "--pretend" not in emerge_config.opts and \
+ emerge_config.action not in ("search", "info"):
+ need_superuser = emerge_config.action in ('clean', 'depclean',
+ 'deselect', 'prune', 'unmerge', "rage-clean") or not \
+ (fetchonly or \
+ (buildpkgonly and portage.data.secpass >= 1) or \
+ emerge_config.action in ("metadata", "regen", "sync"))
+ if portage.data.secpass < 1 or \
+ need_superuser:
+ if need_superuser:
+ access_desc = "superuser"
+ else:
+ access_desc = "portage group"
+ # Always show portage_group_warning() when only portage group
+ # access is required but the user is not in the portage group.
+ if "--ask" in emerge_config.opts:
+ writemsg_stdout("This action requires %s access...\n" % \
+ (access_desc,), noiselevel=-1)
+ if portage.data.secpass < 1 and not need_superuser:
+ portage.data.portage_group_warning()
+ uq = UserQuery(emerge_config.opts)
+ if uq.query("Would you like to add --pretend to options?",
+ "--ask-enter-invalid" in emerge_config.opts) == "No":
+ return 128 + signal.SIGINT
+ emerge_config.opts["--pretend"] = True
+ emerge_config.opts.pop("--ask")
+ else:
+ sys.stderr.write(("emerge: %s access is required\n") \
+ % access_desc)
+ if portage.data.secpass < 1 and not need_superuser:
+ portage.data.portage_group_warning()
+ return 1
+
+ # Disable emergelog for everything except build or unmerge operations.
+ # This helps minimize parallel emerge.log entries that can confuse log
+ # parsers like genlop.
+ disable_emergelog = False
+ for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
+ if x in emerge_config.opts:
+ disable_emergelog = True
+ break
+ if disable_emergelog:
+ pass
+ elif emerge_config.action in ("search", "info"):
+ disable_emergelog = True
+ elif portage.data.secpass < 1:
+ disable_emergelog = True
+
+ import _emerge.emergelog
+ _emerge.emergelog._disable = disable_emergelog
+
+ if not disable_emergelog:
+ emerge_log_dir = \
+ emerge_config.target_config.settings.get('EMERGE_LOG_DIR')
+ if emerge_log_dir:
+ try:
+ # At least the parent needs to exist for the lock file.
+ portage.util.ensure_dirs(emerge_log_dir)
+ except portage.exception.PortageException as e:
+ writemsg_level("!!! Error creating directory for " + \
+ "EMERGE_LOG_DIR='%s':\n!!! %s\n" % \
+ (emerge_log_dir, e),
+ noiselevel=-1, level=logging.ERROR)
+ portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)
+ else:
+ _emerge.emergelog._emerge_log_dir = emerge_log_dir
+ else:
+ _emerge.emergelog._emerge_log_dir = os.path.join(os.sep,
+ portage.const.EPREFIX.lstrip(os.sep), "var", "log")
+ portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)
+
+ if not "--pretend" in emerge_config.opts:
+ time_fmt = "%b %d, %Y %H:%M:%S"
+ if sys.hexversion < 0x3000000:
+ time_fmt = portage._unicode_encode(time_fmt)
+ time_str = time.strftime(time_fmt, time.localtime(time.time()))
+ # Avoid potential UnicodeDecodeError in Python 2, since strftime
+ # returns bytes in Python 2, and %b may contain non-ascii chars.
+ time_str = _unicode_decode(time_str,
+ encoding=_encodings['content'], errors='replace')
+ emergelog(xterm_titles, "Started emerge on: %s" % time_str)
+ myelogstr=""
+ if emerge_config.opts:
+ opt_list = []
+ for opt, arg in emerge_config.opts.items():
+ if arg is True:
+ opt_list.append(opt)
+ elif isinstance(arg, list):
+ # arguments like --exclude that use 'append' action
+ for x in arg:
+ opt_list.append("%s=%s" % (opt, x))
+ else:
+ opt_list.append("%s=%s" % (opt, arg))
+ myelogstr=" ".join(opt_list)
+ if emerge_config.action:
+ myelogstr += " --" + emerge_config.action
+ if oldargs:
+ myelogstr += " " + " ".join(oldargs)
+ emergelog(xterm_titles, " *** emerge " + myelogstr)
+
+ oldargs = None
+
+ def emergeexitsig(signum, frame):
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ portage.util.writemsg(
+ "\n\nExiting on signal %(signal)s\n" % {"signal":signum})
+ sys.exit(128 + signum)
+
+ signal.signal(signal.SIGTERM, emergeexitsig)
+
+ def emergeexit():
+ """This gets our final log message in before we quit."""
+ if "--pretend" not in emerge_config.opts:
+ emergelog(xterm_titles, " *** terminating.")
+ if xterm_titles:
+ xtermTitleReset()
+ portage.atexit_register(emergeexit)
+
+ if emerge_config.action in ("config", "metadata", "regen", "sync"):
+ if "--pretend" in emerge_config.opts:
+ sys.stderr.write(("emerge: The '%s' action does " + \
+ "not support '--pretend'.\n") % emerge_config.action)
+ return 1
+
+ if "sync" == emerge_config.action:
+ return action_sync(emerge_config)
+ elif "metadata" == emerge_config.action:
+ action_metadata(emerge_config.target_config.settings,
+ emerge_config.target_config.trees['porttree'].dbapi,
+ emerge_config.opts)
+ elif emerge_config.action == "regen":
+ validate_ebuild_environment(emerge_config.trees)
+ return action_regen(emerge_config.target_config.settings,
+ emerge_config.target_config.trees['porttree'].dbapi,
+ emerge_config.opts.get("--jobs"),
+ emerge_config.opts.get("--load-average"))
+ # CONFIG action
+ elif "config" == emerge_config.action:
+ validate_ebuild_environment(emerge_config.trees)
+ return action_config(emerge_config.target_config.settings,
+ emerge_config.trees, emerge_config.opts, emerge_config.args)
+
+ # SEARCH action
+ elif "search" == emerge_config.action:
+ validate_ebuild_environment(emerge_config.trees)
+ action_search(emerge_config.target_config,
+ emerge_config.opts, emerge_config.args, spinner)
+
+ elif emerge_config.action in \
+ ('clean', 'depclean', 'deselect', 'prune', 'unmerge', 'rage-clean'):
+ validate_ebuild_environment(emerge_config.trees)
+ rval = action_uninstall(emerge_config.target_config.settings,
+ emerge_config.trees, emerge_config.target_config.mtimedb["ldpath"],
+ emerge_config.opts, emerge_config.action,
+ emerge_config.args, spinner)
+ if not (emerge_config.action == 'deselect' or
+ buildpkgonly or fetchonly or pretend):
+ post_emerge(emerge_config.action, emerge_config.opts,
+ emerge_config.args, emerge_config.target_config.root,
+ emerge_config.trees, emerge_config.target_config.mtimedb, rval)
+ return rval
+
+ elif emerge_config.action == 'info':
+
+ # Ensure atoms are valid before calling action_info().
+ vardb = emerge_config.target_config.trees['vartree'].dbapi
+ portdb = emerge_config.target_config.trees['porttree'].dbapi
+ bindb = emerge_config.target_config.trees['bintree'].dbapi
+ valid_atoms = []
+ for x in emerge_config.args:
+ if is_valid_package_atom(x, allow_repo=True):
+ try:
+ # Look at the installed packages first; if there is no match,
+ # look at the ebuilds, since EAPI 4 allows running pkg_info
+ # on non-installed packages.
+ valid_atom = dep_expand(x, mydb=vardb)
+ if valid_atom.cp.split("/")[0] == "null":
+ valid_atom = dep_expand(x, mydb=portdb)
+
+ if valid_atom.cp.split("/")[0] == "null" and \
+ "--usepkg" in emerge_config.opts:
+ valid_atom = dep_expand(x, mydb=bindb)
+
+ valid_atoms.append(valid_atom)
+
+ except portage.exception.AmbiguousPackageName as e:
+ msg = "The short ebuild name \"" + x + \
+ "\" is ambiguous. Please specify " + \
+ "one of the following " + \
+ "fully-qualified ebuild names instead:"
+ for line in textwrap.wrap(msg, 70):
+ writemsg_level("!!! %s\n" % (line,),
+ level=logging.ERROR, noiselevel=-1)
+ for i in e.args[0]:
+ writemsg_level(" %s\n" % colorize("INFORM", i),
+ level=logging.ERROR, noiselevel=-1)
+ writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
+ return 1
+ continue
+ msg = []
+ msg.append("'%s' is not a valid package atom." % (x,))
+ msg.append("Please check ebuild(5) for full details.")
+ writemsg_level("".join("!!! %s\n" % line for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ return action_info(emerge_config.target_config.settings,
+ emerge_config.trees, emerge_config.opts, valid_atoms)
+
+ # "update", "system", or just process files:
+ else:
+ validate_ebuild_environment(emerge_config.trees)
+
+ for x in emerge_config.args:
+ if x.startswith(SETPREFIX) or \
+ is_valid_package_atom(x, allow_repo=True):
+ continue
+ if x[:1] == os.sep:
+ continue
+ try:
+ os.lstat(x)
+ continue
+ except OSError:
+ pass
+ msg = []
+ msg.append("'%s' is not a valid package atom." % (x,))
+ msg.append("Please check ebuild(5) for full details.")
+ writemsg_level("".join("!!! %s\n" % line for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ # GLEP 42 says to display news *after* an emerge --pretend
+ if "--pretend" not in emerge_config.opts:
+ uq = UserQuery(emerge_config.opts)
+ if display_news_notification(emerge_config.target_config,
+ emerge_config.opts) \
+ and "--ask" in emerge_config.opts \
+ and "--read-news" in emerge_config.opts \
+ and uq.query("Would you like to read the news items while " \
+ "calculating dependencies?",
+ '--ask-enter-invalid' in emerge_config.opts) == "Yes":
+ try:
+ subprocess.call(['eselect', 'news', 'read'])
+ # If eselect is not installed, Python <3.3 will throw an
+ # OSError. >=3.3 will throw a FileNotFoundError, which is a
+ # subclass of OSError.
+ except OSError:
+ writemsg("Please install eselect to use this feature.\n",
+ noiselevel=-1)
+ retval = action_build(emerge_config, spinner=spinner)
+ post_emerge(emerge_config.action, emerge_config.opts,
+ emerge_config.args, emerge_config.target_config.root,
+ emerge_config.trees, emerge_config.target_config.mtimedb, retval)
+
+ return retval
diff --git a/lib/_emerge/chk_updated_cfg_files.py b/lib/_emerge/chk_updated_cfg_files.py
new file mode 100644
index 000000000..e5e090767
--- /dev/null
+++ b/lib/_emerge/chk_updated_cfg_files.py
@@ -0,0 +1,42 @@
+# Copyright 1999-2012, 2016 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import logging
+
+import portage
+from portage import os
+from portage.localization import _
+from portage.output import bold, colorize, yellow
+from portage.util import writemsg_level
+
+def chk_updated_cfg_files(eroot, config_protect):
+ target_root = eroot
+ result = list(
+ portage.util.find_updated_config_files(target_root, config_protect))
+
+ for x in result:
+ writemsg_level("\n %s " % (colorize("WARN", "* " + _("IMPORTANT:"))),
+ level=logging.INFO, noiselevel=-1)
+ if not x[1]: # it's a protected file
+ writemsg_level( _("config file '%s' needs updating.\n") % x[0],
+ level=logging.INFO, noiselevel=-1)
+ else: # it's a protected dir
+ if len(x[1]) == 1:
+ head, tail = os.path.split(x[1][0])
+ tail = tail[len("._cfg0000_"):]
+ fpath = os.path.join(head, tail)
+ writemsg_level(_("config file '%s' needs updating.\n") % fpath,
+ level=logging.INFO, noiselevel=-1)
+ else:
+ writemsg_level(
+ _("%d config files in '%s' need updating.\n") % \
+ (len(x[1]), x[0]), level=logging.INFO, noiselevel=-1)
+
+ if result:
+ print(" " + yellow("*") + " See the " +
+ colorize("INFORM", _("CONFIGURATION FILES")) + " and " +
+ colorize("INFORM", _("CONFIGURATION FILES UPDATE TOOLS")))
+ print(" " + yellow("*") + " sections of the " + bold("emerge") + " " +
+ _("man page to learn how to update config files."))
diff --git a/lib/_emerge/clear_caches.py b/lib/_emerge/clear_caches.py
new file mode 100644
index 000000000..cb0db105b
--- /dev/null
+++ b/lib/_emerge/clear_caches.py
@@ -0,0 +1,16 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import gc
+
+def clear_caches(trees):
+ for d in trees.values():
+ d["porttree"].dbapi.melt()
+ d["porttree"].dbapi._aux_cache.clear()
+ d["bintree"].dbapi._clear_cache()
+ if d["vartree"].dbapi._linkmap is None:
+ # preserve-libs is entirely disabled
+ pass
+ else:
+ d["vartree"].dbapi._linkmap._clear_cache()
+ gc.collect()
diff --git a/lib/_emerge/countdown.py b/lib/_emerge/countdown.py
new file mode 100644
index 000000000..62e3c8dea
--- /dev/null
+++ b/lib/_emerge/countdown.py
@@ -0,0 +1,22 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import sys
+import time
+
+from portage.output import colorize
+
+
+def countdown(secs=5, doing='Starting'):
+ if secs:
+ print(
+ '>>> Waiting %s seconds before starting...\n'
+ '>>> (Control-C to abort)...\n'
+ '%s in:' % (secs, doing), end='')
+ for sec in range(secs, 0, -1):
+ sys.stdout.write(colorize('UNMERGE_WARN', ' %i' % sec))
+ sys.stdout.flush()
+ time.sleep(1)
+ print()
diff --git a/lib/_emerge/create_depgraph_params.py b/lib/_emerge/create_depgraph_params.py
new file mode 100644
index 000000000..08605baa1
--- /dev/null
+++ b/lib/_emerge/create_depgraph_params.py
@@ -0,0 +1,159 @@
+# Copyright 1999-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+from portage.util import writemsg_level
+
+def create_depgraph_params(myopts, myaction):
+ # Configure emerge engine parameters.
+ #
+ # self: include _this_ package regardless of whether it is merged.
+ # selective: exclude the package if it is merged
+ # recurse: go into the dependencies
+ # deep: go into the dependencies of already merged packages
+ # empty: pretend nothing is merged
+ # complete: completely account for all known dependencies
+ # bdeps: satisfy build time dependencies of packages that are
+ # already built, even though they are not strictly required
+ # remove: build graph for use in removing packages
+ # rebuilt_binaries: replace installed packages with rebuilt binaries
+ # rebuild_if_new_slot: rebuild or reinstall packages when
+ # slot/sub-slot := operator dependencies can be satisfied by a newer
+ # slot/sub-slot, so that older packages slots will become eligible for
+ # removal by the --depclean action as soon as possible
+ # ignore_built_slot_operator_deps: ignore the slot/sub-slot := operator parts
+ # of dependencies that have been recorded when packages were built
+ # ignore_soname_deps: ignore the soname dependencies of built
+ # packages, so that they do not trigger dependency resolution
+ # failures, or cause packages to be rebuilt or replaced.
+ # ignore_world: ignore the @world package set and its dependencies
+ # with_test_deps: pull in test deps for packages matched by arguments
+ # changed_deps: rebuild installed packages with outdated deps
+ # changed_deps_report: report installed packages with outdated deps
+ # changed_slot: rebuild installed packages with outdated SLOT metadata
+ # binpkg_changed_deps: reject binary packages with outdated deps
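+ #
+ # Illustrative sketch (not part of the original parameter descriptions):
+ # for a hypothetical `emerge --update --deep @world` run with otherwise
+ # default options, the returned dict would include entries such as
+ # {"recurse": True, "selective": True, "deep": True, "bdeps": "auto"},
+ # assuming --deep is given without a numeric argument and --usepkg,
+ # --with-bdeps and --with-bdeps-auto are all unset.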
+ myparams = {"recurse" : True}
+
+ bdeps = myopts.get("--with-bdeps")
+ if bdeps is not None:
+ myparams["bdeps"] = bdeps
+ elif myaction == "remove" or (
+ myopts.get("--with-bdeps-auto") != "n" and "--usepkg" not in myopts):
+ myparams["bdeps"] = "auto"
+
+ ignore_built_slot_operator_deps = myopts.get("--ignore-built-slot-operator-deps")
+ if ignore_built_slot_operator_deps is not None:
+ myparams["ignore_built_slot_operator_deps"] = ignore_built_slot_operator_deps
+
+ myparams["ignore_soname_deps"] = myopts.get(
+ "--ignore-soname-deps", "y")
+
+ dynamic_deps = myopts.get("--dynamic-deps", "y") != "n" and "--nodeps" not in myopts
+ if dynamic_deps:
+ myparams["dynamic_deps"] = True
+
+ if myaction == "remove":
+ myparams["remove"] = True
+ myparams["complete"] = True
+ myparams["selective"] = True
+ return myparams
+
+ if myopts.get('--ignore-world') is True:
+ myparams['ignore_world'] = True
+
+ rebuild_if_new_slot = myopts.get('--rebuild-if-new-slot')
+ if rebuild_if_new_slot is not None:
+ myparams['rebuild_if_new_slot'] = rebuild_if_new_slot
+
+ changed_slot = myopts.get('--changed-slot') is True
+ if changed_slot:
+ myparams["changed_slot"] = True
+
+ if "--update" in myopts or \
+ "--newrepo" in myopts or \
+ "--newuse" in myopts or \
+ "--reinstall" in myopts or \
+ "--noreplace" in myopts or \
+ myopts.get("--changed-deps", "n") != "n" or \
+ changed_slot or \
+ myopts.get("--selective", "n") != "n":
+ myparams["selective"] = True
+
+ deep = myopts.get("--deep")
+ if deep is not None and deep != 0:
+ myparams["deep"] = deep
+
+ complete_if_new_use = \
+ myopts.get("--complete-graph-if-new-use")
+ if complete_if_new_use is not None:
+ myparams["complete_if_new_use"] = complete_if_new_use
+
+ complete_if_new_ver = \
+ myopts.get("--complete-graph-if-new-ver")
+ if complete_if_new_ver is not None:
+ myparams["complete_if_new_ver"] = complete_if_new_ver
+
+ if ("--complete-graph" in myopts or "--rebuild-if-new-rev" in myopts or
+ "--rebuild-if-new-ver" in myopts or "--rebuild-if-unbuilt" in myopts):
+ myparams["complete"] = True
+ if "--emptytree" in myopts:
+ myparams["empty"] = True
+ myparams["deep"] = True
+ myparams.pop("selective", None)
+
+ if "--nodeps" in myopts:
+ myparams.pop("recurse", None)
+ myparams.pop("deep", None)
+ myparams.pop("complete", None)
+
+ rebuilt_binaries = myopts.get('--rebuilt-binaries')
+ if rebuilt_binaries is True or \
+ rebuilt_binaries != 'n' and \
+ '--usepkgonly' in myopts and \
+ myopts.get('--deep') is True and \
+ '--update' in myopts:
+ myparams['rebuilt_binaries'] = True
+
+ binpkg_respect_use = myopts.get('--binpkg-respect-use')
+ if binpkg_respect_use is not None:
+ myparams['binpkg_respect_use'] = binpkg_respect_use
+ elif '--usepkgonly' not in myopts:
+ # If --binpkg-respect-use is not explicitly specified, we enable
+ # the behavior automatically (like requested in bug #297549), as
+ # long as it doesn't strongly conflict with other options that
+ # have been specified.
+ myparams['binpkg_respect_use'] = 'auto'
+
+ binpkg_changed_deps = myopts.get('--binpkg-changed-deps')
+ if binpkg_changed_deps is not None:
+ myparams['binpkg_changed_deps'] = binpkg_changed_deps
+ elif '--usepkgonly' not in myopts:
+ # In order to avoid dependency resolution issues due to changed
+ # dependencies, enable this automatically, as long as it doesn't
+ # strongly conflict with other options that have been specified.
+ myparams['binpkg_changed_deps'] = 'auto'
+
+ changed_deps = myopts.get('--changed-deps')
+ if changed_deps is not None:
+ myparams['changed_deps'] = changed_deps
+
+ changed_deps_report = myopts.get('--changed-deps-report', 'n') == 'y'
+ if changed_deps_report:
+ myparams['changed_deps_report'] = True
+
+ if myopts.get("--selective") == "n":
+ # --selective=n can be used to remove selective
+ # behavior that may have been implied by some
+ # other option like --update.
+ myparams.pop("selective", None)
+
+ with_test_deps = myopts.get("--with-test-deps")
+ if with_test_deps is not None:
+ myparams["with_test_deps"] = with_test_deps
+
+ if '--debug' in myopts:
+ writemsg_level('\n\nmyparams %s\n\n' % myparams,
+ noiselevel=-1, level=logging.DEBUG)
+
+ return myparams
+
diff --git a/lib/_emerge/create_world_atom.py b/lib/_emerge/create_world_atom.py
new file mode 100644
index 000000000..947f8088a
--- /dev/null
+++ b/lib/_emerge/create_world_atom.py
@@ -0,0 +1,128 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+from portage.dep import Atom, _repo_separator
+from portage.exception import InvalidData
+
+if sys.hexversion >= 0x3000000:
+ _unicode = str
+else:
+ _unicode = unicode
+
+def create_world_atom(pkg, args_set, root_config, before_install=False):
+ """Create a new atom for the world file if one does not exist. If the
+ argument atom is precise enough to identify a specific slot then a slot
+ atom will be returned. Atoms that are in the system set may also be stored
+ in world since system atoms can only match one slot while world atoms can
+ be greedy with respect to slots. Unslotted system packages will not be
+ stored in world."""
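+ # Illustrative example (hypothetical package names, not taken from the
+ # sources): for a slotted package, an argument atom that pins a single
+ # slot, e.g. `dev-lang/python:3.6`, would typically be recorded in world
+ # as the slot atom `dev-lang/python:3.6`, while a bare `dev-lang/python`
+ # argument would be recorded as the unslotted atom `dev-lang/python`.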
+
+ arg_atom = args_set.findAtomForPackage(pkg)
+ if not arg_atom:
+ return None
+ cp = arg_atom.cp
+ new_world_atom = cp
+ if arg_atom.repo:
+ new_world_atom += _repo_separator + arg_atom.repo
+ sets = root_config.sets
+ portdb = root_config.trees["porttree"].dbapi
+ vardb = root_config.trees["vartree"].dbapi
+
+ if arg_atom.repo is not None:
+ repos = [arg_atom.repo]
+ else:
+ # Iterate over portdbapi.porttrees, since it's common to
+ # tweak this attribute in order to adjust match behavior.
+ repos = []
+ for tree in portdb.porttrees:
+ repos.append(portdb.repositories.get_name_for_location(tree))
+
+ available_slots = set()
+ for cpv in portdb.match(Atom(cp)):
+ for repo in repos:
+ try:
+ available_slots.add(portdb._pkg_str(_unicode(cpv), repo).slot)
+ except (KeyError, InvalidData):
+ pass
+
+ slotted = len(available_slots) > 1 or \
+ (len(available_slots) == 1 and "0" not in available_slots)
+ if not slotted:
+ # check the vdb in case this is multislot
+ available_slots = set(vardb._pkg_str(cpv, None).slot \
+ for cpv in vardb.match(Atom(cp)))
+ slotted = len(available_slots) > 1 or \
+ (len(available_slots) == 1 and "0" not in available_slots)
+ if slotted and arg_atom.without_repo != cp:
+ # If the user gave a specific atom, store it as a
+ # slot atom in the world file.
+ slot_atom = pkg.slot_atom
+
+ # For USE=multislot, there are a couple of cases to
+ # handle here:
+ #
+ # 1) SLOT="0", but the real SLOT spontaneously changed to some
+ # unknown value, so just record an unslotted atom.
+ #
+ # 2) SLOT comes from an installed package and there is no
+ # matching SLOT in the portage tree.
+ #
+ # Make sure that the slot atom is available in either the
+ # portdb or the vardb, since otherwise the user certainly
+ # doesn't want the SLOT atom recorded in the world file
+ # (case 1 above). If it's only available in the vardb,
+ # the user may be trying to prevent a USE=multislot
+ # package from being removed by --depclean (case 2 above).
+
+ mydb = portdb
+ if not portdb.match(slot_atom):
+ # SLOT seems to come from an installed multislot package
+ mydb = vardb
+ # If there is no installed package matching the SLOT atom,
+ # it probably changed SLOT spontaneously due to USE=multislot,
+ # so just record an unslotted atom.
+ if vardb.match(slot_atom) or before_install:
+ # Now verify that the argument is precise
+ # enough to identify a specific slot.
+ matches = mydb.match(arg_atom)
+ matched_slots = set()
+ if before_install:
+ matched_slots.add(pkg.slot)
+ if mydb is vardb:
+ for cpv in matches:
+ matched_slots.add(mydb._pkg_str(cpv, None).slot)
+ else:
+ for cpv in matches:
+ for repo in repos:
+ try:
+ matched_slots.add(
+ portdb._pkg_str(_unicode(cpv), repo).slot)
+ except (KeyError, InvalidData):
+ pass
+
+ if len(matched_slots) == 1:
+ new_world_atom = slot_atom
+ if arg_atom.repo:
+ new_world_atom += _repo_separator + arg_atom.repo
+
+ if new_world_atom == sets["selected"].findAtomForPackage(pkg):
+ # Both atoms would be identical, so there's nothing to add.
+ return None
+ if not slotted and not arg_atom.repo:
+ # Unlike world atoms, system atoms are not greedy for slots, so they
+ # can't be safely excluded from world if they are slotted.
+ system_atom = sets["system"].findAtomForPackage(pkg)
+ if system_atom:
+ if not system_atom.cp.startswith("virtual/"):
+ return None
+ # System virtuals aren't safe to exclude from world since they can
+ # match multiple old-style virtuals but only one of them will be
+ # pulled in by update or depclean.
+ providers = portdb.settings.getvirtuals().get(system_atom.cp)
+ if providers and len(providers) == 1 and \
+ providers[0].cp == arg_atom.cp:
+ return None
+ return new_world_atom
+
diff --git a/lib/_emerge/depgraph.py b/lib/_emerge/depgraph.py
new file mode 100644
index 000000000..b63d4f242
--- /dev/null
+++ b/lib/_emerge/depgraph.py
@@ -0,0 +1,10049 @@
+# Copyright 1999-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division, print_function, unicode_literals
+
+import collections
+import errno
+import functools
+import io
+import logging
+import stat
+import sys
+import textwrap
+import warnings
+from collections import deque
+from itertools import chain
+
+import portage
+from portage import os, OrderedDict
+from portage import _unicode_decode, _unicode_encode, _encodings
+from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH, VCS_DIRS
+from portage.dbapi import dbapi
+from portage.dbapi.dep_expand import dep_expand
+from portage.dbapi.DummyTree import DummyTree
+from portage.dbapi.IndexedPortdb import IndexedPortdb
+from portage.dbapi._similar_name_search import similar_name_search
+from portage.dep import Atom, best_match_to_list, extract_affecting_use, \
+ check_required_use, human_readable_required_use, match_from_list, \
+ _repo_separator
+from portage.dep._slot_operator import (ignore_built_slot_operator_deps,
+ strip_slots)
+from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use, \
+ _get_eapi_attrs
+from portage.exception import (InvalidAtom, InvalidData, InvalidDependString,
+ PackageNotFound, PortageException)
+from portage.localization import _
+from portage.output import colorize, create_color_func, \
+ darkgreen, green
+bad = create_color_func("BAD")
+from portage.package.ebuild.config import _get_feature_flags
+from portage.package.ebuild.getmaskingstatus import \
+ _getmaskingstatus, _MaskReason
+from portage._sets import SETPREFIX
+from portage._sets.base import InternalPackageSet
+from portage.util import ConfigProtect, shlex_split, new_protect_filename
+from portage.util import cmp_sort_key, writemsg, writemsg_stdout
+from portage.util import ensure_dirs
+from portage.util import writemsg_level, write_atomic
+from portage.util.digraph import digraph
+from portage.util._async.TaskScheduler import TaskScheduler
+from portage.util._eventloop.EventLoop import EventLoop
+from portage.util._eventloop.global_event_loop import global_event_loop
+from portage.versions import _pkg_str, catpkgsplit
+
+from _emerge.AtomArg import AtomArg
+from _emerge.Blocker import Blocker
+from _emerge.BlockerCache import BlockerCache
+from _emerge.BlockerDepPriority import BlockerDepPriority
+ from _emerge.chk_updated_cfg_files import chk_updated_cfg_files
+from _emerge.countdown import countdown
+from _emerge.create_world_atom import create_world_atom
+from _emerge.Dependency import Dependency
+from _emerge.DependencyArg import DependencyArg
+from _emerge.DepPriority import DepPriority
+from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
+from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
+from _emerge.FakeVartree import FakeVartree
+from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
+from _emerge.is_valid_package_atom import insert_category_into_atom, \
+ is_valid_package_atom
+from _emerge.Package import Package
+from _emerge.PackageArg import PackageArg
+from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
+from _emerge.RootConfig import RootConfig
+from _emerge.search import search
+from _emerge.SetArg import SetArg
+from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+from _emerge.UnmergeDepPriority import UnmergeDepPriority
+from _emerge.UseFlagDisplay import pkg_use_display
+from _emerge.UserQuery import UserQuery
+
+from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
+from _emerge.resolver.DbapiProvidesIndex import DbapiProvidesIndex
+from _emerge.resolver.package_tracker import PackageTracker, PackageTrackerDbapiWrapper
+from _emerge.resolver.slot_collision import slot_conflict_handler
+from _emerge.resolver.circular_dependency import circular_dependency_handler
+from _emerge.resolver.output import Display, format_unmatched_atom
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ long = int
+ _unicode = str
+else:
+ _unicode = unicode
+
+class _scheduler_graph_config(object):
+ def __init__(self, trees, pkg_cache, graph, mergelist):
+ self.trees = trees
+ self.pkg_cache = pkg_cache
+ self.graph = graph
+ self.mergelist = mergelist
+
+def _wildcard_set(atoms):
+ pkgs = InternalPackageSet(allow_wildcard=True)
+ for x in atoms:
+ try:
+ x = Atom(x, allow_wildcard=True, allow_repo=False)
+ except portage.exception.InvalidAtom:
+ x = Atom("*/" + x, allow_wildcard=True, allow_repo=False)
+ pkgs.add(x)
+ return pkgs
+
+class _frozen_depgraph_config(object):
+
+ def __init__(self, settings, trees, myopts, params, spinner):
+ self.settings = settings
+ self.target_root = settings["EROOT"]
+ self.myopts = myopts
+ self.edebug = 0
+ if settings.get("PORTAGE_DEBUG", "") == "1":
+ self.edebug = 1
+ self.spinner = spinner
+ self.requested_depth = params.get("deep", 0)
+ self._running_root = trees[trees._running_eroot]["root_config"]
+ self.pkgsettings = {}
+ self.trees = {}
+ self._trees_orig = trees
+ self.roots = {}
+ # All Package instances
+ self._pkg_cache = {}
+ self._highest_license_masked = {}
+ # We can't know that an soname dep is unsatisfied if there are
+ # any unbuilt ebuilds in the graph, since unbuilt ebuilds have
+ # no soname data. Therefore, only enable soname dependency
+ # resolution if --usepkgonly is enabled, or for removal actions.
+ self.soname_deps_enabled = (
+ ("--usepkgonly" in myopts or "remove" in params) and
+ params.get("ignore_soname_deps") != "y")
+ dynamic_deps = "dynamic_deps" in params
+ ignore_built_slot_operator_deps = myopts.get(
+ "--ignore-built-slot-operator-deps", "n") == "y"
+ for myroot in trees:
+ self.trees[myroot] = {}
+ # Create a RootConfig instance that references
+ # the FakeVartree instead of the real one.
+ self.roots[myroot] = RootConfig(
+ trees[myroot]["vartree"].settings,
+ self.trees[myroot],
+ trees[myroot]["root_config"].setconfig)
+ for tree in ("porttree", "bintree"):
+ self.trees[myroot][tree] = trees[myroot][tree]
+ self.trees[myroot]["vartree"] = \
+ FakeVartree(trees[myroot]["root_config"],
+ pkg_cache=self._pkg_cache,
+ pkg_root_config=self.roots[myroot],
+ dynamic_deps=dynamic_deps,
+ ignore_built_slot_operator_deps=ignore_built_slot_operator_deps,
+ soname_deps=self.soname_deps_enabled)
+ self.pkgsettings[myroot] = portage.config(
+ clone=self.trees[myroot]["vartree"].settings)
+ if self.soname_deps_enabled and "remove" not in params:
+ self.trees[myroot]["bintree"] = DummyTree(
+ DbapiProvidesIndex(trees[myroot]["bintree"].dbapi))
+
+ if params.get("ignore_world", False):
+ self._required_set_names = set()
+ else:
+ self._required_set_names = set(["world"])
+
+ atoms = ' '.join(myopts.get("--exclude", [])).split()
+ self.excluded_pkgs = _wildcard_set(atoms)
+ atoms = ' '.join(myopts.get("--reinstall-atoms", [])).split()
+ self.reinstall_atoms = _wildcard_set(atoms)
+ atoms = ' '.join(myopts.get("--usepkg-exclude", [])).split()
+ self.usepkg_exclude = _wildcard_set(atoms)
+ atoms = ' '.join(myopts.get("--useoldpkg-atoms", [])).split()
+ self.useoldpkg_atoms = _wildcard_set(atoms)
+ atoms = ' '.join(myopts.get("--rebuild-exclude", [])).split()
+ self.rebuild_exclude = _wildcard_set(atoms)
+ atoms = ' '.join(myopts.get("--rebuild-ignore", [])).split()
+ self.rebuild_ignore = _wildcard_set(atoms)
+
+ self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts
+ self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts
+ self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts
+
+class _depgraph_sets(object):
+ def __init__(self):
+ # contains all sets added to the graph
+ self.sets = {}
+ # contains non-set atoms given as arguments
+ self.sets['__non_set_args__'] = InternalPackageSet(allow_repo=True)
+ # contains all atoms from all sets added to the graph, including
+ # atoms given as arguments
+ self.atoms = InternalPackageSet(allow_repo=True)
+ self.atom_arg_map = {}
+
+class _rebuild_config(object):
+ def __init__(self, frozen_config, backtrack_parameters):
+ self._graph = digraph()
+ self._frozen_config = frozen_config
+ self.rebuild_list = backtrack_parameters.rebuild_list.copy()
+ self.orig_rebuild_list = self.rebuild_list.copy()
+ self.reinstall_list = backtrack_parameters.reinstall_list.copy()
+ self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev
+ self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver
+ self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt
+ self.rebuild = (self.rebuild_if_new_rev or self.rebuild_if_new_ver or
+ self.rebuild_if_unbuilt)
+
+ def add(self, dep_pkg, dep):
+ parent = dep.collapsed_parent
+ priority = dep.collapsed_priority
+ rebuild_exclude = self._frozen_config.rebuild_exclude
+ rebuild_ignore = self._frozen_config.rebuild_ignore
+ if (self.rebuild and isinstance(parent, Package) and
+ parent.built and priority.buildtime and
+ isinstance(dep_pkg, Package) and
+ not rebuild_exclude.findAtomForPackage(parent) and
+ not rebuild_ignore.findAtomForPackage(dep_pkg)):
+ self._graph.add(dep_pkg, parent, priority)
+
+ def _needs_rebuild(self, dep_pkg):
+ """Check whether packages that depend on dep_pkg need to be rebuilt."""
+ dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom)
+ if dep_pkg.built or dep_root_slot in self.orig_rebuild_list:
+ return False
+
+ if self.rebuild_if_unbuilt:
+ # dep_pkg is being installed from source, so binary
+ # packages for parents are invalid. Force rebuild
+ return True
+
+ trees = self._frozen_config.trees
+ vardb = trees[dep_pkg.root]["vartree"].dbapi
+ if self.rebuild_if_new_rev:
+ # Parent packages are valid if a package with the same
+ # cpv is already installed.
+ return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom)
+
+ # Otherwise, parent packages are valid if a package with the same
+ # version (excluding revision) is already installed.
+ assert self.rebuild_if_new_ver
+ cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
+ for inst_cpv in vardb.match(dep_pkg.slot_atom):
+ inst_cpv_norev = catpkgsplit(inst_cpv)[:-1]
+ if inst_cpv_norev == cpv_norev:
+ return False
+
+ return True
+
+ def _trigger_rebuild(self, parent, build_deps):
+ root_slot = (parent.root, parent.slot_atom)
+ if root_slot in self.rebuild_list:
+ return False
+ trees = self._frozen_config.trees
+ reinstall = False
+ for slot_atom, dep_pkg in build_deps.items():
+ dep_root_slot = (dep_pkg.root, slot_atom)
+ if self._needs_rebuild(dep_pkg):
+ self.rebuild_list.add(root_slot)
+ return True
+ elif ("--usepkg" in self._frozen_config.myopts and
+ (dep_root_slot in self.reinstall_list or
+ dep_root_slot in self.rebuild_list or
+ not dep_pkg.installed)):
+
+ # A direct rebuild dependency is being installed. We
+ # should update the parent as well to the latest binary,
+ # if that binary is valid.
+ #
+ # To validate the binary, we check whether all of the
+ # rebuild dependencies are present on the same binhost.
+ #
+ # 1) If parent is present on the binhost, but one of its
+ # rebuild dependencies is not, then the parent should
+ # be rebuilt from source.
+ # 2) Otherwise, the parent binary is assumed to be valid,
+ # because all of its rebuild dependencies are
+ # consistent.
+ bintree = trees[parent.root]["bintree"]
+ uri = bintree.get_pkgindex_uri(parent.cpv)
+ dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
+ bindb = bintree.dbapi
+ if self.rebuild_if_new_ver and uri and uri != dep_uri:
+ cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
+ for cpv in bindb.match(dep_pkg.slot_atom):
+ if cpv_norev == catpkgsplit(cpv)[:-1]:
+ dep_uri = bintree.get_pkgindex_uri(cpv)
+ if uri == dep_uri:
+ break
+ if uri and uri != dep_uri:
+ # 1) Remote binary package is invalid because it was
+ # built without dep_pkg. Force rebuild.
+ self.rebuild_list.add(root_slot)
+ return True
+ elif (parent.installed and
+ root_slot not in self.reinstall_list):
+ try:
+ bin_build_time, = bindb.aux_get(parent.cpv,
+ ["BUILD_TIME"])
+ except KeyError:
+ continue
+ if bin_build_time != _unicode(parent.build_time):
+ # 2) Remote binary package is valid, and local package
+ # is not up to date. Force reinstall.
+ reinstall = True
+ if reinstall:
+ self.reinstall_list.add(root_slot)
+ return reinstall
+
+ def trigger_rebuilds(self):
+ """
+ Trigger rebuilds where necessary. If pkgA has been updated, and pkgB
+ depends on pkgA at both build-time and run-time, pkgB needs to be
+ rebuilt.
+ """
+ need_restart = False
+ graph = self._graph
+ build_deps = {}
+
+ leaf_nodes = deque(graph.leaf_nodes())
+
+ # Trigger rebuilds bottom-up (starting with the leaves) so that parents
+ # will always know which children are being rebuilt.
+ while graph:
+ if not leaf_nodes:
+ # We'll have to drop an edge. This should be quite rare.
+ leaf_nodes.append(graph.order[-1])
+
+ node = leaf_nodes.popleft()
+ if node not in graph:
+ # This can be triggered by circular dependencies.
+ continue
+ slot_atom = node.slot_atom
+
+ # Remove our leaf node from the graph, keeping track of deps.
+ parents = graph.parent_nodes(node)
+ graph.remove(node)
+ node_build_deps = build_deps.get(node, {})
+ for parent in parents:
+ if parent == node:
+ # Ignore a direct cycle.
+ continue
+ parent_bdeps = build_deps.setdefault(parent, {})
+ parent_bdeps[slot_atom] = node
+ if not graph.child_nodes(parent):
+ leaf_nodes.append(parent)
+
+ # Trigger rebuilds for our leaf node. Because all of our children
+ # have been processed, the build_deps will be completely filled in,
+ # and self.rebuild_list / self.reinstall_list will tell us whether
+ # any of our children need to be rebuilt or reinstalled.
+ if self._trigger_rebuild(node, node_build_deps):
+ need_restart = True
+
+ return need_restart
+
+
+class _use_changes(tuple):
+ def __new__(cls, new_use, new_changes, required_use_satisfied=True):
+ obj = tuple.__new__(cls, [new_use, new_changes])
+ obj.required_use_satisfied = required_use_satisfied
+ return obj
+
+
+class _dynamic_depgraph_config(object):
+
+ """
+ ``dynamic_depgraph_config`` is an object that is used to collect settings and important data structures that are
+ used in calculating Portage dependencies. Each depgraph created by the depgraph.py code gets its own
+ ``dynamic_depgraph_config``, whereas ``frozen_depgraph_config`` is shared among all depgraphs.
+
+ **self.digraph**
+
+ Of particular importance is the instance variable ``self.digraph``, which is an instance of
+ ``portage.util.digraph``, a directed graph data structure. ``portage.util.digraph`` is used for a variety of
+ purposes in the Portage codebase, but in this particular scenario as ``self.digraph``, it is used to create a
+ dependency tree of Portage packages. So for ``self.digraph``, each *node* of the directed graph is a ``Package``,
+ while *edges* connect nodes and each edge can have a Priority. The Priority setting is used to help resolve
+ circular dependencies, and should be interpreted in the direction of parent to child.
+
+ Conceptually, think of ``self.digraph`` as containing user-specified packages or sets at the very top, with
+ dependencies hanging down as children, and dependencies of those children as children of children, etc. The depgraph
+ is intended to model dependency relationships, not the order that packages should be installed.
+
+ **resolving the digraph**
+
+ To convert a digraph to an ordered list of packages to merge in an order where all dependencies are properly
+ satisfied, we would first start by looking at leaf nodes, which are nodes that have no dependencies of their own. We
+ could then traverse the digraph upwards from the leaf nodes, towards the parents. Along the way, depending on emerge
+ options, we could make decisions what packages should be installed or rebuilt. This is how ``self.digraph`` is used
+ in the code.
+
+ **digraph creation**
+
+ The ``depgraph.py`` code creates the digraph by first adding emerge arguments to the digraph as the main parents,
+ so if ``@world`` is specified, then the world set is added as the main parents. Then, ``emerge`` will determine
+ the dependencies of these packages, and depending on what options are passed to ``emerge``, will look at installed
+ packages, binary packages and available ebuilds that could be merged to satisfy dependencies, and these will be
+ added as children in the digraph. Children of children will be added as dependencies as needed, depending on the
+ depth setting used by ``emerge``.
+
+ As the digraph is created, it is perfectly fine for Packages to be added to the digraph that conflict with one
+ another. After the digraph has been fully populated to the necessary depth, code within ``depgraph.py`` will
+ identify any conflicts that are modeled within the digraph and determine the best way to handle them.
+
+ """
+
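+ # A minimal sketch of the digraph calls referenced above (illustrative
+ # only; see portage.util.digraph for the authoritative API):
+ #
+ # graph = digraph()
+ # graph.add(child_pkg, parent_pkg, priority=priority) # edge parent -> child
+ # graph.child_nodes(parent_pkg) # direct dependencies of parent_pkg
+ # graph.leaf_nodes() # nodes with no children, i.e. merge candidates
+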
+ def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
+ self.myparams = myparams.copy()
+ self._vdb_loaded = False
+ self._allow_backtracking = allow_backtracking
+ # Maps nodes to the reasons they were selected for reinstallation.
+ self._reinstall_nodes = {}
+ # Contains a filtered view of preferred packages that are selected
+ # from available repositories.
+ self._filtered_trees = {}
+ # Contains installed packages and new packages that have been added
+ # to the graph.
+ self._graph_trees = {}
+ # Caches visible packages returned from _select_package, for use in
+ # depgraph._iter_atoms_for_pkg() SLOT logic.
+ self._visible_pkgs = {}
+ #contains the args created by select_files
+ self._initial_arg_list = []
+ self.digraph = portage.digraph()
+ # manages sets added to the graph
+ self.sets = {}
+ # contains all nodes pulled in by self.sets
+ self._set_nodes = set()
+ # Contains only Blocker -> Uninstall edges
+ self._blocker_uninstalls = digraph()
+ # Contains only Package -> Blocker edges
+ self._blocker_parents = digraph()
+ # Contains only irrelevant Package -> Blocker edges
+ self._irrelevant_blockers = digraph()
+ # Contains only unsolvable Package -> Blocker edges
+ self._unsolvable_blockers = digraph()
+ # Contains all Blocker -> Blocked Package edges
+ # Do not initialize this until the depgraph _validate_blockers
+ # method is called, so that the _in_blocker_conflict method can
+ # assert that _validate_blockers has been called first.
+ self._blocked_pkgs = None
+ # Contains world packages that have been protected from
+ # uninstallation but may not have been added to the graph
+ # if the graph is not complete yet.
+ self._blocked_world_pkgs = {}
+ # Contains packages whose dependencies have been traversed.
+ # This is used to check if we have accounted for blockers
+ # relevant to a package.
+ self._traversed_pkg_deps = set()
+ self._parent_atoms = {}
+ self._slot_conflict_handler = None
+ self._circular_dependency_handler = None
+ self._serialized_tasks_cache = None
+ self._scheduler_graph = None
+ self._displayed_list = None
+ self._pprovided_args = []
+ self._missing_args = []
+ self._masked_installed = set()
+ self._masked_license_updates = set()
+ self._unsatisfied_deps_for_display = []
+ self._unsatisfied_blockers_for_display = None
+ self._circular_deps_for_display = None
+ self._dep_stack = []
+ self._dep_disjunctive_stack = []
+ self._unsatisfied_deps = []
+ self._initially_unsatisfied_deps = []
+ self._ignored_deps = []
+ self._highest_pkg_cache = {}
+ self._highest_pkg_cache_cp_map = {}
+ self._flatten_atoms_cache = {}
+ self._changed_deps_pkgs = {}
+
+ # Binary packages that have been rejected because their USE
+ # didn't match the user's config. It maps packages to a set
+ # of flags causing the rejection.
+ self.ignored_binaries = {}
+
+ self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
+ self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
+ self._needed_license_changes = backtrack_parameters.needed_license_changes
+ self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
+ self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
+ self._slot_operator_replace_installed = backtrack_parameters.slot_operator_replace_installed
+ self._prune_rebuilds = backtrack_parameters.prune_rebuilds
+ self._need_restart = False
+ self._need_config_reload = False
+ # For conditions that always require user intervention, such as
+ # unsatisfied REQUIRED_USE (currently has no autounmask support).
+ self._skip_restart = False
+ self._backtrack_infos = {}
+
+ self._buildpkgonly_deps_unsatisfied = False
+ self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
+ self._displayed_autounmask = False
+ self._success_without_autounmask = False
+ self._autounmask_backtrack_disabled = False
+ self._required_use_unsatisfied = False
+ self._traverse_ignored_deps = False
+ self._complete_mode = False
+ self._slot_operator_deps = {}
+ self._installed_sonames = collections.defaultdict(list)
+ self._package_tracker = PackageTracker(
+ soname_deps=depgraph._frozen_config.soname_deps_enabled)
+ # Track missed updates caused by solved conflicts.
+ self._conflict_missed_update = collections.defaultdict(dict)
+
+ for myroot in depgraph._frozen_config.trees:
+ self.sets[myroot] = _depgraph_sets()
+ vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
+ # This dbapi instance will model the state that the vdb will
+ # have after new packages have been installed.
+ fakedb = PackageTrackerDbapiWrapper(myroot, self._package_tracker)
+
+ def graph_tree():
+ pass
+ graph_tree.dbapi = fakedb
+ self._graph_trees[myroot] = {}
+ self._filtered_trees[myroot] = {}
+ # Substitute the graph tree for the vartree in dep_check() since we
+ # want atom selections to be consistent with package selections
+ # that have already been made.
+ self._graph_trees[myroot]["porttree"] = graph_tree
+ self._graph_trees[myroot]["vartree"] = graph_tree
+ self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi
+ self._graph_trees[myroot]["graph"] = self.digraph
+ self._graph_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg
+ self._graph_trees[myroot]["downgrade_probe"] = depgraph._downgrade_probe
+ def filtered_tree():
+ pass
+ filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
+ self._filtered_trees[myroot]["porttree"] = filtered_tree
+ self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)
+
+ # Passing in graph_tree as the vartree here could lead to better
+ # atom selections in some cases by causing atoms for packages that
+ # have been added to the graph to be preferred over other choices.
+ # However, it can trigger atom selections that result in
+ # unresolvable direct circular dependencies. For example, this
+ # happens with gwydion-dylan which depends on either itself or
+ # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
+ # gwydion-dylan-bin needs to be selected in order to avoid
+ # an unresolvable direct circular dependency.
+ #
+ # To solve the problem described above, pass in "graph_db" so that
+ # packages that have been added to the graph are distinguishable
+ # from other available packages and installed packages. Also, pass
+ # the parent package into self._select_atoms() calls so that
+ # unresolvable direct circular dependencies can be detected and
+ # avoided when possible.
+ self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
+ self._filtered_trees[myroot]["graph"] = self.digraph
+ self._filtered_trees[myroot]["vartree"] = \
+ depgraph._frozen_config.trees[myroot]["vartree"]
+ self._filtered_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg
+ self._filtered_trees[myroot]["downgrade_probe"] = depgraph._downgrade_probe
+
+ dbs = []
+ # (db, pkg_type, built, installed, db_keys)
+ if "remove" in self.myparams:
+ # For removal operations, use _dep_check_composite_db
+ # for availability and visibility checks. This provides
+ # consistency with install operations, so we don't
+ # get install/uninstall cycles like in bug #332719.
+ self._graph_trees[myroot]["porttree"] = filtered_tree
+ else:
+ if "--usepkgonly" not in depgraph._frozen_config.myopts:
+ portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
+ db_keys = list(portdb._aux_cache_keys)
+ dbs.append((portdb, "ebuild", False, False, db_keys))
+
+ if "--usepkg" in depgraph._frozen_config.myopts:
+ bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
+ db_keys = list(bindb._aux_cache_keys)
+ dbs.append((bindb, "binary", True, False, db_keys))
+
+ vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
+ db_keys = list(depgraph._frozen_config._trees_orig[myroot
+ ]["vartree"].dbapi._aux_cache_keys)
+ dbs.append((vardb, "installed", True, True, db_keys))
+ self._filtered_trees[myroot]["dbs"] = dbs
+
+class depgraph(object):
+
+ # Represents the depth of a node that is unreachable from explicit
+ # user arguments (or their deep dependencies). Such nodes are pulled
+ # in by the _complete_graph method.
+ _UNREACHABLE_DEPTH = object()
+
+ pkg_tree_map = RootConfig.pkg_tree_map
+
+ def __init__(self, settings, trees, myopts, myparams, spinner,
+ frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
+ if frozen_config is None:
+ frozen_config = _frozen_depgraph_config(settings, trees,
+ myopts, myparams, spinner)
+ self._frozen_config = frozen_config
+ self._dynamic_config = _dynamic_depgraph_config(self, myparams,
+ allow_backtracking, backtrack_parameters)
+ self._rebuild = _rebuild_config(frozen_config, backtrack_parameters)
+
+ self._select_atoms = self._select_atoms_highest_available
+ self._select_package = self._select_pkg_highest_available
+
+ self._event_loop = (portage._internal_caller and
+ global_event_loop() or EventLoop(main=False))
+
+ self._select_atoms_parent = None
+
+ self.query = UserQuery(myopts).query
+
+ def _index_binpkgs(self):
+ for root in self._frozen_config.trees:
+ bindb = self._frozen_config.trees[root]["bintree"].dbapi
+ if bindb._provides_index:
+ # don't repeat this when backtracking
+ continue
+ root_config = self._frozen_config.roots[root]
+ for cpv in self._frozen_config._trees_orig[
+ root]["bintree"].dbapi.cpv_all():
+ bindb._provides_inject(
+ self._pkg(cpv, "binary", root_config))
+
+ def _load_vdb(self):
+ """
+ Load installed package metadata if appropriate. This used to be called
+ from the constructor, but that wasn't very nice since this procedure
+ is slow and it generates spinner output. So, now it's called on-demand
+ by various methods when necessary.
+ """
+
+ if self._dynamic_config._vdb_loaded:
+ return
+
+ for myroot in self._frozen_config.trees:
+
+ dynamic_deps = "dynamic_deps" in self._dynamic_config.myparams
+ preload_installed_pkgs = \
+ "--nodeps" not in self._frozen_config.myopts
+
+ fake_vartree = self._frozen_config.trees[myroot]["vartree"]
+ if not fake_vartree.dbapi:
+ # This needs to be called for the first depgraph, but not for
+ # backtracking depgraphs that share the same frozen_config.
+ fake_vartree.sync()
+
+ # FakeVartree.sync() populates virtuals, and we want
+ # self.pkgsettings to have them populated too.
+ self._frozen_config.pkgsettings[myroot] = \
+ portage.config(clone=fake_vartree.settings)
+
+ if preload_installed_pkgs:
+ vardb = fake_vartree.dbapi
+
+ if not dynamic_deps:
+ for pkg in vardb:
+ self._dynamic_config._package_tracker.add_installed_pkg(pkg)
+ self._add_installed_sonames(pkg)
+ else:
+ max_jobs = self._frozen_config.myopts.get("--jobs")
+ max_load = self._frozen_config.myopts.get("--load-average")
+ scheduler = TaskScheduler(
+ self._dynamic_deps_preload(fake_vartree),
+ max_jobs=max_jobs,
+ max_load=max_load,
+ event_loop=fake_vartree._portdb._event_loop)
+ scheduler.start()
+ scheduler.wait()
+
+ self._dynamic_config._vdb_loaded = True
+
+ def _dynamic_deps_preload(self, fake_vartree):
+ portdb = fake_vartree._portdb
+ for pkg in fake_vartree.dbapi:
+ self._spinner_update()
+ self._dynamic_config._package_tracker.add_installed_pkg(pkg)
+ self._add_installed_sonames(pkg)
+ ebuild_path, repo_path = \
+ portdb.findname2(pkg.cpv, myrepo=pkg.repo)
+ if ebuild_path is None:
+ fake_vartree.dynamic_deps_preload(pkg, None)
+ continue
+ metadata, ebuild_hash = portdb._pull_valid_cache(
+ pkg.cpv, ebuild_path, repo_path)
+ if metadata is not None:
+ fake_vartree.dynamic_deps_preload(pkg, metadata)
+ else:
+ proc = EbuildMetadataPhase(cpv=pkg.cpv,
+ ebuild_hash=ebuild_hash,
+ portdb=portdb, repo_path=repo_path,
+ settings=portdb.doebuild_settings)
+ proc.addExitListener(
+ self._dynamic_deps_proc_exit(pkg, fake_vartree))
+ yield proc
+
+ class _dynamic_deps_proc_exit(object):
+
+ __slots__ = ('_pkg', '_fake_vartree')
+
+ def __init__(self, pkg, fake_vartree):
+ self._pkg = pkg
+ self._fake_vartree = fake_vartree
+
+ def __call__(self, proc):
+ metadata = None
+ if proc.returncode == os.EX_OK:
+ metadata = proc.metadata
+ self._fake_vartree.dynamic_deps_preload(self._pkg, metadata)
+
+ def _spinner_update(self):
+ if self._frozen_config.spinner:
+ self._frozen_config.spinner.update()
+
+ def _compute_abi_rebuild_info(self):
+ """
+ Fill self._forced_rebuilds with packages that cause rebuilds.
+ """
+
+ debug = "--debug" in self._frozen_config.myopts
+ installed_sonames = self._dynamic_config._installed_sonames
+ package_tracker = self._dynamic_config._package_tracker
+
+ # Get all atoms that might have caused a forced rebuild.
+ atoms = {}
+ for s in self._dynamic_config._initial_arg_list:
+ if s.force_reinstall:
+ root = s.root_config.root
+ atoms.setdefault(root, set()).update(s.pset)
+
+ if debug:
+ writemsg_level("forced reinstall atoms:\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ for root in atoms:
+ writemsg_level(" root: %s\n" % root,
+ level=logging.DEBUG, noiselevel=-1)
+ for atom in atoms[root]:
+ writemsg_level(" atom: %s\n" % atom,
+ level=logging.DEBUG, noiselevel=-1)
+ writemsg_level("\n\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ # Go through all slot operator deps and check if one of these deps
+ # has a parent that is matched by one of the atoms from above.
+ forced_rebuilds = {}
+
+ for root, rebuild_atoms in atoms.items():
+
+ for slot_atom in rebuild_atoms:
+
+ inst_pkg, reinst_pkg = \
+ self._select_pkg_from_installed(root, slot_atom)
+
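+    # Skip slots where the installed instance is not actually
+    # being replaced.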
+ if inst_pkg is reinst_pkg or reinst_pkg is None:
+ continue
+
+ if (inst_pkg is not None and
+ inst_pkg.requires is not None):
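+     # Walk the installed package's soname (REQUIRES) atoms: if the
+     # final graph no longer provides one of them, record the
+     # replacement of its former provider as the cause of this rebuild.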
+ for atom in inst_pkg.requires:
+ initial_providers = installed_sonames.get(
+ (root, atom))
+ if initial_providers is None:
+ continue
+ final_provider = next(
+ package_tracker.match(root, atom),
+ None)
+ if final_provider:
+ continue
+ for provider in initial_providers:
+ # Find the replacement child.
+ child = next((pkg for pkg in
+ package_tracker.match(
+ root, provider.slot_atom)
+ if not pkg.installed), None)
+
+ if child is None:
+ continue
+
+ forced_rebuilds.setdefault(
+ root, {}).setdefault(
+ child, set()).add(inst_pkg)
+
+ # Generate pseudo-deps for any slot-operator deps of
+ # inst_pkg. Its deps aren't in _slot_operator_deps
+ # because it hasn't been added to the graph, but we
+ # are interested in any rebuilds that it triggered.
+ built_slot_op_atoms = []
+ if inst_pkg is not None:
+ selected_atoms = self._select_atoms_probe(
+ inst_pkg.root, inst_pkg)
+ for atom in selected_atoms:
+ if atom.slot_operator_built:
+ built_slot_op_atoms.append(atom)
+
+ if not built_slot_op_atoms:
+ continue
+
+ # Use a cloned list, since we may append to it below.
+ deps = self._dynamic_config._slot_operator_deps.get(
+ (root, slot_atom), [])[:]
+
+ if built_slot_op_atoms and reinst_pkg is not None:
+ for child in self._dynamic_config.digraph.child_nodes(
+ reinst_pkg):
+
+ if child.installed:
+ continue
+
+ for atom in built_slot_op_atoms:
+ # NOTE: Since atom comes from inst_pkg, and
+ # reinst_pkg is the replacement parent, there's
+ # no guarantee that atom will completely match
+ # child. So, simply use atom.cp and atom.slot
+ # for matching.
+ if atom.cp != child.cp:
+ continue
+ if atom.slot and atom.slot != child.slot:
+ continue
+ deps.append(Dependency(atom=atom, child=child,
+ root=child.root, parent=reinst_pkg))
+
+ for dep in deps:
+ if dep.child.installed:
+ # Find the replacement child.
+ child = next((pkg for pkg in
+ self._dynamic_config._package_tracker.match(
+ dep.root, dep.child.slot_atom)
+ if not pkg.installed), None)
+
+ if child is None:
+ continue
+
+ inst_child = dep.child
+
+ else:
+ child = dep.child
+ inst_child = self._select_pkg_from_installed(
+ child.root, child.slot_atom)[0]
+
+ # Make sure the child's slot/subslot has changed. If it
+ # hasn't, then another child has forced this rebuild.
+ if inst_child and inst_child.slot == child.slot and \
+ inst_child.sub_slot == child.sub_slot:
+ continue
+
+ if dep.parent.installed:
+ # Find the replacement parent.
+ parent = next((pkg for pkg in
+ self._dynamic_config._package_tracker.match(
+ dep.parent.root, dep.parent.slot_atom)
+ if not pkg.installed), None)
+
+ if parent is None:
+ continue
+
+ else:
+ parent = dep.parent
+
+ # The child has forced a rebuild of the parent
+ forced_rebuilds.setdefault(root, {}
+ ).setdefault(child, set()).add(parent)
+
+ if debug:
+ writemsg_level("slot operator dependencies:\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ for (root, slot_atom), deps in self._dynamic_config._slot_operator_deps.items():
+ writemsg_level(" (%s, %s)\n" % \
+ (root, slot_atom), level=logging.DEBUG, noiselevel=-1)
+ for dep in deps:
+ writemsg_level(" parent: %s\n" % dep.parent, level=logging.DEBUG, noiselevel=-1)
+ writemsg_level(" child: %s (%s)\n" % (dep.child, dep.priority), level=logging.DEBUG, noiselevel=-1)
+
+ writemsg_level("\n\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+
+ writemsg_level("forced rebuilds:\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ for root in forced_rebuilds:
+ writemsg_level(" root: %s\n" % root,
+ level=logging.DEBUG, noiselevel=-1)
+ for child in forced_rebuilds[root]:
+ writemsg_level(" child: %s\n" % child,
+ level=logging.DEBUG, noiselevel=-1)
+ for parent in forced_rebuilds[root][child]:
+ writemsg_level(" parent: %s\n" % parent,
+ level=logging.DEBUG, noiselevel=-1)
+ writemsg_level("\n\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ self._forced_rebuilds = forced_rebuilds
+
+ def _show_abi_rebuild_info(self):
+
+ if not self._forced_rebuilds:
+ return
+
+ writemsg_stdout("\nThe following packages are causing rebuilds:\n\n", noiselevel=-1)
+
+ for root in self._forced_rebuilds:
+ for child in self._forced_rebuilds[root]:
+ writemsg_stdout(" %s causes rebuilds for:\n" % (child,), noiselevel=-1)
+ for parent in self._forced_rebuilds[root][child]:
+ writemsg_stdout(" %s\n" % (parent,), noiselevel=-1)
+
+ def _eliminate_ignored_binaries(self):
+ """
+ Eliminate any package from self._dynamic_config.ignored_binaries
+ for which a more optimal alternative exists.
+ """
+ for pkg in list(self._dynamic_config.ignored_binaries):
+
+ for selected_pkg in self._dynamic_config._package_tracker.match(
+ pkg.root, pkg.slot_atom):
+
+ if selected_pkg > pkg:
+ self._dynamic_config.ignored_binaries.pop(pkg)
+ break
+
+ # NOTE: The Package.__ge__ implementation accounts for
+ # differences in build_time, so the warning about "ignored"
+ # packages will be triggered if both packages are the same
+ # version and selected_pkg is not the most recent build.
+ if (selected_pkg.type_name == "binary" and
+ selected_pkg >= pkg):
+ self._dynamic_config.ignored_binaries.pop(pkg)
+ break
+
+ if selected_pkg.installed and \
+ selected_pkg.cpv == pkg.cpv and \
+ selected_pkg.build_time == pkg.build_time:
+ # We don't care about ignored binaries when an
+ # identical installed instance is selected to
+ # fill the slot.
+ self._dynamic_config.ignored_binaries.pop(pkg)
+ break
+
+ def _ignored_binaries_autounmask_backtrack(self):
+ """
+ Check if there are ignored binaries that would have been
+ accepted with the current autounmask USE changes.
+
+ @rtype: bool
+ @return: True if there are unnecessary rebuilds that
+ can be avoided by backtracking
+ """
+ if not all([
+ self._dynamic_config._allow_backtracking,
+ self._dynamic_config._needed_use_config_changes,
+ self._dynamic_config.ignored_binaries]):
+ return False
+
+ self._eliminate_ignored_binaries()
+
+ # _eliminate_ignored_binaries may have eliminated
+ # all of the ignored binaries
+ if not self._dynamic_config.ignored_binaries:
+ return False
+
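+  # Index pending autounmask USE changes by root and slot atom,
+  # restricted to packages that are in the dependency graph.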
+ use_changes = collections.defaultdict(
+ functools.partial(collections.defaultdict, dict))
+ for pkg, (new_use, changes) in self._dynamic_config._needed_use_config_changes.items():
+ if pkg in self._dynamic_config.digraph:
+ use_changes[pkg.root][pkg.slot_atom] = (pkg, new_use)
+
+ for pkg in self._dynamic_config.ignored_binaries:
+ selected_pkg, new_use = use_changes[pkg.root].get(
+ pkg.slot_atom, (None, None))
+ if new_use is None:
+ continue
+
+ if new_use != pkg.use.enabled:
+ continue
+
+ if selected_pkg > pkg:
+ continue
+
+ return True
+
+ return False
+
+ def _changed_deps_report(self):
+ """
+ Report ebuilds for which the ebuild dependencies have
+ changed since the installed instance was built. This is
+ completely silent in the following cases:
+
+ * --changed-deps or --dynamic-deps is enabled
+ * none of the packages with changed deps are in the graph
+ """
+ if (self._dynamic_config.myparams.get("changed_deps", "n") == "y" or
+ "dynamic_deps" in self._dynamic_config.myparams):
+ return
+
+ report_pkgs = []
+ for pkg, ebuild in self._dynamic_config._changed_deps_pkgs.items():
+ if pkg.repo != ebuild.repo:
+ continue
+ report_pkgs.append((pkg, ebuild))
+
+ if not report_pkgs:
+ return
+
+ # TODO: Detect and report various issues:
+ # - packages with unsatisfiable dependencies
+ # - packages involved directly in slot or blocker conflicts
+ # - direct parents of conflict packages
+ # - packages that prevent upgrade of dependencies to latest versions
+ graph = self._dynamic_config.digraph
+ in_graph = False
+ for pkg, ebuild in report_pkgs:
+ if pkg in graph:
+ in_graph = True
+ break
+
+ # Packages with changed deps are harmless if they're not in the
+ # graph, so it's safe to silently ignore them. This suppresses
+ # noise for the unaffected user, even though some of the changed
+ # dependencies might be worthy of revision bumps.
+ if not in_graph:
+ return
+
+ writemsg("\n%s\n\n" % colorize("WARN",
+ "!!! Detected ebuild dependency change(s) without revision bump:"),
+ noiselevel=-1)
+
+ for pkg, ebuild in report_pkgs:
+ writemsg(" %s::%s" % (pkg.cpv, pkg.repo), noiselevel=-1)
+ if pkg.root_config.settings["ROOT"] != "/":
+ writemsg(" for %s" % (pkg.root,), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+
+ msg = []
+ if '--quiet' not in self._frozen_config.myopts:
+ msg.extend([
+ "",
+ "NOTE: Refer to the following page for more information about dependency",
+ " change(s) without revision bump:",
+ "",
+ " https://wiki.gentoo.org/wiki/Project:Portage/Changed_Deps",
+ "",
+ " In order to suppress reports about dependency changes, add",
+ " --changed-deps-report=n to the EMERGE_DEFAULT_OPTS variable in",
+ " '/etc/portage/make.conf'.",
+ ])
+
+ # Include this message for --quiet mode, since the user may be experiencing
+ # problems that are solvable by using --changed-deps.
+ msg.extend([
+ "",
+ "HINT: In order to avoid problems involving changed dependencies, use the",
+ " --changed-deps option to automatically trigger rebuilds when changed",
+ " dependencies are detected. Refer to the emerge man page for more",
+ " information about this option.",
+ ])
+
+ for line in msg:
+ if line:
+ line = colorize("INFORM", line)
+ writemsg(line + "\n", noiselevel=-1)
+
+ def _show_ignored_binaries(self):
+ """
+ Show binaries that have been ignored because their USE didn't
+ match the user's config.
+ """
+ if not self._dynamic_config.ignored_binaries \
+ or '--quiet' in self._frozen_config.myopts:
+ return
+
+ self._eliminate_ignored_binaries()
+
+ ignored_binaries = {}
+
+ for pkg in self._dynamic_config.ignored_binaries:
+ for reason, info in self._dynamic_config.\
+ ignored_binaries[pkg].items():
+ ignored_binaries.setdefault(reason, {})[pkg] = info
+
+ if self._dynamic_config.myparams.get(
+ "binpkg_respect_use") in ("y", "n"):
+ ignored_binaries.pop("respect_use", None)
+
+ if self._dynamic_config.myparams.get(
+ "binpkg_changed_deps") in ("y", "n"):
+ ignored_binaries.pop("changed_deps", None)
+
+ if not ignored_binaries:
+ return
+
+ self._show_merge_list()
+
+ if ignored_binaries.get("respect_use"):
+ self._show_ignored_binaries_respect_use(
+ ignored_binaries["respect_use"])
+
+ if ignored_binaries.get("changed_deps"):
+ self._show_ignored_binaries_changed_deps(
+ ignored_binaries["changed_deps"])
+
+ def _show_ignored_binaries_respect_use(self, respect_use):
+
+  writemsg("\n!!! The following binary packages have been ignored " + \
+   "due to non-matching USE:\n\n", noiselevel=-1)
+
+ for pkg, flags in respect_use.items():
+ flag_display = []
+ for flag in sorted(flags):
+ if flag not in pkg.use.enabled:
+ flag = "-" + flag
+ flag_display.append(flag)
+ flag_display = " ".join(flag_display)
+ # The user can paste this line into package.use
+ writemsg(" =%s %s" % (pkg.cpv, flag_display), noiselevel=-1)
+ if pkg.root_config.settings["ROOT"] != "/":
+ writemsg(" # for %s" % (pkg.root,), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+
+ msg = [
+ "",
+ "NOTE: The --binpkg-respect-use=n option will prevent emerge",
+ " from ignoring these binary packages if possible.",
+ " Using --binpkg-respect-use=y will silence this warning."
+ ]
+
+ for line in msg:
+ if line:
+ line = colorize("INFORM", line)
+ writemsg(line + "\n", noiselevel=-1)
+
+ def _show_ignored_binaries_changed_deps(self, changed_deps):
+
+ writemsg("\n!!! The following binary packages have been "
+ "ignored due to changed dependencies:\n\n",
+ noiselevel=-1)
+
+ for pkg in changed_deps:
+ msg = " %s%s%s" % (pkg.cpv, _repo_separator, pkg.repo)
+ if pkg.root_config.settings["ROOT"] != "/":
+ msg += " for %s" % pkg.root
+ writemsg("%s\n" % msg, noiselevel=-1)
+
+ msg = [
+ "",
+ "NOTE: The --binpkg-changed-deps=n option will prevent emerge",
+ " from ignoring these binary packages if possible.",
+ " Using --binpkg-changed-deps=y will silence this warning."
+ ]
+
+ for line in msg:
+ if line:
+ line = colorize("INFORM", line)
+ writemsg(line + "\n", noiselevel=-1)
+
+ def _get_missed_updates(self):
+
+ # In order to minimize noise, show only the highest
+ # missed update from each SLOT.
+ missed_updates = {}
+ for pkg, mask_reasons in \
+ chain(self._dynamic_config._runtime_pkg_mask.items(),
+ self._dynamic_config._conflict_missed_update.items()):
+ if pkg.installed:
+ # Exclude installed here since we only
+ # want to show available updates.
+ continue
+ missed_update = True
+ any_selected = False
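+   # Count the update as missed only if the slot is filled and nothing
+   # chosen is newer or an equal-version new install.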
+ for chosen_pkg in self._dynamic_config._package_tracker.match(
+ pkg.root, pkg.slot_atom):
+ any_selected = True
+ if chosen_pkg > pkg or (not chosen_pkg.installed and \
+ chosen_pkg.version == pkg.version):
+ missed_update = False
+ break
+ if any_selected and missed_update:
+ k = (pkg.root, pkg.slot_atom)
+ if k in missed_updates:
+ other_pkg, mask_type, parent_atoms = missed_updates[k]
+ if other_pkg > pkg:
+ continue
+ for mask_type, parent_atoms in mask_reasons.items():
+ if not parent_atoms:
+ continue
+ missed_updates[k] = (pkg, mask_type, parent_atoms)
+ break
+
+ return missed_updates
+
+ def _show_missed_update(self):
+
+ missed_updates = self._get_missed_updates()
+
+ if not missed_updates:
+ return
+
+ missed_update_types = {}
+ for pkg, mask_type, parent_atoms in missed_updates.values():
+ missed_update_types.setdefault(mask_type,
+ []).append((pkg, parent_atoms))
+
+ if '--quiet' in self._frozen_config.myopts and \
+ '--debug' not in self._frozen_config.myopts:
+ missed_update_types.pop("slot conflict", None)
+ missed_update_types.pop("missing dependency", None)
+
+ self._show_missed_update_slot_conflicts(
+ missed_update_types.get("slot conflict"))
+
+ self._show_missed_update_unsatisfied_dep(
+ missed_update_types.get("missing dependency"))
+
+ def _show_missed_update_unsatisfied_dep(self, missed_updates):
+
+ if not missed_updates:
+ return
+
+ self._show_merge_list()
+ backtrack_masked = []
+
+ for pkg, parent_atoms in missed_updates:
+
+ try:
+ for parent, root, atom in parent_atoms:
+ self._show_unsatisfied_dep(root, atom, myparent=parent,
+ check_backtrack=True)
+ except self._backtrack_mask:
+ # This is displayed below in abbreviated form.
+ backtrack_masked.append((pkg, parent_atoms))
+ continue
+
+ writemsg("\n!!! The following update has been skipped " + \
+ "due to unsatisfied dependencies:\n\n", noiselevel=-1)
+
+ writemsg(str(pkg.slot_atom), noiselevel=-1)
+ if pkg.root_config.settings["ROOT"] != "/":
+ writemsg(" for %s" % (pkg.root,), noiselevel=-1)
+ writemsg("\n\n", noiselevel=-1)
+
+ selected_pkg = next(self._dynamic_config._package_tracker.match(
+ pkg.root, pkg.slot_atom), None)
+
+ writemsg(" selected: %s\n" % (selected_pkg,), noiselevel=-1)
+ writemsg(" skipped: %s (see unsatisfied dependency below)\n"
+ % (pkg,), noiselevel=-1)
+
+ for parent, root, atom in parent_atoms:
+ self._show_unsatisfied_dep(root, atom, myparent=parent)
+ writemsg("\n", noiselevel=-1)
+
+ if backtrack_masked:
+ # These are shown in abbreviated form, in order to avoid terminal
+ # flooding from mask messages as reported in bug #285832.
+ writemsg("\n!!! The following update(s) have been skipped " + \
+ "due to unsatisfied dependencies\n" + \
+ "!!! triggered by backtracking:\n\n", noiselevel=-1)
+ for pkg, parent_atoms in backtrack_masked:
+ writemsg(str(pkg.slot_atom), noiselevel=-1)
+ if pkg.root_config.settings["ROOT"] != "/":
+ writemsg(" for %s" % (pkg.root,), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+
+ def _show_missed_update_slot_conflicts(self, missed_updates):
+
+ if not missed_updates:
+ return
+
+ self._show_merge_list()
+ msg = []
+ msg.append("\nWARNING: One or more updates/rebuilds have been " + \
+ "skipped due to a dependency conflict:\n\n")
+
+ indent = " "
+ for pkg, parent_atoms in missed_updates:
+ msg.append(str(pkg.slot_atom))
+ if pkg.root_config.settings["ROOT"] != "/":
+ msg.append(" for %s" % (pkg.root,))
+ msg.append("\n\n")
+
+ msg.append(indent)
+ msg.append(str(pkg))
+ msg.append(" conflicts with\n")
+
+ for parent, atom in parent_atoms:
+ if isinstance(parent,
+ (PackageArg, AtomArg)):
+ # For PackageArg and AtomArg types, it's
+ # redundant to display the atom attribute.
+ msg.append(2*indent)
+ msg.append(str(parent))
+ msg.append("\n")
+ else:
+ # Display the specific atom from SetArg or
+ # Package types.
+ atom, marker = format_unmatched_atom(
+ pkg, atom, self._pkg_use_enabled)
+
+ msg.append(2*indent)
+ msg.append("%s required by %s\n" % (atom, parent))
+ msg.append(2*indent)
+ msg.append(marker)
+ msg.append("\n")
+ msg.append("\n")
+
+ writemsg("".join(msg), noiselevel=-1)
+
+ def _show_slot_collision_notice(self):
+  """Show an informational message advising the user to mask one of
+  the packages. In some cases it may be possible to resolve this
+  automatically, but support for backtracking (removal of nodes that
+  have already been selected) will be required in order to handle all
+  possible cases.
+ """
+
+ if not any(self._dynamic_config._package_tracker.slot_conflicts()):
+ return
+
+ self._show_merge_list()
+
+ if self._dynamic_config._slot_conflict_handler is None:
+ self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
+ handler = self._dynamic_config._slot_conflict_handler
+
+ conflict = handler.get_conflict()
+ writemsg(conflict, noiselevel=-1)
+
+ explanation = handler.get_explanation()
+ if explanation:
+ writemsg(explanation, noiselevel=-1)
+ return
+
+ if "--quiet" in self._frozen_config.myopts:
+ return
+
+ msg = []
+ msg.append("It may be possible to solve this problem ")
+ msg.append("by using package.mask to prevent one of ")
+ msg.append("those packages from being selected. ")
+ msg.append("However, it is also possible that conflicting ")
+ msg.append("dependencies exist such that they are impossible to ")
+ msg.append("satisfy simultaneously. If such a conflict exists in ")
+ msg.append("the dependencies of two different packages, then those ")
+ msg.append("packages can not be installed simultaneously.")
+ backtrack_opt = self._frozen_config.myopts.get('--backtrack')
+ if not self._dynamic_config._allow_backtracking and \
+ (backtrack_opt is None or \
+ (backtrack_opt > 0 and backtrack_opt < 30)):
+ msg.append(" You may want to try a larger value of the ")
+ msg.append("--backtrack option, such as --backtrack=30, ")
+ msg.append("in order to see if that will solve this conflict ")
+ msg.append("automatically.")
+
+ for line in textwrap.wrap(''.join(msg), 70):
+ writemsg(line + '\n', noiselevel=-1)
+ writemsg('\n', noiselevel=-1)
+
+ msg = []
+ msg.append("For more information, see MASKED PACKAGES ")
+ msg.append("section in the emerge man page or refer ")
+ msg.append("to the Gentoo Handbook.")
+ for line in textwrap.wrap(''.join(msg), 70):
+ writemsg(line + '\n', noiselevel=-1)
+ writemsg('\n', noiselevel=-1)
+
+ def _solve_non_slot_operator_slot_conflicts(self):
+ """
+  This function solves slot conflicts which can be solved by
+  simply choosing one of the conflicting packages and removing
+  all the others. It is able to solve somewhat more complex
+  cases where conflicts can only be solved simultaneously.
+ """
+ debug = "--debug" in self._frozen_config.myopts
+
+ # List all conflicts. Ignore those that involve slot operator rebuilds
+ # as the logic there needs special slot conflict behavior which isn't
+ # provided by this function.
+ conflicts = []
+ for conflict in self._dynamic_config._package_tracker.slot_conflicts():
+ slot_key = conflict.root, conflict.atom
+ if slot_key not in self._dynamic_config._slot_operator_replace_installed:
+ conflicts.append(conflict)
+
+ if not conflicts:
+ return
+
+ if debug:
+ writemsg_level(
+ "\n!!! Slot conflict handler started.\n",
+ level=logging.DEBUG, noiselevel=-1)
+
+ # Get a set of all conflicting packages.
+ conflict_pkgs = set()
+ for conflict in conflicts:
+ conflict_pkgs.update(conflict)
+
+ # Get the list of other packages which are only
+ # required by conflict packages.
+ indirect_conflict_candidates = set()
+ for pkg in conflict_pkgs:
+ indirect_conflict_candidates.update(self._dynamic_config.digraph.child_nodes(pkg))
+ indirect_conflict_candidates.difference_update(conflict_pkgs)
+
+ indirect_conflict_pkgs = set()
+ while indirect_conflict_candidates:
+ pkg = indirect_conflict_candidates.pop()
+
+ only_conflict_parents = True
+ for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
+ if parent not in conflict_pkgs and parent not in indirect_conflict_pkgs:
+ only_conflict_parents = False
+ break
+ if not only_conflict_parents:
+ continue
+
+ indirect_conflict_pkgs.add(pkg)
+ for child in self._dynamic_config.digraph.child_nodes(pkg):
+ if child in conflict_pkgs or child in indirect_conflict_pkgs:
+ continue
+ indirect_conflict_candidates.add(child)
+
+ # Create a graph containing the conflict packages
+ # and a special 'non_conflict_node' that represents
+ # all non-conflict packages.
+ conflict_graph = digraph()
+
+ non_conflict_node = "(non-conflict package)"
+ conflict_graph.add(non_conflict_node, None)
+
+ for pkg in chain(conflict_pkgs, indirect_conflict_pkgs):
+ conflict_graph.add(pkg, None)
+
+ # Add parent->child edges for each conflict package.
+ # Parents, which aren't conflict packages are represented
+ # by 'non_conflict_node'.
+ # If several conflicting packages are matched, but not all,
+ # add a tuple with the matched packages to the graph.
+ class or_tuple(tuple):
+ """
+ Helper class for debug printing.
+ """
+ def __str__(self):
+ return "(%s)" % ",".join(str(pkg) for pkg in self)
+
+ non_matching_forced = set()
+ for conflict in conflicts:
+ if debug:
+ writemsg_level(" conflict:\n", level=logging.DEBUG, noiselevel=-1)
+ writemsg_level(" root: %s\n" % conflict.root, level=logging.DEBUG, noiselevel=-1)
+ writemsg_level(" atom: %s\n" % conflict.atom, level=logging.DEBUG, noiselevel=-1)
+ for pkg in conflict:
+ writemsg_level(" pkg: %s\n" % pkg, level=logging.DEBUG, noiselevel=-1)
+
+ all_parent_atoms = set()
+ highest_pkg = None
+ inst_pkg = None
+ for pkg in conflict:
+ if pkg.installed:
+ inst_pkg = pkg
+ if highest_pkg is None or highest_pkg < pkg:
+ highest_pkg = pkg
+ all_parent_atoms.update(
+ self._dynamic_config._parent_atoms.get(pkg, []))
+
+ for parent, atom in all_parent_atoms:
+ is_arg_parent = (inst_pkg is not None and
+ not self._want_installed_pkg(inst_pkg))
+ is_non_conflict_parent = parent not in conflict_pkgs and \
+ parent not in indirect_conflict_pkgs
+
+ if debug:
+ writemsg_level(" parent: %s\n" % parent, level=logging.DEBUG, noiselevel=-1)
+ writemsg_level(" arg, non-conflict: %s, %s\n" % (is_arg_parent, is_non_conflict_parent),
+ level=logging.DEBUG, noiselevel=-1)
+ writemsg_level(" atom: %s\n" % atom, level=logging.DEBUG, noiselevel=-1)
+
+ if is_non_conflict_parent:
+ parent = non_conflict_node
+
+ matched = []
+ for pkg in conflict:
+ if (pkg is highest_pkg and
+ not highest_pkg.installed and
+ inst_pkg is not None and
+ inst_pkg.sub_slot != highest_pkg.sub_slot and
+ not self._downgrade_probe(highest_pkg)):
+ # If an upgrade is desired, force the highest
+ # version into the graph (bug #531656).
+ non_matching_forced.add(highest_pkg)
+
+ if atom.match(pkg.with_use(
+ self._pkg_use_enabled(pkg))) and \
+ not (is_arg_parent and pkg.installed):
+ matched.append(pkg)
+
+ if debug:
+ for match in matched:
+ writemsg_level(" match: %s\n" % match, level=logging.DEBUG, noiselevel=-1)
+
+ if len(matched) > 1:
+ # Even if all packages match, this parent must still
+ # be added to the conflict_graph. Otherwise, we risk
+ # removing all of these packages from the depgraph,
+ # which could cause a missed update (bug #522084).
+ conflict_graph.add(or_tuple(matched), parent)
+ elif len(matched) == 1:
+ conflict_graph.add(matched[0], parent)
+ else:
+ # This typically means that autounmask broke a
+ # USE-dep, but it could also be due to the slot
+ # not matching due to multislot (bug #220341).
+ # Either way, don't try to solve this conflict.
+ # Instead, force them all into the graph so that
+ # they are protected from removal.
+ non_matching_forced.update(conflict)
+ if debug:
+ for pkg in conflict:
+ writemsg_level(" non-match: %s\n" % pkg,
+ level=logging.DEBUG, noiselevel=-1)
+
+ for pkg in indirect_conflict_pkgs:
+ for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
+ if parent not in conflict_pkgs and \
+ parent not in indirect_conflict_pkgs:
+ parent = non_conflict_node
+ conflict_graph.add(pkg, parent)
+
+ if debug:
+ writemsg_level(
+ "\n!!! Slot conflict graph:\n",
+ level=logging.DEBUG, noiselevel=-1)
+ conflict_graph.debug_print()
+
+ # Now select required packages. Collect them in the
+ # 'forced' set.
+ forced = set([non_conflict_node])
+ forced.update(non_matching_forced)
+ unexplored = set([non_conflict_node])
+ # or_tuples get special handling. We first explore
+ # all packages in the hope of having forced one of
+ # the packages in the tuple. This way we don't have
+ # to choose one.
+ unexplored_tuples = set()
+ explored_nodes = set()
+
+ while unexplored:
+ while True:
+ try:
+ node = unexplored.pop()
+ except KeyError:
+ break
+ for child in conflict_graph.child_nodes(node):
+ # Don't explore a node more than once, in order
+ # to avoid infinite recursion. The forced set
+ # cannot be used for this purpose, since it can
+ # contain unexplored nodes from non_matching_forced.
+ if child in explored_nodes:
+ continue
+ explored_nodes.add(child)
+ forced.add(child)
+ if isinstance(child, Package):
+ unexplored.add(child)
+ else:
+ unexplored_tuples.add(child)
+
+ # Now handle unexplored or_tuples. Move on with packages
+ # once we had to choose one.
+ while unexplored_tuples:
+ nodes = unexplored_tuples.pop()
+ if any(node in forced for node in nodes):
+ # At least one of the packages in the
+ # tuple is already forced, which means the
+ # dependency represented by this tuple
+ # is satisfied.
+ continue
+
+    # We now have to choose one of the packages in the tuple.
+    # In theory one could solve more conflicts if we were able
+    # to try different choices here, but that has lots of other
+    # problems. For now choose the package that was pulled first,
+    # as this should be the most desirable choice (otherwise it
+    # wouldn't have been the first one).
+ forced.add(nodes[0])
+ unexplored.add(nodes[0])
+ break
+
+ # Remove 'non_conflict_node' and or_tuples from 'forced'.
+ forced = set(pkg for pkg in forced if isinstance(pkg, Package))
+
+  # Add dependencies of forced packages.
+ stack = list(forced)
+ traversed = set()
+ while stack:
+ pkg = stack.pop()
+ traversed.add(pkg)
+ for child in conflict_graph.child_nodes(pkg):
+ if (isinstance(child, Package) and
+ child not in traversed):
+ forced.add(child)
+ stack.append(child)
+
+ non_forced = set(pkg for pkg in conflict_pkgs if pkg not in forced)
+
+ if debug:
+ writemsg_level(
+ "\n!!! Slot conflict solution:\n",
+ level=logging.DEBUG, noiselevel=-1)
+ for conflict in conflicts:
+ writemsg_level(
+ " Conflict: (%s, %s)\n" % (conflict.root, conflict.atom),
+ level=logging.DEBUG, noiselevel=-1)
+ for pkg in conflict:
+ if pkg in forced:
+ writemsg_level(
+ " keep: %s\n" % pkg,
+ level=logging.DEBUG, noiselevel=-1)
+ else:
+ writemsg_level(
+ " remove: %s\n" % pkg,
+ level=logging.DEBUG, noiselevel=-1)
+
+ broken_packages = set()
+ for pkg in non_forced:
+ for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
+ if isinstance(parent, Package) and parent not in non_forced:
+ # Non-forcing set args are expected to be a parent of all
+ # packages in the conflict.
+ broken_packages.add(parent)
+ self._remove_pkg(pkg)
+
+  # Process the dependencies of chosen conflict packages
+ # again to properly account for blockers.
+ broken_packages.update(forced)
+
+ # Filter out broken packages which have been removed during
+ # recursive removal in self._remove_pkg.
+  broken_packages = list(pkg for pkg in broken_packages
+   if self._dynamic_config._package_tracker.contains(pkg, installed=False))
+
+ self._dynamic_config._dep_stack.extend(broken_packages)
+
+ if broken_packages:
+ # Process dependencies. This cannot fail because we just ensured that
+ # the remaining packages satisfy all dependencies.
+ self._create_graph()
+
+ # Record missed updates.
+ for conflict in conflicts:
+ if not any(pkg in non_forced for pkg in conflict):
+ continue
+ for pkg in conflict:
+ if pkg not in non_forced:
+ continue
+
+ for other in conflict:
+ if other is pkg:
+ continue
+
+ for parent, atom in self._dynamic_config._parent_atoms.get(other, []):
+ if not atom.match(pkg.with_use(self._pkg_use_enabled(pkg))):
+ self._dynamic_config._conflict_missed_update[pkg].setdefault(
+ "slot conflict", set())
+ self._dynamic_config._conflict_missed_update[pkg]["slot conflict"].add(
+ (parent, atom))
+
+
+ def _process_slot_conflicts(self):
+ """
+ If there are any slot conflicts and backtracking is enabled,
+ _complete_graph should complete the graph before this method
+ is called, so that all relevant reverse dependencies are
+ available for use in backtracking decisions.
+ """
+
+ self._solve_non_slot_operator_slot_conflicts()
+
+ if not self._validate_blockers():
+ # Blockers don't trigger the _skip_restart flag, since
+ # backtracking may solve blockers when it solves slot
+ # conflicts (or by blind luck).
+ raise self._unknown_internal_error()
+
+ # Both _process_slot_conflict and _slot_operator_trigger_reinstalls
+ # can call _slot_operator_update_probe, which requires that
+ # self._dynamic_config._blocked_pkgs has been initialized by a
+ # call to the _validate_blockers method.
+ for conflict in self._dynamic_config._package_tracker.slot_conflicts():
+ self._process_slot_conflict(conflict)
+
+ if self._dynamic_config._allow_backtracking:
+ self._slot_operator_trigger_reinstalls()
+
+ def _process_slot_conflict(self, conflict):
+ """
+ Process slot conflict data to identify specific atoms which
+ lead to conflict. These atoms only match a subset of the
+ packages that have been pulled into a given slot.
+ """
+ root = conflict.root
+ slot_atom = conflict.atom
+ slot_nodes = conflict.pkgs
+
+ debug = "--debug" in self._frozen_config.myopts
+
+ slot_parent_atoms = set()
+ for pkg in slot_nodes:
+ parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
+ if not parent_atoms:
+ continue
+ slot_parent_atoms.update(parent_atoms)
+
+ conflict_pkgs = []
+ conflict_atoms = {}
+ for pkg in slot_nodes:
+
+ if self._dynamic_config._allow_backtracking and \
+ pkg in self._dynamic_config._runtime_pkg_mask:
+ if debug:
+ writemsg_level(
+ "!!! backtracking loop detected: %s %s\n" % \
+ (pkg,
+ self._dynamic_config._runtime_pkg_mask[pkg]),
+ level=logging.DEBUG, noiselevel=-1)
+
+ parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
+ if parent_atoms is None:
+ parent_atoms = set()
+ self._dynamic_config._parent_atoms[pkg] = parent_atoms
+
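+   # Any slot parent atom that does not match this package marks it
+   # as a conflict package; the offending atom is recorded in
+   # conflict_atoms.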
+ all_match = True
+ for parent_atom in slot_parent_atoms:
+ if parent_atom in parent_atoms:
+ continue
+ parent, atom = parent_atom
+ if atom.match(pkg.with_use(self._pkg_use_enabled(pkg))):
+ parent_atoms.add(parent_atom)
+ else:
+ all_match = False
+ conflict_atoms.setdefault(parent_atom, set()).add(pkg)
+
+ if not all_match:
+ conflict_pkgs.append(pkg)
+
+ if conflict_pkgs and \
+ self._dynamic_config._allow_backtracking and \
+ not self._accept_blocker_conflicts():
+ remaining = []
+ for pkg in conflict_pkgs:
+ if self._slot_conflict_backtrack_abi(pkg,
+ slot_nodes, conflict_atoms):
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ config = backtrack_infos.setdefault("config", {})
+ config.setdefault("slot_conflict_abi", set()).add(pkg)
+ else:
+ remaining.append(pkg)
+ if remaining:
+ self._slot_confict_backtrack(root, slot_atom,
+ slot_parent_atoms, remaining)
+
+ def _slot_confict_backtrack(self, root, slot_atom,
+ all_parents, conflict_pkgs):
+
+ debug = "--debug" in self._frozen_config.myopts
+ existing_node = next(self._dynamic_config._package_tracker.match(
+ root, slot_atom, installed=False))
+ # In order to avoid a missed update, first mask lower versions
+ # that conflict with higher versions (the backtracker visits
+ # these in reverse order).
+ conflict_pkgs.sort(reverse=True)
+ backtrack_data = []
+ for to_be_masked in conflict_pkgs:
+ # For missed update messages, find out which
+ # atoms matched to_be_selected that did not
+ # match to_be_masked.
+ parent_atoms = \
+ self._dynamic_config._parent_atoms.get(to_be_masked, set())
+ conflict_atoms = set(parent_atom for parent_atom in all_parents \
+ if parent_atom not in parent_atoms)
+ backtrack_data.append((to_be_masked, conflict_atoms))
+
+ to_be_masked = backtrack_data[-1][0]
+
+ self._dynamic_config._backtrack_infos.setdefault(
+ "slot conflict", []).append(backtrack_data)
+ self._dynamic_config._need_restart = True
+ if debug:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("backtracking due to slot conflict:")
+ msg.append(" first package: %s" % existing_node)
+ msg.append(" package to mask: %s" % to_be_masked)
+ msg.append(" slot: %s" % slot_atom)
+ msg.append(" parents: %s" % ", ".join( \
+ "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents))
+ msg.append("")
+ writemsg_level("".join("%s\n" % l for l in msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ def _slot_conflict_backtrack_abi(self, pkg, slot_nodes, conflict_atoms):
+ """
+ If one or more conflict atoms have a slot/sub-slot dep that can be resolved
+ by rebuilding the parent package, then schedule the rebuild via
+ backtracking, and return True. Otherwise, return False.
+ """
+
+ found_update = False
+ for parent_atom, conflict_pkgs in conflict_atoms.items():
+ parent, atom = parent_atom
+
+ if not isinstance(parent, Package):
+ continue
+
+ if not parent.built:
+ continue
+
+ if not atom.soname and not (
+ atom.package and atom.slot_operator_built):
+ continue
+
+ for other_pkg in slot_nodes:
+ if other_pkg in conflict_pkgs:
+ continue
+
+ dep = Dependency(atom=atom, child=other_pkg,
+ parent=parent, root=pkg.root)
+
+ new_dep = \
+ self._slot_operator_update_probe_slot_conflict(dep)
+ if new_dep is not None:
+ self._slot_operator_update_backtrack(dep,
+ new_dep=new_dep)
+ found_update = True
+
+ return found_update
+
+ def _slot_change_probe(self, dep):
+ """
+  @rtype: Package or None
+  @return: an available ebuild instance to rebuild against, if dep.child
+   should be rebuilt due to a change in sub-slot (without revbump, as
+   in bug #456208), otherwise None.
+ """
+ if not (isinstance(dep.parent, Package) and \
+ not dep.parent.built and dep.child.built):
+ return None
+
+ root_config = self._frozen_config.roots[dep.root]
+ matches = []
+ try:
+ matches.append(self._pkg(dep.child.cpv, "ebuild",
+ root_config, myrepo=dep.child.repo))
+ except PackageNotFound:
+ pass
+
+ for unbuilt_child in chain(matches,
+ self._iter_match_pkgs(root_config, "ebuild",
+ Atom("=%s" % (dep.child.cpv,)))):
+ if unbuilt_child in self._dynamic_config._runtime_pkg_mask:
+ continue
+ if self._frozen_config.excluded_pkgs.findAtomForPackage(
+ unbuilt_child,
+ modified_use=self._pkg_use_enabled(unbuilt_child)):
+ continue
+ if not self._pkg_visibility_check(unbuilt_child):
+ continue
+ break
+ else:
+ return None
+
+ if unbuilt_child.slot == dep.child.slot and \
+ unbuilt_child.sub_slot == dep.child.sub_slot:
+ return None
+
+ return unbuilt_child
+
+ def _slot_change_backtrack(self, dep, new_child_slot):
+ child = dep.child
+ if "--debug" in self._frozen_config.myopts:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("backtracking due to slot/sub-slot change:")
+ msg.append(" child package: %s" % child)
+ msg.append(" child slot: %s/%s" %
+ (child.slot, child.sub_slot))
+ msg.append(" new child: %s" % new_child_slot)
+ msg.append(" new child slot: %s/%s" %
+ (new_child_slot.slot, new_child_slot.sub_slot))
+ msg.append(" parent package: %s" % dep.parent)
+ msg.append(" atom: %s" % dep.atom)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ config = backtrack_infos.setdefault("config", {})
+
+ # mask unwanted binary packages if necessary
+ masks = {}
+ if not child.installed:
+ masks.setdefault(dep.child, {})["slot_operator_mask_built"] = None
+ if masks:
+ config.setdefault("slot_operator_mask_built", {}).update(masks)
+
+ # trigger replacement of installed packages if necessary
+ reinstalls = set()
+ if child.installed:
+ replacement_atom = self._replace_installed_atom(child)
+ if replacement_atom is not None:
+ reinstalls.add((child.root, replacement_atom))
+ if reinstalls:
+ config.setdefault("slot_operator_replace_installed",
+ set()).update(reinstalls)
+
+ self._dynamic_config._need_restart = True
+
+ def _slot_operator_update_backtrack(self, dep, new_child_slot=None,
+ new_dep=None):
+ if new_child_slot is None:
+ child = dep.child
+ else:
+ child = new_child_slot
+ if "--debug" in self._frozen_config.myopts:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("backtracking due to missed slot abi update:")
+ msg.append(" child package: %s" % child)
+ if new_child_slot is not None:
+ msg.append(" new child slot package: %s" % new_child_slot)
+ msg.append(" parent package: %s" % dep.parent)
+ if new_dep is not None:
+ msg.append(" new parent pkg: %s" % new_dep.parent)
+ msg.append(" atom: %s" % dep.atom)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ config = backtrack_infos.setdefault("config", {})
+
+ # mask unwanted binary packages if necessary
+ abi_masks = {}
+ if new_child_slot is None:
+ if not child.installed:
+ abi_masks.setdefault(child, {})["slot_operator_mask_built"] = None
+ if not dep.parent.installed:
+ abi_masks.setdefault(dep.parent, {})["slot_operator_mask_built"] = None
+ if abi_masks:
+ config.setdefault("slot_operator_mask_built", {}).update(abi_masks)
+
+ # trigger replacement of installed packages if necessary
+ abi_reinstalls = set()
+ if dep.parent.installed:
+ if new_dep is not None:
+ replacement_atom = new_dep.parent.slot_atom
+ else:
+ replacement_atom = self._replace_installed_atom(dep.parent)
+ if replacement_atom is not None:
+ abi_reinstalls.add((dep.parent.root, replacement_atom))
+ if new_child_slot is None and child.installed:
+ replacement_atom = self._replace_installed_atom(child)
+ if replacement_atom is not None:
+ abi_reinstalls.add((child.root, replacement_atom))
+ if abi_reinstalls:
+ config.setdefault("slot_operator_replace_installed",
+ set()).update(abi_reinstalls)
+
+ self._dynamic_config._need_restart = True
+
+ def _slot_operator_update_probe_slot_conflict(self, dep):
+ new_dep = self._slot_operator_update_probe(dep, slot_conflict=True)
+
+ if new_dep is not None:
+ return new_dep
+
+ if self._dynamic_config._autounmask is True:
+
+ for autounmask_level in self._autounmask_levels():
+
+ new_dep = self._slot_operator_update_probe(dep,
+ slot_conflict=True, autounmask_level=autounmask_level)
+
+ if new_dep is not None:
+ return new_dep
+
+ return None
+
+ def _slot_operator_update_probe(self, dep, new_child_slot=False,
+ slot_conflict=False, autounmask_level=None):
+ """
+ slot/sub-slot := operators tend to prevent updates from getting pulled in,
+ since installed packages pull in packages with the slot/sub-slot that they
+ were built against. Detect this case so that we can schedule rebuilds
+ and reinstalls when appropriate.
+ NOTE: This function only searches for updates that involve upgrades
+ to higher versions, since the logic required to detect when a
+ downgrade would be desirable is not implemented.
+ """
+
+ if dep.child.installed and \
+ self._frozen_config.excluded_pkgs.findAtomForPackage(dep.child,
+ modified_use=self._pkg_use_enabled(dep.child)):
+ return None
+
+ if dep.parent.installed and \
+ self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent,
+ modified_use=self._pkg_use_enabled(dep.parent)):
+ return None
+
+ debug = "--debug" in self._frozen_config.myopts
+ selective = "selective" in self._dynamic_config.myparams
+ want_downgrade = None
+ want_downgrade_parent = None
+
+ def check_reverse_dependencies(existing_pkg, candidate_pkg,
+ replacement_parent=None):
+ """
+ Check if candidate_pkg satisfies all of existing_pkg's non-
+ slot operator parents.
+ """
+ built_slot_operator_parents = set()
+ for parent, atom in self._dynamic_config._parent_atoms.get(existing_pkg, []):
+ if atom.soname or atom.slot_operator_built:
+ built_slot_operator_parents.add(parent)
+
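+   # Every remaining relevant parent atom must also match
+   # candidate_pkg; otherwise replacing existing_pkg would break
+   # that parent.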
+ for parent, atom in self._dynamic_config._parent_atoms.get(existing_pkg, []):
+ if isinstance(parent, Package):
+ if parent in built_slot_operator_parents:
+ # This parent may need to be rebuilt, so its
+ # dependencies aren't necessarily relevant.
+ continue
+
+ if replacement_parent is not None and \
+ (replacement_parent.slot_atom == parent.slot_atom
+ or replacement_parent.cpv == parent.cpv):
+ # This parent is irrelevant because we intend to
+ # replace it with replacement_parent.
+ continue
+
+ if any(pkg is not parent and
+ (pkg.slot_atom == parent.slot_atom or
+ pkg.cpv == parent.cpv) for pkg in
+ self._dynamic_config._package_tracker.match(
+ parent.root, Atom(parent.cp))):
+ # This parent may need to be eliminated due to a
+ # slot conflict, so its dependencies aren't
+ # necessarily relevant.
+ continue
+
+ if (not self._too_deep(parent.depth) and
+ not self._frozen_config.excluded_pkgs.
+ findAtomForPackage(parent,
+ modified_use=self._pkg_use_enabled(parent))):
+ # Check for common reasons that the parent's
+ # dependency might be irrelevant.
+ if self._upgrade_available(parent):
+ # This parent could be replaced by
+ # an upgrade (bug 584626).
+ continue
+ if parent.installed and self._in_blocker_conflict(parent):
+ # This parent could be uninstalled in order
+ # to solve a blocker conflict (bug 612772).
+ continue
+ if self._dynamic_config.digraph.has_edge(parent,
+ existing_pkg):
+ # There is a direct circular dependency between
+ # parent and existing_pkg. This type of
+ # relationship tends to prevent updates
+ # of packages (bug 612874). Since candidate_pkg
+ # is available, we risk a missed update if we
+ # don't try to eliminate this parent from the
+ # graph. Therefore, we give candidate_pkg a
+ # chance, and assume that it will be masked
+ # by backtracking if necessary.
+ continue
+
+ atom_set = InternalPackageSet(initial_atoms=(atom,),
+ allow_repo=True)
+ if not atom_set.findAtomForPackage(candidate_pkg,
+ modified_use=self._pkg_use_enabled(candidate_pkg)):
+ return False
+ return True
+
+
+ for replacement_parent in self._iter_similar_available(dep.parent,
+ dep.parent.slot_atom, autounmask_level=autounmask_level):
+
+ if replacement_parent is dep.parent:
+ continue
+
+ if replacement_parent < dep.parent:
+ if want_downgrade_parent is None:
+ want_downgrade_parent = self._downgrade_probe(
+ dep.parent)
+ if not want_downgrade_parent:
+ continue
+
+ if not check_reverse_dependencies(dep.parent, replacement_parent):
+ continue
+
+ selected_atoms = None
+
+ try:
+ atoms = self._flatten_atoms(replacement_parent,
+ self._pkg_use_enabled(replacement_parent))
+ except InvalidDependString:
+ continue
+
+ if replacement_parent.requires is not None:
+ atoms = list(atoms)
+ atoms.extend(replacement_parent.requires)
+
+ # List of list of child,atom pairs for each atom.
+ replacement_candidates = []
+ # Set of all packages all atoms can agree on.
+ all_candidate_pkgs = None
+
+ for atom in atoms:
+ # The _select_atoms_probe method is expensive, so initialization
+ # of this variable is only performed on demand.
+ atom_not_selected = None
+
+ if not atom.package:
+ unevaluated_atom = None
+ if atom.match(dep.child):
+ # We are searching for a replacement_parent
+ # atom that will pull in a different child,
+ # so continue checking the rest of the atoms.
+ continue
+ else:
+
+ if atom.blocker or \
+ atom.cp != dep.child.cp:
+ continue
+
+ # Discard USE deps, we're only searching for an
+ # approximate pattern, and dealing with USE states
+ # is too complex for this purpose.
+ unevaluated_atom = atom.unevaluated_atom
+ atom = atom.without_use
+
+ if replacement_parent.built and \
+ portage.dep._match_slot(atom, dep.child):
+ # We are searching for a replacement_parent
+ # atom that will pull in a different child,
+ # so continue checking the rest of the atoms.
+ continue
+
+ candidate_pkg_atoms = []
+ candidate_pkgs = []
+ for pkg in self._iter_similar_available(
+ dep.child, atom):
+ if (dep.atom.package and
+ pkg.slot == dep.child.slot and
+ pkg.sub_slot == dep.child.sub_slot):
+ # If slot/sub-slot is identical, then there's
+ # no point in updating.
+ continue
+ if new_child_slot:
+ if pkg.slot == dep.child.slot:
+ continue
+ if pkg < dep.child:
+ # the new slot only matters if the
+ # package version is higher
+ continue
+ else:
+ if pkg.slot != dep.child.slot:
+ continue
+ if pkg < dep.child:
+ if want_downgrade is None:
+ want_downgrade = self._downgrade_probe(dep.child)
+ # be careful not to trigger a rebuild when
+ # the only version available with a
+ # different slot_operator is an older version
+ if not want_downgrade:
+ continue
+ if pkg.version == dep.child.version and not dep.child.built:
+ continue
+
+ insignificant = False
+ if not slot_conflict and \
+ selective and \
+ dep.parent.installed and \
+ dep.child.installed and \
+ dep.parent >= replacement_parent and \
+ dep.child.cpv == pkg.cpv:
+      # This can happen if the child's sub-slot changed
+ # without a revision bump. The sub-slot change is
+ # considered insignificant until one of its parent
+ # packages needs to be rebuilt (which may trigger a
+ # slot conflict).
+ insignificant = True
+
+ if (not insignificant and
+ unevaluated_atom is not None):
+ # Evaluate USE conditionals and || deps, in order
+ # to see if this atom is really desirable, since
+ # otherwise we may trigger an undesirable rebuild
+ # as in bug #460304.
+ if selected_atoms is None:
+ selected_atoms = self._select_atoms_probe(
+ dep.child.root, replacement_parent)
+ atom_not_selected = unevaluated_atom not in selected_atoms
+ if atom_not_selected:
+ break
+
+ if not insignificant and \
+ check_reverse_dependencies(dep.child, pkg,
+ replacement_parent=replacement_parent):
+
+ candidate_pkg_atoms.append(
+ (pkg, unevaluated_atom or atom))
+ candidate_pkgs.append(pkg)
+
+ # When unevaluated_atom is None, it means that atom is
+ # an soname atom which is unconditionally selected, and
+ # _select_atoms_probe is not applicable.
+ if atom_not_selected is None and unevaluated_atom is not None:
+ if selected_atoms is None:
+ selected_atoms = self._select_atoms_probe(
+ dep.child.root, replacement_parent)
+ atom_not_selected = unevaluated_atom not in selected_atoms
+
+ if atom_not_selected:
+ continue
+ replacement_candidates.append(candidate_pkg_atoms)
+ if all_candidate_pkgs is None:
+ all_candidate_pkgs = set(candidate_pkgs)
+ else:
+ all_candidate_pkgs.intersection_update(candidate_pkgs)
+
+ if not all_candidate_pkgs:
+ # If the atoms that connect parent and child can't agree on
+ # any replacement child, we can't do anything.
+ continue
+
+ # Now select one of the pkgs as replacement. This is as easy as
+ # selecting the highest version.
+ # The more complicated part is to choose an atom for the
+ # new Dependency object. Choose the one which ranked the selected
+ # parent highest.
+ selected = None
+ for candidate_pkg_atoms in replacement_candidates:
+ for i, (pkg, atom) in enumerate(candidate_pkg_atoms):
+ if pkg not in all_candidate_pkgs:
+ continue
+ if selected is None or \
+ selected[0] < pkg or \
+ (selected[0] is pkg and i < selected[2]):
+ selected = (pkg, atom, i)
+
+ if debug:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("slot_operator_update_probe:")
+ msg.append(" existing child package: %s" % dep.child)
+ msg.append(" existing parent package: %s" % dep.parent)
+ msg.append(" new child package: %s" % selected[0])
+ msg.append(" new parent package: %s" % replacement_parent)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ return Dependency(parent=replacement_parent,
+ child=selected[0], atom=selected[1])
+
+ if debug:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("slot_operator_update_probe:")
+ msg.append(" existing child package: %s" % dep.child)
+ msg.append(" existing parent package: %s" % dep.parent)
+ msg.append(" new child package: %s" % None)
+ msg.append(" new parent package: %s" % None)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ return None
+
+ def _slot_operator_unsatisfied_probe(self, dep):
+
+ if dep.parent.installed and \
+ self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent,
+ modified_use=self._pkg_use_enabled(dep.parent)):
+ return False
+
+ debug = "--debug" in self._frozen_config.myopts
+
+ for replacement_parent in self._iter_similar_available(dep.parent,
+ dep.parent.slot_atom):
+
+ for atom in replacement_parent.validated_atoms:
+ if not atom.slot_operator == "=" or \
+ atom.blocker or \
+ atom.cp != dep.atom.cp:
+ continue
+
+ # Discard USE deps, we're only searching for an approximate
+ # pattern, and dealing with USE states is too complex for
+ # this purpose.
+ atom = atom.without_use
+
+ pkg, existing_node = self._select_package(dep.root, atom,
+ onlydeps=dep.onlydeps)
+
+ if pkg is not None:
+
+ if debug:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("slot_operator_unsatisfied_probe:")
+ msg.append(" existing parent package: %s" % dep.parent)
+ msg.append(" existing parent atom: %s" % dep.atom)
+ msg.append(" new parent package: %s" % replacement_parent)
+ msg.append(" new child package: %s" % pkg)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ return True
+
+ if debug:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("slot_operator_unsatisfied_probe:")
+ msg.append(" existing parent package: %s" % dep.parent)
+ msg.append(" existing parent atom: %s" % dep.atom)
+ msg.append(" new parent package: %s" % None)
+ msg.append(" new child package: %s" % None)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ return False
+
+ def _slot_operator_unsatisfied_backtrack(self, dep):
+
+ parent = dep.parent
+
+ if "--debug" in self._frozen_config.myopts:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("backtracking due to unsatisfied "
+ "built slot-operator dep:")
+ msg.append(" parent package: %s" % parent)
+ msg.append(" atom: %s" % dep.atom)
+ msg.append("")
+ writemsg_level("\n".join(msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ config = backtrack_infos.setdefault("config", {})
+
+ # mask unwanted binary packages if necessary
+ masks = {}
+ if not parent.installed:
+ masks.setdefault(parent, {})["slot_operator_mask_built"] = None
+ if masks:
+ config.setdefault("slot_operator_mask_built", {}).update(masks)
+
+ # trigger replacement of installed packages if necessary
+ reinstalls = set()
+ if parent.installed:
+ replacement_atom = self._replace_installed_atom(parent)
+ if replacement_atom is not None:
+ reinstalls.add((parent.root, replacement_atom))
+ if reinstalls:
+ config.setdefault("slot_operator_replace_installed",
+ set()).update(reinstalls)
+
+ self._dynamic_config._need_restart = True
+
+ def _in_blocker_conflict(self, pkg):
+ """
+ Check if pkg is involved in a blocker conflict. This method
+ only works after the _validate_blockers method has been called.
+ """
+
+ if (self._dynamic_config._blocked_pkgs is None
+ and not self._validate_blockers()):
+ raise self._unknown_internal_error()
+
+ if pkg in self._dynamic_config._blocked_pkgs:
+ return True
+
+ if pkg in self._dynamic_config._blocker_parents:
+ return True
+
+ return False
+
+ def _upgrade_available(self, pkg):
+ """
+ Detect cases where an upgrade of the given package is available
+ within the same slot.
+ """
+ for available_pkg in self._iter_similar_available(pkg,
+ pkg.slot_atom):
+ if available_pkg > pkg:
+ return True
+
+ return False
+
+ def _downgrade_probe(self, pkg):
+ """
+ Detect cases where a downgrade of the given package is considered
+ desirable due to the current version being masked or unavailable.
+ """
+ available_pkg = None
+ for available_pkg in self._iter_similar_available(pkg,
+ pkg.slot_atom):
+ if available_pkg >= pkg:
+ # There's an available package of the same or higher
+ # version, so downgrade seems undesirable.
+ return False
+
+ return available_pkg is not None
+
+ def _select_atoms_probe(self, root, pkg):
+ selected_atoms = []
+ use = self._pkg_use_enabled(pkg)
+ for k in pkg._dep_keys:
+ v = pkg._metadata.get(k)
+ if not v:
+ continue
+ selected_atoms.extend(self._select_atoms(
+ root, v, myuse=use, parent=pkg)[pkg])
+ return frozenset(x.unevaluated_atom for
+ x in selected_atoms)
+
+ def _flatten_atoms(self, pkg, use):
+ """
+ Evaluate all dependency atoms of the given package, and return
+ them as a frozenset. For performance, results are cached.
+
+ @param pkg: a Package instance
+ @type pkg: Package
+  @param use: set of enabled USE flags
+  @type use: frozenset
+ @rtype: frozenset
+ @return: set of evaluated atoms
+ """
+
+ cache_key = (pkg, use)
+
+ try:
+ return self._dynamic_config._flatten_atoms_cache[cache_key]
+ except KeyError:
+ pass
+
+ atoms = []
+
+ for dep_key in pkg._dep_keys:
+ dep_string = pkg._metadata[dep_key]
+ if not dep_string:
+ continue
+
+ dep_string = portage.dep.use_reduce(
+ dep_string, uselist=use,
+ is_valid_flag=pkg.iuse.is_valid_flag,
+ flat=True, token_class=Atom, eapi=pkg.eapi)
+
+ atoms.extend(token for token in dep_string
+ if isinstance(token, Atom))
+
+ atoms = frozenset(atoms)
+
+ self._dynamic_config._flatten_atoms_cache[cache_key] = atoms
+ return atoms
+
+ def _iter_similar_available(self, graph_pkg, atom, autounmask_level=None):
+ """
+ Given a package that's in the graph, do a rough check to
+ see if a similar package is available to install. The given
+ graph_pkg itself may be yielded only if it's not installed.
+ """
+
+ usepkgonly = "--usepkgonly" in self._frozen_config.myopts
+ useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
+ use_ebuild_visibility = self._frozen_config.myopts.get(
+ '--use-ebuild-visibility', 'n') != 'n'
+
+ for pkg in self._iter_match_pkgs_any(
+ graph_pkg.root_config, atom):
+ if pkg.cp != graph_pkg.cp:
+ # discard old-style virtual match
+ continue
+ if pkg.installed:
+ continue
+ if pkg in self._dynamic_config._runtime_pkg_mask:
+ continue
+ if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ continue
+ if pkg.built:
+ if self._equiv_binary_installed(pkg):
+ continue
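+    # Discard built packages that lack a visible equivalent ebuild,
+    # unless ebuild visibility is not enforced (--usepkgonly or a
+    # useoldpkg match while --use-ebuild-visibility is off).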
+ if not (not use_ebuild_visibility and
+ (usepkgonly or useoldpkg_atoms.findAtomForPackage(
+ pkg, modified_use=self._pkg_use_enabled(pkg)))) and \
+ not self._equiv_ebuild_visible(pkg,
+ autounmask_level=autounmask_level):
+ continue
+ if not self._pkg_visibility_check(pkg,
+ autounmask_level=autounmask_level):
+ continue
+ yield pkg
+
+ def _replace_installed_atom(self, inst_pkg):
+ """
+ Given an installed package, generate an atom suitable for
+ slot_operator_replace_installed backtracking info. The replacement
+ SLOT may differ from the installed SLOT, so first search by cpv.
+ """
+ built_pkgs = []
+ for pkg in self._iter_similar_available(inst_pkg,
+ Atom("=%s" % inst_pkg.cpv)):
+ if not pkg.built:
+ return pkg.slot_atom
+ elif not pkg.installed:
+ # avoid using SLOT from a built instance
+ built_pkgs.append(pkg)
+
+ for pkg in self._iter_similar_available(inst_pkg, inst_pkg.slot_atom):
+ if not pkg.built:
+ return pkg.slot_atom
+ elif not pkg.installed:
+ # avoid using SLOT from a built instance
+ built_pkgs.append(pkg)
+
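+  # No suitable ebuild was found; fall back to the SLOT of the newest
+  # available built (but not installed) instance.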
+ if built_pkgs:
+ best_version = None
+ for pkg in built_pkgs:
+ if best_version is None or pkg > best_version:
+ best_version = pkg
+ return best_version.slot_atom
+
+ return None
+
+ def _slot_operator_trigger_reinstalls(self):
+ """
+ Search for packages with slot-operator deps on older slots, and schedule
+ rebuilds if they can link to a newer slot that's in the graph.
+ """
+
+ rebuild_if_new_slot = self._dynamic_config.myparams.get(
+ "rebuild_if_new_slot", "y") == "y"
+
+ for slot_key, slot_info in self._dynamic_config._slot_operator_deps.items():
+
+ for dep in slot_info:
+
+ atom = dep.atom
+
+ if not (atom.soname or atom.slot_operator_built):
+ new_child_slot = self._slot_change_probe(dep)
+ if new_child_slot is not None:
+ self._slot_change_backtrack(dep, new_child_slot)
+ continue
+
+ if not (dep.parent and
+ isinstance(dep.parent, Package) and dep.parent.built):
+ continue
+
+ # If the parent is not installed, check if it needs to be
+ # rebuilt against an installed instance, since otherwise
+ # it could trigger downgrade of an installed instance as
+ # in bug #652938.
+ want_update_probe = dep.want_update or not dep.parent.installed
+
+ # Check for slot update first, since we don't want to
+ # trigger reinstall of the child package when a newer
+ # slot will be used instead.
+ if rebuild_if_new_slot and want_update_probe:
+ new_dep = self._slot_operator_update_probe(dep,
+ new_child_slot=True)
+ if new_dep is not None:
+ self._slot_operator_update_backtrack(dep,
+ new_child_slot=new_dep.child)
+
+ if want_update_probe:
+ if self._slot_operator_update_probe(dep):
+ self._slot_operator_update_backtrack(dep)
+
+ def _reinstall_for_flags(self, pkg, forced_flags,
+ orig_use, orig_iuse, cur_use, cur_iuse):
+ """Return a set of flags that trigger reinstallation, or None if there
+ are no such flags."""
+
+ # binpkg_respect_use: Behave like newuse by default. If newuse is
+ # False and changed_use is True, then behave like changed_use.
+ binpkg_respect_use = (pkg.built and
+ self._dynamic_config.myparams.get("binpkg_respect_use")
+ in ("y", "auto"))
+ newuse = "--newuse" in self._frozen_config.myopts
+ changed_use = "changed-use" == self._frozen_config.myopts.get("--reinstall")
+ feature_flags = _get_feature_flags(
+ _get_eapi_attrs(pkg.eapi))
+
+ if newuse or (binpkg_respect_use and not changed_use):
+ flags = set(orig_iuse.symmetric_difference(
+ cur_iuse).difference(forced_flags))
+ flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
+ cur_iuse.intersection(cur_use)))
+ flags.difference_update(feature_flags)
+ if flags:
+ return flags
+
+ elif changed_use or binpkg_respect_use:
+ flags = set(orig_iuse.intersection(orig_use).symmetric_difference(
+ cur_iuse.intersection(cur_use)))
+ flags.difference_update(feature_flags)
+ if flags:
+ return flags
+ return None
+
+ def _changed_deps(self, pkg):
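+ # Determine whether the dependencies recorded for a built or
+ # installed package differ from those declared by an ebuild of the
+ # same version. Raw metadata is compared after use_reduce, with
+ # slot-operator slots normalized via strip_slots.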
+
+ ebuild = None
+ try:
+ ebuild = self._pkg(pkg.cpv, "ebuild",
+ pkg.root_config, myrepo=pkg.repo)
+ except PackageNotFound:
+ # Use first available instance of the same version.
+ for ebuild in self._iter_match_pkgs(
+ pkg.root_config, "ebuild", Atom("=" + pkg.cpv)):
+ break
+
+ if ebuild is None:
+ changed = False
+ else:
+ if self._dynamic_config.myparams.get("bdeps") in ("y", "auto"):
+ depvars = Package._dep_keys
+ else:
+ depvars = Package._runtime_keys
+
+ # Use _raw_metadata, in order to avoid interaction
+ # with --dynamic-deps.
+ try:
+ built_deps = []
+ for k in depvars:
+ dep_struct = portage.dep.use_reduce(
+ pkg._raw_metadata[k], uselist=pkg.use.enabled,
+ eapi=pkg.eapi, token_class=Atom)
+ strip_slots(dep_struct)
+ built_deps.append(dep_struct)
+ except InvalidDependString:
+ changed = True
+ else:
+ unbuilt_deps = []
+ for k in depvars:
+ dep_struct = portage.dep.use_reduce(
+ ebuild._raw_metadata[k],
+ uselist=pkg.use.enabled,
+ eapi=ebuild.eapi, token_class=Atom)
+ strip_slots(dep_struct)
+ unbuilt_deps.append(dep_struct)
+
+ changed = built_deps != unbuilt_deps
+
+ if (changed and pkg.installed and
+ self._dynamic_config.myparams.get("changed_deps_report")):
+ self._dynamic_config._changed_deps_pkgs[pkg] = ebuild
+
+ return changed
+
+ def _changed_slot(self, pkg):
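+ # True if an equivalent ebuild exists and declares a different
+ # slot/sub-slot than the given package.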
+ ebuild = self._equiv_ebuild(pkg)
+ return ebuild is not None and (ebuild.slot, ebuild.sub_slot) != (pkg.slot, pkg.sub_slot)
+
+ def _create_graph(self, allow_unsatisfied=False):
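+ # Drain the dependency stacks, adding packages and their deps to
+ # the graph. Returns 1 on success, or 0 if a dependency could not
+ # be added.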
+ dep_stack = self._dynamic_config._dep_stack
+ dep_disjunctive_stack = self._dynamic_config._dep_disjunctive_stack
+ while dep_stack or dep_disjunctive_stack:
+ self._spinner_update()
+ while dep_stack:
+ dep = dep_stack.pop()
+ if isinstance(dep, Package):
+ if not self._add_pkg_deps(dep,
+ allow_unsatisfied=allow_unsatisfied):
+ return 0
+ continue
+ if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
+ return 0
+ if dep_disjunctive_stack:
+ if not self._pop_disjunction(allow_unsatisfied):
+ return 0
+ return 1
+
+ def _expand_set_args(self, input_args, add_to_digraph=False):
+ """
+ Iterate over a list of DependencyArg instances and yield all
+ instances given in the input together with additional SetArg
+ instances that are generated from nested sets.
+ @param input_args: An iterable of DependencyArg instances
+ @type input_args: Iterable
+ @param add_to_digraph: If True then add SetArg instances
+ to the digraph, in order to record parent -> child
+ relationships from nested sets
+ @type add_to_digraph: Boolean
+ @rtype: Iterable
+ @return: All args given in the input together with additional
+ SetArg instances that are generated from nested sets
+ """
+
+ traversed_set_args = set()
+
+ for arg in input_args:
+ if not isinstance(arg, SetArg):
+ yield arg
+ continue
+
+ root_config = arg.root_config
+ depgraph_sets = self._dynamic_config.sets[root_config.root]
+ arg_stack = [arg]
+ while arg_stack:
+ arg = arg_stack.pop()
+ if arg in traversed_set_args:
+ continue
+
+ # If a node with the same hash already exists in
+ # the digraph, preserve the existing instance which
+ # may have a different reset_depth attribute
+ # (distinguishes user arguments from sets added for
+ # another reason such as complete mode).
+ arg = self._dynamic_config.digraph.get(arg, arg)
+ traversed_set_args.add(arg)
+
+ if add_to_digraph:
+ self._dynamic_config.digraph.add(arg, None,
+ priority=BlockerDepPriority.instance)
+
+ yield arg
+
+ # Traverse nested sets and add them to the stack
+ # if they're not already in the graph. Also, graph
+ # edges between parent and nested sets.
+ for token in arg.pset.getNonAtoms():
+ if not token.startswith(SETPREFIX):
+ continue
+ s = token[len(SETPREFIX):]
+ nested_set = depgraph_sets.sets.get(s)
+ if nested_set is None:
+ nested_set = root_config.sets.get(s)
+ if nested_set is not None:
+ # Propagate the reset_depth attribute from
+ # parent set to nested set.
+ nested_arg = SetArg(arg=token, pset=nested_set,
+ reset_depth=arg.reset_depth,
+ root_config=root_config)
+
+ # Preserve instances already in the graph (same
+ # reason as for the "arg" variable above).
+ nested_arg = self._dynamic_config.digraph.get(
+ nested_arg, nested_arg)
+ arg_stack.append(nested_arg)
+ if add_to_digraph:
+ self._dynamic_config.digraph.add(nested_arg, arg,
+ priority=BlockerDepPriority.instance)
+ depgraph_sets.sets[nested_arg.name] = nested_arg.pset
+
+ def _add_dep(self, dep, allow_unsatisfied=False):
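+ # Add a single Dependency to the graph: record blockers for later
+ # validation, select a package to satisfy the atom, and either
+ # queue the dep as unsatisfied, trigger backtracking, or recurse
+ # via _add_pkg. Returns 1 on success, 0 on failure.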
+ debug = "--debug" in self._frozen_config.myopts
+ buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
+ nodeps = "--nodeps" in self._frozen_config.myopts
+ if dep.blocker:
+
+ # Slot collision nodes are not allowed to block other packages since
+ # blocker validation is only able to account for one package per slot.
+ is_slot_conflict_parent = any(dep.parent in conflict.pkgs[1:] for conflict in \
+ self._dynamic_config._package_tracker.slot_conflicts())
+ if not buildpkgonly and \
+ not nodeps and \
+ not dep.collapsed_priority.ignored and \
+ not dep.collapsed_priority.optional and \
+ not is_slot_conflict_parent:
+ if dep.parent.onlydeps:
+ # It's safe to ignore blockers if the
+ # parent is an --onlydeps node.
+ return 1
+ # The blocker applies to the root where
+ # the parent is or will be installed.
+ blocker = Blocker(atom=dep.atom,
+ eapi=dep.parent.eapi,
+ priority=dep.priority, root=dep.parent.root)
+ self._dynamic_config._blocker_parents.add(blocker, dep.parent)
+ return 1
+
+ if dep.child is None:
+ dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
+ onlydeps=dep.onlydeps)
+ else:
+ # The caller has selected a specific package
+ # via self._minimize_packages().
+ dep_pkg = dep.child
+ existing_node = next(self._dynamic_config._package_tracker.match(
+ dep.root, dep_pkg.slot_atom, installed=False), None)
+
+ if not dep_pkg:
+ if (dep.collapsed_priority.optional or
+ dep.collapsed_priority.ignored):
+ # This is an unnecessary build-time dep.
+ return 1
+
+ # NOTE: For removal actions, allow_unsatisfied is always
+ # True since all existing removal actions traverse all
+ # installed deps deeply via the _complete_graph method,
+ # which calls _create_graph with allow_unsatisfied = True.
+ if allow_unsatisfied:
+ self._dynamic_config._unsatisfied_deps.append(dep)
+ return 1
+
+ # The following case occurs when
+ # _solve_non_slot_operator_slot_conflicts calls
+ # _create_graph. In this case, ignore unsatisfied deps for
+ # installed packages only if their depth is beyond the depth
+ # requested by the user and the dep was initially
+ # unsatisfied (not broken by a slot conflict in the current
+ # graph). See bug #520950.
+ # NOTE: The value of dep.parent.depth is guaranteed to be
+ # either an integer or _UNREACHABLE_DEPTH, where
+ # _UNREACHABLE_DEPTH indicates that the parent has been
+ # pulled in by the _complete_graph method (rather than by
+ # explicit arguments or their deep dependencies). These
+ # cases must be distinguished because depth is meaningless
+ # for packages that are not reachable as deep dependencies
+ # of arguments.
+ if (self._dynamic_config._complete_mode and
+ isinstance(dep.parent, Package) and
+ dep.parent.installed and
+ (dep.parent.depth is self._UNREACHABLE_DEPTH or
+ (self._frozen_config.requested_depth is not True and
+ dep.parent.depth >= self._frozen_config.requested_depth))):
+ inst_pkg, in_graph = \
+ self._select_pkg_from_installed(dep.root, dep.atom)
+ if inst_pkg is None:
+ self._dynamic_config._initially_unsatisfied_deps.append(dep)
+ return 1
+
+ self._dynamic_config._unsatisfied_deps_for_display.append(
+ ((dep.root, dep.atom), {"myparent":dep.parent}))
+
+ # The parent node should not already be in
+ # runtime_pkg_mask, since that would trigger an
+ # infinite backtracking loop.
+ if self._dynamic_config._allow_backtracking:
+ if (dep.parent not in self._dynamic_config._runtime_pkg_mask and
+ dep.atom.package and dep.atom.slot_operator_built and
+ self._slot_operator_unsatisfied_probe(dep)):
+ self._slot_operator_unsatisfied_backtrack(dep)
+ return 1
+ else:
+ # This is for backward-compatibility with previous
+ # behavior, so that installed packages with unsatisfied
+ # dependencies trigger an error message but do not
+ # cause the dependency calculation to fail. Only do
+ # this if the parent is already in the runtime package
+ # mask, since otherwise we need to backtrack.
+ if (dep.parent.installed and
+ dep.parent in self._dynamic_config._runtime_pkg_mask and
+ not any(self._iter_match_pkgs_any(
+ dep.parent.root_config, dep.atom))):
+ self._dynamic_config._initially_unsatisfied_deps.append(dep)
+ return 1
+
+ # Do not backtrack if only USE flags have to be changed in
+ # order to satisfy the dependency. Note that when
+ # want_restart_for_use_change sets the need_restart
+ # flag, it causes _select_pkg_highest_available to
+ # return None, and eventually we come through here
+ # and skip the "missing dependency" backtracking path.
+ dep_pkg, existing_node = \
+ self._select_package(dep.root,
+ dep.atom.without_use if dep.atom.package
+ else dep.atom, onlydeps=dep.onlydeps)
+ if dep_pkg is None:
+ self._dynamic_config._backtrack_infos["missing dependency"] = dep
+ self._dynamic_config._need_restart = True
+ if debug:
+ msg = []
+ msg.append("")
+ msg.append("")
+ msg.append("backtracking due to unsatisfied dep:")
+ msg.append(" parent: %s" % dep.parent)
+ msg.append(" priority: %s" % dep.priority)
+ msg.append(" root: %s" % dep.root)
+ msg.append(" atom: %s" % dep.atom)
+ msg.append("")
+ writemsg_level("".join("%s\n" % l for l in msg),
+ noiselevel=-1, level=logging.DEBUG)
+
+ return 0
+
+ self._rebuild.add(dep_pkg, dep)
+
+ ignore = dep.collapsed_priority.ignored and \
+ not self._dynamic_config._traverse_ignored_deps
+ if not ignore and not self._add_pkg(dep_pkg, dep):
+ return 0
+ return 1
+
+ def _check_slot_conflict(self, pkg, atom):
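+ # Return (existing_node, matches): the non-installed package
+ # already tracked for pkg's slot, if any, and whether it has the
+ # same cpv as pkg or matches the given atom.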
+ existing_node = next(self._dynamic_config._package_tracker.match(
+ pkg.root, pkg.slot_atom, installed=False), None)
+
+ matches = None
+ if existing_node:
+ matches = pkg.cpv == existing_node.cpv
+ if pkg != existing_node and \
+ atom is not None:
+ matches = atom.match(existing_node.with_use(
+ self._pkg_use_enabled(existing_node)))
+
+ return (existing_node, matches)
+
+ def _add_pkg(self, pkg, dep):
+ """
+ Adds a package to the depgraph, queues dependencies, and handles
+ slot conflicts.
+ """
+ debug = "--debug" in self._frozen_config.myopts
+ myparent = None
+ priority = None
+ depth = 0
+ if dep is None:
+ dep = Dependency()
+ else:
+ myparent = dep.parent
+ priority = dep.priority
+ depth = dep.depth
+ if priority is None:
+ priority = DepPriority()
+
+ if debug:
+ writemsg_level(
+ "\n%s%s %s\n" % ("Child:".ljust(15), pkg,
+ pkg_use_display(pkg, self._frozen_config.myopts,
+ modified_use=self._pkg_use_enabled(pkg))),
+ level=logging.DEBUG, noiselevel=-1)
+ if isinstance(myparent,
+ (PackageArg, AtomArg)):
+ # For PackageArg and AtomArg types, it's
+ # redundant to display the atom attribute.
+ writemsg_level(
+ "%s%s\n" % ("Parent Dep:".ljust(15), myparent),
+ level=logging.DEBUG, noiselevel=-1)
+ else:
+ # Display the specific atom from SetArg or
+ # Package types.
+ uneval = ""
+ if (dep.atom and dep.atom.package and
+ dep.atom is not dep.atom.unevaluated_atom):
+ uneval = " (%s)" % (dep.atom.unevaluated_atom,)
+ writemsg_level(
+ "%s%s%s required by %s\n" %
+ ("Parent Dep:".ljust(15), dep.atom, uneval, myparent),
+ level=logging.DEBUG, noiselevel=-1)
+
+ # Ensure that the dependencies of the same package
+ # are never processed more than once.
+ previously_added = pkg in self._dynamic_config.digraph
+
+ pkgsettings = self._frozen_config.pkgsettings[pkg.root]
+
+ arg_atoms = None
+ if True:
+ try:
+ arg_atoms = list(self._iter_atoms_for_pkg(pkg))
+ except portage.exception.InvalidDependString as e:
+ if not pkg.installed:
+ # should have been masked before it was selected
+ raise
+ del e
+
+ # NOTE: REQUIRED_USE checks are delayed until after
+ # package selection, since we want to prompt the user
+ # for USE adjustment rather than have REQUIRED_USE
+ # affect package selection and || dep choices.
+ if not pkg.built and pkg._metadata.get("REQUIRED_USE") and \
+ eapi_has_required_use(pkg.eapi):
+ required_use_is_sat = check_required_use(
+ pkg._metadata["REQUIRED_USE"],
+ self._pkg_use_enabled(pkg),
+ pkg.iuse.is_valid_flag,
+ eapi=pkg.eapi)
+ if not required_use_is_sat:
+ if dep.atom is not None and dep.parent is not None:
+ self._add_parent_atom(pkg, (dep.parent, dep.atom))
+
+ if arg_atoms:
+ for parent_atom in arg_atoms:
+ parent, atom = parent_atom
+ self._add_parent_atom(pkg, parent_atom)
+
+ atom = dep.atom
+ if atom is None:
+ atom = Atom("=" + pkg.cpv)
+ self._dynamic_config._unsatisfied_deps_for_display.append(
+ ((pkg.root, atom),
+ {"myparent" : dep.parent, "show_req_use" : pkg}))
+ self._dynamic_config._required_use_unsatisfied = True
+ self._dynamic_config._skip_restart = True
+ # Add pkg to digraph in order to enable autounmask messages
+ # for this package, which is useful when autounmask USE
+ # changes have violated REQUIRED_USE.
+ self._dynamic_config.digraph.add(pkg, dep.parent, priority=priority)
+ return 0
+
+ if not pkg.onlydeps:
+
+ existing_node, existing_node_matches = \
+ self._check_slot_conflict(pkg, dep.atom)
+ if existing_node:
+ if existing_node_matches:
+ # The existing node can be reused.
+ if pkg != existing_node:
+ pkg = existing_node
+ previously_added = True
+ try:
+ arg_atoms = list(self._iter_atoms_for_pkg(pkg))
+ except InvalidDependString as e:
+ if not pkg.installed:
+ # should have been masked before
+ # it was selected
+ raise
+
+ if debug:
+ writemsg_level(
+ "%s%s %s\n" % ("Re-used Child:".ljust(15),
+ pkg, pkg_use_display(pkg,
+ self._frozen_config.myopts,
+ modified_use=self._pkg_use_enabled(pkg))),
+ level=logging.DEBUG, noiselevel=-1)
+
+ else:
+ if debug:
+ writemsg_level(
+ "%s%s %s\n" % ("Slot Conflict:".ljust(15),
+ existing_node, pkg_use_display(existing_node,
+ self._frozen_config.myopts,
+ modified_use=self._pkg_use_enabled(existing_node))),
+ level=logging.DEBUG, noiselevel=-1)
+
+ if not previously_added:
+ self._dynamic_config._package_tracker.add_pkg(pkg)
+ self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
+ self._check_masks(pkg)
+ self._prune_highest_pkg_cache(pkg)
+
+ if not pkg.installed:
+ # Allow this package to satisfy old-style virtuals in case it
+ # doesn't already. Any pre-existing providers will be preferred
+ # over this one.
+ try:
+ pkgsettings.setinst(pkg.cpv, pkg._metadata)
+ # For consistency, also update the global virtuals.
+ settings = self._frozen_config.roots[pkg.root].settings
+ settings.unlock()
+ settings.setinst(pkg.cpv, pkg._metadata)
+ settings.lock()
+ except portage.exception.InvalidDependString:
+ if not pkg.installed:
+ # should have been masked before it was selected
+ raise
+
+ if arg_atoms:
+ self._dynamic_config._set_nodes.add(pkg)
+
+ # Do this even for onlydeps, so that the
+ # parent/child relationship is always known in case
+ # self._show_slot_collision_notice() needs to be called later.
+ # If a direct circular dependency is not an unsatisfied
+ # buildtime dependency then drop it here since otherwise
+ # it can skew the merge order calculation in an unwanted
+ # way.
+ if pkg != dep.parent or \
+ (priority.buildtime and not priority.satisfied):
+ self._dynamic_config.digraph.add(pkg,
+ dep.parent, priority=priority)
+ if dep.atom is not None and dep.parent is not None:
+ self._add_parent_atom(pkg, (dep.parent, dep.atom))
+
+ if arg_atoms:
+ for parent_atom in arg_atoms:
+ parent, atom = parent_atom
+ self._dynamic_config.digraph.add(pkg, parent, priority=priority)
+ self._add_parent_atom(pkg, parent_atom)
+
+ # This section determines whether we go deeper into dependencies or not.
+ # We want to go deeper in a few cases:
+ #  - when installing package A, we need to make sure A's deps are met;
+ #  - with `emerge --deep <pkgspec>`, we need to recursively check the deps of pkgspec.
+ # In --nodeps (no recursion) mode, we obviously only check one level of dependencies.
+ if arg_atoms and depth != 0:
+ for parent, atom in arg_atoms:
+ if parent.reset_depth:
+ depth = 0
+ break
+
+ if previously_added and depth != 0 and \
+ isinstance(pkg.depth, int):
+ # Use pkg.depth if it is less than depth.
+ if isinstance(depth, int):
+ depth = min(pkg.depth, depth)
+ else:
+ # depth is _UNREACHABLE_DEPTH and pkg.depth is
+ # an int, so use the int because it's considered
+ # to be less than _UNREACHABLE_DEPTH.
+ depth = pkg.depth
+
+ pkg.depth = depth
+ deep = self._dynamic_config.myparams.get("deep", 0)
+ update = "--update" in self._frozen_config.myopts
+
+ dep.want_update = (not self._dynamic_config._complete_mode and
+ (arg_atoms or update) and
+ not self._too_deep(depth))
+
+ dep.child = pkg
+ if not pkg.onlydeps and dep.atom and (
+ dep.atom.soname or
+ dep.atom.slot_operator == "="):
+ self._add_slot_operator_dep(dep)
+
+ recurse = (deep is True or
+ not self._too_deep(self._depth_increment(depth, n=1)))
+ dep_stack = self._dynamic_config._dep_stack
+ if "recurse" not in self._dynamic_config.myparams:
+ return 1
+ elif pkg.installed and not recurse:
+ dep_stack = self._dynamic_config._ignored_deps
+
+ self._spinner_update()
+
+ if not previously_added:
+ dep_stack.append(pkg)
+ return 1
+
+ def _add_installed_sonames(self, pkg):
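+ # Record the soname atoms provided by an installed package, so
+ # that soname dependencies can be matched against installed
+ # providers.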
+ if (self._frozen_config.soname_deps_enabled and
+ pkg.provides is not None):
+ for atom in pkg.provides:
+ self._dynamic_config._installed_sonames[
+ (pkg.root, atom)].append(pkg)
+
+ def _add_pkg_soname_deps(self, pkg, allow_unsatisfied=False):
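+ # Create a runtime Dependency for each soname required by pkg,
+ # skipping sonames that appear in the configured soname_provided
+ # set.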
+ if (self._frozen_config.soname_deps_enabled and
+ pkg.requires is not None):
+ if isinstance(pkg.depth, int):
+ depth = pkg.depth + 1
+ else:
+ depth = pkg.depth
+ soname_provided = self._frozen_config.roots[
+ pkg.root].settings.soname_provided
+ for atom in pkg.requires:
+ if atom in soname_provided:
+ continue
+ dep = Dependency(atom=atom, blocker=False, depth=depth,
+ parent=pkg, priority=self._priority(runtime=True),
+ root=pkg.root)
+ if not self._add_dep(dep,
+ allow_unsatisfied=allow_unsatisfied):
+ return False
+ return True
+
+ def _remove_pkg(self, pkg):
+ """
+ Remove a package, and any of its digraph children that are left
+ parentless as a result, from all depgraph data structures.
+ """
+ debug = "--debug" in self._frozen_config.myopts
+ if debug:
+ writemsg_level(
+ "Removing package: %s\n" % pkg,
+ level=logging.DEBUG, noiselevel=-1)
+
+ try:
+ children = [child for child in self._dynamic_config.digraph.child_nodes(pkg) \
+ if child is not pkg]
+ self._dynamic_config.digraph.remove(pkg)
+ except KeyError:
+ children = []
+
+ self._dynamic_config._package_tracker.discard_pkg(pkg)
+
+ self._dynamic_config._parent_atoms.pop(pkg, None)
+ self._dynamic_config._set_nodes.discard(pkg)
+
+ for child in children:
+ try:
+ self._dynamic_config._parent_atoms[child] = set((parent, atom) \
+ for (parent, atom) in self._dynamic_config._parent_atoms[child] \
+ if parent is not pkg)
+ except KeyError:
+ pass
+
+ # Remove slot operator dependencies.
+ slot_key = (pkg.root, pkg.slot_atom)
+ if slot_key in self._dynamic_config._slot_operator_deps:
+ self._dynamic_config._slot_operator_deps[slot_key] = \
+ [dep for dep in self._dynamic_config._slot_operator_deps[slot_key] \
+ if dep.child is not pkg]
+ if not self._dynamic_config._slot_operator_deps[slot_key]:
+ del self._dynamic_config._slot_operator_deps[slot_key]
+
+ # Remove blockers.
+ self._dynamic_config._blocker_parents.discard(pkg)
+ self._dynamic_config._irrelevant_blockers.discard(pkg)
+ self._dynamic_config._unsolvable_blockers.discard(pkg)
+ if self._dynamic_config._blocked_pkgs is not None:
+ self._dynamic_config._blocked_pkgs.discard(pkg)
+ self._dynamic_config._blocked_world_pkgs.pop(pkg, None)
+
+ for child in children:
+ if child in self._dynamic_config.digraph and \
+ not self._dynamic_config.digraph.parent_nodes(child):
+ self._remove_pkg(child)
+
+ # Clear caches.
+ self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
+ self._dynamic_config._highest_pkg_cache.clear()
+ self._dynamic_config._highest_pkg_cache_cp_map.clear()
+
+
+ def _check_masks(self, pkg):
+
+ slot_key = (pkg.root, pkg.slot_atom)
+
+ # Check for upgrades in the same slot that are
+ # masked due to a LICENSE change in a newer
+ # version that is not masked for any other reason.
+ other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
+ if other_pkg is not None and pkg < other_pkg:
+ self._dynamic_config._masked_license_updates.add(other_pkg)
+
+ def _add_parent_atom(self, pkg, parent_atom):
+ parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
+ if parent_atoms is None:
+ parent_atoms = set()
+ self._dynamic_config._parent_atoms[pkg] = parent_atoms
+ parent_atoms.add(parent_atom)
+
+ def _add_slot_operator_dep(self, dep):
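+ # Group slot-operator and soname deps by the child's
+ # (root, slot_atom), for later probing by
+ # _slot_operator_trigger_reinstalls.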
+ slot_key = (dep.root, dep.child.slot_atom)
+ slot_info = self._dynamic_config._slot_operator_deps.get(slot_key)
+ if slot_info is None:
+ slot_info = []
+ self._dynamic_config._slot_operator_deps[slot_key] = slot_info
+ slot_info.append(dep)
+
+ def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
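+ # Queue the dependencies of pkg: soname deps first, then each of
+ # its dependency variables (RDEPEND, PDEPEND, DEPEND, HDEPEND,
+ # BDEPEND), with roots and priorities adjusted for bdeps,
+ # --root-deps, and removal actions.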
+
+ if not self._add_pkg_soname_deps(pkg,
+ allow_unsatisfied=allow_unsatisfied):
+ return False
+
+ myroot = pkg.root
+ metadata = pkg._metadata
+ removal_action = "remove" in self._dynamic_config.myparams
+ eapi_attrs = _get_eapi_attrs(pkg.eapi)
+
+ edepend={}
+ for k in Package._dep_keys:
+ edepend[k] = metadata[k]
+
+ use_enabled = self._pkg_use_enabled(pkg)
+
+ with_test_deps = not removal_action and \
+ "with_test_deps" in \
+ self._dynamic_config.myparams and \
+ pkg.depth == 0 and \
+ "test" not in use_enabled and \
+ pkg.iuse.is_valid_flag("test") and \
+ self._is_argument(pkg)
+
+ if with_test_deps:
+ use_enabled = set(use_enabled)
+ use_enabled.add("test")
+
+ if not pkg.built and \
+ "--buildpkgonly" in self._frozen_config.myopts and \
+ "deep" not in self._dynamic_config.myparams:
+ edepend["RDEPEND"] = ""
+ edepend["PDEPEND"] = ""
+
+ if pkg.onlydeps and \
+ self._frozen_config.myopts.get("--onlydeps-with-rdeps") == 'n':
+ edepend["RDEPEND"] = ""
+ edepend["PDEPEND"] = ""
+
+ ignore_build_time_deps = False
+ if pkg.built and not removal_action:
+ if self._dynamic_config.myparams.get("bdeps") in ("y", "auto"):
+ # Pull in build time deps as requested, but mark them as
+ # "optional" since they are not strictly required. This allows
+ # more freedom in the merge order calculation for solving
+ # circular dependencies. Don't convert to PDEPEND since that
+ # could make --with-bdeps=y less effective if it is used to
+ # adjust merge order to prevent built_with_use() calls from
+ # failing.
+ pass
+ else:
+ ignore_build_time_deps = True
+
+ if removal_action and self._dynamic_config.myparams.get("bdeps", "y") == "n":
+ # Removal actions never traverse ignored buildtime
+ # dependencies, so it's safe to discard them early.
+ edepend["DEPEND"] = ""
+ edepend["BDEPEND"] = ""
+ edepend["HDEPEND"] = ""
+ ignore_build_time_deps = True
+
+ ignore_depend_deps = ignore_build_time_deps
+ ignore_bdepend_deps = ignore_build_time_deps
+ ignore_hdepend_deps = ignore_build_time_deps
+
+ if removal_action:
+ depend_root = myroot
+ else:
+ if eapi_attrs.hdepend:
+ depend_root = myroot
+ elif eapi_attrs.bdepend:
+ depend_root = pkg.root_config.settings["ESYSROOT"]
+ else:
+ depend_root = self._frozen_config._running_root.root
+ root_deps = self._frozen_config.myopts.get("--root-deps")
+ if root_deps is not None:
+ if root_deps is True:
+ depend_root = myroot
+ elif root_deps == "rdeps":
+ ignore_depend_deps = True
+
+ # If rebuild mode is not enabled, it's safe to discard ignored
+ # build-time dependencies. If you want these deps to be traversed
+ # in "complete" mode then you need to specify --with-bdeps=y.
+ if not self._rebuild.rebuild:
+ if ignore_depend_deps:
+ edepend["DEPEND"] = ""
+ if ignore_bdepend_deps:
+ edepend["BDEPEND"] = ""
+ if ignore_hdepend_deps:
+ edepend["HDEPEND"] = ""
+
+ # Since build-time deps tend to be a superset of run-time deps, order
+ # dep processing such that build-time deps are popped from
+ # _dep_disjunctive_stack first, so that choices for build-time
+ # deps influence choices for run-time deps (bug 639346).
+ deps = (
+ (myroot, edepend["RDEPEND"],
+ self._priority(runtime=True)),
+ (myroot, edepend["PDEPEND"],
+ self._priority(runtime_post=True)),
+ (depend_root, edepend["DEPEND"],
+ self._priority(buildtime=True,
+ optional=(pkg.built or ignore_depend_deps),
+ ignored=ignore_depend_deps)),
+ (self._frozen_config._running_root.root, edepend["HDEPEND"],
+ self._priority(buildtime=True,
+ optional=(pkg.built or ignore_hdepend_deps),
+ ignored=ignore_hdepend_deps)),
+ (self._frozen_config._running_root.root, edepend["BDEPEND"],
+ self._priority(buildtime=True,
+ optional=(pkg.built or ignore_bdepend_deps),
+ ignored=ignore_bdepend_deps)),
+ )
+
+ debug = "--debug" in self._frozen_config.myopts
+
+ for dep_root, dep_string, dep_priority in deps:
+ if not dep_string:
+ continue
+ if debug:
+ writemsg_level("\nParent: %s\n" % (pkg,),
+ noiselevel=-1, level=logging.DEBUG)
+ writemsg_level("Depstring: %s\n" % (dep_string,),
+ noiselevel=-1, level=logging.DEBUG)
+ writemsg_level("Priority: %s\n" % (dep_priority,),
+ noiselevel=-1, level=logging.DEBUG)
+
+ try:
+ dep_string = portage.dep.use_reduce(dep_string,
+ uselist=use_enabled,
+ is_valid_flag=pkg.iuse.is_valid_flag,
+ opconvert=True, token_class=Atom,
+ eapi=pkg.eapi)
+ except portage.exception.InvalidDependString as e:
+ if not pkg.installed:
+ # should have been masked before it was selected
+ raise
+ del e
+
+ # Try again, but omit the is_valid_flag argument, since
+ # invalid USE conditionals are a common problem and it's
+ # practical to ignore this issue for installed packages.
+ try:
+ dep_string = portage.dep.use_reduce(dep_string,
+ uselist=use_enabled,
+ opconvert=True, token_class=Atom,
+ eapi=pkg.eapi)
+ except portage.exception.InvalidDependString as e:
+ self._dynamic_config._masked_installed.add(pkg)
+ del e
+ continue
+
+ try:
+ dep_string = list(self._queue_disjunctive_deps(
+ pkg, dep_root, dep_priority, dep_string))
+ except portage.exception.InvalidDependString as e:
+ if pkg.installed:
+ self._dynamic_config._masked_installed.add(pkg)
+ del e
+ continue
+
+ # should have been masked before it was selected
+ raise
+
+ if not dep_string:
+ continue
+
+ if not self._add_pkg_dep_string(
+ pkg, dep_root, dep_priority, dep_string,
+ allow_unsatisfied):
+ return 0
+
+ self._dynamic_config._traversed_pkg_deps.add(pkg)
+ return 1
+
+ def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
+ allow_unsatisfied):
+ _autounmask_backup = self._dynamic_config._autounmask
+ if dep_priority.optional or dep_priority.ignored:
+ # Temporarily disable autounmask for deps that
+ # don't necessarily need to be satisfied.
+ self._dynamic_config._autounmask = False
+ try:
+ return self._wrapped_add_pkg_dep_string(
+ pkg, dep_root, dep_priority, dep_string,
+ allow_unsatisfied)
+ finally:
+ self._dynamic_config._autounmask = _autounmask_backup
+
+ def _ignore_dependency(self, atom, pkg, child, dep, mypriority, recurse_satisfied):
+ """
+ In some cases, dep_check will return deps that shouldn't
+ be processed any further, so they are identified and
+ discarded here. Try to discard as few as possible since
+ discarded dependencies reduce the amount of information
+ available for optimization of merge order.
+ Don't ignore dependencies if pkg has a slot operator dependency on the child
+ and the child has changed slot/sub_slot.
+ """
+ if not mypriority.satisfied:
+ return False
+ slot_operator_rebuild = False
+ if atom.slot_operator == '=' and \
+ (pkg.root, pkg.slot_atom) in self._dynamic_config._slot_operator_replace_installed and \
+ mypriority.satisfied is not child and \
+ mypriority.satisfied.installed and \
+ child and \
+ not child.installed and \
+ (child.slot != mypriority.satisfied.slot or child.sub_slot != mypriority.satisfied.sub_slot):
+ slot_operator_rebuild = True
+
+ return not atom.blocker and \
+ not recurse_satisfied and \
+ mypriority.satisfied.visible and \
+ dep.child is not None and \
+ not dep.child.installed and \
+ not any(self._dynamic_config._package_tracker.match(
+ dep.child.root, dep.child.slot_atom, installed=False)) and \
+ not slot_operator_rebuild
+
+ def _wrapped_add_pkg_dep_string(self, pkg, dep_root, dep_priority,
+ dep_string, allow_unsatisfied):
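+ # Resolve one dependency string for pkg: run _select_atoms, build
+ # a Dependency for each selected (atom, child) pair, mark deps
+ # that are already satisfied by installed packages, and pull in
+ # indirect virtual deps while preserving real parent/child
+ # relationships.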
+ if isinstance(pkg.depth, int):
+ depth = pkg.depth + 1
+ else:
+ depth = pkg.depth
+
+ deep = self._dynamic_config.myparams.get("deep", 0)
+ recurse_satisfied = deep is True or depth <= deep
+ debug = "--debug" in self._frozen_config.myopts
+ strict = pkg.type_name != "installed"
+
+ if debug:
+ writemsg_level("\nParent: %s\n" % (pkg,),
+ noiselevel=-1, level=logging.DEBUG)
+ dep_repr = portage.dep.paren_enclose(dep_string,
+ unevaluated_atom=True, opconvert=True)
+ writemsg_level("Depstring: %s\n" % (dep_repr,),
+ noiselevel=-1, level=logging.DEBUG)
+ writemsg_level("Priority: %s\n" % (dep_priority,),
+ noiselevel=-1, level=logging.DEBUG)
+
+ try:
+ selected_atoms = self._select_atoms(dep_root,
+ dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg,
+ strict=strict, priority=dep_priority)
+ except portage.exception.InvalidDependString:
+ if pkg.installed:
+ self._dynamic_config._masked_installed.add(pkg)
+ return 1
+
+ # should have been masked before it was selected
+ raise
+
+ if debug:
+ writemsg_level("Candidates: %s\n" % \
+ ([str(x) for x in selected_atoms[pkg]],),
+ noiselevel=-1, level=logging.DEBUG)
+
+ root_config = self._frozen_config.roots[dep_root]
+ vardb = root_config.trees["vartree"].dbapi
+ traversed_virt_pkgs = set()
+
+ reinstall_atoms = self._frozen_config.reinstall_atoms
+ for atom, child in self._minimize_children(
+ pkg, dep_priority, root_config, selected_atoms[pkg]):
+
+ # If this was a specially generated virtual atom
+ # from dep_check, map it back to the original, in
+ # order to avoid distortion in places like display
+ # or conflict resolution code.
+ is_virt = hasattr(atom, '_orig_atom')
+ atom = getattr(atom, '_orig_atom', atom)
+
+ if atom.blocker and \
+ (dep_priority.optional or dep_priority.ignored):
+ # For --with-bdeps, ignore build-time only blockers
+ # that originate from built packages.
+ continue
+
+ mypriority = dep_priority.copy()
+ if not atom.blocker:
+
+ if atom.slot_operator == "=":
+ if mypriority.buildtime:
+ mypriority.buildtime_slot_op = True
+ if mypriority.runtime:
+ mypriority.runtime_slot_op = True
+
+ inst_pkgs = [inst_pkg for inst_pkg in
+ reversed(vardb.match_pkgs(atom))
+ if not reinstall_atoms.findAtomForPackage(inst_pkg,
+ modified_use=self._pkg_use_enabled(inst_pkg))]
+ if inst_pkgs:
+ for inst_pkg in inst_pkgs:
+ if self._pkg_visibility_check(inst_pkg):
+ # highest visible
+ mypriority.satisfied = inst_pkg
+ break
+ if not mypriority.satisfied:
+ # none visible, so use highest
+ mypriority.satisfied = inst_pkgs[0]
+
+ dep = Dependency(atom=atom,
+ blocker=atom.blocker, child=child, depth=depth, parent=pkg,
+ priority=mypriority, root=dep_root)
+
+ # In some cases, dep_check will return deps that shouldn't
+ # be processed any further, so they are identified and
+ # discarded here. Try to discard as few as possible since
+ # discarded dependencies reduce the amount of information
+ # available for optimization of merge order.
+ ignored = False
+ if self._ignore_dependency(atom, pkg, child, dep, mypriority, recurse_satisfied):
+ myarg = None
+ try:
+ myarg = next(self._iter_atoms_for_pkg(dep.child), None)
+ except InvalidDependString:
+ if not dep.child.installed:
+ raise
+
+ if myarg is None:
+ # Existing child selection may not be valid unless
+ # it's added to the graph immediately, since "complete"
+ # mode may select a different child later.
+ ignored = True
+ dep.child = None
+ self._dynamic_config._ignored_deps.append(dep)
+
+ if not ignored:
+ if dep_priority.ignored and \
+ not self._dynamic_config._traverse_ignored_deps:
+ if is_virt and dep.child is not None:
+ traversed_virt_pkgs.add(dep.child)
+ dep.child = None
+ self._dynamic_config._ignored_deps.append(dep)
+ else:
+ if not self._add_dep(dep,
+ allow_unsatisfied=allow_unsatisfied):
+ return 0
+ if is_virt and dep.child is not None:
+ traversed_virt_pkgs.add(dep.child)
+
+ selected_atoms.pop(pkg)
+
+ # Add selected indirect virtual deps to the graph. This
+ # takes advantage of circular dependency avoidance that's done
+ # by dep_zapdeps. We preserve actual parent/child relationships
+ # here in order to avoid distorting the dependency graph like
+ # <=portage-2.1.6.x did.
+ for virt_dep, atoms in selected_atoms.items():
+
+ virt_pkg = virt_dep.child
+ if virt_pkg not in traversed_virt_pkgs:
+ continue
+
+ if debug:
+ writemsg_level("\nCandidates: %s: %s\n" % \
+ (virt_pkg.cpv, [str(x) for x in atoms]),
+ noiselevel=-1, level=logging.DEBUG)
+
+ if not dep_priority.ignored or \
+ self._dynamic_config._traverse_ignored_deps:
+
+ inst_pkgs = [inst_pkg for inst_pkg in
+ reversed(vardb.match_pkgs(virt_dep.atom))
+ if not reinstall_atoms.findAtomForPackage(inst_pkg,
+ modified_use=self._pkg_use_enabled(inst_pkg))]
+ if inst_pkgs:
+ for inst_pkg in inst_pkgs:
+ if self._pkg_visibility_check(inst_pkg):
+ # highest visible
+ virt_dep.priority.satisfied = inst_pkg
+ break
+ if not virt_dep.priority.satisfied:
+ # none visible, so use highest
+ virt_dep.priority.satisfied = inst_pkgs[0]
+
+ if not self._add_pkg(virt_pkg, virt_dep):
+ return 0
+
+ for atom, child in self._minimize_children(
+ pkg, self._priority(runtime=True), root_config, atoms):
+
+ # If this was a specially generated virtual atom
+ # from dep_check, map it back to the original, in
+ # order to avoid distortion in places like display
+ # or conflict resolution code.
+ is_virt = hasattr(atom, '_orig_atom')
+ atom = getattr(atom, '_orig_atom', atom)
+
+ # This is a GLEP 37 virtual, so its deps are all runtime.
+ mypriority = self._priority(runtime=True)
+ if not atom.blocker:
+ inst_pkgs = [inst_pkg for inst_pkg in
+ reversed(vardb.match_pkgs(atom))
+ if not reinstall_atoms.findAtomForPackage(inst_pkg,
+ modified_use=self._pkg_use_enabled(inst_pkg))]
+ if inst_pkgs:
+ for inst_pkg in inst_pkgs:
+ if self._pkg_visibility_check(inst_pkg):
+ # highest visible
+ mypriority.satisfied = inst_pkg
+ break
+ if not mypriority.satisfied:
+ # none visible, so use highest
+ mypriority.satisfied = inst_pkgs[0]
+
+ # Dependencies of virtuals are considered to have the
+ # same depth as the virtual itself.
+ dep = Dependency(atom=atom,
+ blocker=atom.blocker, child=child, depth=virt_dep.depth,
+ parent=virt_pkg, priority=mypriority, root=dep_root,
+ collapsed_parent=pkg, collapsed_priority=dep_priority)
+
+ ignored = False
+ if self._ignore_dependency(atom, pkg, child, dep, mypriority, recurse_satisfied):
+ myarg = None
+ try:
+ myarg = next(self._iter_atoms_for_pkg(dep.child), None)
+ except InvalidDependString:
+ if not dep.child.installed:
+ raise
+
+ if myarg is None:
+ ignored = True
+ dep.child = None
+ self._dynamic_config._ignored_deps.append(dep)
+
+ if not ignored:
+ if dep_priority.ignored and \
+ not self._dynamic_config._traverse_ignored_deps:
+ if is_virt and dep.child is not None:
+ traversed_virt_pkgs.add(dep.child)
+ dep.child = None
+ self._dynamic_config._ignored_deps.append(dep)
+ else:
+ if not self._add_dep(dep,
+ allow_unsatisfied=allow_unsatisfied):
+ return 0
+ if is_virt and dep.child is not None:
+ traversed_virt_pkgs.add(dep.child)
+
+ if debug:
+ writemsg_level("\nExiting... %s\n" % (pkg,),
+ noiselevel=-1, level=logging.DEBUG)
+
+ return 1
+
+ def _minimize_children(self, parent, priority, root_config, atoms):
+ """
+ Selects packages to satisfy the given atoms, and minimizes the
+ number of selected packages. This serves to identify and eliminate
+ redundant package selections when multiple atoms happen to specify
+ a version range.
+ """
+
+ atom_pkg_map = {}
+
+ for atom in atoms:
+ if atom.blocker:
+ yield (atom, None)
+ continue
+ dep_pkg, existing_node = self._select_package(
+ root_config.root, atom, parent=parent)
+ if dep_pkg is None:
+ yield (atom, None)
+ continue
+ atom_pkg_map[atom] = dep_pkg
+
+ if len(atom_pkg_map) < 2:
+ for item in atom_pkg_map.items():
+ yield item
+ return
+
+ cp_pkg_map = {}
+ pkg_atom_map = {}
+ for atom, pkg in atom_pkg_map.items():
+ pkg_atom_map.setdefault(pkg, set()).add(atom)
+ cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)
+
+ for pkgs in cp_pkg_map.values():
+ if len(pkgs) < 2:
+ for pkg in pkgs:
+ for atom in pkg_atom_map[pkg]:
+ yield (atom, pkg)
+ continue
+
+ # Use a digraph to identify and eliminate any
+ # redundant package selections.
+ atom_pkg_graph = digraph()
+ cp_atoms = set()
+ for pkg1 in pkgs:
+ for atom in pkg_atom_map[pkg1]:
+ cp_atoms.add(atom)
+ atom_pkg_graph.add(pkg1, atom)
+ atom_set = InternalPackageSet(initial_atoms=(atom,),
+ allow_repo=True)
+ for pkg2 in pkgs:
+ if pkg2 is pkg1:
+ continue
+ if atom_set.findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)):
+ atom_pkg_graph.add(pkg2, atom)
+
+ # In order for the following eliminate_pkg loop to produce
+ # deterministic results, the order of the pkgs list must
+ # not be random (bug 631894). Prefer to eliminate installed
+ # packages first, in case rebuilds are needed, and also sort
+ # in ascending order so that older versions are eliminated
+ # first.
+ pkgs = (sorted(pkg for pkg in pkgs if pkg.installed) +
+ sorted(pkg for pkg in pkgs if not pkg.installed))
+
+ for pkg in pkgs:
+ eliminate_pkg = True
+ for atom in atom_pkg_graph.parent_nodes(pkg):
+ if len(atom_pkg_graph.child_nodes(atom)) < 2:
+ eliminate_pkg = False
+ break
+ if eliminate_pkg:
+ atom_pkg_graph.remove(pkg)
+
+ # Yield ~, =*, < and <= atoms first, since those are more likely to
+ # cause slot conflicts, and we want those atoms to be displayed
+ # in the resulting slot conflict message (see bug #291142).
+ # Give similar treatment to slot/sub-slot atoms.
+ conflict_atoms = []
+ normal_atoms = []
+ abi_atoms = []
+ for atom in cp_atoms:
+ if atom.slot_operator_built:
+ abi_atoms.append(atom)
+ continue
+ conflict = False
+ for child_pkg in atom_pkg_graph.child_nodes(atom):
+ existing_node, matches = \
+ self._check_slot_conflict(child_pkg, atom)
+ if existing_node and not matches:
+ conflict = True
+ break
+ if conflict:
+ conflict_atoms.append(atom)
+ else:
+ normal_atoms.append(atom)
+
+ for atom in chain(abi_atoms, conflict_atoms, normal_atoms):
+ child_pkgs = atom_pkg_graph.child_nodes(atom)
+ # if more than one child, yield highest version
+ if len(child_pkgs) > 1:
+ child_pkgs.sort()
+ yield (atom, child_pkgs[-1])
+
+ def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
+ """
+ Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
+ Yields non-disjunctive deps. Raises InvalidDependString when
+ necessary.
+ """
+ for x in dep_struct:
+ if isinstance(x, list):
+ if x and x[0] == "||":
+ self._queue_disjunction(pkg, dep_root, dep_priority, [x])
+ else:
+ for y in self._queue_disjunctive_deps(
+ pkg, dep_root, dep_priority, x):
+ yield y
+ else:
+ # Note: Eventually this will check for PROPERTIES=virtual
+ # or whatever other metadata gets implemented for this
+ # purpose.
+ if x.cp.startswith('virtual/'):
+ self._queue_disjunction(pkg, dep_root, dep_priority, [x])
+ else:
+ yield x
+
+ def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
+ self._dynamic_config._dep_disjunctive_stack.append(
+ (pkg, dep_root, dep_priority, dep_struct))
+
+ def _pop_disjunction(self, allow_unsatisfied):
+ """
+ Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
+ populate self._dynamic_config._dep_stack.
+ """
+ pkg, dep_root, dep_priority, dep_struct = \
+ self._dynamic_config._dep_disjunctive_stack.pop()
+ if not self._add_pkg_dep_string(
+ pkg, dep_root, dep_priority, dep_struct, allow_unsatisfied):
+ return 0
+ return 1
+
+ def _priority(self, **kwargs):
+ if "remove" in self._dynamic_config.myparams:
+ priority_constructor = UnmergeDepPriority
+ else:
+ priority_constructor = DepPriority
+ return priority_constructor(**kwargs)
+
+ def _dep_expand(self, root_config, atom_without_category):
+ """
+ @param root_config: a root config instance
+ @type root_config: RootConfig
+ @param atom_without_category: an atom without a category component
+ @type atom_without_category: String
+ @rtype: list
+ @return: a list of atoms containing categories (possibly empty)
+ """
+ null_cp = portage.dep_getkey(insert_category_into_atom(
+ atom_without_category, "null"))
+ cat, atom_pn = portage.catsplit(null_cp)
+
+ dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
+ categories = set()
+ for db, pkg_type, built, installed, db_keys in dbs:
+ for cat in db.categories:
+ if db.cp_list("%s/%s" % (cat, atom_pn)):
+ categories.add(cat)
+
+ deps = []
+ for cat in categories:
+ deps.append(Atom(insert_category_into_atom(
+ atom_without_category, cat), allow_repo=True))
+ return deps
+
+ def _have_new_virt(self, root, atom_cp):
+ ret = False
+ for db, pkg_type, built, installed, db_keys in \
+ self._dynamic_config._filtered_trees[root]["dbs"]:
+ if db.cp_list(atom_cp):
+ ret = True
+ break
+ return ret
+
+ def _iter_atoms_for_pkg(self, pkg):
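+ # Yield (arg, atom) pairs for each argument atom matching pkg,
+ # skipping atoms that are better satisfied by a visible, higher
+ # version in a different slot.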
+ depgraph_sets = self._dynamic_config.sets[pkg.root]
+ atom_arg_map = depgraph_sets.atom_arg_map
+ for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg):
+ if atom.cp != pkg.cp and \
+ self._have_new_virt(pkg.root, atom.cp):
+ continue
+ visible_pkgs = \
+ self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
+ visible_pkgs.reverse() # descending order
+ higher_slot = None
+ for visible_pkg in visible_pkgs:
+ if visible_pkg.cp != atom.cp:
+ continue
+ if pkg >= visible_pkg:
+ # This is descending order, and we're not
+ # interested in any versions <= pkg given.
+ break
+ if pkg.slot_atom != visible_pkg.slot_atom:
+ higher_slot = visible_pkg
+ break
+ if higher_slot is not None:
+ continue
+ for arg in atom_arg_map[(atom, pkg.root)]:
+ if isinstance(arg, PackageArg) and \
+ arg.package != pkg:
+ continue
+ yield arg, atom
+
+ def select_files(self, args):
+ # Use the global event loop for spinner progress
+ # indication during file owner lookups (bug #461412).
+ def spinner_cb():
+ self._frozen_config.spinner.update()
+ spinner_cb.handle = self._event_loop.call_soon(spinner_cb)
+
+ spinner_cb.handle = None
+ try:
+ spinner = self._frozen_config.spinner
+ if spinner is not None and \
+ spinner.update is not spinner.update_quiet:
+ spinner_cb.handle = self._event_loop.call_soon(spinner_cb)
+ return self._select_files(args)
+ finally:
+ if spinner_cb.handle is not None:
+ spinner_cb.handle.cancel()
+
+ def _select_files(self, myfiles):
+ """Given a list of .tbz2s, .ebuilds sets, and deps, populate
+ self._dynamic_config._initial_arg_list and call self._resolve to create the
+ appropriate depgraph and return a favorite list."""
+ self._load_vdb()
+ if (self._frozen_config.soname_deps_enabled and
+ "remove" not in self._dynamic_config.myparams):
+ self._index_binpkgs()
+ debug = "--debug" in self._frozen_config.myopts
+ root_config = self._frozen_config.roots[self._frozen_config.target_root]
+ sets = root_config.sets
+ depgraph_sets = self._dynamic_config.sets[root_config.root]
+ myfavorites=[]
+ eroot = root_config.root
+ root = root_config.settings['ROOT']
+ vardb = self._frozen_config.trees[eroot]["vartree"].dbapi
+ real_vardb = self._frozen_config._trees_orig[eroot]["vartree"].dbapi
+ portdb = self._frozen_config.trees[eroot]["porttree"].dbapi
+ bindb = self._frozen_config.trees[eroot]["bintree"].dbapi
+ pkgsettings = self._frozen_config.pkgsettings[eroot]
+ args = []
+ onlydeps = "--onlydeps" in self._frozen_config.myopts
+ lookup_owners = []
+ for x in myfiles:
+ ext = os.path.splitext(x)[1]
+ if ext==".tbz2":
+ if not os.path.exists(x):
+ if os.path.exists(
+ os.path.join(pkgsettings["PKGDIR"], "All", x)):
+ x = os.path.join(pkgsettings["PKGDIR"], "All", x)
+ elif os.path.exists(
+ os.path.join(pkgsettings["PKGDIR"], x)):
+ x = os.path.join(pkgsettings["PKGDIR"], x)
+ else:
+ writemsg("\n\n!!! Binary package '"+str(x)+"' does not exist.\n", noiselevel=-1)
+ writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
+ return 0, myfavorites
+ mytbz2=portage.xpak.tbz2(x)
+ mykey = None
+ cat = mytbz2.getfile("CATEGORY")
+ if cat is not None:
+ cat = _unicode_decode(cat.strip(),
+ encoding=_encodings['repo.content'])
+ mykey = cat + "/" + os.path.basename(x)[:-5]
+
+ if mykey is None:
+ writemsg(colorize("BAD", "\n*** Package is missing CATEGORY metadata: %s.\n\n" % x), noiselevel=-1)
+ self._dynamic_config._skip_restart = True
+ return 0, myfavorites
+
+ x = os.path.realpath(x)
+ for pkg in self._iter_match_pkgs(root_config, "binary", Atom('=%s' % mykey)):
+ if x == os.path.realpath(bindb.bintree.getname(pkg.cpv)):
+ break
+ else:
+ writemsg("\n%s\n\n" % colorize("BAD",
+ "*** " + _("You need to adjust PKGDIR to emerge "
+ "this package: %s") % x), noiselevel=-1)
+ self._dynamic_config._skip_restart = True
+ return 0, myfavorites
+
+ args.append(PackageArg(arg=x, package=pkg,
+ root_config=root_config))
+ elif ext==".ebuild":
+ ebuild_path = portage.util.normalize_path(os.path.abspath(x))
+ pkgdir = os.path.dirname(ebuild_path)
+ tree_root = os.path.dirname(os.path.dirname(pkgdir))
+ cp = pkgdir[len(tree_root)+1:]
+ error_msg = ("\n\n!!! '%s' is not in a valid portage tree "
+ "hierarchy or does not exist\n") % x
+ if not portage.isvalidatom(cp):
+ writemsg(error_msg, noiselevel=-1)
+ return 0, myfavorites
+ cat = portage.catsplit(cp)[0]
+ mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
+ if not portage.isvalidatom("="+mykey):
+ writemsg(error_msg, noiselevel=-1)
+ return 0, myfavorites
+ ebuild_path = portdb.findname(mykey)
+ if ebuild_path:
+ if ebuild_path != os.path.join(os.path.realpath(tree_root),
+ cp, os.path.basename(ebuild_path)):
+ writemsg(colorize("BAD", "\n*** You need to adjust repos.conf to emerge this package.\n\n"), noiselevel=-1)
+ self._dynamic_config._skip_restart = True
+ return 0, myfavorites
+ if mykey not in portdb.xmatch(
+ "match-visible", portage.cpv_getkey(mykey)):
+ writemsg(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use\n"), noiselevel=-1)
+ writemsg(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man\n"), noiselevel=-1)
+ writemsg(colorize("BAD", "*** page for details.\n"), noiselevel=-1)
+ countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
+ "Continuing...")
+ else:
+ writemsg(error_msg, noiselevel=-1)
+ return 0, myfavorites
+ pkg = self._pkg(mykey, "ebuild", root_config,
+ onlydeps=onlydeps, myrepo=portdb.getRepositoryName(
+ os.path.dirname(os.path.dirname(os.path.dirname(ebuild_path)))))
+ args.append(PackageArg(arg=x, package=pkg,
+ root_config=root_config))
+ elif x.startswith(os.path.sep):
+ if not x.startswith(eroot):
+ portage.writemsg(("\n\n!!! '%s' does not start with" + \
+ " $EROOT.\n") % x, noiselevel=-1)
+ self._dynamic_config._skip_restart = True
+ return 0, []
+ # Queue these up since it's most efficient to handle
+ # multiple files in a single iter_owners() call.
+ lookup_owners.append(x)
+ elif x.startswith("." + os.sep) or \
+ x.startswith(".." + os.sep):
+ f = os.path.abspath(x)
+ if not f.startswith(eroot):
+ portage.writemsg(("\n\n!!! '%s' (resolved from '%s') does not start with" + \
+ " $EROOT.\n") % (f, x), noiselevel=-1)
+ self._dynamic_config._skip_restart = True
+ return 0, []
+ lookup_owners.append(f)
+ else:
+ if x in ("system", "world"):
+ x = SETPREFIX + x
+ if x.startswith(SETPREFIX):
+ s = x[len(SETPREFIX):]
+ if s not in sets:
+ raise portage.exception.PackageSetNotFound(s)
+ if s in depgraph_sets.sets:
+ continue
+
+ try:
+ set_atoms = root_config.setconfig.getSetAtoms(s)
+ except portage.exception.PackageSetNotFound as e:
+ writemsg_level("\n\n", level=logging.ERROR,
+ noiselevel=-1)
+ for pset in list(depgraph_sets.sets.values()) + [sets[s]]:
+ for error_msg in pset.errors:
+ writemsg_level("%s\n" % (error_msg,),
+ level=logging.ERROR, noiselevel=-1)
+
+ writemsg_level(("emerge: the given set '%s' "
+ "contains a non-existent set named '%s'.\n") % \
+ (s, e), level=logging.ERROR, noiselevel=-1)
+ if s in ('world', 'selected') and \
+ SETPREFIX + e.value in sets['selected']:
+ writemsg_level(("Use `emerge --deselect %s%s` to "
+ "remove this set from world_sets.\n") %
+ (SETPREFIX, e,), level=logging.ERROR,
+ noiselevel=-1)
+ writemsg_level("\n", level=logging.ERROR,
+ noiselevel=-1)
+ return False, myfavorites
+
+ pset = sets[s]
+ depgraph_sets.sets[s] = pset
+ args.append(SetArg(arg=x, pset=pset,
+ root_config=root_config))
+ continue
+ if not is_valid_package_atom(x, allow_repo=True):
+ portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
+ noiselevel=-1)
+ portage.writemsg("!!! Please check ebuild(5) for full details.\n")
+ portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
+ self._dynamic_config._skip_restart = True
+ return (0,[])
+ # Don't expand categories or old-style virtuals here unless
+ # necessary. Expansion of old-style virtuals here causes at
+ # least the following problems:
+ # 1) It's more difficult to determine which set(s) an atom
+ # came from, if any.
+ # 2) It takes away freedom from the resolver to choose other
+ # possible expansions when necessary.
+ if "/" in x.split(":")[0]:
+ args.append(AtomArg(arg=x, atom=Atom(x, allow_repo=True),
+ root_config=root_config))
+ continue
+ expanded_atoms = self._dep_expand(root_config, x)
+ installed_cp_set = set()
+ for atom in expanded_atoms:
+ if vardb.cp_list(atom.cp):
+ installed_cp_set.add(atom.cp)
+
+ if len(installed_cp_set) > 1:
+ non_virtual_cps = set()
+ for atom_cp in installed_cp_set:
+ if not atom_cp.startswith("virtual/"):
+ non_virtual_cps.add(atom_cp)
+ if len(non_virtual_cps) == 1:
+ installed_cp_set = non_virtual_cps
+
+ if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
+ installed_cp = next(iter(installed_cp_set))
+ for atom in expanded_atoms:
+ if atom.cp == installed_cp:
+ available = False
+ for pkg in self._iter_match_pkgs_any(
+ root_config, atom.without_use,
+ onlydeps=onlydeps):
+ if not pkg.installed:
+ available = True
+ break
+ if available:
+ expanded_atoms = [atom]
+ break
+
+ # If a non-virtual package and one or more virtual packages
+ # are in expanded_atoms, use the non-virtual package.
+ if len(expanded_atoms) > 1:
+ number_of_virtuals = 0
+ for expanded_atom in expanded_atoms:
+ if expanded_atom.cp.startswith("virtual/"):
+ number_of_virtuals += 1
+ else:
+ candidate = expanded_atom
+ if len(expanded_atoms) - number_of_virtuals == 1:
+ expanded_atoms = [ candidate ]
+
+ if len(expanded_atoms) > 1:
+ writemsg("\n\n", noiselevel=-1)
+ ambiguous_package_name(x, expanded_atoms, root_config,
+ self._frozen_config.spinner, self._frozen_config.myopts)
+ self._dynamic_config._skip_restart = True
+ return False, myfavorites
+ if expanded_atoms:
+ atom = expanded_atoms[0]
+ else:
+ null_atom = Atom(insert_category_into_atom(x, "null"),
+ allow_repo=True)
+ cat, atom_pn = portage.catsplit(null_atom.cp)
+ virts_p = root_config.settings.get_virts_p().get(atom_pn)
+ if virts_p:
+ # Allow the depgraph to choose which virtual.
+ atom = Atom(null_atom.replace('null/', 'virtual/', 1),
+ allow_repo=True)
+ else:
+ atom = null_atom
+
+ if atom.use and atom.use.conditional:
+ writemsg(
+ ("\n\n!!! '%s' contains a conditional " + \
+ "which is not allowed.\n") % (x,), noiselevel=-1)
+ writemsg("!!! Please check ebuild(5) for full details.\n")
+ self._dynamic_config._skip_restart = True
+ return (0,[])
+
+ args.append(AtomArg(arg=x, atom=atom,
+ root_config=root_config))
+
+ if lookup_owners:
+ relative_paths = []
+ search_for_multiple = False
+ if len(lookup_owners) > 1:
+ search_for_multiple = True
+
+ for x in lookup_owners:
+ if not search_for_multiple and os.path.isdir(x):
+ search_for_multiple = True
+ relative_paths.append(x[len(root)-1:])
+
+ owners = set()
+ for pkg, relative_path in \
+ real_vardb._owners.iter_owners(relative_paths):
+ owners.add(pkg.mycpv)
+ if not search_for_multiple:
+ break
+
+ if not owners:
+ portage.writemsg(("\n\n!!! '%s' is not claimed " + \
+ "by any package.\n") % lookup_owners[0], noiselevel=-1)
+ self._dynamic_config._skip_restart = True
+ return 0, []
+
+ for cpv in owners:
+ pkg = vardb._pkg_str(cpv, None)
+ atom = Atom("%s:%s" % (pkg.cp, pkg.slot))
+ args.append(AtomArg(arg=atom, atom=atom,
+ root_config=root_config))
+
+ if "--update" in self._frozen_config.myopts:
+ # In some cases, the greedy slots behavior can pull in a slot that
+ # the user would want to uninstall due to it being blocked by a
+ # newer version in a different slot. Therefore, it's necessary to
+ # detect and discard any that should be uninstalled. Each time
+ # that arguments are updated, package selections are repeated in
+ # order to ensure consistency with the current arguments:
+ #
+ # 1) Initialize args
+ # 2) Select packages and generate initial greedy atoms
+ # 3) Update args with greedy atoms
+ # 4) Select packages and generate greedy atoms again, while
+ # accounting for any blockers between selected packages
+ # 5) Update args with revised greedy atoms
+
+ self._set_args(args)
+ greedy_args = []
+ for arg in args:
+ greedy_args.append(arg)
+ if not isinstance(arg, AtomArg):
+ continue
+ for atom in self._greedy_slots(arg.root_config, arg.atom):
+ greedy_args.append(
+ AtomArg(arg=arg.arg, atom=atom,
+ root_config=arg.root_config))
+
+ self._set_args(greedy_args)
+ del greedy_args
+
+ # Revise greedy atoms, accounting for any blockers
+ # between selected packages.
+ revised_greedy_args = []
+ for arg in args:
+ revised_greedy_args.append(arg)
+ if not isinstance(arg, AtomArg):
+ continue
+ for atom in self._greedy_slots(arg.root_config, arg.atom,
+ blocker_lookahead=True):
+ revised_greedy_args.append(
+ AtomArg(arg=arg.arg, atom=atom,
+ root_config=arg.root_config))
+ args = revised_greedy_args
+ del revised_greedy_args
+
+ args.extend(self._gen_reinstall_sets())
+ self._set_args(args)
+
+ myfavorites = set(myfavorites)
+ for arg in args:
+ if isinstance(arg, (AtomArg, PackageArg)):
+ myfavorites.add(arg.atom)
+ elif isinstance(arg, SetArg):
+ if not arg.internal:
+ myfavorites.add(arg.arg)
+ myfavorites = list(myfavorites)
+
+ if debug:
+ portage.writemsg("\n", noiselevel=-1)
+ # Order needs to be preserved since a feature of --nodeps
+ # is to allow the user to force a specific merge order.
+ self._dynamic_config._initial_arg_list = args[:]
+
+ return self._resolve(myfavorites)
+
+ def _gen_reinstall_sets(self):
+
+ atom_list = []
+ for root, atom in self._rebuild.rebuild_list:
+ atom_list.append((root, '__auto_rebuild__', atom))
+ for root, atom in self._rebuild.reinstall_list:
+ atom_list.append((root, '__auto_reinstall__', atom))
+ for root, atom in self._dynamic_config._slot_operator_replace_installed:
+ atom_list.append((root, '__auto_slot_operator_replace_installed__', atom))
+
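+ # Group the collected atoms by (root, set name) so that one
+ # internal package set is yielded per reinstall category below.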
+ set_dict = {}
+ for root, set_name, atom in atom_list:
+ set_dict.setdefault((root, set_name), []).append(atom)
+
+ for (root, set_name), atoms in set_dict.items():
+ yield SetArg(arg=(SETPREFIX + set_name),
+ # Set reset_depth=False here, since we don't want these
+ # special sets to interact with depth calculations (see
+ # the emerge --deep=DEPTH option), though we want them
+ # to behave like normal arguments in most other respects.
+ pset=InternalPackageSet(initial_atoms=atoms),
+ force_reinstall=True,
+ internal=True,
+ reset_depth=False,
+ root_config=self._frozen_config.roots[root])
+
+ def _resolve(self, myfavorites):
+ """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
+ call self._creategraph to process theier deps and return
+ a favorite list."""
+ debug = "--debug" in self._frozen_config.myopts
+ onlydeps = "--onlydeps" in self._frozen_config.myopts
+ myroot = self._frozen_config.target_root
+ pkgsettings = self._frozen_config.pkgsettings[myroot]
+ pprovideddict = pkgsettings.pprovideddict
+ virtuals = pkgsettings.getvirtuals()
+ args = self._dynamic_config._initial_arg_list[:]
+
+ for arg in self._expand_set_args(args, add_to_digraph=True):
+ for atom in arg.pset.getAtoms():
+ self._spinner_update()
+ dep = Dependency(atom=atom, onlydeps=onlydeps,
+ root=myroot, parent=arg)
+ try:
+ pprovided = pprovideddict.get(atom.cp)
+ if pprovided and portage.match_from_list(atom, pprovided):
+ # A provided package has been specified on the command line.
+ self._dynamic_config._pprovided_args.append((arg, atom))
+ continue
+ if isinstance(arg, PackageArg):
+ if not self._add_pkg(arg.package, dep) or \
+ not self._create_graph():
+ if not self.need_restart():
+ writemsg(("\n\n!!! Problem " + \
+ "resolving dependencies for %s\n") % \
+ arg.arg, noiselevel=-1)
+ return 0, myfavorites
+ continue
+ if debug:
+ writemsg_level("\n Arg: %s\n Atom: %s\n" %
+ (arg, atom), noiselevel=-1, level=logging.DEBUG)
+ pkg, existing_node = self._select_package(
+ myroot, atom, onlydeps=onlydeps)
+ if not pkg:
+ pprovided_match = False
+ for virt_choice in virtuals.get(atom.cp, []):
+ expanded_atom = portage.dep.Atom(
+ atom.replace(atom.cp, virt_choice.cp, 1))
+ pprovided = pprovideddict.get(expanded_atom.cp)
+ if pprovided and \
+ portage.match_from_list(expanded_atom, pprovided):
+ # A provided package has been
+ # specified on the command line.
+ self._dynamic_config._pprovided_args.append((arg, atom))
+ pprovided_match = True
+ break
+ if pprovided_match:
+ continue
+
+ excluded = False
+ for any_match in self._iter_match_pkgs_any(
+ self._frozen_config.roots[myroot], atom):
+ if self._frozen_config.excluded_pkgs.findAtomForPackage(
+ any_match, modified_use=self._pkg_use_enabled(any_match)):
+ excluded = True
+ break
+ if excluded:
+ continue
+
+ if not (isinstance(arg, SetArg) and \
+ arg.name in ("selected", "world")):
+ self._dynamic_config._unsatisfied_deps_for_display.append(
+ ((myroot, atom), {"myparent" : arg}))
+ return 0, myfavorites
+
+ self._dynamic_config._missing_args.append((arg, atom))
+ continue
+ if atom.cp != pkg.cp:
+ # For old-style virtuals, we need to repeat the
+ # package.provided check against the selected package.
+ expanded_atom = atom.replace(atom.cp, pkg.cp)
+ pprovided = pprovideddict.get(pkg.cp)
+ if pprovided and \
+ portage.match_from_list(expanded_atom, pprovided):
+ # A provided package has been
+ # specified on the command line.
+ self._dynamic_config._pprovided_args.append((arg, atom))
+ continue
+ if pkg.installed and \
+ "selective" not in self._dynamic_config.myparams and \
+ not self._frozen_config.excluded_pkgs.findAtomForPackage(
+ pkg, modified_use=self._pkg_use_enabled(pkg)):
+ self._dynamic_config._unsatisfied_deps_for_display.append(
+ ((myroot, atom), {"myparent" : arg}))
+ # Previous behavior was to bail out in this case, but
+ # since the dep is satisfied by the installed package,
+ # it's more friendly to continue building the graph
+ # and just show a warning message. Therefore, only bail
+ # out here if the atom is not from either the system or
+ # world set.
+ if not (isinstance(arg, SetArg) and \
+ arg.name in ("selected", "system", "world")):
+ return 0, myfavorites
+
+ # Add the selected package to the graph as soon as possible
+ # so that later dep_check() calls can use it as feedback
+ # for making more consistent atom selections.
+ if not self._add_pkg(pkg, dep):
+ if self.need_restart():
+ pass
+ elif isinstance(arg, SetArg):
+ writemsg(("\n\n!!! Problem resolving " + \
+ "dependencies for %s from %s\n") % \
+ (atom, arg.arg), noiselevel=-1)
+ else:
+ writemsg(("\n\n!!! Problem resolving " + \
+ "dependencies for %s\n") % \
+ (atom,), noiselevel=-1)
+ return 0, myfavorites
+
+ except SystemExit as e:
+ raise # Re-raise so that SystemExit can propagate.
+ except Exception as e:
+ writemsg("\n\n!!! Problem in '%s' dependencies.\n" % atom, noiselevel=-1)
+ writemsg("!!! %s %s\n" % (str(e), str(getattr(e, "__module__", None))))
+ raise
+
+ # Now that the root packages have been added to the graph,
+ # process the dependencies.
+ if not self._create_graph():
+ self._apply_parent_use_changes()
+ return 0, myfavorites
+
+ try:
+ self.altlist()
+ except self._unknown_internal_error:
+ return False, myfavorites
+
+ have_slot_conflict = any(self._dynamic_config._package_tracker.slot_conflicts())
+ if (have_slot_conflict and
+ not self._accept_blocker_conflicts()) or \
+ (self._dynamic_config._allow_backtracking and
+ "slot conflict" in self._dynamic_config._backtrack_infos):
+ return False, myfavorites
+
+ if self._rebuild.trigger_rebuilds():
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ config = backtrack_infos.setdefault("config", {})
+ config["rebuild_list"] = self._rebuild.rebuild_list
+ config["reinstall_list"] = self._rebuild.reinstall_list
+ self._dynamic_config._need_restart = True
+ return False, myfavorites
+
+ if "config" in self._dynamic_config._backtrack_infos and \
+ ("slot_operator_mask_built" in self._dynamic_config._backtrack_infos["config"] or
+ "slot_operator_replace_installed" in self._dynamic_config._backtrack_infos["config"]) and \
+ self.need_restart():
+ return False, myfavorites
+
+ if not self._dynamic_config._prune_rebuilds and \
+ self._dynamic_config._slot_operator_replace_installed and \
+ self._get_missed_updates():
+ # When there are missed updates, we might have triggered
+ # some unnecessary rebuilds (see bug #439688). So, prune
+ # all the rebuilds and backtrack with the problematic
+ # updates masked. The next backtrack run should pull in
+ # any rebuilds that are really needed, and this
+ # prune_rebuilds path should never be entered more than
+ # once in a series of backtracking nodes (in order to
+ # avoid a backtracking loop).
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ config = backtrack_infos.setdefault("config", {})
+ config["prune_rebuilds"] = True
+ self._dynamic_config._need_restart = True
+ return False, myfavorites
+
+ if self.need_restart():
+ # want_restart_for_use_change triggers this
+ return False, myfavorites
+
+ if "--fetchonly" not in self._frozen_config.myopts and \
+ "--buildpkgonly" in self._frozen_config.myopts:
+ graph_copy = self._dynamic_config.digraph.copy()
+ removed_nodes = set()
+ for node in graph_copy:
+ if not isinstance(node, Package) or \
+ node.operation == "nomerge":
+ removed_nodes.add(node)
+ graph_copy.difference_update(removed_nodes)
+ if not graph_copy.hasallzeros(ignore_priority = \
+ DepPrioritySatisfiedRange.ignore_medium):
+ self._dynamic_config._buildpkgonly_deps_unsatisfied = True
+ self._dynamic_config._skip_restart = True
+ return False, myfavorites
+
+ if (not self._dynamic_config._prune_rebuilds and
+ self._ignored_binaries_autounmask_backtrack()):
+ config = self._dynamic_config._backtrack_infos.setdefault("config", {})
+ config["prune_rebuilds"] = True
+ self._dynamic_config._need_restart = True
+ return False, myfavorites
+
+ # Any failures except those due to autounmask *alone* should return
+ # before this point, since the success_without_autounmask flag that's
+ # set below is reserved for cases where there are *zero* other
+ # problems. For reference, see backtrack_depgraph, where it skips the
+ # get_best_run() call when success_without_autounmask is True.
+ if self._have_autounmask_changes():
+ #We failed if the user needs to change the configuration
+ self._dynamic_config._success_without_autounmask = True
+ if (self._frozen_config.myopts.get("--autounmask-continue") is True and
+ "--pretend" not in self._frozen_config.myopts):
+ # This will return false if it fails or if the user
+ # aborts via --ask.
+ if self._display_autounmask(autounmask_continue=True):
+ self._apply_autounmask_continue_state()
+ self._dynamic_config._need_config_reload = True
+ return True, myfavorites
+ return False, myfavorites
+
+ # We return True here unless ignored binaries forced a restart above.
+ return (True, myfavorites)
+
+ def _apply_autounmask_continue_state(self):
+ """
+ Apply autounmask changes to Package instances, so that their
+ state will be consistent with the configuration file changes.
+ """
+ for node in self._dynamic_config._serialized_tasks_cache:
+ if isinstance(node, Package):
+ effective_use = self._pkg_use_enabled(node)
+ if effective_use != node.use.enabled:
+ node._metadata['USE'] = ' '.join(effective_use)
+
+ def _apply_parent_use_changes(self):
+ """
+ For parents with unsatisfied conditional dependencies, translate
+ USE change suggestions into autounmask changes.
+ """
+ if (self._dynamic_config._unsatisfied_deps_for_display and
+ self._dynamic_config._autounmask):
+ remaining_items = []
+ for item in self._dynamic_config._unsatisfied_deps_for_display:
+ pargs, kwargs = item
+ kwargs = kwargs.copy()
+ kwargs['collect_use_changes'] = True
+ if not self._show_unsatisfied_dep(*pargs, **kwargs):
+ remaining_items.append(item)
+ if len(remaining_items) != len(self._dynamic_config._unsatisfied_deps_for_display):
+ self._dynamic_config._unsatisfied_deps_for_display = remaining_items
+
+ def _set_args(self, args):
+ """
+ Create the "__non_set_args__" package set from atoms and packages given as
+ arguments. This method can be called multiple times if necessary.
+ The package selection cache is automatically invalidated, since
+ arguments influence package selections.
+ """
+
+ set_atoms = {}
+ non_set_atoms = {}
+ for root in self._dynamic_config.sets:
+ depgraph_sets = self._dynamic_config.sets[root]
+ depgraph_sets.sets.setdefault('__non_set_args__',
+ InternalPackageSet(allow_repo=True)).clear()
+ depgraph_sets.atoms.clear()
+ depgraph_sets.atom_arg_map.clear()
+ set_atoms[root] = []
+ non_set_atoms[root] = []
+
+ # We don't add set args to the digraph here since that
+ # happens at a later stage and we don't want to make
+ # any state changes here that aren't reversed by
+ # another call to this method.
+ for arg in self._expand_set_args(args, add_to_digraph=False):
+ atom_arg_map = self._dynamic_config.sets[
+ arg.root_config.root].atom_arg_map
+ if isinstance(arg, SetArg):
+ atom_group = set_atoms[arg.root_config.root]
+ else:
+ atom_group = non_set_atoms[arg.root_config.root]
+
+ for atom in arg.pset.getAtoms():
+ atom_group.append(atom)
+ atom_key = (atom, arg.root_config.root)
+ refs = atom_arg_map.get(atom_key)
+ if refs is None:
+ refs = []
+ atom_arg_map[atom_key] = refs
+ if arg not in refs:
+ refs.append(arg)
+
+ for root in self._dynamic_config.sets:
+ depgraph_sets = self._dynamic_config.sets[root]
+ depgraph_sets.atoms.update(chain(set_atoms.get(root, []),
+ non_set_atoms.get(root, [])))
+ depgraph_sets.sets['__non_set_args__'].update(
+ non_set_atoms.get(root, []))
+
+ # Invalidate the package selection cache, since
+ # arguments influence package selections.
+ self._dynamic_config._highest_pkg_cache.clear()
+ self._dynamic_config._highest_pkg_cache_cp_map.clear()
+ for trees in self._dynamic_config._filtered_trees.values():
+ trees["porttree"].dbapi._clear_cache()
+
+ def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
+ """
+ Return a list of slot atoms corresponding to installed slots that
+ differ from the slot of the highest visible match. When
+ blocker_lookahead is True, slot atoms that would trigger a blocker
+ conflict are automatically discarded, potentially allowing automatic
+ uninstallation of older slots when appropriate.
+ """
+ highest_pkg, in_graph = self._select_package(root_config.root, atom)
+ if highest_pkg is None:
+ return []
+ vardb = root_config.trees["vartree"].dbapi
+ slots = set()
+ for cpv in vardb.match(atom):
+ # don't mix new virtuals with old virtuals
+ pkg = vardb._pkg_str(cpv, None)
+ if pkg.cp == highest_pkg.cp:
+ slots.add(pkg.slot)
+
+ slots.add(highest_pkg.slot)
+ if len(slots) == 1:
+ return []
+ greedy_pkgs = []
+ slots.remove(highest_pkg.slot)
+ while slots:
+ slot = slots.pop()
+ slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
+ pkg, in_graph = self._select_package(root_config.root, slot_atom)
+ if pkg is not None and \
+ pkg.cp == highest_pkg.cp and pkg < highest_pkg:
+ greedy_pkgs.append(pkg)
+ if not greedy_pkgs:
+ return []
+ if not blocker_lookahead:
+ return [pkg.slot_atom for pkg in greedy_pkgs]
+
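+ # Collect blocker atoms from each candidate's dependency
+ # strings so that slots which conflict with the highest
+ # version can be filtered out below.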
+ blockers = {}
+ blocker_dep_keys = Package._dep_keys
+ for pkg in greedy_pkgs + [highest_pkg]:
+ dep_str = " ".join(pkg._metadata[k] for k in blocker_dep_keys)
+ try:
+ selected_atoms = self._select_atoms(
+ pkg.root, dep_str, self._pkg_use_enabled(pkg),
+ parent=pkg, strict=True)
+ except portage.exception.InvalidDependString:
+ continue
+ blocker_atoms = []
+ for atoms in selected_atoms.values():
+ blocker_atoms.extend(x for x in atoms if x.blocker)
+ blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
+
+ if highest_pkg not in blockers:
+ return []
+
+ # filter packages with invalid deps
+ greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
+
+ # filter packages that conflict with highest_pkg
+ greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
+ (blockers[highest_pkg].findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)) or \
+ blockers[pkg].findAtomForPackage(highest_pkg, modified_use=self._pkg_use_enabled(highest_pkg)))]
+
+ if not greedy_pkgs:
+ return []
+
+ # If two packages conflict, discard the lower version.
+ discard_pkgs = set()
+ greedy_pkgs.sort(reverse=True)
+ for i in range(len(greedy_pkgs) - 1):
+ pkg1 = greedy_pkgs[i]
+ if pkg1 in discard_pkgs:
+ continue
+ for j in range(i + 1, len(greedy_pkgs)):
+ pkg2 = greedy_pkgs[j]
+ if pkg2 in discard_pkgs:
+ continue
+ if blockers[pkg1].findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)) or \
+ blockers[pkg2].findAtomForPackage(pkg1, modified_use=self._pkg_use_enabled(pkg1)):
+ # pkg1 > pkg2
+ discard_pkgs.add(pkg2)
+
+ return [pkg.slot_atom for pkg in greedy_pkgs \
+ if pkg not in discard_pkgs]
+
+ def _select_atoms_from_graph(self, *pargs, **kwargs):
+ """
+ Prefer atoms matching packages that have already been
+ added to the graph or those that are installed and have
+ not been scheduled for replacement.
+ """
+ kwargs["trees"] = self._dynamic_config._graph_trees
+ return self._select_atoms_highest_available(*pargs, **kwargs)
+
+ def _select_atoms_highest_available(self, root, depstring,
+ myuse=None, parent=None, strict=True, trees=None, priority=None):
+ """This will raise InvalidDependString if necessary. If trees is
+ None then self._dynamic_config._filtered_trees is used."""
+
+ if not isinstance(depstring, list):
+ eapi = None
+ is_valid_flag = None
+ if parent is not None:
+ eapi = parent.eapi
+ if not parent.installed:
+ is_valid_flag = parent.iuse.is_valid_flag
+ depstring = portage.dep.use_reduce(depstring,
+ uselist=myuse, opconvert=True, token_class=Atom,
+ is_valid_flag=is_valid_flag, eapi=eapi)
+
+ if (self._dynamic_config.myparams.get(
+ "ignore_built_slot_operator_deps", "n") == "y" and
+ parent and parent.built):
+ ignore_built_slot_operator_deps(depstring)
+
+ pkgsettings = self._frozen_config.pkgsettings[root]
+ if trees is None:
+ trees = self._dynamic_config._filtered_trees
+ mytrees = trees[root]
+ atom_graph = digraph()
+ if True:
+ # Temporarily disable autounmask so that || preferences
+ # account for masking and USE settings.
+ _autounmask_backup = self._dynamic_config._autounmask
+ self._dynamic_config._autounmask = False
+ # backup state for restoration, in case of recursive
+ # calls to this method
+ backup_parent = self._select_atoms_parent
+ backup_state = mytrees.copy()
+ try:
+ # clear state from previous call, in case this
+ # call is recursive (the backup taken above will
+ # be used to restore it later)
+ self._select_atoms_parent = None
+ mytrees.pop("pkg_use_enabled", None)
+ mytrees.pop("parent", None)
+ mytrees.pop("atom_graph", None)
+ mytrees.pop("priority", None)
+
+ mytrees["pkg_use_enabled"] = self._pkg_use_enabled
+ if parent is not None:
+ self._select_atoms_parent = parent
+ mytrees["parent"] = parent
+ mytrees["atom_graph"] = atom_graph
+ if priority is not None:
+ mytrees["priority"] = priority
+
+ mycheck = portage.dep_check(depstring, None,
+ pkgsettings, myuse=myuse,
+ myroot=root, trees=trees)
+ finally:
+ # restore state
+ self._dynamic_config._autounmask = _autounmask_backup
+ self._select_atoms_parent = backup_parent
+ mytrees.pop("pkg_use_enabled", None)
+ mytrees.pop("parent", None)
+ mytrees.pop("atom_graph", None)
+ mytrees.pop("priority", None)
+ mytrees.update(backup_state)
+ if not mycheck[0]:
+ raise portage.exception.InvalidDependString(mycheck[1])
+ if parent is None:
+ selected_atoms = mycheck[1]
+ elif parent not in atom_graph:
+ selected_atoms = {parent : mycheck[1]}
+ else:
+ # Recursively traversed virtual dependencies, and their
+ # direct dependencies, are considered to have the same
+ # depth as direct dependencies.
+ if isinstance(parent.depth, int):
+ virt_depth = parent.depth + 1
+ else:
+ # The depth may be None when called via
+ # _select_atoms_probe, or it may be
+ # _UNREACHABLE_DEPTH for complete mode.
+ virt_depth = parent.depth
+
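+ # Record the ids of the atoms chosen by dep_check (including
+ # the original atoms behind expanded virtuals) so that the
+ # traversal below only follows chosen atoms.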
+ chosen_atom_ids = frozenset(chain(
+ (id(atom) for atom in mycheck[1]),
+ (id(atom._orig_atom) for atom in mycheck[1]
+ if hasattr(atom, '_orig_atom')),
+ ))
+ selected_atoms = OrderedDict()
+ node_stack = [(parent, None, None)]
+ traversed_nodes = set()
+ while node_stack:
+ node, node_parent, parent_atom = node_stack.pop()
+ traversed_nodes.add(node)
+ if node is parent:
+ k = parent
+ else:
+ if node_parent is parent:
+ if priority is None:
+ node_priority = None
+ else:
+ node_priority = priority.copy()
+ else:
+ # virtuals only have runtime deps
+ node_priority = self._priority(runtime=True)
+
+ k = Dependency(atom=parent_atom,
+ blocker=parent_atom.blocker, child=node,
+ depth=virt_depth, parent=node_parent,
+ priority=node_priority, root=node.root)
+
+ child_atoms = []
+ selected_atoms[k] = child_atoms
+ for atom_node in atom_graph.child_nodes(node):
+ child_atom = atom_node[0]
+ if id(child_atom) not in chosen_atom_ids:
+ continue
+ child_atoms.append(child_atom)
+ for child_node in atom_graph.child_nodes(atom_node):
+ if child_node in traversed_nodes:
+ continue
+ if not portage.match_from_list(
+ child_atom, [child_node]):
+ # Typically this means that the atom
+ # specifies USE deps that are unsatisfied
+ # by the selected package. The caller will
+ # record this as an unsatisfied dependency
+ # when necessary.
+ continue
+ node_stack.append((child_node, node, child_atom))
+
+ return selected_atoms
+
+ def _expand_virt_from_graph(self, root, atom):
+ if not isinstance(atom, Atom):
+ atom = Atom(atom)
+
+ if not atom.cp.startswith("virtual/"):
+ yield atom
+ return
+
+ any_match = False
+ for pkg in self._dynamic_config._package_tracker.match(root, atom):
+ try:
+ rdepend = self._select_atoms_from_graph(
+ pkg.root, pkg._metadata.get("RDEPEND", ""),
+ myuse=self._pkg_use_enabled(pkg),
+ parent=pkg, strict=False)
+ except InvalidDependString as e:
+ writemsg_level("!!! Invalid RDEPEND in " + \
+ "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
+ (pkg.root, pkg.cpv, e),
+ noiselevel=-1, level=logging.ERROR)
+ continue
+
+ for atoms in rdepend.values():
+ for atom in atoms:
+ if hasattr(atom, "_orig_atom"):
+ # Ignore virtual atoms since we're only
+ # interested in expanding the real atoms.
+ continue
+ yield atom
+
+ any_match = True
+
+ if not any_match:
+ yield atom
+
+ def _virt_deps_visible(self, pkg, ignore_use=False):
+ """
+ Assumes pkg is a virtual package. Traverses virtual deps recursively
+ and returns True if all deps are visible, False otherwise. This is
+ useful for checking if it will be necessary to expand virtual slots,
+ for cases like bug #382557.
+ """
+ try:
+ rdepend = self._select_atoms(
+ pkg.root, pkg._metadata.get("RDEPEND", ""),
+ myuse=self._pkg_use_enabled(pkg),
+ parent=pkg, priority=self._priority(runtime=True))
+ except InvalidDependString as e:
+ if not pkg.installed:
+ raise
+ writemsg_level("!!! Invalid RDEPEND in " + \
+ "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
+ (pkg.root, pkg.cpv, e),
+ noiselevel=-1, level=logging.ERROR)
+ return False
+
+ for atoms in rdepend.values():
+ for atom in atoms:
+ if ignore_use:
+ atom = atom.without_use
+ pkg, existing = self._select_package(
+ pkg.root, atom)
+ if pkg is None or not self._pkg_visibility_check(pkg):
+ return False
+
+ return True
+
+ def _get_dep_chain(self, start_node, target_atom=None,
+ unsatisfied_dependency=False):
+ """
+ Returns a list of (node string, node_type) pairs that represent a dep chain.
+ If target_atom is None, the first package shown is pkg's parent.
+ If target_atom is not None the first package shown is pkg.
+ If unsatisfied_dependency is True, the first parent selected is one whose
+ dependency is not satisfied by 'pkg'. This is needed for USE changes.
+ (Does not support target_atom.)
+ """
+ traversed_nodes = set()
+ dep_chain = []
+ node = start_node
+ child = None
+ all_parents = self._dynamic_config._parent_atoms
+ graph = self._dynamic_config.digraph
+
+ def format_pkg(pkg):
+ pkg_name = "%s%s%s" % (pkg.cpv, _repo_separator, pkg.repo)
+ return pkg_name
+
+ if target_atom is not None and isinstance(node, Package):
+ affecting_use = set()
+ for dep_str in Package._dep_keys:
+ try:
+ affecting_use.update(extract_affecting_use(
+ node._metadata[dep_str], target_atom,
+ eapi=node.eapi))
+ except InvalidDependString:
+ if not node.installed:
+ raise
+ affecting_use.difference_update(node.use.mask, node.use.force)
+ pkg_name = format_pkg(node)
+
+ if affecting_use:
+ usedep = []
+ for flag in affecting_use:
+ if flag in self._pkg_use_enabled(node):
+ usedep.append(flag)
+ else:
+ usedep.append("-"+flag)
+ pkg_name += "[%s]" % ",".join(usedep)
+
+ dep_chain.append((pkg_name, node.type_name))
+
+
+ # To build a dep chain for the given package we take
+ # "random" parents from the digraph, except for the
+ # first package, because we want a parent that forced
+ # the corresponding change (i.e. '>=foo-2' instead of 'foo').
+
+ traversed_nodes.add(start_node)
+
+ start_node_parent_atoms = {}
+ for ppkg, patom in all_parents.get(node, []):
+ # Get a list of suitable atoms. For use deps
+ # (i.e. when unsatisfied_dependency is True) we
+ # require that start_node does not match the atom.
+ if not unsatisfied_dependency or \
+ not patom.match(start_node):
+ start_node_parent_atoms.setdefault(patom, []).append(ppkg)
+
+ if start_node_parent_atoms:
+ # If there are parents in all_parents then use one of them.
+ # If not, then this package got pulled in by an Arg and
+ # will be correctly handled by the code that handles later
+ # packages in the dep chain.
+ if (any(not x.package for x in start_node_parent_atoms) and
+ any(x.package for x in start_node_parent_atoms)):
+ for x in list(start_node_parent_atoms):
+ if not x.package:
+ del start_node_parent_atoms[x]
+ if next(iter(start_node_parent_atoms)).package:
+ best_match = best_match_to_list(node.cpv,
+ start_node_parent_atoms)
+ else:
+ best_match = next(iter(start_node_parent_atoms))
+
+ child = node
+ for ppkg in start_node_parent_atoms[best_match]:
+ node = ppkg
+ if ppkg in self._dynamic_config._initial_arg_list:
+ # Stop if reached the top level of the dep chain.
+ break
+
+ while node is not None:
+ traversed_nodes.add(node)
+
+ if node not in graph:
+ # The parent is not in the graph due to backtracking.
+ break
+
+ elif isinstance(node, DependencyArg):
+ if graph.parent_nodes(node):
+ node_type = "set"
+ else:
+ node_type = "argument"
+ dep_chain.append(("%s" % (node,), node_type))
+
+ elif node is not start_node:
+ for ppkg, patom in all_parents[child]:
+ if ppkg == node:
+ if child is start_node and unsatisfied_dependency and \
+ patom.match(child):
+ # This atom is satisfied by child, there must be another atom.
+ continue
+ atom = (patom.unevaluated_atom
+ if patom.package else patom)
+ break
+
+ dep_strings = set()
+ priorities = graph.nodes[node][0].get(child)
+ if priorities is None:
+ # This edge comes from _parent_atoms and was not added to
+ # the graph, and _parent_atoms does not contain priorities.
+ for k in Package._dep_keys:
+ dep_strings.add(node._metadata[k])
+ else:
+ for priority in priorities:
+ if priority.buildtime:
+ for k in Package._buildtime_keys:
+ dep_strings.add(node._metadata[k])
+ if priority.runtime:
+ dep_strings.add(node._metadata["RDEPEND"])
+ if priority.runtime_post:
+ dep_strings.add(node._metadata["PDEPEND"])
+
+ affecting_use = set()
+ for dep_str in dep_strings:
+ try:
+ affecting_use.update(extract_affecting_use(
+ dep_str, atom, eapi=node.eapi))
+ except InvalidDependString:
+ if not node.installed:
+ raise
+
+ # Don't show flags as 'affecting' if the user can't change them.
+ affecting_use.difference_update(node.use.mask, \
+ node.use.force)
+
+ pkg_name = format_pkg(node)
+ if affecting_use:
+ usedep = []
+ for flag in affecting_use:
+ if flag in self._pkg_use_enabled(node):
+ usedep.append(flag)
+ else:
+ usedep.append("-"+flag)
+ pkg_name += "[%s]" % ",".join(usedep)
+
+ dep_chain.append((pkg_name, node.type_name))
+
+ # When traversing to parents, prefer arguments over packages
+ # since arguments are root nodes. Never traverse the same
+ # package twice, in order to prevent an infinite loop.
+ child = node
+ selected_parent = None
+ parent_arg = None
+ parent_merge = None
+ parent_unsatisfied = None
+
+ for parent in self._dynamic_config.digraph.parent_nodes(node):
+ if parent in traversed_nodes:
+ continue
+ if isinstance(parent, DependencyArg):
+ parent_arg = parent
+ else:
+ if isinstance(parent, Package) and \
+ parent.operation == "merge":
+ parent_merge = parent
+ if unsatisfied_dependency and node is start_node:
+ # Make sure that pkg doesn't satisfy parent's dependency.
+ # This ensures that we select the correct parent for use
+ # flag changes.
+ for ppkg, atom in all_parents[start_node]:
+ if parent is ppkg:
+ if not atom.match(start_node):
+ parent_unsatisfied = parent
+ break
+ else:
+ selected_parent = parent
+
+ if parent_unsatisfied is not None:
+ selected_parent = parent_unsatisfied
+ elif parent_merge is not None:
+ # Prefer parent in the merge list (bug #354747).
+ selected_parent = parent_merge
+ elif parent_arg is not None:
+ if self._dynamic_config.digraph.parent_nodes(parent_arg):
+ selected_parent = parent_arg
+ else:
+ dep_chain.append(("%s" % (parent_arg,), "argument"))
+ selected_parent = None
+
+ node = selected_parent
+ return dep_chain
+
+ def _get_dep_chain_as_comment(self, pkg, unsatisfied_dependency=False):
+ dep_chain = self._get_dep_chain(pkg, unsatisfied_dependency=unsatisfied_dependency)
+ display_list = []
+ for node, node_type in dep_chain:
+ if node_type == "argument":
+ display_list.append("required by %s (argument)" % node)
+ else:
+ display_list.append("required by %s" % node)
+
+ msg = "# " + "\n# ".join(display_list) + "\n"
+ return msg
+
+
+ def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
+ check_backtrack=False, check_autounmask_breakage=False, show_req_use=None,
+ collect_use_changes=False):
+ """
+ When check_backtrack=True, no output is produced and
+ the method either returns or raises _backtrack_mask if
+ a matching package has been masked by backtracking.
+ """
+ backtrack_mask = False
+ autounmask_broke_use_dep = False
+ if atom.package:
+ xinfo = '"%s"' % atom.unevaluated_atom
+ atom_without_use = atom.without_use
+ else:
+ xinfo = '"%s"' % atom
+ atom_without_use = None
+
+ if arg:
+ xinfo='"%s"' % arg
+ if isinstance(myparent, AtomArg):
+ xinfo = '"%s"' % (myparent,)
+ # Discard null/ from failed cpv_expand category expansion.
+ xinfo = xinfo.replace("null/", "")
+ if root != self._frozen_config._running_root.root:
+ xinfo = "%s for %s" % (xinfo, root)
+ masked_packages = []
+ missing_use = []
+ missing_use_adjustable = set()
+ required_use_unsatisfied = []
+ masked_pkg_instances = set()
+ have_eapi_mask = False
+ pkgsettings = self._frozen_config.pkgsettings[root]
+ root_config = self._frozen_config.roots[root]
+ portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
+ vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
+ bindb = self._frozen_config.roots[root].trees["bintree"].dbapi
+ dbs = self._dynamic_config._filtered_trees[root]["dbs"]
+ use_ebuild_visibility = self._frozen_config.myopts.get(
+ '--use-ebuild-visibility', 'n') != 'n'
+
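+ # Scan each db for packages that could have satisfied the atom,
+ # collecting the reasons why each candidate was masked or rejected.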
+ for db, pkg_type, built, installed, db_keys in dbs:
+ if installed:
+ continue
+ if atom.soname:
+ if not isinstance(db, DbapiProvidesIndex):
+ continue
+ cpv_list = db.match(atom)
+ elif hasattr(db, "xmatch"):
+ cpv_list = db.xmatch("match-all-cpv-only", atom.without_use)
+ else:
+ cpv_list = db.match(atom.without_use)
+
+ if atom.soname:
+ repo_list = [None]
+ elif atom.repo is None and hasattr(db, "getRepositories"):
+ repo_list = db.getRepositories(catpkg=atom.cp)
+ else:
+ repo_list = [atom.repo]
+
+ # descending order
+ cpv_list.reverse()
+ for cpv in cpv_list:
+ for repo in repo_list:
+ if not db.cpv_exists(cpv, myrepo=repo):
+ continue
+
+ metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings, db, pkg_type, \
+ built, installed, db_keys, myrepo=repo, _pkg_use_enabled=self._pkg_use_enabled)
+ if metadata is not None and \
+ portage.eapi_is_supported(metadata["EAPI"]):
+ if not repo:
+ repo = metadata.get('repository')
+ pkg = self._pkg(cpv, pkg_type, root_config,
+ installed=installed, myrepo=repo)
+ # pkg._metadata contains calculated USE for ebuilds,
+ # required later for getMissingLicenses.
+ metadata = pkg._metadata
+ if pkg.invalid:
+ # Avoid doing any operations with packages that
+ # have invalid metadata. It would be unsafe at
+ # least because it could trigger unhandled
+ # exceptions in places like check_required_use().
+ masked_packages.append(
+ (root_config, pkgsettings, cpv, repo, metadata, mreasons))
+ continue
+ if atom.soname and not atom.match(pkg):
+ continue
+ if (atom_without_use is not None and
+ not atom_without_use.match(pkg)):
+ continue
+ if pkg in self._dynamic_config._runtime_pkg_mask:
+ backtrack_reasons = \
+ self._dynamic_config._runtime_pkg_mask[pkg]
+ mreasons.append('backtracking: %s' % \
+ ', '.join(sorted(backtrack_reasons)))
+ backtrack_mask = True
+ if not mreasons and self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
+ modified_use=self._pkg_use_enabled(pkg)):
+ mreasons = ["exclude option"]
+ if mreasons:
+ masked_pkg_instances.add(pkg)
+ if atom.package and atom.unevaluated_atom.use:
+ try:
+ if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required) \
+ or atom.violated_conditionals(self._pkg_use_enabled(pkg), pkg.iuse.is_valid_flag).use:
+ missing_use.append(pkg)
+ if atom.match(pkg):
+ autounmask_broke_use_dep = True
+ if not mreasons:
+ continue
+ except InvalidAtom:
+ writemsg("violated_conditionals raised " + \
+ "InvalidAtom: '%s' parent: %s" % \
+ (atom, myparent), noiselevel=-1)
+ raise
+ if not mreasons and \
+ not pkg.built and \
+ pkg._metadata.get("REQUIRED_USE") and \
+ eapi_has_required_use(pkg.eapi):
+ if not check_required_use(
+ pkg._metadata["REQUIRED_USE"],
+ self._pkg_use_enabled(pkg),
+ pkg.iuse.is_valid_flag,
+ eapi=pkg.eapi):
+ required_use_unsatisfied.append(pkg)
+ continue
+
+ root_slot = (pkg.root, pkg.slot_atom)
+ if pkg.built and root_slot in self._rebuild.rebuild_list:
+ mreasons = ["need to rebuild from source"]
+ elif pkg.installed and root_slot in self._rebuild.reinstall_list:
+ mreasons = ["need to rebuild from source"]
+ elif (pkg.built and not mreasons and
+ self._dynamic_config.ignored_binaries.get(
+ pkg, {}).get("respect_use")):
+ mreasons = ["use flag configuration mismatch"]
+ elif (pkg.built and not mreasons and
+ self._dynamic_config.ignored_binaries.get(
+ pkg, {}).get("changed_deps")):
+ mreasons = ["changed deps"]
+ elif (pkg.built and use_ebuild_visibility and
+ not self._equiv_ebuild_visible(pkg)):
+ equiv_ebuild = self._equiv_ebuild(pkg)
+ if equiv_ebuild is None:
+ if portdb.cpv_exists(pkg.cpv):
+ mreasons = ["ebuild corrupt"]
+ else:
+ mreasons = ["ebuild not available"]
+ elif not mreasons:
+ mreasons = get_masking_status(
+ equiv_ebuild, pkgsettings, root_config,
+ use=self._pkg_use_enabled(equiv_ebuild))
+ if mreasons:
+ metadata = equiv_ebuild._metadata
+
+ masked_packages.append(
+ (root_config, pkgsettings, cpv, repo, metadata, mreasons))
+
+ if check_backtrack:
+ if backtrack_mask:
+ raise self._backtrack_mask()
+ else:
+ return
+
+ if check_autounmask_breakage:
+ if autounmask_broke_use_dep:
+ raise self._autounmask_breakage()
+ else:
+ return
+
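+ # For packages that only fail the atom's USE requirements, work out
+ # which flags would need to change and whether they can be changed.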
+ missing_use_reasons = []
+ missing_iuse_reasons = []
+ for pkg in missing_use:
+ use = self._pkg_use_enabled(pkg)
+ missing_iuse = []
+ # Use the unevaluated atom here, because some flags might have
+ # been lost during evaluation.
+ required_flags = atom.unevaluated_atom.use.required
+ missing_iuse = pkg.iuse.get_missing_iuse(required_flags)
+
+ mreasons = []
+ if missing_iuse:
+ mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
+ missing_iuse_reasons.append((pkg, mreasons))
+ else:
+ need_enable = sorted(atom.use.enabled.difference(use).intersection(pkg.iuse.all))
+ need_disable = sorted(atom.use.disabled.intersection(use).intersection(pkg.iuse.all))
+
+ untouchable_flags = \
+ frozenset(chain(pkg.use.mask, pkg.use.force))
+ if any(x in untouchable_flags for x in
+ chain(need_enable, need_disable)):
+ continue
+
+ missing_use_adjustable.add(pkg)
+ required_use = pkg._metadata.get("REQUIRED_USE")
+ required_use_warning = ""
+ if required_use:
+ old_use = self._pkg_use_enabled(pkg)
+ new_use = set(self._pkg_use_enabled(pkg))
+ for flag in need_enable:
+ new_use.add(flag)
+ for flag in need_disable:
+ new_use.discard(flag)
+ if check_required_use(required_use, old_use,
+ pkg.iuse.is_valid_flag, eapi=pkg.eapi) \
+ and not check_required_use(required_use, new_use,
+ pkg.iuse.is_valid_flag, eapi=pkg.eapi):
+ required_use_warning = ", this change violates use flag constraints " + \
+ "defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))
+
+ if need_enable or need_disable:
+ changes = []
+ changes.extend(colorize("red", "+" + x) \
+ for x in need_enable)
+ changes.extend(colorize("blue", "-" + x) \
+ for x in need_disable)
+ mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
+ missing_use_reasons.append((pkg, mreasons))
+
+ if not missing_iuse and myparent and atom.unevaluated_atom.use.conditional:
+ # Let's see if the violated use deps are conditional.
+ # If so, suggest to change them on the parent.
+
+ # If the child package is masked then a change to
+ # parent USE is not a valid solution (a normal mask
+ # message should be displayed instead).
+ if pkg in masked_pkg_instances:
+ continue
+
+ mreasons = []
+ violated_atom = atom.unevaluated_atom.violated_conditionals(self._pkg_use_enabled(pkg), \
+ pkg.iuse.is_valid_flag, self._pkg_use_enabled(myparent))
+ if not (violated_atom.use.enabled or violated_atom.use.disabled):
+ #all violated use deps are conditional
+ changes = []
+ conditional = violated_atom.use.conditional
+ involved_flags = set(chain(conditional.equal, conditional.not_equal, \
+ conditional.enabled, conditional.disabled))
+
+ untouchable_flags = \
+ frozenset(chain(myparent.use.mask, myparent.use.force))
+ if any(x in untouchable_flags for x in involved_flags):
+ continue
+
+ required_use = myparent._metadata.get("REQUIRED_USE")
+ required_use_warning = ""
+ if required_use:
+ old_use = self._pkg_use_enabled(myparent)
+ new_use = set(self._pkg_use_enabled(myparent))
+ for flag in involved_flags:
+ if flag in old_use:
+ new_use.discard(flag)
+ else:
+ new_use.add(flag)
+ if check_required_use(required_use, old_use,
+ myparent.iuse.is_valid_flag,
+ eapi=myparent.eapi) and \
+ not check_required_use(required_use, new_use,
+ myparent.iuse.is_valid_flag,
+ eapi=myparent.eapi):
+ required_use_warning = ", this change violates use flag constraints " + \
+ "defined by %s: '%s'" % (myparent.cpv, \
+ human_readable_required_use(required_use))
+
+ target_use = {}
+ for flag in involved_flags:
+ if flag in self._pkg_use_enabled(myparent):
+ target_use[flag] = False
+ changes.append(colorize("blue", "-" + flag))
+ else:
+ target_use[flag] = True
+ changes.append(colorize("red", "+" + flag))
+
+ if collect_use_changes and not required_use_warning:
+ previous_changes = self._dynamic_config._needed_use_config_changes.get(myparent)
+ self._pkg_use_enabled(myparent, target_use=target_use)
+ if previous_changes is not self._dynamic_config._needed_use_config_changes.get(myparent):
+ return True
+
+ mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
+ if (myparent, mreasons) not in missing_use_reasons:
+ missing_use_reasons.append((myparent, mreasons))
+
+ if collect_use_changes:
+ return False
+
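+ # Masked packages are reported through the normal masking output
+ # below, so only consider unmasked instances for USE change suggestions.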
+ unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
+ in missing_use_reasons if pkg not in masked_pkg_instances]
+
+ unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
+ in missing_iuse_reasons if pkg not in masked_pkg_instances]
+
+ show_missing_use = False
+ if unmasked_use_reasons:
+ # Only show the latest version.
+ show_missing_use = []
+ pkg_reason = None
+ parent_reason = None
+ for pkg, mreasons in unmasked_use_reasons:
+ if pkg is myparent:
+ if parent_reason is None:
+ #This happens if a use change on the parent
+ #leads to a satisfied conditional use dep.
+ parent_reason = (pkg, mreasons)
+ elif pkg_reason is None:
+ # Don't rely on the first pkg in unmasked_use_reasons
+ # being the highest version of the dependency.
+ pkg_reason = (pkg, mreasons)
+ if pkg_reason:
+ show_missing_use.append(pkg_reason)
+ if parent_reason:
+ show_missing_use.append(parent_reason)
+
+ elif unmasked_iuse_reasons:
+ masked_with_iuse = False
+ for pkg in masked_pkg_instances:
+ # Use atom.unevaluated_atom here, because some flags might have
+ # been lost during evaluation.
+ if not pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
+ # Package(s) with required IUSE are masked,
+ # so display a normal masking message.
+ masked_with_iuse = True
+ break
+ if not masked_with_iuse:
+ show_missing_use = unmasked_iuse_reasons
+
+ if required_use_unsatisfied:
+ # If there's a higher unmasked version in missing_use_adjustable
+ # then we want to show that instead.
+ for pkg in missing_use_adjustable:
+ if pkg not in masked_pkg_instances and \
+ pkg > required_use_unsatisfied[0]:
+ required_use_unsatisfied = False
+ break
+
+ mask_docs = False
+
+ if show_req_use is None and required_use_unsatisfied:
+ # We have an unmasked package that only requires USE adjustment
+ # in order to satisfy REQUIRED_USE, and nothing more. We assume
+ # that the user wants the latest version, so only the first
+ # instance is displayed.
+ show_req_use = required_use_unsatisfied[0]
+
+ if show_req_use is not None:
+
+ pkg = show_req_use
+ output_cpv = pkg.cpv + _repo_separator + pkg.repo
+ writemsg("\n!!! " + \
+ colorize("BAD", "The ebuild selected to satisfy ") + \
+ colorize("INFORM", xinfo) + \
+ colorize("BAD", " has unmet requirements.") + "\n",
+ noiselevel=-1)
+ use_display = pkg_use_display(pkg, self._frozen_config.myopts)
+ writemsg("- %s %s\n" % (output_cpv, use_display),
+ noiselevel=-1)
+ writemsg("\n The following REQUIRED_USE flag constraints " + \
+ "are unsatisfied:\n", noiselevel=-1)
+ reduced_noise = check_required_use(
+ pkg._metadata["REQUIRED_USE"],
+ self._pkg_use_enabled(pkg),
+ pkg.iuse.is_valid_flag,
+ eapi=pkg.eapi).tounicode()
+ writemsg(" %s\n" % \
+ human_readable_required_use(reduced_noise),
+ noiselevel=-1)
+ normalized_required_use = \
+ " ".join(pkg._metadata["REQUIRED_USE"].split())
+ if reduced_noise != normalized_required_use:
+ writemsg("\n The above constraints " + \
+ "are a subset of the following complete expression:\n",
+ noiselevel=-1)
+ writemsg(" %s\n" % \
+ human_readable_required_use(normalized_required_use),
+ noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+
+ elif show_missing_use:
+ writemsg("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
+ writemsg("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
+ for pkg, mreasons in show_missing_use:
+ writemsg("- "+pkg.cpv+_repo_separator+pkg.repo+" ("+", ".join(mreasons)+")\n", noiselevel=-1)
+
+ elif masked_packages:
+ writemsg("\n!!! " + \
+ colorize("BAD", "All ebuilds that could satisfy ") + \
+ colorize("INFORM", xinfo) + \
+ colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
+ writemsg("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
+ have_eapi_mask = show_masked_packages(masked_packages)
+ if have_eapi_mask:
+ writemsg("\n", noiselevel=-1)
+ msg = ("The current version of portage supports " + \
+ "EAPI '%s'. You must upgrade to a newer version" + \
+ " of portage before EAPI masked packages can" + \
+ " be installed.") % portage.const.EAPI
+ writemsg("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+ mask_docs = True
+ else:
+ cp_exists = False
+ if atom.package and not atom.cp.startswith("null/"):
+ for pkg in self._iter_match_pkgs_any(
+ root_config, Atom(atom.cp)):
+ cp_exists = True
+ break
+
+ writemsg("\nemerge: there are no %s to satisfy " %
+ ("binary packages" if
+ self._frozen_config.myopts.get("--usepkgonly", "y") == True
+ else "ebuilds") + green(xinfo) + ".\n", noiselevel=-1)
+ if isinstance(myparent, AtomArg) and \
+ not cp_exists and \
+ self._frozen_config.myopts.get(
+ "--misspell-suggestions", "y") != "n":
+
+ writemsg("\nemerge: searching for similar names..."
+ , noiselevel=-1)
+
+ search_index = self._frozen_config.myopts.get("--search-index", "y") != "n"
+ # fakedbapi is indexed
+ dbs = [vardb]
+ if "--usepkgonly" not in self._frozen_config.myopts:
+ dbs.append(IndexedPortdb(portdb) if search_index else portdb)
+ if "--usepkg" in self._frozen_config.myopts:
+ # bindbapi is indexed
+ dbs.append(bindb)
+
+ matches = similar_name_search(dbs, atom)
+
+ if len(matches) == 1:
+ writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
+ , noiselevel=-1)
+ elif len(matches) > 1:
+ writemsg(
+ "\nemerge: Maybe you meant any of these: %s?\n" % \
+ (", ".join(matches),), noiselevel=-1)
+ else:
+ # Generally, this would only happen if
+ # all dbapis are empty.
+ writemsg(" nothing similar found.\n"
+ , noiselevel=-1)
+ msg = []
+ if not isinstance(myparent, AtomArg):
+ # It's redundant to show parent for AtomArg since
+ # it's the same as 'xinfo' displayed above.
+ dep_chain = self._get_dep_chain(myparent, atom)
+ for node, node_type in dep_chain:
+ msg.append('(dependency required by "%s" [%s])' % \
+ (colorize('INFORM', "%s" % (node)), node_type))
+
+ if msg:
+ writemsg("\n".join(msg), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+
+ if mask_docs:
+ show_mask_docs()
+ writemsg("\n", noiselevel=-1)
+
+ def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
+ for db, pkg_type, built, installed, db_keys in \
+ self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
+ for pkg in self._iter_match_pkgs(root_config,
+ pkg_type, atom, onlydeps=onlydeps):
+ yield pkg
+
+ def _iter_match_pkgs(self, root_config, pkg_type, atom,
+ onlydeps=False):
+ if atom.package:
+ return self._iter_match_pkgs_atom(root_config, pkg_type,
+ atom, onlydeps=onlydeps)
+ else:
+ return self._iter_match_pkgs_soname(root_config, pkg_type,
+ atom, onlydeps=onlydeps)
+
+ def _iter_match_pkgs_soname(self, root_config, pkg_type, atom,
+ onlydeps=False):
+ db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
+ installed = pkg_type == 'installed'
+
+ if isinstance(db, DbapiProvidesIndex):
+ # descending order
+ for cpv in reversed(db.match(atom)):
+ yield self._pkg(cpv, pkg_type, root_config,
+ installed=installed, onlydeps=onlydeps)
+
+ def _iter_match_pkgs_atom(self, root_config, pkg_type, atom,
+ onlydeps=False):
+ """
+ Iterate over Package instances of pkg_type matching the given atom.
+ This does not check visibility and it also does not match USE for
+ unbuilt ebuilds, since USE is lazily calculated after visibility
+ checks (to avoid the expense when possible).
+ """
+
+ db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
+ atom_exp = dep_expand(atom, mydb=db, settings=root_config.settings)
+ cp_list = db.cp_list(atom_exp.cp)
+ matched_something = False
+ installed = pkg_type == 'installed'
+
+ if cp_list:
+ atom_set = InternalPackageSet(initial_atoms=(atom,),
+ allow_repo=True)
+
+ # descending order
+ cp_list.reverse()
+ for cpv in cp_list:
+ # Call match_from_list on one cpv at a time, in order
+ # to avoid unnecessary match_from_list comparisons on
+ # versions that are never yielded from this method.
+ if match_from_list(atom_exp, [cpv]):
+ try:
+ pkg = self._pkg(cpv, pkg_type, root_config,
+ installed=installed, onlydeps=onlydeps,
+ myrepo=getattr(cpv, 'repo', None))
+ except portage.exception.PackageNotFound:
+ pass
+ else:
+ # A cpv can be returned from dbapi.match() as an
+ # old-style virtual match even in cases when the
+ # package does not actually PROVIDE the virtual.
+ # Filter out any such false matches here.
+
+ # Make sure that cpv from the current repo satisfies the atom.
+ # This might not be the case if there are several repos with
+ # the same cpv, but different metadata keys, like SLOT.
+ # Also, parts of the match that require metadata access
+ # are deferred until we have cached the metadata in a
+ # Package instance.
+ if not atom_set.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ continue
+ matched_something = True
+ yield pkg
+
+ # USE=multislot can make an installed package appear as if
+ # it doesn't satisfy a slot dependency. Rebuilding the ebuild
+ # won't do any good as long as USE=multislot is enabled since
+ # the newly built package still won't have the expected slot.
+ # Therefore, assume that such SLOT dependencies are already
+ # satisfied rather than forcing a rebuild.
+ if not matched_something and installed and \
+ atom.slot is not None and not atom.slot_operator_built:
+
+ if "remove" in self._dynamic_config.myparams:
+ # We need to search the portdbapi, which is not in our
+ # normal dbs list, in order to find the real SLOT.
+ portdb = self._frozen_config.trees[root_config.root]["porttree"].dbapi
+ db_keys = list(portdb._aux_cache_keys)
+ dbs = [(portdb, "ebuild", False, False, db_keys)]
+ else:
+ dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
+
+ cp_list = db.cp_list(atom_exp.cp)
+ if cp_list:
+ atom_set = InternalPackageSet(
+ initial_atoms=(atom.without_slot,), allow_repo=True)
+ atom_exp_without_slot = atom_exp.without_slot
+ cp_list.reverse()
+ for cpv in cp_list:
+ if not match_from_list(atom_exp_without_slot, [cpv]):
+ continue
+ slot_available = False
+ for other_db, other_type, other_built, \
+ other_installed, other_keys in dbs:
+ try:
+ if portage.dep._match_slot(atom,
+ other_db._pkg_str(_unicode(cpv), None)):
+ slot_available = True
+ break
+ except (KeyError, InvalidData):
+ pass
+ if not slot_available:
+ continue
+ inst_pkg = self._pkg(cpv, "installed",
+ root_config, installed=installed, myrepo=atom.repo)
+ # Remove the slot from the atom and verify that
+ # the package matches the resulting atom.
+ if atom_set.findAtomForPackage(inst_pkg):
+ yield inst_pkg
+ return
+
+ def _select_pkg_highest_available(self, root, atom, onlydeps=False, parent=None):
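+ # Cache selections by (root, atom, ...) key, and index the keys by cp
+ # (or soname atom) so that _prune_highest_pkg_cache can invalidate them.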
+ if atom.package:
+ cache_key = (root, atom, atom.unevaluated_atom, onlydeps,
+ self._dynamic_config._autounmask)
+ self._dynamic_config._highest_pkg_cache_cp_map.\
+ setdefault((root, atom.cp), []).append(cache_key)
+ else:
+ cache_key = (root, atom, onlydeps,
+ self._dynamic_config._autounmask)
+ self._dynamic_config._highest_pkg_cache_cp_map.\
+ setdefault((root, atom), []).append(cache_key)
+ ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
+ if ret is not None:
+ return ret
+ ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps, parent=parent)
+ self._dynamic_config._highest_pkg_cache[cache_key] = ret
+ pkg, existing = ret
+ if pkg is not None:
+ if self._pkg_visibility_check(pkg) and \
+ not (pkg.installed and pkg.masks):
+ self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
+ return ret
+
+ def _is_argument(self, pkg):
+ for arg, atom in self._iter_atoms_for_pkg(pkg):
+ if isinstance(arg, (AtomArg, PackageArg)):
+ return True
+ return False
+
+ def _prune_highest_pkg_cache(self, pkg):
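+ # Drop cached selections for each cp in pkg.provided_cps and for each
+ # soname atom in pkg.provides, since this package may change them.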
+ cache = self._dynamic_config._highest_pkg_cache
+ key_map = self._dynamic_config._highest_pkg_cache_cp_map
+ for cp in pkg.provided_cps:
+ for cache_key in key_map.pop((pkg.root, cp), []):
+ cache.pop(cache_key, None)
+ if pkg.provides is not None:
+ for atom in pkg.provides:
+ for cache_key in key_map.pop((pkg.root, atom), []):
+ cache.pop(cache_key, None)
+
+ def _want_installed_pkg(self, pkg):
+ """
+ Given an installed package returned from select_pkg, return
+ True if the user has not explicitly requested that this package
+ be replaced (typically via an atom on the command line).
+ """
+ if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ return True
+
+ arg = False
+ try:
+ for arg, atom in self._iter_atoms_for_pkg(pkg):
+ if arg.force_reinstall:
+ return False
+ except InvalidDependString:
+ pass
+
+ if "selective" in self._dynamic_config.myparams:
+ return True
+
+ return not arg
+
+ def _want_update_pkg(self, parent, pkg):
+
+ if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ return False
+
+ arg_atoms = None
+ try:
+ arg_atoms = list(self._iter_atoms_for_pkg(pkg))
+ except InvalidDependString:
+ if not pkg.installed:
+ # should have been masked before it was selected
+ raise
+
+ depth = parent.depth or 0
+ if isinstance(depth, int):
+ depth += 1
+
+ if arg_atoms:
+ for arg, atom in arg_atoms:
+ if arg.reset_depth:
+ depth = 0
+ break
+
+ update = "--update" in self._frozen_config.myopts
+
+ return (not self._dynamic_config._complete_mode and
+ (arg_atoms or update) and
+ not self._too_deep(depth))
+
+ def _too_deep(self, depth):
+ """
+ Check if a package depth is deeper than the max allowed depth.
+
+ @param depth: the depth of a particular package
+ @type depth: int or _UNREACHABLE_DEPTH
+ @rtype: bool
+ @return: True if the package is deeper than the max allowed depth
+ """
+ deep = self._dynamic_config.myparams.get("deep", 0)
+ if depth is self._UNREACHABLE_DEPTH:
+ return True
+ elif deep is True:
+ return False
+ else:
+ # All non-integer cases are handled above,
+ # so both values must be int type.
+ return depth > deep
+
+ def _depth_increment(self, depth, n=1):
+ """
+ Return depth + n if depth is an int, otherwise return depth.
+
+ @param depth: the depth of a particular package
+ @type depth: int or _UNREACHABLE_DEPTH
+ @param n: number to add (default is 1)
+ @type n: int
+ @rtype: int or _UNREACHABLE_DEPTH
+ @return: depth + n if depth is an int, otherwise depth unchanged
+ """
+ return depth + n if isinstance(depth, int) else depth
+
+ def _equiv_ebuild(self, pkg):
+ try:
+ return self._pkg(
+ pkg.cpv, "ebuild", pkg.root_config, myrepo=pkg.repo)
+ except portage.exception.PackageNotFound:
+ return next(self._iter_match_pkgs(pkg.root_config,
+ "ebuild", Atom("=%s" % (pkg.cpv,))), None)
+
+ def _equiv_ebuild_visible(self, pkg, autounmask_level=None):
+ try:
+ pkg_eb = self._pkg(
+ pkg.cpv, "ebuild", pkg.root_config, myrepo=pkg.repo)
+ except portage.exception.PackageNotFound:
+ pkg_eb_visible = False
+ for pkg_eb in self._iter_match_pkgs(pkg.root_config,
+ "ebuild", Atom("=%s" % (pkg.cpv,))):
+ if self._pkg_visibility_check(pkg_eb, autounmask_level):
+ pkg_eb_visible = True
+ break
+ if not pkg_eb_visible:
+ return False
+ else:
+ if not self._pkg_visibility_check(pkg_eb, autounmask_level):
+ return False
+
+ return True
+
+ def _equiv_binary_installed(self, pkg):
+ build_time = pkg.build_time
+ if not build_time:
+ return False
+
+ try:
+ inst_pkg = self._pkg(pkg.cpv, "installed",
+ pkg.root_config, installed=True)
+ except PackageNotFound:
+ return False
+
+ return build_time == inst_pkg.build_time
+
+ class _AutounmaskLevel(object):
+ __slots__ = ("allow_use_changes", "allow_unstable_keywords", "allow_license_changes", \
+ "allow_missing_keywords", "allow_unmasks")
+
+ def __init__(self):
+ self.allow_use_changes = False
+ self.allow_license_changes = False
+ self.allow_unstable_keywords = False
+ self.allow_missing_keywords = False
+ self.allow_unmasks = False
+
+ def _autounmask_levels(self):
+ """
+ Iterate over the different allowed things to unmask.
+
+ 0. USE
+ 1. USE + license
+ 2. USE + ~arch + license
+ 3. USE + ~arch + license + missing keywords
+ 4. USE + license + masks
+ 5. USE + ~arch + license + masks
+ 6. USE + ~arch + license + missing keywords + masks
+
+ Some thoughts:
+ * Do least invasive changes first.
+ * Try unmasking alone before unmasking + missing keywords
+ to avoid -9999 versions if possible
+ """
+
+ if self._dynamic_config._autounmask is not True:
+ return
+
+ autounmask_keep_keywords = self._frozen_config.myopts.get("--autounmask-keep-keywords", "n") != "n"
+ autounmask_keep_masks = self._frozen_config.myopts.get("--autounmask-keep-masks", "n") != "n"
+ autounmask_level = self._AutounmaskLevel()
+
+ autounmask_level.allow_use_changes = True
+ yield autounmask_level
+
+ autounmask_level.allow_license_changes = True
+ yield autounmask_level
+
+ if not autounmask_keep_keywords:
+ autounmask_level.allow_unstable_keywords = True
+ yield autounmask_level
+
+ if not (autounmask_keep_keywords or autounmask_keep_masks):
+ autounmask_level.allow_unstable_keywords = True
+ autounmask_level.allow_missing_keywords = True
+ yield autounmask_level
+
+ if not autounmask_keep_masks:
+ # 4. USE + license + masks
+ # Try to respect keywords while discarding
+ # package.mask (see bug #463394).
+ autounmask_level.allow_unstable_keywords = False
+ autounmask_level.allow_missing_keywords = False
+ autounmask_level.allow_unmasks = True
+ yield autounmask_level
+
+ if not (autounmask_keep_keywords or autounmask_keep_masks):
+ autounmask_level.allow_unstable_keywords = True
+
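+ # 5. USE + ~arch + license + masks
+ # 6. USE + ~arch + license + missing keywords + masks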
+ for missing_keyword, unmask in ((False, True), (True, True)):
+
+ autounmask_level.allow_missing_keywords = missing_keyword
+ autounmask_level.allow_unmasks = unmask
+
+ yield autounmask_level
+
+
+ def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False, parent=None):
+ pkg, existing = self._wrapped_select_pkg_highest_available_imp(
+ root, atom, onlydeps=onlydeps, parent=parent)
+
+ default_selection = (pkg, existing)
+
+ if self._dynamic_config._autounmask is True:
+ if pkg is not None and \
+ pkg.installed and \
+ not self._want_installed_pkg(pkg):
+ pkg = None
+
+ # Temporarily reset _need_restart state, in order to
+ # avoid interference as reported in bug #459832.
+ earlier_need_restart = self._dynamic_config._need_restart
+ self._dynamic_config._need_restart = False
+ try:
+ for autounmask_level in self._autounmask_levels():
+ if pkg is not None:
+ break
+
+ pkg, existing = \
+ self._wrapped_select_pkg_highest_available_imp(
+ root, atom, onlydeps=onlydeps,
+ autounmask_level=autounmask_level, parent=parent)
+
+ if pkg is not None and \
+ pkg.installed and \
+ not self._want_installed_pkg(pkg):
+ pkg = None
+
+ if self._dynamic_config._need_restart:
+ return None, None
+ finally:
+ if earlier_need_restart:
+ self._dynamic_config._need_restart = True
+
+ if pkg is None:
+ # This ensures that we can fall back to an installed package
+ # that may have been rejected in the autounmask path above.
+ return default_selection
+
+ return pkg, existing
+
+ def _pkg_visibility_check(self, pkg, autounmask_level=None, trust_graph=True):
+
+ if pkg.visible:
+ return True
+
+ if trust_graph and pkg in self._dynamic_config.digraph:
+ # Sometimes we need to temporarily disable
+ # dynamic_config._autounmask, but for overall
+ # consistency in dependency resolution, in most
+ # cases we want to treat packages in the graph
+ # as though they are visible.
+ return True
+
+ if not self._dynamic_config._autounmask or autounmask_level is None:
+ return False
+
+ pkgsettings = self._frozen_config.pkgsettings[pkg.root]
+ root_config = self._frozen_config.roots[pkg.root]
+ mreasons = _get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
+
+ masked_by_unstable_keywords = False
+ masked_by_missing_keywords = False
+ missing_licenses = None
+ masked_by_something_else = False
+ masked_by_p_mask = False
+
+ for reason in mreasons:
+ hint = reason.unmask_hint
+
+ if hint is None:
+ masked_by_something_else = True
+ elif hint.key == "unstable keyword":
+ masked_by_unstable_keywords = True
+ if hint.value == "**":
+ masked_by_missing_keywords = True
+ elif hint.key == "p_mask":
+ masked_by_p_mask = True
+ elif hint.key == "license":
+ missing_licenses = hint.value
+ else:
+ masked_by_something_else = True
+
+ if masked_by_something_else:
+ return False
+
+ if pkg in self._dynamic_config._needed_unstable_keywords:
+ #If the package is already keyworded, remove the mask.
+ masked_by_unstable_keywords = False
+ masked_by_missing_keywords = False
+
+ if pkg in self._dynamic_config._needed_p_mask_changes:
+ #If the package has already been unmasked, remove the mask.
+ masked_by_p_mask = False
+
+ if missing_licenses:
+ #If the needed licenses are already unmasked, remove the mask.
+ missing_licenses.difference_update(self._dynamic_config._needed_license_changes.get(pkg, set()))
+
+ if not (masked_by_unstable_keywords or masked_by_p_mask or missing_licenses):
+ #Package has already been unmasked.
+ return True
+
+ if (masked_by_unstable_keywords and not autounmask_level.allow_unstable_keywords) or \
+ (masked_by_missing_keywords and not autounmask_level.allow_missing_keywords) or \
+ (masked_by_p_mask and not autounmask_level.allow_unmasks) or \
+ (missing_licenses and not autounmask_level.allow_license_changes):
+ #We are not allowed to make the needed changes.
+ return False
+
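+ # Record the changes that autounmask needs, both in the dynamic
+ # config and in the backtrack infos, so the backtracker can carry
+ # them into the next calculation.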
+ if masked_by_unstable_keywords:
+ self._dynamic_config._needed_unstable_keywords.add(pkg)
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ backtrack_infos.setdefault("config", {})
+ backtrack_infos["config"].setdefault("needed_unstable_keywords", set())
+ backtrack_infos["config"]["needed_unstable_keywords"].add(pkg)
+
+ if masked_by_p_mask:
+ self._dynamic_config._needed_p_mask_changes.add(pkg)
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ backtrack_infos.setdefault("config", {})
+ backtrack_infos["config"].setdefault("needed_p_mask_changes", set())
+ backtrack_infos["config"]["needed_p_mask_changes"].add(pkg)
+
+ if missing_licenses:
+ self._dynamic_config._needed_license_changes.setdefault(pkg, set()).update(missing_licenses)
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ backtrack_infos.setdefault("config", {})
+ backtrack_infos["config"].setdefault("needed_license_changes", set())
+ backtrack_infos["config"]["needed_license_changes"].add((pkg, frozenset(missing_licenses)))
+
+ return True
+
+ def _pkg_use_enabled(self, pkg, target_use=None):
+ """
+ If target_use is None, returns pkg.use.enabled + changes in _needed_use_config_changes.
+ If target_use is given, the needed changes are computed to make the package usable.
+ Example: target_use = { "foo": True, "bar": False }
+ The flags in target_use must be in the pkg's IUSE.
+ @rtype: frozenset
+ @return: set of effectively enabled USE flags, including changes
+ made by autounmask
+ """
+ if pkg.built:
+ return pkg.use.enabled
+ needed_use_config_change = self._dynamic_config._needed_use_config_changes.get(pkg)
+
+ if target_use is None:
+ if needed_use_config_change is None:
+ return pkg.use.enabled
+ else:
+ return needed_use_config_change[0]
+
+ if needed_use_config_change is not None:
+ old_use = needed_use_config_change[0]
+ new_use = set()
+ old_changes = needed_use_config_change[1]
+ new_changes = old_changes.copy()
+ else:
+ old_use = pkg.use.enabled
+ new_use = set()
+ old_changes = {}
+ new_changes = {}
+
+ for flag, state in target_use.items():
+ real_flag = pkg.iuse.get_real_flag(flag)
+ if real_flag is None:
+ # Triggered by use-dep defaults.
+ continue
+ if state:
+ if real_flag not in old_use:
+ if new_changes.get(real_flag) == False:
+ return old_use
+ new_changes[real_flag] = True
+ new_use.add(flag)
+ else:
+ if real_flag in old_use:
+ if new_changes.get(real_flag) == True:
+ return old_use
+ new_changes[real_flag] = False
+ new_use.update(old_use.difference(target_use))
+
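+ # A restart is only needed if the USE change alters this package's
+ # dependency or LICENSE strings, or breaks a USE dependency of one
+ # of its parents.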
+ def want_restart_for_use_change(pkg, new_use):
+ if pkg not in self._dynamic_config.digraph.nodes:
+ return False
+
+ for key in Package._dep_keys + ("LICENSE",):
+ dep = pkg._metadata[key]
+ old_val = set(portage.dep.use_reduce(dep, pkg.use.enabled, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
+ new_val = set(portage.dep.use_reduce(dep, new_use, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
+
+ if old_val != new_val:
+ return True
+
+ parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
+ if not parent_atoms:
+ return False
+
+ new_use, changes = self._dynamic_config._needed_use_config_changes.get(pkg)
+ for ppkg, atom in parent_atoms:
+ if not atom.use:
+ continue
+
+ # Backtrack only if changes break a USE dependency.
+ enabled = atom.use.enabled
+ disabled = atom.use.disabled
+ for k, v in changes.items():
+ want_enabled = k in enabled
+ if (want_enabled or k in disabled) and want_enabled != v:
+ return True
+
+ return False
+
+ # Always return frozenset since the result needs to be
+ # hashable (see bug #531112).
+ new_use = frozenset(new_use)
+
+ if new_changes != old_changes:
+ #Don't do the change if it violates REQUIRED_USE.
+ required_use_satisfied = True
+ required_use = pkg._metadata.get("REQUIRED_USE")
+ if required_use and check_required_use(required_use, old_use,
+ pkg.iuse.is_valid_flag, eapi=pkg.eapi) and \
+ not check_required_use(required_use, new_use,
+ pkg.iuse.is_valid_flag, eapi=pkg.eapi):
+ required_use_satisfied = False
+
+ if any(x in pkg.use.mask for x in new_changes) or \
+ any(x in pkg.use.force for x in new_changes):
+ return old_use
+
+ changes = _use_changes(new_use, new_changes,
+ required_use_satisfied=required_use_satisfied)
+ self._dynamic_config._needed_use_config_changes[pkg] = changes
+ backtrack_infos = self._dynamic_config._backtrack_infos
+ backtrack_infos.setdefault("config", {})
+ backtrack_infos["config"].setdefault("needed_use_config_changes", [])
+ backtrack_infos["config"]["needed_use_config_changes"].append((pkg, changes))
+ if want_restart_for_use_change(pkg, new_use):
+ self._dynamic_config._need_restart = True
+ return new_use
+
+ def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, autounmask_level=None, parent=None):
+ root_config = self._frozen_config.roots[root]
+ pkgsettings = self._frozen_config.pkgsettings[root]
+ dbs = self._dynamic_config._filtered_trees[root]["dbs"]
+ vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
+ # List of acceptable packages, ordered by type preference.
+ matched_packages = []
+ highest_version = None
+ atom_cp = None
+ have_new_virt = None
+ if atom.package:
+ atom_cp = atom.cp
+ have_new_virt = (atom_cp.startswith("virtual/") and
+ self._have_new_virt(root, atom_cp))
+
+ existing_node = None
+ myeb = None
+ rebuilt_binaries = 'rebuilt_binaries' in self._dynamic_config.myparams
+ usepkg = "--usepkg" in self._frozen_config.myopts
+ usepkgonly = "--usepkgonly" in self._frozen_config.myopts
+ empty = "empty" in self._dynamic_config.myparams
+ selective = "selective" in self._dynamic_config.myparams
+ reinstall = False
+ avoid_update = "--update" not in self._frozen_config.myopts
+ dont_miss_updates = "--update" in self._frozen_config.myopts
+ use_ebuild_visibility = self._frozen_config.myopts.get(
+ '--use-ebuild-visibility', 'n') != 'n'
+ reinstall_atoms = self._frozen_config.reinstall_atoms
+ usepkg_exclude = self._frozen_config.usepkg_exclude
+ useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
+ matched_oldpkg = []
+ # Behavior of the "selective" parameter depends on
+ # whether or not a package matches an argument atom.
+ # If an installed package provides an old-style
+ # virtual that is no longer provided by an available
+ # package, the installed package may match an argument
+ # atom even though none of the available packages do.
+ # Therefore, "selective" logic does not consider
+ # whether or not an installed package matches an
+ # argument atom. It only considers whether or not
+ # available packages match argument atoms, which is
+ # represented by the found_available_arg flag.
+ found_available_arg = False
+ packages_with_invalid_use_config = []
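+ # Two passes: the first pass prefers packages that match an
+ # existing node in the graph, the second considers the rest.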
+ for find_existing_node in True, False:
+ if existing_node:
+ break
+ for db, pkg_type, built, installed, db_keys in dbs:
+ if existing_node:
+ break
+ if installed and not find_existing_node:
+ want_reinstall = reinstall or empty or \
+ (found_available_arg and not selective)
+ if want_reinstall and matched_packages:
+ continue
+
+ # For unbuilt ebuilds, ignore USE deps for the initial
+ # match since we want to ensure that updates aren't
+ # missed solely due to the user's USE configuration.
+ for pkg in self._iter_match_pkgs(root_config, pkg_type,
+ atom.without_use if (atom.package and not built) else atom,
+ onlydeps=onlydeps):
+ if have_new_virt is True and pkg.cp != atom_cp:
+ # pull in a new-style virtual instead
+ continue
+ if pkg in self._dynamic_config._runtime_pkg_mask:
+ # The package has been masked by the backtracking logic
+ continue
+ root_slot = (pkg.root, pkg.slot_atom)
+ if pkg.built and root_slot in self._rebuild.rebuild_list:
+ continue
+ if (pkg.installed and
+ root_slot in self._rebuild.reinstall_list):
+ continue
+
+ if not pkg.installed and \
+ self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
+ modified_use=self._pkg_use_enabled(pkg)):
+ continue
+
+ if built and not installed and usepkg_exclude.findAtomForPackage(pkg, \
+ modified_use=self._pkg_use_enabled(pkg)):
+ break
+
+ useoldpkg = useoldpkg_atoms.findAtomForPackage(pkg, \
+ modified_use=self._pkg_use_enabled(pkg))
+
+ if packages_with_invalid_use_config and (not built or not useoldpkg) and \
+ (not pkg.installed or dont_miss_updates):
+ # Check if a higher version was rejected due to user
+ # USE configuration. The packages_with_invalid_use_config
+ # list only contains unbuilt ebuilds since USE can't
+ # be changed for built packages.
+ higher_version_rejected = False
+ repo_priority = pkg.repo_priority
+ for rejected in packages_with_invalid_use_config:
+ if rejected.cp != pkg.cp:
+ continue
+ if rejected > pkg:
+ higher_version_rejected = True
+ break
+ if portage.dep.cpvequal(rejected.cpv, pkg.cpv):
+ # If version is identical then compare
+ # repo priority (see bug #350254).
+ rej_repo_priority = rejected.repo_priority
+ if rej_repo_priority is not None and \
+ (repo_priority is None or
+ rej_repo_priority > repo_priority):
+ higher_version_rejected = True
+ break
+ if higher_version_rejected:
+ continue
+
+ cpv = pkg.cpv
+ reinstall_for_flags = None
+
+ if not pkg.installed or \
+ (matched_packages and not avoid_update):
+ # Only enforce visibility on installed packages
+ # if there is at least one other visible package
+ # available. By filtering installed masked packages
+ # here, packages that have been masked since they
+ # were installed can be automatically downgraded
+ # to an unmasked version. NOTE: This code needs to
+ # be consistent with masking behavior inside
+ # _dep_check_composite_db, in order to prevent
+ # incorrect choices in || deps like bug #351828.
+
+ if not self._pkg_visibility_check(pkg, autounmask_level):
+ continue
+
+ # Enable upgrade or downgrade to a version
+ # with visible KEYWORDS when the installed
+ # version is masked by KEYWORDS, but never
+ # reinstall the same exact version only due
+ # to a KEYWORDS mask. See bug #252167.
+
+ identical_binary = False
+ if pkg.type_name != "ebuild" and matched_packages:
+ # Don't re-install a binary package that is
+ # identical to the currently installed package
+ # (see bug #354441).
+ if usepkg and pkg.installed:
+ for selected_pkg in matched_packages:
+ if selected_pkg.type_name == "binary" and \
+ selected_pkg.cpv == pkg.cpv and \
+ selected_pkg.build_time == \
+ pkg.build_time:
+ identical_binary = True
+ break
+
+ if (not identical_binary and pkg.built and
+ (use_ebuild_visibility or matched_packages)):
+ # If the ebuild no longer exists or its
+ # keywords have been dropped, reject built
+ # instances (installed or binary).
+ # If --usepkgonly is enabled, assume that
+ # the ebuild status should be ignored unless
+ # --use-ebuild-visibility has been specified.
+ if not use_ebuild_visibility and (usepkgonly or useoldpkg):
+ if pkg.installed and pkg.masks:
+ continue
+ elif not self._equiv_ebuild_visible(pkg,
+ autounmask_level=autounmask_level):
+ continue
+
+ # Calculation of USE for unbuilt ebuilds is relatively
+ # expensive, so it is only performed lazily, after the
+ # above visibility checks are complete.
+
+ myarg = None
+ try:
+ for myarg, myarg_atom in self._iter_atoms_for_pkg(pkg):
+ if myarg.force_reinstall:
+ reinstall = True
+ break
+ except InvalidDependString:
+ if not installed:
+ # masked by corruption
+ continue
+ if not installed and myarg:
+ found_available_arg = True
+
+ if atom.package and atom.unevaluated_atom.use:
+ #Make sure we don't miss a 'missing IUSE'.
+ if pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
+ # Don't add this to packages_with_invalid_use_config
+ # since IUSE cannot be adjusted by the user.
+ continue
+
+ if atom.package and atom.use is not None:
+
+ if autounmask_level and autounmask_level.allow_use_changes and not pkg.built:
+ target_use = {}
+ for flag in atom.use.enabled:
+ target_use[flag] = True
+ for flag in atom.use.disabled:
+ target_use[flag] = False
+ use = self._pkg_use_enabled(pkg, target_use)
+ else:
+ use = self._pkg_use_enabled(pkg)
+
+ use_match = True
+ can_adjust_use = not pkg.built
+ is_valid_flag = pkg.iuse.is_valid_flag
+ missing_enabled = frozenset(x for x in
+ atom.use.missing_enabled if not is_valid_flag(x))
+ missing_disabled = frozenset(x for x in
+ atom.use.missing_disabled if not is_valid_flag(x))
+
+ if atom.use.enabled:
+ if any(x in atom.use.enabled for x in missing_disabled):
+ use_match = False
+ can_adjust_use = False
+ need_enabled = atom.use.enabled.difference(use)
+ if need_enabled:
+ need_enabled = need_enabled.difference(missing_enabled)
+ if need_enabled:
+ use_match = False
+ if can_adjust_use:
+ if any(x in pkg.use.mask for x in need_enabled):
+ can_adjust_use = False
+
+ if atom.use.disabled:
+ if any(x in atom.use.disabled for x in missing_enabled):
+ use_match = False
+ can_adjust_use = False
+ need_disabled = atom.use.disabled.intersection(use)
+ if need_disabled:
+ need_disabled = need_disabled.difference(missing_disabled)
+ if need_disabled:
+ use_match = False
+ if can_adjust_use:
+ if any(x in pkg.use.force and x not in
+ pkg.use.mask for x in need_disabled):
+ can_adjust_use = False
+
+ if not use_match:
+ if can_adjust_use:
+ # Above we must ensure that this package has
+ # absolutely no use.force, use.mask, or IUSE
+ # issues that the user typically can't make
+ # adjustments to solve (see bug #345979).
+ # FIXME: Conditional USE deps complicate
+ # issues. This code currently excludes cases
+ # in which the user can adjust the parent
+ # package's USE in order to satisfy the dep.
+ packages_with_invalid_use_config.append(pkg)
+ continue
+
+ if atom_cp is None or pkg.cp == atom_cp:
+ if highest_version is None:
+ highest_version = pkg
+ elif pkg > highest_version:
+ highest_version = pkg
+ # At this point, we've found the highest visible
+ # match from the current repo. Any lower versions
+ # from this repo are ignored, so the loop
+ # will always end with a break statement below
+ # this point.
+ if find_existing_node:
+ # Use reversed iteration in order to get
+ # descending order here, so that the highest
+ # version involved in a slot conflict is
+ # selected. This is needed for correct operation
+ # of conflict_downgrade logic in the dep_zapdeps
+ # function (see bug 554070).
+ e_pkg = next(reversed(list(
+ self._dynamic_config._package_tracker.match(
+ root, pkg.slot_atom, installed=False))), None)
+
+ if not e_pkg:
+ break
+
+ # Use PackageSet.findAtomForPackage()
+ # for PROVIDE support.
+ if atom.match(e_pkg.with_use(
+ self._pkg_use_enabled(e_pkg))):
+ if highest_version and \
+ (atom_cp is None or
+ e_pkg.cp == atom_cp) and \
+ e_pkg < highest_version and \
+ e_pkg.slot_atom != highest_version.slot_atom:
+ # There is a higher version available in a
+ # different slot, so this existing node is
+ # irrelevant.
+ pass
+ else:
+ matched_packages.append(e_pkg)
+ existing_node = e_pkg
+ break
+ # Compare built package to current config and
+ # reject the built package if necessary.
+ reinstall_use = ("--newuse" in self._frozen_config.myopts or \
+ "--reinstall" in self._frozen_config.myopts)
+ changed_deps = (
+ self._dynamic_config.myparams.get(
+ "changed_deps", "n") != "n")
+ changed_deps_report = self._dynamic_config.myparams.get(
+ "changed_deps_report")
+ binpkg_changed_deps = (
+ self._dynamic_config.myparams.get(
+ "binpkg_changed_deps", "n") != "n")
+ respect_use = self._dynamic_config.myparams.get("binpkg_respect_use") in ("y", "auto")
+ if built and not useoldpkg and \
+ (not installed or matched_packages) and \
+ not (installed and
+ self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg))):
+ if myeb and "--newrepo" in self._frozen_config.myopts and myeb.repo != pkg.repo:
+ break
+ elif self._dynamic_config.myparams.get("changed_slot") and self._changed_slot(pkg):
+ if installed:
+ break
+ else:
+ # Continue searching for a binary package
+ # with the desired SLOT metadata.
+ continue
+ elif reinstall_use or (not installed and respect_use):
+ iuses = pkg.iuse.all
+ old_use = self._pkg_use_enabled(pkg)
+ if myeb:
+ now_use = self._pkg_use_enabled(myeb)
+ forced_flags = set(chain(
+ myeb.use.force, myeb.use.mask))
+ else:
+ pkgsettings.setcpv(pkg)
+ now_use = pkgsettings["PORTAGE_USE"].split()
+ forced_flags = set(chain(
+ pkgsettings.useforce, pkgsettings.usemask))
+ cur_iuse = iuses
+ if myeb and not usepkgonly and not useoldpkg:
+ cur_iuse = myeb.iuse.all
+ reinstall_for_flags = self._reinstall_for_flags(pkg,
+ forced_flags, old_use, iuses, now_use, cur_iuse)
+ if reinstall_for_flags:
+ if not pkg.installed:
+ self._dynamic_config.\
+ ignored_binaries.setdefault(
+ pkg, {}).setdefault(
+ "respect_use", set()).update(
+ reinstall_for_flags)
+ # Continue searching for a binary
+ # package instance built with the
+ # desired USE settings.
+ continue
+ break
+
+ installed_changed_deps = False
+ if installed and (changed_deps or changed_deps_report):
+ installed_changed_deps = self._changed_deps(pkg)
+
+ if ((installed_changed_deps and changed_deps) or
+ (not installed and binpkg_changed_deps and
+ self._changed_deps(pkg))):
+ if not installed:
+ self._dynamic_config.\
+ ignored_binaries.setdefault(
+ pkg, {})["changed_deps"] = True
+ # Continue searching for a binary
+ # package instance built with the
+ # desired USE settings.
+ continue
+ break
+
+ # Compare current config to installed package
+ # and do not reinstall if possible.
+ if not installed and not useoldpkg and cpv in vardb.match(atom):
+ inst_pkg = vardb.match_pkgs(
+ Atom('=' + pkg.cpv))[0]
+ if "--newrepo" in self._frozen_config.myopts and pkg.repo != inst_pkg.repo:
+ reinstall = True
+ elif reinstall_use:
+ forced_flags = set()
+ forced_flags.update(pkg.use.force)
+ forced_flags.update(pkg.use.mask)
+ old_use = inst_pkg.use.enabled
+ old_iuse = inst_pkg.iuse.all
+ cur_use = self._pkg_use_enabled(pkg)
+ cur_iuse = pkg.iuse.all
+ reinstall_for_flags = \
+ self._reinstall_for_flags(pkg,
+ forced_flags, old_use, old_iuse,
+ cur_use, cur_iuse)
+ if reinstall_for_flags:
+ reinstall = True
+ if reinstall_atoms.findAtomForPackage(pkg, \
+ modified_use=self._pkg_use_enabled(pkg)):
+ reinstall = True
+ if not built:
+ myeb = pkg
+ elif useoldpkg:
+ matched_oldpkg.append(pkg)
+ matched_packages.append(pkg)
+ if reinstall_for_flags:
+ self._dynamic_config._reinstall_nodes[pkg] = \
+ reinstall_for_flags
+ break
+
+ if not matched_packages:
+ return None, None
+
+ if "--debug" in self._frozen_config.myopts:
+ for pkg in matched_packages:
+ portage.writemsg("%s %s%s%s\n" % \
+ ((pkg.type_name + ":").rjust(10),
+ pkg.cpv, _repo_separator, pkg.repo), noiselevel=-1)
+
+ # Filter out any old-style virtual matches if they are
+ # mixed with new-style virtual matches.
+ cp = atom_cp
+ if len(matched_packages) > 1 and \
+ cp is not None and \
+ "virtual" == portage.catsplit(cp)[0]:
+ for pkg in matched_packages:
+ if pkg.cp != cp:
+ continue
+ # Got a new-style virtual, so filter
+ # out any old-style virtuals.
+ matched_packages = [pkg for pkg in matched_packages \
+ if pkg.cp == cp]
+ break
+
+ if existing_node is not None and \
+ existing_node in matched_packages:
+ return existing_node, existing_node
+
+ if len(matched_packages) > 1:
+ if parent is not None and \
+ (parent.root, parent.slot_atom) in self._dynamic_config._slot_operator_replace_installed:
+ # We're forcing a rebuild of the parent because we missed an
+ # update due to a slot operator dep.
+ if atom.slot_operator == "=" and atom.sub_slot is None:
+ # This one is a slot operator dep. Exclude the installed packages if a newer non-installed
+ # pkg exists.
+ highest_installed = None
+ for pkg in matched_packages:
+ if pkg.installed:
+ if highest_installed is None or pkg.version > highest_installed.version:
+ highest_installed = pkg
+
+ if highest_installed and self._want_update_pkg(parent, highest_installed):
+ non_installed = [pkg for pkg in matched_packages \
+ if not pkg.installed and pkg.version > highest_installed.version]
+
+ if non_installed:
+ matched_packages = non_installed
+
+ if rebuilt_binaries:
+ inst_pkg = None
+ built_pkg = None
+ unbuilt_pkg = None
+ for pkg in matched_packages:
+ if pkg.installed:
+ inst_pkg = pkg
+ elif pkg.built:
+ built_pkg = pkg
+ else:
+ if unbuilt_pkg is None or pkg > unbuilt_pkg:
+ unbuilt_pkg = pkg
+ if built_pkg is not None and inst_pkg is not None:
+ # Only reinstall if binary package BUILD_TIME is
+ # non-empty, in order to avoid cases like
+ # bug #306659 where BUILD_TIME fields are missing
+ # in local and/or remote Packages file.
+ built_timestamp = built_pkg.build_time
+ installed_timestamp = inst_pkg.build_time
+
+ if unbuilt_pkg is not None and unbuilt_pkg > built_pkg:
+ pass
+ elif "--rebuilt-binaries-timestamp" in self._frozen_config.myopts:
+ minimal_timestamp = self._frozen_config.myopts["--rebuilt-binaries-timestamp"]
+ if built_timestamp and \
+ built_timestamp > installed_timestamp and \
+ built_timestamp >= minimal_timestamp:
+ return built_pkg, existing_node
+ else:
+ #Don't care if the binary has an older BUILD_TIME than the installed
+ #package. This is for closely tracking a binhost.
+ #Use --rebuilt-binaries-timestamp 0 if you want only newer binaries
+ #pulled in here.
+ if built_timestamp and \
+ built_timestamp != installed_timestamp:
+ return built_pkg, existing_node
+
+ inst_pkg = None
+ for pkg in matched_packages:
+ if pkg.installed:
+ inst_pkg = pkg
+ if pkg.installed and pkg.invalid:
+ matched_packages = [x for x in \
+ matched_packages if x is not pkg]
+
+ if (inst_pkg is not None and parent is not None and
+ not self._want_update_pkg(parent, inst_pkg)):
+ return inst_pkg, existing_node
+
+ if avoid_update:
+ for pkg in matched_packages:
+ if pkg.installed and self._pkg_visibility_check(pkg, autounmask_level):
+ return pkg, existing_node
+
+ visible_matches = []
+ if matched_oldpkg:
+ visible_matches = [pkg.cpv for pkg in matched_oldpkg \
+ if self._pkg_visibility_check(pkg, autounmask_level)]
+ if not visible_matches:
+ visible_matches = [pkg.cpv for pkg in matched_packages \
+ if self._pkg_visibility_check(pkg, autounmask_level)]
+ if visible_matches:
+ bestmatch = portage.best(visible_matches)
+ else:
+ # all are masked, so ignore visibility
+ bestmatch = portage.best([pkg.cpv for pkg in matched_packages])
+ matched_packages = [pkg for pkg in matched_packages \
+ if portage.dep.cpvequal(pkg.cpv, bestmatch)]
+
+ # ordered by type preference ("ebuild" type is the last resort)
+ return matched_packages[-1], existing_node
+
+ def _select_pkg_from_graph(self, root, atom, onlydeps=False, parent=None):
+ """
+ Select packages that have already been added to the graph or
+ those that are installed and have not been scheduled for
+ replacement.
+ """
+ graph_db = self._dynamic_config._graph_trees[root]["porttree"].dbapi
+ matches = graph_db.match_pkgs(atom)
+ if not matches:
+ return None, None
+
+ # There may be multiple matches, and they may
+ # conflict with each other, so choose the highest
+ # version that has already been added to the graph.
+ for pkg in reversed(matches):
+ if pkg in self._dynamic_config.digraph:
+ return pkg, pkg
+
+ # Fall back to installed packages
+ return self._select_pkg_from_installed(root, atom, onlydeps=onlydeps, parent=parent)
+
+ def _select_pkg_from_installed(self, root, atom, onlydeps=False, parent=None):
+ """
+ Select packages that are installed.
+ """
+ matches = list(self._iter_match_pkgs(self._frozen_config.roots[root],
+ "installed", atom))
+ if not matches:
+ return None, None
+ if len(matches) > 1:
+ matches.reverse() # ascending order
+ unmasked = [pkg for pkg in matches if \
+ self._pkg_visibility_check(pkg)]
+ if unmasked:
+ if len(unmasked) == 1:
+ matches = unmasked
+ else:
+ # Account for packages with masks (like KEYWORDS masks)
+ # that are usually ignored in visibility checks for
+ # installed packages, in order to handle cases like
+ # bug #350285.
+ unmasked = [pkg for pkg in matches if not pkg.masks]
+ if unmasked:
+ matches = unmasked
+ if len(matches) > 1:
+ # Now account for packages for which existing
+ # ebuilds are masked or unavailable (bug #445506).
+ unmasked = [pkg for pkg in matches if
+ self._equiv_ebuild_visible(pkg)]
+ if unmasked:
+ matches = unmasked
+
+ pkg = matches[-1] # highest match
+ in_graph = next(self._dynamic_config._package_tracker.match(
+ root, pkg.slot_atom, installed=False), None)
+
+ return pkg, in_graph
+
+ def _complete_graph(self, required_sets=None):
+ """
+ Add any deep dependencies of required sets (args, system, world) that
+ have not been pulled into the graph yet. This ensures that the graph
+ is consistent such that initially satisfied deep dependencies are not
+ broken in the new graph. Initially unsatisfied dependencies are
+ irrelevant since we only want to avoid breaking dependencies that are
+ initially satisfied.
+
+ Since this method can consume enough time to disturb users, it is
+ currently only enabled by the --complete-graph option.
+
+ @param required_sets: contains required sets (currently only used
+ for depclean and prune removal operations)
+ @type required_sets: dict
+ """
+ if "--buildpkgonly" in self._frozen_config.myopts or \
+ "recurse" not in self._dynamic_config.myparams:
+ return 1
+
+ complete_if_new_use = self._dynamic_config.myparams.get(
+ "complete_if_new_use", "y") == "y"
+ complete_if_new_ver = self._dynamic_config.myparams.get(
+ "complete_if_new_ver", "y") == "y"
+ rebuild_if_new_slot = self._dynamic_config.myparams.get(
+ "rebuild_if_new_slot", "y") == "y"
+ complete_if_new_slot = rebuild_if_new_slot
+
+ if "complete" not in self._dynamic_config.myparams and \
+ (complete_if_new_use or
+ complete_if_new_ver or complete_if_new_slot):
+ # Enable complete mode if an installed package will change somehow.
+ use_change = False
+ version_change = False
+ for node in self._dynamic_config.digraph:
+ if not isinstance(node, Package) or \
+ node.operation != "merge":
+ continue
+ vardb = self._frozen_config.roots[
+ node.root].trees["vartree"].dbapi
+
+ if complete_if_new_use or complete_if_new_ver:
+ inst_pkg = vardb.match_pkgs(node.slot_atom)
+ if inst_pkg and inst_pkg[0].cp == node.cp:
+ inst_pkg = inst_pkg[0]
+ if complete_if_new_ver:
+ if inst_pkg < node or node < inst_pkg:
+ version_change = True
+ break
+ elif not (inst_pkg.slot == node.slot and
+ inst_pkg.sub_slot == node.sub_slot):
+ # slot/sub-slot change without revbump gets
+ # similar treatment to a version change
+ version_change = True
+ break
+
+ # Intersect enabled USE with IUSE, in order to
+ # ignore forced USE from implicit IUSE flags, since
+ # they're probably irrelevant and they are sensitive
+ # to use.mask/force changes in the profile.
+ if complete_if_new_use and \
+ (node.iuse.all != inst_pkg.iuse.all or
+ self._pkg_use_enabled(node).intersection(node.iuse.all) !=
+ self._pkg_use_enabled(inst_pkg).intersection(inst_pkg.iuse.all)):
+ use_change = True
+ break
+
+ if complete_if_new_slot:
+ cp_list = vardb.match_pkgs(Atom(node.cp))
+ if (cp_list and cp_list[0].cp == node.cp and
+ not any(node.slot == pkg.slot and
+ node.sub_slot == pkg.sub_slot for pkg in cp_list)):
+ version_change = True
+ break
+
+ if use_change or version_change:
+ self._dynamic_config.myparams["complete"] = True
+
+ if "complete" not in self._dynamic_config.myparams:
+ return 1
+
+ self._load_vdb()
+
+ # Put the depgraph into a mode that causes it to only
+ # select packages that have already been added to the
+ # graph or those that are installed and have not been
+ # scheduled for replacement. Also, toggle the "deep"
+ # parameter so that all dependencies are traversed and
+ # accounted for.
+ self._dynamic_config._complete_mode = True
+ self._select_atoms = self._select_atoms_from_graph
+ if "remove" in self._dynamic_config.myparams:
+ self._select_package = self._select_pkg_from_installed
+ else:
+ self._select_package = self._select_pkg_from_graph
+ self._dynamic_config._traverse_ignored_deps = True
+ already_deep = self._dynamic_config.myparams.get("deep") is True
+ if not already_deep:
+ self._dynamic_config.myparams["deep"] = True
+
+ # Invalidate the package selection cache, since
+ # _select_package has just changed implementations.
+ for trees in self._dynamic_config._filtered_trees.values():
+ trees["porttree"].dbapi._clear_cache()
+
+ args = self._dynamic_config._initial_arg_list[:]
+ for root in self._frozen_config.roots:
+ if root != self._frozen_config.target_root and \
+ ("remove" in self._dynamic_config.myparams or
+ self._frozen_config.myopts.get("--root-deps") is not None):
+ # Only pull in deps for the relevant root.
+ continue
+ depgraph_sets = self._dynamic_config.sets[root]
+ required_set_names = self._frozen_config._required_set_names.copy()
+ remaining_args = required_set_names.copy()
+ if required_sets is None or root not in required_sets:
+ pass
+ else:
+ # Removal actions may override sets with temporary
+ # replacements that have had atoms removed in order
+ # to implement --deselect behavior.
+ required_set_names = set(required_sets[root])
+ depgraph_sets.sets.clear()
+ depgraph_sets.sets.update(required_sets[root])
+ if "remove" not in self._dynamic_config.myparams and \
+ root == self._frozen_config.target_root and \
+ already_deep:
+ remaining_args.difference_update(depgraph_sets.sets)
+ if not remaining_args and \
+ not self._dynamic_config._ignored_deps and \
+ not self._dynamic_config._dep_stack:
+ continue
+ root_config = self._frozen_config.roots[root]
+ for s in required_set_names:
+ pset = depgraph_sets.sets.get(s)
+ if pset is None:
+ pset = root_config.sets[s]
+ atom = SETPREFIX + s
+ args.append(SetArg(arg=atom, pset=pset,
+ reset_depth=False, root_config=root_config))
+
+ self._set_args(args)
+ for arg in self._expand_set_args(args, add_to_digraph=True):
+ for atom in arg.pset.getAtoms():
+ self._dynamic_config._dep_stack.append(
+ Dependency(atom=atom, root=arg.root_config.root,
+ parent=arg, depth=self._UNREACHABLE_DEPTH))
+
+ if True:
+ if self._dynamic_config._ignored_deps:
+ self._dynamic_config._dep_stack.extend(self._dynamic_config._ignored_deps)
+ self._dynamic_config._ignored_deps = []
+ if not self._create_graph(allow_unsatisfied=True):
+ return 0
+ # Check the unsatisfied deps to see if any initially satisfied deps
+ # will become unsatisfied due to an upgrade. Initially unsatisfied
+ # deps are irrelevant since we only want to avoid breaking deps
+ # that are initially satisfied.
+ while self._dynamic_config._unsatisfied_deps:
+ dep = self._dynamic_config._unsatisfied_deps.pop()
+ vardb = self._frozen_config.roots[
+ dep.root].trees["vartree"].dbapi
+ matches = vardb.match_pkgs(dep.atom)
+ if not matches:
+ self._dynamic_config._initially_unsatisfied_deps.append(dep)
+ continue
+ # A scheduled installation broke a deep dependency.
+ # Add the installed package to the graph so that it
+ # will be appropriately reported as a slot collision
+ # (possibly solvable via backtracking).
+ pkg = matches[-1] # highest match
+
+ if (self._dynamic_config._allow_backtracking and
+ not self._want_installed_pkg(pkg) and (dep.atom.soname or (
+ dep.atom.package and dep.atom.slot_operator_built))):
+ # If pkg was already scheduled for rebuild by the previous
+ # calculation, then pulling in the installed instance will
+ # trigger a slot conflict that may go unsolved. Therefore,
+ # trigger a rebuild of the parent if appropriate.
+ dep.child = pkg
+ new_dep = self._slot_operator_update_probe(dep)
+ if new_dep is not None:
+ self._slot_operator_update_backtrack(
+ dep, new_dep=new_dep)
+ continue
+
+ if not self._add_pkg(pkg, dep):
+ return 0
+ if not self._create_graph(allow_unsatisfied=True):
+ return 0
+ return 1
+
+ def _pkg(self, cpv, type_name, root_config, installed=False,
+ onlydeps=False, myrepo = None):
+ """
+ Get a package instance from the cache, or create a new
+ one if necessary. Raises PackageNotFound from aux_get if it
+ fails for some reason (package does not exist or is
+ corrupt).
+ """
+
+ # Ensure that we use the specially optimized RootConfig instance
+ # that refers to FakeVartree instead of the real vartree.
+ root_config = self._frozen_config.roots[root_config.root]
+ pkg = self._frozen_config._pkg_cache.get(
+ Package._gen_hash_key(cpv=cpv, type_name=type_name,
+ repo_name=myrepo, root_config=root_config,
+ installed=installed, onlydeps=onlydeps))
+ if pkg is None and onlydeps and not installed:
+ # Maybe it already got pulled in as a "merge" node.
+ for candidate in self._dynamic_config._package_tracker.match(
+ root_config.root, Atom("="+cpv)):
+ if candidate.type_name == type_name and \
+ candidate.repo_name == myrepo and \
+ candidate.root_config is root_config and \
+ candidate.installed == installed and \
+ not candidate.onlydeps:
+ pkg = candidate
+
+ if pkg is None:
+ tree_type = self.pkg_tree_map[type_name]
+ db = root_config.trees[tree_type].dbapi
+ db_keys = list(self._frozen_config._trees_orig[root_config.root][
+ tree_type].dbapi._aux_cache_keys)
+
+ try:
+ metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
+ except KeyError:
+ raise portage.exception.PackageNotFound(cpv)
+
+ # Ensure that this cpv is linked to the correct db, since the
+ # caller might have passed in a cpv from a different db, in
+ # order to get an instance from this db with the same cpv.
+ # If db has a _db attribute, use that instead, in order to
+ # use the underlying db of DbapiProvidesIndex or similar.
+ db = getattr(db, '_db', db)
+ if getattr(cpv, '_db', None) is not db:
+ cpv = _pkg_str(cpv, db=db)
+
+ pkg = Package(built=(type_name != "ebuild"), cpv=cpv,
+ installed=installed, metadata=metadata, onlydeps=onlydeps,
+ root_config=root_config, type_name=type_name)
+
+ self._frozen_config._pkg_cache[pkg] = pkg
+
+ if not self._pkg_visibility_check(pkg) and \
+ 'LICENSE' in pkg.masks and len(pkg.masks) == 1:
+ slot_key = (pkg.root, pkg.slot_atom)
+ other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
+ if other_pkg is None or pkg > other_pkg:
+ self._frozen_config._highest_license_masked[slot_key] = pkg
+
+ return pkg
+
+ def _validate_blockers(self):
+ """Remove any blockers from the digraph that do not match any of the
+ packages within the graph. If necessary, create hard deps to ensure
+ correct merge order such that mutually blocking packages are never
+ installed simultaneously. Also add runtime blockers from all installed
+ packages if any of them haven't been added already (bug 128809).
+
+ Normally, this method is called only after the graph is complete, and
+ after _solve_non_slot_operator_slot_conflicts has had an opportunity
+ to solve slot conflicts (possibly removing some blockers). It can also
+ be called earlier, in order to get a preview of the blocker data, but
+ then it needs to be called again after the graph is complete.
+ """
+
+ # The _in_blocker_conflict method asserts that this method has
+ # been called first, by checking that _blocked_pkgs is not None.
+ self._dynamic_config._blocked_pkgs = digraph()
+
+ if "--buildpkgonly" in self._frozen_config.myopts or \
+ "--nodeps" in self._frozen_config.myopts:
+ return True
+
+ if True:
+ # Pull in blockers from all installed packages that haven't already
+ # been pulled into the depgraph, in order to ensure that they are
+ # respected (bug 128809). Due to the performance penalty that is
+ # incurred by all the additional dep_check calls that are required,
+ # blockers returned from dep_check are cached on disk by the
+ # BlockerCache class.
+
+ # For installed packages, always ignore blockers from DEPEND since
+ # only runtime dependencies should be relevant for packages that
+ # are already built.
+ dep_keys = Package._runtime_keys
+ for myroot in self._frozen_config.trees:
+
+ if self._frozen_config.myopts.get("--root-deps") is not None and \
+ myroot != self._frozen_config.target_root:
+ continue
+
+ vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
+ pkgsettings = self._frozen_config.pkgsettings[myroot]
+ root_config = self._frozen_config.roots[myroot]
+ final_db = PackageTrackerDbapiWrapper(
+ myroot, self._dynamic_config._package_tracker)
+
+ blocker_cache = BlockerCache(myroot, vardb)
+ stale_cache = set(blocker_cache)
+ for pkg in vardb:
+ cpv = pkg.cpv
+ stale_cache.discard(cpv)
+ pkg_in_graph = self._dynamic_config.digraph.contains(pkg)
+ pkg_deps_added = \
+ pkg in self._dynamic_config._traversed_pkg_deps
+
+ # Check for masked installed packages. Only warn about
+ # packages that are in the graph in order to avoid warning
+ # about those that will be automatically uninstalled during
+ # the merge process or by --depclean. Always warn about
+ # packages masked by license, since the user likely wants
+ # to adjust ACCEPT_LICENSE.
+ if pkg in self._dynamic_config._package_tracker:
+ if not self._pkg_visibility_check(pkg,
+ trust_graph=False) and \
+ (pkg_in_graph or 'LICENSE' in pkg.masks):
+ self._dynamic_config._masked_installed.add(pkg)
+ else:
+ self._check_masks(pkg)
+
+ blocker_atoms = None
+ blockers = None
+ if pkg_deps_added:
+ blockers = []
+ try:
+ blockers.extend(
+ self._dynamic_config._blocker_parents.child_nodes(pkg))
+ except KeyError:
+ pass
+ try:
+ blockers.extend(
+ self._dynamic_config._irrelevant_blockers.child_nodes(pkg))
+ except KeyError:
+ pass
+ if blockers:
+ # Select just the runtime blockers.
+ blockers = [blocker for blocker in blockers \
+ if blocker.priority.runtime or \
+ blocker.priority.runtime_post]
+ if blockers is not None:
+ blockers = set(blocker.atom for blocker in blockers)
+
+ # If this node has any blockers, create a "nomerge"
+ # node for it so that they can be enforced.
+ self._spinner_update()
+ blocker_data = blocker_cache.get(cpv)
+ if blocker_data is not None and \
+ blocker_data.counter != pkg.counter:
+ blocker_data = None
+
+ # If blocker data from the graph is available, use
+ # it to validate the cache and update the cache if
+ # it seems invalid.
+ if blocker_data is not None and \
+ blockers is not None:
+ if not blockers.symmetric_difference(
+ blocker_data.atoms):
+ continue
+ blocker_data = None
+
+ if blocker_data is None and \
+ blockers is not None:
+ # Re-use the blockers from the graph.
+ blocker_atoms = sorted(blockers)
+ blocker_data = \
+ blocker_cache.BlockerData(pkg.counter, blocker_atoms)
+ blocker_cache[pkg.cpv] = blocker_data
+ continue
+
+ if blocker_data:
+ blocker_atoms = [Atom(atom) for atom in blocker_data.atoms]
+ else:
+ # Use aux_get() to trigger FakeVartree global
+ # updates on *DEPEND when appropriate.
+ depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
+ # It is crucial to pass in final_db here in order to
+ # optimize dep_check calls by eliminating atoms via
+ # dep_wordreduce and dep_eval calls.
+ try:
+ success, atoms = portage.dep_check(depstr,
+ final_db, pkgsettings, myuse=self._pkg_use_enabled(pkg),
+ trees=self._dynamic_config._graph_trees, myroot=myroot)
+ except SystemExit:
+ raise
+ except Exception as e:
+ # This is helpful, for example, if a ValueError
+ # is thrown from cpv_expand due to multiple
+ # matches (this can happen if an atom lacks a
+ # category).
+ show_invalid_depstring_notice(
+ pkg, "%s" % (e,))
+ del e
+ raise
+ if not success:
+ replacement_pkgs = self._dynamic_config._package_tracker.match(
+ myroot, pkg.slot_atom)
+ if any(replacement_pkg.operation == "merge" for
+ replacement_pkg in replacement_pkgs):
+ # This package is being replaced anyway, so
+ # ignore invalid dependencies so as not to
+ # annoy the user too much (otherwise they'd be
+ # forced to manually unmerge it first).
+ continue
+ show_invalid_depstring_notice(pkg, atoms)
+ return False
+ blocker_atoms = [myatom for myatom in atoms \
+ if myatom.blocker]
+ blocker_atoms.sort()
+ blocker_cache[cpv] = \
+ blocker_cache.BlockerData(pkg.counter, blocker_atoms)
+ if blocker_atoms:
+ try:
+ for atom in blocker_atoms:
+ blocker = Blocker(atom=atom,
+ eapi=pkg.eapi,
+ priority=self._priority(runtime=True),
+ root=myroot)
+ self._dynamic_config._blocker_parents.add(blocker, pkg)
+ except portage.exception.InvalidAtom as e:
+ depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
+ show_invalid_depstring_notice(
+ pkg, "Invalid Atom: %s" % (e,))
+ return False
+ for cpv in stale_cache:
+ del blocker_cache[cpv]
+ blocker_cache.flush()
+ del blocker_cache
+
+ # Discard any "uninstall" tasks scheduled by previous calls
+ # to this method, since those tasks may not make sense given
+ # the current graph state.
+ previous_uninstall_tasks = self._dynamic_config._blocker_uninstalls.leaf_nodes()
+ if previous_uninstall_tasks:
+ self._dynamic_config._blocker_uninstalls = digraph()
+ self._dynamic_config.digraph.difference_update(previous_uninstall_tasks)
+
+ # Revert state from previous calls.
+ self._dynamic_config._blocker_parents.update(
+ self._dynamic_config._irrelevant_blockers)
+ self._dynamic_config._irrelevant_blockers.clear()
+ self._dynamic_config._unsolvable_blockers.clear()
+
+ for blocker in self._dynamic_config._blocker_parents.leaf_nodes():
+ self._spinner_update()
+ root_config = self._frozen_config.roots[blocker.root]
+ virtuals = root_config.settings.getvirtuals()
+ myroot = blocker.root
+ initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
+
+ provider_virtual = False
+ if blocker.cp in virtuals and \
+ not self._have_new_virt(blocker.root, blocker.cp):
+ provider_virtual = True
+
+ # Use this to check PROVIDE for each matched package
+ # when necessary.
+ atom_set = InternalPackageSet(
+ initial_atoms=[blocker.atom])
+
+ if provider_virtual:
+ atoms = []
+ for provider_entry in virtuals[blocker.cp]:
+ atoms.append(Atom(blocker.atom.replace(
+ blocker.cp, provider_entry.cp, 1)))
+ else:
+ atoms = [blocker.atom]
+
+ blocked_initial = set()
+ for atom in atoms:
+ for pkg in initial_db.match_pkgs(atom):
+ if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
+ blocked_initial.add(pkg)
+
+ blocked_final = set()
+ for atom in atoms:
+ for pkg in self._dynamic_config._package_tracker.match(myroot, atom):
+ if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
+ blocked_final.add(pkg)
+
+ if not blocked_initial and not blocked_final:
+ parent_pkgs = self._dynamic_config._blocker_parents.parent_nodes(blocker)
+ self._dynamic_config._blocker_parents.remove(blocker)
+ # Discard any parents that don't have any more blockers.
+ for pkg in parent_pkgs:
+ self._dynamic_config._irrelevant_blockers.add(blocker, pkg)
+ if not self._dynamic_config._blocker_parents.child_nodes(pkg):
+ self._dynamic_config._blocker_parents.remove(pkg)
+ continue
+ for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
+ unresolved_blocks = False
+ depends_on_order = set()
+ for pkg in blocked_initial:
+ if pkg.slot_atom == parent.slot_atom and \
+ not blocker.atom.blocker.overlap.forbid:
+ # New !!atom blockers do not allow temporary
+ # simultaneous installation, so unlike !atom
+ # blockers, !!atom blockers aren't ignored
+ # when they match other packages occupying
+ # the same slot.
+ continue
+ if parent.installed:
+ # Two currently installed packages conflict with
+ # each other. Ignore this case since the damage
+ # is already done and this would be likely to
+ # confuse users if displayed like a normal blocker.
+ continue
+
+ self._dynamic_config._blocked_pkgs.add(pkg, blocker)
+
+ if parent.operation == "merge":
+ # Maybe the blocked package can be replaced or simply
+ # unmerged to resolve this block.
+ depends_on_order.add((pkg, parent))
+ continue
+ # None of the above blocker resolution techniques apply,
+ # so apparently this one is unresolvable.
+ unresolved_blocks = True
+ for pkg in blocked_final:
+ if pkg.slot_atom == parent.slot_atom and \
+ not blocker.atom.blocker.overlap.forbid:
+ # New !!atom blockers do not allow temporary
+ # simultaneous installation, so unlike !atom
+ # blockers, !!atom blockers aren't ignored
+ # when they match other packages occupying
+ # the same slot.
+ continue
+ if parent.operation == "nomerge" and \
+ pkg.operation == "nomerge":
+ # This blocker will be handled the next time that a
+ # merge of either package is triggered.
+ continue
+
+ self._dynamic_config._blocked_pkgs.add(pkg, blocker)
+
+ # Maybe the blocking package can be
+ # unmerged to resolve this block.
+ if parent.operation == "merge" and pkg.installed:
+ depends_on_order.add((pkg, parent))
+ continue
+ elif parent.operation == "nomerge":
+ depends_on_order.add((parent, pkg))
+ continue
+ # None of the above blocker resolution techniques apply,
+ # so apparently this one is unresolvable.
+ unresolved_blocks = True
+
+ # Make sure we don't unmerge any package that has been pulled
+ # into the graph.
+ if not unresolved_blocks and depends_on_order:
+ for inst_pkg, inst_task in depends_on_order:
+ if self._dynamic_config.digraph.contains(inst_pkg) and \
+ self._dynamic_config.digraph.parent_nodes(inst_pkg):
+ unresolved_blocks = True
+ break
+
+ if not unresolved_blocks and depends_on_order:
+ for inst_pkg, inst_task in depends_on_order:
+ uninst_task = Package(built=inst_pkg.built,
+ cpv=inst_pkg.cpv, installed=inst_pkg.installed,
+ metadata=inst_pkg._metadata,
+ operation="uninstall",
+ root_config=inst_pkg.root_config,
+ type_name=inst_pkg.type_name)
+ # Enforce correct merge order with a hard dep.
+ self._dynamic_config.digraph.addnode(uninst_task, inst_task,
+ priority=BlockerDepPriority.instance)
+ # Count references to this blocker so that it can be
+ # invalidated after nodes referencing it have been
+ # merged.
+ self._dynamic_config._blocker_uninstalls.addnode(uninst_task, blocker)
+ if not unresolved_blocks and not depends_on_order:
+ self._dynamic_config._irrelevant_blockers.add(blocker, parent)
+ self._dynamic_config._blocker_parents.remove_edge(blocker, parent)
+ if not self._dynamic_config._blocker_parents.parent_nodes(blocker):
+ self._dynamic_config._blocker_parents.remove(blocker)
+ if not self._dynamic_config._blocker_parents.child_nodes(parent):
+ self._dynamic_config._blocker_parents.remove(parent)
+ if unresolved_blocks:
+ self._dynamic_config._unsolvable_blockers.add(blocker, parent)
+
+ return True
+
+ def _accept_blocker_conflicts(self):
+ acceptable = False
+ for x in ("--buildpkgonly", "--fetchonly",
+ "--fetch-all-uri", "--nodeps"):
+ if x in self._frozen_config.myopts:
+ acceptable = True
+ break
+ return acceptable
+
+ def _merge_order_bias(self, mygraph):
+ """
+ For optimal leaf node selection, promote deep system runtime deps and
+ order nodes from highest to lowest overall reference count.
+ """
+
+ node_info = {}
+ for node in mygraph.order:
+ node_info[node] = len(mygraph.parent_nodes(node))
+ deep_system_deps = _find_deep_system_runtime_deps(mygraph)
+
+ def cmp_merge_preference(node1, node2):
+
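+ # Sort uninstall operations last, deep system runtime deps first,
+ # and otherwise prefer nodes with more parents in the graph.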
+ if node1.operation == 'uninstall':
+ if node2.operation == 'uninstall':
+ return 0
+ return 1
+
+ if node2.operation == 'uninstall':
+ if node1.operation == 'uninstall':
+ return 0
+ return -1
+
+ node1_sys = node1 in deep_system_deps
+ node2_sys = node2 in deep_system_deps
+ if node1_sys != node2_sys:
+ if node1_sys:
+ return -1
+ return 1
+
+ return node_info[node2] - node_info[node1]
+
+ mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
+
+ def altlist(self, reversed=DeprecationWarning):
+
+ if reversed is not DeprecationWarning:
+ warnings.warn("The reversed parameter of "
+ "_emerge.depgraph.depgraph.altlist() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ while self._dynamic_config._serialized_tasks_cache is None:
+ self._resolve_conflicts()
+ try:
+ self._dynamic_config._serialized_tasks_cache, self._dynamic_config._scheduler_graph = \
+ self._serialize_tasks()
+ except self._serialize_tasks_retry:
+ pass
+
+ retlist = self._dynamic_config._serialized_tasks_cache
+ if reversed is not DeprecationWarning and reversed:
+ # TODO: remove the "reversed" parameter (builtin name collision)
+ retlist = list(retlist)
+ retlist.reverse()
+ retlist = tuple(retlist)
+
+ return retlist
+
+ def _implicit_libc_deps(self, mergelist, graph):
+ """
+ Create implicit dependencies on libc, in order to ensure that libc
+ is installed as early as possible (see bug #303567).
+ """
+ libc_pkgs = {}
+ implicit_libc_roots = (self._frozen_config._running_root.root,)
+ for root in implicit_libc_roots:
+ vardb = self._frozen_config.trees[root]["vartree"].dbapi
+ for atom in self._expand_virt_from_graph(root,
+ portage.const.LIBC_PACKAGE_ATOM):
+ if atom.blocker:
+ continue
+ for pkg in self._dynamic_config._package_tracker.match(root, atom):
+ if pkg.operation == "merge" and \
+ not vardb.cpv_exists(pkg.cpv):
+ libc_pkgs.setdefault(pkg.root, set()).add(pkg)
+
+ if not libc_pkgs:
+ return
+
+ earlier_libc_pkgs = set()
+
+ for pkg in mergelist:
+ if not isinstance(pkg, Package):
+ # a satisfied blocker
+ continue
+ root_libc_pkgs = libc_pkgs.get(pkg.root)
+ if root_libc_pkgs is not None and \
+ pkg.operation == "merge":
+ if pkg in root_libc_pkgs:
+ earlier_libc_pkgs.add(pkg)
+ else:
+ for libc_pkg in root_libc_pkgs:
+ if libc_pkg in earlier_libc_pkgs:
+ graph.add(libc_pkg, pkg,
+ priority=DepPriority(buildtime=True))
+
+ def schedulerGraph(self):
+ """
+ The scheduler graph is identical to the normal one except that
+ uninstall edges are reversed in specific cases that require
+ conflicting packages to be temporarily installed simultaneously.
+ This is intended for use by the Scheduler in its parallelization
+ logic. It ensures that temporary simultaneous installation of
+ conflicting packages is avoided when appropriate (especially for
+ !!atom blockers), but allowed in specific cases that require it.
+
+ Note that this method calls break_refs() which alters the state of
+ internal Package instances such that this depgraph instance should
+ not be used to perform any more calculations.
+ """
+
+ # NOTE: altlist initializes self._dynamic_config._scheduler_graph
+ mergelist = self.altlist()
+ self._implicit_libc_deps(mergelist,
+ self._dynamic_config._scheduler_graph)
+
+ # Break DepPriority.satisfied attributes which reference
+ # installed Package instances.
+ for parents, children, node in \
+ self._dynamic_config._scheduler_graph.nodes.values():
+ for priorities in chain(parents.values(), children.values()):
+ for priority in priorities:
+ if priority.satisfied:
+ priority.satisfied = True
+
+ pkg_cache = self._frozen_config._pkg_cache
+ graph = self._dynamic_config._scheduler_graph
+ trees = self._frozen_config.trees
+ pruned_pkg_cache = {}
+ for key, pkg in pkg_cache.items():
+ if pkg in graph or \
+ (pkg.installed and pkg in trees[pkg.root]['vartree'].dbapi):
+ pruned_pkg_cache[key] = pkg
+
+ for root in trees:
+ trees[root]['vartree']._pkg_cache = pruned_pkg_cache
+
+ self.break_refs()
+ sched_config = \
+ _scheduler_graph_config(trees, pruned_pkg_cache, graph, mergelist)
+
+ return sched_config
+
+ def break_refs(self):
+ """
+ Break any references in Package instances that lead back to the depgraph.
+ This is useful if you want to hold references to packages without also
+ holding the depgraph on the heap. It should only be called after the
+ depgraph and _frozen_config will not be used for any more calculations.
+ """
+ for root_config in self._frozen_config.roots.values():
+ root_config.update(self._frozen_config._trees_orig[
+ root_config.root]["root_config"])
+ # Both instances are now identical, so discard the
+ # original which should have no other references.
+ self._frozen_config._trees_orig[
+ root_config.root]["root_config"] = root_config
+
+ def _resolve_conflicts(self):
+
+ if "complete" not in self._dynamic_config.myparams and \
+ self._dynamic_config._allow_backtracking and \
+ any(self._dynamic_config._package_tracker.slot_conflicts()) and \
+ not self._accept_blocker_conflicts():
+ self._dynamic_config.myparams["complete"] = True
+
+ if not self._complete_graph():
+ raise self._unknown_internal_error()
+
+ self._process_slot_conflicts()
+
+ def _serialize_tasks(self):
+
+ debug = "--debug" in self._frozen_config.myopts
+
+ if debug:
+ writemsg("\ndigraph:\n\n", noiselevel=-1)
+ self._dynamic_config.digraph.debug_print()
+ writemsg("\n", noiselevel=-1)
+
+ scheduler_graph = self._dynamic_config.digraph.copy()
+
+ if '--nodeps' in self._frozen_config.myopts:
+ # Preserve the package order given on the command line.
+ return ([node for node in scheduler_graph \
+ if isinstance(node, Package) \
+ and node.operation == 'merge'], scheduler_graph)
+
+ mygraph=self._dynamic_config.digraph.copy()
+
+ removed_nodes = set()
+
+ # Prune off all DependencyArg instances since they aren't
+ # needed, and because of nested sets this is faster than doing
+ # it with multiple digraph.root_nodes() calls below. This also
+ # takes care of nested sets that have circular references,
+ # which wouldn't be matched by digraph.root_nodes().
+ for node in mygraph:
+ if isinstance(node, DependencyArg):
+ removed_nodes.add(node)
+ if removed_nodes:
+ mygraph.difference_update(removed_nodes)
+ removed_nodes.clear()
+
+ # Prune "nomerge" root nodes if nothing depends on them, since
+ # otherwise they slow down merge order calculation. Don't remove
+ # non-root nodes since they help optimize merge order in some cases
+ # such as revdep-rebuild.
+
+ while True:
+ for node in mygraph.root_nodes():
+ if not isinstance(node, Package) or \
+ node.installed or node.onlydeps:
+ removed_nodes.add(node)
+ if removed_nodes:
+ self._spinner_update()
+ mygraph.difference_update(removed_nodes)
+ if not removed_nodes:
+ break
+ removed_nodes.clear()
+ self._merge_order_bias(mygraph)
+ def cmp_circular_bias(n1, n2):
+ """
+ RDEPEND is stronger than PDEPEND and this function
+ measures such a strength bias within a circular
+ dependency relationship.
+ """
+ n1_n2_medium = n2 in mygraph.child_nodes(n1,
+ ignore_priority=priority_range.ignore_medium_soft)
+ n2_n1_medium = n1 in mygraph.child_nodes(n2,
+ ignore_priority=priority_range.ignore_medium_soft)
+ if n1_n2_medium == n2_n1_medium:
+ return 0
+ elif n1_n2_medium:
+ return 1
+ return -1
+ myblocker_uninstalls = self._dynamic_config._blocker_uninstalls.copy()
+ retlist=[]
+ # Contains uninstall tasks that have been scheduled to
+ # occur after overlapping blockers have been installed.
+ scheduled_uninstalls = set()
+ # Contains any Uninstall tasks that have been ignored
+ # in order to avoid the circular deps code path. These
+ # correspond to blocker conflicts that could not be
+ # resolved.
+ ignored_uninstall_tasks = set()
+ have_uninstall_task = False
+ complete = "complete" in self._dynamic_config.myparams
+ ignore_world = self._dynamic_config.myparams.get("ignore_world", False)
+ asap_nodes = []
+
+ def get_nodes(**kwargs):
+ """
+ Returns leaf nodes excluding Uninstall instances
+ since those should be executed as late as possible.
+ """
+ return [node for node in mygraph.leaf_nodes(**kwargs) \
+ if isinstance(node, Package) and \
+ (node.operation != "uninstall" or \
+ node in scheduled_uninstalls)]
+
+ # sys-apps/portage needs special treatment if ROOT="/"
+ running_root = self._frozen_config._running_root.root
+ runtime_deps = InternalPackageSet(
+ initial_atoms=[PORTAGE_PACKAGE_ATOM])
+ running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
+ Atom(PORTAGE_PACKAGE_ATOM))
+ replacement_portage = list(self._dynamic_config._package_tracker.match(
+ running_root, Atom(PORTAGE_PACKAGE_ATOM)))
+
+ if running_portage:
+ running_portage = running_portage[0]
+ else:
+ running_portage = None
+
+ if replacement_portage:
+ replacement_portage = replacement_portage[0]
+ else:
+ replacement_portage = None
+
+ if replacement_portage == running_portage:
+ replacement_portage = None
+
+ if running_portage is not None:
+ try:
+ portage_rdepend = self._select_atoms_highest_available(
+ running_root, running_portage._metadata["RDEPEND"],
+ myuse=self._pkg_use_enabled(running_portage),
+ parent=running_portage, strict=False)
+ except portage.exception.InvalidDependString as e:
+ portage.writemsg("!!! Invalid RDEPEND in " + \
+ "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
+ (running_root, running_portage.cpv, e), noiselevel=-1)
+ del e
+ portage_rdepend = {running_portage : []}
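+ # Collect the running portage instance's non-blocker RDEPEND atoms;
+ # packages matching these atoms are protected from uninstallation below.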
+ for atoms in portage_rdepend.values():
+ runtime_deps.update(atom for atom in atoms \
+ if not atom.blocker)
+
+ # Merge libc asap, in order to account for implicit
+ # dependencies. See bug #303567.
+ implicit_libc_roots = (running_root,)
+ for root in implicit_libc_roots:
+ libc_pkgs = set()
+ vardb = self._frozen_config.trees[root]["vartree"].dbapi
+ for atom in self._expand_virt_from_graph(root,
+ portage.const.LIBC_PACKAGE_ATOM):
+ if atom.blocker:
+ continue
+
+ for pkg in self._dynamic_config._package_tracker.match(root, atom):
+ if pkg.operation == "merge" and \
+ not vardb.cpv_exists(pkg.cpv):
+ libc_pkgs.add(pkg)
+
+ if libc_pkgs:
+ # If there's also an os-headers upgrade, we need to
+ # pull that in first. See bug #328317.
+ for atom in self._expand_virt_from_graph(root,
+ portage.const.OS_HEADERS_PACKAGE_ATOM):
+ if atom.blocker:
+ continue
+
+ for pkg in self._dynamic_config._package_tracker.match(root, atom):
+ if pkg.operation == "merge" and \
+ not vardb.cpv_exists(pkg.cpv):
+ asap_nodes.append(pkg)
+
+ asap_nodes.extend(libc_pkgs)
+
+ def gather_deps(ignore_priority, mergeable_nodes,
+ selected_nodes, node):
+ """
+ Recursively gather a group of nodes that RDEPEND on
+ each other. This ensures that they are merged as a group
+ and get their RDEPENDs satisfied as soon as possible.
+ """
+ if node in selected_nodes:
+ return True
+ if node not in mergeable_nodes:
+ return False
+ if node == replacement_portage and any(
+ getattr(rdep, 'operation', None) != 'uninstall'
+ for rdep in mygraph.child_nodes(node,
+ ignore_priority=priority_range.ignore_medium_soft)):
+ # Make sure that portage always has all of its
+ # RDEPENDs installed first, but ignore uninstalls
+ # (these occur when new portage blocks older repoman).
+ return False
+ selected_nodes.add(node)
+ for child in mygraph.child_nodes(node,
+ ignore_priority=ignore_priority):
+ if not gather_deps(ignore_priority,
+ mergeable_nodes, selected_nodes, child):
+ return False
+ return True
+
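+ # These helpers additionally ignore edges carrying BlockerDepPriority,
+ # which are the edges added further below when an uninstall is deferred
+ # until after the blocking packages have been merged.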
+ def ignore_uninst_or_med(priority):
+ if priority is BlockerDepPriority.instance:
+ return True
+ return priority_range.ignore_medium(priority)
+
+ def ignore_uninst_or_med_soft(priority):
+ if priority is BlockerDepPriority.instance:
+ return True
+ return priority_range.ignore_medium_soft(priority)
+
+ tree_mode = "--tree" in self._frozen_config.myopts
+ # Tracks whether or not the current iteration should prefer asap_nodes
+ # if available. This is set to False when the previous iteration
+ # failed to select any nodes. It is reset whenever nodes are
+ # successfully selected.
+ prefer_asap = True
+
+ # Controls whether or not the current iteration should drop edges that
+ # are "satisfied" by installed packages, in order to solve circular
+ # dependencies. The deep runtime dependencies of installed packages are
+ # not checked in this case (bug #199856), so it must be avoided
+ # whenever possible.
+ drop_satisfied = False
+
+ # State of variables for successive iterations that loosen the
+ # criteria for node selection.
+ #
+ # iteration prefer_asap drop_satisfied
+ # 1 True False
+ # 2 False False
+ # 3 False True
+ #
+ # If no nodes are selected on the last iteration, it is due to
+ # unresolved blockers or circular dependencies.
+
+ while mygraph:
+ self._spinner_update()
+ selected_nodes = None
+ ignore_priority = None
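+ # Use the "satisfied" priority range, which permits ignoring edges that
+ # are already satisfied by installed packages, only when drop_satisfied
+ # is set or when trying to service asap_nodes.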
+ if drop_satisfied or (prefer_asap and asap_nodes):
+ priority_range = DepPrioritySatisfiedRange
+ else:
+ priority_range = DepPriorityNormalRange
+ if prefer_asap and asap_nodes:
+ # ASAP nodes are merged before their soft deps. Go ahead and
+ # select root nodes here if necessary, since it's typical for
+ # the parent to have been removed from the graph already.
+ asap_nodes = [node for node in asap_nodes \
+ if mygraph.contains(node)]
+ for i in range(priority_range.SOFT,
+ priority_range.MEDIUM_SOFT + 1):
+ ignore_priority = priority_range.ignore_priority[i]
+ for node in asap_nodes:
+ if not mygraph.child_nodes(node,
+ ignore_priority=ignore_priority):
+ selected_nodes = [node]
+ asap_nodes.remove(node)
+ break
+ if selected_nodes:
+ break
+
+ if not selected_nodes and \
+ not (prefer_asap and asap_nodes):
+ for i in range(priority_range.NONE,
+ priority_range.MEDIUM_SOFT + 1):
+ ignore_priority = priority_range.ignore_priority[i]
+ nodes = get_nodes(ignore_priority=ignore_priority)
+ if nodes:
+ # If there is a mixture of merges and uninstalls,
+ # do the uninstalls first.
+ good_uninstalls = None
+ if len(nodes) > 1:
+ good_uninstalls = []
+ for node in nodes:
+ if node.operation == "uninstall":
+ good_uninstalls.append(node)
+
+ if good_uninstalls:
+ nodes = good_uninstalls
+
+ if good_uninstalls or len(nodes) == 1 or \
+ (ignore_priority is None and \
+ not asap_nodes and not tree_mode):
+ # Greedily pop all of these nodes since no
+ # relationship has been ignored. This optimization
+ # destroys --tree output, so it's disabled in tree
+ # mode.
+ selected_nodes = nodes
+ else:
+ # For optimal merge order:
+ # * Only pop one node.
+ # * Removing a root node (node without a parent)
+ # will not produce a leaf node, so avoid it.
+ # * It's normal for a selected uninstall to be a
+ # root node, so don't check it for parents.
+ if asap_nodes:
+ prefer_asap_parents = (True, False)
+ else:
+ prefer_asap_parents = (False,)
+ for check_asap_parent in prefer_asap_parents:
+ if check_asap_parent:
+ for node in nodes:
+ parents = mygraph.parent_nodes(node,
+ ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
+ if any(x in asap_nodes for x in parents):
+ selected_nodes = [node]
+ break
+ else:
+ for node in nodes:
+ if mygraph.parent_nodes(node):
+ selected_nodes = [node]
+ break
+ if selected_nodes:
+ break
+ if selected_nodes:
+ break
+
+ if not selected_nodes:
+ nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
+ if nodes:
+ mergeable_nodes = set(nodes)
+ if prefer_asap and asap_nodes:
+ nodes = asap_nodes
+ # When gathering the nodes belonging to a runtime cycle,
+ # we want to minimize the number of nodes gathered, since
+ # this tends to produce a more optimal merge order.
+ # Ignoring all medium_soft deps serves this purpose.
+ # In the case of multiple runtime cycles, where some cycles
+ # may depend on smaller independent cycles, it's optimal
+ # to merge smaller independent cycles before other cycles
+ # that depend on them. Therefore, we search for the
+ # smallest cycle in order to try and identify and prefer
+ # these smaller independent cycles.
+ ignore_priority = priority_range.ignore_medium_soft
+ smallest_cycle = None
+ for node in nodes:
+ if not mygraph.parent_nodes(node):
+ continue
+ selected_nodes = set()
+ if gather_deps(ignore_priority,
+ mergeable_nodes, selected_nodes, node):
+ if smallest_cycle is None or \
+ len(selected_nodes) < len(smallest_cycle):
+ smallest_cycle = selected_nodes
+
+ selected_nodes = smallest_cycle
+
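+ # Restrict a copy of the graph to the selected cycle so that a
+ # representative leaf can be chosen from it and the cycle can be
+ # printed in debug mode.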
+ if selected_nodes is not None:
+ cycle_digraph = mygraph.copy()
+ cycle_digraph.difference_update([x for x in
+ cycle_digraph if x not in selected_nodes])
+
+ leaves = cycle_digraph.leaf_nodes()
+ if leaves:
+ # NOTE: This case should only be triggered when
+ # prefer_asap is True, since otherwise these
+ # leaves would have been selected to merge
+ # before this point. Since these "leaves" may
+ # actually have some low-priority dependencies
+ # that we have intentionally ignored, select
+ # only one node here, so that merge order
+ # accounts for as many dependencies as possible.
+ selected_nodes = [leaves[0]]
+
+ if debug:
+ writemsg("\nruntime cycle digraph (%s nodes):\n\n" %
+ (len(selected_nodes),), noiselevel=-1)
+ cycle_digraph.debug_print()
+ writemsg("\n", noiselevel=-1)
+
+ if leaves:
+ writemsg("runtime cycle leaf: %s\n\n" %
+ (selected_nodes[0],), noiselevel=-1)
+
+ if prefer_asap and asap_nodes and not selected_nodes:
+ # We failed to find any asap nodes to merge, so ignore
+ # them for the next iteration.
+ prefer_asap = False
+ continue
+
+ if selected_nodes and ignore_priority is not None:
+ # Try to merge ignored medium_soft deps as soon as possible
+ # if they're not satisfied by installed packages.
+ for node in selected_nodes:
+ children = set(mygraph.child_nodes(node))
+ soft = children.difference(
+ mygraph.child_nodes(node,
+ ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
+ medium_soft = children.difference(
+ mygraph.child_nodes(node,
+ ignore_priority = \
+ DepPrioritySatisfiedRange.ignore_medium_soft))
+ medium_soft.difference_update(soft)
+ for child in medium_soft:
+ if child in selected_nodes:
+ continue
+ if child in asap_nodes:
+ continue
+ # Merge PDEPEND asap for bug #180045.
+ asap_nodes.append(child)
+
+ if selected_nodes and len(selected_nodes) > 1:
+ if not isinstance(selected_nodes, list):
+ selected_nodes = list(selected_nodes)
+ selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
+
+ if not selected_nodes and myblocker_uninstalls:
+ # An Uninstall task needs to be executed in order to
+ # avoid conflict if possible.
+
+ if drop_satisfied:
+ priority_range = DepPrioritySatisfiedRange
+ else:
+ priority_range = DepPriorityNormalRange
+
+ mergeable_nodes = get_nodes(
+ ignore_priority=ignore_uninst_or_med)
+
+ min_parent_deps = None
+ uninst_task = None
+
+ for task in myblocker_uninstalls.leaf_nodes():
+ # Do some sanity checks so that system or world packages
+ # don't get uninstalled inappropriately here (only really
+ # necessary when --complete-graph has not been enabled).
+
+ if task in ignored_uninstall_tasks:
+ continue
+
+ if task in scheduled_uninstalls:
+ # It's been scheduled but it hasn't
+ # been executed yet due to dependence
+ # on installation of blocking packages.
+ continue
+
+ root_config = self._frozen_config.roots[task.root]
+ inst_pkg = self._pkg(task.cpv, "installed", root_config,
+ installed=True)
+
+ if self._dynamic_config.digraph.contains(inst_pkg):
+ continue
+
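+ # Distinguish strong (!!) blockers, which explicitly forbid temporary
+ # overlap, from blockers in older EAPIs that only imply it heuristically.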
+ forbid_overlap = False
+ heuristic_overlap = False
+ for blocker in myblocker_uninstalls.parent_nodes(task):
+ if not eapi_has_strong_blocks(blocker.eapi):
+ heuristic_overlap = True
+ elif blocker.atom.blocker.overlap.forbid:
+ forbid_overlap = True
+ break
+ if forbid_overlap and running_root == task.root:
+ continue
+
+ if heuristic_overlap and running_root == task.root:
+ # Never uninstall sys-apps/portage or its essential
+ # dependencies, except through replacement.
+ try:
+ runtime_dep_atoms = \
+ list(runtime_deps.iterAtomsForPackage(task))
+ except portage.exception.InvalidDependString as e:
+ portage.writemsg("!!! Invalid PROVIDE in " + \
+ "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
+ (task.root, task.cpv, e), noiselevel=-1)
+ del e
+ continue
+
+ # Don't uninstall a runtime dep if it appears
+ # to be the only suitable one installed.
+ skip = False
+ vardb = root_config.trees["vartree"].dbapi
+ for atom in runtime_dep_atoms:
+ other_version = None
+ for pkg in vardb.match_pkgs(atom):
+ if pkg.cpv == task.cpv and \
+ pkg.counter == task.counter:
+ continue
+ other_version = pkg
+ break
+ if other_version is None:
+ skip = True
+ break
+ if skip:
+ continue
+
+ # For packages in the system set, don't take
+ # any chances. If the conflict can't be resolved
+ # by a normal replacement operation then abort.
+ skip = False
+ try:
+ for atom in root_config.sets[
+ "system"].iterAtomsForPackage(task):
+ skip = True
+ break
+ except portage.exception.InvalidDependString as e:
+ portage.writemsg("!!! Invalid PROVIDE in " + \
+ "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
+ (task.root, task.cpv, e), noiselevel=-1)
+ del e
+ skip = True
+ if skip:
+ continue
+
+ # Note that the world check isn't always
+ # necessary since self._complete_graph() will
+ # add all packages from the system and world sets to the
+ # graph. This just allows unresolved conflicts to be
+ # detected as early as possible, which makes it possible
+ # to avoid calling self._complete_graph() when it is
+ unnecessary due to blockers triggering an abort.
+ if not (complete or ignore_world):
+ # For packages in the world set, go ahead and uninstall
+ # when necessary, as long as the atom will be satisfied
+ # in the final state.
+ skip = False
+ try:
+ for atom in root_config.sets[
+ "selected"].iterAtomsForPackage(task):
+ satisfied = False
+ for pkg in self._dynamic_config._package_tracker.match(task.root, atom):
+ if pkg == inst_pkg:
+ continue
+ satisfied = True
+ break
+ if not satisfied:
+ skip = True
+ self._dynamic_config._blocked_world_pkgs[inst_pkg] = atom
+ break
+ except portage.exception.InvalidDependString as e:
+ portage.writemsg("!!! Invalid PROVIDE in " + \
+ "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
+ (task.root, task.cpv, e), noiselevel=-1)
+ del e
+ skip = True
+ if skip:
+ continue
+
+ # Check the deps of parent nodes to ensure that
+ # the chosen task produces a leaf node. Maybe
+ # this can be optimized some more to make the
+ # best possible choice, but the current algorithm
+ # is simple and should be near optimal for most
+ # common cases.
+ self._spinner_update()
+ mergeable_parent = False
+ parent_deps = set()
+ parent_deps.add(task)
+ for parent in mygraph.parent_nodes(task):
+ parent_deps.update(mygraph.child_nodes(parent,
+ ignore_priority=priority_range.ignore_medium_soft))
+ if min_parent_deps is not None and \
+ len(parent_deps) >= min_parent_deps:
+ # This task is no better than a previously selected
+ # task, so abort search now in order to avoid wasting
+ # any more cpu time on this task. This increases
+ # performance dramatically in cases when there are
+ # hundreds of blockers to solve, like when
+ # upgrading to a new slot of kde-meta.
+ mergeable_parent = None
+ break
+ if parent in mergeable_nodes and \
+ gather_deps(ignore_uninst_or_med_soft,
+ mergeable_nodes, set(), parent):
+ mergeable_parent = True
+
+ if not mergeable_parent:
+ continue
+
+ if min_parent_deps is None or \
+ len(parent_deps) < min_parent_deps:
+ min_parent_deps = len(parent_deps)
+ uninst_task = task
+
+ if uninst_task is not None and min_parent_deps == 1:
+ # This is the best possible result, so abort the search
+ # now in order to avoid wasting any more cpu time.
+ break
+
+ if uninst_task is not None:
+ # The uninstall is performed only after blocking
+ # packages have been merged on top of it. File
+ # collisions between blocking packages are detected
+ # and removed from the list of files to be uninstalled.
+ scheduled_uninstalls.add(uninst_task)
+ parent_nodes = mygraph.parent_nodes(uninst_task)
+
+ # Reverse the parent -> uninstall edges since we want
+ # to do the uninstall after blocking packages have
+ # been merged on top of it.
+ mygraph.remove(uninst_task)
+ for blocked_pkg in parent_nodes:
+ mygraph.add(blocked_pkg, uninst_task,
+ priority=BlockerDepPriority.instance)
+ scheduler_graph.remove_edge(uninst_task, blocked_pkg)
+ scheduler_graph.add(blocked_pkg, uninst_task,
+ priority=BlockerDepPriority.instance)
+
+ # Sometimes a merge node will render an uninstall
+ # node unnecessary (due to occupying the same SLOT),
+ # and we want to avoid executing a separate uninstall
+ # task in that case.
+ for slot_node in self._dynamic_config._package_tracker.match(
+ uninst_task.root, uninst_task.slot_atom):
+ if slot_node.operation == "merge":
+ mygraph.add(slot_node, uninst_task,
+ priority=BlockerDepPriority.instance)
+
+ # Reset the state variables for leaf node selection and
+ # continue trying to select leaf nodes.
+ prefer_asap = True
+ drop_satisfied = False
+ continue
+
+ if not selected_nodes:
+ # Only select root nodes as a last resort. This case should
+ # only trigger when the graph is nearly empty and the only
+ # remaining nodes are isolated (no parents or children). Since
+ # the nodes must be isolated, ignore_priority is not needed.
+ selected_nodes = get_nodes()
+
+ if not selected_nodes and not drop_satisfied:
+ drop_satisfied = True
+ continue
+
+ if not selected_nodes and myblocker_uninstalls:
+ # If possible, drop an uninstall task here in order to avoid
+ # the circular deps code path. The corresponding blocker will
+ # still be counted as an unresolved conflict.
+ uninst_task = None
+ for node in myblocker_uninstalls.leaf_nodes():
+ try:
+ mygraph.remove(node)
+ except KeyError:
+ pass
+ else:
+ uninst_task = node
+ ignored_uninstall_tasks.add(node)
+ break
+
+ if uninst_task is not None:
+ # Reset the state variables for leaf node selection and
+ # continue trying to select leaf nodes.
+ prefer_asap = True
+ drop_satisfied = False
+ continue
+
+ if not selected_nodes:
+ self._dynamic_config._circular_deps_for_display = mygraph
+ self._dynamic_config._skip_restart = True
+ raise self._unknown_internal_error()
+
+ # At this point, we've succeeded in selecting one or more nodes, so
+ # reset state variables for leaf node selection.
+ prefer_asap = True
+ drop_satisfied = False
+
+ mygraph.difference_update(selected_nodes)
+
+ for node in selected_nodes:
+ if isinstance(node, Package) and \
+ node.operation == "nomerge":
+ continue
+
+ # Handle interactions between blockers
+ # and uninstallation tasks.
+ solved_blockers = set()
+ uninst_task = None
+ if isinstance(node, Package) and \
+ "uninstall" == node.operation:
+ have_uninstall_task = True
+ uninst_task = node
+ else:
+ vardb = self._frozen_config.trees[node.root]["vartree"].dbapi
+ inst_pkg = vardb.match_pkgs(node.slot_atom)
+ if inst_pkg:
+ # The package will be replaced by this one, so remove
+ # the corresponding Uninstall task if necessary.
+ inst_pkg = inst_pkg[0]
+ uninst_task = Package(built=inst_pkg.built,
+ cpv=inst_pkg.cpv, installed=inst_pkg.installed,
+ metadata=inst_pkg._metadata,
+ operation="uninstall",
+ root_config=inst_pkg.root_config,
+ type_name=inst_pkg.type_name)
+ try:
+ mygraph.remove(uninst_task)
+ except KeyError:
+ pass
+
+ if uninst_task is not None and \
+ uninst_task not in ignored_uninstall_tasks and \
+ myblocker_uninstalls.contains(uninst_task):
+ blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
+ myblocker_uninstalls.remove(uninst_task)
+ # Discard any blockers that this Uninstall solves.
+ for blocker in blocker_nodes:
+ if not myblocker_uninstalls.child_nodes(blocker):
+ myblocker_uninstalls.remove(blocker)
+ if blocker not in \
+ self._dynamic_config._unsolvable_blockers:
+ solved_blockers.add(blocker)
+
+ retlist.append(node)
+
+ if (isinstance(node, Package) and \
+ "uninstall" == node.operation) or \
+ (uninst_task is not None and \
+ uninst_task in scheduled_uninstalls):
+ # Include satisfied blockers in the merge list
+ # since the user might be interested and also
+ # it serves as an indicator that blocking packages
+ # will be temporarily installed simultaneously.
+ for blocker in solved_blockers:
+ retlist.append(blocker)
+
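+ # Blockers that still remain as root nodes of the uninstall graph were
+ # never resolved by a scheduled uninstall, so count them as unsolvable.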
+ unsolvable_blockers = set(self._dynamic_config._unsolvable_blockers.leaf_nodes())
+ for node in myblocker_uninstalls.root_nodes():
+ unsolvable_blockers.add(node)
+
+ # If any Uninstall tasks need to be executed in order
+ # to avoid a conflict, complete the graph with any
+ # dependencies that may have been initially
+ # neglected (to ensure that unsafe Uninstall tasks
+ # are properly identified and blocked from execution).
+ if have_uninstall_task and \
+ not complete and \
+ not unsolvable_blockers:
+ self._dynamic_config.myparams["complete"] = True
+ if '--debug' in self._frozen_config.myopts:
+ msg = []
+ msg.append("enabling 'complete' depgraph mode " + \
+ "due to uninstall task(s):")
+ msg.append("")
+ for node in retlist:
+ if isinstance(node, Package) and \
+ node.operation == 'uninstall':
+ msg.append("\t%s" % (node,))
+ writemsg_level("\n%s\n" % \
+ "".join("%s\n" % line for line in msg),
+ level=logging.DEBUG, noiselevel=-1)
+ raise self._serialize_tasks_retry("")
+
+ # Set satisfied state on blockers, but not before the
+ # above retry path, since we don't want to modify the
+ # state in that case.
+ for node in retlist:
+ if isinstance(node, Blocker):
+ node.satisfied = True
+
+ for blocker in unsolvable_blockers:
+ retlist.append(blocker)
+
+ retlist = tuple(retlist)
+
+ if unsolvable_blockers and \
+ not self._accept_blocker_conflicts():
+ self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
+ self._dynamic_config._serialized_tasks_cache = retlist
+ self._dynamic_config._scheduler_graph = scheduler_graph
+ # Blockers don't trigger the _skip_restart flag, since
+ # backtracking may solve blockers when it solves slot
+ # conflicts (or by blind luck).
+ raise self._unknown_internal_error()
+
+ have_slot_conflict = any(self._dynamic_config._package_tracker.slot_conflicts())
+ if have_slot_conflict and \
+ not self._accept_blocker_conflicts():
+ self._dynamic_config._serialized_tasks_cache = retlist
+ self._dynamic_config._scheduler_graph = scheduler_graph
+ raise self._unknown_internal_error()
+
+ return retlist, scheduler_graph
+
+ def _show_circular_deps(self, mygraph):
+ self._dynamic_config._circular_dependency_handler = \
+ circular_dependency_handler(self, mygraph)
+ handler = self._dynamic_config._circular_dependency_handler
+
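+ # Force verbose, tree-style output so that the dependency cycle is
+ # easier to follow in the merge list displayed below.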
+ self._frozen_config.myopts.pop("--quiet", None)
+ self._frozen_config.myopts["--verbose"] = True
+ self._frozen_config.myopts["--tree"] = True
+ portage.writemsg("\n\n", noiselevel=-1)
+ self.display(handler.merge_list)
+ prefix = colorize("BAD", " * ")
+ portage.writemsg("\n", noiselevel=-1)
+ portage.writemsg(prefix + "Error: circular dependencies:\n",
+ noiselevel=-1)
+ portage.writemsg("\n", noiselevel=-1)
+
+ if handler.circular_dep_message is None:
+ handler.debug_print()
+ portage.writemsg("\n", noiselevel=-1)
+
+ if handler.circular_dep_message is not None:
+ portage.writemsg(handler.circular_dep_message, noiselevel=-1)
+
+ suggestions = handler.suggestions
+ if suggestions:
+ writemsg("\n\nIt might be possible to break this cycle\n", noiselevel=-1)
+ if len(suggestions) == 1:
+ writemsg("by applying the following change:\n", noiselevel=-1)
+ else:
+ writemsg("by applying " + colorize("bold", "any of") + \
+ " the following changes:\n", noiselevel=-1)
+ writemsg("".join(suggestions), noiselevel=-1)
+ writemsg("\nNote that this change can be reverted, once the package has" + \
+ " been installed.\n", noiselevel=-1)
+ if handler.large_cycle_count:
+ writemsg("\nNote that the dependency graph contains a lot of cycles.\n" + \
+ "Several changes might be required to resolve all cycles.\n" + \
+ "Temporarily changing some use flag for all packages might be the better option.\n", noiselevel=-1)
+ else:
+ writemsg("\n\n", noiselevel=-1)
+ writemsg(prefix + "Note that circular dependencies " + \
+ "can often be avoided by temporarily\n", noiselevel=-1)
+ writemsg(prefix + "disabling USE flags that trigger " + \
+ "optional dependencies.\n", noiselevel=-1)
+
+ def _show_merge_list(self):
+ if self._dynamic_config._serialized_tasks_cache is not None and \
+ not (self._dynamic_config._displayed_list is not None and \
+ self._dynamic_config._displayed_list is self._dynamic_config._serialized_tasks_cache):
+ self.display(self._dynamic_config._serialized_tasks_cache)
+
+ def _show_unsatisfied_blockers(self, blockers):
+ self._show_merge_list()
+ msg = "Error: The above package list contains " + \
+ "packages which cannot be installed " + \
+ "at the same time on the same system."
+ prefix = colorize("BAD", " * ")
+ portage.writemsg("\n", noiselevel=-1)
+ for line in textwrap.wrap(msg, 70):
+ portage.writemsg(prefix + line + "\n", noiselevel=-1)
+
+ # Display the conflicting packages along with the packages
+ # that pulled them in. This is helpful for troubleshooting
+ # cases in which blockers don't solve automatically and
+ # the reasons are not apparent from the normal merge list
+ # display.
+
+ conflict_pkgs = {}
+ for blocker in blockers:
+ for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
+ self._dynamic_config._blocker_parents.parent_nodes(blocker)):
+
+ is_slot_conflict_pkg = False
+ for conflict in self._dynamic_config._package_tracker.slot_conflicts():
+ if conflict.root == pkg.root and conflict.atom == pkg.slot_atom:
+ is_slot_conflict_pkg = True
+ break
+ if is_slot_conflict_pkg:
+ # The slot conflict display has better noise reduction
+ # than the unsatisfied blockers display, so skip
+ # unsatisfied blockers display for packages involved
+ # directly in slot conflicts (see bug #385391).
+ continue
+ parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
+ if not parent_atoms:
+ atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
+ if atom is not None:
+ parent_atoms = set([("@selected", atom)])
+ if parent_atoms:
+ conflict_pkgs[pkg] = parent_atoms
+
+ if conflict_pkgs:
+ # Reduce noise by pruning packages that are only
+ # pulled in by other conflict packages.
+ pruned_pkgs = set()
+ for pkg, parent_atoms in conflict_pkgs.items():
+ relevant_parent = False
+ for parent, atom in parent_atoms:
+ if parent not in conflict_pkgs:
+ relevant_parent = True
+ break
+ if not relevant_parent:
+ pruned_pkgs.add(pkg)
+ for pkg in pruned_pkgs:
+ del conflict_pkgs[pkg]
+
+ if conflict_pkgs:
+ msg = []
+ msg.append("\n")
+ indent = " "
+ for pkg, parent_atoms in conflict_pkgs.items():
+
+ # Prefer packages that are not directly involved in a conflict.
+ # It can be essential to see all the packages here, so don't
+ # omit any. If the list is long, people can simply use a pager.
+ preferred_parents = set()
+ for parent_atom in parent_atoms:
+ parent, atom = parent_atom
+ if parent not in conflict_pkgs:
+ preferred_parents.add(parent_atom)
+
+ ordered_list = list(preferred_parents)
+ if len(parent_atoms) > len(ordered_list):
+ for parent_atom in parent_atoms:
+ if parent_atom not in preferred_parents:
+ ordered_list.append(parent_atom)
+
+ msg.append(indent + "%s pulled in by\n" % pkg)
+
+ for parent_atom in ordered_list:
+ parent, atom = parent_atom
+ msg.append(2*indent)
+ if isinstance(parent,
+ (PackageArg, AtomArg)):
+ # For PackageArg and AtomArg types, it's
+ # redundant to display the atom attribute.
+ msg.append(str(parent))
+ else:
+ # Display the specific atom from SetArg or
+ # Package types.
+ if atom.package and atom != atom.unevaluated_atom:
+ # Show the unevaluated atom, since it can reveal
+ # issues with conditional use-flags missing
+ # from IUSE.
+ msg.append("%s (%s) required by %s" %
+ (atom.unevaluated_atom, atom, parent))
+ else:
+ msg.append("%s required by %s" % (atom, parent))
+ msg.append("\n")
+
+ msg.append("\n")
+
+ writemsg("".join(msg), noiselevel=-1)
+
+ if "--quiet" not in self._frozen_config.myopts:
+ show_blocker_docs_link()
+
+ def display(self, mylist, favorites=[], verbosity=None):
+
+ # This is used to prevent display_problems() from
+ # redundantly displaying this exact same merge list
+ # again via _show_merge_list().
+ self._dynamic_config._displayed_list = mylist
+
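+ # The --tree display renders the list in reverse merge order.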
+ if "--tree" in self._frozen_config.myopts:
+ mylist = tuple(reversed(mylist))
+
+ display = Display()
+
+ return display(self, mylist, favorites, verbosity)
+
+ def _display_autounmask(self, autounmask_continue=False):
+ """
+ Display --autounmask message and optionally write it to config files
+ (using CONFIG_PROTECT). The message includes the comments and the changes.
+ """
+
+ if self._dynamic_config._displayed_autounmask:
+ return
+
+ self._dynamic_config._displayed_autounmask = True
+
+ ask = "--ask" in self._frozen_config.myopts
+ autounmask_write = autounmask_continue or \
+ self._frozen_config.myopts.get("--autounmask-write",
+ ask) is True
+ autounmask_unrestricted_atoms = \
+ self._frozen_config.myopts.get("--autounmask-unrestricted-atoms", "n") is True
+ quiet = "--quiet" in self._frozen_config.myopts
+ pretend = "--pretend" in self._frozen_config.myopts
+ enter_invalid = '--ask-enter-invalid' in self._frozen_config.myopts
+
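+ # check_if_latest determines whether pkg is the highest version available
+ # overall and within its own slot, which decides between >=cat/pkg-ver,
+ # >=cat/pkg-ver:slot and =cat/pkg-ver atoms in the generated entries.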
+ def check_if_latest(pkg, check_visibility=False):
+ is_latest = True
+ is_latest_in_slot = True
+ dbs = self._dynamic_config._filtered_trees[pkg.root]["dbs"]
+ root_config = self._frozen_config.roots[pkg.root]
+
+ for db, pkg_type, built, installed, db_keys in dbs:
+ for other_pkg in self._iter_match_pkgs(root_config, pkg_type, Atom(pkg.cp)):
+ if (check_visibility and
+ not self._pkg_visibility_check(other_pkg)):
+ continue
+ if other_pkg.cp != pkg.cp:
+ # old-style PROVIDE virtual means there are no
+ # normal matches for this pkg_type
+ break
+ if other_pkg > pkg:
+ is_latest = False
+ if other_pkg.slot_atom == pkg.slot_atom:
+ is_latest_in_slot = False
+ break
+ else:
+ # iter_match_pkgs yields highest version first, so
+ # there's no need to search this pkg_type any further
+ break
+
+ if not is_latest_in_slot:
+ break
+
+ return is_latest, is_latest_in_slot
+
+ # Set of roots for which we have autounmask changes.
+ roots = set()
+
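+ # Build per-root lists of config file lines for each kind of autounmask
+ # change: accept_keywords, package.unmask, package.use and package.license.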
+ masked_by_missing_keywords = False
+ unstable_keyword_msg = {}
+ for pkg in self._dynamic_config._needed_unstable_keywords:
+ self._show_merge_list()
+ if pkg in self._dynamic_config.digraph:
+ root = pkg.root
+ roots.add(root)
+ unstable_keyword_msg.setdefault(root, [])
+ is_latest, is_latest_in_slot = check_if_latest(pkg)
+ pkgsettings = self._frozen_config.pkgsettings[pkg.root]
+ mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
+ use=self._pkg_use_enabled(pkg))
+ for reason in mreasons:
+ if reason.unmask_hint and \
+ reason.unmask_hint.key == 'unstable keyword':
+ keyword = reason.unmask_hint.value
+ if keyword == "**":
+ masked_by_missing_keywords = True
+
+ unstable_keyword_msg[root].append(self._get_dep_chain_as_comment(pkg))
+ if autounmask_unrestricted_atoms:
+ if is_latest:
+ unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
+ elif is_latest_in_slot:
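+ # The scheduler_graph copy is returned to the caller, while the mygraph
+ # copy below is progressively pruned as the merge order is computed.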
+ unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, keyword))
+ else:
+ unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
+ else:
+ unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
+
+ p_mask_change_msg = {}
+ for pkg in self._dynamic_config._needed_p_mask_changes:
+ self._show_merge_list()
+ if pkg in self._dynamic_config.digraph:
+ root = pkg.root
+ roots.add(root)
+ p_mask_change_msg.setdefault(root, [])
+ is_latest, is_latest_in_slot = check_if_latest(pkg)
+ pkgsettings = self._frozen_config.pkgsettings[pkg.root]
+ mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
+ use=self._pkg_use_enabled(pkg))
+ for reason in mreasons:
+ if reason.unmask_hint and \
+ reason.unmask_hint.key == 'p_mask':
+ keyword = reason.unmask_hint.value
+
+ comment, filename = portage.getmaskingreason(
+ pkg.cpv, metadata=pkg._metadata,
+ settings=pkgsettings,
+ portdb=pkg.root_config.trees["porttree"].dbapi,
+ return_location=True)
+
+ p_mask_change_msg[root].append(self._get_dep_chain_as_comment(pkg))
+ if filename:
+ p_mask_change_msg[root].append("# %s:\n" % filename)
+ if comment:
+ comment = [line for line in
+ comment.splitlines() if line]
+ for line in comment:
+ p_mask_change_msg[root].append("%s\n" % line)
+ if autounmask_unrestricted_atoms:
+ if is_latest:
+ p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
+ elif is_latest_in_slot:
+ p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.slot))
+ else:
+ p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
+ else:
+ p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
+
+ use_changes_msg = {}
+ for pkg, needed_use_config_change in self._dynamic_config._needed_use_config_changes.items():
+ self._show_merge_list()
+ if pkg in self._dynamic_config.digraph:
+ root = pkg.root
+ roots.add(root)
+ use_changes_msg.setdefault(root, [])
+ # NOTE: For USE changes, call check_if_latest with
+ # check_visibility=True, since we want to generate
+ # a >= atom if possible. Don't do this for keyword
+ # or mask changes, since that may cause undesired
+ # versions to be unmasked! See bug #536392.
+ is_latest, is_latest_in_slot = check_if_latest(
+ pkg, check_visibility=True)
+ changes = needed_use_config_change[1]
+ adjustments = []
+ for flag, state in changes.items():
+ if state:
+ adjustments.append(flag)
+ else:
+ adjustments.append("-" + flag)
+ use_changes_msg[root].append(self._get_dep_chain_as_comment(pkg, unsatisfied_dependency=True))
+ if is_latest:
+ use_changes_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
+ elif is_latest_in_slot:
+ use_changes_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, " ".join(adjustments)))
+ else:
+ use_changes_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
+
+ license_msg = {}
+ for pkg, missing_licenses in self._dynamic_config._needed_license_changes.items():
+ self._show_merge_list()
+ if pkg in self._dynamic_config.digraph:
+ root = pkg.root
+ roots.add(root)
+ license_msg.setdefault(root, [])
+ is_latest, is_latest_in_slot = check_if_latest(pkg)
+
+ license_msg[root].append(self._get_dep_chain_as_comment(pkg))
+ if is_latest:
+ license_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
+ elif is_latest_in_slot:
+ license_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, " ".join(sorted(missing_licenses))))
+ else:
+ license_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
+
+ def find_config_file(abs_user_config, file_name):
+ """
+ Searches /etc/portage for an appropriate file to append changes to.
+ If the file_name is a file it is returned, if it is a directory, the
+ last file in it is returned. The order of traversal is identical to
+ portage.util.grablines(recursive=True).
+
+ file_name - String containing a file name like "package.use"
+ return value - String. Absolute path of file to write to. None if
+ no suitable file exists.
+ """
+ file_path = os.path.join(abs_user_config, file_name)
+
+ try:
+ os.lstat(file_path)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ # The file doesn't exist, so we'll
+ # simply create it.
+ return file_path
+
+ # Disk or file system trouble?
+ return None
+
+ last_file_path = None
+ stack = [file_path]
+ while stack:
+ p = stack.pop()
+ try:
+ st = os.stat(p)
+ except OSError:
+ pass
+ else:
+ if stat.S_ISREG(st.st_mode):
+ last_file_path = p
+ elif stat.S_ISDIR(st.st_mode):
+ if os.path.basename(p) in VCS_DIRS:
+ continue
+ try:
+ contents = os.listdir(p)
+ except OSError:
+ pass
+ else:
+ contents.sort(reverse=True)
+ for child in contents:
+ if child.startswith(".") or \
+ child.endswith("~"):
+ continue
+ stack.append(os.path.join(p, child))
+ # If the directory is empty, create a new file named
+ # zz-autounmask to append the changes to.
+ if last_file_path is None:
+ last_file_path = os.path.join(file_path, "zz-autounmask")
+ with open(last_file_path, "a+") as default:
+ default.write("# " + file_name)
+
+ return last_file_path
+
+ write_to_file = autounmask_write and not pretend
+ # Make sure we have a file to write to before doing any write.
+ file_to_write_to = {}
+ problems = []
+ if write_to_file:
+ for root in roots:
+ settings = self._frozen_config.roots[root].settings
+ abs_user_config = os.path.join(
+ settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
+
+ if root in unstable_keyword_msg:
+ if not os.path.exists(os.path.join(abs_user_config,
+ "package.keywords")):
+ filename = "package.accept_keywords"
+ else:
+ filename = "package.keywords"
+ file_to_write_to[(abs_user_config, "package.keywords")] = \
+ find_config_file(abs_user_config, filename)
+
+ if root in p_mask_change_msg:
+ file_to_write_to[(abs_user_config, "package.unmask")] = \
+ find_config_file(abs_user_config, "package.unmask")
+
+ if root in use_changes_msg:
+ file_to_write_to[(abs_user_config, "package.use")] = \
+ find_config_file(abs_user_config, "package.use")
+
+ if root in license_msg:
+ file_to_write_to[(abs_user_config, "package.license")] = \
+ find_config_file(abs_user_config, "package.license")
+
+ for (abs_user_config, f), path in file_to_write_to.items():
+ if path is None:
+ problems.append("!!! No file to write for '%s'\n" % os.path.join(abs_user_config, f))
+
+ write_to_file = not problems
+
+ def format_msg(lines):
+ lines = lines[:]
+ for i, line in enumerate(lines):
+ if line.startswith("#"):
+ continue
+ lines[i] = colorize("INFORM", line.rstrip()) + "\n"
+ return "".join(lines)
+
+ for root in roots:
+ settings = self._frozen_config.roots[root].settings
+ abs_user_config = os.path.join(
+ settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
+
+ if len(roots) > 1:
+ writemsg("\nFor %s:\n" % abs_user_config, noiselevel=-1)
+
+ def _writemsg(reason, file):
+ writemsg(('\nThe following %s are necessary to proceed:\n'
+ ' (see "%s" in the portage(5) man page for more details)\n')
+ % (colorize('BAD', reason), file), noiselevel=-1)
+
+ if root in unstable_keyword_msg:
+ _writemsg('keyword changes', 'package.accept_keywords')
+ writemsg(format_msg(unstable_keyword_msg[root]), noiselevel=-1)
+
+ if root in p_mask_change_msg:
+ _writemsg('mask changes', 'package.unmask')
+ writemsg(format_msg(p_mask_change_msg[root]), noiselevel=-1)
+
+ if root in use_changes_msg:
+ _writemsg('USE changes', 'package.use')
+ writemsg(format_msg(use_changes_msg[root]), noiselevel=-1)
+
+ if root in license_msg:
+ _writemsg('license changes', 'package.license')
+ writemsg(format_msg(license_msg[root]), noiselevel=-1)
+
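+ # Build a ConfigProtect instance per root so that write_changes() can
+ # divert writes to protected files through new_protect_filename().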
+ protect_obj = {}
+ if write_to_file and not autounmask_continue:
+ for root in roots:
+ settings = self._frozen_config.roots[root].settings
+ protect_obj[root] = ConfigProtect(
+ settings["PORTAGE_CONFIGROOT"],
+ shlex_split(settings.get("CONFIG_PROTECT", "")),
+ shlex_split(settings.get("CONFIG_PROTECT_MASK", "")),
+ case_insensitive=("case-insensitive-fs"
+ in settings.features))
+
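+ # write_changes appends the generated lines to the chosen config file,
+ # diverting the write through new_protect_filename() when the target is
+ # protected by CONFIG_PROTECT and autounmask_continue is not in effect.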
+ def write_changes(root, changes, file_to_write_to):
+ file_contents = None
+ try:
+ with io.open(
+ _unicode_encode(file_to_write_to,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'],
+ errors='replace') as f:
+ file_contents = f.readlines()
+ except IOError as e:
+ if e.errno == errno.ENOENT:
+ file_contents = []
+ else:
+ problems.append("!!! Failed to read '%s': %s\n" % \
+ (file_to_write_to, e))
+ if file_contents is not None:
+ file_contents.extend(changes)
+ if (not autounmask_continue and
+ protect_obj[root].isprotected(file_to_write_to)):
+ # We want to force new_protect_filename to ensure
+ # that the user will see all our changes via
+ # dispatch-conf, even if file_to_write_to doesn't
+ # exist yet, so we specify force=True.
+ file_to_write_to = new_protect_filename(file_to_write_to,
+ force=True)
+ try:
+ write_atomic(file_to_write_to, "".join(file_contents))
+ except PortageException:
+ problems.append("!!! Failed to write '%s'\n" % file_to_write_to)
+
+ if not quiet and (p_mask_change_msg or masked_by_missing_keywords):
+ msg = [
+ "",
+ "NOTE: The --autounmask-keep-masks option will prevent emerge",
+ " from creating package.unmask or ** keyword changes."
+ ]
+ for line in msg:
+ if line:
+ line = colorize("INFORM", line)
+ writemsg(line + "\n", noiselevel=-1)
+
+ if ask and write_to_file and file_to_write_to:
+ prompt = "\nWould you like to add these " + \
+ "changes to your config files?"
+ if self.query(prompt, enter_invalid) == 'No':
+ write_to_file = False
+
+ if write_to_file and file_to_write_to:
+ for root in roots:
+ settings = self._frozen_config.roots[root].settings
+ abs_user_config = os.path.join(
+ settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
+ ensure_dirs(abs_user_config)
+
+ if root in unstable_keyword_msg:
+ write_changes(root, unstable_keyword_msg[root],
+ file_to_write_to.get((abs_user_config, "package.keywords")))
+
+ if root in p_mask_change_msg:
+ write_changes(root, p_mask_change_msg[root],
+ file_to_write_to.get((abs_user_config, "package.unmask")))
+
+ if root in use_changes_msg:
+ write_changes(root, use_changes_msg[root],
+ file_to_write_to.get((abs_user_config, "package.use")))
+
+ if root in license_msg:
+ write_changes(root, license_msg[root],
+ file_to_write_to.get((abs_user_config, "package.license")))
+
+ if problems:
+ writemsg("\nThe following problems occurred while writing autounmask changes:\n", \
+ noiselevel=-1)
+ writemsg("".join(problems), noiselevel=-1)
+ elif write_to_file and roots:
+ writemsg("\nAutounmask changes successfully written.\n",
+ noiselevel=-1)
+ if autounmask_continue:
+ return True
+ for root in roots:
+ chk_updated_cfg_files(root,
+ [os.path.join(os.sep, USER_CONFIG_PATH)])
+ elif not pretend and not autounmask_write and roots:
+ writemsg("\nUse --autounmask-write to write changes to config files (honoring\n"
+ "CONFIG_PROTECT). Carefully examine the list of proposed changes,\n"
+ "paying special attention to mask or keyword changes that may expose\n"
+ "experimental or unstable packages.\n",
+ noiselevel=-1)
+
+ if self._dynamic_config._autounmask_backtrack_disabled:
+ msg = [
+ "In order to avoid wasting time, backtracking has terminated early",
+ "due to the above autounmask change(s). The --autounmask-backtrack=y",
+ "option can be used to force further backtracking, but there is no",
+ "guarantee that it will produce a solution.",
+ ]
+ writemsg("\n", noiselevel=-1)
+ for line in msg:
+ writemsg(" %s %s\n" % (colorize("WARN", "*"), line),
+ noiselevel=-1)
+
+ def display_problems(self):
+ """
+ Display problems with the dependency graph such as slot collisions.
+ This is called internally by display() to show the problems _after_
+ the merge list where it is most likely to be seen, but if display()
+ is not going to be called then this method should be called explicitly
+ to ensure that the user is notified of problems with the graph.
+ """
+
+ if self._dynamic_config._circular_deps_for_display is not None:
+ self._show_circular_deps(
+ self._dynamic_config._circular_deps_for_display)
+
+ unresolved_conflicts = False
+ have_slot_conflict = any(self._dynamic_config._package_tracker.slot_conflicts())
+ if have_slot_conflict:
+ unresolved_conflicts = True
+ self._show_slot_collision_notice()
+ if self._dynamic_config._unsatisfied_blockers_for_display is not None:
+ unresolved_conflicts = True
+ self._show_unsatisfied_blockers(
+ self._dynamic_config._unsatisfied_blockers_for_display)
+
+ # Only show missed updates if there are no unresolved conflicts,
+ # since they may be irrelevant after the conflicts are solved.
+ if not unresolved_conflicts:
+ self._show_missed_update()
+
+ if self._frozen_config.myopts.get("--verbose-slot-rebuilds", 'y') != 'n':
+ self._compute_abi_rebuild_info()
+ self._show_abi_rebuild_info()
+
+ self._show_ignored_binaries()
+
+ self._changed_deps_report()
+
+ self._display_autounmask()
+
+ for depgraph_sets in self._dynamic_config.sets.values():
+ for pset in depgraph_sets.sets.values():
+ for error_msg in pset.errors:
+ writemsg_level("%s\n" % (error_msg,),
+ level=logging.ERROR, noiselevel=-1)
+
+ # TODO: Add generic support for "set problem" handlers so that
+ # the below warnings aren't special cases for world only.
+
+ if self._dynamic_config._missing_args:
+ world_problems = False
+ if "world" in self._dynamic_config.sets[
+ self._frozen_config.target_root].sets:
+ # Filter out indirect members of world (from nested sets)
+ # since only direct members of world are desired here.
+ world_set = self._frozen_config.roots[self._frozen_config.target_root].sets["selected"]
+ for arg, atom in self._dynamic_config._missing_args:
+ if arg.name in ("selected", "world") and atom in world_set:
+ world_problems = True
+ break
+
+ if world_problems:
+ writemsg("\n!!! Problems have been " + \
+ "detected with your world file\n",
+ noiselevel=-1)
+ writemsg("!!! Please run " + \
+ green("emaint --check world")+"\n\n",
+ noiselevel=-1)
+
+ if self._dynamic_config._missing_args:
+ writemsg("\n" + colorize("BAD", "!!!") + \
+ " Ebuilds for the following packages are either all\n",
+ noiselevel=-1)
+ writemsg(colorize("BAD", "!!!") + \
+ " masked or don't exist:\n",
+ noiselevel=-1)
+ writemsg(" ".join(str(atom) for arg, atom in \
+ self._dynamic_config._missing_args) + "\n",
+ noiselevel=-1)
+
+ if self._dynamic_config._pprovided_args:
+ arg_refs = {}
+ for arg, atom in self._dynamic_config._pprovided_args:
+ if isinstance(arg, SetArg):
+ parent = arg.name
+ arg_atom = (atom, atom)
+ else:
+ parent = "args"
+ arg_atom = (arg.arg, atom)
+ refs = arg_refs.setdefault(arg_atom, [])
+ if parent not in refs:
+ refs.append(parent)
+ msg = []
+ msg.append(bad("\nWARNING: "))
+ if len(self._dynamic_config._pprovided_args) > 1:
+ msg.append("Requested packages will not be " + \
+ "merged because they are listed in\n")
+ else:
+ msg.append("A requested package will not be " + \
+ "merged because it is listed in\n")
+ msg.append("package.provided:\n\n")
+ problems_sets = set()
+ for (arg, atom), refs in arg_refs.items():
+ ref_string = ""
+ if refs:
+ problems_sets.update(refs)
+ refs.sort()
+ ref_string = ", ".join(["'%s'" % name for name in refs])
+ ref_string = " pulled in by " + ref_string
+ msg.append(" %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
+ msg.append("\n")
+ if "selected" in problems_sets or "world" in problems_sets:
+ msg.append("This problem can be solved in one of the following ways:\n\n")
+ msg.append(" A) Use emaint to clean offending packages from world (if not installed).\n")
+ msg.append(" B) Uninstall offending packages (cleans them from world).\n")
+ msg.append(" C) Remove offending entries from package.provided.\n\n")
+ msg.append("The best course of action depends on the reason that an offending\n")
+ msg.append("package.provided entry exists.\n\n")
+ writemsg("".join(msg), noiselevel=-1)
+
+ masked_packages = []
+ for pkg in self._dynamic_config._masked_license_updates:
+ root_config = pkg.root_config
+ pkgsettings = self._frozen_config.pkgsettings[pkg.root]
+ mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
+ masked_packages.append((root_config, pkgsettings,
+ pkg.cpv, pkg.repo, pkg._metadata, mreasons))
+ if masked_packages:
+ writemsg("\n" + colorize("BAD", "!!!") + \
+ " The following updates are masked by LICENSE changes:\n",
+ noiselevel=-1)
+ show_masked_packages(masked_packages)
+ show_mask_docs()
+ writemsg("\n", noiselevel=-1)
+
+ masked_packages = []
+ for pkg in self._dynamic_config._masked_installed:
+ root_config = pkg.root_config
+ pkgsettings = self._frozen_config.pkgsettings[pkg.root]
+ mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled)
+ masked_packages.append((root_config, pkgsettings,
+ pkg.cpv, pkg.repo, pkg._metadata, mreasons))
+ if masked_packages:
+ writemsg("\n" + colorize("BAD", "!!!") + \
+ " The following installed packages are masked:\n",
+ noiselevel=-1)
+ show_masked_packages(masked_packages)
+ show_mask_docs()
+ writemsg("\n", noiselevel=-1)
+
+ for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
+ self._show_unsatisfied_dep(*pargs, **kwargs)
+
+ if self._dynamic_config._buildpkgonly_deps_unsatisfied:
+ self._show_merge_list()
+ writemsg("\n!!! --buildpkgonly requires all "
+ "dependencies to be merged.\n", noiselevel=-1)
+ writemsg("!!! Cannot merge requested packages. "
+ "Merge deps and try again.\n\n", noiselevel=-1)
+
+ def saveNomergeFavorites(self):
+ """Find atoms in favorites that are not in the mergelist and add them
+ to the world file if necessary."""
+ for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
+ "--oneshot", "--onlydeps", "--pretend"):
+ if x in self._frozen_config.myopts:
+ return
+ root_config = self._frozen_config.roots[self._frozen_config.target_root]
+ world_set = root_config.sets["selected"]
+
+ world_locked = False
+ if hasattr(world_set, "lock"):
+ world_set.lock()
+ world_locked = True
+
+ if hasattr(world_set, "load"):
+ world_set.load() # maybe it's changed on disk
+
+ args_set = self._dynamic_config.sets[
+ self._frozen_config.target_root].sets['__non_set_args__']
+ added_favorites = set()
+ for x in self._dynamic_config._set_nodes:
+ if x.operation != "nomerge":
+ continue
+
+ if x.root != root_config.root:
+ continue
+
+ try:
+ myfavkey = create_world_atom(x, args_set, root_config)
+ if myfavkey:
+ if myfavkey in added_favorites:
+ continue
+ added_favorites.add(myfavkey)
+ except portage.exception.InvalidDependString as e:
+ writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
+ (x.cpv, e), noiselevel=-1)
+ writemsg("!!! see '%s'\n\n" % os.path.join(
+ x.root, portage.VDB_PATH, x.cpv, "PROVIDE"), noiselevel=-1)
+ del e
+ all_added = []
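+ # Also record any explicitly requested sets (other than world/selected)
+ # that are world candidates and not yet listed in the world_sets file.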
+ for arg in self._dynamic_config._initial_arg_list:
+ if not isinstance(arg, SetArg):
+ continue
+ if arg.root_config.root != root_config.root:
+ continue
+ if arg.internal:
+ # __auto_* sets
+ continue
+ k = arg.name
+ if k in ("selected", "world") or \
+ not root_config.sets[k].world_candidate:
+ continue
+ s = SETPREFIX + k
+ if s in world_set:
+ continue
+ all_added.append(SETPREFIX + k)
+ all_added.extend(added_favorites)
+ all_added.sort()
+ if all_added:
+ skip = False
+ if "--ask" in self._frozen_config.myopts:
+ writemsg_stdout("\n", noiselevel=-1)
+ for a in all_added:
+ writemsg_stdout(" %s %s\n" % (colorize("GOOD", "*"), a),
+ noiselevel=-1)
+ writemsg_stdout("\n", noiselevel=-1)
+ prompt = "Would you like to add these packages to your world " \
+ "favorites?"
+ enter_invalid = '--ask-enter-invalid' in \
+ self._frozen_config.myopts
+ if self.query(prompt, enter_invalid) == "No":
+ skip = True
+
+ if not skip:
+ for a in all_added:
+ if a.startswith(SETPREFIX):
+ filename = "world_sets"
+ else:
+ filename = "world"
+ writemsg_stdout(
+ ">>> Recording %s in \"%s\" favorites file...\n" %
+ (colorize("INFORM", _unicode(a)), filename), noiselevel=-1)
+ world_set.update(all_added)
+
+ if world_locked:
+ world_set.unlock()
+
+ def _loadResumeCommand(self, resume_data, skip_masked=True,
+ skip_missing=True):
+ """
+ Add a resume command to the graph and validate it in the process. This
+ will raise a PackageNotFound exception if a package is not available.
+ """
+
+ self._load_vdb()
+
+ if not isinstance(resume_data, dict):
+ return False
+
+ mergelist = resume_data.get("mergelist")
+ if not isinstance(mergelist, list):
+ mergelist = []
+
+ favorites = resume_data.get("favorites")
+ if isinstance(favorites, list):
+ args = self._load_favorites(favorites)
+ else:
+ args = []
+
+ serialized_tasks = []
+ masked_tasks = []
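+ # Each resume mergelist entry has the form [pkg_type, root, cpv, action].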
+ for x in mergelist:
+ if not (isinstance(x, list) and len(x) == 4):
+ continue
+ pkg_type, myroot, pkg_key, action = x
+ if pkg_type not in self.pkg_tree_map:
+ continue
+ if action != "merge":
+ continue
+ root_config = self._frozen_config.roots[myroot]
+
+ # Use the resume "favorites" list to see if a repo was specified
+ # for this package.
+ depgraph_sets = self._dynamic_config.sets[root_config.root]
+ repo = None
+ for atom in depgraph_sets.atoms.getAtoms():
+ if atom.repo and portage.dep.match_from_list(atom, [pkg_key]):
+ repo = atom.repo
+ break
+
+ atom = "=" + pkg_key
+ if repo:
+ atom = atom + _repo_separator + repo
+
+ try:
+ atom = Atom(atom, allow_repo=True)
+ except InvalidAtom:
+ continue
+
+ pkg = None
+ for pkg in self._iter_match_pkgs(root_config, pkg_type, atom):
+ if not self._pkg_visibility_check(pkg) or \
+ self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
+ modified_use=self._pkg_use_enabled(pkg)):
+ continue
+ break
+
+ if pkg is None:
+ # It does not exist or it is corrupt.
+ if skip_missing:
+ # TODO: log these somewhere
+ continue
+ raise portage.exception.PackageNotFound(pkg_key)
+
+ if "merge" == pkg.operation and \
+ self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
+ modified_use=self._pkg_use_enabled(pkg)):
+ continue
+
+ if "merge" == pkg.operation and not self._pkg_visibility_check(pkg):
+ if skip_masked:
+ masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
+ else:
+ self._dynamic_config._unsatisfied_deps_for_display.append(
+ ((pkg.root, "="+pkg.cpv), {"myparent":None}))
+
+ self._dynamic_config._package_tracker.add_pkg(pkg)
+ serialized_tasks.append(pkg)
+ self._spinner_update()
+
+ if self._dynamic_config._unsatisfied_deps_for_display:
+ return False
+
+ if not serialized_tasks or "--nodeps" in self._frozen_config.myopts:
+ self._dynamic_config._serialized_tasks_cache = serialized_tasks
+ self._dynamic_config._scheduler_graph = self._dynamic_config.digraph
+ else:
+ self._select_package = self._select_pkg_from_graph
+ self._dynamic_config.myparams["selective"] = True
+ # Always traverse deep dependencies in order to account for
+ # potentially unsatisfied dependencies of installed packages.
+ # This is necessary for correct --keep-going or --resume operation
+ # in case a package from a group of circularly dependent packages
+ # fails. In this case, a package which has recently been installed
+ # may have an unsatisfied circular dependency (pulled in by
+ # PDEPEND, for example). So, even though a package is already
+ # installed, it may not have all of its dependencies satisfied, so
+ # it may not be usable. If such a package is in the subgraph of
+ # deep dependencies of a scheduled build, that build needs to
+ # be cancelled. In order for this type of situation to be
+ # recognized, deep traversal of dependencies is required.
+ self._dynamic_config.myparams["deep"] = True
+
+ for task in serialized_tasks:
+ if isinstance(task, Package) and \
+ task.operation == "merge":
+ if not self._add_pkg(task, None):
+ return False
+
+ # Packages for argument atoms need to be explicitly
+ # added via _add_pkg() so that they are included in the
+ # digraph (needed at least for --tree display).
+ for arg in self._expand_set_args(args, add_to_digraph=True):
+ for atom in arg.pset.getAtoms():
+ pkg, existing_node = self._select_package(
+ arg.root_config.root, atom)
+ if existing_node is None and \
+ pkg is not None:
+ if not self._add_pkg(pkg, Dependency(atom=atom,
+ root=pkg.root, parent=arg)):
+ return False
+
+ # Allow unsatisfied deps here to avoid showing a masking
+ # message for an unsatisfied dep that isn't necessarily
+ # masked.
+ if not self._create_graph(allow_unsatisfied=True):
+ return False
+
+ unsatisfied_deps = []
+ for dep in self._dynamic_config._unsatisfied_deps:
+ if not isinstance(dep.parent, Package):
+ continue
+ if dep.parent.operation == "merge":
+ unsatisfied_deps.append(dep)
+ continue
+
+ # For unsatisfied deps of installed packages, only account for
+ # them if they are in the subgraph of dependencies of a package
+ # which is scheduled to be installed.
+ unsatisfied_install = False
+ traversed = set()
+ dep_stack = self._dynamic_config.digraph.parent_nodes(dep.parent)
+ while dep_stack:
+ node = dep_stack.pop()
+ if not isinstance(node, Package):
+ continue
+ if node.operation == "merge":
+ unsatisfied_install = True
+ break
+ if node in traversed:
+ continue
+ traversed.add(node)
+ dep_stack.extend(self._dynamic_config.digraph.parent_nodes(node))
+
+ if unsatisfied_install:
+ unsatisfied_deps.append(dep)
+
+ if masked_tasks or unsatisfied_deps:
+ # This probably means that a required package
+ # was dropped via --skipfirst. It makes the
+ # resume list invalid, so convert it to a
+ # UnsatisfiedResumeDep exception.
+ raise self.UnsatisfiedResumeDep(self,
+ masked_tasks + unsatisfied_deps)
+ self._dynamic_config._serialized_tasks_cache = None
+ try:
+ self.altlist()
+ except self._unknown_internal_error:
+ return False
+
+ return True
+
+ def _load_favorites(self, favorites):
+ """
+ Use a list of favorites to resume state from a
+ previous select_files() call. This creates similar
+ DependencyArg instances to those that would have
+ been created by the original select_files() call.
+ This allows Package instances to be matched with
+ DependencyArg instances during graph creation.
+ """
+ root_config = self._frozen_config.roots[self._frozen_config.target_root]
+ sets = root_config.sets
+ depgraph_sets = self._dynamic_config.sets[root_config.root]
+ args = []
+ for x in favorites:
+ if not isinstance(x, basestring):
+ continue
+ if x in ("system", "world"):
+ x = SETPREFIX + x
+ if x.startswith(SETPREFIX):
+ s = x[len(SETPREFIX):]
+ if s not in sets:
+ continue
+ if s in depgraph_sets.sets:
+ continue
+ pset = sets[s]
+ depgraph_sets.sets[s] = pset
+ args.append(SetArg(arg=x, pset=pset,
+ root_config=root_config))
+ else:
+ try:
+ x = Atom(x, allow_repo=True)
+ except portage.exception.InvalidAtom:
+ continue
+ args.append(AtomArg(arg=x, atom=x,
+ root_config=root_config))
+
+ self._set_args(args)
+ return args
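+ # For example, a favorites list like ["@world", "sys-apps/portage"]
+ # yields a SetArg for the world set and an AtomArg for the package
+ # atom, while unknown sets and invalid atoms are silently skipped.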
+
+ class UnsatisfiedResumeDep(portage.exception.PortageException):
+ """
+ A dependency of a resume list is not installed. This
+ can occur when a required package is dropped from the
+ merge list via --skipfirst.
+ """
+ def __init__(self, depgraph, value):
+ portage.exception.PortageException.__init__(self, value)
+ self.depgraph = depgraph
+
+ class _internal_exception(portage.exception.PortageException):
+ def __init__(self, value=""):
+ portage.exception.PortageException.__init__(self, value)
+
+ class _unknown_internal_error(_internal_exception):
+ """
+ Used by the depgraph internally to terminate graph creation.
+ The specific reason for the failure should have been dumped
+ to stderr; unfortunately, the exact reason for the failure
+ may not be known.
+ """
+
+ class _serialize_tasks_retry(_internal_exception):
+ """
+ This is raised by the _serialize_tasks() method when it needs to
+ be called again for some reason. The only case that it's currently
+ used for is when neglected dependencies need to be added to the
+ graph in order to avoid making a potentially unsafe decision.
+ """
+
+ class _backtrack_mask(_internal_exception):
+ """
+ This is raised by _show_unsatisfied_dep() when it's called with
+ check_backtrack=True and a matching package has been masked by
+ backtracking.
+ """
+
+ class _autounmask_breakage(_internal_exception):
+ """
+ This is raised by _show_unsatisfied_dep() when it's called with
+ check_autounmask_breakage=True and a matching package has been
+ disqualified due to autounmask changes.
+ """
+
+ def need_restart(self):
+ return self._dynamic_config._need_restart and \
+ not self._dynamic_config._skip_restart
+
+ def need_config_change(self):
+ """
+ Returns true if backtracking should terminate due to a needed
+ configuration change.
+ """
+ if (self._dynamic_config._success_without_autounmask or
+ self._dynamic_config._required_use_unsatisfied):
+ return True
+
+ if (self._dynamic_config._slot_conflict_handler is None and
+ not self._accept_blocker_conflicts() and
+ any(self._dynamic_config._package_tracker.slot_conflicts())):
+ self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
+ if self._dynamic_config._slot_conflict_handler.changes:
+ # Terminate backtracking early if the slot conflict
+ # handler finds some changes to suggest. The case involving
+ # sci-libs/L and sci-libs/M in SlotCollisionTestCase will
+ # otherwise fail with --autounmask-backtrack=n, since
+ # backtracking will eventually lead to some autounmask
+ # changes. Changes suggested by the slot conflict handler
+ # are more likely to be useful.
+ return True
+
+ if (self._dynamic_config._allow_backtracking and
+ self._frozen_config.myopts.get("--autounmask-backtrack") != 'y' and
+ self._have_autounmask_changes()):
+
+ if (self._frozen_config.myopts.get("--autounmask-continue") is True and
+ self._frozen_config.myopts.get("--autounmask-backtrack") != 'n'):
+ # --autounmask-continue implies --autounmask-backtrack=y behavior,
+ # for backward compatibility.
+ return False
+
+ # This disables backtracking when there are autounmask
+ # config changes. The display_problems method will notify
+ # the user that --autounmask-backtrack=y can be used to
+ # force backtracking in this case.
+ self._dynamic_config._autounmask_backtrack_disabled = True
+ return True
+
+ return False
+
+ def _have_autounmask_changes(self):
+ digraph_nodes = self._dynamic_config.digraph.nodes
+ return (any(x in digraph_nodes for x in
+ self._dynamic_config._needed_unstable_keywords) or
+ any(x in digraph_nodes for x in
+ self._dynamic_config._needed_p_mask_changes) or
+ any(x in digraph_nodes for x in
+ self._dynamic_config._needed_use_config_changes) or
+ any(x in digraph_nodes for x in
+ self._dynamic_config._needed_license_changes))
+
+ def need_config_reload(self):
+ return self._dynamic_config._need_config_reload
+
+ def autounmask_breakage_detected(self):
+ try:
+ for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
+ self._show_unsatisfied_dep(
+ *pargs, check_autounmask_breakage=True, **kwargs)
+ except self._autounmask_breakage:
+ return True
+ return False
+
+ def get_backtrack_infos(self):
+ return self._dynamic_config._backtrack_infos
+
+
+class _dep_check_composite_db(dbapi):
+ """
+ A dbapi-like interface that is optimized for use in dep_check() calls.
+ This is built on top of the existing depgraph package selection logic.
+ Some packages that have been added to the graph may be masked from this
+ view in order to influence the atom preference selection that occurs
+ via dep_check().
+ """
+ def __init__(self, depgraph, root):
+ dbapi.__init__(self)
+ self._depgraph = depgraph
+ self._root = root
+ self._match_cache = {}
+ self._cpv_pkg_map = {}
+
+ def _clear_cache(self):
+ self._match_cache.clear()
+ self._cpv_pkg_map.clear()
+
+ def cp_list(self, cp):
+ """
+ Emulate cp_list just so it can be used to check for existence
+ of new-style virtuals. Since it's a waste of time to return
+ more than one cpv for this use case, a maximum of one cpv will
+ be returned.
+ """
+ if isinstance(cp, Atom):
+ atom = cp
+ else:
+ atom = Atom(cp)
+ ret = []
+ for pkg in self._depgraph._iter_match_pkgs_any(
+ self._depgraph._frozen_config.roots[self._root], atom):
+ if pkg.cp == cp:
+ ret.append(pkg.cpv)
+ break
+
+ return ret
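+ # For example, cp_list("virtual/jdk") would return a single-element
+ # list such as ["virtual/jdk-11"] when any matching package exists,
+ # or [] otherwise (the exact version depends on the configured trees).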
+
+ def match_pkgs(self, atom):
+ cache_key = (atom, atom.unevaluated_atom)
+ ret = self._match_cache.get(cache_key)
+ if ret is not None:
+ for pkg in ret:
+ self._cpv_pkg_map[pkg.cpv] = pkg
+ return ret[:]
+
+ atom_set = InternalPackageSet(initial_atoms=(atom,))
+ ret = []
+ pkg, existing = self._depgraph._select_package(self._root, atom)
+
+ if pkg is not None and self._visible(pkg, atom_set):
+ ret.append(pkg)
+
+ if pkg is not None and \
+ atom.sub_slot is None and \
+ pkg.cp.startswith("virtual/") and \
+ (("remove" not in self._depgraph._dynamic_config.myparams and
+ "--update" not in self._depgraph._frozen_config.myopts) or
+ not ret):
+ # For new-style virtual lookahead that occurs inside dep_check()
+ # for bug #141118, examine all slots. This is needed so that newer
+ # slots will not unnecessarily be pulled in when a satisfying lower
+ # slot is already installed. For example, if virtual/jdk-1.5 is
+ # satisfied via gcj-jdk then there's no need to pull in a newer
+ # slot to satisfy a virtual/jdk dependency, unless --update is
+ # enabled.
+ sub_slots = set()
+ resolved_sub_slots = set()
+ for virt_pkg in self._depgraph._iter_match_pkgs_any(
+ self._depgraph._frozen_config.roots[self._root], atom):
+ if virt_pkg.cp != pkg.cp:
+ continue
+ sub_slots.add((virt_pkg.slot, virt_pkg.sub_slot))
+
+ sub_slot_key = (pkg.slot, pkg.sub_slot)
+ if ret:
+ # We've added pkg to ret already, and only one package
+ # per slot/sub_slot is desired here.
+ sub_slots.discard(sub_slot_key)
+ resolved_sub_slots.add(sub_slot_key)
+ else:
+ sub_slots.add(sub_slot_key)
+
+ while sub_slots:
+ slot, sub_slot = sub_slots.pop()
+ slot_atom = atom.with_slot("%s/%s" % (slot, sub_slot))
+ pkg, existing = self._depgraph._select_package(
+ self._root, slot_atom)
+ if not pkg:
+ continue
+ if not self._visible(pkg, atom_set,
+ avoid_slot_conflict=False):
+ # Try to force a virtual update to be pulled in
+ # when appropriate for bug #526160.
+ selected = pkg
+ for candidate in \
+ self._iter_virt_update(pkg, atom_set):
+
+ if candidate.slot != slot:
+ continue
+
+ if (candidate.slot, candidate.sub_slot) in \
+ resolved_sub_slots:
+ continue
+
+ if selected is None or \
+ selected < candidate:
+ selected = candidate
+
+ if selected is pkg:
+ continue
+ pkg = selected
+
+ resolved_sub_slots.add((pkg.slot, pkg.sub_slot))
+ ret.append(pkg)
+
+ if len(ret) > 1:
+ ret = sorted(set(ret))
+
+ self._match_cache[cache_key] = ret
+ for pkg in ret:
+ self._cpv_pkg_map[pkg.cpv] = pkg
+ return ret[:]
+
+ def _visible(self, pkg, atom_set, avoid_slot_conflict=True,
+ probe_virt_update=True):
+ if pkg.installed and not self._depgraph._want_installed_pkg(pkg):
+ return False
+ if pkg.installed and \
+ (pkg.masks or not self._depgraph._pkg_visibility_check(pkg)):
+ # Account for packages with masks (like KEYWORDS masks)
+ # that are usually ignored in visibility checks for
+ # installed packages, in order to handle cases like
+ # bug #350285.
+ myopts = self._depgraph._frozen_config.myopts
+ use_ebuild_visibility = myopts.get(
+ '--use-ebuild-visibility', 'n') != 'n'
+ avoid_update = "--update" not in myopts and \
+ "remove" not in self._depgraph._dynamic_config.myparams
+ usepkgonly = "--usepkgonly" in myopts
+ if not avoid_update:
+ if not use_ebuild_visibility and usepkgonly:
+ return False
+ elif not self._depgraph._equiv_ebuild_visible(pkg):
+ return False
+
+ if pkg.cp.startswith("virtual/"):
+
+ if not self._depgraph._virt_deps_visible(
+ pkg, ignore_use=True):
+ return False
+
+ if probe_virt_update and \
+ self._have_virt_update(pkg, atom_set):
+ # Force virtual updates to be pulled in when appropriate
+ # for bug #526160.
+ return False
+
+ if not avoid_slot_conflict:
+ # This is useful when trying to pull in virtual updates,
+ # since we don't want another instance that was previously
+ # pulled in to mask an update that we're trying to pull
+ # into the same slot.
+ return True
+
+ # Use reversed iteration in order to get descending order here,
+ # so that the highest version involved in a slot conflict is
+ # selected (see bug 554070).
+ in_graph = next(reversed(list(
+ self._depgraph._dynamic_config._package_tracker.match(
+ self._root, pkg.slot_atom, installed=False))), None)
+
+ if in_graph is None:
+ # Mask choices for packages which are not the highest visible
+ # version within their slot (since they usually trigger slot
+ # conflicts).
+ highest_visible, in_graph = self._depgraph._select_package(
+ self._root, pkg.slot_atom)
+ # Note: highest_visible is not necessarily the real highest
+ # visible, especially when --update is not enabled, so use
+ # < operator instead of !=.
+ if (highest_visible is not None and pkg < highest_visible
+ and atom_set.findAtomForPackage(highest_visible,
+ modified_use=self._depgraph._pkg_use_enabled(highest_visible))):
+ return False
+ elif in_graph != pkg:
+ # Mask choices for packages that would trigger a slot
+ # conflict with a previously selected package.
+ if not atom_set.findAtomForPackage(in_graph,
+ modified_use=self._depgraph._pkg_use_enabled(in_graph)):
+ # Only mask if the graph package matches the given
+ # atom (fixes bug #515230).
+ return True
+ return False
+ return True
+
+ def _iter_virt_update(self, pkg, atom_set):
+
+ if self._depgraph._select_atoms_parent is not None and \
+ self._depgraph._want_update_pkg(
+ self._depgraph._select_atoms_parent, pkg):
+
+ for new_child in self._depgraph._iter_similar_available(
+ pkg, next(iter(atom_set))):
+
+ if not self._depgraph._virt_deps_visible(
+ new_child, ignore_use=True):
+ continue
+
+ if not self._visible(new_child, atom_set,
+ avoid_slot_conflict=False,
+ probe_virt_update=False):
+ continue
+
+ yield new_child
+
+ def _have_virt_update(self, pkg, atom_set):
+
+ for new_child in self._iter_virt_update(pkg, atom_set):
+ if pkg < new_child:
+ return True
+
+ return False
+
+ def aux_get(self, cpv, wants):
+ metadata = self._cpv_pkg_map[cpv]._metadata
+ return [metadata.get(x, "") for x in wants]
+
+ def match(self, atom):
+ return [pkg.cpv for pkg in self.match_pkgs(atom)]
+
+def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
+
+ if "--quiet" in myopts:
+ writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
+ writemsg("!!! one of the following fully-qualified ebuild names instead:\n\n", noiselevel=-1)
+ for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
+ writemsg(" " + colorize("INFORM", cp) + "\n", noiselevel=-1)
+ return
+
+ s = search(root_config, spinner, "--searchdesc" in myopts,
+ "--quiet" not in myopts, "--usepkg" in myopts,
+ "--usepkgonly" in myopts, search_index = False)
+ null_cp = portage.dep_getkey(insert_category_into_atom(
+ arg, "null"))
+ cat, atom_pn = portage.catsplit(null_cp)
+ s.searchkey = atom_pn
+ for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
+ s.addCP(cp)
+ s.output()
+ writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
+ writemsg("!!! one of the above fully-qualified ebuild names instead.\n\n", noiselevel=-1)
+
+def _spinner_start(spinner, myopts):
+ if spinner is None:
+ return
+ if "--quiet" not in myopts and \
+ ("--pretend" in myopts or "--ask" in myopts or \
+ "--tree" in myopts or "--verbose" in myopts):
+ action = ""
+ if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
+ action = "fetched"
+ elif "--buildpkgonly" in myopts:
+ action = "built"
+ else:
+ action = "merged"
+ if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
+ if "--unordered-display" in myopts:
+ portage.writemsg_stdout("\n" + \
+ darkgreen("These are the packages that " + \
+ "would be %s:" % action) + "\n\n")
+ else:
+ portage.writemsg_stdout("\n" + \
+ darkgreen("These are the packages that " + \
+ "would be %s, in reverse order:" % action) + "\n\n")
+ else:
+ portage.writemsg_stdout("\n" + \
+ darkgreen("These are the packages that " + \
+ "would be %s, in order:" % action) + "\n\n")
+
+ show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
+ if not show_spinner:
+ spinner.update = spinner.update_quiet
+
+ if show_spinner:
+ portage.writemsg_stdout("Calculating dependencies ")
+
+def _spinner_stop(spinner):
+ if spinner is None or \
+ spinner.update == spinner.update_quiet:
+ return
+
+ if spinner.update != spinner.update_basic:
+ # update_basic is used for non-tty output,
+ # so don't output backspaces in that case.
+ portage.writemsg_stdout("\b\b")
+
+ portage.writemsg_stdout("... done!\n")
+
+def backtrack_depgraph(settings, trees, myopts, myparams,
+ myaction, myfiles, spinner):
+ """
+ Raises PackageSetNotFound if myfiles contains a missing package set.
+ """
+ _spinner_start(spinner, myopts)
+ try:
+ return _backtrack_depgraph(settings, trees, myopts, myparams,
+ myaction, myfiles, spinner)
+ finally:
+ _spinner_stop(spinner)
+
+
+def _backtrack_depgraph(settings, trees, myopts, myparams, myaction, myfiles, spinner):
+
+ debug = "--debug" in myopts
+ mydepgraph = None
+ max_retries = myopts.get('--backtrack', 10)
+ max_depth = max(1, (max_retries + 1) // 2)
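+ # For example, with the default --backtrack=10 this allows up to ten
+ # restarts and limits the backtracker depth to max(1, 11 // 2) == 5.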
+ allow_backtracking = max_retries > 0
+ backtracker = Backtracker(max_depth)
+ backtracked = 0
+
+ frozen_config = _frozen_depgraph_config(settings, trees,
+ myopts, myparams, spinner)
+
+ while backtracker:
+
+ if debug and mydepgraph is not None:
+ writemsg_level(
+ "\n\nbacktracking try %s \n\n" % \
+ backtracked, noiselevel=-1, level=logging.DEBUG)
+ mydepgraph.display_problems()
+
+ backtrack_parameters = backtracker.get()
+ if debug and backtrack_parameters.runtime_pkg_mask:
+ writemsg_level(
+ "\n\nruntime_pkg_mask: %s \n\n" %
+ backtrack_parameters.runtime_pkg_mask,
+ noiselevel=-1, level=logging.DEBUG)
+
+ mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
+ frozen_config=frozen_config,
+ allow_backtracking=allow_backtracking,
+ backtrack_parameters=backtrack_parameters)
+ success, favorites = mydepgraph.select_files(myfiles)
+
+ if success or mydepgraph.need_config_change():
+ break
+ elif not allow_backtracking:
+ break
+ elif backtracked >= max_retries:
+ break
+ elif mydepgraph.need_restart():
+ backtracked += 1
+ backtracker.feedback(mydepgraph.get_backtrack_infos())
+ else:
+ break
+
+ if not (success or mydepgraph.need_config_change()) and backtracked:
+
+ if debug:
+ writemsg_level(
+ "\n\nbacktracking aborted after %s tries\n\n" % \
+ backtracked, noiselevel=-1, level=logging.DEBUG)
+ mydepgraph.display_problems()
+
+ mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
+ frozen_config=frozen_config,
+ allow_backtracking=False,
+ backtrack_parameters=backtracker.get_best_run())
+ success, favorites = mydepgraph.select_files(myfiles)
+
+ if not success and mydepgraph.autounmask_breakage_detected():
+ if debug:
+ writemsg_level(
+ "\n\nautounmask breakage detected\n\n",
+ noiselevel=-1, level=logging.DEBUG)
+ mydepgraph.display_problems()
+ myopts["--autounmask"] = "n"
+ mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
+ frozen_config=frozen_config, allow_backtracking=False)
+ success, favorites = mydepgraph.select_files(myfiles)
+
+ return (success, mydepgraph, favorites)
+
+
+def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
+ """
+ Raises PackageSetNotFound if myfiles contains a missing package set.
+ """
+ _spinner_start(spinner, myopts)
+ try:
+ return _resume_depgraph(settings, trees, mtimedb, myopts,
+ myparams, spinner)
+ finally:
+ _spinner_stop(spinner)
+
+def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
+ """
+ Construct a depgraph for the given resume list. This will raise
+ PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
+ TODO: Return reasons for dropped_tasks, for display/logging.
+ @rtype: tuple
+ @return: (success, depgraph, dropped_tasks)
+ """
+ skip_masked = True
+ skip_unsatisfied = True
+ mergelist = mtimedb["resume"]["mergelist"]
+ dropped_tasks = {}
+ frozen_config = _frozen_depgraph_config(settings, trees,
+ myopts, myparams, spinner)
+ while True:
+ mydepgraph = depgraph(settings, trees,
+ myopts, myparams, spinner, frozen_config=frozen_config)
+ try:
+ success = mydepgraph._loadResumeCommand(mtimedb["resume"],
+ skip_masked=skip_masked)
+ except depgraph.UnsatisfiedResumeDep as e:
+ if not skip_unsatisfied:
+ raise
+
+ graph = mydepgraph._dynamic_config.digraph
+ unsatisfied_parents = {}
+ traversed_nodes = set()
+ unsatisfied_stack = [(dep.parent, dep.atom) for dep in e.value]
+ while unsatisfied_stack:
+ pkg, atom = unsatisfied_stack.pop()
+ if atom is not None and \
+ mydepgraph._select_pkg_from_installed(
+ pkg.root, atom)[0] is not None:
+ continue
+ atoms = unsatisfied_parents.get(pkg)
+ if atoms is None:
+ atoms = []
+ unsatisfied_parents[pkg] = atoms
+ if atom is not None:
+ atoms.append(atom)
+ if pkg in traversed_nodes:
+ continue
+ traversed_nodes.add(pkg)
+
+ # If this package was pulled in by a parent
+ # package scheduled for merge, removing this
+ # package may cause the parent package's
+ # dependency to become unsatisfied.
+ for parent_node, atom in \
+ mydepgraph._dynamic_config._parent_atoms.get(pkg, []):
+ if not isinstance(parent_node, Package) \
+ or parent_node.operation not in ("merge", "nomerge"):
+ continue
+ # We need to traverse all priorities here, in order to
+ # ensure that a package with an unsatisfied dependency
+ # won't get pulled in, even indirectly via a soft
+ # dependency.
+ unsatisfied_stack.append((parent_node, atom))
+
+ unsatisfied_tuples = frozenset(tuple(parent_node)
+ for parent_node in unsatisfied_parents
+ if isinstance(parent_node, Package))
+ pruned_mergelist = []
+ for x in mergelist:
+ if isinstance(x, list) and \
+ tuple(x) not in unsatisfied_tuples:
+ pruned_mergelist.append(x)
+
+ # If the mergelist doesn't shrink then this loop is infinite.
+ if len(pruned_mergelist) == len(mergelist):
+ # This happens if a package can't be dropped because
+ # it's already installed, but it has unsatisfied PDEPEND.
+ raise
+ mergelist[:] = pruned_mergelist
+
+ # Exclude installed packages that have been removed from the graph due
+ # to failure to build/install runtime dependencies after the dependent
+ # package has already been installed.
+ dropped_tasks.update((pkg, atoms) for pkg, atoms in \
+ unsatisfied_parents.items() if pkg.operation != "nomerge")
+
+ del e, graph, traversed_nodes, \
+ unsatisfied_parents, unsatisfied_stack
+ continue
+ else:
+ break
+ return (success, mydepgraph, dropped_tasks)
+
+def get_mask_info(root_config, cpv, pkgsettings,
+ db, pkg_type, built, installed, db_keys, myrepo = None, _pkg_use_enabled=None):
+ try:
+ metadata = dict(zip(db_keys,
+ db.aux_get(cpv, db_keys, myrepo=myrepo)))
+ except KeyError:
+ metadata = None
+
+ if metadata is None:
+ mreasons = ["corruption"]
+ else:
+ eapi = metadata['EAPI']
+ if not portage.eapi_is_supported(eapi):
+ mreasons = ['EAPI %s' % eapi]
+ else:
+ pkg = Package(type_name=pkg_type, root_config=root_config,
+ cpv=cpv, built=built, installed=installed, metadata=metadata)
+
+ modified_use = None
+ if _pkg_use_enabled is not None:
+ modified_use = _pkg_use_enabled(pkg)
+
+ mreasons = get_masking_status(pkg, pkgsettings, root_config, myrepo=myrepo, use=modified_use)
+
+ return metadata, mreasons
+
+def show_masked_packages(masked_packages):
+ shown_licenses = set()
+ shown_comments = set()
+ # Maybe there is both an ebuild and a binary. Only
+ # show one of them to avoid redundant appearance.
+ shown_cpvs = set()
+ have_eapi_mask = False
+ for (root_config, pkgsettings, cpv, repo,
+ metadata, mreasons) in masked_packages:
+ output_cpv = cpv
+ if repo:
+ output_cpv += _repo_separator + repo
+ if output_cpv in shown_cpvs:
+ continue
+ shown_cpvs.add(output_cpv)
+ eapi_masked = metadata is not None and \
+ not portage.eapi_is_supported(metadata["EAPI"])
+ if eapi_masked:
+ have_eapi_mask = True
+ # When masked by EAPI, metadata is mostly useless since
+ # it doesn't contain essential things like SLOT.
+ metadata = None
+ comment, filename = None, None
+ if not eapi_masked and \
+ "package.mask" in mreasons:
+ comment, filename = \
+ portage.getmaskingreason(
+ cpv, metadata=metadata,
+ settings=pkgsettings,
+ portdb=root_config.trees["porttree"].dbapi,
+ return_location=True)
+ missing_licenses = []
+ if not eapi_masked and metadata is not None:
+ try:
+ missing_licenses = \
+ pkgsettings._getMissingLicenses(
+ cpv, metadata)
+ except portage.exception.InvalidDependString:
+ # This will have already been reported
+ # above via mreasons.
+ pass
+
+ writemsg("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n",
+ noiselevel=-1)
+
+ if comment and comment not in shown_comments:
+ writemsg(filename + ":\n" + comment + "\n",
+ noiselevel=-1)
+ shown_comments.add(comment)
+ portdb = root_config.trees["porttree"].dbapi
+ for l in missing_licenses:
+ if l in shown_licenses:
+ continue
+ l_path = portdb.findLicensePath(l)
+ if l_path is None:
+ continue
+ msg = ("A copy of the '%s' license" + \
+ " is located at '%s'.\n\n") % (l, l_path)
+ writemsg(msg, noiselevel=-1)
+ shown_licenses.add(l)
+ return have_eapi_mask
+
+def show_mask_docs():
+ writemsg("For more information, see the MASKED PACKAGES "
+ "section in the emerge\n", noiselevel=-1)
+ writemsg("man page or refer to the Gentoo Handbook.\n", noiselevel=-1)
+
+def show_blocker_docs_link():
+ writemsg("\nFor more information about " + bad("Blocked Packages") + ", please refer to the following\n", noiselevel=-1)
+ writemsg("section of the Gentoo Linux x86 Handbook (architecture is irrelevant):\n\n", noiselevel=-1)
+ writemsg("https://wiki.gentoo.org/wiki/Handbook:X86/Working/Portage#Blocked_packages\n\n", noiselevel=-1)
+
+def get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
+ return [mreason.message for \
+ mreason in _get_masking_status(pkg, pkgsettings, root_config, myrepo=myrepo, use=use)]
+
+def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
+ mreasons = _getmaskingstatus(
+ pkg, settings=pkgsettings,
+ portdb=root_config.trees["porttree"].dbapi, myrepo=myrepo)
+
+ if not pkg.installed:
+ if not pkgsettings._accept_chost(pkg.cpv, pkg._metadata):
+ mreasons.append(_MaskReason("CHOST", "CHOST: %s" % \
+ pkg._metadata["CHOST"]))
+
+ if pkg.invalid:
+ for msgs in pkg.invalid.values():
+ for msg in msgs:
+ mreasons.append(
+ _MaskReason("invalid", "invalid: %s" % (msg,)))
+
+ if not pkg._metadata["SLOT"]:
+ mreasons.append(
+ _MaskReason("invalid", "SLOT: undefined"))
+
+ return mreasons
diff --git a/lib/_emerge/emergelog.py b/lib/_emerge/emergelog.py
new file mode 100644
index 000000000..aea94f74e
--- /dev/null
+++ b/lib/_emerge/emergelog.py
@@ -0,0 +1,56 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import io
+import sys
+import time
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.data import secpass
+from portage.output import xtermTitle
+
+# We disable emergelog by default, since it's called from
+# dblink.merge() and we don't want that to trigger log writes
+# unless it's really called via emerge.
+_disable = True
+_emerge_log_dir = '/var/log'
+
+def emergelog(xterm_titles, mystr, short_msg=None):
+
+ if _disable:
+ return
+
+ mystr = _unicode_decode(mystr)
+
+ if short_msg is not None:
+ short_msg = _unicode_decode(short_msg)
+
+ if xterm_titles and short_msg:
+ if "HOSTNAME" in os.environ:
+ short_msg = os.environ["HOSTNAME"]+": "+short_msg
+ xtermTitle(short_msg)
+ try:
+ file_path = os.path.join(_emerge_log_dir, 'emerge.log')
+ existing_log = os.path.isfile(file_path)
+ mylogfile = io.open(_unicode_encode(file_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='a', encoding=_encodings['content'],
+ errors='backslashreplace')
+ if not existing_log:
+ portage.util.apply_secpass_permissions(file_path,
+ uid=portage.portage_uid, gid=portage.portage_gid,
+ mode=0o660)
+ mylock = portage.locks.lockfile(file_path)
+ try:
+ mylogfile.write("%.0f: %s\n" % (time.time(), mystr))
+ mylogfile.close()
+ finally:
+ portage.locks.unlockfile(mylock)
+ except (IOError,OSError,portage.exception.PortageException) as e:
+ if secpass >= 1:
+ portage.util.writemsg("emergelog(): %s\n" % (e,), noiselevel=-1)
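+# Each entry is written as a single "<unix timestamp>: <message>" line,
+# so a call like emergelog(False, ">>> emerge (1 of 1) app-misc/foo")
+# appends something like "1514764800: >>> emerge (1 of 1) app-misc/foo"
+# once the caller has cleared the module-level _disable flag.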
diff --git a/lib/_emerge/getloadavg.py b/lib/_emerge/getloadavg.py
new file mode 100644
index 000000000..6a2794fb1
--- /dev/null
+++ b/lib/_emerge/getloadavg.py
@@ -0,0 +1,28 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+
+getloadavg = getattr(os, "getloadavg", None)
+if getloadavg is None:
+ def getloadavg():
+ """
+ Uses /proc/loadavg to emulate os.getloadavg().
+ Raises OSError if the load average was unobtainable.
+ """
+ try:
+ with open('/proc/loadavg') as f:
+ loadavg_str = f.readline()
+ except IOError:
+ # getloadavg() is only supposed to raise OSError, so convert
+ raise OSError('unknown')
+ loadavg_split = loadavg_str.split()
+ if len(loadavg_split) < 3:
+ raise OSError('unknown')
+ loadavg_floats = []
+ for i in range(3):
+ try:
+ loadavg_floats.append(float(loadavg_split[i]))
+ except ValueError:
+ raise OSError('unknown')
+ return tuple(loadavg_floats)
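+# For example, a /proc/loadavg line such as "0.58 0.61 0.73 3/871 22434"
+# yields the tuple (0.58, 0.61, 0.73).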
diff --git a/lib/_emerge/help.py b/lib/_emerge/help.py
new file mode 100644
index 000000000..dd3a3475c
--- /dev/null
+++ b/lib/_emerge/help.py
@@ -0,0 +1,25 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+from portage.output import bold, turquoise, green
+
+def help():
+ print(bold("emerge:")+" command-line interface to the Portage system")
+ print(bold("Usage:"))
+ print(" "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] [ "+turquoise("ebuild")+" | "+turquoise("tbz2")+" | "+turquoise("file")+" | "+turquoise("@set")+" | "+turquoise("atom")+" ] [ ... ]")
+ print(" "+turquoise("emerge")+" [ "+green("options")+" ] [ "+green("action")+" ] < "+turquoise("@system")+" | "+turquoise("@world")+" >")
+ print(" "+turquoise("emerge")+" < "+turquoise("--sync")+" | "+turquoise("--metadata")+" | "+turquoise("--info")+" >")
+ print(" "+turquoise("emerge")+" "+turquoise("--resume")+" [ "+green("--pretend")+" | "+green("--ask")+" | "+green("--skipfirst")+" ]")
+ print(" "+turquoise("emerge")+" "+turquoise("--help"))
+ print(bold("Options:")+" "+green("-")+"["+green("abBcCdDefgGhjkKlnNoOpPqrsStuvVw")+"]")
+ print(" [ " + green("--color")+" < " + turquoise("y") + " | "+ turquoise("n")+" > ] [ "+green("--columns")+" ]")
+ print(" [ "+green("--complete-graph")+" ] [ "+green("--deep")+" ]")
+ print(" [ "+green("--jobs") + " " + turquoise("JOBS")+" ] [ "+green("--keep-going")+" ] [ " + green("--load-average")+" " + turquoise("LOAD") + " ]")
+ print(" [ "+green("--newrepo")+" ] [ "+green("--newuse")+" ] [ "+green("--noconfmem")+" ] [ "+green("--nospinner")+" ]")
+ print(" [ "+green("--oneshot")+" ] [ "+green("--onlydeps")+" ] [ "+ green("--quiet-build")+" [ " + turquoise("y") + " | "+ turquoise("n")+" ] ]")
+ print(" [ "+green("--reinstall ")+turquoise("changed-use")+" ] [ " + green("--with-bdeps")+" < " + turquoise("y") + " | "+ turquoise("n")+" > ]")
+ print(bold("Actions:")+" [ "+green("--depclean")+" | "+green("--list-sets")+" | "+green("--search")+" | "+green("--sync")+" | "+green("--version")+" ]")
+ print()
+ print(" For more help consult the man page.")
diff --git a/lib/_emerge/is_valid_package_atom.py b/lib/_emerge/is_valid_package_atom.py
new file mode 100644
index 000000000..17f764266
--- /dev/null
+++ b/lib/_emerge/is_valid_package_atom.py
@@ -0,0 +1,23 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import re
+from portage.dep import isvalidatom
+
+def insert_category_into_atom(atom, category):
+ # Handle '*' character for "extended syntax" wildcard support.
+ alphanum = re.search(r'[\*\w]', atom, re.UNICODE)
+ if alphanum:
+ ret = atom[:alphanum.start()] + "%s/" % category + \
+ atom[alphanum.start():]
+ else:
+ ret = None
+ return ret
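+# For example, insert_category_into_atom("bash", "null") returns
+# "null/bash" and insert_category_into_atom(">=bash-4.0", "null")
+# returns ">=null/bash-4.0"; atoms without any word or "*" characters
+# yield None.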
+
+def is_valid_package_atom(x, allow_repo=False, allow_build_id=True):
+ if "/" not in x.split(":")[0]:
+ x2 = insert_category_into_atom(x, 'cat')
+ if x2 is not None:
+ x = x2
+ return isvalidatom(x, allow_blockers=False, allow_repo=allow_repo,
+ allow_build_id=allow_build_id)
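+# For example, "sys-apps/portage" and the short name "portage" are both
+# accepted (the latter via a temporary "cat/" prefix), while blocker
+# atoms such as "!sys-apps/portage" are rejected since allow_blockers
+# is False.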
diff --git a/lib/_emerge/main.py b/lib/_emerge/main.py
new file mode 100644
index 000000000..e8b2c2e13
--- /dev/null
+++ b/lib/_emerge/main.py
@@ -0,0 +1,1295 @@
+# Copyright 1999-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import argparse
+import locale
+import platform
+import sys
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'logging',
+ 'portage.dep:Atom',
+ 'portage.util:writemsg_level',
+ 'textwrap',
+ '_emerge.actions:load_emerge_config,run_action,' + \
+ 'validate_ebuild_environment',
+ '_emerge.help:help@emerge_help',
+ '_emerge.is_valid_package_atom:insert_category_into_atom'
+)
+from portage import os
+from portage.sync import _SUBMODULE_PATH_MAP
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+options=[
+"--alphabetical",
+"--ask-enter-invalid",
+"--buildpkgonly",
+"--changed-use",
+"--changelog", "--columns",
+"--debug",
+"--digest",
+"--emptytree",
+"--verbose-conflicts",
+"--fetchonly", "--fetch-all-uri",
+"--ignore-default-opts",
+"--noconfmem",
+"--newrepo",
+"--newuse",
+"--nodeps", "--noreplace",
+"--nospinner", "--oneshot",
+"--onlydeps", "--pretend",
+"--quiet-repo-display",
+"--quiet-unmerge-warn",
+"--resume",
+"--searchdesc",
+"--skipfirst",
+"--tree",
+"--unordered-display",
+"--update",
+]
+
+shortmapping={
+"1":"--oneshot",
+"B":"--buildpkgonly",
+"c":"--depclean",
+"C":"--unmerge",
+"d":"--debug",
+"e":"--emptytree",
+"f":"--fetchonly", "F":"--fetch-all-uri",
+"h":"--help",
+"l":"--changelog",
+"n":"--noreplace", "N":"--newuse",
+"o":"--onlydeps", "O":"--nodeps",
+"p":"--pretend", "P":"--prune",
+"r":"--resume",
+"s":"--search", "S":"--searchdesc",
+"t":"--tree",
+"u":"--update", "U":"--changed-use",
+"V":"--version"
+}
+
+COWSAY_MOO = r"""
+
+ Larry loves Gentoo (%s)
+
+ _______________________
+< Have you mooed today? >
+ -----------------------
+ \ ^__^
+ \ (oo)\_______
+ (__)\ )\/\
+ ||----w |
+ || ||
+
+"""
+
+def multiple_actions(action1, action2):
+ sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
+ sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
+ sys.exit(1)
+
+def insert_optional_args(args):
+ """
+ Parse optional arguments and insert a value if one has
+ not been provided. This is done before feeding the args
+ to the argument parser, since it does not support
+ this feature natively.
+ """
+
+ class valid_integers(object):
+ def __contains__(self, s):
+ try:
+ return int(s) >= 0
+ except (ValueError, OverflowError):
+ return False
+
+ valid_integers = valid_integers()
+
+ class valid_floats(object):
+ def __contains__(self, s):
+ try:
+ return float(s) >= 0
+ except (ValueError, OverflowError):
+ return False
+
+ valid_floats = valid_floats()
+
+ y_or_n = ('y', 'n',)
+
+ new_args = []
+
+ default_arg_opts = {
+ '--alert' : y_or_n,
+ '--ask' : y_or_n,
+ '--autounmask' : y_or_n,
+ '--autounmask-continue' : y_or_n,
+ '--autounmask-only' : y_or_n,
+ '--autounmask-keep-keywords' : y_or_n,
+ '--autounmask-keep-masks': y_or_n,
+ '--autounmask-unrestricted-atoms' : y_or_n,
+ '--autounmask-write' : y_or_n,
+ '--binpkg-changed-deps' : y_or_n,
+ '--buildpkg' : y_or_n,
+ '--changed-deps' : y_or_n,
+ '--changed-slot' : y_or_n,
+ '--changed-deps-report' : y_or_n,
+ '--complete-graph' : y_or_n,
+ '--deep' : valid_integers,
+ '--depclean-lib-check' : y_or_n,
+ '--deselect' : y_or_n,
+ '--binpkg-respect-use' : y_or_n,
+ '--fail-clean' : y_or_n,
+ '--fuzzy-search' : y_or_n,
+ '--getbinpkg' : y_or_n,
+ '--getbinpkgonly' : y_or_n,
+ '--ignore-world' : y_or_n,
+ '--jobs' : valid_integers,
+ '--keep-going' : y_or_n,
+ '--load-average' : valid_floats,
+ '--onlydeps-with-rdeps' : y_or_n,
+ '--package-moves' : y_or_n,
+ '--quiet' : y_or_n,
+ '--quiet-build' : y_or_n,
+ '--quiet-fail' : y_or_n,
+ '--read-news' : y_or_n,
+ '--rebuild-if-new-slot': y_or_n,
+ '--rebuild-if-new-rev' : y_or_n,
+ '--rebuild-if-new-ver' : y_or_n,
+ '--rebuild-if-unbuilt' : y_or_n,
+ '--rebuilt-binaries' : y_or_n,
+ '--root-deps' : ('rdeps',),
+ '--select' : y_or_n,
+ '--selective' : y_or_n,
+ "--use-ebuild-visibility": y_or_n,
+ '--usepkg' : y_or_n,
+ '--usepkgonly' : y_or_n,
+ '--verbose' : y_or_n,
+ '--verbose-slot-rebuilds': y_or_n,
+ '--with-test-deps' : y_or_n,
+ }
+
+ short_arg_opts = {
+ 'D' : valid_integers,
+ 'j' : valid_integers,
+ }
+
+ # Don't make things like "-kn" expand to "-k n"
+ # since existence of -n makes it too ambiguous.
+ short_arg_opts_n = {
+ 'a' : y_or_n,
+ 'A' : y_or_n,
+ 'b' : y_or_n,
+ 'g' : y_or_n,
+ 'G' : y_or_n,
+ 'k' : y_or_n,
+ 'K' : y_or_n,
+ 'q' : y_or_n,
+ 'v' : y_or_n,
+ 'w' : y_or_n,
+ }
+
+ arg_stack = args[:]
+ arg_stack.reverse()
+ while arg_stack:
+ arg = arg_stack.pop()
+
+ default_arg_choices = default_arg_opts.get(arg)
+ if default_arg_choices is not None:
+ new_args.append(arg)
+ if arg_stack and arg_stack[-1] in default_arg_choices:
+ new_args.append(arg_stack.pop())
+ else:
+ # insert default argument
+ new_args.append('True')
+ continue
+
+ if arg[:1] != "-" or arg[:2] == "--":
+ new_args.append(arg)
+ continue
+
+ match = None
+ for k, arg_choices in short_arg_opts.items():
+ if k in arg:
+ match = k
+ break
+
+ if match is None:
+ for k, arg_choices in short_arg_opts_n.items():
+ if k in arg:
+ match = k
+ break
+
+ if match is None:
+ new_args.append(arg)
+ continue
+
+ if len(arg) == 2:
+ new_args.append(arg)
+ if arg_stack and arg_stack[-1] in arg_choices:
+ new_args.append(arg_stack.pop())
+ else:
+ # insert default argument
+ new_args.append('True')
+ continue
+
+ # Insert an empty placeholder in order to
+ # satisfy the requirements of the argument parser.
+
+ new_args.append("-" + match)
+ opt_arg = None
+ saved_opts = None
+
+ if arg[1:2] == match:
+ if match not in short_arg_opts_n and arg[2:] in arg_choices:
+ opt_arg = arg[2:]
+ else:
+ saved_opts = arg[2:]
+ opt_arg = "True"
+ else:
+ saved_opts = arg[1:].replace(match, "")
+ opt_arg = "True"
+
+ if opt_arg is None and arg_stack and \
+ arg_stack[-1] in arg_choices:
+ opt_arg = arg_stack.pop()
+
+ if opt_arg is None:
+ new_args.append("True")
+ else:
+ new_args.append(opt_arg)
+
+ if saved_opts is not None:
+ # Recycle these on arg_stack since they
+ # might contain another match.
+ arg_stack.append("-" + saved_opts)
+
+ return new_args
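+# For example, insert_optional_args(["--ask", "@world"]) returns
+# ["--ask", "True", "@world"], while ["--ask", "y", "@world"] passes
+# through unchanged because "y" is a recognized value for --ask.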
+
+def _find_bad_atoms(atoms, less_strict=False):
+ """
+ Declares all atoms as invalid that have an operator,
+ a use dependency, a blocker or a repo spec.
+ It accepts atoms with wildcards.
+ In less_strict mode it accepts operators and repo specs.
+ """
+ bad_atoms = []
+ for x in ' '.join(atoms).split():
+ atom = x
+ if "/" not in x.split(":")[0]:
+ x_cat = insert_category_into_atom(x, 'dummy-category')
+ if x_cat is not None:
+ atom = x_cat
+
+ bad_atom = False
+ try:
+ atom = Atom(atom, allow_wildcard=True, allow_repo=less_strict)
+ except portage.exception.InvalidAtom:
+ bad_atom = True
+
+ if bad_atom or (atom.operator and not less_strict) or atom.blocker or atom.use:
+ bad_atoms.append(x)
+ return bad_atoms
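+# For example, _find_bad_atoms(["app-misc/foo", ">=app-misc/bar-1"])
+# returns [">=app-misc/bar-1"], since version operators are only
+# tolerated when less_strict is True.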
+
+
+def parse_opts(tmpcmdline, silent=False):
+ myaction=None
+ myopts = {}
+ myfiles=[]
+
+ actions = frozenset([
+ "clean", "check-news", "config", "depclean", "help",
+ "info", "list-sets", "metadata", "moo",
+ "prune", "rage-clean", "regen", "search",
+ "sync", "unmerge", "version",
+ ])
+
+ longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
+ y_or_n = ("y", "n")
+ true_y_or_n = ("True", "y", "n")
+ true_y = ("True", "y")
+ argument_options = {
+
+ "--alert": {
+ "shortopt" : "-A",
+ "help" : "alert (terminal bell) on prompts",
+ "choices" : true_y_or_n
+ },
+
+ "--ask": {
+ "shortopt" : "-a",
+ "help" : "prompt before performing any actions",
+ "choices" : true_y_or_n
+ },
+
+ "--autounmask": {
+ "help" : "automatically unmask packages",
+ "choices" : true_y_or_n
+ },
+
+ "--autounmask-backtrack": {
+ "help": ("continue backtracking when there are autounmask "
+ "configuration changes"),
+ "choices":("y", "n")
+ },
+
+ "--autounmask-continue": {
+ "help" : "write autounmask changes and continue",
+ "choices" : true_y_or_n
+ },
+
+ "--autounmask-only": {
+ "help" : "only perform --autounmask",
+ "choices" : true_y_or_n
+ },
+
+ "--autounmask-unrestricted-atoms": {
+ "help" : "write autounmask changes with >= atoms if possible",
+ "choices" : true_y_or_n
+ },
+
+ "--autounmask-keep-keywords": {
+ "help" : "don't add package.accept_keywords entries",
+ "choices" : true_y_or_n
+ },
+
+ "--autounmask-keep-masks": {
+ "help" : "don't add package.unmask entries",
+ "choices" : true_y_or_n
+ },
+
+ "--autounmask-write": {
+ "help" : "write changes made by --autounmask to disk",
+ "choices" : true_y_or_n
+ },
+
+ "--accept-properties": {
+ "help":"temporarily override ACCEPT_PROPERTIES",
+ "action":"store"
+ },
+
+ "--accept-restrict": {
+ "help":"temporarily override ACCEPT_RESTRICT",
+ "action":"store"
+ },
+
+ "--backtrack": {
+
+ "help" : "Specifies how many times to backtrack if dependency " + \
+ "calculation fails ",
+
+ "action" : "store"
+ },
+
+ "--binpkg-changed-deps": {
+ "help" : ("reject binary packages with outdated "
+ "dependencies"),
+ "choices" : true_y_or_n
+ },
+
+ "--buildpkg": {
+ "shortopt" : "-b",
+ "help" : "build binary packages",
+ "choices" : true_y_or_n
+ },
+
+ "--buildpkg-exclude": {
+ "help" :"A space separated list of package atoms for which " + \
+ "no binary packages should be built. This option overrides all " + \
+ "possible ways to enable building of binary packages.",
+
+ "action" : "append"
+ },
+
+ "--changed-deps": {
+ "help" : ("replace installed packages with "
+ "outdated dependencies"),
+ "choices" : true_y_or_n
+ },
+
+ "--changed-deps-report": {
+ "help" : ("report installed packages with "
+ "outdated dependencies"),
+ "choices" : true_y_or_n
+ },
+
+ "--changed-slot": {
+ "help" : ("replace installed packages with "
+ "outdated SLOT metadata"),
+ "choices" : true_y_or_n
+ },
+
+ "--config-root": {
+ "help":"specify the location for portage configuration files",
+ "action":"store"
+ },
+ "--color": {
+ "help":"enable or disable color output",
+ "choices":("y", "n")
+ },
+
+ "--complete-graph": {
+ "help" : "completely account for all known dependencies",
+ "choices" : true_y_or_n
+ },
+
+ "--complete-graph-if-new-use": {
+ "help" : "trigger --complete-graph behavior if USE or IUSE will change for an installed package",
+ "choices" : y_or_n
+ },
+
+ "--complete-graph-if-new-ver": {
+ "help" : "trigger --complete-graph behavior if an installed package version will change (upgrade or downgrade)",
+ "choices" : y_or_n
+ },
+
+ "--deep": {
+
+ "shortopt" : "-D",
+
+ "help" : "Specifies how deep to recurse into dependencies " + \
+ "of packages given as arguments. If no argument is given, " + \
+ "depth is unlimited. Default behavior is to skip " + \
+ "dependencies of installed packages.",
+
+ "action" : "store"
+ },
+
+ "--depclean-lib-check": {
+ "help" : "check for consumers of libraries before removing them",
+ "choices" : true_y_or_n
+ },
+
+ "--deselect": {
+ "help" : "remove atoms/sets from the world file",
+ "choices" : true_y_or_n
+ },
+
+ "--dynamic-deps": {
+ "help": "substitute the dependencies of installed packages with the dependencies of unbuilt ebuilds",
+ "choices": y_or_n
+ },
+
+ "--exclude": {
+ "help" :"A space separated list of package names or slot atoms. " + \
+ "Emerge won't install any ebuild or binary package that " + \
+ "matches any of the given package atoms.",
+
+ "action" : "append"
+ },
+
+ "--fail-clean": {
+ "help" : "clean temp files after build failure",
+ "choices" : true_y_or_n
+ },
+
+ "--fuzzy-search": {
+ "help": "Enable or disable fuzzy search",
+ "choices": true_y_or_n
+ },
+
+ "--ignore-built-slot-operator-deps": {
+ "help": "Ignore the slot/sub-slot := operator parts of dependencies that have "
+ "been recorded when packages were built. This option is intended "
+ "only for debugging purposes, and it only affects built packages "
+ "that specify slot/sub-slot := operator dependencies using the "
+ "experimental \"4-slot-abi\" EAPI.",
+ "choices": y_or_n
+ },
+
+ "--ignore-soname-deps": {
+ "help": "Ignore the soname dependencies of binary and "
+ "installed packages. This option is enabled by "
+ "default, since soname dependencies are relatively "
+ "new, and the required metadata is not guaranteed to "
+ "exist for binary and installed packages built with "
+ "older versions of portage.",
+ "choices": y_or_n
+ },
+
+ "--ignore-world": {
+ "help" : "ignore the @world package set and its dependencies",
+ "choices" : true_y_or_n
+ },
+
+ "--jobs": {
+
+ "shortopt" : "-j",
+
+ "help" : "Specifies the number of packages to build " + \
+ "simultaneously.",
+
+ "action" : "store"
+ },
+
+ "--keep-going": {
+ "help" : "continue as much as possible after an error",
+ "choices" : true_y_or_n
+ },
+
+ "--load-average": {
+
+ "help" :"Specifies that no new builds should be started " + \
+ "if there are other builds running and the load average " + \
+ "is at least LOAD (a floating-point number).",
+
+ "action" : "store"
+ },
+
+ "--misspell-suggestions": {
+ "help" : "enable package name misspell suggestions",
+ "choices" : ("y", "n")
+ },
+
+ "--with-bdeps": {
+ "help":"include unnecessary build time dependencies",
+ "choices":("y", "n")
+ },
+ "--with-bdeps-auto": {
+ "help":("automatically enable --with-bdeps for installation"
+ " actions, unless --usepkg is enabled"),
+ "choices":("y", "n")
+ },
+ "--reinstall": {
+ "help":"specify conditions to trigger package reinstallation",
+ "choices":["changed-use"]
+ },
+
+ "--reinstall-atoms": {
+ "help" :"A space separated list of package names or slot atoms. " + \
+ "Emerge will treat matching packages as if they are not " + \
+ "installed, and reinstall them if necessary. Implies --deep.",
+
+ "action" : "append",
+ },
+
+ "--binpkg-respect-use": {
+ "help" : "discard binary packages if their use flags \
+ don't match the current configuration",
+ "choices" : true_y_or_n
+ },
+
+ "--getbinpkg": {
+ "shortopt" : "-g",
+ "help" : "fetch binary packages",
+ "choices" : true_y_or_n
+ },
+
+ "--getbinpkgonly": {
+ "shortopt" : "-G",
+ "help" : "fetch binary packages only",
+ "choices" : true_y_or_n
+ },
+
+ "--usepkg-exclude": {
+ "help" :"A space separated list of package names or slot atoms. " + \
+ "Emerge will ignore matching binary packages. ",
+
+ "action" : "append",
+ },
+
+ "--onlydeps-with-rdeps": {
+ "help" : "modify interpretation of dependencies",
+ "choices" : true_y_or_n
+ },
+
+ "--rebuild-exclude": {
+ "help" :"A space separated list of package names or slot atoms. " + \
+ "Emerge will not rebuild these packages due to the " + \
+ "--rebuild flag. ",
+
+ "action" : "append",
+ },
+
+ "--rebuild-ignore": {
+ "help" :"A space separated list of package names or slot atoms. " + \
+ "Emerge will not rebuild packages that depend on matching " + \
+ "packages due to the --rebuild flag. ",
+
+ "action" : "append",
+ },
+
+ "--package-moves": {
+ "help" : "perform package moves when necessary",
+ "choices" : true_y_or_n
+ },
+
+ "--prefix": {
+ "help" : "specify the installation prefix",
+ "action" : "store"
+ },
+
+ "--pkg-format": {
+ "help" : "format of result binary package",
+ "action" : "store",
+ },
+
+ "--quiet": {
+ "shortopt" : "-q",
+ "help" : "reduced or condensed output",
+ "choices" : true_y_or_n
+ },
+
+ "--quiet-build": {
+ "help" : "redirect build output to logs",
+ "choices" : true_y_or_n,
+ },
+
+ "--quiet-fail": {
+ "help" : "suppresses display of the build log on stdout",
+ "choices" : true_y_or_n,
+ },
+
+ "--read-news": {
+ "help" : "offer to read unread news via eselect",
+ "choices" : true_y_or_n
+ },
+
+
+ "--rebuild-if-new-slot": {
+ "help" : ("Automatically rebuild or reinstall packages when slot/sub-slot := "
+ "operator dependencies can be satisfied by a newer slot, so that "
+ "older package slots will become eligible for removal by the "
+ "--depclean action as soon as possible."),
+ "choices" : true_y_or_n
+ },
+
+ "--rebuild-if-new-rev": {
+ "help" : "Rebuild packages when dependencies that are " + \
+ "used at both build-time and run-time are built, " + \
+ "if the dependency is not already installed with the " + \
+ "same version and revision.",
+ "choices" : true_y_or_n
+ },
+
+ "--rebuild-if-new-ver": {
+ "help" : "Rebuild packages when dependencies that are " + \
+ "used at both build-time and run-time are built, " + \
+ "if the dependency is not already installed with the " + \
+ "same version. Revision numbers are ignored.",
+ "choices" : true_y_or_n
+ },
+
+ "--rebuild-if-unbuilt": {
+ "help" : "Rebuild packages when dependencies that are " + \
+ "used at both build-time and run-time are built.",
+ "choices" : true_y_or_n
+ },
+
+ "--rebuilt-binaries": {
+ "help" : "replace installed packages with binary " + \
+ "packages that have been rebuilt",
+ "choices" : true_y_or_n
+ },
+
+ "--rebuilt-binaries-timestamp": {
+ "help" : "use only binaries that are newer than this " + \
+ "timestamp for --rebuilt-binaries",
+ "action" : "store"
+ },
+
+ "--root": {
+ "help" : "specify the target root filesystem for merging packages",
+ "action" : "store"
+ },
+
+ "--root-deps": {
+ "help" : "modify interpretation of dependencies",
+ "choices" :("True", "rdeps")
+ },
+
+ "--search-index": {
+ "help": "Enable or disable indexed search (enabled by default)",
+ "choices": y_or_n
+ },
+
+ "--search-similarity": {
+ "help": ("Set minimum similarity percentage for fuzzy search "
+ "(a floating-point number between 0 and 100)"),
+ "action": "store"
+ },
+
+ "--select": {
+ "shortopt" : "-w",
+ "help" : "add specified packages to the world set " + \
+ "(inverse of --oneshot)",
+ "choices" : true_y_or_n
+ },
+
+ "--selective": {
+ "help" : "identical to --noreplace",
+ "choices" : true_y_or_n
+ },
+
+ "--sync-submodule": {
+ "help" : ("Restrict sync to the specified submodule(s)."
+ " (--sync action only)"),
+ "choices" : tuple(_SUBMODULE_PATH_MAP),
+ "action" : "append",
+ },
+
+ "--sysroot": {
+ "help":"specify the location for build dependencies specified in DEPEND",
+ "action":"store"
+ },
+
+ "--use-ebuild-visibility": {
+ "help" : "use unbuilt ebuild metadata for visibility checks on built packages",
+ "choices" : true_y_or_n
+ },
+
+ "--useoldpkg-atoms": {
+ "help" :"A space separated list of package names or slot atoms. " + \
+ "Emerge will prefer matching binary packages over newer unbuilt packages. ",
+
+ "action" : "append",
+ },
+
+ "--usepkg": {
+ "shortopt" : "-k",
+ "help" : "use binary packages",
+ "choices" : true_y_or_n
+ },
+
+ "--usepkgonly": {
+ "shortopt" : "-K",
+ "help" : "use only binary packages",
+ "choices" : true_y_or_n
+ },
+
+ "--verbose": {
+ "shortopt" : "-v",
+ "help" : "verbose output",
+ "choices" : true_y_or_n
+ },
+ "--verbose-slot-rebuilds": {
+ "help" : "verbose slot rebuild output",
+ "choices" : true_y_or_n
+ },
+ "--with-test-deps": {
+ "help" : "pull in test deps for packages " + \
+ "matched by arguments",
+ "choices" : true_y_or_n
+ },
+ }
+
+ parser = argparse.ArgumentParser(add_help=False)
+
+ for action_opt in actions:
+ parser.add_argument("--" + action_opt, action="store_true",
+ dest=action_opt.replace("-", "_"), default=False)
+ for myopt in options:
+ parser.add_argument(myopt, action="store_true",
+ dest=myopt.lstrip("--").replace("-", "_"), default=False)
+ for shortopt, longopt in shortmapping.items():
+ parser.add_argument("-" + shortopt, action="store_true",
+ dest=longopt.lstrip("--").replace("-", "_"), default=False)
+ for myalias, myopt in longopt_aliases.items():
+ parser.add_argument(myalias, action="store_true",
+ dest=myopt.lstrip("--").replace("-", "_"), default=False)
+
+ for myopt, kwargs in argument_options.items():
+ shortopt = kwargs.pop("shortopt", None)
+ args = [myopt]
+ if shortopt is not None:
+ args.append(shortopt)
+ parser.add_argument(dest=myopt.lstrip("--").replace("-", "_"),
+ *args, **kwargs)
+
+ tmpcmdline = insert_optional_args(tmpcmdline)
+
+ myoptions, myargs = parser.parse_known_args(args=tmpcmdline)
+
+ if myoptions.alert in true_y:
+ myoptions.alert = True
+ else:
+ myoptions.alert = None
+
+ if myoptions.ask in true_y:
+ myoptions.ask = True
+ else:
+ myoptions.ask = None
+
+ if myoptions.autounmask in true_y:
+ myoptions.autounmask = True
+
+ if myoptions.autounmask_continue in true_y:
+ myoptions.autounmask_continue = True
+
+ if myoptions.autounmask_only in true_y:
+ myoptions.autounmask_only = True
+ else:
+ myoptions.autounmask_only = None
+
+ if myoptions.autounmask_unrestricted_atoms in true_y:
+ myoptions.autounmask_unrestricted_atoms = True
+
+ if myoptions.autounmask_keep_keywords in true_y:
+ myoptions.autounmask_keep_keywords = True
+
+ if myoptions.autounmask_keep_masks in true_y:
+ myoptions.autounmask_keep_masks = True
+
+ if myoptions.autounmask_write in true_y:
+ myoptions.autounmask_write = True
+
+ if myoptions.binpkg_changed_deps is not None:
+ if myoptions.binpkg_changed_deps in true_y:
+ myoptions.binpkg_changed_deps = 'y'
+ else:
+ myoptions.binpkg_changed_deps = 'n'
+
+ if myoptions.buildpkg in true_y:
+ myoptions.buildpkg = True
+
+ if myoptions.buildpkg_exclude:
+ bad_atoms = _find_bad_atoms(myoptions.buildpkg_exclude, less_strict=True)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --buildpkg-exclude parameter: '%s'\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.changed_deps is not None:
+ if myoptions.changed_deps in true_y:
+ myoptions.changed_deps = 'y'
+ else:
+ myoptions.changed_deps = 'n'
+
+ if myoptions.changed_deps_report is not None:
+ if myoptions.changed_deps_report in true_y:
+ myoptions.changed_deps_report = 'y'
+ else:
+ myoptions.changed_deps_report = 'n'
+
+ if myoptions.changed_slot is not None:
+ if myoptions.changed_slot in true_y:
+ myoptions.changed_slot = True
+ else:
+ myoptions.changed_slot = None
+
+ if myoptions.changed_use is not False:
+ myoptions.reinstall = "changed-use"
+ myoptions.changed_use = False
+
+ if myoptions.deselect in true_y:
+ myoptions.deselect = True
+
+ if myoptions.binpkg_respect_use is not None:
+ if myoptions.binpkg_respect_use in true_y:
+ myoptions.binpkg_respect_use = 'y'
+ else:
+ myoptions.binpkg_respect_use = 'n'
+
+ if myoptions.complete_graph in true_y:
+ myoptions.complete_graph = True
+ else:
+ myoptions.complete_graph = None
+
+ if myoptions.depclean_lib_check in true_y:
+ myoptions.depclean_lib_check = True
+
+ if myoptions.exclude:
+ bad_atoms = _find_bad_atoms(myoptions.exclude)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --exclude parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.reinstall_atoms:
+ bad_atoms = _find_bad_atoms(myoptions.reinstall_atoms)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --reinstall-atoms parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.rebuild_exclude:
+ bad_atoms = _find_bad_atoms(myoptions.rebuild_exclude)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --rebuild-exclude parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.rebuild_ignore:
+ bad_atoms = _find_bad_atoms(myoptions.rebuild_ignore)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --rebuild-ignore parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.usepkg_exclude:
+ bad_atoms = _find_bad_atoms(myoptions.usepkg_exclude)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --usepkg-exclude parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.useoldpkg_atoms:
+ bad_atoms = _find_bad_atoms(myoptions.useoldpkg_atoms)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --useoldpkg-atoms parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.fail_clean in true_y:
+ myoptions.fail_clean = True
+
+ if myoptions.fuzzy_search in true_y:
+ myoptions.fuzzy_search = True
+
+ if myoptions.getbinpkg in true_y:
+ myoptions.getbinpkg = True
+ else:
+ myoptions.getbinpkg = None
+
+ if myoptions.getbinpkgonly in true_y:
+ myoptions.getbinpkgonly = True
+ else:
+ myoptions.getbinpkgonly = None
+
+ if myoptions.ignore_world in true_y:
+ myoptions.ignore_world = True
+
+ if myoptions.keep_going in true_y:
+ myoptions.keep_going = True
+ else:
+ myoptions.keep_going = None
+
+ if myoptions.package_moves in true_y:
+ myoptions.package_moves = True
+
+ if myoptions.quiet in true_y:
+ myoptions.quiet = True
+ else:
+ myoptions.quiet = None
+
+ if myoptions.quiet_build in true_y:
+ myoptions.quiet_build = 'y'
+
+ if myoptions.quiet_fail in true_y:
+ myoptions.quiet_fail = 'y'
+
+ if myoptions.read_news in true_y:
+ myoptions.read_news = True
+ else:
+ myoptions.read_news = None
+
+
+ if myoptions.rebuild_if_new_slot in true_y:
+ myoptions.rebuild_if_new_slot = 'y'
+
+ if myoptions.rebuild_if_new_ver in true_y:
+ myoptions.rebuild_if_new_ver = True
+ else:
+ myoptions.rebuild_if_new_ver = None
+
+ if myoptions.rebuild_if_new_rev in true_y:
+ myoptions.rebuild_if_new_rev = True
+ myoptions.rebuild_if_new_ver = None
+ else:
+ myoptions.rebuild_if_new_rev = None
+
+ if myoptions.rebuild_if_unbuilt in true_y:
+ myoptions.rebuild_if_unbuilt = True
+ myoptions.rebuild_if_new_rev = None
+ myoptions.rebuild_if_new_ver = None
+ else:
+ myoptions.rebuild_if_unbuilt = None
+
+ if myoptions.rebuilt_binaries in true_y:
+ myoptions.rebuilt_binaries = True
+
+ if myoptions.root_deps in true_y:
+ myoptions.root_deps = True
+
+ if myoptions.select in true_y:
+ myoptions.select = True
+ myoptions.oneshot = False
+ elif myoptions.select == "n":
+ myoptions.oneshot = True
+
+ if myoptions.selective in true_y:
+ myoptions.selective = True
+
+ if myoptions.backtrack is not None:
+
+ try:
+ backtrack = int(myoptions.backtrack)
+ except (OverflowError, ValueError):
+ backtrack = -1
+
+ if backtrack < 0:
+ backtrack = None
+ if not silent:
+ parser.error("Invalid --backtrack parameter: '%s'\n" % \
+ (myoptions.backtrack,))
+
+ myoptions.backtrack = backtrack
+
+ if myoptions.deep is not None:
+ deep = None
+ if myoptions.deep == "True":
+ deep = True
+ else:
+ try:
+ deep = int(myoptions.deep)
+ except (OverflowError, ValueError):
+ deep = -1
+
+ if deep is not True and deep < 0:
+ deep = None
+ if not silent:
+ parser.error("Invalid --deep parameter: '%s'\n" % \
+ (myoptions.deep,))
+
+ myoptions.deep = deep
+
+ if myoptions.jobs:
+ jobs = None
+ if myoptions.jobs == "True":
+ jobs = True
+ else:
+ try:
+ jobs = int(myoptions.jobs)
+ except ValueError:
+ jobs = -1
+
+ if jobs is not True and \
+ jobs < 1:
+ jobs = None
+ if not silent:
+ parser.error("Invalid --jobs parameter: '%s'\n" % \
+ (myoptions.jobs,))
+
+ myoptions.jobs = jobs
+
+ if myoptions.load_average == "True":
+ myoptions.load_average = None
+
+ if myoptions.load_average:
+ try:
+ load_average = float(myoptions.load_average)
+ except ValueError:
+ load_average = 0.0
+
+ if load_average <= 0.0:
+ load_average = None
+ if not silent:
+ parser.error("Invalid --load-average parameter: '%s'\n" % \
+ (myoptions.load_average,))
+
+ myoptions.load_average = load_average
+
+ if myoptions.rebuilt_binaries_timestamp:
+ try:
+ rebuilt_binaries_timestamp = int(myoptions.rebuilt_binaries_timestamp)
+ except ValueError:
+ rebuilt_binaries_timestamp = -1
+
+ if rebuilt_binaries_timestamp < 0:
+ rebuilt_binaries_timestamp = 0
+ if not silent:
+ parser.error("Invalid --rebuilt-binaries-timestamp parameter: '%s'\n" % \
+ (myoptions.rebuilt_binaries_timestamp,))
+
+ myoptions.rebuilt_binaries_timestamp = rebuilt_binaries_timestamp
+
+ if myoptions.search_similarity:
+ try:
+ search_similarity = float(myoptions.search_similarity)
+ except ValueError:
+ parser.error("Invalid --search-similarity parameter "
+ "(not a number): '{}'\n".format(
+ myoptions.search_similarity))
+
+ if search_similarity < 0 or search_similarity > 100:
+ parser.error("Invalid --search-similarity parameter "
+ "(not between 0 and 100): '{}'\n".format(
+ myoptions.search_similarity))
+
+ myoptions.search_similarity = search_similarity
+
+ if myoptions.use_ebuild_visibility in true_y:
+ myoptions.use_ebuild_visibility = True
+ else:
+ # None or "n"
+ pass
+
+ if myoptions.usepkg in true_y:
+ myoptions.usepkg = True
+ else:
+ myoptions.usepkg = None
+
+ if myoptions.usepkgonly in true_y:
+ myoptions.usepkgonly = True
+ else:
+ myoptions.usepkgonly = None
+
+ if myoptions.verbose in true_y:
+ myoptions.verbose = True
+ else:
+ myoptions.verbose = None
+
+ if myoptions.with_test_deps in true_y:
+ myoptions.with_test_deps = True
+ else:
+ myoptions.with_test_deps = None
+
+ for myopt in options:
+ v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
+ if v:
+ myopts[myopt] = True
+
+ for myopt in argument_options:
+ v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
+ if v is not None:
+ myopts[myopt] = v
+
+ if myoptions.searchdesc:
+ myoptions.search = True
+
+ for action_opt in actions:
+ v = getattr(myoptions, action_opt.replace("-", "_"))
+ if v:
+ if myaction:
+ multiple_actions(myaction, action_opt)
+ sys.exit(1)
+ myaction = action_opt
+
+ if myaction is None and myoptions.deselect is True:
+ myaction = 'deselect'
+
+ myfiles += myargs
+
+ return myaction, myopts, myfiles
+
+def profile_check(trees, myaction):
+ if myaction in ("help", "info", "search", "sync", "version"):
+ return os.EX_OK
+ for root_trees in trees.values():
+ if (root_trees["root_config"].settings.profiles and
+ 'ARCH' in root_trees["root_config"].settings):
+ continue
+ # generate some profile related warning messages
+ validate_ebuild_environment(trees)
+ msg = ("Your current profile is invalid. If you have just changed "
+ "your profile configuration, you should revert back to the "
+ "previous configuration. Allowed actions are limited to "
+ "--help, --info, --search, --sync, and --version.")
+ writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+ return os.EX_OK
+
+def emerge_main(args=None):
+ """
+ @param args: command arguments (default: sys.argv[1:])
+ @type args: list
+ """
+ if args is None:
+ args = sys.argv[1:]
+
+ args = portage._decode_argv(args)
+
+ # Use system locale.
+ try:
+ locale.setlocale(locale.LC_ALL, "")
+ except locale.Error as e:
+ writemsg_level("setlocale: %s\n" % e, level=logging.WARN)
+
+ # Disable color until we're sure that it should be enabled (after
+ # EMERGE_DEFAULT_OPTS has been parsed).
+ portage.output.havecolor = 0
+
+ # This first pass is just for options that need to be known as early as
+ # possible, such as --config-root. They will be parsed again later,
+ # together with EMERGE_DEFAULT_OPTS (which may vary depending on
+ # the value of --config-root).
+ myaction, myopts, myfiles = parse_opts(args, silent=True)
+ if "--debug" in myopts:
+ os.environ["PORTAGE_DEBUG"] = "1"
+ if "--config-root" in myopts:
+ os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
+ if "--sysroot" in myopts:
+ os.environ["SYSROOT"] = myopts["--sysroot"]
+ if "--root" in myopts:
+ os.environ["ROOT"] = myopts["--root"]
+ if "--prefix" in myopts:
+ os.environ["EPREFIX"] = myopts["--prefix"]
+ if "--accept-properties" in myopts:
+ os.environ["ACCEPT_PROPERTIES"] = myopts["--accept-properties"]
+ if "--accept-restrict" in myopts:
+ os.environ["ACCEPT_RESTRICT"] = myopts["--accept-restrict"]
+
+ # optimize --help (no need to load config / EMERGE_DEFAULT_OPTS)
+ if myaction == "help":
+ emerge_help()
+ return os.EX_OK
+ elif myaction == "moo":
+ print(COWSAY_MOO % platform.system())
+ return os.EX_OK
+ elif myaction == "sync":
+ # need to set this to True now in order for the repository config
+ # loading to allow new repos with non-existent directories
+ portage._sync_mode = True
+
+ # Verify that /dev/null exists and is a device file as a cheap early
+ # filter for an obviously broken /dev.
+ try:
+ if os.stat(os.devnull).st_rdev == 0:
+ writemsg_level("Failed to validate a sane '/dev'.\n"
+ "'/dev/null' is not a device file.\n",
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+ except OSError:
+ writemsg_level("Failed to validate a sane '/dev'.\n"
+ "'/dev/null' does not exist.\n",
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ # Verify that BASH process substitution works as another cheap early
+ # filter. Process substitution uses '/dev/fd'.
+ with open(os.devnull, 'r+b') as dev_null:
+ fd_pipes = {
+ 0: dev_null.fileno(),
+ 1: dev_null.fileno(),
+ 2: dev_null.fileno(),
+ }
+ if portage.process.spawn_bash("[[ $(< <(echo foo) ) == foo ]]",
+ fd_pipes=fd_pipes) != 0:
+ writemsg_level("Failed to validate a sane '/dev'.\n"
+ "bash process substitution doesn't work; this may be an "
+ "indication of a broken '/dev/fd'.\n",
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ # Portage needs to ensure a sane umask for the files it creates.
+ os.umask(0o22)
+ emerge_config = load_emerge_config(
+ action=myaction, args=myfiles, opts=myopts)
+
+ # Make locale variables from configuration files (make.defaults, make.conf) affect the locale of the emerge process.
+ for locale_var_name in ("LANGUAGE", "LC_ALL", "LC_ADDRESS", "LC_COLLATE", "LC_CTYPE",
+ "LC_IDENTIFICATION", "LC_MEASUREMENT", "LC_MESSAGES", "LC_MONETARY",
+ "LC_NAME", "LC_NUMERIC", "LC_PAPER", "LC_TELEPHONE", "LC_TIME", "LANG"):
+ locale_var_value = emerge_config.running_config.settings.get(locale_var_name)
+ if locale_var_value is not None:
+ os.environ.setdefault(locale_var_name, locale_var_value)
+ try:
+ locale.setlocale(locale.LC_ALL, "")
+ except locale.Error as e:
+ writemsg_level("setlocale: %s\n" % e, level=logging.WARN)
+
+ tmpcmdline = []
+ if "--ignore-default-opts" not in myopts:
+ tmpcmdline.extend(portage.util.shlex_split(
+ emerge_config.target_config.settings.get(
+ "EMERGE_DEFAULT_OPTS", "")))
+ tmpcmdline.extend(args)
+ emerge_config.action, emerge_config.opts, emerge_config.args = \
+ parse_opts(tmpcmdline)
+
+ try:
+ return run_action(emerge_config)
+ finally:
+ # Call destructors for our portdbapi instances.
+ for x in emerge_config.trees.values():
+ if "porttree" in x.lazy_items:
+ continue
+ x["porttree"].dbapi.close_caches()
diff --git a/lib/_emerge/post_emerge.py b/lib/_emerge/post_emerge.py
new file mode 100644
index 000000000..7e6063c52
--- /dev/null
+++ b/lib/_emerge/post_emerge.py
@@ -0,0 +1,168 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import logging
+import textwrap
+
+import portage
+from portage import os
+from portage.emaint.modules.logs.logs import CleanLogs
+from portage.news import count_unread_news, display_news_notifications
+from portage.output import colorize
+from portage.util._dyn_libs.display_preserved_libs import \
+ display_preserved_libs
+from portage.util._info_files import chk_updated_info_files
+
+from .chk_updated_cfg_files import chk_updated_cfg_files
+from .emergelog import emergelog
+from ._flush_elog_mod_echo import _flush_elog_mod_echo
+
+def clean_logs(settings):
+
+ if "clean-logs" not in settings.features:
+ return
+
+ logdir = settings.get("PORT_LOGDIR")
+ if logdir is None or not os.path.isdir(logdir):
+ return
+
+ cleanlogs = CleanLogs()
+ returncode, msgs = cleanlogs.clean(settings=settings)
+ if not returncode:
+ out = portage.output.EOutput()
+ for msg in msgs:
+ out.eerror(msg)
+
+def display_news_notification(root_config, myopts):
+ if "news" not in root_config.settings.features:
+ return False
+ portdb = root_config.trees["porttree"].dbapi
+ vardb = root_config.trees["vartree"].dbapi
+ news_counts = count_unread_news(portdb, vardb)
+ if all(v == 0 for v in news_counts.values()):
+ return False
+ display_news_notifications(news_counts)
+ return True
+
+def show_depclean_suggestion():
+ out = portage.output.EOutput()
+ msg = "After world updates, it is important to remove " + \
+ "obsolete packages with emerge --depclean. Refer " + \
+ "to `man emerge` for more information."
+ for line in textwrap.wrap(msg, 72):
+ out.ewarn(line)
+
+def post_emerge(myaction, myopts, myfiles,
+ target_root, trees, mtimedb, retval):
+ """
+ Misc. things to run at the end of a merge session.
+
+ Update Info Files
+ Update Config Files
+ Update News Items
+ Commit mtimeDB
+ Display preserved libs warnings
+
+ @param myaction: The action returned from parse_opts()
+ @type myaction: String
+ @param myopts: emerge options
+ @type myopts: dict
+ @param myfiles: emerge arguments
+ @type myfiles: list
+ @param target_root: The target EROOT for myaction
+ @type target_root: String
+ @param trees: A dictionary mapping each ROOT to its package databases
+ @type trees: dict
+ @param mtimedb: The mtimeDB to store data needed across merge invocations
+ @type mtimedb: MtimeDB class instance
+ @param retval: Emerge's return value
+ @type retval: Int
+ """
+
+ root_config = trees[target_root]["root_config"]
+ vardbapi = trees[target_root]['vartree'].dbapi
+ settings = vardbapi.settings
+ info_mtimes = mtimedb["info"]
+
+ # Load the most current variables from ${ROOT}/etc/profile.env
+ settings.unlock()
+ settings.reload()
+ settings.regenerate()
+ settings.lock()
+
+ config_protect = portage.util.shlex_split(
+ settings.get("CONFIG_PROTECT", ""))
+ infodirs = settings.get("INFOPATH","").split(":") + \
+ settings.get("INFODIR","").split(":")
+
+ os.chdir("/")
+
+ if retval == os.EX_OK:
+ exit_msg = " *** exiting successfully."
+ else:
+ exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
+ emergelog("notitles" not in settings.features, exit_msg)
+
+ _flush_elog_mod_echo()
+
+ if not vardbapi._pkgs_changed:
+ # GLEP 42 says to display news *after* an emerge --pretend
+ if "--pretend" in myopts:
+ display_news_notification(root_config, myopts)
+ # If vdb state has not changed then there's nothing else to do.
+ return
+
+ vdb_path = os.path.join(root_config.settings['EROOT'], portage.VDB_PATH)
+ portage.util.ensure_dirs(vdb_path)
+ vdb_lock = None
+ if os.access(vdb_path, os.W_OK) and "--pretend" not in myopts:
+ vardbapi.lock()
+ vdb_lock = True
+
+ if vdb_lock:
+ try:
+ if "noinfo" not in settings.features:
+ chk_updated_info_files(target_root,
+ infodirs, info_mtimes)
+ mtimedb.commit()
+ finally:
+ if vdb_lock:
+ vardbapi.unlock()
+
+ # Explicitly load and prune the PreservedLibsRegistry in order
+ # to ensure that we do not display stale data.
+ vardbapi._plib_registry.load()
+
+ if vardbapi._plib_registry.hasEntries():
+ if "--quiet" in myopts:
+ print()
+ print(colorize("WARN", "!!!") + " existing preserved libs found")
+ else:
+ print()
+ print(colorize("WARN", "!!!") + " existing preserved libs:")
+ display_preserved_libs(vardbapi)
+ print("Use " + colorize("GOOD", "emerge @preserved-rebuild") +
+ " to rebuild packages using these libraries")
+
+ chk_updated_cfg_files(settings['EROOT'], config_protect)
+
+ display_news_notification(root_config, myopts)
+
+ postemerge = os.path.join(settings["PORTAGE_CONFIGROOT"],
+ portage.USER_CONFIG_PATH, "bin", "post_emerge")
+ if os.access(postemerge, os.X_OK):
+ hook_retval = portage.process.spawn(
+ [postemerge], env=settings.environ())
+ if hook_retval != os.EX_OK:
+ portage.util.writemsg_level(
+ " %s spawn failed of %s\n" %
+ (colorize("BAD", "*"), postemerge,),
+ level=logging.ERROR, noiselevel=-1)
+
+ clean_logs(settings)
+
+ if "--quiet" not in myopts and \
+ myaction is None and "@world" in myfiles:
+ show_depclean_suggestion()
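As a standalone sketch (separate from the patch, with a hypothetical hook path and subprocess standing in for portage.process.spawn), the user hook convention at the end of post_emerge() above amounts to: if an executable script exists in the user's config tree, run it with the merged environment and report a non-zero exit status.

import os
import subprocess

hook = "/etc/portage/bin/post_emerge"  # hypothetical location for this sketch
if os.access(hook, os.X_OK):
    # run the hook and surface a failing exit status, as post_emerge() does
    status = subprocess.run([hook], env=os.environ.copy()).returncode
    if status != os.EX_OK:
        print("!!! %s exited with status %s" % (hook, status))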
diff --git a/lib/_emerge/resolver/DbapiProvidesIndex.py b/lib/_emerge/resolver/DbapiProvidesIndex.py
new file mode 100644
index 000000000..1650edd4e
--- /dev/null
+++ b/lib/_emerge/resolver/DbapiProvidesIndex.py
@@ -0,0 +1,102 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import bisect
+import collections
+import sys
+
+class DbapiProvidesIndex(object):
+ """
+ The DbapiProvidesIndex class is used to wrap existing dbapi
+ interfaces, index packages by the sonames that they provide, and
+ implement the dbapi.match method for SonameAtom instances. Since
+ this class acts as a wrapper, it can be used conditionally, so that
+ soname indexing overhead is avoided when soname dependency
+ resolution is disabled.
+
+ Since it's possible for soname atom match results to consist of
+ packages with multiple categories or names, it is essential that
+ Package.__lt__ behave meaningfully when Package.cp is dissimilar,
+ so that match results will be correctly ordered by version for each
+ value of Package.cp.
+ """
+
+ _copy_attrs = ('aux_get', 'aux_update', 'categories', 'cpv_all',
+ 'cpv_exists', 'cp_all', 'cp_list', 'getfetchsizes',
+ 'settings', '_aux_cache_keys', '_clear_cache',
+ '_cpv_sort_ascending', '_iuse_implicit_cnstr', '_pkg_str',
+ '_pkg_str_aux_keys')
+
+ def __init__(self, db):
+ self._db = db
+ for k in self._copy_attrs:
+ try:
+ setattr(self, k, getattr(db, k))
+ except AttributeError:
+ pass
+ self._provides_index = collections.defaultdict(list)
+
+ def match(self, atom, use_cache=DeprecationWarning):
+ if atom.soname:
+ result = self._match_soname(atom)
+ else:
+ result = self._db.match(atom)
+ return result
+
+ def _match_soname(self, atom):
+ result = self._provides_index.get(atom)
+ if result is None:
+ result = []
+ else:
+ result = [pkg.cpv for pkg in result]
+ return result
+
+ def _provides_inject(self, pkg):
+ index = self._provides_index
+ for atom in pkg.provides:
+ # Use bisect.insort for ordered match results.
+ bisect.insort(index[atom], pkg)
+
+class PackageDbapiProvidesIndex(DbapiProvidesIndex):
+ """
+ This class extends DbapiProvidesIndex in order to make it suitable
+ for wrapping a PackageVirtualDbapi instance.
+ """
+
+ _copy_attrs = DbapiProvidesIndex._copy_attrs + (
+ "clear", "get", "_cpv_map")
+
+ def clear(self):
+ self._db.clear()
+ self._provides_index.clear()
+
+ def __bool__(self):
+ return bool(self._db)
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+ def __iter__(self):
+ return iter(self._db)
+
+ def __contains__(self, item):
+ return item in self._db
+
+ def match_pkgs(self, atom):
+ return [self._db._cpv_map[cpv] for cpv in self.match(atom)]
+
+ def cpv_inject(self, pkg):
+ self._db.cpv_inject(pkg)
+ self._provides_inject(pkg)
+
+ def cpv_remove(self, pkg):
+ self._db.cpv_remove(pkg)
+ index = self._provides_index
+ for atom in pkg.provides:
+ items = index[atom]
+ try:
+ items.remove(pkg)
+ except ValueError:
+ pass
+ if not items:
+ del index[atom]
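The index kept by DbapiProvidesIndex above boils down to a per-soname list of packages held in sorted order via bisect.insort, so soname lookups come back version-ordered. A self-contained sketch of that idea, independent of any real dbapi; the Pkg class, its ordering key, and the soname strings are invented for illustration.

import bisect
import collections

class Pkg:
    def __init__(self, cpv, order, provides):
        self.cpv, self._order, self.provides = cpv, order, provides
    def __lt__(self, other):
        # bisect.insort only needs a meaningful "less than"
        return self._order < other._order

index = collections.defaultdict(list)
for pkg in (Pkg("dev-libs/foo-2", 2, ["libfoo.so.1"]),
            Pkg("dev-libs/foo-1", 1, ["libfoo.so.1"])):
    for soname in pkg.provides:
        bisect.insort(index[soname], pkg)   # mirrors _provides_inject()

# mirrors _match_soname(): ordered results for one soname key
print([pkg.cpv for pkg in index["libfoo.so.1"]])
# -> ['dev-libs/foo-1', 'dev-libs/foo-2']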
diff --git a/lib/_emerge/resolver/__init__.py b/lib/_emerge/resolver/__init__.py
new file mode 100644
index 000000000..21a391aee
--- /dev/null
+++ b/lib/_emerge/resolver/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/_emerge/resolver/backtracking.py b/lib/_emerge/resolver/backtracking.py
new file mode 100644
index 000000000..c29b9d42a
--- /dev/null
+++ b/lib/_emerge/resolver/backtracking.py
@@ -0,0 +1,264 @@
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import copy
+
+class BacktrackParameter(object):
+
+ __slots__ = (
+ "needed_unstable_keywords", "runtime_pkg_mask", "needed_use_config_changes", "needed_license_changes",
+ "prune_rebuilds", "rebuild_list", "reinstall_list", "needed_p_mask_changes",
+ "slot_operator_mask_built", "slot_operator_replace_installed"
+ )
+
+ def __init__(self):
+ self.needed_unstable_keywords = set()
+ self.needed_p_mask_changes = set()
+ self.runtime_pkg_mask = {}
+ self.needed_use_config_changes = {}
+ self.needed_license_changes = {}
+ self.rebuild_list = set()
+ self.reinstall_list = set()
+ self.slot_operator_replace_installed = set()
+ self.slot_operator_mask_built = set()
+ self.prune_rebuilds = False
+
+ def __deepcopy__(self, memo=None):
+ if memo is None:
+ memo = {}
+ result = BacktrackParameter()
+ memo[id(self)] = result
+
+ #Shallow copies are enough here, as we only need to ensure that nobody adds stuff
+ #to our sets and dicts. The existing content is immutable.
+ result.needed_unstable_keywords = copy.copy(self.needed_unstable_keywords)
+ result.needed_p_mask_changes = copy.copy(self.needed_p_mask_changes)
+ result.needed_use_config_changes = copy.copy(self.needed_use_config_changes)
+ result.needed_license_changes = copy.copy(self.needed_license_changes)
+ result.rebuild_list = copy.copy(self.rebuild_list)
+ result.reinstall_list = copy.copy(self.reinstall_list)
+ result.slot_operator_replace_installed = copy.copy(self.slot_operator_replace_installed)
+ result.slot_operator_mask_built = self.slot_operator_mask_built.copy()
+ result.prune_rebuilds = self.prune_rebuilds
+
+ # runtime_pkg_mask contains nested dicts that must also be copied
+ result.runtime_pkg_mask = {}
+ for k, v in self.runtime_pkg_mask.items():
+ result.runtime_pkg_mask[k] = copy.copy(v)
+
+ return result
+
+ def __eq__(self, other):
+ return self.needed_unstable_keywords == other.needed_unstable_keywords and \
+ self.needed_p_mask_changes == other.needed_p_mask_changes and \
+ self.runtime_pkg_mask == other.runtime_pkg_mask and \
+ self.needed_use_config_changes == other.needed_use_config_changes and \
+ self.needed_license_changes == other.needed_license_changes and \
+ self.rebuild_list == other.rebuild_list and \
+ self.reinstall_list == other.reinstall_list and \
+ self.slot_operator_replace_installed == other.slot_operator_replace_installed and \
+ self.slot_operator_mask_built == other.slot_operator_mask_built and \
+ self.prune_rebuilds == other.prune_rebuilds
+
+
+class _BacktrackNode(object):
+
+ __slots__ = (
+ "parameter", "depth", "mask_steps", "terminal",
+ )
+
+ def __init__(self, parameter=BacktrackParameter(), depth=0, mask_steps=0, terminal=True):
+ self.parameter = parameter
+ self.depth = depth
+ self.mask_steps = mask_steps
+ self.terminal = terminal
+
+ def __eq__(self, other):
+ return self.parameter == other.parameter
+
+
+class Backtracker(object):
+
+ __slots__ = (
+ "_max_depth", "_unexplored_nodes", "_current_node", "_nodes", "_root",
+ )
+
+ def __init__(self, max_depth):
+ self._max_depth = max_depth
+ self._unexplored_nodes = []
+ self._current_node = None
+ self._nodes = []
+
+ self._root = _BacktrackNode()
+ self._add(self._root)
+
+
+ def _add(self, node, explore=True):
+ """
+ Adds a newly computed backtrack parameter. Makes sure that it doesn't already exist and
+ that we don't backtrack deeper than we are allowed by --backtrack.
+ """
+ if not self._check_runtime_pkg_mask(node.parameter.runtime_pkg_mask):
+ return
+
+ if node.mask_steps <= self._max_depth and node not in self._nodes:
+ if explore:
+ self._unexplored_nodes.append(node)
+ self._nodes.append(node)
+
+
+ def get(self):
+ """
+ Returns a backtrack parameter. The backtrack graph is explored depth-first.
+ """
+ if self._unexplored_nodes:
+ node = self._unexplored_nodes.pop()
+ self._current_node = node
+ return copy.deepcopy(node.parameter)
+ else:
+ return None
+
+
+ def __len__(self):
+ return len(self._unexplored_nodes)
+
+ def _check_runtime_pkg_mask(self, runtime_pkg_mask):
+ """
+ If a package gets masked that caused other packages to be masked
+ before, we revert the mask for other packages (bug 375573).
+ """
+
+ for pkg, mask_info in runtime_pkg_mask.items():
+
+ if "missing dependency" in mask_info or \
+ "slot_operator_mask_built" in mask_info:
+ continue
+
+ entry_is_valid = False
+
+ for ppkg, patom in runtime_pkg_mask[pkg].get("slot conflict", set()):
+ if ppkg not in runtime_pkg_mask:
+ entry_is_valid = True
+ break
+
+ if not entry_is_valid:
+ return False
+
+ return True
+
+ def _feedback_slot_conflicts(self, conflicts_data):
+ # Only create BacktrackNode instances for the first
+ # conflict which occurred, since the conflicts that
+ # occurred later may have been caused by the first
+ # conflict.
+ self._feedback_slot_conflict(conflicts_data[0])
+
+ def _feedback_slot_conflict(self, conflict_data):
+ for pkg, parent_atoms in conflict_data:
+ new_node = copy.deepcopy(self._current_node)
+ new_node.depth += 1
+ new_node.mask_steps += 1
+ new_node.terminal = False
+ new_node.parameter.runtime_pkg_mask.setdefault(
+ pkg, {})["slot conflict"] = parent_atoms
+ self._add(new_node)
+
+
+ def _feedback_missing_dep(self, dep):
+ new_node = copy.deepcopy(self._current_node)
+ new_node.depth += 1
+ new_node.mask_steps += 1
+ new_node.terminal = False
+
+ new_node.parameter.runtime_pkg_mask.setdefault(
+ dep.parent, {})["missing dependency"] = \
+ set([(dep.parent, dep.root, dep.atom)])
+
+ self._add(new_node)
+
+
+ def _feedback_config(self, changes, explore=True):
+ """
+ Handle config changes. Don't count config changes for the maximum backtrack depth.
+ """
+ new_node = copy.deepcopy(self._current_node)
+ new_node.depth += 1
+ para = new_node.parameter
+
+ for change, data in changes.items():
+ if change == "needed_unstable_keywords":
+ para.needed_unstable_keywords.update(data)
+ elif change == "needed_p_mask_changes":
+ para.needed_p_mask_changes.update(data)
+ elif change == "needed_license_changes":
+ for pkg, missing_licenses in data:
+ para.needed_license_changes.setdefault(pkg, set()).update(missing_licenses)
+ elif change == "needed_use_config_changes":
+ for pkg, (new_use, new_changes) in data:
+ para.needed_use_config_changes[pkg] = (new_use, new_changes)
+ elif change == "slot_conflict_abi":
+ new_node.terminal = False
+ elif change == "slot_operator_mask_built":
+ para.slot_operator_mask_built.update(data)
+ for pkg, mask_reasons in data.items():
+ para.runtime_pkg_mask.setdefault(pkg,
+ {}).update(mask_reasons)
+ elif change == "slot_operator_replace_installed":
+ para.slot_operator_replace_installed.update(data)
+ elif change == "rebuild_list":
+ para.rebuild_list.update(data)
+ elif change == "reinstall_list":
+ para.reinstall_list.update(data)
+ elif change == "prune_rebuilds":
+ para.prune_rebuilds = True
+ para.slot_operator_replace_installed.clear()
+ for pkg in para.slot_operator_mask_built:
+ runtime_masks = para.runtime_pkg_mask.get(pkg)
+ if runtime_masks is None:
+ continue
+ runtime_masks.pop("slot_operator_mask_built", None)
+ if not runtime_masks:
+ para.runtime_pkg_mask.pop(pkg)
+ para.slot_operator_mask_built.clear()
+
+ self._add(new_node, explore=explore)
+ self._current_node = new_node
+
+
+ def feedback(self, infos):
+ """
+ Takes information from the depgraph and computes new backtrack parameters to try.
+ """
+ assert self._current_node is not None, "call feedback() only after get() was called"
+
+ #Not all config changes require a restart, which is why they can appear together
+ #with other conflicts.
+ if "config" in infos:
+ self._feedback_config(infos["config"], explore=(len(infos)==1))
+
+ #There is at most one of the following types of conflicts for a given restart.
+ if "slot conflict" in infos:
+ self._feedback_slot_conflicts(infos["slot conflict"])
+ elif "missing dependency" in infos:
+ self._feedback_missing_dep(infos["missing dependency"])
+
+
+ def backtracked(self):
+ """
+ If we didn't backtrack, there is only the root.
+ """
+ return len(self._nodes) > 1
+
+
+ def get_best_run(self):
+ """
+ Like get(), but returns the backtrack parameter that has as many config changes as
+ possible while having no masks. This makes --autounmask effective and avoids confusing
+ "masked by backtracking" error messages.
+ """
+ best_node = self._root
+ for node in self._nodes:
+ if node.terminal and node.depth > best_node.depth:
+ best_node = node
+
+ return copy.deepcopy(best_node.parameter)
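For context, here is a hedged sketch of the driver loop a resolver might run around the Backtracker defined above; try_resolve and FakeDep are invented stand-ins for the depgraph side, and only the Backtracker calls reflect this file's API.

class FakeDep:
    # stand-in for a Dependency; only the attributes read by
    # _feedback_missing_dep() are provided
    parent, root, atom = "cat/parent-1", "/", "cat/missing"

def try_resolve(parameter):
    # pretend the first attempt hits a missing dependency, then succeeds
    if not parameter.runtime_pkg_mask:
        return {"missing dependency": FakeDep()}
    return None

backtracker = Backtracker(max_depth=5)
while backtracker:                 # true while unexplored nodes remain (__len__)
    parameter = backtracker.get()  # deep copy of the next node's parameter
    infos = try_resolve(parameter)
    if infos is None:
        break                      # resolution succeeded with this parameter
    backtracker.feedback(infos)    # schedule new backtrack nodes to explore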
diff --git a/lib/_emerge/resolver/circular_dependency.py b/lib/_emerge/resolver/circular_dependency.py
new file mode 100644
index 000000000..5c119567b
--- /dev/null
+++ b/lib/_emerge/resolver/circular_dependency.py
@@ -0,0 +1,289 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function, unicode_literals
+
+from itertools import chain, product
+import logging
+
+from portage.dep import use_reduce, extract_affecting_use, check_required_use, get_required_use_flags
+from portage.exception import InvalidDependString
+from portage.output import colorize
+from portage.util import writemsg_level
+from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+from _emerge.Package import Package
+
+class circular_dependency_handler(object):
+
+ MAX_AFFECTING_USE = 10
+
+ def __init__(self, depgraph, graph):
+ self.depgraph = depgraph
+ self.graph = graph
+ self.all_parent_atoms = depgraph._dynamic_config._parent_atoms
+
+ if "--debug" in depgraph._frozen_config.myopts:
+ # Show this debug output before doing the calculations
+ # that follow, so at least we have this debug info
+ # if we happen to hit a bug later.
+ writemsg_level("\n\ncircular dependency graph:\n\n",
+ level=logging.DEBUG, noiselevel=-1)
+ self.debug_print()
+
+ self.cycles, self.shortest_cycle = self._find_cycles()
+ #Guess if it is a large cluster of cycles. This usually requires
+ #a global USE change.
+ self.large_cycle_count = len(self.cycles) > 3
+ self.merge_list = self._prepare_reduced_merge_list()
+ #The digraph dump
+ self.circular_dep_message = self._prepare_circular_dep_message()
+ #Suggestions, in machine and human readable form
+ self.solutions, self.suggestions = self._find_suggestions()
+
+ def _find_cycles(self):
+ shortest_cycle = None
+ cycles = self.graph.get_cycles(ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
+ for cycle in cycles:
+ if not shortest_cycle or len(cycle) < len(shortest_cycle):
+ shortest_cycle = cycle
+ return cycles, shortest_cycle
+
+ def _prepare_reduced_merge_list(self):
+ """
+ Create a merge list to be displayed by depgraph.display().
+ This merge list contains only packages involved in
+ the circular deps.
+ """
+ display_order = []
+ tempgraph = self.graph.copy()
+ while tempgraph:
+ nodes = tempgraph.leaf_nodes()
+ if not nodes:
+ node = tempgraph.order[0]
+ else:
+ node = nodes[0]
+ display_order.append(node)
+ tempgraph.remove(node)
+ return tuple(display_order)
+
+ def _prepare_circular_dep_message(self):
+ """
+ Like digraph.debug_print(), but prints only the shortest cycle.
+ """
+ if not self.shortest_cycle:
+ return None
+
+ msg = []
+ indent = ""
+ for pos, pkg in enumerate(self.shortest_cycle):
+ parent = self.shortest_cycle[pos-1]
+ priorities = self.graph.nodes[parent][0][pkg]
+ if pos > 0:
+ msg.append(indent + "%s (%s)" % (pkg, priorities[-1],))
+ else:
+ msg.append(indent + "%s depends on" % pkg)
+ indent += " "
+
+ pkg = self.shortest_cycle[0]
+ parent = self.shortest_cycle[-1]
+ priorities = self.graph.nodes[parent][0][pkg]
+ msg.append(indent + "%s (%s)" % (pkg, priorities[-1],))
+
+ return "\n".join(msg)
+
+ def _get_use_mask_and_force(self, pkg):
+ return pkg.use.mask, pkg.use.force
+
+ def _get_autounmask_changes(self, pkg):
+ needed_use_config_change = self.depgraph._dynamic_config._needed_use_config_changes.get(pkg)
+ if needed_use_config_change is None:
+ return frozenset()
+
+ use, changes = needed_use_config_change
+ return frozenset(changes.keys())
+
+ def _find_suggestions(self):
+ if not self.shortest_cycle:
+ return None, None
+
+ suggestions = []
+ final_solutions = {}
+
+ for pos, pkg in enumerate(self.shortest_cycle):
+ parent = self.shortest_cycle[pos-1]
+ priorities = self.graph.nodes[parent][0][pkg]
+ parent_atoms = self.all_parent_atoms.get(pkg)
+
+ if priorities[-1].buildtime:
+ dep = " ".join(parent._metadata[k]
+ for k in Package._buildtime_keys)
+ elif priorities[-1].runtime:
+ dep = parent._metadata["RDEPEND"]
+
+ for ppkg, atom in parent_atoms:
+ if ppkg == parent:
+ changed_parent = ppkg
+ parent_atom = atom.unevaluated_atom
+ break
+
+ try:
+ affecting_use = extract_affecting_use(dep, parent_atom,
+ eapi=parent.eapi)
+ except InvalidDependString:
+ if not parent.installed:
+ raise
+ affecting_use = set()
+
+ # Make sure we don't want to change a flag that is
+ # a) in use.mask or use.force
+ # b) changed by autounmask
+
+ usemask, useforce = self._get_use_mask_and_force(parent)
+ autounmask_changes = self._get_autounmask_changes(parent)
+ untouchable_flags = frozenset(chain(usemask, useforce, autounmask_changes))
+
+ affecting_use.difference_update(untouchable_flags)
+
+ #If any of the flags we're going to touch is in REQUIRED_USE, add all
+ #other flags in REQUIRED_USE to affecting_use, to not lose any solution.
+ required_use_flags = get_required_use_flags(
+ parent._metadata.get("REQUIRED_USE", ""),
+ eapi=parent.eapi)
+
+ if affecting_use.intersection(required_use_flags):
+ # TODO: Find out exactly which REQUIRED_USE flags are
+ # entangled with affecting_use. We have to limit the
+ # number of flags since the number of loops is
+ # exponentially related (see bug #374397).
+ total_flags = set()
+ total_flags.update(affecting_use, required_use_flags)
+ total_flags.difference_update(untouchable_flags)
+ if len(total_flags) <= self.MAX_AFFECTING_USE:
+ affecting_use = total_flags
+
+ affecting_use = tuple(affecting_use)
+
+ if not affecting_use:
+ continue
+
+ if len(affecting_use) > self.MAX_AFFECTING_USE:
+ # Limit the number of combinations explored (bug #555698).
+ # First, discard irrelevant flags that are not enabled.
+ # Since extract_affecting_use doesn't distinguish between
+ # positive and negative effects (flag? vs. !flag?), assume
+ # a positive relationship.
+ current_use = self.depgraph._pkg_use_enabled(parent)
+ affecting_use = tuple(flag for flag in affecting_use
+ if flag in current_use)
+
+ if len(affecting_use) > self.MAX_AFFECTING_USE:
+ # There are too many USE combinations to explore in
+ # a reasonable amount of time.
+ continue
+
+ #We iterate over all possible settings of these use flags and gather
+ #a set of possible changes.
+ #TODO: Use the information encoded in REQUIRED_USE
+ solutions = set()
+ for use_state in product(("disabled", "enabled"),
+ repeat=len(affecting_use)):
+ current_use = set(self.depgraph._pkg_use_enabled(parent))
+ for flag, state in zip(affecting_use, use_state):
+ if state == "enabled":
+ current_use.add(flag)
+ else:
+ current_use.discard(flag)
+ try:
+ reduced_dep = use_reduce(dep,
+ uselist=current_use, flat=True)
+ except InvalidDependString:
+ if not parent.installed:
+ raise
+ reduced_dep = None
+
+ if reduced_dep is not None and \
+ parent_atom not in reduced_dep:
+ #We found an assignment that removes the atom from 'dep'.
+ #Make sure it doesn't conflict with REQUIRED_USE.
+ required_use = parent._metadata.get("REQUIRED_USE", "")
+
+ if check_required_use(required_use, current_use,
+ parent.iuse.is_valid_flag,
+ eapi=parent.eapi):
+ use = self.depgraph._pkg_use_enabled(parent)
+ solution = set()
+ for flag, state in zip(affecting_use, use_state):
+ if state == "enabled" and \
+ flag not in use:
+ solution.add((flag, True))
+ elif state == "disabled" and \
+ flag in use:
+ solution.add((flag, False))
+ solutions.add(frozenset(solution))
+
+ for solution in solutions:
+ ignore_solution = False
+ for other_solution in solutions:
+ if solution is other_solution:
+ continue
+ if solution.issuperset(other_solution):
+ ignore_solution = True
+ if ignore_solution:
+ continue
+
+ #Check if a USE change conflicts with use requirements of the parents.
+ #If a requirement is hard, ignore the suggestion.
+ #If the requirement is conditional, warn the user that other changes might be needed.
+ followup_change = False
+ parent_parent_atoms = self.depgraph._dynamic_config._parent_atoms.get(changed_parent)
+ for ppkg, atom in parent_parent_atoms:
+
+ atom = atom.unevaluated_atom
+ if not atom.use:
+ continue
+
+ for flag, state in solution:
+ if flag in atom.use.enabled or flag in atom.use.disabled:
+ ignore_solution = True
+ break
+ elif atom.use.conditional:
+ for flags in atom.use.conditional.values():
+ if flag in flags:
+ followup_change = True
+ break
+
+ if ignore_solution:
+ break
+
+ if ignore_solution:
+ continue
+
+ changes = []
+ for flag, state in solution:
+ if state:
+ changes.append(colorize("red", "+"+flag))
+ else:
+ changes.append(colorize("blue", "-"+flag))
+ msg = "- %s (Change USE: %s)\n" \
+ % (parent.cpv, " ".join(changes))
+ if followup_change:
+ msg += " (This change might require USE changes on parent packages.)"
+ suggestions.append(msg)
+ final_solutions.setdefault(pkg, set()).add(solution)
+
+ return final_solutions, suggestions
+
+ def debug_print(self):
+ """
+ Create a copy of the digraph, prune all root nodes,
+ and call the debug_print() method.
+ """
+ graph = self.graph.copy()
+ while True:
+ root_nodes = graph.root_nodes(
+ ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
+ if not root_nodes:
+ break
+ graph.difference_update(root_nodes)
+
+ graph.debug_print()
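A toy, standalone illustration of the search done in _find_suggestions() above: enumerate USE assignments with itertools.product and keep those under which the conditional dependency on the cycle partner drops out. The flag names, the atom, and reduced_dep (standing in for use_reduce) are all invented.

from itertools import product

affecting_use = ("gtk", "qt5")

def reduced_dep(enabled):
    # stands in for use_reduce(): "gtk? ( x11-libs/gtk+ )" only pulls in
    # the atom when the flag is enabled
    return ["x11-libs/gtk+"] if "gtk" in enabled else []

solutions = []
for state in product((False, True), repeat=len(affecting_use)):
    enabled = {flag for flag, on in zip(affecting_use, state) if on}
    if "x11-libs/gtk+" not in reduced_dep(enabled):
        solutions.append(enabled)

print(solutions)  # every assignment with 'gtk' disabled removes the cycle edge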
diff --git a/lib/_emerge/resolver/output.py b/lib/_emerge/resolver/output.py
new file mode 100644
index 000000000..24340576c
--- /dev/null
+++ b/lib/_emerge/resolver/output.py
@@ -0,0 +1,1033 @@
+# Copyright 2010-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Resolver output display operation.
+"""
+
+from __future__ import unicode_literals
+
+__all__ = (
+ "Display", "format_unmatched_atom",
+ )
+
+import sys
+
+import portage
+from portage import os
+from portage.dbapi.dep_expand import dep_expand
+from portage.dep import Atom, cpvequal, _repo_separator, _slot_separator
+from portage.eapi import _get_eapi_attrs
+from portage.exception import InvalidDependString, SignatureException
+from portage.localization import localized_size
+from portage.package.ebuild.config import _get_feature_flags
+from portage.package.ebuild._spawn_nofetch import spawn_nofetch
+from portage.output import ( blue, colorize, create_color_func,
+ darkblue, darkgreen, green, nc_len, teal)
+bad = create_color_func("BAD")
+from portage._sets.base import InternalPackageSet
+from portage.util import writemsg_stdout
+from portage.versions import best, cpv_getversion
+
+from _emerge.Blocker import Blocker
+from _emerge.create_world_atom import create_world_atom
+from _emerge.resolver.output_helpers import ( _DisplayConfig, _tree_display,
+ _PackageCounters, _create_use_string, _calc_changelog, PkgInfo)
+from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+ _unicode = str
+else:
+ _unicode = unicode
+
+class Display(object):
+ """Formats and outputs the depgrah supplied it for merge/re-merge, etc.
+
+ __call__()
+ @param depgraph: list
+ @param favorites: defaults to []
+ @param verbosity: integer, defaults to None
+ """
+
+ def __init__(self):
+ self.changelogs = []
+ self.print_msg = []
+ self.blockers = []
+ self.counters = _PackageCounters()
+ self.resolver = None
+ self.resolved = None
+ self.vardb = None
+ self.portdb = None
+ self.verboseadd = ''
+ self.oldlp = None
+ self.myfetchlist = None
+ self.indent = ''
+ self.use_expand = None
+ self.use_expand_hidden = None
+ self.pkgsettings = None
+ self.forced_flags = None
+ self.newlp = None
+ self.conf = None
+ self.blocker_style = None
+
+
+ def _blockers(self, blocker):
+ """Adds colorized strings to
+ self.print_msg and self.blockers
+
+ @param blocker: _emerge.Blocker.Blocker instance
+ @rtype: bool
+ Modifies class globals: self.blocker_style, self.resolved,
+ self.print_msg
+ """
+ if blocker.satisfied:
+ self.blocker_style = "PKG_BLOCKER_SATISFIED"
+ addl = "%s " % (colorize(self.blocker_style, "b"),)
+ else:
+ self.blocker_style = "PKG_BLOCKER"
+ addl = "%s " % (colorize(self.blocker_style, "B"),)
+ addl += self.empty_space_in_brackets()
+ self.resolved = dep_expand(
+ _unicode(blocker.atom).lstrip("!"), mydb=self.vardb,
+ settings=self.pkgsettings
+ )
+ if self.conf.columns and self.conf.quiet:
+ addl += " " + colorize(self.blocker_style, _unicode(self.resolved))
+ else:
+ addl = "[%s %s] %s%s" % \
+ (colorize(self.blocker_style, "blocks"),
+ addl, self.indent,
+ colorize(self.blocker_style, _unicode(self.resolved))
+ )
+ block_parents = self.conf.blocker_parents.parent_nodes(blocker)
+ block_parents = set(_unicode(pnode.cpv) for pnode in block_parents)
+ block_parents = ", ".join(block_parents)
+ if blocker.atom.blocker.overlap.forbid:
+ blocking_desc = "hard blocking"
+ else:
+ blocking_desc = "blocking"
+ if self.resolved != blocker.atom:
+ addl += colorize(self.blocker_style,
+ " (\"%s\" is %s %s)" %
+ (_unicode(blocker.atom).lstrip("!"),
+ blocking_desc, block_parents))
+ else:
+ addl += colorize(self.blocker_style,
+ " (is %s %s)" % (blocking_desc, block_parents))
+ if blocker.satisfied:
+ if not self.conf.columns:
+ self.print_msg.append(addl)
+ else:
+ self.blockers.append(addl)
+
+ def include_mask_str(self):
+ return self.conf.verbosity > 1
+
+ def gen_mask_str(self, pkg):
+ """
+ @param pkg: _emerge.Package.Package instance
+ """
+ hardmasked = pkg.isHardMasked()
+ mask_str = " "
+
+ if hardmasked:
+ mask_str = colorize("BAD", "#")
+ else:
+ keyword_mask = pkg.get_keyword_mask()
+
+ if keyword_mask is None:
+ pass
+ elif keyword_mask == "missing":
+ mask_str = colorize("BAD", "*")
+ else:
+ mask_str = colorize("WARN", "~")
+
+ return mask_str
+
+ def empty_space_in_brackets(self):
+ space = ""
+ if self.include_mask_str():
+ # add column for mask status
+ space += " "
+ return space
+
+ def map_to_use_expand(self, myvals, forced_flags=False,
+ remove_hidden=True):
+ """Map use expand variables
+
+ @param myvals: list
+ @param forced_flags: bool
+ @param remove_hidden: bool
+ @rtype ret dictionary
+ or ret dict, forced dict.
+ """
+ ret = {}
+ forced = {}
+ for exp in self.use_expand:
+ ret[exp] = []
+ forced[exp] = set()
+ for val in myvals[:]:
+ if val.startswith(exp.lower()+"_"):
+ if val in self.forced_flags:
+ forced[exp].add(val[len(exp)+1:])
+ ret[exp].append(val[len(exp)+1:])
+ myvals.remove(val)
+ ret["USE"] = myvals
+ forced["USE"] = [val for val in myvals \
+ if val in self.forced_flags]
+ if remove_hidden:
+ for exp in self.use_expand_hidden:
+ ret.pop(exp, None)
+ if forced_flags:
+ return ret, forced
+ return ret
+
+
+ def _display_use(self, pkg, pkg_info):
+ """ USE flag display
+
+ @param pkg: _emerge.Package.Package instance
+ @param pkg_info: PkgInfo instance
+ Modifies self.use_expand_hidden, self.use_expand, self.verboseadd,
+ self.forced_flags
+ """
+
+ self.forced_flags = set()
+ self.forced_flags.update(pkg.use.force)
+ self.forced_flags.update(pkg.use.mask)
+
+ cur_use = [flag for flag in self.conf.pkg_use_enabled(pkg) \
+ if flag in pkg.iuse.all]
+ cur_iuse = sorted(pkg.iuse.all)
+
+ if pkg_info.previous_pkg is not None:
+ previous_pkg = pkg_info.previous_pkg
+ old_iuse = sorted(previous_pkg.iuse.all)
+ old_use = previous_pkg.use.enabled
+ is_new = False
+ else:
+ old_iuse = []
+ old_use = []
+ is_new = True
+
+ old_use = [flag for flag in old_use if flag in old_iuse]
+
+ self.use_expand = pkg.use.expand
+ self.use_expand_hidden = pkg.use.expand_hidden
+
+ # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
+ # are the only thing that triggered reinstallation.
+ reinst_flags_map = {}
+ reinstall_for_flags = self.conf.reinstall_nodes.get(pkg)
+ reinst_expand_map = None
+ if reinstall_for_flags:
+ reinst_flags_map = self.map_to_use_expand(
+ list(reinstall_for_flags), remove_hidden=False)
+ for k in list(reinst_flags_map):
+ if not reinst_flags_map[k]:
+ del reinst_flags_map[k]
+ if not reinst_flags_map.get("USE"):
+ reinst_expand_map = reinst_flags_map.copy()
+ reinst_expand_map.pop("USE", None)
+ if reinst_expand_map and \
+ not set(reinst_expand_map).difference(
+ self.use_expand_hidden):
+ self.use_expand_hidden = \
+ set(self.use_expand_hidden).difference(
+ reinst_expand_map)
+
+ cur_iuse_map, iuse_forced = \
+ self.map_to_use_expand(cur_iuse, forced_flags=True)
+ cur_use_map = self.map_to_use_expand(cur_use)
+ old_iuse_map = self.map_to_use_expand(old_iuse)
+ old_use_map = self.map_to_use_expand(old_use)
+
+ use_expand = sorted(self.use_expand)
+ use_expand.insert(0, "USE")
+ feature_flags = _get_feature_flags(_get_eapi_attrs(pkg.eapi))
+
+ for key in use_expand:
+ if key in self.use_expand_hidden:
+ continue
+ self.verboseadd += _create_use_string(self.conf, key.upper(),
+ cur_iuse_map[key], iuse_forced[key],
+ cur_use_map[key], old_iuse_map[key],
+ old_use_map[key], is_new, feature_flags,
+ reinst_flags_map.get(key))
+ return
+
+
+ @staticmethod
+ def pkgprint(pkg_str, pkg_info):
+ """Colorizes a string acording to pkg_info settings
+
+ @param pkg_str: string
+ @param pkg_info: dictionary
+ @rtype colorized string
+ """
+ if pkg_info.merge:
+ if pkg_info.built:
+ if pkg_info.system:
+ return colorize("PKG_BINARY_MERGE_SYSTEM", pkg_str)
+ elif pkg_info.world:
+ return colorize("PKG_BINARY_MERGE_WORLD", pkg_str)
+ else:
+ return colorize("PKG_BINARY_MERGE", pkg_str)
+ else:
+ if pkg_info.system:
+ return colorize("PKG_MERGE_SYSTEM", pkg_str)
+ elif pkg_info.world:
+ return colorize("PKG_MERGE_WORLD", pkg_str)
+ else:
+ return colorize("PKG_MERGE", pkg_str)
+ elif pkg_info.operation == "uninstall":
+ return colorize("PKG_UNINSTALL", pkg_str)
+ else:
+ if pkg_info.system:
+ return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
+ elif pkg_info.world:
+ return colorize("PKG_NOMERGE_WORLD", pkg_str)
+ else:
+ return colorize("PKG_NOMERGE", pkg_str)
+
+
+ def verbose_size(self, pkg, repoadd_set, pkg_info):
+ """Determines the size of the downloads required
+
+ @param pkg: _emerge.Package.Package instance
+ @param repoadd_set: set of repos to add
+ @param pkg_info: dictionary
+ Modifies class globals: self.myfetchlist, self.counters.totalsize,
+ self.verboseadd, repoadd_set.
+ """
+ mysize = 0
+ if pkg.type_name in ("binary", "ebuild") and pkg_info.merge:
+ db = pkg.root_config.trees[
+ pkg.root_config.pkg_tree_map[pkg.type_name]].dbapi
+ kwargs = {}
+ if pkg.type_name == "ebuild":
+ kwargs["useflags"] = pkg_info.use
+ kwargs["myrepo"] = pkg.repo
+ myfilesdict = None
+ try:
+ myfilesdict = db.getfetchsizes(pkg.cpv, **kwargs)
+ except InvalidDependString as e:
+ # FIXME: validate SRC_URI earlier
+ depstr, = db.aux_get(pkg.cpv,
+ ["SRC_URI"], myrepo=pkg.repo)
+ show_invalid_depstring_notice(
+ pkg, _unicode(e))
+ raise
+ except SignatureException:
+ # missing/invalid binary package SIZE signature
+ pass
+ if myfilesdict is None:
+ myfilesdict = "[empty/missing/bad digest]"
+ else:
+ for myfetchfile in myfilesdict:
+ if myfetchfile not in self.myfetchlist:
+ mysize += myfilesdict[myfetchfile]
+ self.myfetchlist.add(myfetchfile)
+ if pkg_info.ordered:
+ self.counters.totalsize += mysize
+ self.verboseadd += localized_size(mysize)
+
+ if self.quiet_repo_display:
+ # overlay verbose
+ # assign index for a previous version in the same slot
+ if pkg_info.previous_pkg is not None:
+ repo_name_prev = pkg_info.previous_pkg.repo
+ else:
+ repo_name_prev = None
+
+ # now use the data to generate output
+ if pkg.installed or pkg_info.previous_pkg is None:
+ self.repoadd = self.conf.repo_display.repoStr(
+ pkg_info.repo_path_real)
+ else:
+ repo_path_prev = None
+ if repo_name_prev:
+ repo_path_prev = self.portdb.getRepositoryPath(
+ repo_name_prev)
+ if repo_path_prev == pkg_info.repo_path_real:
+ self.repoadd = self.conf.repo_display.repoStr(
+ pkg_info.repo_path_real)
+ else:
+ self.repoadd = "%s=>%s" % (
+ self.conf.repo_display.repoStr(repo_path_prev),
+ self.conf.repo_display.repoStr(pkg_info.repo_path_real))
+ if self.repoadd:
+ repoadd_set.add(self.repoadd)
+
+
+ def convert_myoldbest(self, pkg, pkg_info):
+ """converts and colorizes a version list to a string
+
+ @param pkg: _emerge.Package.Package instance
+ @param pkg_info: dictionary
+ @rtype string.
+ """
+ myoldbest = pkg_info.oldbest_list
+ # Convert myoldbest from a list to a string.
+ myoldbest_str = ""
+ if myoldbest:
+ versions = []
+ for pos, old_pkg in enumerate(myoldbest):
+ key = old_pkg.version
+ if key[-3:] == "-r0":
+ key = key[:-3]
+ if self.conf.verbosity == 3:
+ if pkg_info.attr_display.new_slot:
+ key += _slot_separator + old_pkg.slot
+ if old_pkg.slot != old_pkg.sub_slot:
+ key += "/" + old_pkg.sub_slot
+ elif any(x.slot + "/" + x.sub_slot != "0/0" for x in myoldbest + [pkg]):
+ key += _slot_separator + old_pkg.slot
+ if old_pkg.slot != old_pkg.sub_slot or \
+ old_pkg.slot == pkg.slot and old_pkg.sub_slot != pkg.sub_slot:
+ key += "/" + old_pkg.sub_slot
+ if not self.quiet_repo_display:
+ key += _repo_separator + old_pkg.repo
+ versions.append(key)
+ myoldbest_str = blue("["+", ".join(versions)+"]")
+ return myoldbest_str
+
+ def _append_slot(self, pkg_str, pkg, pkg_info):
+ """Potentially appends slot and subslot to package string.
+
+ @param pkg_str: string
+ @param pkg: _emerge.Package.Package instance
+ @param pkg_info: dictionary
+ @rtype string
+ """
+ if pkg_info.attr_display.new_slot:
+ pkg_str += _slot_separator + pkg_info.slot
+ if pkg_info.slot != pkg_info.sub_slot:
+ pkg_str += "/" + pkg_info.sub_slot
+ elif any(x.slot + "/" + x.sub_slot != "0/0" for x in pkg_info.oldbest_list + [pkg]):
+ pkg_str += _slot_separator + pkg_info.slot
+ if pkg_info.slot != pkg_info.sub_slot or \
+ any(x.slot == pkg_info.slot and x.sub_slot != pkg_info.sub_slot for x in pkg_info.oldbest_list):
+ pkg_str += "/" + pkg_info.sub_slot
+ return pkg_str
+
+ def _append_repository(self, pkg_str, pkg, pkg_info):
+ """Potentially appends repository to package string.
+
+ @param pkg_str: string
+ @param pkg: _emerge.Package.Package instance
+ @param pkg_info: dictionary
+ @rtype string
+ """
+ if not self.quiet_repo_display:
+ pkg_str += _repo_separator + pkg.repo
+ return pkg_str
+
+ def _append_build_id(self, pkg_str, pkg, pkg_info):
+ """Potentially appends repository to package string.
+
+ @param pkg_str: string
+ @param pkg: _emerge.Package.Package instance
+ @param pkg_info: dictionary
+ @rtype string
+ """
+ if pkg.type_name == "binary" and pkg.cpv.build_id is not None:
+ pkg_str += "-%s" % pkg.cpv.build_id
+ return pkg_str
+
+ def _set_non_root_columns(self, pkg, pkg_info):
+ """sets the indent level and formats the output
+
+ @param pkg: _emerge.Package.Package instance
+ @param pkg_info: dictionary
+ @rtype string
+ """
+ ver_str = self._append_build_id(pkg_info.ver, pkg, pkg_info)
+ if self.conf.verbosity == 3:
+ ver_str = self._append_slot(ver_str, pkg, pkg_info)
+ ver_str = self._append_repository(ver_str, pkg, pkg_info)
+ if self.conf.quiet:
+ myprint = _unicode(pkg_info.attr_display) + " " + self.indent + \
+ self.pkgprint(pkg_info.cp, pkg_info)
+ myprint = myprint+darkblue(" "+ver_str)+" "
+ myprint = myprint+pkg_info.oldbest
+ myprint = myprint+darkgreen("to "+pkg.root)
+ self.verboseadd = None
+ else:
+ if not pkg_info.merge:
+ myprint = "[%s] %s%s" % \
+ (self.pkgprint(pkg_info.operation.ljust(13), pkg_info),
+ self.indent, self.pkgprint(pkg.cp, pkg_info))
+ else:
+ myprint = "[%s %s] %s%s" % \
+ (self.pkgprint(pkg.type_name, pkg_info),
+ pkg_info.attr_display,
+ self.indent, self.pkgprint(pkg.cp, pkg_info))
+ if (self.newlp-nc_len(myprint)) > 0:
+ myprint = myprint+(" "*(self.newlp-nc_len(myprint)))
+ myprint = myprint+" "+darkblue("["+ver_str+"]")+" "
+ if (self.oldlp-nc_len(myprint)) > 0:
+ myprint = myprint+" "*(self.oldlp-nc_len(myprint))
+ myprint = myprint+pkg_info.oldbest
+ myprint += darkgreen("to " + pkg.root)
+ return myprint
+
+
+ def _set_root_columns(self, pkg, pkg_info):
+ """sets the indent level and formats the output
+
+ @param pkg: _emerge.Package.Package instance
+ @param pkg_info: dictionary
+ @rtype string
+ Modifies self.verboseadd
+ """
+ ver_str = self._append_build_id(pkg_info.ver, pkg, pkg_info)
+ if self.conf.verbosity == 3:
+ ver_str = self._append_slot(ver_str, pkg, pkg_info)
+ ver_str = self._append_repository(ver_str, pkg, pkg_info)
+ if self.conf.quiet:
+ myprint = _unicode(pkg_info.attr_display) + " " + self.indent + \
+ self.pkgprint(pkg_info.cp, pkg_info)
+ myprint = myprint+" "+green(ver_str)+" "
+ myprint = myprint+pkg_info.oldbest
+ self.verboseadd = None
+ else:
+ if not pkg_info.merge:
+ addl = self.empty_space_in_brackets()
+ myprint = "[%s%s] %s%s" % \
+ (self.pkgprint(pkg_info.operation.ljust(13), pkg_info),
+ addl, self.indent, self.pkgprint(pkg.cp, pkg_info))
+ else:
+ myprint = "[%s %s] %s%s" % \
+ (self.pkgprint(pkg.type_name, pkg_info),
+ pkg_info.attr_display,
+ self.indent, self.pkgprint(pkg.cp, pkg_info))
+ if (self.newlp-nc_len(myprint)) > 0:
+ myprint = myprint+(" "*(self.newlp-nc_len(myprint)))
+ myprint = myprint+" "+green("["+ver_str+"]")+" "
+ if (self.oldlp-nc_len(myprint)) > 0:
+ myprint = myprint+(" "*(self.oldlp-nc_len(myprint)))
+ myprint += pkg_info.oldbest
+ return myprint
+
+
+ def _set_no_columns(self, pkg, pkg_info):
+ """prints pkg info without column indentation.
+
+ @param pkg: _emerge.Package.Package instance
+ @param pkg_info: dictionary
+ @rtype the updated addl
+ """
+ pkg_str = self._append_build_id(pkg.cpv, pkg, pkg_info)
+ if self.conf.verbosity == 3:
+ pkg_str = self._append_slot(pkg_str, pkg, pkg_info)
+ pkg_str = self._append_repository(pkg_str, pkg, pkg_info)
+ if not pkg_info.merge:
+ addl = self.empty_space_in_brackets()
+ myprint = "[%s%s] %s%s %s" % \
+ (self.pkgprint(pkg_info.operation.ljust(13),
+ pkg_info), addl,
+ self.indent, self.pkgprint(pkg_str, pkg_info),
+ pkg_info.oldbest)
+ else:
+ myprint = "[%s %s] %s%s %s" % \
+ (self.pkgprint(pkg.type_name, pkg_info),
+ pkg_info.attr_display, self.indent,
+ self.pkgprint(pkg_str, pkg_info), pkg_info.oldbest)
+ return myprint
+
+ def print_messages(self, show_repos):
+ """Performs the actual output printing of the pre-formatted
+ messages
+
+ @param show_repos: bool.
+ """
+ for msg in self.print_msg:
+ if isinstance(msg, basestring):
+ writemsg_stdout("%s\n" % (msg,), noiselevel=-1)
+ continue
+ myprint, self.verboseadd, repoadd = msg
+ if self.verboseadd:
+ myprint += " " + self.verboseadd
+ if show_repos and repoadd:
+ myprint += " " + teal("[%s]" % repoadd)
+ writemsg_stdout("%s\n" % (myprint,), noiselevel=-1)
+ return
+
+
+ def print_blockers(self):
+ """Performs the actual output printing of the pre-formatted
+ blocker messages
+ """
+ for pkg in self.blockers:
+ writemsg_stdout("%s\n" % (pkg,), noiselevel=-1)
+ return
+
+
+ def print_verbose(self, show_repos):
+ """Prints the verbose output to std_out
+
+ @param show_repos: bool.
+ """
+ writemsg_stdout('\n%s\n' % (self.counters,), noiselevel=-1)
+ if show_repos:
+ # Use unicode_literals to force unicode format string so
+ # that RepoDisplay.__unicode__() is called in python2.
+ writemsg_stdout("%s" % (self.conf.repo_display,),
+ noiselevel=-1)
+ return
+
+
+ def print_changelog(self):
+ """Prints the changelog text to std_out
+ """
+ for chunk in self.changelogs:
+ writemsg_stdout(chunk,
+ noiselevel=-1)
+
+
+ def get_display_list(self, mylist):
+ """Determines the display list to process
+
+ @param mylist
+ @rtype list
+ Modifies self.counters.blocks, self.counters.blocks_satisfied,
+
+ """
+ unsatisfied_blockers = []
+ ordered_nodes = []
+ for pkg in mylist:
+ if isinstance(pkg, Blocker):
+ self.counters.blocks += 1
+ if pkg.satisfied:
+ ordered_nodes.append(pkg)
+ self.counters.blocks_satisfied += 1
+ else:
+ unsatisfied_blockers.append(pkg)
+ else:
+ ordered_nodes.append(pkg)
+ if self.conf.tree_display:
+ display_list = _tree_display(self.conf, ordered_nodes)
+ else:
+ display_list = [(pkg, 0, True) for pkg in ordered_nodes]
+ for pkg in unsatisfied_blockers:
+ display_list.append((pkg, 0, True))
+ return display_list
+
+
+ def set_pkg_info(self, pkg, ordered):
+ """Sets various pkg_info dictionary variables
+
+ @param pkg: _emerge.Package.Package instance
+ @param ordered: bool
+ @rtype pkg_info dictionary
+ Modifies self.counters.restrict_fetch,
+ self.counters.restrict_fetch_satisfied
+ """
+ pkg_info = PkgInfo()
+ pkg_info.cp = pkg.cp
+ pkg_info.ver = self.get_ver_str(pkg)
+ pkg_info.slot = pkg.slot
+ pkg_info.sub_slot = pkg.sub_slot
+ pkg_info.repo_name = pkg.repo
+ pkg_info.ordered = ordered
+ pkg_info.operation = pkg.operation
+ pkg_info.merge = ordered and pkg_info.operation == "merge"
+ if not pkg_info.merge and pkg_info.operation == "merge":
+ pkg_info.operation = "nomerge"
+ pkg_info.built = pkg.type_name != "ebuild"
+ pkg_info.ebuild_path = None
+ if ordered:
+ if pkg_info.merge:
+ if pkg.type_name == "binary":
+ self.counters.binary += 1
+ elif pkg_info.operation == "uninstall":
+ self.counters.uninst += 1
+ if pkg.type_name == "ebuild":
+ pkg_info.ebuild_path = self.portdb.findname(
+ pkg.cpv, myrepo=pkg_info.repo_name)
+ if pkg_info.ebuild_path is None:
+ raise AssertionError(
+ "ebuild not found for '%s'" % pkg.cpv)
+ pkg_info.repo_path_real = os.path.dirname(os.path.dirname(
+ os.path.dirname(pkg_info.ebuild_path)))
+ else:
+ pkg_info.repo_path_real = self.portdb.getRepositoryPath(pkg.repo)
+ pkg_info.use = list(self.conf.pkg_use_enabled(pkg))
+ if not pkg.built and pkg.operation == 'merge' and \
+ 'fetch' in pkg.restrict:
+ if pkg_info.ordered:
+ self.counters.restrict_fetch += 1
+ pkg_info.attr_display.fetch_restrict = True
+ if not self.portdb.getfetchsizes(pkg.cpv,
+ useflags=pkg_info.use, myrepo=pkg.repo):
+ pkg_info.attr_display.fetch_restrict_satisfied = True
+ if pkg_info.ordered:
+ self.counters.restrict_fetch_satisfied += 1
+ else:
+ if pkg_info.ebuild_path is not None:
+ self.restrict_fetch_list[pkg] = pkg_info
+
+ if self.vardb.cpv_exists(pkg.cpv):
+ # Do a cpv match first, in case the SLOT has changed.
+ pkg_info.previous_pkg = self.vardb.match_pkgs(
+ Atom('=' + pkg.cpv))[0]
+ else:
+ slot_matches = self.vardb.match_pkgs(pkg.slot_atom)
+ if slot_matches:
+ pkg_info.previous_pkg = slot_matches[0]
+
+ return pkg_info
+
+
+ def do_changelog(self, pkg, pkg_info):
+ """Processes and adds the changelog text to the master text for output
+
+ @param pkg: _emerge.Package.Package instance
+ @param pkg_info: dictionary
+ Modifies self.changelogs
+ """
+ if pkg_info.previous_pkg is not None:
+ ebuild_path_cl = pkg_info.ebuild_path
+ if ebuild_path_cl is None:
+ # binary package
+ ebuild_path_cl = self.portdb.findname(pkg.cpv, myrepo=pkg.repo)
+ if ebuild_path_cl is not None:
+ self.changelogs.extend(_calc_changelog(
+ ebuild_path_cl, pkg_info.previous_pkg, pkg.cpv))
+ return
+
+
+ def check_system_world(self, pkg):
+ """Checks for any occurances of the package in the system or world sets
+
+ @param pkg: _emerge.Package.Package instance
+ @rtype system and world booleans
+ """
+ root_config = self.conf.roots[pkg.root]
+ system_set = root_config.sets["system"]
+ world_set = self.conf.selected_sets[pkg.root]
+ system = False
+ world = False
+ try:
+ system = system_set.findAtomForPackage(
+ pkg, modified_use=self.conf.pkg_use_enabled(pkg))
+ world = world_set.findAtomForPackage(
+ pkg, modified_use=self.conf.pkg_use_enabled(pkg))
+ if not (self.conf.oneshot or world) and \
+ pkg.root == self.conf.target_root and \
+ self.conf.favorites.findAtomForPackage(
+ pkg, modified_use=self.conf.pkg_use_enabled(pkg)
+ ):
+ # Maybe it will be added to world now.
+ if create_world_atom(pkg, self.conf.favorites, root_config):
+ world = True
+ except InvalidDependString:
+ # This is reported elsewhere if relevant.
+ pass
+ return system, world
+
+
+ @staticmethod
+ def get_ver_str(pkg):
+ """Obtains the version string
+ @param pkg: _emerge.Package.Package instance
+ @rtype string
+ """
+ ver_str = pkg.cpv.version
+ if ver_str.endswith("-r0"):
+ ver_str = ver_str[:-3]
+ return ver_str
+
+
+ def _get_installed_best(self, pkg, pkg_info):
+ """We need to use "--emptytree" testing here rather than
+ "empty" param testing because "empty"
+ param is used for -u, where you still *do* want to see when
+ something is being upgraded.
+
+ @param pkg: _emerge.Package.Package instance
+ @param pkg_info: dictionary
+ @rtype addl, myoldbest: list, myinslotlist: list
+ Modifies self.counters.reinst, self.counters.new
+
+ """
+ myoldbest = []
+ myinslotlist = None
+ installed_versions = self.vardb.match_pkgs(Atom(pkg.cp))
+ if self.vardb.cpv_exists(pkg.cpv):
+ pkg_info.attr_display.replace = True
+ installed_version = pkg_info.previous_pkg
+ if installed_version.slot != pkg.slot or installed_version.sub_slot != pkg.sub_slot or \
+ not self.quiet_repo_display and installed_version.repo != pkg.repo:
+ myoldbest = [installed_version]
+ if pkg_info.ordered:
+ if pkg_info.merge:
+ self.counters.reinst += 1
+ # filter out old-style virtual matches
+ elif installed_versions and \
+ installed_versions[0].cp == pkg.cp:
+ myinslotlist = self.vardb.match_pkgs(pkg.slot_atom)
+ # If this is the first install of a new-style virtual, we
+ # need to filter out old-style virtual matches.
+ if myinslotlist and \
+ myinslotlist[0].cp != pkg.cp:
+ myinslotlist = None
+ if myinslotlist:
+ myoldbest = myinslotlist[:]
+ if not cpvequal(pkg.cpv,
+ best([pkg.cpv] + [x.cpv for x in myinslotlist])):
+ # Downgrade in slot
+ pkg_info.attr_display.new_version = True
+ pkg_info.attr_display.downgrade = True
+ if pkg_info.ordered:
+ self.counters.downgrades += 1
+ else:
+ # Update in slot
+ pkg_info.attr_display.new_version = True
+ if pkg_info.ordered:
+ self.counters.upgrades += 1
+ else:
+ myoldbest = installed_versions
+ pkg_info.attr_display.new = True
+ pkg_info.attr_display.new_slot = True
+ if pkg_info.ordered:
+ self.counters.newslot += 1
+ if self.conf.changelog:
+ self.do_changelog(pkg, pkg_info)
+ else:
+ pkg_info.attr_display.new = True
+ if pkg_info.ordered:
+ self.counters.new += 1
+ return myoldbest, myinslotlist
+
+
+ def __call__(self, depgraph, mylist, favorites=None, verbosity=None):
+ """The main operation to format and display the resolver output.
+
+ @param depgraph: dependency graph
+ @param mylist: list of packages being processed
+ @param favorites: list, defaults to []
+ @param verbosity: verbose level, defaults to None
+ Modifies self.conf, self.myfetchlist, self.portdb, self.vardb,
+ self.pkgsettings, self.verboseadd, self.oldlp, self.newlp,
+ self.print_msg,
+ """
+ if favorites is None:
+ favorites = []
+ self.conf = _DisplayConfig(depgraph, mylist, favorites, verbosity)
+ mylist = self.get_display_list(self.conf.mylist)
+ # files to fetch list - avoids counting a same file twice
+ # in size display (verbose mode)
+ self.myfetchlist = set()
+
+ self.quiet_repo_display = "--quiet-repo-display" in depgraph._frozen_config.myopts
+ if self.quiet_repo_display:
+ # Use this set to detect when all the "repoadd" strings are "[0]"
+ # and disable the entire repo display in this case.
+ repoadd_set = set()
+
+ self.restrict_fetch_list = {}
+
+ for mylist_index in range(len(mylist)):
+ pkg, depth, ordered = mylist[mylist_index]
+ self.portdb = self.conf.trees[pkg.root]["porttree"].dbapi
+ self.vardb = self.conf.trees[pkg.root]["vartree"].dbapi
+ self.pkgsettings = self.conf.pkgsettings[pkg.root]
+ self.indent = " " * depth
+
+ if isinstance(pkg, Blocker):
+ self._blockers(pkg)
+ else:
+ pkg_info = self.set_pkg_info(pkg, ordered)
+ pkg_info.oldbest_list, myinslotlist = \
+ self._get_installed_best(pkg, pkg_info)
+ if ordered and pkg_info.merge and \
+ not pkg_info.attr_display.new:
+ for arg, atom in depgraph._iter_atoms_for_pkg(pkg):
+ if arg.force_reinstall:
+ pkg_info.attr_display.force_reinstall = True
+ break
+
+ self.verboseadd = ""
+ if self.quiet_repo_display:
+ self.repoadd = None
+ self._display_use(pkg, pkg_info)
+ if self.conf.verbosity == 3:
+ if self.quiet_repo_display:
+ self.verbose_size(pkg, repoadd_set, pkg_info)
+ else:
+ self.verbose_size(pkg, None, pkg_info)
+
+ self.oldlp = self.conf.columnwidth - 30
+ self.newlp = self.oldlp - 30
+ pkg_info.oldbest = self.convert_myoldbest(pkg, pkg_info)
+ pkg_info.system, pkg_info.world = \
+ self.check_system_world(pkg)
+ if 'interactive' in pkg.properties and \
+ pkg.operation == 'merge':
+ pkg_info.attr_display.interactive = True
+ if ordered:
+ self.counters.interactive += 1
+
+ if self.include_mask_str():
+ pkg_info.attr_display.mask = self.gen_mask_str(pkg)
+
+ if pkg.root_config.settings["ROOT"] != "/":
+ if pkg_info.oldbest:
+ pkg_info.oldbest += " "
+ if self.conf.columns:
+ myprint = self._set_non_root_columns(pkg, pkg_info)
+ else:
+ pkg_str = self._append_build_id(
+ pkg.cpv, pkg, pkg_info)
+ if self.conf.verbosity == 3:
+ pkg_str = self._append_slot(pkg_str, pkg, pkg_info)
+ pkg_str = self._append_repository(pkg_str, pkg, pkg_info)
+ if not pkg_info.merge:
+ addl = self.empty_space_in_brackets()
+ myprint = "[%s%s] " % (
+ self.pkgprint(pkg_info.operation.ljust(13),
+ pkg_info), addl,
+ )
+ else:
+ myprint = "[%s %s] " % (
+ self.pkgprint(pkg.type_name, pkg_info),
+ pkg_info.attr_display)
+ myprint += self.indent + \
+ self.pkgprint(pkg_str, pkg_info) + " " + \
+ pkg_info.oldbest + darkgreen("to " + pkg.root)
+ else:
+ if self.conf.columns:
+ myprint = self._set_root_columns(pkg, pkg_info)
+ else:
+ myprint = self._set_no_columns(pkg, pkg_info)
+
+ if self.conf.columns and pkg.operation == "uninstall":
+ continue
+ if self.quiet_repo_display:
+ self.print_msg.append((myprint, self.verboseadd, self.repoadd))
+ else:
+ self.print_msg.append((myprint, self.verboseadd, None))
+
+ show_repos = self.quiet_repo_display and repoadd_set and repoadd_set != set(["0"])
+
+ # now finally print out the messages
+ self.print_messages(show_repos)
+ self.print_blockers()
+ if self.conf.verbosity == 3:
+ self.print_verbose(show_repos)
+ for pkg, pkg_info in self.restrict_fetch_list.items():
+ writemsg_stdout("\nFetch instructions for %s:\n" % (pkg.cpv,),
+ noiselevel=-1)
+ spawn_nofetch(self.conf.trees[pkg.root]["porttree"].dbapi,
+ pkg_info.ebuild_path)
+ if self.conf.changelog:
+ self.print_changelog()
+
+ return os.EX_OK
+
+
+def format_unmatched_atom(pkg, atom, pkg_use_enabled):
+ """
+ Returns two strings. The first string contains the
+ 'atom' with parts of the atom colored, which 'pkg'
+ doesn't match. The second string has the same number
+ of characters as the first one, but consists of only
+ white space or ^. The ^ characters have the same position
+ as the colored parts of the first string.
+ """
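+ # Illustrative only (hypothetical atom): if the version part of
+ # ">=x11-libs/gtk+-3.0" did not match pkg, the ">=" operator and the
+ # "3.0" version would be colored and marked with '^' in the second string.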
+ # Things to check:
+ # 1. Version
+ # 2. cp
+ # 3. slot/sub_slot
+ # 4. repository
+ # 5. USE
+
+ if atom.soname:
+ return "%s" % (atom,), ""
+
+ highlight = set()
+
+ def perform_coloring():
+ atom_str = ""
+ marker_str = ""
+ for ii, x in enumerate(atom):
+ if ii in highlight:
+ atom_str += colorize("BAD", x)
+ marker_str += "^"
+ else:
+ atom_str += x
+ marker_str += " "
+ return atom_str, marker_str
+
+ if atom.cp != pkg.cp:
+ # Highlight the cp part only.
+ ii = atom.find(atom.cp)
+ highlight.update(range(ii, ii + len(atom.cp)))
+ return perform_coloring()
+
+ version_atom = atom.without_repo.without_slot.without_use
+ version_atom_set = InternalPackageSet(initial_atoms=(version_atom,))
+ highlight_version = not bool(version_atom_set.findAtomForPackage(pkg,
+ modified_use=pkg_use_enabled(pkg)))
+
+ highlight_slot = False
+ if (atom.slot and atom.slot != pkg.slot) or \
+ (atom.sub_slot and atom.sub_slot != pkg.sub_slot):
+ highlight_slot = True
+
+ if highlight_version:
+ op = atom.operator
+ ver = None
+ if atom.cp != atom.cpv:
+ ver = cpv_getversion(atom.cpv)
+
+ if op == "=*":
+ op = "="
+ ver += "*"
+
+ if op is not None:
+ highlight.update(range(len(op)))
+
+ if ver is not None:
+ start = atom.rfind(ver)
+ end = start + len(ver)
+ highlight.update(range(start, end))
+
+ if highlight_slot:
+ slot_str = ":" + atom.slot
+ if atom.sub_slot:
+ slot_str += "/" + atom.sub_slot
+ if atom.slot_operator:
+ slot_str += atom.slot_operator
+ start = atom.find(slot_str)
+ end = start + len(slot_str)
+ highlight.update(range(start, end))
+
+ highlight_use = set()
+ if atom.use:
+ use_atom = "%s[%s]" % (atom.cp, str(atom.use))
+ use_atom_set = InternalPackageSet(initial_atoms=(use_atom,))
+ if not use_atom_set.findAtomForPackage(pkg, \
+ modified_use=pkg_use_enabled(pkg)):
+ missing_iuse = pkg.iuse.get_missing_iuse(
+ atom.unevaluated_atom.use.required)
+ if missing_iuse:
+ highlight_use = set(missing_iuse)
+ else:
+ #Use conditionals not met.
+ violated_atom = atom.violated_conditionals(
+ pkg_use_enabled(pkg), pkg.iuse.is_valid_flag)
+ if violated_atom.use is not None:
+ highlight_use = set(violated_atom.use.enabled.union(
+ violated_atom.use.disabled))
+
+ if highlight_use:
+ ii = atom.find("[") + 1
+ for token in atom.use.tokens:
+ if token.lstrip("-!").rstrip("=?") in highlight_use:
+ highlight.update(range(ii, ii + len(token)))
+ ii += len(token) + 1
+
+ return perform_coloring()
diff --git a/lib/_emerge/resolver/output_helpers.py b/lib/_emerge/resolver/output_helpers.py
new file mode 100644
index 000000000..b83717e93
--- /dev/null
+++ b/lib/_emerge/resolver/output_helpers.py
@@ -0,0 +1,693 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Contains private support functions for the Display class
+in output.py
+"""
+
+from __future__ import unicode_literals
+
+__all__ = (
+ )
+
+import io
+import re
+import sys
+
+from portage import os
+from portage import _encodings, _unicode_encode
+from portage._sets.base import InternalPackageSet
+from portage.exception import PackageSetNotFound
+from portage.localization import localized_size
+from portage.output import (blue, bold, colorize, create_color_func,
+ green, red, teal, turquoise, yellow)
+bad = create_color_func("BAD")
+from portage.util import writemsg
+from portage.util.SlotObject import SlotObject
+from portage.versions import catpkgsplit
+
+from _emerge.Blocker import Blocker
+from _emerge.Package import Package
+
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+
+class _RepoDisplay(object):
+ def __init__(self, roots):
+ self._shown_repos = {}
+ self._unknown_repo = False
+ repo_paths = set()
+ for root_config in roots.values():
+ for repo in root_config.settings.repositories:
+ repo_paths.add(repo.location)
+ repo_paths = list(repo_paths)
+ self._repo_paths = repo_paths
+ self._repo_paths_real = [ os.path.realpath(repo_path) \
+ for repo_path in repo_paths ]
+
+ def repoStr(self, repo_path_real):
+ real_index = -1
+ if repo_path_real:
+ real_index = self._repo_paths_real.index(repo_path_real)
+ if real_index == -1:
+ s = "?"
+ self._unknown_repo = True
+ else:
+ shown_repos = self._shown_repos
+ repo_paths = self._repo_paths
+ repo_path = repo_paths[real_index]
+ index = shown_repos.get(repo_path)
+ if index is None:
+ index = len(shown_repos)
+ shown_repos[repo_path] = index
+ s = str(index)
+ return s
+
+ def __str__(self):
+ output = []
+ shown_repos = self._shown_repos
+ unknown_repo = self._unknown_repo
+ if shown_repos or self._unknown_repo:
+ output.append("Repositories:\n")
+ show_repo_paths = list(shown_repos)
+ for repo_path, repo_index in shown_repos.items():
+ show_repo_paths[repo_index] = repo_path
+ if show_repo_paths:
+ for index, repo_path in enumerate(show_repo_paths):
+ output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
+ if unknown_repo:
+ output.append(" "+teal("[?]") + \
+ " indicates that the source repository could not be determined\n")
+ return "".join(output)
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'])
+
+
+class _PackageCounters(object):
+
+ def __init__(self):
+ self.upgrades = 0
+ self.downgrades = 0
+ self.new = 0
+ self.newslot = 0
+ self.reinst = 0
+ self.uninst = 0
+ self.blocks = 0
+ self.blocks_satisfied = 0
+ self.totalsize = 0
+ self.restrict_fetch = 0
+ self.restrict_fetch_satisfied = 0
+ self.interactive = 0
+ self.binary = 0
+
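+ # __str__ renders a one-line summary along the lines of (illustrative):
+ # Total: 5 packages (2 upgrades, 1 new, 2 reinstalls), Size of downloads: ...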
+ def __str__(self):
+ total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
+ myoutput = []
+ details = []
+ myoutput.append("Total: %s package" % total_installs)
+ if total_installs != 1:
+ myoutput.append("s")
+ if total_installs != 0:
+ myoutput.append(" (")
+ if self.upgrades > 0:
+ details.append("%s upgrade" % self.upgrades)
+ if self.upgrades > 1:
+ details[-1] += "s"
+ if self.downgrades > 0:
+ details.append("%s downgrade" % self.downgrades)
+ if self.downgrades > 1:
+ details[-1] += "s"
+ if self.new > 0:
+ details.append("%s new" % self.new)
+ if self.newslot > 0:
+ details.append("%s in new slot" % self.newslot)
+ if self.newslot > 1:
+ details[-1] += "s"
+ if self.reinst > 0:
+ details.append("%s reinstall" % self.reinst)
+ if self.reinst > 1:
+ details[-1] += "s"
+ if self.binary > 0:
+ details.append("%s binary" % self.binary)
+ if self.binary > 1:
+ details[-1] = details[-1][:-1] + "ies"
+ if self.uninst > 0:
+ details.append("%s uninstall" % self.uninst)
+ if self.uninst > 1:
+ details[-1] += "s"
+ if self.interactive > 0:
+ details.append("%s %s" % (self.interactive,
+ colorize("WARN", "interactive")))
+ myoutput.append(", ".join(details))
+ if total_installs != 0:
+ myoutput.append(")")
+ myoutput.append(", Size of downloads: %s" % localized_size(self.totalsize))
+ if self.restrict_fetch:
+ myoutput.append("\nFetch Restriction: %s package" % \
+ self.restrict_fetch)
+ if self.restrict_fetch > 1:
+ myoutput.append("s")
+ if self.restrict_fetch_satisfied < self.restrict_fetch:
+ myoutput.append(bad(" (%s unsatisfied)") % \
+ (self.restrict_fetch - self.restrict_fetch_satisfied))
+ if self.blocks > 0:
+ myoutput.append("\nConflict: %s block" % \
+ self.blocks)
+ if self.blocks > 1:
+ myoutput.append("s")
+ if self.blocks_satisfied < self.blocks:
+ myoutput.append(bad(" (%s unsatisfied)") % \
+ (self.blocks - self.blocks_satisfied))
+ return "".join(myoutput)
+
+
+class _DisplayConfig(object):
+
+ def __init__(self, depgraph, mylist, favorites, verbosity):
+ frozen_config = depgraph._frozen_config
+ dynamic_config = depgraph._dynamic_config
+
+ self.mylist = mylist
+ self.favorites = InternalPackageSet(favorites, allow_repo=True)
+ self.verbosity = verbosity
+
+ if self.verbosity is None:
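+ # Old-style and/or chain: 1 with --quiet, 3 with --verbose, otherwise 2.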
+ self.verbosity = ("--quiet" in frozen_config.myopts and 1 or \
+ "--verbose" in frozen_config.myopts and 3 or 2)
+
+ self.oneshot = "--oneshot" in frozen_config.myopts or \
+ "--onlydeps" in frozen_config.myopts
+ self.columns = "--columns" in frozen_config.myopts
+ self.tree_display = "--tree" in frozen_config.myopts
+ self.alphabetical = "--alphabetical" in frozen_config.myopts
+ self.quiet = "--quiet" in frozen_config.myopts
+ self.all_flags = self.verbosity == 3 or self.quiet
+ self.print_use_string = self.verbosity != 1 or "--verbose" in frozen_config.myopts
+ self.changelog = "--changelog" in frozen_config.myopts
+ self.edebug = frozen_config.edebug
+ self.unordered_display = "--unordered-display" in frozen_config.myopts
+
+ mywidth = 130
+ if "COLUMNWIDTH" in frozen_config.settings:
+ try:
+ mywidth = int(frozen_config.settings["COLUMNWIDTH"])
+ except ValueError as e:
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ writemsg("!!! Unable to parse COLUMNWIDTH='%s'\n" % \
+ frozen_config.settings["COLUMNWIDTH"], noiselevel=-1)
+ del e
+ self.columnwidth = mywidth
+
+ if "--quiet-repo-display" in frozen_config.myopts:
+ self.repo_display = _RepoDisplay(frozen_config.roots)
+ self.trees = frozen_config.trees
+ self.pkgsettings = frozen_config.pkgsettings
+ self.target_root = frozen_config.target_root
+ self.running_root = frozen_config._running_root
+ self.roots = frozen_config.roots
+
+ # Create a set of selected packages for each root
+ self.selected_sets = {}
+ for root_name, root in self.roots.items():
+ try:
+ self.selected_sets[root_name] = InternalPackageSet(
+ initial_atoms=root.setconfig.getSetAtoms("selected"))
+ except PackageSetNotFound:
+ # A nested set could not be resolved, so ignore nested sets.
+ self.selected_sets[root_name] = root.sets["selected"]
+
+ self.blocker_parents = dynamic_config._blocker_parents
+ self.reinstall_nodes = dynamic_config._reinstall_nodes
+ self.digraph = dynamic_config.digraph
+ self.blocker_uninstalls = dynamic_config._blocker_uninstalls
+ self.package_tracker = dynamic_config._package_tracker
+ self.set_nodes = dynamic_config._set_nodes
+
+ self.pkg_use_enabled = depgraph._pkg_use_enabled
+ self.pkg = depgraph._pkg
+
+
+def _create_use_string(conf, name, cur_iuse, iuse_forced, cur_use,
+ old_iuse, old_use,
+ is_new, feature_flags, reinst_flags):
+
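+ # Builds a fragment such as USE="ssl* -gtk% (-doc)" (illustrative): '*' marks
+ # a flag whose state changed, '%' a flag added to or dropped from IUSE,
+ # parentheses mark forced or removed flags, and braces mark feature_flags.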
+ if not conf.print_use_string:
+ return ""
+
+ enabled = []
+ if conf.alphabetical:
+ disabled = enabled
+ removed = enabled
+ else:
+ disabled = []
+ removed = []
+ cur_iuse = set(cur_iuse)
+ enabled_flags = cur_iuse.intersection(cur_use)
+ removed_iuse = set(old_iuse).difference(cur_iuse)
+ any_iuse = cur_iuse.union(old_iuse)
+ any_iuse = list(any_iuse)
+ any_iuse.sort()
+
+ for flag in any_iuse:
+ flag_str = None
+ isEnabled = False
+ reinst_flag = reinst_flags and flag in reinst_flags
+ if flag in enabled_flags:
+ isEnabled = True
+ if is_new or flag in old_use and \
+ (conf.all_flags or reinst_flag):
+ flag_str = red(flag)
+ elif flag not in old_iuse:
+ flag_str = yellow(flag) + "%*"
+ elif flag not in old_use:
+ flag_str = green(flag) + "*"
+ elif flag in removed_iuse:
+ if conf.all_flags or reinst_flag:
+ flag_str = yellow("-" + flag) + "%"
+ if flag in old_use:
+ flag_str += "*"
+ flag_str = "(" + flag_str + ")"
+ removed.append(flag_str)
+ continue
+ else:
+ if is_new or flag in old_iuse and \
+ flag not in old_use and \
+ (conf.all_flags or reinst_flag):
+ flag_str = blue("-" + flag)
+ elif flag not in old_iuse:
+ flag_str = yellow("-" + flag)
+ if flag not in iuse_forced:
+ flag_str += "%"
+ elif flag in old_use:
+ flag_str = green("-" + flag) + "*"
+ if flag_str:
+ if flag in feature_flags:
+ flag_str = "{" + flag_str + "}"
+ elif flag in iuse_forced:
+ flag_str = "(" + flag_str + ")"
+ if isEnabled:
+ enabled.append(flag_str)
+ else:
+ disabled.append(flag_str)
+
+ if conf.alphabetical:
+ ret = " ".join(enabled)
+ else:
+ ret = " ".join(enabled + disabled + removed)
+ if ret:
+ ret = '%s="%s" ' % (name, ret)
+ return ret
+
+
+def _tree_display(conf, mylist):
+
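+ # Each entry of the returned display list is a (node, depth, ordered) tuple,
+ # the same shape produced by the non-tree branch of Display.get_display_list().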
+ # If there are any Uninstall instances, add the
+ # corresponding blockers to the digraph.
+ mygraph = conf.digraph.copy()
+
+ executed_uninstalls = set(node for node in mylist \
+ if isinstance(node, Package) and node.operation == "unmerge")
+
+ for uninstall in conf.blocker_uninstalls.leaf_nodes():
+ uninstall_parents = \
+ conf.blocker_uninstalls.parent_nodes(uninstall)
+ if not uninstall_parents:
+ continue
+
+ # Remove the corresponding "nomerge" node and substitute
+ # the Uninstall node.
+ inst_pkg = conf.pkg(uninstall.cpv, "installed",
+ uninstall.root_config, installed=True)
+
+ try:
+ mygraph.remove(inst_pkg)
+ except KeyError:
+ pass
+
+ try:
+ inst_pkg_blockers = conf.blocker_parents.child_nodes(inst_pkg)
+ except KeyError:
+ inst_pkg_blockers = []
+
+ # Break the Package -> Uninstall edges.
+ mygraph.remove(uninstall)
+
+ # Resolution of a package's blockers
+ # depends on its own uninstallation.
+ for blocker in inst_pkg_blockers:
+ mygraph.add(uninstall, blocker)
+
+ # Expand Package -> Uninstall edges into
+ # Package -> Blocker -> Uninstall edges.
+ for blocker in uninstall_parents:
+ mygraph.add(uninstall, blocker)
+ for parent in conf.blocker_parents.parent_nodes(blocker):
+ if parent != inst_pkg:
+ mygraph.add(blocker, parent)
+
+ # If the uninstall task did not need to be executed because
+ # of an upgrade, display Blocker -> Upgrade edges since the
+ # corresponding Blocker -> Uninstall edges will not be shown.
+ upgrade_node = next(conf.package_tracker.match(
+ uninstall.root, uninstall.slot_atom), None)
+
+ if upgrade_node is not None and \
+ uninstall not in executed_uninstalls:
+ for blocker in uninstall_parents:
+ mygraph.add(upgrade_node, blocker)
+
+ if conf.unordered_display:
+ display_list = _unordered_tree_display(mygraph, mylist)
+ else:
+ display_list = _ordered_tree_display(conf, mygraph, mylist)
+
+ _prune_tree_display(display_list)
+
+ return display_list
+
+
+def _unordered_tree_display(mygraph, mylist):
+ display_list = []
+ seen_nodes = set()
+
+ def print_node(node, depth):
+
+ if node in seen_nodes:
+ pass
+ else:
+ seen_nodes.add(node)
+
+ if isinstance(node, (Blocker, Package)):
+ display_list.append((node, depth, True))
+ else:
+ depth = -1
+
+ for child_node in mygraph.child_nodes(node):
+ print_node(child_node, depth + 1)
+
+ for root_node in mygraph.root_nodes():
+ print_node(root_node, 0)
+
+ return display_list
+
+
+def _ordered_tree_display(conf, mygraph, mylist):
+ depth = 0
+ shown_edges = set()
+ tree_nodes = []
+ display_list = []
+
+ for x in mylist:
+ depth = len(tree_nodes)
+ while depth and x not in \
+ mygraph.child_nodes(tree_nodes[depth-1]):
+ depth -= 1
+ if depth:
+ tree_nodes = tree_nodes[:depth]
+ tree_nodes.append(x)
+ display_list.append((x, depth, True))
+ shown_edges.add((x, tree_nodes[depth-1]))
+ else:
+ traversed_nodes = set() # prevent endless circles
+ traversed_nodes.add(x)
+ def add_parents(current_node, ordered):
+ parent_nodes = None
+ # Do not traverse to parents if this node is
+ # an argument or a direct member of a set that has
+ # been specified as an argument (system or world).
+ if current_node not in conf.set_nodes:
+ parent_nodes = mygraph.parent_nodes(current_node)
+ if parent_nodes:
+ child_nodes = set(mygraph.child_nodes(current_node))
+ selected_parent = None
+ # First, try to avoid a direct cycle.
+ for node in parent_nodes:
+ if not isinstance(node, (Blocker, Package)):
+ continue
+ if node not in traversed_nodes and \
+ node not in child_nodes:
+ edge = (current_node, node)
+ if edge in shown_edges:
+ continue
+ selected_parent = node
+ break
+ if not selected_parent:
+ # A direct cycle is unavoidable.
+ for node in parent_nodes:
+ if not isinstance(node, (Blocker, Package)):
+ continue
+ if node not in traversed_nodes:
+ edge = (current_node, node)
+ if edge in shown_edges:
+ continue
+ selected_parent = node
+ break
+ if selected_parent:
+ shown_edges.add((current_node, selected_parent))
+ traversed_nodes.add(selected_parent)
+ add_parents(selected_parent, False)
+ display_list.append((current_node,
+ len(tree_nodes), ordered))
+ tree_nodes.append(current_node)
+ tree_nodes = []
+ add_parents(x, True)
+
+ return display_list
+
+
+def _prune_tree_display(display_list):
+ last_merge_depth = 0
+ for i in range(len(display_list) - 1, -1, -1):
+ node, depth, ordered = display_list[i]
+ if not ordered and depth == 0 and i > 0 \
+ and node == display_list[i-1][0] and \
+ display_list[i-1][1] == 0:
+ # An ordered node got a consecutive duplicate
+ # when the tree was being filled in.
+ del display_list[i]
+ continue
+ if ordered and isinstance(node, Package) \
+ and node.operation in ('merge', 'uninstall'):
+ last_merge_depth = depth
+ continue
+ if depth >= last_merge_depth or \
+ i < len(display_list) - 1 and \
+ depth >= display_list[i+1][1]:
+ del display_list[i]
+
+
+def _calc_changelog(ebuildpath, current, next):
+ if ebuildpath is None or not os.path.exists(ebuildpath):
+ return []
+ current = '-'.join(catpkgsplit(current)[1:])
+ if current.endswith('-r0'):
+ current = current[:-3]
+ next = '-'.join(catpkgsplit(next)[1:])
+ if next.endswith('-r0'):
+ next = next[:-3]
+
+ changelogdir = os.path.dirname(ebuildpath)
+ changelogs = ['ChangeLog']
+ # ChangeLog-YYYY (see bug #389611)
+ changelogs.extend(sorted((fn for fn in os.listdir(changelogdir)
+ if fn.startswith('ChangeLog-')), reverse=True))
+
+ divisions = []
+ found_current = False
+ for fn in changelogs:
+ changelogpath = os.path.join(changelogdir, fn)
+ try:
+ with io.open(_unicode_encode(changelogpath,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ changelog = f.read()
+ except EnvironmentError:
+ return []
+ for node in _find_changelog_tags(changelog):
+ if node[0] == current:
+ found_current = True
+ break
+ else:
+ divisions.append(node)
+ if found_current:
+ break
+
+ if not found_current:
+ return []
+
+ #print 'XX from',current,'to',next
+ #for div,text in divisions: print 'XX',div
+ # skip entries for all revisions above the one we are about to emerge
+ later_rev_index = None
+ for i, node in enumerate(divisions):
+ if node[0] == next:
+ if later_rev_index is not None:
+ first_node = divisions[later_rev_index]
+ # Discard the later revision and the first ChangeLog entry
+ # that follows it. We want to display all the entries after
+ # that first entry, as discussed in bug #373009.
+ trimmed_lines = []
+ iterator = iter(first_node[1])
+ for l in iterator:
+ if not l:
+ # end of the first entry that's discarded
+ break
+ first_node = (None, list(iterator))
+ divisions = [first_node] + divisions[later_rev_index+1:]
+ break
+ if node[0] is not None:
+ later_rev_index = i
+
+ output = []
+ prev_blank = False
+ prev_rev = False
+ for rev, lines in divisions:
+ if rev is not None:
+ if not (prev_blank or prev_rev):
+ output.append("\n")
+ output.append(bold('*' + rev) + '\n')
+ prev_rev = True
+ prev_blank = False
+ if lines:
+ prev_rev = False
+ if not prev_blank:
+ output.append("\n")
+ for l in lines:
+ output.append(l + "\n")
+ output.append("\n")
+ prev_blank = True
+ return output
+
+def _strip_header_comments(lines):
+ # strip leading and trailing blank or header/comment lines
+ i = 0
+ while i < len(lines) and (not lines[i] or lines[i][:1] == "#"):
+ i += 1
+ if i:
+ lines = lines[i:]
+ while lines and (not lines[-1] or lines[-1][:1] == "#"):
+ lines.pop()
+ return lines
+
+def _find_changelog_tags(changelog):
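+ # Returns a list of (release, lines) tuples in file order; the first tuple
+ # carries release None for any text that precedes the first "*<version>" header.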
+ divs = []
+ if not changelog:
+ return divs
+ release = None
+ release_end = 0
+ for match in re.finditer(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?$',
+ changelog, re.M):
+ divs.append((release, _strip_header_comments(
+ changelog[release_end:match.start()].splitlines())))
+ release_end = match.end()
+ release = match.group(1)
+ if release.endswith('.ebuild'):
+ release = release[:-7]
+ if release.endswith('-r0'):
+ release = release[:-3]
+
+ divs.append((release,
+ _strip_header_comments(changelog[release_end:].splitlines())))
+ return divs
+
+class PkgInfo(object):
+ """Simple class to hold instance attributes for current
+ information about the pkg being printed.
+ """
+
+ __slots__ = ("attr_display", "built", "cp",
+ "ebuild_path", "fetch_symbol", "merge",
+ "oldbest", "oldbest_list", "operation", "ordered", "previous_pkg",
+ "repo_name", "repo_path_real", "slot", "sub_slot", "system", "use", "ver", "world")
+
+
+ def __init__(self):
+ self.built = False
+ self.cp = ''
+ self.ebuild_path = ''
+ self.fetch_symbol = ''
+ self.merge = ''
+ self.oldbest = ''
+ self.oldbest_list = []
+ self.operation = ''
+ self.ordered = False
+ self.previous_pkg = None
+ self.repo_path_real = ''
+ self.repo_name = ''
+ self.slot = ''
+ self.sub_slot = ''
+ self.system = False
+ self.use = ''
+ self.ver = ''
+ self.world = False
+ self.attr_display = PkgAttrDisplay()
+
+class PkgAttrDisplay(SlotObject):
+
+ __slots__ = ("downgrade", "fetch_restrict", "fetch_restrict_satisfied",
+ "force_reinstall",
+ "interactive", "mask", "new", "new_slot", "new_version", "replace")
+
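+ # Renders the single-letter status columns shown next to each package
+ # (I, N/r, S/R, F/f, U, D, plus an optional mask character), with a
+ # blank space for each attribute that is unset.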
+ def __str__(self):
+ output = []
+
+ if self.interactive:
+ output.append(colorize("WARN", "I"))
+ else:
+ output.append(" ")
+
+ if self.new or self.force_reinstall:
+ if self.force_reinstall:
+ output.append(red("r"))
+ else:
+ output.append(green("N"))
+ else:
+ output.append(" ")
+
+ if self.new_slot or self.replace:
+ if self.replace:
+ output.append(yellow("R"))
+ else:
+ output.append(green("S"))
+ else:
+ output.append(" ")
+
+ if self.fetch_restrict or self.fetch_restrict_satisfied:
+ if self.fetch_restrict_satisfied:
+ output.append(green("f"))
+ else:
+ output.append(red("F"))
+ else:
+ output.append(" ")
+
+ if self.new_version:
+ output.append(turquoise("U"))
+ else:
+ output.append(" ")
+
+ if self.downgrade:
+ output.append(blue("D"))
+ else:
+ output.append(" ")
+
+ if self.mask is not None:
+ output.append(self.mask)
+
+ return "".join(output)
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'])
diff --git a/lib/_emerge/resolver/package_tracker.py b/lib/_emerge/resolver/package_tracker.py
new file mode 100644
index 000000000..ccb0b11cf
--- /dev/null
+++ b/lib/_emerge/resolver/package_tracker.py
@@ -0,0 +1,386 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import bisect
+import collections
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.dep:Atom,match_from_list',
+ 'portage.util:cmp_sort_key',
+ 'portage.versions:vercmp',
+)
+
+_PackageConflict = collections.namedtuple("_PackageConflict", ["root", "pkgs", "atom", "description"])
+
+class PackageConflict(_PackageConflict):
+ """
+ Class to track the reason for a conflict and the conflicting packages.
+ """
+ def __iter__(self):
+ return iter(self.pkgs)
+
+ def __contains__(self, pkg):
+ return pkg in self.pkgs
+
+ def __len__(self):
+ return len(self.pkgs)
+
+
+class PackageTracker(object):
+ """
+ **Behavior**
+
+ This section is intended to give you a good conceptual overview of the ``PackageTracker`` class and its general
+ behavior -- how you can expect it to behave and how it in turn expects to be used by the programmer.
+
+ This class is used to model the behavior of a real Gentoo or other system using Portage for package management,
+ along with the installed and to-be-installed packages. The installed packages are ones that are already on the
+ system and recorded in ``/var/db/pkg``, while the to-be-installed packages are a group of packages that Portage is
+ considering installing on the system, based on the information in Portage's dependency graph. Multiple roots are
+ supported, so that situations can be modeled where ROOT is set to a non-default value (non-``/``).
+
+ You can use the add_pkg() method to add a to-be-merged package to the PackageTracker, and ``add_installed_pkg()`` to
+ add an already-installed package to the package tracker. Typical use of the package tracker involves the
+ ``depgraph.py`` code populating the package tracker with calls to ``add_installed_pkg()`` to add all installed
+ packages on the system, and then it is initialized and ready for use. At that point, ``depgraph.py`` can use
+ ``add_pkg()`` to add to-be-installed packages to the system.
+
+ It's worth mentioning that ``PackageTracker`` uses ``Package`` objects as arguments, and stores these objects
+ internally. There are parts of the code that ensure that a ``Package`` instance is added to the PackageTracker
+ only once.
+
+ Note that when a to-be-merged package is added to the package tracker via ``add_pkg()``, it will "cover up"
+ (replace) any installed package that shares the same root-catpkg-slot or root-catpkg-version, meaning that calling
+ the ``all_pkgs()`` or ``match()`` method will not return the installed package in the list. And the code does
+ support the scenario where ``add_installed_pkg(pkg2)`` is called *after* a call to ``add_pkg(pkg1)`` -- in this
+ case, if ``pkg1`` would 'cover up' ``pkg2``, this will be identified and handled correctly.
+
+ But the package tracker is designed to have an important behavior in this regard -- because PackageTracker has a
+ ``remove_pkg()`` method, these replaced/covered-up packages are not permanently removed -- so if you ``remove_pkg()`` a
+ to-be-installed package that was "replacing" an installed package, the installed package will "reappear". This
+ removal functionality is used by the slot conflict code in ``depgraph.py`` to modify the list of to-be-installed
+ packages as it addresses slot conflicts.
+
+ One of the main purposes of the PackageTracker is to detect conflicts between packages. Conflicts are detected
+ on to-be-installed packages only.
+
+ A slot conflict is a situation where a to-be-installed package is added to the package tracker via ``add_pkg()``,
+ and there is already a to-be-installed package added that has the same root, catpkg and slot. These cannot co-exist.
+
+ A cpv conflict is a situation where a to-be-installed package is added to the package tracker via ``add_pkg()``, and
+ there is already a to-be-installed package added that has the same root, catpkg, and version+revision. These cannot
+ co-exist.
+
+ The package tracker does not prevent slot and cpv conflicts from occurring. Instead, it allows them to be recorded
+ and the ``conflicts()`` and ``slot_conflicts()`` methods will cause the package tracker to look at its internal data
+ structures and generate ``PackageConflict()`` objects for each conflict it finds.
+
+ The ``match()`` method is used extensively by ``depgraph.py`` to find packages that match a particular dependency
+ atom. The code now also supports soname dependencies.
+
+ **Future Functionality**
+
+ The package tracker may be extended in the future to track additional useful information:
+
+ * Packages that block one another. This information is not currently injected into the package tracker.
+
+ * Sub-slot conflicts. It is possible to identify situations where a to-be-installed package is in a new sub-slot.
+ In this case, the depgraph can be queried for parents of this dependency, and these parents can be scheduled
+ to be rebuilt.
+
+ :ivar _cp_pkg_map: The collection of to-be-installed (not yet merged) packages. We care about conflicts in these
+ packages.
+ :ivar _cp_vdb_pkg_map: The collection of already-installed packages.
+ :ivar _multi_pkgs: A list of keys in ``self._cp_pkg_map`` that have potential slot and cpv conflicts.
+ :ivar _replacing: The mechanism by which ``PackageTracker`` records to-be-installed packages that 'cover up'
+ already-installed packages. ``self._replacing[new_pkg] == [ replaced_installed_pkg_1, ... ]``.
+ :ivar _replaced_by: ``self._replaced_by[installed_pkg] == [ new_pkg_1, new_pkg_2 ]``
+ """
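+ # A minimal usage sketch (hypothetical Package objects; the real callers
+ # live in depgraph.py):
+ #
+ #     tracker = PackageTracker()
+ #     tracker.add_installed_pkg(installed_foo)   # from /var/db/pkg
+ #     tracker.add_pkg(new_foo)                   # covers up installed_foo
+ #     matches = list(tracker.match(root, Atom("dev-libs/foo")))
+ #     for conflict in tracker.conflicts():
+ #         print(conflict.description, conflict.root, conflict.atom)
+ #     tracker.remove_pkg(new_foo)                # installed_foo "reappears"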
+
+ def __init__(self, soname_deps=False):
+
+ """
+ :param bool soname_deps: Whether support for soname deps should be enabled.
+ """
+
+ self._cp_pkg_map = collections.defaultdict(list)
+ self._cp_vdb_pkg_map = collections.defaultdict(list)
+ self._multi_pkgs = []
+
+ # Cache for result of conflicts().
+ self._conflicts_cache = None
+
+ self._replacing = collections.defaultdict(list)
+ self._replaced_by = collections.defaultdict(list)
+
+ self._match_cache = collections.defaultdict(dict)
+ if soname_deps:
+ self._provides_index = collections.defaultdict(list)
+ else:
+ self._provides_index = None
+
+ def add_pkg(self, pkg):
+ """
+ Add a new package to the tracker. Records conflicts as necessary.
+ """
+ cp_key = pkg.root, pkg.cp
+
+ if any(other is pkg for other in self._cp_pkg_map[cp_key]):
+ return
+
+ self._cp_pkg_map[cp_key].append(pkg)
+
+ if len(self._cp_pkg_map[cp_key]) > 1:
+ self._conflicts_cache = None
+ if len(self._cp_pkg_map[cp_key]) == 2:
+ self._multi_pkgs.append(cp_key)
+
+ self._replacing[pkg] = []
+ for installed in self._cp_vdb_pkg_map.get(cp_key, []):
+ if installed.slot_atom == pkg.slot_atom or \
+ installed.cpv == pkg.cpv:
+ self._replacing[pkg].append(installed)
+ self._replaced_by[installed].append(pkg)
+
+ self._add_provides(pkg)
+
+ self._match_cache.pop(cp_key, None)
+
+ def _add_provides(self, pkg):
+ if (self._provides_index is not None and
+ pkg.provides is not None):
+ index = self._provides_index
+ root = pkg.root
+ for atom in pkg.provides:
+ # Use bisect.insort for ordered match results.
+ bisect.insort(index[(root, atom)], pkg)
+
+ def add_installed_pkg(self, installed):
+ """
+ Add an installed package during vdb load. These packages
+ are returned by match() and all_pkgs() only as long as no
+ to-be-installed package added via add_pkg() replaces them.
+ """
+ cp_key = installed.root, installed.cp
+ if any(other is installed for other in self._cp_vdb_pkg_map[cp_key]):
+ return
+
+ self._cp_vdb_pkg_map[cp_key].append(installed)
+
+ for pkg in self._cp_pkg_map.get(cp_key, []):
+ if installed.slot_atom == pkg.slot_atom or \
+ installed.cpv == pkg.cpv:
+ self._replacing[pkg].append(installed)
+ self._replaced_by[installed].append(pkg)
+
+ self._match_cache.pop(cp_key, None)
+
+ def remove_pkg(self, pkg):
+ """
+ Removes the package from the tracker.
+ Raises KeyError if it isn't present.
+ """
+ cp_key = pkg.root, pkg.cp
+ try:
+ self._cp_pkg_map.get(cp_key, []).remove(pkg)
+ except ValueError:
+ raise KeyError(pkg)
+
+ if self._cp_pkg_map[cp_key]:
+ self._conflicts_cache = None
+
+ if not self._cp_pkg_map[cp_key]:
+ del self._cp_pkg_map[cp_key]
+ elif len(self._cp_pkg_map[cp_key]) == 1:
+ self._multi_pkgs = [other_cp_key for other_cp_key in self._multi_pkgs \
+ if other_cp_key != cp_key]
+
+ for installed in self._replacing[pkg]:
+ self._replaced_by[installed].remove(pkg)
+ if not self._replaced_by[installed]:
+ del self._replaced_by[installed]
+ del self._replacing[pkg]
+
+ if self._provides_index is not None:
+ index = self._provides_index
+ root = pkg.root
+ for atom in pkg.provides:
+ key = (root, atom)
+ items = index[key]
+ try:
+ items.remove(pkg)
+ except ValueError:
+ pass
+ if not items:
+ del index[key]
+
+ self._match_cache.pop(cp_key, None)
+
+ def discard_pkg(self, pkg):
+ """
+ Removes the package from the tracker.
+ Does not raise KeyError if it is not present.
+ """
+ try:
+ self.remove_pkg(pkg)
+ except KeyError:
+ pass
+
+ def match(self, root, atom, installed=True):
+ """
+ Iterates over the packages matching 'atom'.
+ If 'installed' is True, installed non-replaced
+ packages may also be returned.
+ """
+ if atom.soname:
+ return iter(self._provides_index.get((root, atom), []))
+
+ cp_key = root, atom.cp
+ cache_key = root, atom, atom.unevaluated_atom, installed
+ try:
+ return iter(self._match_cache.get(cp_key, {})[cache_key])
+ except KeyError:
+ pass
+
+ candidates = self._cp_pkg_map.get(cp_key, [])[:]
+
+ if installed:
+ for installed in self._cp_vdb_pkg_map.get(cp_key, []):
+ if installed not in self._replaced_by:
+ candidates.append(installed)
+
+ ret = match_from_list(atom, candidates)
+ ret.sort(key=cmp_sort_key(lambda x, y: vercmp(x.version, y.version)))
+ self._match_cache[cp_key][cache_key] = ret
+
+ return iter(ret)
+
+ def conflicts(self):
+ """
+ Iterates over the currently existing conflicts.
+ """
+ if self._conflicts_cache is None:
+ self._conflicts_cache = []
+
+ for cp_key in self._multi_pkgs:
+
+ # Categorize packages according to cpv and slot.
+ slot_map = collections.defaultdict(list)
+ cpv_map = collections.defaultdict(list)
+ for pkg in self._cp_pkg_map[cp_key]:
+ slot_key = pkg.root, pkg.slot_atom
+ cpv_key = pkg.root, pkg.cpv
+ slot_map[slot_key].append(pkg)
+ cpv_map[cpv_key].append(pkg)
+
+ # Slot conflicts.
+ for slot_key in slot_map:
+ slot_pkgs = slot_map[slot_key]
+ if len(slot_pkgs) > 1:
+ self._conflicts_cache.append(PackageConflict(
+ description = "slot conflict",
+ root = slot_key[0],
+ atom = slot_key[1],
+ pkgs = tuple(slot_pkgs),
+ ))
+
+ # CPV conflicts.
+ for cpv_key in cpv_map:
+ cpv_pkgs = cpv_map[cpv_key]
+ if len(cpv_pkgs) > 1:
+ # Make sure this cpv conflict is not a slot conflict at the same time.
+ # Ignore it if it is.
+ slots = set(pkg.slot for pkg in cpv_pkgs)
+ if len(slots) > 1:
+ self._conflicts_cache.append(PackageConflict(
+ description = "cpv conflict",
+ root = cpv_key[0],
+ atom = cpv_key[1],
+ pkgs = tuple(cpv_pkgs),
+ ))
+
+ return iter(self._conflicts_cache)
+
+ def slot_conflicts(self):
+ """
+ Iterates over present slot conflicts.
+ This is only intended for consumers that haven't been
+ updated to deal with other kinds of conflicts.
+ This function should be removed once all consumers are updated.
+ """
+ return (conflict for conflict in self.conflicts() \
+ if conflict.description == "slot conflict")
+
+ def all_pkgs(self, root):
+ """
+ Iterates over all packages for the given root
+ present in the tracker, including the installed
+ packages.
+ """
+ for cp_key in self._cp_pkg_map:
+ if cp_key[0] == root:
+ for pkg in self._cp_pkg_map[cp_key]:
+ yield pkg
+
+ for cp_key in self._cp_vdb_pkg_map:
+ if cp_key[0] == root:
+ for installed in self._cp_vdb_pkg_map[cp_key]:
+ if installed not in self._replaced_by:
+ yield installed
+
+ def contains(self, pkg, installed=True):
+ """
+ Checks if the package is in the tracker.
+ If 'installed' is True, returns True for
+ non-replaced installed packages.
+ """
+ cp_key = pkg.root, pkg.cp
+ for other in self._cp_pkg_map.get(cp_key, []):
+ if other is pkg:
+ return True
+
+ if installed:
+ for installed in self._cp_vdb_pkg_map.get(cp_key, []):
+ if installed is pkg and \
+ installed not in self._replaced_by:
+ return True
+
+ return False
+
+ def __contains__(self, pkg):
+ """
+ Checks if the package is in the tracker.
+ Returns True for non-replaced installed packages.
+ """
+ return self.contains(pkg, installed=True)
+
+
+class PackageTrackerDbapiWrapper(object):
+ """
+ A wrapper class that provides parts of the legacy
+ dbapi interface. Remove it once all consumers have
+ died.
+ """
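+ # Minimal sketch (hypothetical): legacy consumers that still expect a dbapi
+ # can do, e.g.:
+ #     fakedb = PackageTrackerDbapiWrapper(root, tracker)
+ #     fakedb.cpv_inject(pkg)              # forwards to tracker.add_pkg(pkg)
+ #     fakedb.match(Atom("dev-libs/foo"))  # version-sorted matches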
+ def __init__(self, root, package_tracker):
+ self._root = root
+ self._package_tracker = package_tracker
+
+ def cpv_inject(self, pkg):
+ self._package_tracker.add_pkg(pkg)
+
+ def match_pkgs(self, atom):
+ ret = sorted(self._package_tracker.match(self._root, atom),
+ key=cmp_sort_key(lambda x, y: vercmp(x.version, y.version)))
+ return ret
+
+ def __iter__(self):
+ return self._package_tracker.all_pkgs(self._root)
+
+ def match(self, atom, use_cache=None):
+ return self.match_pkgs(atom)
+
+ def cp_list(self, cp):
+ return self.match_pkgs(Atom(cp))
diff --git a/lib/_emerge/resolver/slot_collision.py b/lib/_emerge/resolver/slot_collision.py
new file mode 100644
index 000000000..f676b38c8
--- /dev/null
+++ b/lib/_emerge/resolver/slot_collision.py
@@ -0,0 +1,1185 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function, unicode_literals
+
+import sys
+
+from portage import _encodings, _unicode_encode
+from _emerge.AtomArg import AtomArg
+from _emerge.Package import Package
+from _emerge.PackageArg import PackageArg
+from portage.dep import check_required_use
+from portage.output import colorize
+from portage._sets.base import InternalPackageSet
+from portage.util import writemsg
+from portage.versions import cpv_getversion, vercmp
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+class slot_conflict_handler(object):
+ """This class keeps track of all slot conflicts and provides
+ an interface to get possible solutions.
+
+ How it works:
+ If two packages have been pulled into a slot, one needs to
+ go away. This class focuses on cases where this can be achieved
+ with a change in USE settings.
+
+ 1) Find out what causes a given slot conflict. There are
+ three possibilities:
+
+ a) One parent needs foo-1:0 and another one needs foo-2:0,
+ nothing we can do about this. This is called a 'version
+ based conflict'.
+
+ b) All parents of one of the conflict packages could use
+ another conflict package. This is called an 'unspecific
+ conflict'. This should be caught by the backtracking logic.
+ Ask the user to enable -uN (if not already enabled). If -uN is
+ enabled, this case is treated in the same way as c).
+
+ c) Neither a 'version based conflict' nor an 'unspecific
+ conflict'. Ignoring use deps would result in an
+ 'unspecific conflict'. This is called a 'specific conflict'.
+ This is the only conflict we try to find suggestions for.
+
+ 2) Computing suggestions.
+
+ Def.: "configuration": A list of packages, containing exactly one
+ package from each slot conflict.
+
+ We try to find USE changes such that all parents of conflict packages
+ can work with a package in the configuration we're looking at. This
+ is done for all possible configurations, except if the 'all-ebuild'
+ configuration has a suggestion. In this case we immediately abort the
+ search.
+ For the current configuration, all use flags that are part of violated
+ use deps are computed. This is done for every slot conflict on its own.
+
+ Def.: "solution (candidate)": An assignment of "enabled" / "disabled"
+ values for the use flags that are part of violated use deps.
+
+ Now all involved use flags for the current configuration are known. For
+ now they have an undetermined value. Fix their value in the
+ following cases:
+ * The use dep in the parent atom is unconditional.
+ * The parent package is 'installed'.
+ * The conflict package is 'installed'.
+
+ USE of 'installed' packages can't be changed. This always requires a
+ non-installed package.
+
+ During this procedure, contradictions may occur. In this case the
+ configuration has no solution.
+
+ Now generate all possible solution candidates with fixed values. Check
+ if they don't introduce new conflicts.
+
+ We have found a valid assignment for all involved use flags. Compute
+ the needed USE changes and prepare the message for the user.
+ """
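+ # Illustrative data shapes (hypothetical): a "configuration" holds one package
+ # per slot conflict, e.g. [foo-1.0 (slot 0), bar-2.1 (slot 1)], and a
+ # "solution" maps packages to flag states such as
+ # {parent_pkg: {"ssl": "enabled", "gtk": "disabled"}}.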
+
+ _check_configuration_max = 1024
+
+ def __init__(self, depgraph):
+ self.depgraph = depgraph
+ self.myopts = depgraph._frozen_config.myopts
+ self.debug = "--debug" in self.myopts
+ if self.debug:
+ writemsg("Starting slot conflict handler\n", noiselevel=-1)
+
+ # List of tuples, where each tuple represents a slot conflict.
+ self.all_conflicts = []
+ for conflict in depgraph._dynamic_config._package_tracker.slot_conflicts():
+ self.all_conflicts.append((conflict.root, conflict.atom, conflict.pkgs))
+
+ #A dict mapping packages to pairs of parent package
+ #and parent atom
+ self.all_parents = depgraph._dynamic_config._parent_atoms
+
+ #set containing all nodes that are part of a slot conflict
+ conflict_nodes = set()
+
+ #a list containing list of packages that form a slot conflict
+ conflict_pkgs = []
+
+ #a list containing sets of (parent, atom) pairs that have pulled packages
+ #into the same slot
+ all_conflict_atoms_by_slotatom = []
+
+ #fill conflict_pkgs, all_conflict_atoms_by_slotatom
+ for root, atom, pkgs in self.all_conflicts:
+ conflict_pkgs.append(list(pkgs))
+ all_conflict_atoms_by_slotatom.append(set())
+
+ for pkg in pkgs:
+ conflict_nodes.add(pkg)
+ for ppkg, atom in self.all_parents.get(pkg):
+ all_conflict_atoms_by_slotatom[-1].add((ppkg, atom))
+
+ #Variable that holds the non-explanation part of the message.
+ self.conflict_msg = []
+ #If any conflict package was pulled in only by unspecific atoms, then
+ #the user forgot to enable --newuse and/or --update.
+ self.conflict_is_unspecific = False
+
+ #Indicate if the conflict is caused by incompatible version requirements
+ #cat/pkg-2 pulled in, but a parent requires <cat/pkg-2
+ self.is_a_version_conflict = False
+
+ self._prepare_conflict_msg_and_check_for_specificity()
+
+ #a list of dicts that hold the needed USE values to solve all conflicts
+ self.solutions = []
+
+ #a list of dicts that hold the needed USE changes to solve all conflicts
+ self.changes = []
+
+ #configuration = a list of packages with exactly one package from every
+ #single slot conflict
+ config_gen = _configuration_generator(conflict_pkgs)
+ first_config = True
+
+ #go through all configurations and collect solutions
+ while(True):
+ config = config_gen.get_configuration()
+ if not config:
+ break
+
+ if self.debug:
+ writemsg("\nNew configuration:\n", noiselevel=-1)
+ for pkg in config:
+ writemsg(" %s\n" % (pkg,), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+
+ new_solutions = self._check_configuration(config, all_conflict_atoms_by_slotatom, conflict_nodes)
+
+ if new_solutions:
+ self.solutions.extend(new_solutions)
+
+ if first_config:
+ #If the "all ebuild"-config gives a solution, use it.
+ #Otherwise enumerate all other solutions.
+ if self.debug:
+ writemsg("All-ebuild configuration has a solution. Aborting search.\n", noiselevel=-1)
+ break
+ first_config = False
+
+ if len(conflict_pkgs) > 4:
+ # The number of configurations to check grows exponentially in the number of conflict_pkgs.
+ # To prevent excessive running times, only check the "all-ebuild" configuration,
+ # if the number of conflict packages is too large.
+ if self.debug:
+ writemsg("\nAborting search due to excessive number of configurations.\n", noiselevel=-1)
+ break
+
+ for solution in self.solutions:
+ self._add_change(self._get_change(solution))
+
+
+ def get_conflict(self):
+ return "".join(self.conflict_msg)
+
+ def _is_subset(self, change1, change2):
+ """
+ Checks if a set of changes 'change1' is a subset of the changes 'change2'.
+ """
+ #All pkgs of change1 have to be in change2.
+ #For every package in change1, the changes have to be a subset of
+ #the corresponding changes in change2.
+ for pkg in change1:
+ if pkg not in change2:
+ return False
+
+ for flag in change1[pkg]:
+ if flag not in change2[pkg]:
+ return False
+ if change1[pkg][flag] != change2[pkg][flag]:
+ return False
+ return True
+
+ def _add_change(self, new_change):
+ """
+ Make sure to keep only minimal changes. If "+foo" does the job, discard "+foo -bar".
+ """
+ changes = self.changes
+ #Make sure there is no other solution that is a subset of the new solution.
+ ignore = False
+ to_be_removed = []
+ for change in changes:
+ if self._is_subset(change, new_change):
+ ignore = True
+ break
+ elif self._is_subset(new_change, change):
+ to_be_removed.append(change)
+
+ if not ignore:
+ #Discard all existing change that are a superset of the new change.
+ for obsolete_change in to_be_removed:
+ changes.remove(obsolete_change)
+ changes.append(new_change)
+
+ def _get_change(self, solution):
+ _pkg_use_enabled = self.depgraph._pkg_use_enabled
+ new_change = {}
+ for pkg in solution:
+ for flag, state in solution[pkg].items():
+ real_flag = pkg.iuse.get_real_flag(flag)
+ if real_flag is None:
+ # Triggered by use-dep defaults.
+ continue
+ if state == "enabled" and flag not in _pkg_use_enabled(pkg):
+ new_change.setdefault(pkg, {})[real_flag] = True
+ elif state == "disabled" and flag in _pkg_use_enabled(pkg):
+ new_change.setdefault(pkg, {})[real_flag] = False
+ return new_change
+
+ def _prepare_conflict_msg_and_check_for_specificity(self):
+ """
+ Print all slot conflicts in a human readable way.
+ """
+ _pkg_use_enabled = self.depgraph._pkg_use_enabled
+ usepkgonly = "--usepkgonly" in self.myopts
+ need_rebuild = {}
+ verboseconflicts = "--verbose-conflicts" in self.myopts
+ any_omitted_parents = False
+ msg = self.conflict_msg
+ indent = " "
+ msg.append("\n!!! Multiple package instances within a single " + \
+ "package slot have been pulled\n")
+ msg.append("!!! into the dependency graph, resulting" + \
+ " in a slot conflict:\n\n")
+
+ for root, slot_atom, pkgs in self.all_conflicts:
+ msg.append("%s" % (slot_atom,))
+ if root != self.depgraph._frozen_config._running_root.root:
+ msg.append(" for %s" % (root,))
+ msg.append("\n\n")
+
+ for pkg in pkgs:
+ msg.append(indent)
+ msg.append("%s" % (pkg,))
+ parent_atoms = self.all_parents.get(pkg)
+ if parent_atoms:
+ #Create a list of collision reasons and map them to sets
+ #of atoms.
+ #Possible reasons:
+ # ("version", "ge") for operator >=, >
+ # ("version", "eq") for operator =, ~
+ # ("version", "le") for operator <=, <
+ # ("use", "<some use flag>") for unmet use conditionals
+ collision_reasons = {}
+ num_all_specific_atoms = 0
+
+ for ppkg, atom in parent_atoms:
+ if not atom.soname:
+ atom_set = InternalPackageSet(
+ initial_atoms=(atom,))
+ atom_without_use_set = InternalPackageSet(
+ initial_atoms=(atom.without_use,))
+ atom_without_use_and_slot_set = \
+ InternalPackageSet(initial_atoms=(
+ atom.without_use.without_slot,))
+
+ for other_pkg in pkgs:
+ if other_pkg == pkg:
+ continue
+
+ if atom.soname:
+ # The soname does not match.
+ key = ("soname", atom)
+ atoms = collision_reasons.get(key, set())
+ atoms.add((ppkg, atom, other_pkg))
+ num_all_specific_atoms += 1
+ collision_reasons[key] = atoms
+ elif not atom_without_use_and_slot_set.findAtomForPackage(other_pkg,
+ modified_use=_pkg_use_enabled(other_pkg)):
+ if atom.operator is not None:
+ # The version range does not match.
+ sub_type = None
+ if atom.operator in (">=", ">"):
+ sub_type = "ge"
+ elif atom.operator in ("=", "~"):
+ sub_type = "eq"
+ elif atom.operator in ("<=", "<"):
+ sub_type = "le"
+
+ key = ("version", sub_type)
+ atoms = collision_reasons.get(key, set())
+ atoms.add((ppkg, atom, other_pkg))
+ num_all_specific_atoms += 1
+ collision_reasons[key] = atoms
+
+ elif not atom_without_use_set.findAtomForPackage(other_pkg, \
+ modified_use=_pkg_use_enabled(other_pkg)):
+ # The slot and/or sub_slot does not match.
+ key = ("slot", (atom.slot, atom.sub_slot, atom.slot_operator))
+ atoms = collision_reasons.get(key, set())
+ atoms.add((ppkg, atom, other_pkg))
+ num_all_specific_atoms += 1
+ collision_reasons[key] = atoms
+
+ elif not atom_set.findAtomForPackage(other_pkg, \
+ modified_use=_pkg_use_enabled(other_pkg)):
+ missing_iuse = other_pkg.iuse.get_missing_iuse(
+ atom.unevaluated_atom.use.required)
+ if missing_iuse:
+ for flag in missing_iuse:
+ atoms = collision_reasons.get(("use", flag), set())
+ atoms.add((ppkg, atom, other_pkg))
+ collision_reasons[("use", flag)] = atoms
+ num_all_specific_atoms += 1
+ else:
+ #Use conditionals not met.
+ violated_atom = atom.violated_conditionals(_pkg_use_enabled(other_pkg), \
+ other_pkg.iuse.is_valid_flag)
+ if violated_atom.use is None:
+ # Something like bug #453400 caused the
+ # above findAtomForPackage call to
+ # return None unexpectedly.
+ msg = ("\n\n!!! BUG: Detected "
+ "USE dep match inconsistency:\n"
+ "\tppkg: %s\n"
+ "\tviolated_atom: %s\n"
+ "\tatom: %s unevaluated: %s\n"
+ "\tother_pkg: %s IUSE: %s USE: %s\n" %
+ (ppkg,
+ violated_atom,
+ atom,
+ atom.unevaluated_atom,
+ other_pkg,
+ sorted(other_pkg.iuse.all),
+ sorted(_pkg_use_enabled(other_pkg))))
+ writemsg(msg, noiselevel=-2)
+ raise AssertionError(
+ 'BUG: USE dep match inconsistency')
+ for flag in violated_atom.use.enabled.union(violated_atom.use.disabled):
+ atoms = collision_reasons.get(("use", flag), set())
+ atoms.add((ppkg, atom, other_pkg))
+ collision_reasons[("use", flag)] = atoms
+ num_all_specific_atoms += 1
+ elif isinstance(ppkg, AtomArg) and other_pkg.installed:
+ parent_atoms = collision_reasons.get(("AtomArg", None), set())
+ parent_atoms.add((ppkg, atom))
+ collision_reasons[("AtomArg", None)] = parent_atoms
+ num_all_specific_atoms += 1
+
+ msg.append(" pulled in by\n")
+
+ selected_for_display = set()
+ unconditional_use_deps = set()
+
+ for (type, sub_type), parents in collision_reasons.items():
+ #From each (type, sub_type) pair select at least one atom.
+ #Try to select as few atoms as possible
+
+ if type == "version":
+ #Find the atom with version that is as far away as possible.
+ best_matches = {}
+ for ppkg, atom, other_pkg in parents:
+ if atom.cp in best_matches:
+ cmp = vercmp( \
+ cpv_getversion(atom.cpv), \
+ cpv_getversion(best_matches[atom.cp][1].cpv))
+
+ if (sub_type == "ge" and cmp > 0) \
+ or (sub_type == "le" and cmp < 0) \
+ or (sub_type == "eq" and cmp > 0):
+ best_matches[atom.cp] = (ppkg, atom)
+ else:
+ best_matches[atom.cp] = (ppkg, atom)
+ if verboseconflicts:
+ selected_for_display.add((ppkg, atom))
+ if not verboseconflicts:
+ selected_for_display.update(
+ best_matches.values())
+ elif type in ("soname", "slot"):
+ # Check for packages that might need to
+ # be rebuilt, but cannot be rebuilt for
+ # some reason.
+ for ppkg, atom, other_pkg in parents:
+ if not (isinstance(ppkg, Package) and ppkg.installed):
+ continue
+ if not (atom.soname or atom.slot_operator_built):
+ continue
+ if self.depgraph._frozen_config.excluded_pkgs.findAtomForPackage(ppkg,
+ modified_use=self.depgraph._pkg_use_enabled(ppkg)):
+ selected_for_display.add((ppkg, atom))
+ need_rebuild[ppkg] = 'matched by --exclude argument'
+ elif self.depgraph._frozen_config.useoldpkg_atoms.findAtomForPackage(ppkg,
+ modified_use=self.depgraph._pkg_use_enabled(ppkg)):
+ selected_for_display.add((ppkg, atom))
+ need_rebuild[ppkg] = 'matched by --useoldpkg-atoms argument'
+ elif usepkgonly:
+ # This case is tricky, so keep quiet in order to avoid false-positives.
+ pass
+ elif not self.depgraph._equiv_ebuild_visible(ppkg):
+ selected_for_display.add((ppkg, atom))
+ need_rebuild[ppkg] = 'ebuild is masked or unavailable'
+
+ for ppkg, atom, other_pkg in parents:
+ selected_for_display.add((ppkg, atom))
+ if not verboseconflicts:
+ break
+ elif type == "use":
+ #Prefer atoms with unconditional use deps over conditional ones,
+ #because it's not possible to change them on the parent, which
+ #means there are fewer possible solutions.
+ use = sub_type
+ for ppkg, atom, other_pkg in parents:
+ missing_iuse = other_pkg.iuse.get_missing_iuse(
+ atom.unevaluated_atom.use.required)
+ if missing_iuse:
+ unconditional_use_deps.add((ppkg, atom))
+ else:
+ parent_use = None
+ if isinstance(ppkg, Package):
+ parent_use = _pkg_use_enabled(ppkg)
+ violated_atom = atom.unevaluated_atom.violated_conditionals(
+ _pkg_use_enabled(other_pkg),
+ other_pkg.iuse.is_valid_flag,
+ parent_use=parent_use)
+ # It's possible for autounmask to change
+ # parent_use such that the unevaluated form
+ # of the atom now matches, even though the
+ # earlier evaluated form (from before
+ # autounmask changed parent_use) does not.
+ # In this case (see bug #374423), it's
+ # expected that violated_atom.use is None.
+ # Since the atom now matches, we don't want
+ # to display it in the slot conflict
+ # message, so we simply ignore it and rely
+ # on the autounmask display to communicate
+ # the necessary USE change to the user.
+ if violated_atom.use is None:
+ continue
+ if use in violated_atom.use.enabled or \
+ use in violated_atom.use.disabled:
+ unconditional_use_deps.add((ppkg, atom))
+ # When USE flags are removed, it can be
+ # essential to see all broken reverse
+ # dependencies here, so don't omit any.
+ # If the list is long, people can simply
+ # use a pager.
+ selected_for_display.add((ppkg, atom))
+ elif type == "AtomArg":
+ for ppkg, atom in parents:
+ selected_for_display.add((ppkg, atom))
+
+ def highlight_violations(atom, version, use, slot_violated):
+ """Colorize parts of an atom"""
+ atom_str = "%s" % (atom,)
+ colored_idx = set()
+ if version:
+ op = atom.operator
+ ver = None
+ if atom.cp != atom.cpv:
+ ver = cpv_getversion(atom.cpv)
+ slot = atom.slot
+ sub_slot = atom.sub_slot
+ slot_operator = atom.slot_operator
+
+ if op == "=*":
+ op = "="
+ ver += "*"
+
+ slot_str = ""
+ if slot:
+ slot_str = ":" + slot
+ if sub_slot:
+ slot_str += "/" + sub_slot
+ if slot_operator:
+ slot_str += slot_operator
+
+ # Compute color_idx before adding the color codes
+ # as these change the indices of the letters.
+ if op is not None:
+ colored_idx.update(range(len(op)))
+
+ if ver is not None:
+ start = atom_str.rfind(ver)
+ end = start + len(ver)
+ colored_idx.update(range(start, end))
+
+ if slot_str:
+ ii = atom_str.find(slot_str)
+ colored_idx.update(range(ii, ii + len(slot_str)))
+
+
+ if op is not None:
+ atom_str = atom_str.replace(op, colorize("BAD", op), 1)
+
+ if ver is not None:
+ start = atom_str.rfind(ver)
+ end = start + len(ver)
+ atom_str = atom_str[:start] + \
+ colorize("BAD", ver) + \
+ atom_str[end:]
+
+ if slot_str:
+ atom_str = atom_str.replace(slot_str, colorize("BAD", slot_str), 1)
+
+ elif slot_violated:
+ slot = atom.slot
+ sub_slot = atom.sub_slot
+ slot_operator = atom.slot_operator
+
+ slot_str = ""
+ if slot:
+ slot_str = ":" + slot
+ if sub_slot:
+ slot_str += "/" + sub_slot
+ if slot_operator:
+ slot_str += slot_operator
+
+ if slot_str:
+ ii = atom_str.find(slot_str)
+ colored_idx.update(range(ii, ii + len(slot_str)))
+ atom_str = atom_str.replace(slot_str, colorize("BAD", slot_str), 1)
+
+ if use and atom.use.tokens:
+ use_part_start = atom_str.find("[")
+ use_part_end = atom_str.find("]")
+
+ new_tokens = []
+ # Compute start index in non-colored atom.
+ ii = str(atom).find("[") + 1
+ for token in atom.use.tokens:
+ if token.lstrip("-!").rstrip("=?") in use:
+ new_tokens.append(colorize("BAD", token))
+ colored_idx.update(range(ii, ii + len(token)))
+ else:
+ new_tokens.append(token)
+ ii += 1 + len(token)
+
+ atom_str = atom_str[:use_part_start] \
+ + "[%s]" % (",".join(new_tokens),) + \
+ atom_str[use_part_end+1:]
+
+ return atom_str, colored_idx
+
+ # Show unconditional use deps first, since those
+ # are more problematic than the conditional kind.
+ ordered_list = list(unconditional_use_deps)
+ if len(selected_for_display) > len(unconditional_use_deps):
+ for parent_atom in selected_for_display:
+ if parent_atom not in unconditional_use_deps:
+ ordered_list.append(parent_atom)
+ for parent_atom in ordered_list:
+ parent, atom = parent_atom
+ if atom.soname:
+ msg.append("%s required by %s\n" %
+ (atom, parent))
+ elif isinstance(parent, PackageArg):
+ # For PackageArg it's
+ # redundant to display the atom attribute.
+ msg.append("%s\n" % (parent,))
+ elif isinstance(parent, AtomArg):
+ msg.append(2*indent)
+ msg.append("%s (Argument)\n" % (atom,))
+ else:
+ # Display the specific atom from SetArg or
+ # Package types.
+ version_violated = False
+ slot_violated = False
+ use = []
+ for (type, sub_type), parents in collision_reasons.items():
+ for x in parents:
+ if parent == x[0] and atom == x[1]:
+ if type == "version":
+ version_violated = True
+ elif type == "slot":
+ slot_violated = True
+ elif type == "use":
+ use.append(sub_type)
+ break
+
+ atom_str, colored_idx = highlight_violations(atom.unevaluated_atom,
+ version_violated, use, slot_violated)
+
+ if version_violated or slot_violated:
+ self.is_a_version_conflict = True
+
+ cur_line = "%s required by %s\n" % (atom_str, parent)
+ marker_line = ""
+ for ii in range(len(cur_line)):
+ if ii in colored_idx:
+ marker_line += "^"
+ else:
+ marker_line += " "
+ marker_line += "\n"
+ msg.append(2*indent)
+ msg.append(cur_line)
+ msg.append(2*indent)
+ msg.append(marker_line)
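+ # Illustrative example, not part of the original code: for a
+ # hypothetical violated atom, the two lines appended above look
+ # roughly like
+ #     >=dev-libs/foo-1.2:2 required by sys-apps/bar-3.4.5
+ #     ^^             ^^^^^
+ # where the carets mark the operator, version and slot substrings
+ # that highlight_violations() colorized.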
+
+ if not selected_for_display:
+ msg.append(2*indent)
+ msg.append("(no parents that aren't satisfied by other packages in this slot)\n")
+ self.conflict_is_unspecific = True
+
+ omitted_parents = num_all_specific_atoms - len(selected_for_display)
+ if omitted_parents:
+ any_omitted_parents = True
+ msg.append(2*indent)
+ if len(selected_for_display) > 1:
+ msg.append("(and %d more with the same problems)\n" % omitted_parents)
+ else:
+ msg.append("(and %d more with the same problem)\n" % omitted_parents)
+ else:
+ msg.append(" (no parents)\n")
+ msg.append("\n")
+
+ if any_omitted_parents:
+ msg.append(colorize("INFORM",
+ "NOTE: Use the '--verbose-conflicts'"
+ " option to display parents omitted above"))
+ msg.append("\n")
+
+ if need_rebuild:
+ msg.append("\n!!! The slot conflict(s) shown above involve package(s) which may need to\n")
+ msg.append("!!! be rebuilt in order to solve the conflict(s). However, the following\n")
+ msg.append("!!! package(s) cannot be rebuilt for the reason(s) shown:\n\n")
+ for ppkg, reason in need_rebuild.items():
+ msg.append("%s%s: %s\n" % (indent, ppkg, reason))
+ msg.append("\n")
+
+ msg.append("\n")
+
+ def get_explanation(self):
+ msg = ""
+
+ if self.is_a_version_conflict:
+ return None
+
+ if self.conflict_is_unspecific and \
+ not ("--newuse" in self.myopts and "--update" in self.myopts):
+ msg += "!!! Enabling --newuse and --update might solve this conflict.\n"
+ msg += "!!! If not, it might help emerge to give a more specific suggestion.\n\n"
+ return msg
+
+ solutions = self.solutions
+ if not solutions:
+ return None
+
+ if len(solutions)==1:
+ if len(self.all_conflicts) == 1:
+ msg += "It might be possible to solve this slot collision\n"
+ else:
+ msg += "It might be possible to solve these slot collisions\n"
+ msg += "by applying all of the following changes:\n"
+ else:
+ if len(self.all_conflicts) == 1:
+ msg += "It might be possible to solve this slot collision\n"
+ else:
+ msg += "It might be possible to solve these slot collisions\n"
+ msg += "by applying one of the following solutions:\n"
+
+ def print_change(change, indent=""):
+ mymsg = ""
+ for pkg in change:
+ changes = []
+ for flag, state in change[pkg].items():
+ if state:
+ changes.append(colorize("red", "+" + flag))
+ else:
+ changes.append(colorize("blue", "-" + flag))
+ mymsg += indent + "- " + pkg.cpv + " (Change USE: %s" % " ".join(changes) + ")\n"
+ mymsg += "\n"
+ return mymsg
+
+
+ if len(self.changes) == 1:
+ msg += print_change(self.changes[0], " ")
+ else:
+ for change in self.changes:
+ msg += " Solution: Apply all of:\n"
+ msg += print_change(change, " ")
+
+ return msg
+
+ def _check_configuration(self, config, all_conflict_atoms_by_slotatom, conflict_nodes):
+ """
+ Given a configuration, required USE changes are computed and checked to
+ make sure that no new conflict is introduced. Returns a list of solutions,
+ or False if the configuration has to be rejected.
+ """
+ _pkg_use_enabled = self.depgraph._pkg_use_enabled
+ #An installed package can only be part of a valid configuration if it has no
+ #pending USE changes. Otherwise the ebuild will be pulled in again.
+ for pkg in config:
+ if not pkg.installed:
+ continue
+
+ for root, atom, pkgs in self.all_conflicts:
+ if pkg not in pkgs:
+ continue
+ for other_pkg in pkgs:
+ if other_pkg == pkg:
+ continue
+ if pkg.iuse.all.symmetric_difference(other_pkg.iuse.all) \
+ or _pkg_use_enabled(pkg).symmetric_difference(_pkg_use_enabled(other_pkg)):
+ if self.debug:
+ writemsg(("%s has pending USE changes. "
+ "Rejecting configuration.\n") % (pkg,),
+ noiselevel=-1)
+ return False
+
+ #A list of dicts. Keeps one dict per slot conflict. [ { flag1: "enabled" }, { flag2: "disabled" } ]
+ all_involved_flags = []
+
+ #Go through all slot conflicts
+ for id, pkg in enumerate(config):
+ involved_flags = {}
+ for ppkg, atom in all_conflict_atoms_by_slotatom[id]:
+ if not atom.package:
+ continue
+
+ if ppkg in conflict_nodes and not ppkg in config:
+ #The parent is part of a slot conflict itself and is
+ #not part of the current config.
+ continue
+
+ i = InternalPackageSet(initial_atoms=(atom,))
+ if i.findAtomForPackage(pkg, modified_use=_pkg_use_enabled(pkg)):
+ continue
+
+ i = InternalPackageSet(initial_atoms=(atom.without_use,))
+ if not i.findAtomForPackage(pkg, modified_use=_pkg_use_enabled(pkg)):
+ #Version range does not match.
+ if self.debug:
+ writemsg(("%s does not satify all version "
+ "requirements. Rejecting configuration.\n") %
+ (pkg,), noiselevel=-1)
+ return False
+
+ if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required):
+ #Missing IUSE.
+ #FIXME: This needs to support use dep defaults.
+ if self.debug:
+ writemsg(("%s misses needed flags from IUSE."
+ " Rejecting configuration.\n") % (pkg,),
+ noiselevel=-1)
+ return False
+
+ if not isinstance(ppkg, Package) or ppkg.installed:
+ #We cannot assume that it's possible to reinstall the package. Do not
+ #check whether any of its atoms have USE conditionals.
+ violated_atom = atom.violated_conditionals(_pkg_use_enabled(pkg), \
+ pkg.iuse.is_valid_flag)
+ else:
+ violated_atom = atom.unevaluated_atom.violated_conditionals(_pkg_use_enabled(pkg), \
+ pkg.iuse.is_valid_flag, parent_use=_pkg_use_enabled(ppkg))
+ if violated_atom.use is None:
+ # It's possible for autounmask to change
+ # parent_use such that the unevaluated form
+ # of the atom now matches, even though the
+ # earlier evaluated form (from before
+ # autounmask changed parent_use) does not.
+ # In this case (see bug #374423), it's
+ # expected that violated_atom.use is None.
+ continue
+
+ if pkg.installed and (violated_atom.use.enabled or violated_atom.use.disabled):
+ #We can't change USE of an installed package (only of an ebuild, but that is already
+ #part of the conflict, isn't it?)
+ if self.debug:
+ writemsg(("%s: installed package would need USE"
+ " changes. Rejecting configuration.\n") % (pkg,),
+ noiselevel=-1)
+ return False
+
+ #Compute the required USE changes. A flag can be forced to "enabled" or "disabled",
+ #it can be in the conditional state "cond" that allows both values, or it can be
+ #in the "contradiction" state, which means that some atoms insist on different
+ #values for this flag and those kill this configuration.
+ for flag in violated_atom.use.required:
+ state = involved_flags.get(flag, "")
+
+ if flag in violated_atom.use.enabled:
+ if state in ("", "cond", "enabled"):
+ state = "enabled"
+ else:
+ state = "contradiction"
+ elif flag in violated_atom.use.disabled:
+ if state in ("", "cond", "disabled"):
+ state = "disabled"
+ else:
+ state = "contradiction"
+ else:
+ if state == "":
+ state = "cond"
+
+ involved_flags[flag] = state
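+ # A minimal sketch, not part of the original code, of the state merge
+ # performed above (the extra "" -> "cond" case for purely conditional
+ # flags is handled by the else branch):
+ #
+ #   def merge_flag_state(state, wanted):
+ #       # wanted is either "enabled" or "disabled"
+ #       if state in ("", "cond", wanted):
+ #           return wanted
+ #       return "contradiction"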
+
+ if pkg.installed:
+ #We don't change the installed pkg's USE. Force all involved flags
+ #to the same values as the installed package has them.
+ for flag in involved_flags:
+ if involved_flags[flag] == "enabled":
+ if not flag in _pkg_use_enabled(pkg):
+ involved_flags[flag] = "contradiction"
+ elif involved_flags[flag] == "disabled":
+ if flag in _pkg_use_enabled(pkg):
+ involved_flags[flag] = "contradiction"
+ elif involved_flags[flag] == "cond":
+ if flag in _pkg_use_enabled(pkg):
+ involved_flags[flag] = "enabled"
+ else:
+ involved_flags[flag] = "disabled"
+
+ for flag, state in involved_flags.items():
+ if state == "contradiction":
+ if self.debug:
+ writemsg("Contradicting requirements found for flag " + \
+ flag + ". Rejecting configuration.\n", noiselevel=-1)
+ return False
+
+ all_involved_flags.append(involved_flags)
+
+ if self.debug:
+ writemsg("All involved flags:\n", noiselevel=-1)
+ for id, involved_flags in enumerate(all_involved_flags):
+ writemsg(" %s\n" % (config[id],), noiselevel=-1)
+ for flag, state in involved_flags.items():
+ writemsg(" " + flag + ": " + state + "\n", noiselevel=-1)
+
+ solutions = []
+ sol_gen = _solution_candidate_generator(all_involved_flags)
+ checked = 0
+ while True:
+ candidate = sol_gen.get_candidate()
+ if not candidate:
+ break
+ solution = self._check_solution(config, candidate, all_conflict_atoms_by_slotatom)
+ checked += 1
+ if solution:
+ solutions.append(solution)
+
+ if checked >= self._check_configuration_max:
+ # TODO: Implement early elimination for candidates that would
+ # change forced or masked flags, and don't count them here.
+ if self.debug:
+ writemsg("\nAborting _check_configuration due to "
+ "excessive number of candidates.\n", noiselevel=-1)
+ break
+
+ if self.debug:
+ if not solutions:
+ writemsg("No viable solutions. Rejecting configuration.\n", noiselevel=-1)
+ return solutions
+
+
+ def _force_flag_for_package(self, required_changes, pkg, flag, state):
+ """
+ Adds a USE change to required_changes. Sets the target state to
+ "contradiction" if a flag is forced to conflicting values.
+ """
+ _pkg_use_enabled = self.depgraph._pkg_use_enabled
+
+ if state == "disabled":
+ changes = required_changes.get(pkg, {})
+ flag_change = changes.get(flag, "")
+ if flag_change == "enabled":
+ flag_change = "contradiction"
+ elif flag in _pkg_use_enabled(pkg):
+ flag_change = "disabled"
+
+ changes[flag] = flag_change
+ required_changes[pkg] = changes
+ elif state == "enabled":
+ changes = required_changes.get(pkg, {})
+ flag_change = changes.get(flag, "")
+ if flag_change == "disabled":
+ flag_change = "contradiction"
+ else:
+ flag_change = "enabled"
+
+ changes[flag] = flag_change
+ required_changes[pkg] = changes
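+ # Illustrative usage, not part of the original code: forcing the same
+ # flag of one package to opposite values marks it as a contradiction,
+ # which later invalidates the whole solution candidate.
+ #
+ #   required_changes = {}
+ #   self._force_flag_for_package(required_changes, pkg, "ssl", "enabled")
+ #   self._force_flag_for_package(required_changes, pkg, "ssl", "disabled")
+ #   # now required_changes[pkg]["ssl"] == "contradiction"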
+
+ def _check_solution(self, config, all_involved_flags, all_conflict_atoms_by_slotatom):
+ """
+ Given a configuration and all involved flags, the given settings for the
+ involved flags are checked to see whether they solve the slot conflict.
+ """
+ _pkg_use_enabled = self.depgraph._pkg_use_enabled
+
+ if self.debug:
+ #The code is a bit verbose, because the states might not
+ #be strings, but _value_helper objects.
+ msg = "Solution candidate: "
+ msg += "["
+ first = True
+ for involved_flags in all_involved_flags:
+ if first:
+ first = False
+ else:
+ msg += ", "
+ msg += "{"
+ inner_first = True
+ for flag, state in involved_flags.items():
+ if inner_first:
+ inner_first = False
+ else:
+ msg += ", "
+ msg += flag + ": %s" % (state,)
+ msg += "}"
+ msg += "]\n"
+ writemsg(msg, noiselevel=-1)
+
+ required_changes = {}
+ for id, pkg in enumerate(config):
+ if not pkg.installed:
+ #We can't change the USE of installed packages.
+ for flag in all_involved_flags[id]:
+ if not pkg.iuse.is_valid_flag(flag):
+ continue
+ state = all_involved_flags[id][flag]
+ self._force_flag_for_package(required_changes, pkg, flag, state)
+
+ #Go through all (parent, atom) pairs for the current slot conflict.
+ for ppkg, atom in all_conflict_atoms_by_slotatom[id]:
+ if not atom.package:
+ continue
+ use = atom.unevaluated_atom.use
+ if not use:
+ #No need to force something for an atom without USE conditionals.
+ #These atoms are already satisfied.
+ continue
+ for flag in all_involved_flags[id]:
+ state = all_involved_flags[id][flag]
+
+ if flag not in use.required or not use.conditional:
+ continue
+ if flag in use.conditional.enabled:
+ #[flag?]
+ if state == "enabled":
+ #no need to change anything, the atom won't
+ #force -flag on pkg
+ pass
+ elif state == "disabled":
+ #if flag is enabled we get [flag] -> it must be disabled
+ self._force_flag_for_package(required_changes, ppkg, flag, "disabled")
+ elif flag in use.conditional.disabled:
+ #[!flag?]
+ if state == "enabled":
+ #if flag is enabled we get [-flag] -> it must be disabled
+ self._force_flag_for_package(required_changes, ppkg, flag, "disabled")
+ elif state == "disabled":
+ #no need to change anything, the atom won't
+ #force +flag on pkg
+ pass
+ elif flag in use.conditional.equal:
+ #[flag=]
+ if state == "enabled":
+ #if flag is disabled we get [-flag] -> it must be enabled
+ self._force_flag_for_package(required_changes, ppkg, flag, "enabled")
+ elif state == "disabled":
+ #if flag is enabled we get [flag] -> it must be disabled
+ self._force_flag_for_package(required_changes, ppkg, flag, "disabled")
+ elif flag in use.conditional.not_equal:
+ #[!flag=]
+ if state == "enabled":
+ #if flag is enabled we get [-flag] -> it must be disabled
+ self._force_flag_for_package(required_changes, ppkg, flag, "disabled")
+ elif state == "disabled":
+ #if flag is disabled we get [flag] -> it must be enabled
+ self._force_flag_for_package(required_changes, ppkg, flag, "enabled")
+
+ is_valid_solution = True
+ for pkg in required_changes:
+ for state in required_changes[pkg].values():
+ if not state in ("enabled", "disabled"):
+ is_valid_solution = False
+
+ if not is_valid_solution:
+ return None
+
+ #Check if all atoms are satisfied after the changes are applied.
+ for id, pkg in enumerate(config):
+ new_use = _pkg_use_enabled(pkg)
+ if pkg in required_changes:
+ old_use = pkg.use.enabled
+ new_use = set(new_use)
+ for flag, state in required_changes[pkg].items():
+ if state == "enabled":
+ new_use.add(flag)
+ elif state == "disabled":
+ new_use.discard(flag)
+ if not new_use.symmetric_difference(old_use):
+ #avoid copying the package in findAtomForPackage if possible
+ new_use = old_use
+
+ for ppkg, atom in all_conflict_atoms_by_slotatom[id]:
+ if not atom.package:
+ continue
+ if not hasattr(ppkg, "use"):
+ #It's a SetArg or something like that.
+ continue
+ ppkg_new_use = set(_pkg_use_enabled(ppkg))
+ if ppkg in required_changes:
+ for flag, state in required_changes[ppkg].items():
+ if state == "enabled":
+ ppkg_new_use.add(flag)
+ elif state == "disabled":
+ ppkg_new_use.discard(flag)
+
+ new_atom = atom.unevaluated_atom.evaluate_conditionals(ppkg_new_use)
+ i = InternalPackageSet(initial_atoms=(new_atom,))
+ if not i.findAtomForPackage(pkg, new_use):
+ #We managed to create a new problem with our changes.
+ is_valid_solution = False
+ if self.debug:
+ writemsg(("new conflict introduced: %s"
+ " does not match %s from %s\n") %
+ (pkg, new_atom, ppkg), noiselevel=-1)
+ break
+
+ if not is_valid_solution:
+ break
+
+ #Make sure the changes don't violate REQUIRED_USE
+ for pkg in required_changes:
+ required_use = pkg._metadata.get("REQUIRED_USE")
+ if not required_use:
+ continue
+
+ use = set(_pkg_use_enabled(pkg))
+ for flag, state in required_changes[pkg].items():
+ if state == "enabled":
+ use.add(flag)
+ else:
+ use.discard(flag)
+
+ if not check_required_use(required_use, use, pkg.iuse.is_valid_flag):
+ is_valid_solution = False
+ break
+
+ if is_valid_solution and required_changes:
+ return required_changes
+ else:
+ return None
+
+class _configuration_generator(object):
+ def __init__(self, conflict_pkgs):
+ #reorder packages such that installed packages come last
+ self.conflict_pkgs = []
+ for pkgs in conflict_pkgs:
+ new_pkgs = []
+ for pkg in pkgs:
+ if not pkg.installed:
+ new_pkgs.append(pkg)
+ for pkg in pkgs:
+ if pkg.installed:
+ new_pkgs.append(pkg)
+ self.conflict_pkgs.append(new_pkgs)
+
+ self.solution_ids = []
+ for pkgs in self.conflict_pkgs:
+ self.solution_ids.append(0)
+ self._is_first_solution = True
+
+ def get_configuration(self):
+ if self._is_first_solution:
+ self._is_first_solution = False
+ else:
+ if not self._next():
+ return None
+
+ solution = []
+ for id, pkgs in enumerate(self.conflict_pkgs):
+ solution.append(pkgs[self.solution_ids[id]])
+ return solution
+
+ def _next(self, id=None):
+ solution_ids = self.solution_ids
+ conflict_pkgs = self.conflict_pkgs
+
+ if id is None:
+ id = len(solution_ids)-1
+
+ if solution_ids[id] == len(conflict_pkgs[id])-1:
+ if id > 0:
+ return self._next(id=id-1)
+ else:
+ return False
+ else:
+ solution_ids[id] += 1
+ for other_id in range(id+1, len(solution_ids)):
+ solution_ids[other_id] = 0
+ return True
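+ # Illustrative sketch, not part of the original code: the generator walks
+ # the Cartesian product of the per-conflict package choices, with installed
+ # packages ordered last within each conflict. For hypothetical conflict_pkgs
+ # [[A1, A2], [B1]] it yields [A1, B1] and then [A2, B1]:
+ #
+ #   gen = _configuration_generator([[A1, A2], [B1]])
+ #   config = gen.get_configuration()
+ #   while config is not None:
+ #       ...  # evaluate this configuration
+ #       config = gen.get_configuration()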
+
+class _solution_candidate_generator(object):
+ class _value_helper(object):
+ def __init__(self, value=None):
+ self.value = value
+ def __eq__(self, other):
+ if isinstance(other, basestring):
+ return self.value == other
+ else:
+ return self.value == other.value
+ def __str__(self):
+ return "%s" % (self.value,)
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'], errors='backslashreplace')
+
+ def __init__(self, all_involved_flags):
+ #A copy of all_involved_flags with all "cond" values
+ #replaced by a _value_helper object.
+ self.all_involved_flags = []
+
+ #A list tracking references to all used _value_helper
+ #objects.
+ self.conditional_values = []
+
+ for involved_flags in all_involved_flags:
+ new_involved_flags = {}
+ for flag, state in involved_flags.items():
+ if state in ("enabled", "disabled"):
+ new_involved_flags[flag] = state
+ else:
+ v = self._value_helper("disabled")
+ new_involved_flags[flag] = v
+ self.conditional_values.append(v)
+ self.all_involved_flags.append(new_involved_flags)
+
+ self._is_first_solution = True
+
+ def get_candidate(self):
+ if self._is_first_solution:
+ self._is_first_solution = False
+ else:
+ if not self._next():
+ return None
+
+ return self.all_involved_flags
+
+ def _next(self, id=None):
+ values = self.conditional_values
+
+ if not values:
+ return False
+
+ if id is None:
+ id = len(values)-1
+
+ if values[id].value == "enabled":
+ if id > 0:
+ return self._next(id=id-1)
+ else:
+ return False
+ else:
+ values[id].value = "enabled"
+ for other_id in range(id+1, len(values)):
+ values[other_id].value = "disabled"
+ return True
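+ # Illustrative note, not part of the original code: only flags in the
+ # "cond" state are wrapped in _value_helper objects, and _next() steps
+ # through their values like a binary counter. With two conditional flags
+ # the candidates are (disabled, disabled), (disabled, enabled),
+ # (enabled, disabled) and (enabled, enabled); flags already forced to
+ # "enabled" or "disabled" are left untouched.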
+
+
diff --git a/lib/_emerge/search.py b/lib/_emerge/search.py
new file mode 100644
index 000000000..eb52b2ca3
--- /dev/null
+++ b/lib/_emerge/search.py
@@ -0,0 +1,531 @@
+# Copyright 1999-2016 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import difflib
+import re
+import portage
+from portage import os
+from portage.dbapi.porttree import _parse_uri_map
+from portage.dbapi.IndexedPortdb import IndexedPortdb
+from portage.dbapi.IndexedVardb import IndexedVardb
+from portage.localization import localized_size
+from portage.output import bold, bold as white, darkgreen, green, red
+from portage.util import writemsg_stdout
+from portage.util.iterators.MultiIterGroupBy import MultiIterGroupBy
+
+from _emerge.Package import Package
+
+class search(object):
+
+ #
+ # class constants
+ #
+ VERSION_SHORT=1
+ VERSION_RELEASE=2
+
+ #
+ # public interface
+ #
+ def __init__(self, root_config, spinner, searchdesc,
+ verbose, usepkg, usepkgonly, search_index=True,
+ search_similarity=None, fuzzy=True):
+ """Searches the available and installed packages for the supplied search key.
+ The list of available and installed packages is created at object instantiation.
+ This makes successive searches faster."""
+ self.settings = root_config.settings
+ self.verbose = verbose
+ self.searchdesc = searchdesc
+ self.searchkey = None
+ self._results_specified = False
+ # Disable the spinner since search results are displayed
+ # incrementally.
+ self.spinner = None
+ self.root_config = root_config
+ self.setconfig = root_config.setconfig
+ self.fuzzy = fuzzy
+ self.search_similarity = (80 if search_similarity is None
+ else search_similarity)
+ self.matches = {"pkg" : []}
+ self.mlen = 0
+
+ self._dbs = []
+
+ portdb = root_config.trees["porttree"].dbapi
+ bindb = root_config.trees["bintree"].dbapi
+ vardb = root_config.trees["vartree"].dbapi
+
+ if search_index:
+ portdb = IndexedPortdb(portdb)
+ vardb = IndexedVardb(vardb)
+
+ if not usepkgonly and portdb._have_root_eclass_dir:
+ self._dbs.append(portdb)
+
+ if (usepkg or usepkgonly) and bindb.cp_all():
+ self._dbs.append(bindb)
+
+ self._dbs.append(vardb)
+ self._portdb = portdb
+ self._vardb = vardb
+
+ def _spinner_update(self):
+ if self.spinner:
+ self.spinner.update()
+
+ def _cp_all(self):
+ iterators = []
+ for db in self._dbs:
+ # MultiIterGroupBy requires sorted input
+ i = db.cp_all(sort=True)
+ try:
+ i = iter(i)
+ except TypeError:
+ pass
+ iterators.append(i)
+ for group in MultiIterGroupBy(iterators):
+ yield group[0]
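+ # A rough standard-library equivalent, not part of the original code, of
+ # what _cp_all() does with MultiIterGroupBy: merge several sorted cp
+ # streams and yield each category/package name only once.
+ #
+ #   import heapq, itertools
+ #   def merged_cp_all(dbs):
+ #       iterators = [iter(db.cp_all(sort=True)) for db in dbs]
+ #       for cp, _group in itertools.groupby(heapq.merge(*iterators)):
+ #           yield cp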
+
+ def _aux_get(self, *args, **kwargs):
+ for db in self._dbs:
+ try:
+ return db.aux_get(*args, **kwargs)
+ except KeyError:
+ pass
+ raise KeyError(args[0])
+
+ def _aux_get_error(self, cpv):
+ portage.writemsg("emerge: search: "
+ "aux_get('%s') failed, skipping\n" % cpv,
+ noiselevel=-1)
+
+ def _findname(self, *args, **kwargs):
+ for db in self._dbs:
+ if db is not self._portdb:
+ # We don't want findname to return anything
+ # unless it's an ebuild in a portage tree.
+ # Otherwise, it's already built and we don't
+ # care about it.
+ continue
+ func = getattr(db, "findname", None)
+ if func:
+ value = func(*args, **kwargs)
+ if value:
+ return value
+ return None
+
+ def _getFetchMap(self, *args, **kwargs):
+ for db in self._dbs:
+ func = getattr(db, "getFetchMap", None)
+ if func:
+ value = func(*args, **kwargs)
+ if value:
+ return value
+ return {}
+
+ def _visible(self, db, cpv, metadata):
+ installed = db is self._vardb
+ built = installed or db is not self._portdb
+ pkg_type = "ebuild"
+ if installed:
+ pkg_type = "installed"
+ elif built:
+ pkg_type = "binary"
+ return Package(type_name=pkg_type,
+ root_config=self.root_config,
+ cpv=cpv, built=built, installed=installed,
+ metadata=metadata).visible
+
+ def _first_cp(self, cp):
+
+ for db in self._dbs:
+ if hasattr(db, "cp_list"):
+ matches = db.cp_list(cp)
+ if matches:
+ return matches[-1]
+ else:
+ matches = db.match(cp)
+
+ for cpv in matches:
+ if cpv.cp == cp:
+ return cpv
+
+ return None
+
+
+ def _xmatch(self, level, atom):
+ """
+ This method does not expand old-style virtuals because it
+ is restricted to returning matches for a single ${CATEGORY}/${PN}
+ and old-style virtual matches are unreliable for that when querying
+ multiple package databases. If necessary, old-style virtual expansion
+ can be performed on atoms prior to calling this method.
+ """
+ if not isinstance(atom, portage.dep.Atom):
+ atom = portage.dep.Atom(atom)
+
+ cp = atom.cp
+ if level == "match-all":
+ matches = set()
+ for db in self._dbs:
+ if hasattr(db, "xmatch"):
+ matches.update(db.xmatch(level, atom))
+ else:
+ matches.update(db.match(atom))
+ result = list(x for x in matches if portage.cpv_getkey(x) == cp)
+ db._cpv_sort_ascending(result)
+ elif level == "match-visible":
+ matches = set()
+ for db in self._dbs:
+ if hasattr(db, "xmatch"):
+ matches.update(db.xmatch(level, atom))
+ else:
+ db_keys = list(db._aux_cache_keys)
+ for cpv in db.match(atom):
+ try:
+ metadata = zip(db_keys,
+ db.aux_get(cpv, db_keys))
+ except KeyError:
+ self._aux_get_error(cpv)
+ continue
+ if not self._visible(db, cpv, metadata):
+ continue
+ matches.add(cpv)
+ result = list(x for x in matches if portage.cpv_getkey(x) == cp)
+ db._cpv_sort_ascending(result)
+ elif level == "bestmatch-visible":
+ result = None
+ for db in self._dbs:
+ if hasattr(db, "xmatch"):
+ cpv = db.xmatch("bestmatch-visible", atom)
+ if not cpv or portage.cpv_getkey(cpv) != cp:
+ continue
+ if not result or cpv == portage.best([cpv, result]):
+ result = cpv
+ else:
+ db_keys = list(db._aux_cache_keys)
+ matches = db.match(atom)
+ try:
+ db.match_unordered
+ except AttributeError:
+ pass
+ else:
+ db._cpv_sort_ascending(matches)
+
+ # break out of this loop with highest visible
+ # match, checked in descending order
+ for cpv in reversed(matches):
+ if portage.cpv_getkey(cpv) != cp:
+ continue
+ try:
+ metadata = zip(db_keys,
+ db.aux_get(cpv, db_keys))
+ except KeyError:
+ self._aux_get_error(cpv)
+ continue
+ if not self._visible(db, cpv, metadata):
+ continue
+ if not result or cpv == portage.best([cpv, result]):
+ result = cpv
+ break
+ else:
+ raise NotImplementedError(level)
+ return result
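+ # Illustrative example with a hypothetical atom, not part of the original
+ # code: self._xmatch("match-all", "app-editors/vim") returns an ascending
+ # list of every matching cpv across the configured databases, while
+ # self._xmatch("bestmatch-visible", "app-editors/vim") returns a single
+ # cpv string for the highest visible match, or None if nothing matches.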
+
+ def execute(self,searchkey):
+ """Performs the search for the supplied search key"""
+ self.searchkey = searchkey
+
+ def _iter_search(self):
+
+ match_category = 0
+ self.packagematches = []
+ if self.searchdesc:
+ self.searchdesc=1
+ self.matches = {"pkg":[], "desc":[], "set":[]}
+ else:
+ self.searchdesc=0
+ self.matches = {"pkg":[], "set":[]}
+ writemsg_stdout("Searching...\n\n", noiselevel=-1)
+
+ regexsearch = False
+ if self.searchkey.startswith('%'):
+ regexsearch = True
+ self.searchkey = self.searchkey[1:]
+ if self.searchkey.startswith('@'):
+ match_category = 1
+ self.searchkey = self.searchkey[1:]
+ # Auto-detect category match mode (@ symbol can be deprecated
+ # after this is available in a stable version of portage).
+ if '/' in self.searchkey:
+ match_category = 1
+ fuzzy = False
+ if regexsearch:
+ self.searchre=re.compile(self.searchkey,re.I)
+ else:
+ self.searchre=re.compile(re.escape(self.searchkey), re.I)
+
+ # Fuzzy search does not support regular expressions, therefore
+ # it is disabled for regular expression searches.
+ if self.fuzzy:
+ fuzzy = True
+ cutoff = float(self.search_similarity) / 100
+ if match_category:
+ # Weigh the similarity of category and package
+ # names independently, in order to avoid matching
+ # lots of irrelevant packages in the same category
+ # when the package name is much shorter than the
+ # category name.
+ part_split = portage.catsplit
+ else:
+ part_split = lambda match_string: (match_string,)
+
+ part_matchers = []
+ for part in part_split(self.searchkey):
+ seq_match = difflib.SequenceMatcher()
+ seq_match.set_seq2(part.lower())
+ part_matchers.append(seq_match)
+
+ def fuzzy_search_part(seq_match, match_string):
+ seq_match.set_seq1(match_string.lower())
+ return (seq_match.real_quick_ratio() >= cutoff and
+ seq_match.quick_ratio() >= cutoff and
+ seq_match.ratio() >= cutoff)
+
+ def fuzzy_search(match_string):
+ return all(fuzzy_search_part(seq_match, part)
+ for seq_match, part in zip(
+ part_matchers, part_split(match_string)))
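+ # A minimal sketch, not part of the original code, of the cutoff test
+ # performed by fuzzy_search_part() above, using hypothetical strings and
+ # the default search similarity of 80 (cutoff 0.8):
+ #
+ #   import difflib
+ #   sm = difflib.SequenceMatcher()
+ #   sm.set_seq2("firefox")   # lower-cased search key part
+ #   sm.set_seq1("fierfox")   # lower-cased candidate name part
+ #   matched = (sm.real_quick_ratio() >= 0.8 and
+ #              sm.quick_ratio() >= 0.8 and
+ #              sm.ratio() >= 0.8)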
+
+ for package in self._cp_all():
+ self._spinner_update()
+
+ if match_category:
+ match_string = package[:]
+ else:
+ match_string = package.split("/")[-1]
+
+ if self.searchre.search(match_string):
+ yield ("pkg", package)
+ elif fuzzy and fuzzy_search(match_string):
+ yield ("pkg", package)
+ elif self.searchdesc: # DESCRIPTION searching
+ # Use _first_cp to avoid an expensive visibility check,
+ # since the visibility check can be avoided entirely
+ # when the DESCRIPTION does not match.
+ full_package = self._first_cp(package)
+ if not full_package:
+ continue
+ try:
+ full_desc = self._aux_get(
+ full_package, ["DESCRIPTION"])[0]
+ except KeyError:
+ self._aux_get_error(full_package)
+ continue
+ if not self.searchre.search(full_desc):
+ continue
+
+ yield ("desc", package)
+
+ self.sdict = self.setconfig.getSets()
+ for setname in self.sdict:
+ self._spinner_update()
+ if match_category:
+ match_string = setname
+ else:
+ match_string = setname.split("/")[-1]
+
+ if self.searchre.search(match_string):
+ yield ("set", setname)
+ elif self.searchdesc:
+ if self.searchre.search(
+ self.sdict[setname].getMetadata("DESCRIPTION")):
+ yield ("set", setname)
+
+ def addCP(self, cp):
+ """
+ Add a specific cp to the search results. This modifies the
+ behavior of the output method, so that it only displays specific
+ packages added via this method.
+ """
+ self._results_specified = True
+ if not self._xmatch("match-all", cp):
+ return
+ self.matches["pkg"].append(cp)
+ self.mlen += 1
+
+ def output(self):
+ """Outputs the results of the search."""
+
+ class msg(object):
+ @staticmethod
+ def append(msg):
+ writemsg_stdout(msg, noiselevel=-1)
+
+ msg.append("\b\b \n[ Results for search key : " + \
+ bold(self.searchkey) + " ]\n")
+ vardb = self._vardb
+ metadata_keys = set(Package.metadata_keys)
+ metadata_keys.update(["DESCRIPTION", "HOMEPAGE", "LICENSE", "SRC_URI"])
+ metadata_keys = tuple(metadata_keys)
+
+ if self._results_specified:
+ # Handle results added via addCP
+ addCP_matches = []
+ for mytype, matches in self.matches.items():
+ for match in matches:
+ addCP_matches.append((mytype, match))
+ iterator = iter(addCP_matches)
+
+ else:
+ # Do a normal search
+ iterator = self._iter_search()
+
+ for mtype, match in iterator:
+ self.mlen += 1
+ masked = False
+ full_package = None
+ if mtype in ("pkg", "desc"):
+ full_package = self._xmatch(
+ "bestmatch-visible", match)
+ if not full_package:
+ masked = True
+ full_package = self._xmatch("match-all", match)
+ if full_package:
+ full_package = full_package[-1]
+ elif mtype == "set":
+ msg.append(green("*") + " " + bold(match) + "\n")
+ if self.verbose:
+ msg.append(" " + darkgreen("Description:") + \
+ " " + \
+ self.sdict[match].getMetadata("DESCRIPTION") \
+ + "\n\n")
+ if full_package:
+ try:
+ metadata = dict(zip(metadata_keys,
+ self._aux_get(full_package, metadata_keys)))
+ except KeyError:
+ self._aux_get_error(full_package)
+ continue
+
+ desc = metadata["DESCRIPTION"]
+ homepage = metadata["HOMEPAGE"]
+ license = metadata["LICENSE"]
+
+ if masked:
+ msg.append(green("*") + " " + \
+ white(match) + " " + red("[ Masked ]") + "\n")
+ else:
+ msg.append(green("*") + " " + bold(match) + "\n")
+ myversion = self.getVersion(full_package, search.VERSION_RELEASE)
+
+ mysum = [0,0]
+ file_size_str = None
+ mycat = match.split("/")[0]
+ mypkg = match.split("/")[1]
+ mycpv = match + "-" + myversion
+ myebuild = self._findname(mycpv)
+ if myebuild:
+ pkg = Package(built=False, cpv=mycpv,
+ installed=False, metadata=metadata,
+ root_config=self.root_config, type_name="ebuild")
+ pkgdir = os.path.dirname(myebuild)
+ mf = self.settings.repositories.get_repo_for_location(
+ os.path.dirname(os.path.dirname(pkgdir)))
+ mf = mf.load_manifest(
+ pkgdir, self.settings["DISTDIR"])
+ try:
+ uri_map = _parse_uri_map(mycpv, metadata,
+ use=pkg.use.enabled)
+ except portage.exception.InvalidDependString as e:
+ file_size_str = "Unknown (%s)" % (e,)
+ del e
+ else:
+ try:
+ mysum[0] = mf.getDistfilesSize(uri_map)
+ except KeyError as e:
+ file_size_str = "Unknown (missing " + \
+ "digest for %s)" % (e,)
+ del e
+
+ available = False
+ for db in self._dbs:
+ if db is not vardb and \
+ db.cpv_exists(mycpv):
+ available = True
+ if not myebuild and hasattr(db, "bintree"):
+ myebuild = db.bintree.getname(mycpv)
+ try:
+ mysum[0] = os.stat(myebuild).st_size
+ except OSError:
+ myebuild = None
+ break
+
+ if myebuild and file_size_str is None:
+ file_size_str = localized_size(mysum[0])
+
+ if self.verbose:
+ if available:
+ msg.append(" %s %s\n" % \
+ (darkgreen("Latest version available:"),
+ myversion))
+ msg.append(" %s\n" % \
+ self.getInstallationStatus(mycat+'/'+mypkg))
+ if myebuild:
+ msg.append(" %s %s\n" % \
+ (darkgreen("Size of files:"), file_size_str))
+ msg.append(" " + darkgreen("Homepage:") + \
+ " " + homepage + "\n")
+ msg.append(" " + darkgreen("Description:") \
+ + " " + desc + "\n")
+ msg.append(" " + darkgreen("License:") + \
+ " " + license + "\n\n")
+
+ msg.append("[ Applications found : " + \
+ bold(str(self.mlen)) + " ]\n\n")
+
+ # This method can be called multiple times, so
+ # reset the match count for the next call. Don't
+ # reset it at the beginning of this method, since
+ # that would lose modifications from the addCP
+ # method.
+ self.mlen = 0
+
+ #
+ # private interface
+ #
+ def getInstallationStatus(self,package):
+ if not isinstance(package, portage.dep.Atom):
+ package = portage.dep.Atom(package)
+
+ installed_package = self._vardb.match(package)
+ if installed_package:
+ try:
+ self._vardb.match_unordered
+ except AttributeError:
+ installed_package = installed_package[-1]
+ else:
+ installed_package = portage.best(installed_package)
+
+ else:
+ installed_package = ""
+ result = ""
+ version = self.getVersion(installed_package,search.VERSION_RELEASE)
+ if len(version) > 0:
+ result = darkgreen("Latest version installed:")+" "+version
+ else:
+ result = darkgreen("Latest version installed:")+" [ Not Installed ]"
+ return result
+
+ def getVersion(self,full_package,detail):
+ if len(full_package) > 1:
+ package_parts = portage.catpkgsplit(full_package)
+ if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
+ result = package_parts[2]+ "-" + package_parts[3]
+ else:
+ result = package_parts[2]
+ else:
+ result = ""
+ return result
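+ # Illustrative example, not part of the original code:
+ # portage.catpkgsplit("app-misc/foo-1.2.3-r1") returns
+ # ('app-misc', 'foo', '1.2.3', 'r1'), so VERSION_RELEASE yields
+ # "1.2.3-r1", while a package without a revision ('r0') yields "1.2.3".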
+
diff --git a/lib/_emerge/show_invalid_depstring_notice.py b/lib/_emerge/show_invalid_depstring_notice.py
new file mode 100644
index 000000000..e11ea65ed
--- /dev/null
+++ b/lib/_emerge/show_invalid_depstring_notice.py
@@ -0,0 +1,35 @@
+# Copyright 1999-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+import textwrap
+import portage
+from portage import os
+from portage.util import writemsg_level
+
+def show_invalid_depstring_notice(parent_node, error_msg):
+
+ msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
+ "\n\n%s\n\n%s\n\n" % (error_msg, parent_node)
+ p_key = parent_node.cpv
+ p_status = parent_node.operation
+ msg = []
+ if p_status == "nomerge":
+ category, pf = portage.catsplit(p_key)
+ pkg_location = os.path.join(parent_node.root_config.settings['EROOT'], portage.VDB_PATH, category, pf)
+ msg.append("Portage is unable to process the dependencies of the ")
+ msg.append("'%s' package. " % p_key)
+ msg.append("In order to correct this problem, the package ")
+ msg.append("should be uninstalled, reinstalled, or upgraded. ")
+ msg.append("As a temporary workaround, the --nodeps option can ")
+ msg.append("be used to ignore all dependencies. For reference, ")
+ msg.append("the problematic dependencies can be found in the ")
+ msg.append("*DEPEND files located in '%s/'." % pkg_location)
+ else:
+ msg.append("This package can not be installed. ")
+ msg.append("Please notify the '%s' package maintainer " % p_key)
+ msg.append("about this problem.")
+
+ msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
+ writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
+
diff --git a/lib/_emerge/stdout_spinner.py b/lib/_emerge/stdout_spinner.py
new file mode 100644
index 000000000..670686adf
--- /dev/null
+++ b/lib/_emerge/stdout_spinner.py
@@ -0,0 +1,86 @@
+# Copyright 1999-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import platform
+import sys
+import time
+
+from portage.output import darkgreen, green
+
+class stdout_spinner(object):
+ scroll_msgs = [
+ "Gentoo Rocks ("+platform.system()+")",
+ "Thank you for using Gentoo. :)",
+ "Are you actually trying to read this?",
+ "How many times have you stared at this?",
+ "We are generating the cache right now",
+ "You are paying too much attention.",
+ "A theory is better than its explanation.",
+ "Phasers locked on target, Captain.",
+ "Thrashing is just virtual crashing.",
+ "To be is to program.",
+ "Real Users hate Real Programmers.",
+ "When all else fails, read the instructions.",
+ "Functionality breeds Contempt.",
+ "The future lies ahead.",
+ "3.1415926535897932384626433832795028841971694",
+ "Sometimes insanity is the only alternative.",
+ "Inaccuracy saves a world of explanation.",
+ ]
+
+ twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
+
+ def __init__(self):
+ self.spinpos = 0
+ self.update = self.update_twirl
+ self.scroll_sequence = self.scroll_msgs[
+ int(time.time() * 100) % len(self.scroll_msgs)]
+ self.last_update = 0
+ self.min_display_latency = 0.05
+
+ def _return_early(self):
+ """
+ Flushing output to the tty too frequently wastes CPU time. Therefore,
+ each update* method should return without doing any output when this
+ method returns True.
+ """
+ cur_time = time.time()
+ if cur_time - self.last_update < self.min_display_latency:
+ return True
+ self.last_update = cur_time
+ return False
+
+ def update_basic(self):
+ self.spinpos = (self.spinpos + 1) % 500
+ if self._return_early():
+ return True
+ if (self.spinpos % 100) == 0:
+ if self.spinpos == 0:
+ sys.stdout.write(". ")
+ else:
+ sys.stdout.write(".")
+ sys.stdout.flush()
+ return True
+
+ def update_scroll(self):
+ if self._return_early():
+ return True
+ if(self.spinpos >= len(self.scroll_sequence)):
+ sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
+ len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
+ else:
+ sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
+ sys.stdout.flush()
+ self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
+ return True
+
+ def update_twirl(self):
+ self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
+ if self._return_early():
+ return True
+ sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
+ sys.stdout.flush()
+ return True
+
+ def update_quiet(self):
+ return True
diff --git a/lib/_emerge/unmerge.py b/lib/_emerge/unmerge.py
new file mode 100644
index 000000000..8923e20ea
--- /dev/null
+++ b/lib/_emerge/unmerge.py
@@ -0,0 +1,608 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import logging
+import signal
+import sys
+import textwrap
+import portage
+from portage import os
+from portage.dbapi._expand_new_virt import expand_new_virt
+from portage.localization import _
+from portage.output import bold, colorize, darkgreen, green
+from portage._sets import SETPREFIX
+from portage._sets.base import EditablePackageSet
+from portage.versions import cpv_sort_key, _pkg_str
+
+from _emerge.emergelog import emergelog
+from _emerge.Package import Package
+from _emerge.UserQuery import UserQuery
+from _emerge.UninstallFailure import UninstallFailure
+from _emerge.countdown import countdown
+
+def _unmerge_display(root_config, myopts, unmerge_action,
+ unmerge_files, clean_delay=1, ordered=0,
+ writemsg_level=portage.util.writemsg_level):
+ """
+ Returns a tuple of (returncode, pkgmap) where returncode is
+ os.EX_OK if no errors occur, and 1 otherwise.
+ """
+
+ quiet = "--quiet" in myopts
+ settings = root_config.settings
+ sets = root_config.sets
+ vartree = root_config.trees["vartree"]
+ candidate_catpkgs=[]
+ global_unmerge=0
+ out = portage.output.EOutput()
+ pkg_cache = {}
+ db_keys = list(vartree.dbapi._aux_cache_keys)
+
+ def _pkg(cpv):
+ pkg = pkg_cache.get(cpv)
+ if pkg is None:
+ pkg = Package(built=True, cpv=cpv, installed=True,
+ metadata=zip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
+ operation="uninstall", root_config=root_config,
+ type_name="installed")
+ pkg_cache[cpv] = pkg
+ return pkg
+
+ vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH)
+ try:
+ # At least the parent needs to exist for the lock file.
+ portage.util.ensure_dirs(vdb_path)
+ except portage.exception.PortageException:
+ pass
+ vdb_lock = None
+ try:
+ if os.access(vdb_path, os.W_OK):
+ vartree.dbapi.lock()
+ vdb_lock = True
+
+ realsyslist = []
+ sys_virt_map = {}
+ for x in sets["system"].getAtoms():
+ for atom in expand_new_virt(vartree.dbapi, x):
+ if not atom.blocker:
+ realsyslist.append(atom)
+ if atom.cp != x.cp:
+ sys_virt_map[atom.cp] = x.cp
+
+ syslist = []
+ for x in realsyslist:
+ mycp = x.cp
+ # Since Gentoo stopped using old-style virtuals in
+ # 2011, typically it's possible to avoid getvirtuals()
+ # calls entirely. It will not be triggered here by
+ # new-style virtuals since those are expanded to
+ # non-virtual atoms above by expand_new_virt().
+ if mycp.startswith("virtual/") and \
+ mycp in settings.getvirtuals():
+ providers = []
+ for provider in settings.getvirtuals()[mycp]:
+ if vartree.dbapi.match(provider):
+ providers.append(provider)
+ if len(providers) == 1:
+ syslist.extend(providers)
+ else:
+ syslist.append(mycp)
+ syslist = frozenset(syslist)
+
+ if not unmerge_files:
+ if unmerge_action in ["rage-clean", "unmerge"]:
+ print()
+ print(bold("emerge %s" % unmerge_action) +
+ " can only be used with specific package names")
+ print()
+ return 1, {}
+ else:
+ global_unmerge = 1
+
+ localtree = vartree
+ # process all arguments and add all
+ # valid db entries to candidate_catpkgs
+ if global_unmerge:
+ if not unmerge_files:
+ candidate_catpkgs.extend(vartree.dbapi.cp_all())
+ else:
+ #we've got command-line arguments
+ if not unmerge_files:
+ print("\nNo packages to %s have been provided.\n" %
+ unmerge_action)
+ return 1, {}
+ for x in unmerge_files:
+ arg_parts = x.split('/')
+ if x[0] not in [".","/"] and \
+ arg_parts[-1][-7:] != ".ebuild":
+ #possible cat/pkg or dep; treat as such
+ candidate_catpkgs.append(x)
+ elif unmerge_action in ["prune","clean"]:
+ print("\n!!! Prune and clean do not accept individual" + \
+ " ebuilds as arguments;\n skipping.\n")
+ continue
+ else:
+ # it appears that the user is specifying an installed
+ # ebuild and we're in "unmerge" mode, so it's ok.
+ if not os.path.exists(x):
+ print("\n!!! The path '"+x+"' doesn't exist.\n")
+ return 1, {}
+
+ absx = os.path.abspath(x)
+ sp_absx = absx.split("/")
+ if sp_absx[-1][-7:] == ".ebuild":
+ del sp_absx[-1]
+ absx = "/".join(sp_absx)
+
+ sp_absx_len = len(sp_absx)
+
+ vdb_path = os.path.join(settings["EROOT"], portage.VDB_PATH)
+
+ sp_vdb = vdb_path.split("/")
+ sp_vdb_len = len(sp_vdb)
+
+ if not os.path.exists(absx+"/CONTENTS"):
+ print("!!! Not a valid db dir: "+str(absx))
+ return 1, {}
+
+ if sp_absx_len <= sp_vdb_len:
+ # The Path is shorter... so it can't be inside the vdb.
+ print(sp_absx)
+ print(absx)
+ print("\n!!!",x,"cannot be inside "+ \
+ vdb_path+"; aborting.\n")
+ return 1, {}
+
+ for idx in range(0,sp_vdb_len):
+ if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
+ print(sp_absx)
+ print(absx)
+ print("\n!!!", x, "is not inside "+\
+ vdb_path+"; aborting.\n")
+ return 1, {}
+
+ print("="+"/".join(sp_absx[sp_vdb_len:]))
+ candidate_catpkgs.append(
+ "="+"/".join(sp_absx[sp_vdb_len:]))
+
+ newline=""
+ if (not "--quiet" in myopts):
+ newline="\n"
+ if settings["ROOT"] != "/":
+ writemsg_level(darkgreen(newline+ \
+ ">>> Using system located in ROOT tree %s\n" % \
+ settings["ROOT"]))
+
+ if (("--pretend" in myopts) or ("--ask" in myopts)) and \
+ not ("--quiet" in myopts):
+ writemsg_level(darkgreen(newline+\
+ ">>> These are the packages that would be unmerged:\n"))
+
+ # Preservation of order is required for --depclean and --prune so
+ # that dependencies are respected. Use all_selected to eliminate
+ # duplicate packages since the same package may be selected by
+ # multiple atoms.
+ pkgmap = []
+ all_selected = set()
+ for x in candidate_catpkgs:
+ # cycle through all our candidate deps and determine
+ # what will and will not get unmerged
+ try:
+ mymatch = vartree.dbapi.match(x)
+ except portage.exception.AmbiguousPackageName as errpkgs:
+ print("\n\n!!! The short ebuild name \"" + \
+ x + "\" is ambiguous. Please specify")
+ print("!!! one of the following fully-qualified " + \
+ "ebuild names instead:\n")
+ for i in errpkgs[0]:
+ print(" " + green(i))
+ print()
+ sys.exit(1)
+
+ if not mymatch and x[0] not in "<>=~":
+ mymatch = localtree.dep_match(x)
+ if not mymatch:
+ portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
+ (x.replace("null/", ""), unmerge_action), noiselevel=-1)
+ continue
+
+ pkgmap.append(
+ {"protected": set(), "selected": set(), "omitted": set()})
+ mykey = len(pkgmap) - 1
+ if unmerge_action in ["rage-clean", "unmerge"]:
+ for y in mymatch:
+ if y not in all_selected:
+ pkgmap[mykey]["selected"].add(y)
+ all_selected.add(y)
+ elif unmerge_action == "prune":
+ if len(mymatch) == 1:
+ continue
+ best_version = mymatch[0]
+ best_slot = vartree.getslot(best_version)
+ best_counter = vartree.dbapi.cpv_counter(best_version)
+ for mypkg in mymatch[1:]:
+ myslot = vartree.getslot(mypkg)
+ mycounter = vartree.dbapi.cpv_counter(mypkg)
+ if (myslot == best_slot and mycounter > best_counter) or \
+ mypkg == portage.best([mypkg, best_version]):
+ if myslot == best_slot:
+ if mycounter < best_counter:
+ # On slot collision, keep the one with the
+ # highest counter since it is the most
+ # recently installed.
+ continue
+ best_version = mypkg
+ best_slot = myslot
+ best_counter = mycounter
+ pkgmap[mykey]["protected"].add(best_version)
+ pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
+ if mypkg != best_version and mypkg not in all_selected)
+ all_selected.update(pkgmap[mykey]["selected"])
+ else:
+ # unmerge_action == "clean"
+ slotmap={}
+ for mypkg in mymatch:
+ if unmerge_action == "clean":
+ myslot = localtree.getslot(mypkg)
+ else:
+ # since we're pruning, we don't care about slots
+ # and put all the pkgs in together
+ myslot = 0
+ if myslot not in slotmap:
+ slotmap[myslot] = {}
+ slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
+
+ for mypkg in vartree.dbapi.cp_list(
+ portage.cpv_getkey(mymatch[0])):
+ myslot = vartree.getslot(mypkg)
+ if myslot not in slotmap:
+ slotmap[myslot] = {}
+ slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
+
+ for myslot in slotmap:
+ counterkeys = list(slotmap[myslot])
+ if not counterkeys:
+ continue
+ counterkeys.sort()
+ pkgmap[mykey]["protected"].add(
+ slotmap[myslot][counterkeys[-1]])
+ del counterkeys[-1]
+
+ for counter in counterkeys[:]:
+ mypkg = slotmap[myslot][counter]
+ if mypkg not in mymatch:
+ counterkeys.remove(counter)
+ pkgmap[mykey]["protected"].add(
+ slotmap[myslot][counter])
+
+ #be pretty and get them in order of merge:
+ for ckey in counterkeys:
+ mypkg = slotmap[myslot][ckey]
+ if mypkg not in all_selected:
+ pkgmap[mykey]["selected"].add(mypkg)
+ all_selected.add(mypkg)
+ # ok, now the last-merged package
+ # is protected, and the rest are selected
+ numselected = len(all_selected)
+ if global_unmerge and not numselected:
+ portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
+ return 1, {}
+
+ if not numselected:
+ portage.writemsg_stdout(
+ "\n>>> No packages selected for removal by " + \
+ unmerge_action + "\n")
+ return 1, {}
+ finally:
+ if vdb_lock:
+ vartree.dbapi.flush_cache()
+ vartree.dbapi.unlock()
+
+ # generate a list of package sets that are directly or indirectly listed in "selected",
+ # as there is no persistent list of "installed" sets
+ installed_sets = ["selected"]
+ stop = False
+ pos = 0
+ while not stop:
+ stop = True
+ pos = len(installed_sets)
+ for s in installed_sets[pos - 1:]:
+ if s not in sets:
+ continue
+ candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
+ if candidates:
+ stop = False
+ installed_sets += candidates
+ installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
+ del stop, pos
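+ # Illustrative sketch, not part of the original code: the loop above is a
+ # transitive closure over nested sets. With hypothetical sets where
+ # "selected" contains "@desktop" and "desktop" contains "@x11",
+ # installed_sets grows from ["selected"] to ["selected", "desktop", "x11"]
+ # before sets that are being unmerged (setconfig.active) are filtered out.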
+
+ # we don't want to unmerge packages that are still listed in user-editable package sets
+ # listed in "world" as they would be remerged on the next update of "world" or the
+ # relevant package sets.
+ unknown_sets = set()
+ for cp in range(len(pkgmap)):
+ for cpv in pkgmap[cp]["selected"].copy():
+ try:
+ pkg = _pkg(cpv)
+ except KeyError:
+ # It could have been uninstalled
+ # by a concurrent process.
+ continue
+
+ if unmerge_action != "clean" and root_config.root == "/":
+ skip_pkg = False
+ if portage.match_from_list(portage.const.PORTAGE_PACKAGE_ATOM,
+ [pkg]):
+ msg = ("Not unmerging package %s "
+ "since there is no valid reason for Portage to "
+ "%s itself.") % (pkg.cpv, unmerge_action)
+ skip_pkg = True
+ elif vartree.dbapi._dblink(cpv).isowner(
+ portage._python_interpreter):
+ msg = ("Not unmerging package %s since there is no valid "
+ "reason for Portage to %s currently used Python "
+ "interpreter.") % (pkg.cpv, unmerge_action)
+ skip_pkg = True
+ if skip_pkg:
+ for line in textwrap.wrap(msg, 75):
+ out.eerror(line)
+ # adjust pkgmap so the display output is correct
+ pkgmap[cp]["selected"].remove(cpv)
+ all_selected.remove(cpv)
+ pkgmap[cp]["protected"].add(cpv)
+ continue
+
+ parents = []
+ for s in installed_sets:
+ # skip sets that the user requested to unmerge, and skip the world
+ # ("selected") set, since the package will be removed from
+ # that set later on.
+ if s in root_config.setconfig.active or s == "selected":
+ continue
+
+ if s not in sets:
+ if s in unknown_sets:
+ continue
+ unknown_sets.add(s)
+ out = portage.output.EOutput()
+ out.eerror(("Unknown set '@%s' in %s%s") % \
+ (s, root_config.settings['EROOT'], portage.const.WORLD_SETS_FILE))
+ continue
+
+ # only check instances of EditablePackageSet as other classes are generally used for
+ # special purposes and can be ignored here (and are usually generated dynamically, so the
+ # user can't do much about them anyway)
+ if isinstance(sets[s], EditablePackageSet):
+
+ # This is derived from a snippet of code in the
+ # depgraph._iter_atoms_for_pkg() method.
+ for atom in sets[s].iterAtomsForPackage(pkg):
+ inst_matches = vartree.dbapi.match(atom)
+ inst_matches.reverse() # descending order
+ higher_slot = None
+ for inst_cpv in inst_matches:
+ try:
+ inst_pkg = _pkg(inst_cpv)
+ except KeyError:
+ # It could have been uninstalled
+ # by a concurrent process.
+ continue
+
+ if inst_pkg.cp != atom.cp:
+ continue
+ if pkg >= inst_pkg:
+ # This is descending order, and we're not
+ # interested in any versions <= pkg given.
+ break
+ if pkg.slot_atom != inst_pkg.slot_atom:
+ higher_slot = inst_pkg
+ break
+ if higher_slot is None:
+ parents.append(s)
+ break
+ if parents:
+ print(colorize("WARN", "Package %s is going to be unmerged," % cpv))
+ print(colorize("WARN", "but still listed in the following package sets:"))
+ print(" %s\n" % ", ".join(parents))
+
+ del installed_sets
+
+ numselected = len(all_selected)
+ if not numselected:
+ writemsg_level(
+ "\n>>> No packages selected for removal by " + \
+ unmerge_action + "\n")
+ return 1, {}
+
+ # Unmerge order only matters in some cases
+ if not ordered:
+ unordered = {}
+ for d in pkgmap:
+ selected = d["selected"]
+ if not selected:
+ continue
+ cp = portage.cpv_getkey(next(iter(selected)))
+ cp_dict = unordered.get(cp)
+ if cp_dict is None:
+ cp_dict = {}
+ unordered[cp] = cp_dict
+ for k in d:
+ cp_dict[k] = set()
+ for k, v in d.items():
+ cp_dict[k].update(v)
+ pkgmap = [unordered[cp] for cp in sorted(unordered)]
+
+ for x in range(len(pkgmap)):
+ selected = pkgmap[x]["selected"]
+ if not selected:
+ continue
+ for mytype, mylist in pkgmap[x].items():
+ if mytype == "selected":
+ continue
+ mylist.difference_update(all_selected)
+ cp = portage.cpv_getkey(next(iter(selected)))
+ for y in localtree.dep_match(cp):
+ if y not in pkgmap[x]["omitted"] and \
+ y not in pkgmap[x]["selected"] and \
+ y not in pkgmap[x]["protected"] and \
+ y not in all_selected:
+ pkgmap[x]["omitted"].add(y)
+ if global_unmerge and not pkgmap[x]["selected"]:
+ #avoid cluttering the preview printout with stuff that isn't getting unmerged
+ continue
+ if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
+ virt_cp = sys_virt_map.get(cp)
+ if virt_cp is None:
+ cp_info = "'%s'" % (cp,)
+ else:
+ cp_info = "'%s' (%s)" % (cp, virt_cp)
+ writemsg_level(colorize("BAD","\n\n!!! " + \
+ "%s is part of your system profile.\n" % (cp_info,)),
+ level=logging.WARNING, noiselevel=-1)
+ writemsg_level(colorize("WARN","!!! Unmerging it may " + \
+ "be damaging to your system.\n\n"),
+ level=logging.WARNING, noiselevel=-1)
+ if not quiet:
+ writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
+ else:
+ writemsg_level(bold(cp) + ": ", noiselevel=-1)
+ for mytype in ["selected","protected","omitted"]:
+ if not quiet:
+ writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
+ if pkgmap[x][mytype]:
+ sorted_pkgs = []
+ for mypkg in pkgmap[x][mytype]:
+ try:
+ sorted_pkgs.append(mypkg.cpv)
+ except AttributeError:
+ sorted_pkgs.append(_pkg_str(mypkg))
+ sorted_pkgs.sort(key=cpv_sort_key())
+ for mypkg in sorted_pkgs:
+ if mytype == "selected":
+ writemsg_level(
+ colorize("UNMERGE_WARN", mypkg.version + " "),
+ noiselevel=-1)
+ else:
+ writemsg_level(
+ colorize("GOOD", mypkg.version + " "),
+ noiselevel=-1)
+ else:
+ writemsg_level("none ", noiselevel=-1)
+ if not quiet:
+ writemsg_level("\n", noiselevel=-1)
+ if quiet:
+ writemsg_level("\n", noiselevel=-1)
+
+ writemsg_level("\nAll selected packages: %s\n" %
+ " ".join('=%s' % x for x in all_selected), noiselevel=-1)
+
+ writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
+ " packages are slated for removal.\n")
+ writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
+ " and " + colorize("GOOD", "'omitted'") + \
+ " packages will not be removed.\n\n")
+
+ return os.EX_OK, pkgmap
+
+def unmerge(root_config, myopts, unmerge_action,
+ unmerge_files, ldpath_mtimes, autoclean=0,
+ clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
+ scheduler=None, writemsg_level=portage.util.writemsg_level):
+ """
+ Returns os.EX_OK if no errors occur, 1 if an error occurs, and
+ 130 if interrupted due to a 'no' answer for --ask.
+ """
+
+ if clean_world:
+ clean_world = myopts.get('--deselect') != 'n'
+
+ rval, pkgmap = _unmerge_display(root_config, myopts,
+ unmerge_action, unmerge_files,
+ clean_delay=clean_delay, ordered=ordered,
+ writemsg_level=writemsg_level)
+
+ if rval != os.EX_OK:
+ return rval
+
+ enter_invalid = '--ask-enter-invalid' in myopts
+ vartree = root_config.trees["vartree"]
+ sets = root_config.sets
+ settings = root_config.settings
+ mysettings = portage.config(clone=settings)
+ xterm_titles = "notitles" not in settings.features
+
+ if "--pretend" in myopts:
+ #we're done... return
+ return os.EX_OK
+ if "--ask" in myopts:
+ uq = UserQuery(myopts)
+ if uq.query("Would you like to unmerge these packages?",
+ enter_invalid) == "No":
+ # enter pretend mode for correct formatting of results
+ myopts["--pretend"] = True
+ print()
+ print("Quitting.")
+ print()
+ return 128 + signal.SIGINT
+
+ if not vartree.dbapi.writable:
+ writemsg_level("!!! %s\n" %
+ _("Read-only file system: %s") % vartree.dbapi._dbroot,
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ #the real unmerging begins, after a short delay unless we're raging....
+ if not unmerge_action == "rage-clean" and clean_delay and not autoclean:
+ countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
+
+ all_selected = set()
+ all_selected.update(*[x["selected"] for x in pkgmap])
+
+ # Set counter variables
+ curval = 1
+ maxval = len(all_selected)
+
+ for x in range(len(pkgmap)):
+ for y in pkgmap[x]["selected"]:
+ emergelog(xterm_titles, "=== Unmerging... ("+y+")")
+ message = ">>> Unmerging ({0} of {1}) {2}...\n".format(
+ colorize("MERGE_LIST_PROGRESS", str(curval)),
+ colorize("MERGE_LIST_PROGRESS", str(maxval)),
+ y)
+ writemsg_level(message, noiselevel=-1)
+ curval += 1
+
+ mysplit = y.split("/")
+ #unmerge...
+ retval = portage.unmerge(mysplit[0], mysplit[1],
+ settings=mysettings,
+ vartree=vartree, ldpath_mtimes=ldpath_mtimes,
+ scheduler=scheduler)
+
+ if retval != os.EX_OK:
+ emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
+ if raise_on_error:
+ raise UninstallFailure(retval)
+ sys.exit(retval)
+ else:
+ if clean_world and hasattr(sets["selected"], "cleanPackage")\
+ and hasattr(sets["selected"], "lock"):
+ sets["selected"].lock()
+ if hasattr(sets["selected"], "load"):
+ sets["selected"].load()
+ sets["selected"].cleanPackage(vartree.dbapi, y)
+ sets["selected"].unlock()
+ emergelog(xterm_titles, " >>> unmerge success: "+y)
+
+ if clean_world and hasattr(sets["selected"], "remove")\
+ and hasattr(sets["selected"], "lock"):
+ sets["selected"].lock()
+ # load is called inside remove()
+ for s in root_config.setconfig.active:
+ sets["selected"].remove(SETPREFIX + s)
+ sets["selected"].unlock()
+
+ return os.EX_OK
+
diff --git a/lib/portage/__init__.py b/lib/portage/__init__.py
new file mode 100644
index 000000000..166bfc700
--- /dev/null
+++ b/lib/portage/__init__.py
@@ -0,0 +1,664 @@
+# Copyright 1998-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+VERSION = "HEAD"
+
+# ===========================================================================
+# START OF IMPORTS -- START OF IMPORTS -- START OF IMPORTS -- START OF IMPORT
+# ===========================================================================
+
+try:
+ import sys
+ import errno
+ if not hasattr(errno, 'ESTALE'):
+ # ESTALE may not be defined on some systems, such as interix.
+ errno.ESTALE = -1
+ import re
+ import types
+ import platform
+
+ # Temporarily delete these imports, to ensure that only the
+ # wrapped versions are imported by portage internals.
+ import os
+ del os
+ import shutil
+ del shutil
+
+except ImportError as e:
+ sys.stderr.write("\n\n")
+ sys.stderr.write("!!! Failed to complete python imports. These are internal modules for\n")
+ sys.stderr.write("!!! python and failure here indicates that you have a problem with python\n")
+ sys.stderr.write("!!! itself and thus portage is not able to continue processing.\n\n")
+
+ sys.stderr.write("!!! You might consider starting python with verbose flags to see what has\n")
+ sys.stderr.write("!!! gone wrong. Here is the information we got for this exception:\n")
+ sys.stderr.write(" "+str(e)+"\n\n")
+ raise
+
+try:
+
+ import portage.proxy.lazyimport
+ import portage.proxy as proxy
+ proxy.lazyimport.lazyimport(globals(),
+ 'portage.cache.cache_errors:CacheError',
+ 'portage.checksum',
+ 'portage.checksum:perform_checksum,perform_md5,prelink_capable',
+ 'portage.cvstree',
+ 'portage.data',
+ 'portage.data:lchown,ostype,portage_gid,portage_uid,secpass,' + \
+ 'uid,userland,userpriv_groups,wheelgid',
+ 'portage.dbapi',
+ 'portage.dbapi.bintree:bindbapi,binarytree',
+ 'portage.dbapi.cpv_expand:cpv_expand',
+ 'portage.dbapi.dep_expand:dep_expand',
+ 'portage.dbapi.porttree:close_portdbapi_caches,FetchlistDict,' + \
+ 'portagetree,portdbapi',
+ 'portage.dbapi.vartree:dblink,merge,unmerge,vardbapi,vartree',
+ 'portage.dbapi.virtual:fakedbapi',
+ 'portage.dep',
+ 'portage.dep:best_match_to_list,dep_getcpv,dep_getkey,' + \
+ 'flatten,get_operator,isjustname,isspecific,isvalidatom,' + \
+ 'match_from_list,match_to_list',
+ 'portage.dep.dep_check:dep_check,dep_eval,dep_wordreduce,dep_zapdeps',
+ 'portage.eclass_cache',
+ 'portage.elog',
+ 'portage.exception',
+ 'portage.getbinpkg',
+ 'portage.locks',
+ 'portage.locks:lockdir,lockfile,unlockdir,unlockfile',
+ 'portage.mail',
+ 'portage.manifest:Manifest',
+ 'portage.output',
+ 'portage.output:bold,colorize',
+ 'portage.package.ebuild.doebuild:doebuild,' + \
+ 'doebuild_environment,spawn,spawnebuild',
+ 'portage.package.ebuild.config:autouse,best_from_dict,' + \
+ 'check_config_instance,config',
+ 'portage.package.ebuild.deprecated_profile_check:' + \
+ 'deprecated_profile_check',
+ 'portage.package.ebuild.digestcheck:digestcheck',
+ 'portage.package.ebuild.digestgen:digestgen',
+ 'portage.package.ebuild.fetch:fetch',
+ 'portage.package.ebuild.getmaskingreason:getmaskingreason',
+ 'portage.package.ebuild.getmaskingstatus:getmaskingstatus',
+ 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
+ 'portage.process',
+ 'portage.process:atexit_register,run_exitfuncs',
+ 'portage.update:dep_transform,fixdbentries,grab_updates,' + \
+ 'parse_updates,update_config_files,update_dbentries,' + \
+ 'update_dbentry',
+ 'portage.util',
+ 'portage.util:atomic_ofstream,apply_secpass_permissions,' + \
+ 'apply_recursive_permissions,dump_traceback,getconfig,' + \
+ 'grabdict,grabdict_package,grabfile,grabfile_package,' + \
+ 'map_dictlist_vals,new_protect_filename,normalize_path,' + \
+ 'pickle_read,pickle_write,stack_dictlist,stack_dicts,' + \
+ 'stack_lists,unique_array,varexpand,writedict,writemsg,' + \
+ 'writemsg_stdout,write_atomic',
+ 'portage.util.digraph:digraph',
+ 'portage.util.env_update:env_update',
+ 'portage.util.ExtractKernelVersion:ExtractKernelVersion',
+ 'portage.util.listdir:cacheddir,listdir',
+ 'portage.util.movefile:movefile',
+ 'portage.util.mtimedb:MtimeDB',
+ 'portage.versions',
+ 'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,' + \
+ 'cpv_getkey@getCPFromCPV,endversion_keys,' + \
+ 'suffix_value@endversion,pkgcmp,pkgsplit,vercmp,ververify',
+ 'portage.xpak',
+ 'subprocess',
+ 'time',
+ )
+
+ try:
+ from collections import OrderedDict
+ except ImportError:
+ proxy.lazyimport.lazyimport(globals(),
+ 'portage.cache.mappings:OrderedDict')
+
+ import portage.const
+ from portage.const import VDB_PATH, PRIVATE_PATH, CACHE_PATH, DEPCACHE_PATH, \
+ USER_CONFIG_PATH, MODULES_FILE_PATH, CUSTOM_PROFILE_PATH, PORTAGE_BASE_PATH, \
+ PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PROFILE_PATH, LOCALE_DATA_PATH, \
+ EBUILD_SH_BINARY, SANDBOX_BINARY, BASH_BINARY, \
+ MOVE_BINARY, PRELINK_BINARY, WORLD_FILE, MAKE_CONF_FILE, MAKE_DEFAULTS_FILE, \
+ DEPRECATED_PROFILE_FILE, USER_VIRTUALS_FILE, EBUILD_SH_ENV_FILE, \
+ INVALID_ENV_FILE, CUSTOM_MIRRORS_FILE, CONFIG_MEMORY_FILE,\
+ INCREMENTALS, EAPI, MISC_SH_BINARY, REPO_NAME_LOC, REPO_NAME_FILE
+
+except ImportError as e:
+ sys.stderr.write("\n\n")
+ sys.stderr.write("!!! Failed to complete portage imports. There are internal modules for\n")
+ sys.stderr.write("!!! portage and failure here indicates that you have a problem with your\n")
+ sys.stderr.write("!!! installation of portage. Please try a rescue portage located in the\n")
+ sys.stderr.write("!!! portage tree under '/usr/portage/sys-apps/portage/files/' (default).\n")
+ sys.stderr.write("!!! There is a README.RESCUE file that details the steps required to perform\n")
+ sys.stderr.write("!!! a recovery of portage.\n")
+ sys.stderr.write(" "+str(e)+"\n\n")
+ raise
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+ long = int
+
+# We use utf_8 encoding everywhere. Previously, we used
+# sys.getfilesystemencoding() for the 'merge' encoding, but that had
+# various problems:
+#
+# 1) If the locale is ever changed then it can cause orphan files due
+# to changed character set translation.
+#
+# 2) Ebuilds typically install files with utf_8 encoded file names,
+# and then portage would be forced to rename those files to match
+# sys.getfilesystemencoding(), possibly breaking things.
+#
+# 3) Automatic translation between encodings can lead to nonsensical
+# file names when the source encoding is unknown by portage.
+#
+# 4) It's inconvenient for ebuilds to convert the encodings of file
+# names to match the current locale, and upstreams typically encode
+# file names with utf_8 encoding.
+#
+# So, instead of relying on sys.getfilesystemencoding(), we avoid the above
+# problems by using a constant utf_8 'merge' encoding for all locales, as
+# discussed in bug #382199 and bug #381509.
+_encodings = {
+ 'content' : 'utf_8',
+ 'fs' : 'utf_8',
+ 'merge' : 'utf_8',
+ 'repo.content' : 'utf_8',
+ 'stdio' : 'utf_8',
+}
+
+if sys.hexversion >= 0x3000000:
+
+ def _decode_argv(argv):
+ # With Python 3, the surrogateescape encoding error handler makes it
+ # possible to access the original argv bytes, which can be useful
+ # if their actual encoding does not match the filesystem encoding.
+ fs_encoding = sys.getfilesystemencoding()
+ return [_unicode_decode(x.encode(fs_encoding, 'surrogateescape'))
+ for x in argv]
+
+ def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
+ if isinstance(s, str):
+ s = s.encode(encoding, errors)
+ return s
+
+ def _unicode_decode(s, encoding=_encodings['content'], errors='replace'):
+ if isinstance(s, bytes):
+ s = str(s, encoding=encoding, errors=errors)
+ return s
+
+ _native_string = _unicode_decode
+else:
+
+ def _decode_argv(argv):
+ return [_unicode_decode(x) for x in argv]
+
+ def _unicode_encode(s, encoding=_encodings['content'], errors='backslashreplace'):
+ if isinstance(s, unicode):
+ s = s.encode(encoding, errors)
+ return s
+
+ def _unicode_decode(s, encoding=_encodings['content'], errors='replace'):
+ if isinstance(s, bytes):
+ s = unicode(s, encoding=encoding, errors=errors)
+ return s
+
+ _native_string = _unicode_encode
+
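+# A rough usage sketch of the helpers above (the sample name is hypothetical):
+# file system names are round-tripped through the 'fs' encoding, e.g.
+#
+#     name_bytes = _unicode_encode(u"foo.ebuild", encoding=_encodings['fs'])
+#     name_text = _unicode_decode(name_bytes, encoding=_encodings['fs'])
+#
+# Both helpers pass values that are already of the target type through
+# unchanged, so callers may apply them unconditionally.
+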
+class _unicode_func_wrapper(object):
+ """
+ Wraps a function, converts arguments from unicode to bytes,
+ and return values to unicode from bytes. Function calls
+ will raise UnicodeEncodeError if an argument fails to be
+ encoded with the required encoding. Return values that
+ are single strings are decoded with errors='replace'. Return
+ values that are lists of strings are decoded with errors='strict'
+ and elements that fail to be decoded are omitted from the returned
+ list.
+ """
+ __slots__ = ('_func', '_encoding')
+
+ def __init__(self, func, encoding=_encodings['fs']):
+ self._func = func
+ self._encoding = encoding
+
+ def _process_args(self, args, kwargs):
+
+ encoding = self._encoding
+ wrapped_args = [_unicode_encode(x, encoding=encoding, errors='strict')
+ for x in args]
+ if kwargs:
+ wrapped_kwargs = dict(
+ (k, _unicode_encode(v, encoding=encoding, errors='strict'))
+ for k, v in kwargs.items())
+ else:
+ wrapped_kwargs = {}
+
+ return (wrapped_args, wrapped_kwargs)
+
+ def __call__(self, *args, **kwargs):
+
+ encoding = self._encoding
+ wrapped_args, wrapped_kwargs = self._process_args(args, kwargs)
+
+ rval = self._func(*wrapped_args, **wrapped_kwargs)
+
+ # Don't use isinstance() since we don't want to convert subclasses
+ # of tuple such as posix.stat_result in Python >=3.2.
+ if rval.__class__ in (list, tuple):
+ decoded_rval = []
+ for x in rval:
+ try:
+ x = _unicode_decode(x, encoding=encoding, errors='strict')
+ except UnicodeDecodeError:
+ pass
+ else:
+ decoded_rval.append(x)
+
+ if isinstance(rval, tuple):
+ rval = tuple(decoded_rval)
+ else:
+ rval = decoded_rval
+ else:
+ rval = _unicode_decode(rval, encoding=encoding, errors='replace')
+
+ return rval
+
+class _unicode_module_wrapper(object):
+ """
+ Wraps a module and wraps all functions with _unicode_func_wrapper.
+ """
+ __slots__ = ('_mod', '_encoding', '_overrides', '_cache')
+
+ def __init__(self, mod, encoding=_encodings['fs'], overrides=None, cache=True):
+ object.__setattr__(self, '_mod', mod)
+ object.__setattr__(self, '_encoding', encoding)
+ object.__setattr__(self, '_overrides', overrides)
+ if cache:
+ cache = {}
+ else:
+ cache = None
+ object.__setattr__(self, '_cache', cache)
+
+ def __getattribute__(self, attr):
+ cache = object.__getattribute__(self, '_cache')
+ if cache is not None:
+ result = cache.get(attr)
+ if result is not None:
+ return result
+ result = getattr(object.__getattribute__(self, '_mod'), attr)
+ encoding = object.__getattribute__(self, '_encoding')
+ overrides = object.__getattribute__(self, '_overrides')
+ override = None
+ if overrides is not None:
+ override = overrides.get(id(result))
+ if override is not None:
+ result = override
+ elif isinstance(result, type):
+ pass
+ elif type(result) is types.ModuleType:
+ result = _unicode_module_wrapper(result,
+ encoding=encoding, overrides=overrides)
+ elif hasattr(result, '__call__'):
+ result = _unicode_func_wrapper(result, encoding=encoding)
+ if cache is not None:
+ cache[attr] = result
+ return result
+
+class _eintr_func_wrapper(object):
+ """
+ Wraps a function and handles EINTR by calling the function as
+ many times as necessary (until it returns without raising EINTR).
+ """
+
+ __slots__ = ('_func',)
+
+ def __init__(self, func):
+ self._func = func
+
+ def __call__(self, *args, **kwargs):
+
+ while True:
+ try:
+ rval = self._func(*args, **kwargs)
+ break
+ except EnvironmentError as e:
+ if e.errno != errno.EINTR:
+ raise
+
+ return rval
+
+import os as _os
+_os_overrides = {
+ id(_os.fdopen) : _os.fdopen,
+ id(_os.popen) : _os.popen,
+ id(_os.read) : _os.read,
+ id(_os.system) : _os.system,
+ id(_os.waitpid) : _eintr_func_wrapper(_os.waitpid)
+}
+
+
+try:
+ _os_overrides[id(_os.mkfifo)] = _os.mkfifo
+except AttributeError:
+ pass # Jython
+
+if hasattr(_os, 'statvfs'):
+ _os_overrides[id(_os.statvfs)] = _os.statvfs
+
+os = _unicode_module_wrapper(_os, overrides=_os_overrides,
+ encoding=_encodings['fs'])
+_os_merge = _unicode_module_wrapper(_os,
+ encoding=_encodings['merge'], overrides=_os_overrides)
+
+import shutil as _shutil
+shutil = _unicode_module_wrapper(_shutil, encoding=_encodings['fs'])
+
+# Imports below this point rely on the above unicode wrapper definitions.
+try:
+ __import__('selinux')
+ import portage._selinux
+ selinux = _unicode_module_wrapper(_selinux,
+ encoding=_encodings['fs'])
+ _selinux_merge = _unicode_module_wrapper(_selinux,
+ encoding=_encodings['merge'])
+except (ImportError, OSError) as e:
+ if isinstance(e, OSError):
+ sys.stderr.write("!!! SELinux not loaded: %s\n" % str(e))
+ del e
+ _selinux = None
+ selinux = None
+ _selinux_merge = None
+
+# ===========================================================================
+# END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END OF IMPORTS -- END
+# ===========================================================================
+
+_python_interpreter = os.path.realpath(sys.executable)
+_bin_path = PORTAGE_BIN_PATH
+_pym_path = PORTAGE_PYM_PATH
+_not_installed = os.path.isfile(os.path.join(PORTAGE_BASE_PATH, ".portage_not_installed"))
+
+# API consumers included in portage should set this to True.
+_internal_caller = False
+
+_sync_mode = False
+
+def _get_stdin():
+ """
+ Buggy code in python's multiprocessing/process.py closes sys.stdin
+ and reassigns it to open(os.devnull), but fails to update the
+ corresponding __stdin__ reference. So, detect that case and handle
+ it appropriately.
+ """
+ if not sys.__stdin__.closed:
+ return sys.__stdin__
+ return sys.stdin
+
+_shell_quote_re = re.compile(r"[\s><=*\\\"'$`]")
+
+def _shell_quote(s):
+ """
+ Quote a string in double-quotes and use backslashes to
+ escape any backslashes, double-quotes, dollar signs, or
+ backquotes in the string.
+ """
+ if _shell_quote_re.search(s) is None:
+ return s
+ for letter in "\\\"$`":
+ if letter in s:
+ s = s.replace(letter, "\\" + letter)
+ return "\"%s\"" % s
+
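+# For example (hypothetical inputs): _shell_quote('a b') returns '"a b"'
+# because whitespace triggers quoting, while 'foo-1.0' is returned unchanged;
+# any backslash, double-quote, dollar sign or backquote in a quoted string is
+# backslash-escaped.
+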
+bsd_chflags = None
+
+if platform.system() in ('FreeBSD',):
+ # TODO: remove this class?
+ class bsd_chflags(object):
+ chflags = os.chflags
+ lchflags = os.lchflags
+
+def load_mod(name):
+ modname = ".".join(name.split(".")[:-1])
+ mod = __import__(modname)
+ components = name.split('.')
+ for comp in components[1:]:
+ mod = getattr(mod, comp)
+ return mod
+
+def getcwd():
+ "this fixes situations where the current directory doesn't exist"
+ try:
+ return os.getcwd()
+ except OSError: #dir doesn't exist
+ os.chdir("/")
+ return "/"
+getcwd()
+
+def abssymlink(symlink, target=None):
+ """
+ This reads a symlink, resolves a relative target if necessary,
+ and returns the absolute path of the target.
+ @param symlink: path of symlink (must be absolute)
+ @param target: the target of the symlink (as returned
+ by readlink)
+ @rtype: str
+ @return: the absolute path of the symlink target
+ """
+ if target is not None:
+ mylink = target
+ else:
+ mylink = os.readlink(symlink)
+ if mylink[0] != '/':
+ mydir = os.path.dirname(symlink)
+ mylink = mydir + "/" + mylink
+ return os.path.normpath(mylink)
+
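+# For example (hypothetical paths): if /usr/lib/foo is a symlink whose
+# readlink() target is "../share/foo", abssymlink("/usr/lib/foo") joins the
+# target with the link's directory and normalizes it to "/usr/share/foo";
+# an absolute target is simply normalized and returned.
+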
+_doebuild_manifest_exempt_depend = 0
+
+_testing_eapis = frozenset(["4-python", "4-slot-abi", "5-progress", "5-hdepend", "7_pre1", "7"])
+_deprecated_eapis = frozenset(["4_pre1", "3_pre2", "3_pre1", "5_pre1", "5_pre2", "6_pre1"])
+_supported_eapis = frozenset([str(x) for x in range(portage.const.EAPI + 1)] + list(_testing_eapis) + list(_deprecated_eapis))
+
+def _eapi_is_deprecated(eapi):
+ return eapi in _deprecated_eapis
+
+def eapi_is_supported(eapi):
+ if not isinstance(eapi, basestring):
+ # Only call str() when necessary since with python2 it
+ # can trigger UnicodeEncodeError if EAPI is corrupt.
+ eapi = str(eapi)
+ eapi = eapi.strip()
+
+ return eapi in _supported_eapis
+
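+# For example: eapi_is_supported("0") is True (official EAPIs up to
+# portage.const.EAPI are generated above), eapi_is_supported("5-progress") is
+# True via _testing_eapis, and _eapi_is_deprecated("3_pre1") is True while an
+# arbitrary value such as "not-an-eapi" is unsupported.
+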
+# This pattern is specified by PMS section 7.3.1.
+_pms_eapi_re = re.compile(r"^[ \t]*EAPI=(['\"]?)([A-Za-z0-9+_.-]*)\1[ \t]*([ \t]#.*)?$")
+_comment_or_blank_line = re.compile(r"^\s*(#.*)?$")
+
+def _parse_eapi_ebuild_head(f):
+ eapi = None
+ eapi_lineno = None
+ lineno = 0
+ for line in f:
+ lineno += 1
+ m = _comment_or_blank_line.match(line)
+ if m is None:
+ eapi_lineno = lineno
+ m = _pms_eapi_re.match(line)
+ if m is not None:
+ eapi = m.group(2)
+ break
+
+ return (eapi, eapi_lineno)
+
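+# For example (hypothetical ebuild head): if the first two lines of the file
+# are "# Copyright ..." and "EAPI=6", _parse_eapi_ebuild_head() returns
+# ("6", 2); parsing stops at the first line that is neither blank nor a
+# comment, and (None, lineno) is returned when that line is not a valid
+# EAPI assignment.
+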
+def _movefile(src, dest, **kwargs):
+ """Calls movefile and raises a PortageException if an error occurs."""
+ if movefile(src, dest, **kwargs) is None:
+ raise portage.exception.PortageException(
+ "mv '%s' '%s'" % (src, dest))
+
+auxdbkeys = (
+ 'DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
+ 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
+ 'KEYWORDS', 'INHERITED', 'IUSE', 'REQUIRED_USE',
+ 'PDEPEND', 'BDEPEND', 'EAPI',
+ 'PROPERTIES', 'DEFINED_PHASES', 'HDEPEND', 'UNUSED_04',
+ 'UNUSED_03', 'UNUSED_02', 'UNUSED_01',
+)
+auxdbkeylen = len(auxdbkeys)
+
+def portageexit():
+ pass
+
+class _trees_dict(dict):
+ __slots__ = ('_running_eroot', '_target_eroot',)
+ def __init__(self, *pargs, **kargs):
+ dict.__init__(self, *pargs, **kargs)
+ self._running_eroot = None
+ self._target_eroot = None
+
+def create_trees(config_root=None, target_root=None, trees=None, env=None,
+ sysroot=None, eprefix=None):
+
+ if trees is None:
+ trees = _trees_dict()
+ elif not isinstance(trees, _trees_dict):
+ # caller passed a normal dict or something,
+ # but we need a _trees_dict instance
+ trees = _trees_dict(trees)
+
+ if env is None:
+ env = os.environ
+
+ settings = config(config_root=config_root, target_root=target_root,
+ env=env, sysroot=sysroot, eprefix=eprefix)
+ settings.lock()
+
+ depcachedir = settings.get('PORTAGE_DEPCACHEDIR')
+ trees._target_eroot = settings['EROOT']
+ myroots = [(settings['EROOT'], settings)]
+ if settings["ROOT"] == "/" and settings["EPREFIX"] == const.EPREFIX:
+ trees._running_eroot = trees._target_eroot
+ else:
+
+ # When ROOT != "/" we only want overrides from the calling
+ # environment to apply to the config that's associated
+ # with ROOT != "/", so pass a nearly empty dict for the env parameter.
+ clean_env = {}
+ for k in ('PATH', 'PORTAGE_GRPNAME', 'PORTAGE_REPOSITORIES', 'PORTAGE_USERNAME',
+ 'PYTHONPATH', 'SSH_AGENT_PID', 'SSH_AUTH_SOCK', 'TERM',
+ 'ftp_proxy', 'http_proxy', 'no_proxy',
+ '__PORTAGE_TEST_HARDLINK_LOCKS'):
+ v = settings.get(k)
+ if v is not None:
+ clean_env[k] = v
+ if depcachedir is not None:
+ clean_env['PORTAGE_DEPCACHEDIR'] = depcachedir
+ settings = config(config_root=None, target_root="/",
+ env=clean_env, sysroot="/", eprefix=None)
+ settings.lock()
+ trees._running_eroot = settings['EROOT']
+ myroots.append((settings['EROOT'], settings))
+
+ for myroot, mysettings in myroots:
+ trees[myroot] = portage.util.LazyItemsDict(trees.get(myroot, {}))
+ trees[myroot].addLazySingleton("virtuals", mysettings.getvirtuals)
+ trees[myroot].addLazySingleton(
+ "vartree", vartree, categories=mysettings.categories,
+ settings=mysettings)
+ trees[myroot].addLazySingleton("porttree",
+ portagetree, settings=mysettings)
+ trees[myroot].addLazySingleton("bintree",
+ binarytree, pkgdir=mysettings["PKGDIR"], settings=mysettings)
+ return trees
+
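+# A rough consumer-side sketch (assuming default roots): create_trees()
+# returns a _trees_dict keyed by EROOT, and each entry lazily constructs its
+# "vartree", "porttree" and "bintree" singletons on first access, e.g.
+#
+#     trees = create_trees()
+#     vardb = trees[trees._target_eroot]["vartree"].dbapi
+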
+if VERSION == 'HEAD':
+ class _LazyVersion(proxy.objectproxy.ObjectProxy):
+ def _get_target(self):
+ global VERSION
+ if VERSION is not self:
+ return VERSION
+ if os.path.isdir(os.path.join(PORTAGE_BASE_PATH, '.git')):
+ encoding = _encodings['fs']
+ cmd = [BASH_BINARY, "-c", ("cd %s ; git describe --match 'portage-*' || exit $? ; " + \
+ "if [ -n \"`git diff-index --name-only --diff-filter=M HEAD`\" ] ; " + \
+ "then echo modified ; git rev-list --format=%%ct -n 1 HEAD ; fi ; " + \
+ "exit 0") % _shell_quote(PORTAGE_BASE_PATH)]
+ cmd = [_unicode_encode(x, encoding=encoding, errors='strict')
+ for x in cmd]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ output = _unicode_decode(proc.communicate()[0], encoding=encoding)
+ status = proc.wait()
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
+ output_lines = output.splitlines()
+ if output_lines:
+ version_split = output_lines[0].split('-')
+ if len(version_split) > 1:
+ VERSION = version_split[1]
+ patchlevel = False
+ if len(version_split) > 2:
+ patchlevel = True
+ VERSION = "%s_p%s" % (VERSION, version_split[2])
+ if len(output_lines) > 1 and output_lines[1] == 'modified':
+ head_timestamp = None
+ if len(output_lines) > 3:
+ try:
+ head_timestamp = long(output_lines[3])
+ except ValueError:
+ pass
+ timestamp = long(time.time())
+ if head_timestamp is not None and timestamp > head_timestamp:
+ timestamp = timestamp - head_timestamp
+ if not patchlevel:
+ VERSION = "%s_p0" % (VERSION,)
+ VERSION = "%s_p%d" % (VERSION, timestamp)
+ return VERSION
+ VERSION = 'HEAD'
+ return VERSION
+ VERSION = _LazyVersion()
+
+_legacy_global_var_names = ("archlist", "db", "features",
+ "groups", "mtimedb", "mtimedbfile", "pkglines",
+ "portdb", "profiledir", "root", "selinux_enabled",
+ "settings", "thirdpartymirrors")
+
+def _reset_legacy_globals():
+
+ global _legacy_globals_constructed
+ _legacy_globals_constructed = set()
+ for k in _legacy_global_var_names:
+ globals()[k] = _LegacyGlobalProxy(k)
+
+class _LegacyGlobalProxy(proxy.objectproxy.ObjectProxy):
+
+ __slots__ = ('_name',)
+
+ def __init__(self, name):
+ proxy.objectproxy.ObjectProxy.__init__(self)
+ object.__setattr__(self, '_name', name)
+
+ def _get_target(self):
+ name = object.__getattribute__(self, '_name')
+ from portage._legacy_globals import _get_legacy_global
+ return _get_legacy_global(name)
+
+_reset_legacy_globals()
+
+def _disable_legacy_globals():
+ """
+ This deletes the ObjectProxy instances that are used
+ for lazy initialization of legacy global variables.
+ The purpose of deleting them is to prevent new code
+ from referencing these deprecated variables.
+ """
+ global _legacy_global_var_names
+ for k in _legacy_global_var_names:
+ globals().pop(k, None)
diff --git a/lib/portage/_emirrordist/Config.py b/lib/portage/_emirrordist/Config.py
new file mode 100644
index 000000000..574a83559
--- /dev/null
+++ b/lib/portage/_emirrordist/Config.py
@@ -0,0 +1,140 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import copy
+import io
+import logging
+import shelve
+import sys
+import time
+
+import portage
+from portage import os
+from portage.util import grabdict, grablines
+from portage.util._ShelveUnicodeWrapper import ShelveUnicodeWrapper
+
+class Config(object):
+ def __init__(self, options, portdb, event_loop):
+ self.options = options
+ self.portdb = portdb
+ self.event_loop = event_loop
+ self.added_byte_count = 0
+ self.added_file_count = 0
+ self.scheduled_deletion_count = 0
+ self.delete_count = 0
+ self.file_owners = {}
+ self.file_failures = {}
+ self.start_time = time.time()
+ self._open_files = []
+
+ self.log_success = self._open_log('success', options.success_log, 'a')
+ self.log_failure = self._open_log('failure', options.failure_log, 'a')
+
+ self.distfiles = None
+ if options.distfiles is not None:
+ self.distfiles = options.distfiles
+
+ self.mirrors = copy.copy(portdb.settings.thirdpartymirrors())
+
+ if options.mirror_overrides is not None:
+ self.mirrors.update(grabdict(options.mirror_overrides))
+
+ if options.mirror_skip is not None:
+ for x in options.mirror_skip.split(","):
+ self.mirrors[x] = []
+
+ self.whitelist = None
+ if options.whitelist_from is not None:
+ self.whitelist = set()
+ for filename in options.whitelist_from:
+ for line in grablines(filename):
+ line = line.strip()
+ if line and not line.startswith("#"):
+ self.whitelist.add(line)
+
+ self.restrict_mirror_exemptions = None
+ if options.restrict_mirror_exemptions is not None:
+ self.restrict_mirror_exemptions = frozenset(
+ options.restrict_mirror_exemptions.split(","))
+
+ self.recycle_db = None
+ if options.recycle_db is not None:
+ self.recycle_db = self._open_shelve(
+ options.recycle_db, 'recycle')
+
+ self.distfiles_db = None
+ if options.distfiles_db is not None:
+ self.distfiles_db = self._open_shelve(
+ options.distfiles_db, 'distfiles')
+
+ self.deletion_db = None
+ if options.deletion_db is not None:
+ self.deletion_db = self._open_shelve(
+ options.deletion_db, 'deletion')
+
+ def _open_log(self, log_desc, log_path, mode):
+
+ if log_path is None or self.options.dry_run:
+ log_func = logging.info
+ line_format = "%s: %%s" % log_desc
+ add_newline = False
+ if log_path is not None:
+ logging.warning("dry-run: %s log "
+ "redirected to logging.info" % log_desc)
+ else:
+ self._open_files.append(io.open(log_path, mode=mode,
+ encoding='utf_8'))
+ line_format = "%s\n"
+ log_func = self._open_files[-1].write
+
+ return self._LogFormatter(line_format, log_func)
+
+ class _LogFormatter(object):
+
+ __slots__ = ('_line_format', '_log_func')
+
+ def __init__(self, line_format, log_func):
+ self._line_format = line_format
+ self._log_func = log_func
+
+ def __call__(self, msg):
+ self._log_func(self._line_format % (msg,))
+
+ def _open_shelve(self, db_file, db_desc):
+ if self.options.dry_run:
+ open_flag = "r"
+ else:
+ open_flag = "c"
+
+ if self.options.dry_run and not os.path.exists(db_file):
+ db = {}
+ else:
+ try:
+ db = shelve.open(db_file, flag=open_flag)
+ except ImportError as e:
+ # ImportError has different attributes for python2 vs. python3
+ if (getattr(e, 'name', None) == 'bsddb' or
+ getattr(e, 'message', None) == 'No module named bsddb'):
+ from bsddb3 import dbshelve
+ db = dbshelve.open(db_file, flags=open_flag)
+ else:
+ # Re-raise so the original ImportError is not silently
+ # swallowed, which would leave 'db' unbound below.
+ raise
+
+ if sys.hexversion < 0x3000000:
+ db = ShelveUnicodeWrapper(db)
+
+ if self.options.dry_run:
+ logging.warning("dry-run: %s db opened in readonly mode" % db_desc)
+ if not isinstance(db, dict):
+ volatile_db = dict((k, db[k]) for k in db)
+ db.close()
+ db = volatile_db
+ else:
+ self._open_files.append(db)
+
+ return db
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ while self._open_files:
+ self._open_files.pop().close()
diff --git a/lib/portage/_emirrordist/DeletionIterator.py b/lib/portage/_emirrordist/DeletionIterator.py
new file mode 100644
index 000000000..dff52c042
--- /dev/null
+++ b/lib/portage/_emirrordist/DeletionIterator.py
@@ -0,0 +1,83 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+import stat
+
+from portage import os
+from .DeletionTask import DeletionTask
+
+class DeletionIterator(object):
+
+ def __init__(self, config):
+ self._config = config
+
+ def __iter__(self):
+ distdir = self._config.options.distfiles
+ file_owners = self._config.file_owners
+ whitelist = self._config.whitelist
+ distfiles_local = self._config.options.distfiles_local
+ deletion_db = self._config.deletion_db
+ deletion_delay = self._config.options.deletion_delay
+ start_time = self._config.start_time
+ distfiles_set = set(os.listdir(self._config.options.distfiles))
+ for filename in distfiles_set:
+ try:
+ st = os.stat(os.path.join(distdir, filename))
+ except OSError as e:
+ logging.error("stat failed on '%s' in distfiles: %s\n" %
+ (filename, e))
+ continue
+ if not stat.S_ISREG(st.st_mode):
+ continue
+ elif filename in file_owners:
+ if deletion_db is not None:
+ try:
+ del deletion_db[filename]
+ except KeyError:
+ pass
+ elif whitelist is not None and filename in whitelist:
+ if deletion_db is not None:
+ try:
+ del deletion_db[filename]
+ except KeyError:
+ pass
+ elif distfiles_local is not None and \
+ os.path.exists(os.path.join(distfiles_local, filename)):
+ if deletion_db is not None:
+ try:
+ del deletion_db[filename]
+ except KeyError:
+ pass
+ else:
+ self._config.scheduled_deletion_count += 1
+
+ if deletion_db is None or deletion_delay is None:
+
+ yield DeletionTask(background=True,
+ distfile=filename,
+ config=self._config)
+
+ else:
+ deletion_entry = deletion_db.get(filename)
+
+ if deletion_entry is None:
+ logging.debug("add '%s' to deletion db" % filename)
+ deletion_db[filename] = start_time
+
+ elif deletion_entry + deletion_delay <= start_time:
+
+ yield DeletionTask(background=True,
+ distfile=filename,
+ config=self._config)
+
+ if deletion_db is not None:
+ for filename in list(deletion_db):
+ if filename not in distfiles_set:
+ try:
+ del deletion_db[filename]
+ except KeyError:
+ pass
+ else:
+ logging.debug("drop '%s' from deletion db" %
+ filename)
diff --git a/lib/portage/_emirrordist/DeletionTask.py b/lib/portage/_emirrordist/DeletionTask.py
new file mode 100644
index 000000000..7d10957fa
--- /dev/null
+++ b/lib/portage/_emirrordist/DeletionTask.py
@@ -0,0 +1,129 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+
+from portage import os
+from portage.util._async.FileCopier import FileCopier
+from _emerge.CompositeTask import CompositeTask
+
+class DeletionTask(CompositeTask):
+
+ __slots__ = ('distfile', 'config')
+
+ def _start(self):
+
+ distfile_path = os.path.join(
+ self.config.options.distfiles, self.distfile)
+
+ if self.config.options.recycle_dir is not None:
+ distfile_path = os.path.join(self.config.options.distfiles, self.distfile)
+ recycle_path = os.path.join(
+ self.config.options.recycle_dir, self.distfile)
+ if self.config.options.dry_run:
+ logging.info(("dry-run: move '%s' from "
+ "distfiles to recycle") % self.distfile)
+ else:
+ logging.debug(("move '%s' from "
+ "distfiles to recycle") % self.distfile)
+ try:
+ os.rename(distfile_path, recycle_path)
+ except OSError as e:
+ if e.errno != errno.EXDEV:
+ logging.error(("rename %s from distfiles to "
+ "recycle failed: %s") % (self.distfile, e))
+ else:
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ self._start_task(
+ FileCopier(src_path=distfile_path,
+ dest_path=recycle_path,
+ background=False),
+ self._recycle_copier_exit)
+ return
+
+ success = True
+
+ if self.config.options.dry_run:
+ logging.info(("dry-run: delete '%s' from "
+ "distfiles") % self.distfile)
+ else:
+ logging.debug(("delete '%s' from "
+ "distfiles") % self.distfile)
+ try:
+ os.unlink(distfile_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ logging.error("%s unlink failed in distfiles: %s" %
+ (self.distfile, e))
+ success = False
+
+ if success:
+ self._success()
+ self.returncode = os.EX_OK
+ else:
+ self.returncode = 1
+
+ self._async_wait()
+
+ def _recycle_copier_exit(self, copier):
+
+ self._assert_current(copier)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ success = True
+ if copier.returncode == os.EX_OK:
+
+ try:
+ os.unlink(copier.src_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ logging.error("%s unlink failed in distfiles: %s" %
+ (self.distfile, e))
+ success = False
+
+ else:
+ logging.error(("%s copy from distfiles "
+ "to recycle failed with exit status %s") % (self.distfile, copier.returncode))
+ success = False
+
+ if success:
+ self._success()
+ self.returncode = os.EX_OK
+ else:
+ self.returncode = 1
+
+ self._current_task = None
+ self.wait()
+
+ def _success(self):
+
+ cpv = "unknown"
+ if self.config.distfiles_db is not None:
+ cpv = self.config.distfiles_db.get(self.distfile, cpv)
+
+ self.config.delete_count += 1
+ self.config.log_success("%s\t%s\tremoved" % (cpv, self.distfile))
+
+ if self.config.distfiles_db is not None:
+ try:
+ del self.config.distfiles_db[self.distfile]
+ except KeyError:
+ pass
+ else:
+ logging.debug(("drop '%s' from "
+ "distfiles db") % self.distfile)
+
+ if self.config.deletion_db is not None:
+ try:
+ del self.config.deletion_db[self.distfile]
+ except KeyError:
+ pass
+ else:
+ logging.debug(("drop '%s' from "
+ "deletion db") % self.distfile)
diff --git a/lib/portage/_emirrordist/FetchIterator.py b/lib/portage/_emirrordist/FetchIterator.py
new file mode 100644
index 000000000..4ad797502
--- /dev/null
+++ b/lib/portage/_emirrordist/FetchIterator.py
@@ -0,0 +1,289 @@
+# Copyright 2013-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import threading
+
+from portage import os
+from portage.checksum import (_apply_hash_filter,
+ _filter_unaccelarated_hashes, _hash_filter)
+from portage.dep import use_reduce
+from portage.exception import PortageException, PortageKeyError
+from portage.util._async.AsyncTaskFuture import AsyncTaskFuture
+from portage.util._async.TaskScheduler import TaskScheduler
+from portage.util.futures.iter_completed import iter_gather
+from .FetchTask import FetchTask
+from _emerge.CompositeTask import CompositeTask
+
+
+class FetchIterator(object):
+
+ def __init__(self, config):
+ self._config = config
+ self._terminated = threading.Event()
+
+ def terminate(self):
+ """
+ Schedules early termination of the __iter__ method, which is
+ useful because under some conditions it's possible for __iter__
+ to loop for a long time without yielding to the caller. For
+ example, it's useful when there are many ebuilds with stale
+ cache and RESTRICT=mirror.
+
+ This method is thread-safe (and safe for signal handlers).
+ """
+ self._terminated.set()
+
+ def _iter_every_cp(self):
+ # List categories individually, in order to start yielding quicker,
+ # and in order to reduce latency in case of a signal interrupt.
+ cp_all = self._config.portdb.cp_all
+ for category in sorted(self._config.portdb.categories):
+ for cp in cp_all(categories=(category,)):
+ yield cp
+
+ def __iter__(self):
+
+ portdb = self._config.portdb
+ get_repo_for_location = portdb.repositories.get_repo_for_location
+
+ hash_filter = _hash_filter(
+ portdb.settings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if hash_filter.transparent:
+ hash_filter = None
+
+ for cp in self._iter_every_cp():
+
+ if self._terminated.is_set():
+ return
+
+ for tree in portdb.porttrees:
+
+ # Reset state so the Manifest is pulled once
+ # for this cp / tree combination.
+ repo_config = get_repo_for_location(tree)
+ digests_future = portdb._event_loop.create_future()
+
+ for cpv in portdb.cp_list(cp, mytree=tree):
+
+ if self._terminated.is_set():
+ return
+
+ yield _EbuildFetchTasks(
+ fetch_tasks_future=_async_fetch_tasks(
+ self._config,
+ hash_filter,
+ repo_config,
+ digests_future,
+ cpv,
+ portdb._event_loop)
+ )
+
+
+class _EbuildFetchTasks(CompositeTask):
+ """
+ Executes FetchTask instances (which are asynchronously constructed)
+ for each of the files referenced by an ebuild.
+ """
+ __slots__ = ('fetch_tasks_future',)
+ def _start(self):
+ self._start_task(AsyncTaskFuture(future=self.fetch_tasks_future),
+ self._start_fetch_tasks)
+
+ def _start_fetch_tasks(self, task):
+ if self._default_exit(task) != os.EX_OK:
+ self._async_wait()
+ return
+
+ self._start_task(
+ TaskScheduler(
+ iter(self.fetch_tasks_future.result()),
+ max_jobs=1,
+ event_loop=self.scheduler),
+ self._default_final_exit)
+
+
+def _async_fetch_tasks(config, hash_filter, repo_config, digests_future, cpv,
+ loop):
+ """
+ Asynchronously construct FetchTask instances for each of the files
+ referenced by an ebuild.
+
+ @param config: emirrordist config
+ @type config: portage._emirrordist.Config.Config
+ @param hash_filter: PORTAGE_CHECKSUM_FILTER settings
+ @type hash_filter: portage.checksum._hash_filter
+ @param repo_config: repository configuration
+ @type repo_config: RepoConfig
+ @param digests_future: future that contains cached distfiles digests
+ for the current cp if available
+ @type digests_future: asyncio.Future
+ @param cpv: current ebuild cpv
+ @type cpv: portage.versions._pkg_str
+ @param loop: event loop
+ @type loop: EventLoop
+ @return: A future that results in a list containing FetchTask
+ instances for each of the files referenced by an ebuild.
+ @rtype: asyncio.Future (or compatible)
+ """
+ result = loop.create_future()
+ fetch_tasks = []
+
+ def aux_get_done(gather_result):
+ # All exceptions must be consumed from gather_result before this
+ # function returns, in order to avoid triggering the event loop's
+ # exception handler.
+ if not gather_result.cancelled():
+ list(future.exception() for future in gather_result.result()
+ if not future.cancelled())
+ else:
+ result.cancel()
+
+ if result.cancelled():
+ return
+
+ aux_get_result, fetch_map_result = gather_result.result()
+ if aux_get_result.cancelled() or fetch_map_result.cancelled():
+ # Cancel result after consuming any exceptions which
+ # are now irrelevant due to cancellation.
+ aux_get_result.cancelled() or aux_get_result.exception()
+ fetch_map_result.cancelled() or fetch_map_result.exception()
+ result.cancel()
+ return
+
+ try:
+ restrict, = aux_get_result.result()
+ except (PortageKeyError, PortageException) as e:
+ config.log_failure("%s\t\taux_get exception %s" %
+ (cpv, e))
+ result.set_result(fetch_tasks)
+ return
+
+ # Here we use matchnone=True to ignore conditional parts
+ # of RESTRICT since they don't apply unconditionally.
+ # Assume such conditionals only apply on the client side.
+ try:
+ restrict = frozenset(use_reduce(restrict,
+ flat=True, matchnone=True))
+ except PortageException as e:
+ config.log_failure("%s\t\tuse_reduce exception %s" %
+ (cpv, e))
+ result.set_result(fetch_tasks)
+ return
+
+ if "fetch" in restrict:
+ result.set_result(fetch_tasks)
+ return
+
+ try:
+ uri_map = fetch_map_result.result()
+ except PortageException as e:
+ config.log_failure("%s\t\tgetFetchMap exception %s" %
+ (cpv, e))
+ result.set_result(fetch_tasks)
+ return
+
+ if not uri_map:
+ result.set_result(fetch_tasks)
+ return
+
+ if "mirror" in restrict:
+ skip = False
+ if config.restrict_mirror_exemptions is not None:
+ new_uri_map = {}
+ for filename, uri_tuple in uri_map.items():
+ for uri in uri_tuple:
+ if uri[:9] == "mirror://":
+ i = uri.find("/", 9)
+ if i != -1 and uri[9:i].strip("/") in \
+ config.restrict_mirror_exemptions:
+ new_uri_map[filename] = uri_tuple
+ break
+ if new_uri_map:
+ uri_map = new_uri_map
+ else:
+ skip = True
+ else:
+ skip = True
+
+ if skip:
+ result.set_result(fetch_tasks)
+ return
+
+ # Parse Manifest for this cp if we haven't yet.
+ try:
+ if digests_future.done():
+ # If there's an exception then raise it.
+ digests = digests_future.result()
+ else:
+ digests = repo_config.load_manifest(
+ os.path.join(repo_config.location, cpv.cp)).\
+ getTypeDigests("DIST")
+ except (EnvironmentError, PortageException) as e:
+ digests_future.done() or digests_future.set_exception(e)
+ for filename in uri_map:
+ config.log_failure(
+ "%s\t%s\tManifest exception %s" %
+ (cpv, filename, e))
+ config.file_failures[filename] = cpv
+ result.set_result(fetch_tasks)
+ return
+ else:
+ digests_future.done() or digests_future.set_result(digests)
+
+ if not digests:
+ for filename in uri_map:
+ config.log_failure("%s\t%s\tdigest entry missing" %
+ (cpv, filename))
+ config.file_failures[filename] = cpv
+ result.set_result(fetch_tasks)
+ return
+
+ for filename, uri_tuple in uri_map.items():
+ file_digests = digests.get(filename)
+ if file_digests is None:
+ config.log_failure("%s\t%s\tdigest entry missing" %
+ (cpv, filename))
+ config.file_failures[filename] = cpv
+ continue
+ if filename in config.file_owners:
+ continue
+ config.file_owners[filename] = cpv
+
+ file_digests = \
+ _filter_unaccelarated_hashes(file_digests)
+ if hash_filter is not None:
+ file_digests = _apply_hash_filter(
+ file_digests, hash_filter)
+
+ fetch_tasks.append(FetchTask(
+ cpv=cpv,
+ background=True,
+ digests=file_digests,
+ distfile=filename,
+ restrict=restrict,
+ uri_tuple=uri_tuple,
+ config=config))
+
+ result.set_result(fetch_tasks)
+
+ def future_generator():
+ yield config.portdb.async_aux_get(cpv, ("RESTRICT",),
+ myrepo=repo_config.name, loop=loop)
+ yield config.portdb.async_fetch_map(cpv,
+ mytree=repo_config.location, loop=loop)
+
+ # Use iter_gather(max_jobs=1) to limit the number of processes per
+ # _EbuildFetchTask instance, and also to avoid spawning two bash
+ # processes for the same cpv simultaneously (the second one can
+ # use metadata cached by the first one).
+ gather_result = iter_gather(
+ future_generator(),
+ max_jobs=1,
+ loop=loop,
+ )
+ gather_result.add_done_callback(aux_get_done)
+ result.add_done_callback(lambda result:
+ gather_result.cancel() if result.cancelled() and
+ not gather_result.done() else None)
+
+ return result
diff --git a/lib/portage/_emirrordist/FetchTask.py b/lib/portage/_emirrordist/FetchTask.py
new file mode 100644
index 000000000..1440b697c
--- /dev/null
+++ b/lib/portage/_emirrordist/FetchTask.py
@@ -0,0 +1,631 @@
+# Copyright 2013-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division
+
+import collections
+import errno
+import logging
+import random
+import stat
+import subprocess
+import sys
+
+import portage
+from portage import _encodings, _unicode_encode
+from portage import os
+from portage.util._async.FileCopier import FileCopier
+from portage.util._async.FileDigester import FileDigester
+from portage.util._async.PipeLogger import PipeLogger
+from portage.util._async.PopenProcess import PopenProcess
+from _emerge.CompositeTask import CompositeTask
+
+default_hash_name = portage.const.MANIFEST2_HASH_DEFAULT
+
+# Use --no-check-certificate since Manifest digests should provide
+# enough security, and certificates can be self-signed or whatnot.
+default_fetchcommand = "wget -c -v -t 1 --passive-ftp --no-check-certificate --timeout=60 -O \"${DISTDIR}/${FILE}\" \"${URI}\""
+
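+# With hypothetical values DISTDIR=/var/cache/distfiles,
+# FILE=foo.tar.gz._emirrordist_fetch_.12345 and URI=https://example.org/foo.tar.gz,
+# _fetch_uri() below expands the command above to roughly:
+#
+#     wget -c -v -t 1 --passive-ftp --no-check-certificate --timeout=60 \
+#         -O "/var/cache/distfiles/foo.tar.gz._emirrordist_fetch_.12345" \
+#         "https://example.org/foo.tar.gz"
+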
+class FetchTask(CompositeTask):
+
+ __slots__ = ('distfile', 'digests', 'config', 'cpv',
+ 'restrict', 'uri_tuple', '_current_mirror',
+ '_current_stat', '_fetch_tmp_dir_info', '_fetch_tmp_file',
+ '_fs_mirror_stack', '_mirror_stack',
+ '_previously_added',
+ '_primaryuri_stack', '_log_path', '_tried_uris')
+
+ def _start(self):
+
+ if self.config.options.fetch_log_dir is not None and \
+ not self.config.options.dry_run:
+ self._log_path = os.path.join(
+ self.config.options.fetch_log_dir,
+ self.distfile + '.log')
+
+ self._previously_added = True
+ if self.config.distfiles_db is not None and \
+ self.distfile not in self.config.distfiles_db:
+ self._previously_added = False
+ self.config.distfiles_db[self.distfile] = self.cpv
+
+ if not self._have_needed_digests():
+ msg = "incomplete digests: %s" % " ".join(self.digests)
+ self.scheduler.output(msg, background=self.background,
+ log_path=self._log_path)
+ self.config.log_failure("%s\t%s\t%s" %
+ (self.cpv, self.distfile, msg))
+ self.config.file_failures[self.distfile] = self.cpv
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ distfile_path = os.path.join(
+ self.config.options.distfiles, self.distfile)
+
+ st = None
+ size_ok = False
+ try:
+ st = os.stat(distfile_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ msg = "%s stat failed in %s: %s" % \
+ (self.distfile, "distfiles", e)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ else:
+ size_ok = st.st_size == self.digests["size"]
+
+ if not size_ok:
+ if self.config.options.dry_run:
+ if st is not None:
+ logging.info(("dry-run: delete '%s' with "
+ "wrong size from distfiles") % (self.distfile,))
+ else:
+ # Do the unlink in order to ensure that the path is clear,
+ # even if stat raised ENOENT, since a broken symlink can
+ # trigger ENOENT.
+ if self._unlink_file(distfile_path, "distfiles"):
+ if st is not None:
+ logging.debug(("delete '%s' with "
+ "wrong size from distfiles") % (self.distfile,))
+ else:
+ self.config.log_failure("%s\t%s\t%s" %
+ (self.cpv, self.distfile, "unlink failed in distfiles"))
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ if size_ok:
+ if self.config.options.verify_existing_digest:
+ self._start_task(
+ FileDigester(file_path=distfile_path,
+ hash_names=(self._select_hash(),),
+ background=self.background,
+ logfile=self._log_path), self._distfiles_digester_exit)
+ return
+
+ self._success()
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ self._start_fetch()
+
+ def _success(self):
+ if not self._previously_added:
+ size = self.digests["size"]
+ self.config.added_byte_count += size
+ self.config.added_file_count += 1
+ self.config.log_success("%s\t%s\tadded %i bytes" %
+ (self.cpv, self.distfile, size))
+
+ if self._log_path is not None:
+ if not self.config.options.dry_run:
+ try:
+ os.unlink(self._log_path)
+ except OSError:
+ pass
+
+ if self.config.options.recycle_dir is not None:
+
+ recycle_file = os.path.join(
+ self.config.options.recycle_dir, self.distfile)
+
+ if self.config.options.dry_run:
+ if os.path.exists(recycle_file):
+ logging.info("dry-run: delete '%s' from recycle" %
+ (self.distfile,))
+ else:
+ try:
+ os.unlink(recycle_file)
+ except OSError:
+ pass
+ else:
+ logging.debug("delete '%s' from recycle" %
+ (self.distfile,))
+
+ def _distfiles_digester_exit(self, digester):
+
+ self._assert_current(digester)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ if self._default_exit(digester) != os.EX_OK:
+ # IOError reading file in our main distfiles directory? This
+ # is a bad situation which normally does not occur, so
+ # skip this file and report it, in order to draw the
+ # administrator's attention.
+ msg = "%s distfiles digester failed unexpectedly" % \
+ (self.distfile,)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ self.config.log_failure("%s\t%s\t%s" %
+ (self.cpv, self.distfile, msg))
+ self.config.file_failures[self.distfile] = self.cpv
+ self.wait()
+ return
+
+ wrong_digest = self._find_bad_digest(digester.digests)
+ if wrong_digest is None:
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ self._start_fetch()
+
+ _mirror_info = collections.namedtuple('_mirror_info',
+ 'name location')
+
+ def _start_fetch(self):
+
+ self._previously_added = False
+ self._fs_mirror_stack = []
+ if self.config.options.distfiles_local is not None:
+ self._fs_mirror_stack.append(self._mirror_info(
+ 'distfiles-local', self.config.options.distfiles_local))
+ if self.config.options.recycle_dir is not None:
+ self._fs_mirror_stack.append(self._mirror_info(
+ 'recycle', self.config.options.recycle_dir))
+
+ self._primaryuri_stack = []
+ self._mirror_stack = []
+ for uri in reversed(self.uri_tuple):
+ if uri.startswith('mirror://'):
+ self._mirror_stack.append(
+ self._mirror_iterator(uri, self.config.mirrors))
+ else:
+ self._primaryuri_stack.append(uri)
+
+ self._tried_uris = set()
+ self._try_next_mirror()
+
+ @staticmethod
+ def _mirror_iterator(uri, mirrors_dict):
+
+ slash_index = uri.find("/", 9)
+ if slash_index != -1:
+ mirror_name = uri[9:slash_index].strip("/")
+ mirrors = mirrors_dict.get(mirror_name)
+ if not mirrors:
+ return
+ mirrors = list(mirrors)
+ while mirrors:
+ mirror = mirrors.pop(random.randint(0, len(mirrors) - 1))
+ yield mirror.rstrip("/") + "/" + uri[slash_index+1:]
+
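+ # For example (hypothetical mirror list): for the URI
+ # "mirror://gentoo/distfiles/foo.tar.gz" with
+ # mirrors_dict == {"gentoo": ["https://a.example/", "https://b.example"]},
+ # the iterator yields "https://a.example/distfiles/foo.tar.gz" and
+ # "https://b.example/distfiles/foo.tar.gz" in random order.
+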
+ def _try_next_mirror(self):
+ if self._fs_mirror_stack:
+ self._fetch_fs(self._fs_mirror_stack.pop())
+ return
+ else:
+ uri = self._next_uri()
+ if uri is not None:
+ self._tried_uris.add(uri)
+ self._fetch_uri(uri)
+ return
+
+ if self._tried_uris:
+ msg = "all uris failed"
+ else:
+ msg = "no fetchable uris"
+
+ self.config.log_failure("%s\t%s\t%s" %
+ (self.cpv, self.distfile, msg))
+ self.config.file_failures[self.distfile] = self.cpv
+ self.returncode = os.EX_OK
+ self.wait()
+
+ def _next_uri(self):
+ remaining_tries = self.config.options.tries - len(self._tried_uris)
+ if remaining_tries > 0:
+
+ if remaining_tries <= self.config.options.tries // 2:
+ while self._primaryuri_stack:
+ uri = self._primaryuri_stack.pop()
+ if uri not in self._tried_uris:
+ return uri
+
+ while self._mirror_stack:
+ uri = next(self._mirror_stack[-1], None)
+ if uri is None:
+ self._mirror_stack.pop()
+ else:
+ if uri not in self._tried_uris:
+ return uri
+
+ while self._primaryuri_stack:
+ uri = self._primaryuri_stack.pop()
+ if uri not in self._tried_uris:
+ return uri
+
+ return None
+
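+ # Ordering implemented above: _try_next_mirror() always drains the local
+ # filesystem mirror stack first; _next_uri() then prefers mirror:// URIs,
+ # and once no more than half of the configured tries (config.options.tries)
+ # remain, untried primary (upstream) URIs are popped first, with primary
+ # URIs also serving as the final fallback once the mirror stacks are
+ # exhausted.
+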
+ def _fetch_fs(self, mirror_info):
+ file_path = os.path.join(mirror_info.location, self.distfile)
+
+ st = None
+ size_ok = False
+ try:
+ st = os.stat(file_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ msg = "%s stat failed in %s: %s" % \
+ (self.distfile, mirror_info.name, e)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ else:
+ size_ok = st.st_size == self.digests["size"]
+ self._current_stat = st
+
+ if size_ok:
+ self._current_mirror = mirror_info
+ self._start_task(
+ FileDigester(file_path=file_path,
+ hash_names=(self._select_hash(),),
+ background=self.background,
+ logfile=self._log_path),
+ self._fs_mirror_digester_exit)
+ else:
+ self._try_next_mirror()
+
+ def _fs_mirror_digester_exit(self, digester):
+
+ self._assert_current(digester)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ current_mirror = self._current_mirror
+ if digester.returncode != os.EX_OK:
+ msg = "%s %s digester failed unexpectedly" % \
+ (self.distfile, current_mirror.name)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ else:
+ bad_digest = self._find_bad_digest(digester.digests)
+ if bad_digest is not None:
+ msg = "%s %s has bad %s digest: expected %s, got %s" % \
+ (self.distfile, current_mirror.name, bad_digest,
+ self.digests[bad_digest], digester.digests[bad_digest])
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ elif self.config.options.dry_run:
+ # Report success without actually touching any files
+ if self._same_device(current_mirror.location,
+ self.config.options.distfiles):
+ logging.info(("dry-run: hardlink '%s' from %s "
+ "to distfiles") % (self.distfile, current_mirror.name))
+ else:
+ logging.info("dry-run: copy '%s' from %s to distfiles" %
+ (self.distfile, current_mirror.name))
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+ else:
+ src = os.path.join(current_mirror.location, self.distfile)
+ dest = os.path.join(self.config.options.distfiles, self.distfile)
+ if self._hardlink_atomic(src, dest,
+ "%s to %s" % (current_mirror.name, "distfiles")):
+ logging.debug("hardlink '%s' from %s to distfiles" %
+ (self.distfile, current_mirror.name))
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+ else:
+ self._start_task(
+ FileCopier(src_path=src, dest_path=dest,
+ background=(self.background and
+ self._log_path is not None),
+ logfile=self._log_path),
+ self._fs_mirror_copier_exit)
+ return
+
+ self._try_next_mirror()
+
+ def _fs_mirror_copier_exit(self, copier):
+
+ self._assert_current(copier)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ current_mirror = self._current_mirror
+ if copier.returncode != os.EX_OK:
+ msg = "%s %s copy failed unexpectedly" % \
+ (self.distfile, current_mirror.name)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ else:
+
+ logging.debug("copy '%s' from %s to distfiles" %
+ (self.distfile, current_mirror.name))
+
+ # Apply the timestamp from the source file, but
+ # just rely on umask for permissions.
+ try:
+ if sys.hexversion >= 0x3030000:
+ os.utime(copier.dest_path,
+ ns=(self._current_stat.st_mtime_ns,
+ self._current_stat.st_mtime_ns))
+ else:
+ os.utime(copier.dest_path,
+ (self._current_stat[stat.ST_MTIME],
+ self._current_stat[stat.ST_MTIME]))
+ except OSError as e:
+ msg = "%s %s utime failed unexpectedly: %s" % \
+ (self.distfile, current_mirror.name, e)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ self._try_next_mirror()
+
+ def _fetch_uri(self, uri):
+
+ if self.config.options.dry_run:
+ # Simply report success.
+ logging.info("dry-run: fetch '%s' from '%s'" %
+ (self.distfile, uri))
+ self._success()
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ if self.config.options.temp_dir:
+ self._fetch_tmp_dir_info = 'temp-dir'
+ distdir = self.config.options.temp_dir
+ else:
+ self._fetch_tmp_dir_info = 'distfiles'
+ distdir = self.config.options.distfiles
+
+ tmp_basename = self.distfile + '._emirrordist_fetch_.%s' % os.getpid()
+
+ variables = {
+ "DISTDIR": distdir,
+ "URI": uri,
+ "FILE": tmp_basename
+ }
+
+ self._fetch_tmp_file = os.path.join(distdir, tmp_basename)
+
+ try:
+ os.unlink(self._fetch_tmp_file)
+ except OSError:
+ pass
+
+ args = portage.util.shlex_split(default_fetchcommand)
+ args = [portage.util.varexpand(x, mydict=variables)
+ for x in args]
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
+ not os.path.isabs(args[0]):
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see https://bugs.python.org/issue8513).
+ fullname = portage.process.find_binary(args[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(args[0])
+ args[0] = fullname
+
+ args = [_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict') for x in args]
+
+ null_fd = os.open(os.devnull, os.O_RDONLY)
+ fetcher = PopenProcess(background=self.background,
+ proc=subprocess.Popen(args, stdin=null_fd,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
+ scheduler=self.scheduler)
+ os.close(null_fd)
+
+ fetcher.pipe_reader = PipeLogger(background=self.background,
+ input_fd=fetcher.proc.stdout, log_file_path=self._log_path,
+ scheduler=self.scheduler)
+
+ self._start_task(fetcher, self._fetcher_exit)
+
+ def _fetcher_exit(self, fetcher):
+
+ self._assert_current(fetcher)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ if os.path.exists(self._fetch_tmp_file):
+ self._start_task(
+ FileDigester(file_path=self._fetch_tmp_file,
+ hash_names=(self._select_hash(),),
+ background=self.background,
+ logfile=self._log_path),
+ self._fetch_digester_exit)
+ else:
+ self._try_next_mirror()
+
+ def _fetch_digester_exit(self, digester):
+
+ self._assert_current(digester)
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ if digester.returncode != os.EX_OK:
+ msg = "%s %s digester failed unexpectedly" % \
+ (self.distfile, self._fetch_tmp_dir_info)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ else:
+ bad_digest = self._find_bad_digest(digester.digests)
+ if bad_digest is not None:
+ msg = "%s has bad %s digest: expected %s, got %s" % \
+ (self.distfile, bad_digest,
+ self.digests[bad_digest], digester.digests[bad_digest])
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ try:
+ os.unlink(self._fetch_tmp_file)
+ except OSError:
+ pass
+ else:
+ dest = os.path.join(self.config.options.distfiles, self.distfile)
+ try:
+ os.rename(self._fetch_tmp_file, dest)
+ except OSError:
+ self._start_task(
+ FileCopier(src_path=self._fetch_tmp_file,
+ dest_path=dest,
+ background=(self.background and
+ self._log_path is not None),
+ logfile=self._log_path),
+ self._fetch_copier_exit)
+ return
+ else:
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ return
+
+ self._try_next_mirror()
+
+ def _fetch_copier_exit(self, copier):
+
+ self._assert_current(copier)
+
+ try:
+ os.unlink(self._fetch_tmp_file)
+ except OSError:
+ pass
+
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ if copier.returncode == os.EX_OK:
+ self._success()
+ self.returncode = os.EX_OK
+ self.wait()
+ else:
+ # out of space?
+ msg = "%s %s copy failed unexpectedly" % \
+ (self.distfile, self._fetch_tmp_dir_info)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ self.config.log_failure("%s\t%s\t%s" %
+ (self.cpv, self.distfile, msg))
+ self.config.file_failures[self.distfile] = self.cpv
+ self.returncode = 1
+ self.wait()
+
+ def _unlink_file(self, file_path, dir_info):
+ try:
+ os.unlink(file_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ msg = "unlink '%s' failed in %s: %s" % \
+ (self.distfile, dir_info, e)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ return False
+ return True
+
+ def _have_needed_digests(self):
+ return "size" in self.digests and \
+ self._select_hash() is not None
+
+ def _select_hash(self):
+ if default_hash_name in self.digests:
+ return default_hash_name
+ else:
+ for hash_name in self.digests:
+ if hash_name != "size" and \
+ hash_name in portage.checksum.get_valid_checksum_keys():
+ return hash_name
+
+ return None
+
+ def _find_bad_digest(self, digests):
+ for hash_name, hash_value in digests.items():
+ if self.digests[hash_name] != hash_value:
+ return hash_name
+ return None
+
+ @staticmethod
+ def _same_device(path1, path2):
+ try:
+ st1 = os.stat(path1)
+ st2 = os.stat(path2)
+ except OSError:
+ return False
+ else:
+ return st1.st_dev == st2.st_dev
+
+ def _hardlink_atomic(self, src, dest, dir_info):
+
+ head, tail = os.path.split(dest)
+ hardlink_tmp = os.path.join(head, ".%s._mirrordist_hardlink_.%s" % \
+ (tail, os.getpid()))
+
+ try:
+ try:
+ os.link(src, hardlink_tmp)
+ except OSError as e:
+ if e.errno != errno.EXDEV:
+ msg = "hardlink %s from %s failed: %s" % \
+ (self.distfile, dir_info, e)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ return False
+
+ try:
+ os.rename(hardlink_tmp, dest)
+ except OSError as e:
+ msg = "hardlink rename '%s' from %s failed: %s" % \
+ (self.distfile, dir_info, e)
+ self.scheduler.output(msg + '\n', background=True,
+ log_path=self._log_path)
+ logging.error(msg)
+ return False
+ finally:
+ try:
+ os.unlink(hardlink_tmp)
+ except OSError:
+ pass
+
+ return True
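
Note on _hardlink_atomic above: the link is first created under a hidden temporary name and only then renamed over the destination, so other processes never observe a half-created entry, and the temporary name is unlinked in the finally block regardless of outcome; on EXDEV (cross-filesystem link) the helper returns False so the caller can fall back to FileCopier. A minimal standalone sketch of the same link-then-rename pattern (names and error reporting here are illustrative, not Portage's API):

import errno
import os

def hardlink_atomic(src, dest):
    """Create dest as a hard link to src without exposing a partial state."""
    head, tail = os.path.split(dest)
    tmp = os.path.join(head, ".%s.__hardlink_tmp__.%s" % (tail, os.getpid()))
    try:
        try:
            os.link(src, tmp)
        except OSError as e:
            # EXDEV just means src and dest live on different filesystems;
            # any other error is unexpected and worth reporting.
            if e.errno != errno.EXDEV:
                print("hardlink failed: %s" % e)
            return False
        try:
            os.rename(tmp, dest)  # atomic replace on POSIX filesystems
        except OSError as e:
            print("rename failed: %s" % e)
            return False
    finally:
        # Drop the temporary name whether or not the rename happened.
        try:
            os.unlink(tmp)
        except OSError:
            pass
    return True
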
diff --git a/lib/portage/_emirrordist/MirrorDistTask.py b/lib/portage/_emirrordist/MirrorDistTask.py
new file mode 100644
index 000000000..8eb3081c6
--- /dev/null
+++ b/lib/portage/_emirrordist/MirrorDistTask.py
@@ -0,0 +1,249 @@
+# Copyright 2013-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+import sys
+import time
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+import portage
+from portage import os
+from portage.util._async.TaskScheduler import TaskScheduler
+from _emerge.CompositeTask import CompositeTask
+from .FetchIterator import FetchIterator
+from .DeletionIterator import DeletionIterator
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+class MirrorDistTask(CompositeTask):
+
+ __slots__ = ('_config', '_fetch_iterator', '_term_rlock',
+ '_term_callback_handle')
+
+ def __init__(self, config):
+ CompositeTask.__init__(self, scheduler=config.event_loop)
+ self._config = config
+ self._term_rlock = threading.RLock()
+ self._term_callback_handle = None
+ self._fetch_iterator = None
+
+ def _start(self):
+ self._fetch_iterator = FetchIterator(self._config)
+ fetch = TaskScheduler(iter(self._fetch_iterator),
+ max_jobs=self._config.options.jobs,
+ max_load=self._config.options.load_average,
+ event_loop=self._config.event_loop)
+ self._start_task(fetch, self._fetch_exit)
+
+ def _fetch_exit(self, fetch):
+
+ self._assert_current(fetch)
+ if self._was_cancelled():
+ self._async_wait()
+ return
+
+ if self._config.options.delete:
+ deletion = TaskScheduler(iter(DeletionIterator(self._config)),
+ max_jobs=self._config.options.jobs,
+ max_load=self._config.options.load_average,
+ event_loop=self._config.event_loop)
+ self._start_task(deletion, self._deletion_exit)
+ return
+
+ self._post_deletion()
+
+ def _deletion_exit(self, deletion):
+
+ self._assert_current(deletion)
+ if self._was_cancelled():
+ self._async_wait()
+ return
+
+ self._post_deletion()
+
+ def _post_deletion(self):
+
+ if self._config.options.recycle_db is not None:
+ self._update_recycle_db()
+
+ if self._config.options.scheduled_deletion_log is not None:
+ self._scheduled_deletion_log()
+
+ self._summary()
+
+ self.returncode = os.EX_OK
+ self._current_task = None
+ self._async_wait()
+
+ def _update_recycle_db(self):
+
+ start_time = self._config.start_time
+ recycle_dir = self._config.options.recycle_dir
+ recycle_db = self._config.recycle_db
+ r_deletion_delay = self._config.options.recycle_deletion_delay
+
+ # Use a dict to optimize access.
+ recycle_db_cache = dict(recycle_db.items())
+
+ for filename in os.listdir(recycle_dir):
+
+ recycle_file = os.path.join(recycle_dir, filename)
+
+ try:
+ st = os.stat(recycle_file)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ logging.error(("stat failed for '%s' in "
+ "recycle: %s") % (filename, e))
+ continue
+
+ value = recycle_db_cache.pop(filename, None)
+ if value is None:
+ logging.debug(("add '%s' to "
+ "recycle db") % filename)
+ recycle_db[filename] = (st.st_size, start_time)
+ else:
+ r_size, r_time = value
+ if long(r_size) != st.st_size:
+ recycle_db[filename] = (st.st_size, start_time)
+ elif r_time + r_deletion_delay < start_time:
+ if self._config.options.dry_run:
+ logging.info(("dry-run: delete '%s' from "
+ "recycle") % filename)
+ logging.info(("drop '%s' from "
+ "recycle db") % filename)
+ else:
+ try:
+ os.unlink(recycle_file)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ logging.error(("delete '%s' from "
+ "recycle failed: %s") % (filename, e))
+ else:
+ logging.debug(("delete '%s' from "
+ "recycle") % filename)
+ try:
+ del recycle_db[filename]
+ except KeyError:
+ pass
+ else:
+ logging.debug(("drop '%s' from "
+ "recycle db") % filename)
+
+ # Existing files were popped from recycle_db_cache,
+ # so any remaining entries are for files that no
+ # longer exist.
+ for filename in recycle_db_cache:
+ try:
+ del recycle_db[filename]
+ except KeyError:
+ pass
+ else:
+ logging.debug(("drop non-existent '%s' from "
+ "recycle db") % filename)
+
+ def _scheduled_deletion_log(self):
+
+ start_time = self._config.start_time
+ dry_run = self._config.options.dry_run
+ deletion_delay = self._config.options.deletion_delay
+ distfiles_db = self._config.distfiles_db
+
+ date_map = {}
+ for filename, timestamp in self._config.deletion_db.items():
+ date = timestamp + deletion_delay
+ if date < start_time:
+ date = start_time
+ date = time.strftime("%Y-%m-%d", time.gmtime(date))
+ date_files = date_map.get(date)
+ if date_files is None:
+ date_files = []
+ date_map[date] = date_files
+ date_files.append(filename)
+
+ if dry_run:
+ logging.warning("dry-run: scheduled-deletions log "
+ "will be summarized via logging.info")
+
+ lines = []
+ for date in sorted(date_map):
+ date_files = date_map[date]
+ if dry_run:
+ logging.info(("dry-run: scheduled deletions for %s: %s files") %
+ (date, len(date_files)))
+ lines.append("%s\n" % date)
+ for filename in date_files:
+ cpv = "unknown"
+ if distfiles_db is not None:
+ cpv = distfiles_db.get(filename, cpv)
+ lines.append("\t%s\t%s\n" % (filename, cpv))
+
+ if not dry_run:
+ portage.util.write_atomic(
+ self._config.options.scheduled_deletion_log,
+ "".join(lines))
+
+ def _summary(self):
+ elapsed_time = time.time() - self._config.start_time
+ fail_count = len(self._config.file_failures)
+ delete_count = self._config.delete_count
+ scheduled_deletion_count = self._config.scheduled_deletion_count - delete_count
+ added_file_count = self._config.added_file_count
+ added_byte_count = self._config.added_byte_count
+
+ logging.info("finished in %i seconds" % elapsed_time)
+ logging.info("failed to fetch %i files" % fail_count)
+ logging.info("deleted %i files" % delete_count)
+ logging.info("deletion of %i files scheduled" %
+ scheduled_deletion_count)
+ logging.info("added %i files" % added_file_count)
+ logging.info("added %i bytes total" % added_byte_count)
+
+ def _cleanup(self):
+ """
+ Clean up any callbacks that have been registered with the global
+ event loop.
+ """
+ # The self._term_callback_handle attribute requires locking
+ # since it's modified by the thread-safe terminate method.
+ with self._term_rlock:
+ if self._term_callback_handle not in (None, False):
+ self._term_callback_handle.cancel()
+ # This prevents the terminate method from scheduling
+ # any more callbacks (since _cleanup must eliminate all
+ # callbacks in order to ensure complete cleanup).
+ self._term_callback_handle = False
+
+ def terminate(self):
+ with self._term_rlock:
+ if self._term_callback_handle is None:
+ self._term_callback_handle = self.scheduler.call_soon_threadsafe(
+ self._term_callback)
+
+ def _term_callback(self):
+ if self._fetch_iterator is not None:
+ self._fetch_iterator.terminate()
+ self.cancel()
+ if self.returncode is None:
+ # In this case, the exit callback for self._current_task will
+ # trigger notification of exit listeners. Don't call _async_wait()
+ # yet, since that could trigger event loop recursion if the
+ # current (cancelled) task's exit callback does not set the
+ # returncode first.
+ pass
+ else:
+ self._async_wait()
+
+ def _async_wait(self):
+ """
+ Override _async_wait to call self._cleanup().
+ """
+ self._cleanup()
+ super(MirrorDistTask, self)._async_wait()
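
The terminate/_cleanup pair above is the thread-safety mechanism: terminate may be called from any thread, so the callback handle is guarded by an RLock and the callback itself is pushed onto the event loop with call_soon_threadsafe; _cleanup later cancels a pending handle and pins it to False so nothing further can be scheduled. A reduced sketch of that handle/lock pattern on a plain asyncio loop (MirrorDistTask itself goes through Portage's scheduler wrapper, which exposes the same call_soon_threadsafe method):

import asyncio
import threading

class TerminableTask:
    def __init__(self, loop):
        self._loop = loop
        self._term_rlock = threading.RLock()
        self._term_callback_handle = None

    def terminate(self):
        # Safe to call from any thread; only the first call schedules
        # the callback, and nothing is scheduled after cleanup().
        with self._term_rlock:
            if self._term_callback_handle is None:
                self._term_callback_handle = self._loop.call_soon_threadsafe(
                    self._term_callback)

    def _term_callback(self):
        print("terminating")

    def cleanup(self):
        with self._term_rlock:
            if self._term_callback_handle not in (None, False):
                self._term_callback_handle.cancel()
            # False blocks any later terminate() from scheduling again.
            self._term_callback_handle = False

loop = asyncio.new_event_loop()
task = TerminableTask(loop)
task.terminate()            # schedules _term_callback on the loop
loop.call_soon(loop.stop)
loop.run_forever()          # runs the callback, then stops
task.cleanup()
loop.close()
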
diff --git a/lib/portage/_emirrordist/__init__.py b/lib/portage/_emirrordist/__init__.py
new file mode 100644
index 000000000..6cde9320b
--- /dev/null
+++ b/lib/portage/_emirrordist/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/_emirrordist/main.py b/lib/portage/_emirrordist/main.py
new file mode 100644
index 000000000..b63837b2a
--- /dev/null
+++ b/lib/portage/_emirrordist/main.py
@@ -0,0 +1,442 @@
+# Copyright 2013-2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import argparse
+import logging
+import sys
+
+import portage
+from portage import os
+from portage.util import normalize_path, writemsg_level, _recursive_file_list
+from portage.util._async.run_main_scheduler import run_main_scheduler
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.global_event_loop import global_event_loop
+from .Config import Config
+from .MirrorDistTask import MirrorDistTask
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+seconds_per_day = 24 * 60 * 60
+
+common_options = (
+ {
+ "longopt" : "--dry-run",
+ "help" : "perform a trial run with no changes made (usually combined "
+ "with --verbose)",
+ "action" : "store_true"
+ },
+ {
+ "longopt" : "--verbose",
+ "shortopt" : "-v",
+ "help" : "display extra information on stderr "
+ "(multiple occurences increase verbosity)",
+ "action" : "count",
+ "default" : 0,
+ },
+ {
+ "longopt" : "--ignore-default-opts",
+ "help" : "do not use the EMIRRORDIST_DEFAULT_OPTS environment variable",
+ "action" : "store_true"
+ },
+ {
+ "longopt" : "--distfiles",
+ "help" : "distfiles directory to use (required)",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--jobs",
+ "shortopt" : "-j",
+ "help" : "number of concurrent jobs to run",
+ "type" : int
+ },
+ {
+ "longopt" : "--load-average",
+ "shortopt" : "-l",
+ "help" : "load average limit for spawning of new concurrent jobs",
+ "metavar" : "LOAD",
+ "type" : float
+ },
+ {
+ "longopt" : "--tries",
+ "help" : "maximum number of tries per file, 0 means unlimited (default is 10)",
+ "default" : 10,
+ "type" : int
+ },
+ {
+ "longopt" : "--repo",
+ "help" : "name of repo to operate on"
+ },
+ {
+ "longopt" : "--config-root",
+ "help" : "location of portage config files",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--repositories-configuration",
+ "help" : "override configuration of repositories (in format of repos.conf)"
+ },
+ {
+ "longopt" : "--strict-manifests",
+ "help" : "manually override \"strict\" FEATURES setting",
+ "choices" : ("y", "n"),
+ "metavar" : "<y|n>",
+ },
+ {
+ "longopt" : "--failure-log",
+ "help" : "log file for fetch failures, with tab-delimited "
+ "output, for reporting purposes",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--success-log",
+ "help" : "log file for fetch successes, with tab-delimited "
+ "output, for reporting purposes",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--scheduled-deletion-log",
+ "help" : "log file for scheduled deletions, with tab-delimited "
+ "output, for reporting purposes",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--delete",
+ "help" : "enable deletion of unused distfiles",
+ "action" : "store_true"
+ },
+ {
+ "longopt" : "--deletion-db",
+ "help" : "database file used to track lifetime of files "
+ "scheduled for delayed deletion",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--deletion-delay",
+ "help" : "delay time for deletion, measured in seconds",
+ "metavar" : "SECONDS"
+ },
+ {
+ "longopt" : "--temp-dir",
+ "help" : "temporary directory for downloads",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--mirror-overrides",
+ "help" : "file holding a list of mirror overrides",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--mirror-skip",
+ "help" : "comma delimited list of mirror targets to skip "
+ "when fetching"
+ },
+ {
+ "longopt" : "--restrict-mirror-exemptions",
+ "help" : "comma delimited list of mirror targets for which to "
+ "ignore RESTRICT=\"mirror\""
+ },
+ {
+ "longopt" : "--verify-existing-digest",
+ "help" : "use digest as a verification of whether existing "
+ "distfiles are valid",
+ "action" : "store_true"
+ },
+ {
+ "longopt" : "--distfiles-local",
+ "help" : "distfiles-local directory to use",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--distfiles-db",
+ "help" : "database file used to track which ebuilds a "
+ "distfile belongs to",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--recycle-dir",
+ "help" : "directory for extended retention of files that "
+ "are removed from distdir with the --delete option",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--recycle-db",
+ "help" : "database file used to track lifetime of files "
+ "in recycle dir",
+ "metavar" : "FILE"
+ },
+ {
+ "longopt" : "--recycle-deletion-delay",
+ "help" : "delay time for deletion of unused files from "
+ "recycle dir, measured in seconds (defaults to "
+ "the equivalent of 60 days)",
+ "default" : 60 * seconds_per_day,
+ "metavar" : "SECONDS",
+ "type" : int
+ },
+ {
+ "longopt" : "--fetch-log-dir",
+ "help" : "directory for individual fetch logs",
+ "metavar" : "DIR"
+ },
+ {
+ "longopt" : "--whitelist-from",
+ "help" : "specifies a file containing a list of files to "
+ "whitelist, one per line, # prefixed lines ignored",
+ "action" : "append",
+ "metavar" : "FILE"
+ },
+)
+
+def parse_args(args):
+ description = "emirrordist - a fetch tool for mirroring " \
+ "of package distfiles"
+ usage = "emirrordist [options] <action>"
+ parser = argparse.ArgumentParser(description=description, usage=usage)
+
+ actions = parser.add_argument_group('Actions')
+ actions.add_argument("--version",
+ action="store_true",
+ help="display portage version and exit")
+ actions.add_argument("--mirror",
+ action="store_true",
+ help="mirror distfiles for the selected repository")
+
+ common = parser.add_argument_group('Common options')
+ for opt_info in common_options:
+ opt_pargs = [opt_info["longopt"]]
+ if opt_info.get("shortopt"):
+ opt_pargs.append(opt_info["shortopt"])
+ opt_kwargs = {"help" : opt_info["help"]}
+ for k in ("action", "choices", "default", "metavar", "type"):
+ if k in opt_info:
+ opt_kwargs[k] = opt_info[k]
+ common.add_argument(*opt_pargs, **opt_kwargs)
+
+ options, args = parser.parse_known_args(args)
+
+ return (parser, options, args)
+
+def emirrordist_main(args):
+
+ # The calling environment is ignored, so the program is
+ # completely controlled by commandline arguments.
+ env = {}
+
+ if not sys.stdout.isatty():
+ portage.output.nocolor()
+ env['NOCOLOR'] = 'true'
+
+ parser, options, args = parse_args(args)
+
+ if options.version:
+ sys.stdout.write("Portage %s\n" % portage.VERSION)
+ return os.EX_OK
+
+ config_root = options.config_root
+
+ if options.repositories_configuration is not None:
+ env['PORTAGE_REPOSITORIES'] = options.repositories_configuration
+
+ settings = portage.config(config_root=config_root,
+ local_config=False, env=env)
+
+ default_opts = None
+ if not options.ignore_default_opts:
+ default_opts = settings.get('EMIRRORDIST_DEFAULT_OPTS', '').split()
+
+ if default_opts:
+ parser, options, args = parse_args(default_opts + args)
+
+ settings = portage.config(config_root=config_root,
+ local_config=False, env=env)
+
+ if options.repo is None:
+ if len(settings.repositories.prepos) == 2:
+ for repo in settings.repositories:
+ if repo.name != "DEFAULT":
+ options.repo = repo.name
+ break
+
+ if options.repo is None:
+ parser.error("--repo option is required")
+
+ repo_path = settings.repositories.treemap.get(options.repo)
+ if repo_path is None:
+ parser.error("Unable to locate repository named '%s'" % (options.repo,))
+
+ if options.jobs is not None:
+ options.jobs = int(options.jobs)
+
+ if options.load_average is not None:
+ options.load_average = float(options.load_average)
+
+ if options.failure_log is not None:
+ options.failure_log = normalize_path(
+ os.path.abspath(options.failure_log))
+
+ parent_dir = os.path.dirname(options.failure_log)
+ if not (os.path.isdir(parent_dir) and
+ os.access(parent_dir, os.W_OK|os.X_OK)):
+ parser.error(("--failure-log '%s' parent is not a "
+ "writable directory") % options.failure_log)
+
+ if options.success_log is not None:
+ options.success_log = normalize_path(
+ os.path.abspath(options.success_log))
+
+ parent_dir = os.path.dirname(options.success_log)
+ if not (os.path.isdir(parent_dir) and
+ os.access(parent_dir, os.W_OK|os.X_OK)):
+ parser.error(("--success-log '%s' parent is not a "
+ "writable directory") % options.success_log)
+
+ if options.scheduled_deletion_log is not None:
+ options.scheduled_deletion_log = normalize_path(
+ os.path.abspath(options.scheduled_deletion_log))
+
+ parent_dir = os.path.dirname(options.scheduled_deletion_log)
+ if not (os.path.isdir(parent_dir) and
+ os.access(parent_dir, os.W_OK|os.X_OK)):
+ parser.error(("--scheduled-deletion-log '%s' parent is not a "
+ "writable directory") % options.scheduled_deletion_log)
+
+ if options.deletion_db is None:
+ parser.error("--scheduled-deletion-log requires --deletion-db")
+
+ if options.deletion_delay is not None:
+ options.deletion_delay = long(options.deletion_delay)
+ if options.deletion_db is None:
+ parser.error("--deletion-delay requires --deletion-db")
+
+ if options.deletion_db is not None:
+ if options.deletion_delay is None:
+ parser.error("--deletion-db requires --deletion-delay")
+ options.deletion_db = normalize_path(
+ os.path.abspath(options.deletion_db))
+
+ if options.temp_dir is not None:
+ options.temp_dir = normalize_path(
+ os.path.abspath(options.temp_dir))
+
+ if not (os.path.isdir(options.temp_dir) and
+ os.access(options.temp_dir, os.W_OK|os.X_OK)):
+ parser.error(("--temp-dir '%s' is not a "
+ "writable directory") % options.temp_dir)
+
+ if options.distfiles is not None:
+ options.distfiles = normalize_path(
+ os.path.abspath(options.distfiles))
+
+ if not (os.path.isdir(options.distfiles) and
+ os.access(options.distfiles, os.W_OK|os.X_OK)):
+ parser.error(("--distfiles '%s' is not a "
+ "writable directory") % options.distfiles)
+ else:
+ parser.error("missing required --distfiles parameter")
+
+ if options.mirror_overrides is not None:
+ options.mirror_overrides = normalize_path(
+ os.path.abspath(options.mirror_overrides))
+
+ if not (os.access(options.mirror_overrides, os.R_OK) and
+ os.path.isfile(options.mirror_overrides)):
+ parser.error(
+ "--mirror-overrides-file '%s' is not a readable file" %
+ options.mirror_overrides)
+
+ if options.distfiles_local is not None:
+ options.distfiles_local = normalize_path(
+ os.path.abspath(options.distfiles_local))
+
+ if not (os.path.isdir(options.distfiles_local) and
+ os.access(options.distfiles_local, os.W_OK|os.X_OK)):
+ parser.error(("--distfiles-local '%s' is not a "
+ "writable directory") % options.distfiles_local)
+
+ if options.distfiles_db is not None:
+ options.distfiles_db = normalize_path(
+ os.path.abspath(options.distfiles_db))
+
+ if options.tries is not None:
+ options.tries = int(options.tries)
+
+ if options.recycle_dir is not None:
+ options.recycle_dir = normalize_path(
+ os.path.abspath(options.recycle_dir))
+ if not (os.path.isdir(options.recycle_dir) and
+ os.access(options.recycle_dir, os.W_OK|os.X_OK)):
+ parser.error(("--recycle-dir '%s' is not a "
+ "writable directory") % options.recycle_dir)
+
+ if options.recycle_db is not None:
+ if options.recycle_dir is None:
+ parser.error("--recycle-db requires "
+ "--recycle-dir to be specified")
+ options.recycle_db = normalize_path(
+ os.path.abspath(options.recycle_db))
+
+ if options.recycle_deletion_delay is not None:
+ options.recycle_deletion_delay = \
+ long(options.recycle_deletion_delay)
+
+ if options.fetch_log_dir is not None:
+ options.fetch_log_dir = normalize_path(
+ os.path.abspath(options.fetch_log_dir))
+
+ if not (os.path.isdir(options.fetch_log_dir) and
+ os.access(options.fetch_log_dir, os.W_OK|os.X_OK)):
+ parser.error(("--fetch-log-dir '%s' is not a "
+ "writable directory") % options.fetch_log_dir)
+
+ if options.whitelist_from:
+ normalized_paths = []
+ for x in options.whitelist_from:
+ path = normalize_path(os.path.abspath(x))
+ if not os.access(path, os.R_OK):
+ parser.error("--whitelist-from '%s' is not readable" % x)
+ if os.path.isfile(path):
+ normalized_paths.append(path)
+ elif os.path.isdir(path):
+ for file in _recursive_file_list(path):
+ if not os.access(file, os.R_OK):
+ parser.error("--whitelist-from '%s' directory contains not readable file '%s'" % (x, file))
+ normalized_paths.append(file)
+ else:
+ parser.error("--whitelist-from '%s' is not a regular file or a directory" % x)
+ options.whitelist_from = normalized_paths
+
+ if options.strict_manifests is not None:
+ if options.strict_manifests == "y":
+ settings.features.add("strict")
+ else:
+ settings.features.discard("strict")
+
+ settings.lock()
+
+ portdb = portage.portdbapi(mysettings=settings)
+
+ # Limit ebuilds to the specified repo.
+ portdb.porttrees = [repo_path]
+
+ portage.util.initialize_logger()
+
+ if options.verbose > 0:
+ l = logging.getLogger()
+ l.setLevel(l.getEffectiveLevel() - 10 * options.verbose)
+
+ with Config(options, portdb,
+ SchedulerInterface(global_event_loop())) as config:
+
+ if not options.mirror:
+ parser.error('No action specified')
+
+ returncode = os.EX_OK
+
+ if options.mirror:
+ signum = run_main_scheduler(MirrorDistTask(config))
+ if signum is not None:
+ sys.exit(128 + signum)
+
+ return returncode
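
parse_args above builds the parser from the common_options table instead of hand-writing one add_argument call per option, and emirrordist_main reparses once EMIRRORDIST_DEFAULT_OPTS has been prepended. A reduced sketch of the table-driven construction; the two-entry table and the sample command line are illustrative, not the full option set:

import argparse

option_table = (
    {"longopt": "--distfiles", "help": "distfiles directory to use",
     "metavar": "DIR"},
    {"longopt": "--jobs", "shortopt": "-j",
     "help": "number of concurrent jobs to run", "type": int},
)

parser = argparse.ArgumentParser(usage="emirrordist [options] <action>")
for opt_info in option_table:
    pargs = [opt_info["longopt"]]
    if opt_info.get("shortopt"):
        pargs.append(opt_info["shortopt"])
    kwargs = {"help": opt_info["help"]}
    for k in ("action", "choices", "default", "metavar", "type"):
        if k in opt_info:
            kwargs[k] = opt_info[k]
    parser.add_argument(*pargs, **kwargs)

options, extra = parser.parse_known_args(
    ["--distfiles", "/var/cache/distfiles", "-j", "4"])
print(options.distfiles, options.jobs)  # /var/cache/distfiles 4
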
diff --git a/lib/portage/_global_updates.py b/lib/portage/_global_updates.py
new file mode 100644
index 000000000..81ee484ee
--- /dev/null
+++ b/lib/portage/_global_updates.py
@@ -0,0 +1,255 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import stat
+
+from portage import best, os
+from portage.const import WORLD_FILE
+from portage.data import secpass
+from portage.exception import DirectoryNotFound
+from portage.localization import _
+from portage.output import bold, colorize
+from portage.update import grab_updates, parse_updates, update_config_files, update_dbentry
+from portage.util import grabfile, shlex_split, \
+ writemsg, writemsg_stdout, write_atomic
+
+def _global_updates(trees, prev_mtimes, quiet=False, if_mtime_changed=True):
+ """
+ Perform new global updates if they exist in 'profiles/updates/'
+ subdirectories of all active repositories (PORTDIR + PORTDIR_OVERLAY).
+ This simply returns if ROOT != "/" (when len(trees) != 1). If ROOT != "/"
+ then the user should instead use emaint --fix movebin and/or moveinst.
+
+ @param trees: A dictionary containing portage trees.
+ @type trees: dict
+ @param prev_mtimes: A dictionary containing mtimes of files located in
+ $PORTDIR/profiles/updates/.
+ @type prev_mtimes: dict
+ @rtype: bool
+ @return: True if update commands have been performed, otherwise False
+ """
+ # only do this if we're root and not running repoman/ebuild digest
+
+ if secpass < 2 or \
+ "SANDBOX_ACTIVE" in os.environ or \
+ len(trees) != 1:
+ return False
+
+ return _do_global_updates(trees, prev_mtimes,
+ quiet=quiet, if_mtime_changed=if_mtime_changed)
+
+def _do_global_updates(trees, prev_mtimes, quiet=False, if_mtime_changed=True):
+ root = trees._running_eroot
+ mysettings = trees[root]["vartree"].settings
+ portdb = trees[root]["porttree"].dbapi
+ vardb = trees[root]["vartree"].dbapi
+ bindb = trees[root]["bintree"].dbapi
+
+ world_file = os.path.join(mysettings['EROOT'], WORLD_FILE)
+ world_list = grabfile(world_file)
+ world_modified = False
+ world_warnings = set()
+ updpath_map = {}
+ # Maps repo_name to list of updates. If a given repo has no updates
+ # directory, it will be omitted. If a repo has an updates directory
+ # but none need to be applied (according to timestamp logic), the
+ # value in the dict will be an empty list.
+ repo_map = {}
+ timestamps = {}
+
+ retupd = False
+ update_notice_printed = False
+ for repo_name in portdb.getRepositories():
+ repo = portdb.getRepositoryPath(repo_name)
+ updpath = os.path.join(repo, "profiles", "updates")
+ if not os.path.isdir(updpath):
+ continue
+
+ if updpath in updpath_map:
+ repo_map[repo_name] = updpath_map[updpath]
+ continue
+
+ try:
+ if if_mtime_changed:
+ update_data = grab_updates(updpath, prev_mtimes=prev_mtimes)
+ else:
+ update_data = grab_updates(updpath)
+ except DirectoryNotFound:
+ continue
+ myupd = []
+ updpath_map[updpath] = myupd
+ repo_map[repo_name] = myupd
+ if len(update_data) > 0:
+ for mykey, mystat, mycontent in update_data:
+ if not update_notice_printed:
+ update_notice_printed = True
+ writemsg_stdout("\n")
+ writemsg_stdout(colorize("GOOD",
+ _("Performing Global Updates\n")))
+ writemsg_stdout(_("(Could take a couple of minutes if you have a lot of binary packages.)\n"))
+ if not quiet:
+ writemsg_stdout(_(" %s='update pass' %s='binary update' "
+ "%s='/var/db update' %s='/var/db move'\n"
+ " %s='/var/db SLOT move' %s='binary move' "
+ "%s='binary SLOT move'\n %s='update /etc/portage/package.*'\n") % \
+ (bold("."), bold("*"), bold("#"), bold("@"), bold("s"), bold("%"), bold("S"), bold("p")))
+ valid_updates, errors = parse_updates(mycontent)
+ myupd.extend(valid_updates)
+ if not quiet:
+ writemsg_stdout(bold(mykey))
+ writemsg_stdout(len(valid_updates) * "." + "\n")
+ if len(errors) == 0:
+ # Update our internal mtime since we
+ # processed all of our directives.
+ timestamps[mykey] = mystat[stat.ST_MTIME]
+ else:
+ for msg in errors:
+ writemsg("%s\n" % msg, noiselevel=-1)
+ if myupd:
+ retupd = True
+
+ if retupd:
+ if os.access(bindb.bintree.pkgdir, os.W_OK):
+ # Call binarytree.populate(), since we want to make sure it's
+ # only populated with local packages here (getbinpkgs=0).
+ bindb.bintree.populate()
+ else:
+ bindb = None
+
+ master_repo = portdb.repositories.mainRepo()
+ if master_repo is not None:
+ master_repo = master_repo.name
+ if master_repo in repo_map:
+ repo_map['DEFAULT'] = repo_map[master_repo]
+
+ for repo_name, myupd in repo_map.items():
+ if repo_name == 'DEFAULT':
+ continue
+ if not myupd:
+ continue
+
+ def repo_match(repository):
+ return repository == repo_name or \
+ (repo_name == master_repo and repository not in repo_map)
+
+ def _world_repo_match(atoma, atomb):
+ """
+ Check whether to perform a world change from atoma to atomb.
+ If the best vardb match for atoma comes from the same repository
+ as the update file, allow that. Additionally, if portdb can still
+ find a match for the old atom name, warn about that.
+ """
+ matches = vardb.match(atoma)
+ if not matches:
+ matches = vardb.match(atomb)
+ if matches and \
+ repo_match(vardb.aux_get(best(matches), ['repository'])[0]):
+ if portdb.match(atoma):
+ world_warnings.add((atoma, atomb))
+ return True
+ else:
+ return False
+
+ for update_cmd in myupd:
+ for pos, atom in enumerate(world_list):
+ new_atom = update_dbentry(update_cmd, atom)
+ if atom != new_atom:
+ if _world_repo_match(atom, new_atom):
+ world_list[pos] = new_atom
+ world_modified = True
+
+ for update_cmd in myupd:
+ if update_cmd[0] == "move":
+ moves = vardb.move_ent(update_cmd, repo_match=repo_match)
+ if moves:
+ writemsg_stdout(moves * "@")
+ if bindb:
+ moves = bindb.move_ent(update_cmd, repo_match=repo_match)
+ if moves:
+ writemsg_stdout(moves * "%")
+ elif update_cmd[0] == "slotmove":
+ moves = vardb.move_slot_ent(update_cmd, repo_match=repo_match)
+ if moves:
+ writemsg_stdout(moves * "s")
+ if bindb:
+ moves = bindb.move_slot_ent(update_cmd, repo_match=repo_match)
+ if moves:
+ writemsg_stdout(moves * "S")
+
+ if world_modified:
+ world_list.sort()
+ write_atomic(world_file,
+ "".join("%s\n" % (x,) for x in world_list))
+ if world_warnings:
+ # XXX: print warning that we've updated world entries
+ # and the old name still matches something (from an overlay)?
+ pass
+
+ if retupd:
+
+ def _config_repo_match(repo_name, atoma, atomb):
+ """
+ Check whether to rewrite a config file entry from atoma to atomb.
+ If the best vardb match for atoma comes from the same repository
+ as the update file, allow that.
+ """
+ matches = vardb.match(atoma)
+ if not matches:
+ matches = vardb.match(atomb)
+ if not matches:
+ return False
+ repository = vardb.aux_get(best(matches), ['repository'])[0]
+ return repository == repo_name or \
+ (repo_name == master_repo and repository not in repo_map)
+
+ update_config_files(root,
+ shlex_split(mysettings.get("CONFIG_PROTECT", "")),
+ shlex_split(mysettings.get("CONFIG_PROTECT_MASK", "")),
+ repo_map, match_callback=_config_repo_match,
+ case_insensitive="case-insensitive-fs"
+ in mysettings.features)
+
+ # The above global updates proceed quickly, so they
+ # are considered a single mtimedb transaction.
+ if timestamps:
+ # We do not update the mtime in the mtimedb
+ # until after _all_ of the above updates have
+ # been processed because the mtimedb will
+ # automatically commit when killed by ctrl C.
+ for mykey, mtime in timestamps.items():
+ prev_mtimes[mykey] = mtime
+
+ do_upgrade_packagesmessage = False
+ # We need to do the brute-force updates for these now.
+ if True:
+ def onUpdate(_maxval, curval):
+ if curval > 0:
+ writemsg_stdout("#")
+ if quiet:
+ onUpdate = None
+ vardb.update_ents(repo_map, onUpdate=onUpdate)
+ if bindb:
+ def onUpdate(_maxval, curval):
+ if curval > 0:
+ writemsg_stdout("*")
+ if quiet:
+ onUpdate = None
+ bindb.update_ents(repo_map, onUpdate=onUpdate)
+ else:
+ do_upgrade_packagesmessage = 1
+
+ # Update progress above is indicated by characters written to stdout so
+ # we print a couple new lines here to separate the progress output from
+ # what follows.
+ writemsg_stdout("\n\n")
+
+ if do_upgrade_packagesmessage and bindb and \
+ bindb.cpv_all():
+ writemsg_stdout(_(" ** Skipping packages. Run 'fixpackages' or set it in FEATURES to fix the tbz2's in the packages directory.\n"))
+ writemsg_stdout(bold(_("Note: This can take a very long time.")))
+ writemsg_stdout("\n")
+
+ return retupd
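
The world-file handling above follows a common shape: run every update command over every world atom, remember whether anything changed, and only then sort and rewrite the file atomically. A toy sketch of that loop, with a hypothetical apply_move() standing in for portage.update.update_dbentry and plain strings standing in for Atom objects:

def apply_move(update_cmd, atom):
    # Hypothetical stand-in: handle only ("move", old, new) on bare cat/pkg names.
    if update_cmd[0] == "move" and atom == update_cmd[1]:
        return update_cmd[2]
    return atom

world_list = ["app-misc/foo", "dev-util/bar"]
updates = [("move", "app-misc/foo", "app-misc/foo-ng")]

world_modified = False
for update_cmd in updates:
    for pos, atom in enumerate(world_list):
        new_atom = apply_move(update_cmd, atom)
        if new_atom != atom:
            world_list[pos] = new_atom
            world_modified = True

if world_modified:
    world_list.sort()
    # _global_updates uses write_atomic() on the world file here.
    print("\n".join(world_list))
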
diff --git a/lib/portage/_legacy_globals.py b/lib/portage/_legacy_globals.py
new file mode 100644
index 000000000..45113d150
--- /dev/null
+++ b/lib/portage/_legacy_globals.py
@@ -0,0 +1,78 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.const import CACHE_PATH, PROFILE_PATH
+
+def _get_legacy_global(name):
+ constructed = portage._legacy_globals_constructed
+ if name in constructed:
+ return getattr(portage, name)
+
+ if name == 'portdb':
+ portage.portdb = portage.db[portage.root]["porttree"].dbapi
+ constructed.add(name)
+ return getattr(portage, name)
+
+ elif name in ('mtimedb', 'mtimedbfile'):
+ portage.mtimedbfile = os.path.join(portage.settings['EROOT'],
+ CACHE_PATH, "mtimedb")
+ constructed.add('mtimedbfile')
+ portage.mtimedb = portage.MtimeDB(portage.mtimedbfile)
+ constructed.add('mtimedb')
+ return getattr(portage, name)
+
+ # Portage needs to ensure a sane umask for the files it creates.
+ os.umask(0o22)
+
+ kwargs = {}
+ for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"),
+ ("target_root", "ROOT"), ("sysroot", "SYSROOT"),
+ ("eprefix", "EPREFIX")):
+ kwargs[k] = os.environ.get(envvar)
+
+ portage._initializing_globals = True
+ portage.db = portage.create_trees(**kwargs)
+ constructed.add('db')
+ del portage._initializing_globals
+
+ settings = portage.db[portage.db._target_eroot]["vartree"].settings
+
+ portage.settings = settings
+ constructed.add('settings')
+
+ # Since portage.db now uses EROOT for keys instead of ROOT, we make
+ # portage.root refer to EROOT such that it continues to work as a key.
+ portage.root = portage.db._target_eroot
+ constructed.add('root')
+
+ # COMPATIBILITY
+ # These attributes should not be used within
+ # Portage under any circumstances.
+
+ portage.archlist = settings.archlist()
+ constructed.add('archlist')
+
+ portage.features = settings.features
+ constructed.add('features')
+
+ portage.groups = settings.get("ACCEPT_KEYWORDS", "").split()
+ constructed.add('groups')
+
+ portage.pkglines = settings.packages
+ constructed.add('pkglines')
+
+ portage.selinux_enabled = settings.selinux_enabled()
+ constructed.add('selinux_enabled')
+
+ portage.thirdpartymirrors = settings.thirdpartymirrors()
+ constructed.add('thirdpartymirrors')
+
+ profiledir = os.path.join(settings["PORTAGE_CONFIGROOT"], PROFILE_PATH)
+ if not os.path.isdir(profiledir):
+ profiledir = None
+ portage.profiledir = profiledir
+ constructed.add('profiledir')
+
+ return getattr(portage, name)
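
_get_legacy_global is a lazy constructor: each legacy attribute is built on first request, recorded in portage._legacy_globals_constructed, and afterwards served straight from the portage module, so importing portage stays cheap. A minimal sketch of the same memoized construction against a stand-in namespace (the path and the dict below are illustrative placeholders, not the real objects):

class _Namespace(object):
    """Stand-in for the portage module in this sketch."""

_portage = _Namespace()
_constructed = set()

def get_legacy_global(name):
    if name in _constructed:
        return getattr(_portage, name)

    if name in ("mtimedb", "mtimedbfile"):
        # One branch may construct several related globals at once.
        _portage.mtimedbfile = "/var/cache/edb/mtimedb"  # illustrative path
        _constructed.add("mtimedbfile")
        _portage.mtimedb = {}  # stands in for the real MtimeDB object
        _constructed.add("mtimedb")
        return getattr(_portage, name)

    raise AttributeError(name)

print(get_legacy_global("mtimedbfile"))  # constructed on first access
print(get_legacy_global("mtimedb"))      # served from the cache
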
diff --git a/lib/portage/_selinux.py b/lib/portage/_selinux.py
new file mode 100644
index 000000000..985e96628
--- /dev/null
+++ b/lib/portage/_selinux.py
@@ -0,0 +1,158 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# Don't use the unicode-wrapped os and shutil modules here since
+# the whole _selinux module itself will be wrapped.
+import os
+import shutil
+import sys
+import warnings
+
+import portage
+from portage import _encodings
+from portage import _native_string, _unicode_decode
+from portage.localization import _
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'selinux')
+
+def copyfile(src, dest):
+ src = _native_string(src, encoding=_encodings['fs'], errors='strict')
+ dest = _native_string(dest, encoding=_encodings['fs'], errors='strict')
+ (rc, ctx) = selinux.lgetfilecon(src)
+ if rc < 0:
+ if sys.hexversion < 0x3000000:
+ src = _unicode_decode(src, encoding=_encodings['fs'], errors='replace')
+ raise OSError(_("copyfile: Failed getting context of \"%s\".") % src)
+
+ setfscreate(ctx)
+ try:
+ shutil.copyfile(src, dest)
+ finally:
+ setfscreate()
+
+def getcontext():
+ (rc, ctx) = selinux.getcon()
+ if rc < 0:
+ raise OSError(_("getcontext: Failed getting current process context."))
+
+ return ctx
+
+def is_selinux_enabled():
+ return selinux.is_selinux_enabled()
+
+def mkdir(target, refdir):
+ target = _native_string(target, encoding=_encodings['fs'], errors='strict')
+ refdir = _native_string(refdir, encoding=_encodings['fs'], errors='strict')
+ (rc, ctx) = selinux.getfilecon(refdir)
+ if rc < 0:
+ if sys.hexversion < 0x3000000:
+ refdir = _unicode_decode(refdir, encoding=_encodings['fs'], errors='replace')
+ raise OSError(
+ _("mkdir: Failed getting context of reference directory \"%s\".") \
+ % refdir)
+
+ setfscreate(ctx)
+ try:
+ os.mkdir(target)
+ finally:
+ setfscreate()
+
+def rename(src, dest):
+ src = _native_string(src, encoding=_encodings['fs'], errors='strict')
+ dest = _native_string(dest, encoding=_encodings['fs'], errors='strict')
+ (rc, ctx) = selinux.lgetfilecon(src)
+ if rc < 0:
+ if sys.hexversion < 0x3000000:
+ src = _unicode_decode(src, encoding=_encodings['fs'], errors='replace')
+ raise OSError(_("rename: Failed getting context of \"%s\".") % src)
+
+ setfscreate(ctx)
+ try:
+ os.rename(src, dest)
+ finally:
+ setfscreate()
+
+def settype(newtype):
+ try:
+ ret = getcontext().split(":")
+ ret[2] = newtype
+ return ":".join(ret)
+ except IndexError:
+ warnings.warn("Invalid SELinux context: %s" % getcontext())
+ return None
+
+def setexec(ctx="\n"):
+ ctx = _native_string(ctx, encoding=_encodings['content'], errors='strict')
+ rc = 0
+ try:
+ rc = selinux.setexeccon(ctx)
+ except OSError:
+ msg = _("Failed to set new SELinux execution context. " + \
+ "Is your current SELinux context allowed to run Portage?")
+ if selinux.security_getenforce() == 1:
+ raise OSError(msg)
+ else:
+ portage.writemsg("!!! %s\n" % msg, noiselevel=-1)
+
+ if rc < 0:
+ if sys.hexversion < 0x3000000:
+ ctx = _unicode_decode(ctx, encoding=_encodings['content'], errors='replace')
+ if selinux.security_getenforce() == 1:
+ raise OSError(_("Failed setting exec() context \"%s\".") % ctx)
+ else:
+ portage.writemsg("!!! " + \
+ _("Failed setting exec() context \"%s\".") % ctx, \
+ noiselevel=-1)
+
+def setfscreate(ctx="\n"):
+ ctx = _native_string(ctx, encoding=_encodings['content'], errors='strict')
+ if selinux.setfscreatecon(ctx) < 0:
+ if sys.hexversion < 0x3000000:
+ ctx = _unicode_decode(ctx, encoding=_encodings['content'], errors='replace')
+ raise OSError(
+ _("setfscreate: Failed setting fs create context \"%s\".") % ctx)
+
+class spawn_wrapper(object):
+ """
+ Create a wrapper function for the given spawn function. When the wrapper
+ is called, it will adjust the arguments such that setexec() is called
+ *after* the fork (thereby avoiding any interference with concurrent
+ threads in the calling process).
+ """
+ __slots__ = ("_con", "_spawn_func")
+
+ def __init__(self, spawn_func, selinux_type):
+ self._spawn_func = spawn_func
+ selinux_type = _native_string(selinux_type, encoding=_encodings['content'], errors='strict')
+ self._con = settype(selinux_type)
+
+ def __call__(self, *args, **kwargs):
+ if self._con is not None:
+ pre_exec = kwargs.get("pre_exec")
+
+ def _pre_exec():
+ if pre_exec is not None:
+ pre_exec()
+ setexec(self._con)
+
+ kwargs["pre_exec"] = _pre_exec
+
+ return self._spawn_func(*args, **kwargs)
+
+def symlink(target, link, reflnk):
+ target = _native_string(target, encoding=_encodings['fs'], errors='strict')
+ link = _native_string(link, encoding=_encodings['fs'], errors='strict')
+ reflnk = _native_string(reflnk, encoding=_encodings['fs'], errors='strict')
+ (rc, ctx) = selinux.lgetfilecon(reflnk)
+ if rc < 0:
+ if sys.hexversion < 0x3000000:
+ reflnk = _unicode_decode(reflnk, encoding=_encodings['fs'], errors='replace')
+ raise OSError(
+ _("symlink: Failed getting context of reference symlink \"%s\".") \
+ % reflnk)
+
+ setfscreate(ctx)
+ try:
+ os.symlink(target, link)
+ finally:
+ setfscreate()
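
spawn_wrapper does not call setexec() itself; it rewrites the pre_exec keyword so that any caller-supplied hook runs first and the SELinux exec context is set inside the child, after the fork. A small sketch of that pre_exec chaining with a toy spawn function (the real Portage spawn API is not reproduced here):

def make_spawn_wrapper(spawn_func, extra_step):
    """Wrap spawn_func so extra_step runs after any caller-supplied pre_exec."""
    def wrapper(*args, **kwargs):
        caller_pre_exec = kwargs.get("pre_exec")

        def _pre_exec():
            if caller_pre_exec is not None:
                caller_pre_exec()
            extra_step()  # e.g. setexec(ctx) in the forked child

        kwargs["pre_exec"] = _pre_exec
        return spawn_func(*args, **kwargs)
    return wrapper

def fake_spawn(cmd, pre_exec=None):
    # Toy spawn: just run pre_exec synchronously for demonstration.
    if pre_exec is not None:
        pre_exec()
    print("spawning: %s" % (cmd,))

spawn = make_spawn_wrapper(fake_spawn, lambda: print("setexec(context)"))
spawn(["true"], pre_exec=lambda: print("caller pre_exec"))
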
diff --git a/lib/portage/_sets/ProfilePackageSet.py b/lib/portage/_sets/ProfilePackageSet.py
new file mode 100644
index 000000000..fec937391
--- /dev/null
+++ b/lib/portage/_sets/ProfilePackageSet.py
@@ -0,0 +1,35 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.util import grabfile_package, stack_lists
+from portage._sets.base import PackageSet
+
+class ProfilePackageSet(PackageSet):
+ _operations = ["merge"]
+
+ def __init__(self, profiles, debug=False):
+ super(ProfilePackageSet, self).__init__()
+ self._profiles = profiles
+ if profiles:
+ desc_profile = profiles[-1]
+ if desc_profile.user_config and len(profiles) > 1:
+ desc_profile = profiles[-2]
+ description = desc_profile.location
+ else:
+ description = None
+ self.description = "Profile packages for profile %s" % description
+
+ def load(self):
+ self._setAtoms(x for x in stack_lists(
+ [grabfile_package(os.path.join(y.location, "packages"),
+ verify_eapi=True, eapi=y.eapi, eapi_default=None,
+ allow_build_id=y.allow_build_id)
+ for y in self._profiles
+ if "profile-set" in y.profile_formats],
+ incremental=1) if x[:1] != "*")
+
+ def singleBuilder(self, options, settings, trees):
+ return ProfilePackageSet(
+ settings._locations_manager.profiles_complex)
+ singleBuilder = classmethod(singleBuilder)
diff --git a/lib/portage/_sets/__init__.py b/lib/portage/_sets/__init__.py
new file mode 100644
index 000000000..2c9bf9715
--- /dev/null
+++ b/lib/portage/_sets/__init__.py
@@ -0,0 +1,302 @@
+# Copyright 2007-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+__all__ = ["SETPREFIX", "get_boolean", "SetConfigError",
+ "SetConfig", "load_default_config"]
+
+import io
+import logging
+import sys
+import portage
+from portage import os
+from portage import load_mod
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage import _encodings
+from portage.const import USER_CONFIG_PATH, GLOBAL_CONFIG_PATH
+from portage.const import VCS_DIRS
+from portage.const import _ENABLE_SET_CONFIG
+from portage.exception import PackageSetNotFound
+from portage.localization import _
+from portage.util import writemsg_level
+from portage.util.configparser import (SafeConfigParser,
+ NoOptionError, ParsingError, read_configs)
+
+SETPREFIX = "@"
+
+def get_boolean(options, name, default):
+ if not name in options:
+ return default
+ elif options[name].lower() in ("1", "yes", "on", "true"):
+ return True
+ elif options[name].lower() in ("0", "no", "off", "false"):
+ return False
+ else:
+ raise SetConfigError(_("invalid value '%(value)s' for option '%(option)s'") % {"value": options[name], "option": name})
+
+class SetConfigError(Exception):
+ pass
+
+class SetConfig(object):
+ def __init__(self, paths, settings, trees):
+ self._parser = SafeConfigParser(
+ defaults={
+ "EPREFIX" : settings["EPREFIX"],
+ "EROOT" : settings["EROOT"],
+ "PORTAGE_CONFIGROOT" : settings["PORTAGE_CONFIGROOT"],
+ "ROOT" : settings["ROOT"],
+ })
+
+ if _ENABLE_SET_CONFIG:
+ read_configs(self._parser, paths)
+ else:
+ self._create_default_config()
+
+ self.errors = []
+ self.psets = {}
+ self.trees = trees
+ self.settings = settings
+ self._parsed = False
+ self.active = []
+
+ def _create_default_config(self):
+ """
+ Create a default hardcoded set configuration for a portage version
+ that does not support set configuration files. This is only used
+ in the current branch of portage if _ENABLE_SET_CONFIG is False.
+ Even if it's not used in this branch, keep it here in order to
+ minimize the diff between branches.
+
+ [world]
+ class = portage.sets.base.DummyPackageSet
+ packages = @selected @system
+
+ [selected]
+ class = portage.sets.files.WorldSelectedSet
+
+ [system]
+ class = portage.sets.profiles.PackagesSystemSet
+
+ """
+ parser = self._parser
+
+ parser.remove_section("world")
+ parser.add_section("world")
+ parser.set("world", "class", "portage.sets.base.DummyPackageSet")
+ parser.set("world", "packages", "@profile @selected @system")
+
+ parser.remove_section("profile")
+ parser.add_section("profile")
+ parser.set("profile", "class", "portage.sets.ProfilePackageSet.ProfilePackageSet")
+
+ parser.remove_section("selected")
+ parser.add_section("selected")
+ parser.set("selected", "class", "portage.sets.files.WorldSelectedSet")
+
+ parser.remove_section("selected-packages")
+ parser.add_section("selected-packages")
+ parser.set("selected-packages", "class", "portage.sets.files.WorldSelectedPackagesSet")
+
+ parser.remove_section("selected-sets")
+ parser.add_section("selected-sets")
+ parser.set("selected-sets", "class", "portage.sets.files.WorldSelectedSetsSet")
+
+ parser.remove_section("system")
+ parser.add_section("system")
+ parser.set("system", "class", "portage.sets.profiles.PackagesSystemSet")
+
+ parser.remove_section("security")
+ parser.add_section("security")
+ parser.set("security", "class", "portage.sets.security.NewAffectedSet")
+
+ parser.remove_section("usersets")
+ parser.add_section("usersets")
+ parser.set("usersets", "class", "portage.sets.files.StaticFileSet")
+ parser.set("usersets", "multiset", "true")
+ parser.set("usersets", "directory", "%(PORTAGE_CONFIGROOT)setc/portage/sets")
+ parser.set("usersets", "world-candidate", "true")
+
+ parser.remove_section("live-rebuild")
+ parser.add_section("live-rebuild")
+ parser.set("live-rebuild", "class", "portage.sets.dbapi.VariableSet")
+ parser.set("live-rebuild", "variable", "INHERITED")
+ parser.set("live-rebuild", "includes", " ".join(sorted(portage.const.LIVE_ECLASSES)))
+
+ parser.remove_section("module-rebuild")
+ parser.add_section("module-rebuild")
+ parser.set("module-rebuild", "class", "portage.sets.dbapi.OwnerSet")
+ parser.set("module-rebuild", "files", "/lib/modules")
+
+ parser.remove_section("preserved-rebuild")
+ parser.add_section("preserved-rebuild")
+ parser.set("preserved-rebuild", "class", "portage.sets.libs.PreservedLibraryConsumerSet")
+
+ parser.remove_section("x11-module-rebuild")
+ parser.add_section("x11-module-rebuild")
+ parser.set("x11-module-rebuild", "class", "portage.sets.dbapi.OwnerSet")
+ parser.set("x11-module-rebuild", "files", "/usr/lib/xorg/modules")
+ parser.set("x11-module-rebuild", "exclude-files", "/usr/bin/Xorg")
+
+ def update(self, setname, options):
+ parser = self._parser
+ self.errors = []
+ if not setname in self.psets:
+ options["name"] = setname
+ options["world-candidate"] = "False"
+
+ # for the unlikely case that there is already a section with the requested setname
+ import random
+ while setname in parser.sections():
+ setname = "%08d" % random.randint(0, 10**10)
+
+ parser.add_section(setname)
+ for k, v in options.items():
+ parser.set(setname, k, v)
+ else:
+ section = self.psets[setname].creator
+ if parser.has_option(section, "multiset") and \
+ parser.getboolean(section, "multiset"):
+ self.errors.append(_("Invalid request to reconfigure set '%(set)s' generated "
+ "by multiset section '%(section)s'") % {"set": setname, "section": section})
+ return
+ for k, v in options.items():
+ parser.set(section, k, v)
+ self._parse(update=True)
+
+ def _parse(self, update=False):
+ if self._parsed and not update:
+ return
+ parser = self._parser
+ for sname in parser.sections():
+ # find classname for current section, default to file based sets
+ if not parser.has_option(sname, "class"):
+ classname = "portage._sets.files.StaticFileSet"
+ else:
+ classname = parser.get(sname, "class")
+
+ if classname.startswith('portage.sets.'):
+ # The module has been made private, but we still support
+ # the previous namespace for sets.conf entries.
+ classname = classname.replace('sets', '_sets', 1)
+
+ # try to import the specified class
+ try:
+ setclass = load_mod(classname)
+ except (ImportError, AttributeError):
+ try:
+ setclass = load_mod("portage._sets." + classname)
+ except (ImportError, AttributeError):
+ self.errors.append(_("Could not import '%(class)s' for section "
+ "'%(section)s'") % {"class": classname, "section": sname})
+ continue
+ # prepare option dict for the current section
+ optdict = {}
+ for oname in parser.options(sname):
+ optdict[oname] = parser.get(sname, oname)
+
+ # create single or multiple instances of the given class depending on configuration
+ if parser.has_option(sname, "multiset") and \
+ parser.getboolean(sname, "multiset"):
+ if hasattr(setclass, "multiBuilder"):
+ newsets = {}
+ try:
+ newsets = setclass.multiBuilder(optdict, self.settings, self.trees)
+ except SetConfigError as e:
+ self.errors.append(_("Configuration error in section '%s': %s") % (sname, str(e)))
+ continue
+ for x in newsets:
+ if x in self.psets and not update:
+ self.errors.append(_("Redefinition of set '%s' (sections: '%s', '%s')") % (x, self.psets[x].creator, sname))
+ newsets[x].creator = sname
+ if parser.has_option(sname, "world-candidate") and \
+ parser.getboolean(sname, "world-candidate"):
+ newsets[x].world_candidate = True
+ self.psets.update(newsets)
+ else:
+ self.errors.append(_("Section '%(section)s' is configured as multiset, but '%(class)s' "
+ "doesn't support that configuration") % {"section": sname, "class": classname})
+ continue
+ else:
+ try:
+ setname = parser.get(sname, "name")
+ except NoOptionError:
+ setname = sname
+ if setname in self.psets and not update:
+ self.errors.append(_("Redefinition of set '%s' (sections: '%s', '%s')") % (setname, self.psets[setname].creator, sname))
+ if hasattr(setclass, "singleBuilder"):
+ try:
+ self.psets[setname] = setclass.singleBuilder(optdict, self.settings, self.trees)
+ self.psets[setname].creator = sname
+ if parser.has_option(sname, "world-candidate") and \
+ parser.getboolean(sname, "world-candidate"):
+ self.psets[setname].world_candidate = True
+ except SetConfigError as e:
+ self.errors.append(_("Configuration error in section '%s': %s") % (sname, str(e)))
+ continue
+ else:
+ self.errors.append(_("'%(class)s' does not support individual set creation, section '%(section)s' "
+ "must be configured as multiset") % {"class": classname, "section": sname})
+ continue
+ self._parsed = True
+
+ def getSets(self):
+ self._parse()
+ return self.psets.copy()
+
+ def getSetAtoms(self, setname, ignorelist=None):
+ """
+ This raises PackageSetNotFound if the given setname does not exist.
+ """
+ self._parse()
+ try:
+ myset = self.psets[setname]
+ except KeyError:
+ raise PackageSetNotFound(setname)
+ myatoms = myset.getAtoms()
+
+ if ignorelist is None:
+ ignorelist = set()
+
+ ignorelist.add(setname)
+ for n in myset.getNonAtoms():
+ if n.startswith(SETPREFIX):
+ s = n[len(SETPREFIX):]
+ if s in self.psets:
+ if s not in ignorelist:
+ myatoms.update(self.getSetAtoms(s,
+ ignorelist=ignorelist))
+ else:
+ raise PackageSetNotFound(s)
+
+ return myatoms
+
+def load_default_config(settings, trees):
+
+ if not _ENABLE_SET_CONFIG:
+ return SetConfig(None, settings, trees)
+
+ global_config_path = GLOBAL_CONFIG_PATH
+ if portage.const.EPREFIX:
+ global_config_path = os.path.join(portage.const.EPREFIX,
+ GLOBAL_CONFIG_PATH.lstrip(os.sep))
+ vcs_dirs = [_unicode_encode(x, encoding=_encodings['fs']) for x in VCS_DIRS]
+ def _getfiles():
+ for path, dirs, files in os.walk(os.path.join(global_config_path, "sets")):
+ for d in dirs:
+ if d in vcs_dirs or d.startswith(b".") or d.endswith(b"~"):
+ dirs.remove(d)
+ for f in files:
+ if not f.startswith(b".") and not f.endswith(b"~"):
+ yield os.path.join(path, f)
+
+ dbapi = trees["porttree"].dbapi
+ for repo in dbapi.getRepositories():
+ path = dbapi.getRepositoryPath(repo)
+ yield os.path.join(path, "sets.conf")
+
+ yield os.path.join(settings["PORTAGE_CONFIGROOT"],
+ USER_CONFIG_PATH, "sets.conf")
+
+ return SetConfig(_getfiles(), settings, trees)
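
SetConfig consumes plain INI sections, one per package set: the class option names the set implementation, every other option becomes part of that set's option dict, and the legacy portage.sets.* namespace is accepted and rewritten to portage._sets.*. A small Python 3 sketch of reading such content with the standard configparser (the section names and values are illustrative; real configuration comes from the sets.conf files gathered by _getfiles above):

from configparser import ConfigParser

sample = """
[kernel-modules]
class = portage.sets.dbapi.OwnerSet
files = /lib/modules

[my-tools]
class = portage.sets.files.StaticFileSet
world-candidate = true
"""

parser = ConfigParser()
parser.read_string(sample)
for sname in parser.sections():
    if parser.has_option(sname, "class"):
        classname = parser.get(sname, "class")
    else:
        classname = "portage._sets.files.StaticFileSet"  # file-based default
    # The legacy namespace is still accepted and mapped to the private one.
    if classname.startswith("portage.sets."):
        classname = classname.replace("sets", "_sets", 1)
    print(sname, "->", classname)
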
diff --git a/lib/portage/_sets/base.py b/lib/portage/_sets/base.py
new file mode 100644
index 000000000..aba295602
--- /dev/null
+++ b/lib/portage/_sets/base.py
@@ -0,0 +1,250 @@
+# Copyright 2007-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from portage.dep import Atom, ExtendedAtomDict, best_match_to_list, match_from_list
+from portage.exception import InvalidAtom
+from portage.versions import cpv_getkey
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+OPERATIONS = ["merge", "unmerge"]
+
+class PackageSet(object):
+ # Set this to operations that are supported by your subclass. While
+ # technically there is no difference between "merge" and "unmerge" regarding
+ # package sets, the latter doesn't make sense for some sets like "system"
+ # or "security" and therefore isn't supported by them.
+ _operations = ["merge"]
+ description = "generic package set"
+
+ def __init__(self, allow_wildcard=False, allow_repo=False):
+ self._atoms = set()
+ self._atommap = ExtendedAtomDict(set)
+ self._loaded = False
+ self._loading = False
+ self.errors = []
+ self._nonatoms = set()
+ self.world_candidate = False
+ self._allow_wildcard = allow_wildcard
+ self._allow_repo = allow_repo
+
+ def __contains__(self, atom):
+ self._load()
+ return atom in self._atoms or atom in self._nonatoms
+
+ def __iter__(self):
+ self._load()
+ for x in self._atoms:
+ yield x
+ for x in self._nonatoms:
+ yield x
+
+ def __bool__(self):
+ self._load()
+ return bool(self._atoms or self._nonatoms)
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+ def supportsOperation(self, op):
+ if not op in OPERATIONS:
+ raise ValueError(op)
+ return op in self._operations
+
+ def _load(self):
+ if not (self._loaded or self._loading):
+ self._loading = True
+ self.load()
+ self._loaded = True
+ self._loading = False
+
+ def getAtoms(self):
+ self._load()
+ return self._atoms.copy()
+
+ def getNonAtoms(self):
+ self._load()
+ return self._nonatoms.copy()
+
+ def _setAtoms(self, atoms):
+ self._atoms.clear()
+ self._nonatoms.clear()
+ for a in atoms:
+ if not isinstance(a, Atom):
+ if isinstance(a, basestring):
+ a = a.strip()
+ if not a:
+ continue
+ try:
+ a = Atom(a, allow_wildcard=True, allow_repo=True)
+ except InvalidAtom:
+ self._nonatoms.add(a)
+ continue
+ if not self._allow_wildcard and a.extended_syntax:
+ raise InvalidAtom("extended atom syntax not allowed here")
+ if not self._allow_repo and a.repo:
+ raise InvalidAtom("repository specification not allowed here")
+ self._atoms.add(a)
+
+ self._updateAtomMap()
+
+ def load(self):
+ # This method must be overwritten by subclasses
+ # Editable sets should use the value of self._mtime to determine if they
+ # need to reload themselves
+ raise NotImplementedError()
+
+ def containsCPV(self, cpv):
+ self._load()
+ for a in self._atoms:
+ if match_from_list(a, [cpv]):
+ return True
+ return False
+
+ def getMetadata(self, key):
+ if hasattr(self, key.lower()):
+ return getattr(self, key.lower())
+ else:
+ return ""
+
+ def _updateAtomMap(self, atoms=None):
+ """Update self._atommap for specific atoms or all atoms."""
+ if not atoms:
+ self._atommap.clear()
+ atoms = self._atoms
+ for a in atoms:
+ self._atommap.setdefault(a.cp, set()).add(a)
+
+ # Not sure if this one should really be in PackageSet
+ def findAtomForPackage(self, pkg, modified_use=None):
+ """Return the best match for a given package from the arguments, or
+ None if there are no matches. This matches virtual arguments against
+ the PROVIDE metadata. This can raise an InvalidDependString exception
+ if an error occurs while parsing PROVIDE."""
+
+ if modified_use is not None and modified_use is not pkg.use.enabled:
+ pkg = pkg.copy()
+ pkg._metadata["USE"] = " ".join(modified_use)
+
+ # Atoms matched via PROVIDE must be temporarily transformed since
+ # match_from_list() only works correctly when atom.cp == pkg.cp.
+ rev_transform = {}
+ for atom in self.iterAtomsForPackage(pkg):
+ if atom.cp == pkg.cp:
+ rev_transform[atom] = atom
+ else:
+ rev_transform[Atom(atom.replace(atom.cp, pkg.cp, 1), allow_wildcard=True, allow_repo=True)] = atom
+ best_match = best_match_to_list(pkg, iter(rev_transform))
+ if best_match:
+ return rev_transform[best_match]
+ return None
+
+ def iterAtomsForPackage(self, pkg):
+ """
+ Find all matching atoms for a given package. This matches virtual
+ arguments against the PROVIDE metadata. This will raise an
+ InvalidDependString exception if PROVIDE is invalid.
+ """
+ cpv_slot_list = [pkg]
+ cp = cpv_getkey(pkg.cpv)
+ self._load() # make sure the atoms are loaded
+
+ atoms = self._atommap.get(cp)
+ if atoms:
+ for atom in atoms:
+ if match_from_list(atom, cpv_slot_list):
+ yield atom
+
+class EditablePackageSet(PackageSet):
+
+ def __init__(self, allow_wildcard=False, allow_repo=False):
+ super(EditablePackageSet, self).__init__(allow_wildcard=allow_wildcard, allow_repo=allow_repo)
+
+ def update(self, atoms):
+ self._load()
+ modified = False
+ normal_atoms = []
+ for a in atoms:
+ if not isinstance(a, Atom):
+ try:
+ a = Atom(a, allow_wildcard=True, allow_repo=True)
+ except InvalidAtom:
+ modified = True
+ self._nonatoms.add(a)
+ continue
+ if not self._allow_wildcard and a.extended_syntax:
+ raise InvalidAtom("extended atom syntax not allowed here")
+ if not self._allow_repo and a.repo:
+ raise InvalidAtom("repository specification not allowed here")
+ normal_atoms.append(a)
+
+ if normal_atoms:
+ modified = True
+ self._atoms.update(normal_atoms)
+ self._updateAtomMap(atoms=normal_atoms)
+ if modified:
+ self.write()
+
+ def add(self, atom):
+ self.update([atom])
+
+ def replace(self, atoms):
+ self._setAtoms(atoms)
+ self.write()
+
+ def remove(self, atom):
+ self._load()
+ self._atoms.discard(atom)
+ self._nonatoms.discard(atom)
+ self._updateAtomMap()
+ self.write()
+
+ def removePackageAtoms(self, cp):
+ self._load()
+ for a in list(self._atoms):
+ if a.cp == cp:
+ self.remove(a)
+ self.write()
+
+ def write(self):
+ # This method must be overwritten in subclasses that should be editable
+ raise NotImplementedError()
+
+class InternalPackageSet(EditablePackageSet):
+ def __init__(self, initial_atoms=None, allow_wildcard=False, allow_repo=True):
+ """
+ Repo atoms are allowed more often than not, so it makes sense for this
+ class to allow them by default. The Atom constructor and isvalidatom()
+ functions default to allow_repo=False, which is sufficient to ensure
+ that repo atoms are prohibited when necessary.
+ """
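+		# Illustrative usage (a sketch): this class is handy as a throwaway
+		# in-memory set, e.g.
+		#     pset = InternalPackageSet(initial_atoms=[">=dev-lang/python-3.6"])
+		#     atom = pset.findAtomForPackage(pkg)  # pkg: a Package instance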
+ super(InternalPackageSet, self).__init__(allow_wildcard=allow_wildcard, allow_repo=allow_repo)
+ if initial_atoms != None:
+ self.update(initial_atoms)
+
+ def clear(self):
+ self._atoms.clear()
+ self._updateAtomMap()
+
+ def load(self):
+ pass
+
+ def write(self):
+ pass
+
+class DummyPackageSet(PackageSet):
+ def __init__(self, atoms=None):
+ super(DummyPackageSet, self).__init__()
+ if atoms:
+ self._setAtoms(atoms)
+
+ def load(self):
+ pass
+
+ def singleBuilder(cls, options, settings, trees):
+ atoms = options.get("packages", "").split()
+ return DummyPackageSet(atoms=atoms)
+ singleBuilder = classmethod(singleBuilder)
diff --git a/lib/portage/_sets/dbapi.py b/lib/portage/_sets/dbapi.py
new file mode 100644
index 000000000..299cb8157
--- /dev/null
+++ b/lib/portage/_sets/dbapi.py
@@ -0,0 +1,537 @@
+# Copyright 2007-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division
+
+import re
+import time
+
+from portage import os
+from portage.versions import best, catsplit, vercmp
+from portage.dep import Atom, use_reduce
+from portage.exception import InvalidAtom
+from portage.localization import _
+from portage._sets.base import PackageSet
+from portage._sets import SetConfigError, get_boolean
+import portage
+
+__all__ = ["CategorySet", "ChangedDepsSet", "DowngradeSet",
+ "EverythingSet", "OwnerSet", "VariableSet"]
+
+class EverythingSet(PackageSet):
+ _operations = ["merge"]
+ description = "Package set which contains SLOT " + \
+ "atoms to match all installed packages"
+ _filter = None
+
+ def __init__(self, vdbapi, **kwargs):
+ super(EverythingSet, self).__init__()
+ self._db = vdbapi
+
+ def load(self):
+ myatoms = []
+ pkg_str = self._db._pkg_str
+ cp_list = self._db.cp_list
+
+ for cp in self._db.cp_all():
+ for cpv in cp_list(cp):
+ # NOTE: Create SLOT atoms even when there is only one
+ # SLOT installed, in order to avoid the possibility
+ # of unwanted upgrades as reported in bug #338959.
+ pkg = pkg_str(cpv, None)
+ atom = Atom("%s:%s" % (pkg.cp, pkg.slot))
+ if self._filter:
+ if self._filter(atom):
+ myatoms.append(atom)
+ else:
+ myatoms.append(atom)
+
+ self._setAtoms(myatoms)
+
+ def singleBuilder(self, options, settings, trees):
+ return EverythingSet(trees["vartree"].dbapi)
+ singleBuilder = classmethod(singleBuilder)
+
+class OwnerSet(PackageSet):
+
+ _operations = ["merge", "unmerge"]
+
+	description = "Package set which contains all packages " + \
+		"that own one or more of the specified files."
+
+ def __init__(self, vardb=None, exclude_files=None, files=None):
+ super(OwnerSet, self).__init__()
+ self._db = vardb
+ self._exclude_files = exclude_files
+ self._files = files
+
+ def mapPathsToAtoms(self, paths, exclude_paths=None):
+ """
+ All paths must have $EROOT stripped from the left side.
+ """
+ rValue = set()
+ vardb = self._db
+ pkg_str = vardb._pkg_str
+ if exclude_paths is None:
+ for link, p in vardb._owners.iter_owners(paths):
+ pkg = pkg_str(link.mycpv, None)
+ rValue.add("%s:%s" % (pkg.cp, pkg.slot))
+ else:
+ all_paths = set()
+ all_paths.update(paths)
+ all_paths.update(exclude_paths)
+ exclude_atoms = set()
+ for link, p in vardb._owners.iter_owners(all_paths):
+ pkg = pkg_str(link.mycpv, None)
+ atom = "%s:%s" % (pkg.cp, pkg.slot)
+ rValue.add(atom)
+ if p in exclude_paths:
+ exclude_atoms.add(atom)
+ rValue.difference_update(exclude_atoms)
+
+ return rValue
+
+ def load(self):
+ self._setAtoms(self.mapPathsToAtoms(self._files,
+ exclude_paths=self._exclude_files))
+
+ def singleBuilder(cls, options, settings, trees):
+ if not "files" in options:
+ raise SetConfigError(_("no files given"))
+
+ exclude_files = options.get("exclude-files")
+ if exclude_files is not None:
+ exclude_files = frozenset(portage.util.shlex_split(exclude_files))
+ return cls(vardb=trees["vartree"].dbapi, exclude_files=exclude_files,
+ files=frozenset(portage.util.shlex_split(options["files"])))
+
+ singleBuilder = classmethod(singleBuilder)
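+
+	# Illustrative sets.conf entry (options as parsed above; the section
+	# name and path are made up):
+	#
+	#     [xorg-module-owners]
+	#     class = portage.sets.dbapi.OwnerSet
+	#     files = /usr/lib64/xorg/modules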
+
+class VariableSet(EverythingSet):
+
+ _operations = ["merge", "unmerge"]
+
+ description = "Package set which contains all packages " + \
+ "that match specified values of a specified variable."
+
+ def __init__(self, vardb, metadatadb=None, variable=None, includes=None, excludes=None):
+ super(VariableSet, self).__init__(vardb)
+ self._metadatadb = metadatadb
+ self._variable = variable
+ self._includes = includes
+ self._excludes = excludes
+
+ def _filter(self, atom):
+ ebuild = best(self._metadatadb.match(atom))
+ if not ebuild:
+ return False
+ values, = self._metadatadb.aux_get(ebuild, [self._variable])
+ values = values.split()
+ if self._includes and not self._includes.intersection(values):
+ return False
+ if self._excludes and self._excludes.intersection(values):
+ return False
+ return True
+
+ def singleBuilder(cls, options, settings, trees):
+
+ variable = options.get("variable")
+ if variable is None:
+ raise SetConfigError(_("missing required attribute: 'variable'"))
+
+ includes = options.get("includes", "")
+ excludes = options.get("excludes", "")
+
+ if not (includes or excludes):
+ raise SetConfigError(_("no includes or excludes given"))
+
+ metadatadb = options.get("metadata-source", "vartree")
+ if not metadatadb in trees:
+ raise SetConfigError(_("invalid value '%s' for option metadata-source") % metadatadb)
+
+ return cls(trees["vartree"].dbapi,
+ metadatadb=trees[metadatadb].dbapi,
+ excludes=frozenset(excludes.split()),
+ includes=frozenset(includes.split()),
+ variable=variable)
+
+ singleBuilder = classmethod(singleBuilder)
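+
+	# Illustrative sets.conf entry (option names match the parsing above;
+	# the section name and values are made up):
+	#
+	#     [non-gpl-installed]
+	#     class = portage.sets.dbapi.VariableSet
+	#     variable = LICENSE
+	#     excludes = GPL-2 GPL-3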
+
+class DowngradeSet(PackageSet):
+
+ _operations = ["merge", "unmerge"]
+
+ description = "Package set which contains all packages " + \
+ "for which the highest visible ebuild version is lower than " + \
+ "the currently installed version."
+
+ def __init__(self, portdb=None, vardb=None):
+ super(DowngradeSet, self).__init__()
+ self._portdb = portdb
+ self._vardb = vardb
+
+ def load(self):
+ atoms = []
+ xmatch = self._portdb.xmatch
+ xmatch_level = "bestmatch-visible"
+ cp_list = self._vardb.cp_list
+ pkg_str = self._vardb._pkg_str
+ for cp in self._vardb.cp_all():
+ for cpv in cp_list(cp):
+ pkg = pkg_str(cpv, None)
+ slot_atom = "%s:%s" % (pkg.cp, pkg.slot)
+ ebuild = xmatch(xmatch_level, slot_atom)
+ if not ebuild:
+ continue
+ if vercmp(cpv.version, ebuild.version) > 0:
+ atoms.append(slot_atom)
+
+ self._setAtoms(atoms)
+
+ def singleBuilder(cls, options, settings, trees):
+ return cls(portdb=trees["porttree"].dbapi,
+ vardb=trees["vartree"].dbapi)
+
+ singleBuilder = classmethod(singleBuilder)
+
+class UnavailableSet(EverythingSet):
+
+ _operations = ["unmerge"]
+
+ description = "Package set which contains all installed " + \
+ "packages for which there are no visible ebuilds " + \
+ "corresponding to the same $CATEGORY/$PN:$SLOT."
+
+ def __init__(self, vardb, metadatadb=None):
+ super(UnavailableSet, self).__init__(vardb)
+ self._metadatadb = metadatadb
+
+ def _filter(self, atom):
+ return not self._metadatadb.match(atom)
+
+ def singleBuilder(cls, options, settings, trees):
+
+ metadatadb = options.get("metadata-source", "porttree")
+ if not metadatadb in trees:
+ raise SetConfigError(_("invalid value '%s' for option "
+ "metadata-source") % (metadatadb,))
+
+ return cls(trees["vartree"].dbapi,
+ metadatadb=trees[metadatadb].dbapi)
+
+ singleBuilder = classmethod(singleBuilder)
+
+class UnavailableBinaries(EverythingSet):
+
+ _operations = ('merge', 'unmerge',)
+
+ description = "Package set which contains all installed " + \
+ "packages for which corresponding binary packages " + \
+ "are not available."
+
+ def __init__(self, vardb, metadatadb=None):
+ super(UnavailableBinaries, self).__init__(vardb)
+ self._metadatadb = metadatadb
+
+ def _filter(self, atom):
+ inst_pkg = self._db.match(atom)
+ if not inst_pkg:
+ return False
+ inst_cpv = inst_pkg[0]
+ return not self._metadatadb.cpv_exists(inst_cpv)
+
+ def singleBuilder(cls, options, settings, trees):
+
+ metadatadb = options.get("metadata-source", "bintree")
+ if not metadatadb in trees:
+ raise SetConfigError(_("invalid value '%s' for option "
+ "metadata-source") % (metadatadb,))
+
+ return cls(trees["vartree"].dbapi,
+ metadatadb=trees[metadatadb].dbapi)
+
+ singleBuilder = classmethod(singleBuilder)
+
+class CategorySet(PackageSet):
+ _operations = ["merge", "unmerge"]
+
+ def __init__(self, category, dbapi, only_visible=True):
+ super(CategorySet, self).__init__()
+ self._db = dbapi
+ self._category = category
+ self._check = only_visible
+ if only_visible:
+ s="visible"
+ else:
+ s="all"
+ self.description = "Package set containing %s packages of category %s" % (s, self._category)
+
+ def load(self):
+ myatoms = []
+ for cp in self._db.cp_all():
+ if catsplit(cp)[0] == self._category:
+ if (not self._check) or len(self._db.match(cp)) > 0:
+ myatoms.append(cp)
+ self._setAtoms(myatoms)
+
+ def _builderGetRepository(cls, options, repositories):
+ repository = options.get("repository", "porttree")
+ if not repository in repositories:
+ raise SetConfigError(_("invalid repository class '%s'") % repository)
+ return repository
+ _builderGetRepository = classmethod(_builderGetRepository)
+
+ def _builderGetVisible(cls, options):
+ return get_boolean(options, "only_visible", True)
+ _builderGetVisible = classmethod(_builderGetVisible)
+
+ def singleBuilder(cls, options, settings, trees):
+ if not "category" in options:
+ raise SetConfigError(_("no category given"))
+
+ category = options["category"]
+ if not category in settings.categories:
+ raise SetConfigError(_("invalid category name '%s'") % category)
+
+ repository = cls._builderGetRepository(options, trees.keys())
+ visible = cls._builderGetVisible(options)
+
+ return CategorySet(category, dbapi=trees[repository].dbapi, only_visible=visible)
+ singleBuilder = classmethod(singleBuilder)
+
+ def multiBuilder(cls, options, settings, trees):
+ rValue = {}
+
+ if "categories" in options:
+ categories = options["categories"].split()
+ invalid = set(categories).difference(settings.categories)
+ if invalid:
+ raise SetConfigError(_("invalid categories: %s") % ", ".join(list(invalid)))
+ else:
+ categories = settings.categories
+
+ repository = cls._builderGetRepository(options, trees.keys())
+ visible = cls._builderGetVisible(options)
+ name_pattern = options.get("name_pattern", "$category/*")
+
+ if not "$category" in name_pattern and not "${category}" in name_pattern:
+ raise SetConfigError(_("name_pattern doesn't include $category placeholder"))
+
+ for cat in categories:
+ myset = CategorySet(cat, trees[repository].dbapi, only_visible=visible)
+ myname = name_pattern.replace("$category", cat)
+ myname = myname.replace("${category}", cat)
+ rValue[myname] = myset
+ return rValue
+ multiBuilder = classmethod(multiBuilder)
+
+class AgeSet(EverythingSet):
+ _operations = ["merge", "unmerge"]
+ _aux_keys = ('BUILD_TIME',)
+
+ def __init__(self, vardb, mode="older", age=7):
+ super(AgeSet, self).__init__(vardb)
+ self._mode = mode
+ self._age = age
+
+ def _filter(self, atom):
+
+ cpv = self._db.match(atom)[0]
+ try:
+ date, = self._db.aux_get(cpv, self._aux_keys)
+ date = int(date)
+ except (KeyError, ValueError):
+ return bool(self._mode == "older")
+ age = (time.time() - date) / (3600 * 24)
+ if ((self._mode == "older" and age <= self._age) \
+ or (self._mode == "newer" and age >= self._age)):
+ return False
+ else:
+ return True
+
+ def singleBuilder(cls, options, settings, trees):
+ mode = options.get("mode", "older")
+ if str(mode).lower() not in ["newer", "older"]:
+ raise SetConfigError(_("invalid 'mode' value %s (use either 'newer' or 'older')") % mode)
+ try:
+ age = int(options.get("age", "7"))
+ except ValueError as e:
+ raise SetConfigError(_("value of option 'age' is not an integer"))
+ return AgeSet(vardb=trees["vartree"].dbapi, mode=mode, age=age)
+
+ singleBuilder = classmethod(singleBuilder)
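+
+	# Illustrative sets.conf entry (options as parsed above; values made up):
+	#
+	#     [built-over-30-days-ago]
+	#     class = portage.sets.dbapi.AgeSet
+	#     mode = older
+	#     age = 30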
+
+class DateSet(EverythingSet):
+ _operations = ["merge", "unmerge"]
+ _aux_keys = ('BUILD_TIME',)
+
+ def __init__(self, vardb, date, mode="older"):
+ super(DateSet, self).__init__(vardb)
+ self._mode = mode
+ self._date = date
+
+ def _filter(self, atom):
+
+ cpv = self._db.match(atom)[0]
+ try:
+ date, = self._db.aux_get(cpv, self._aux_keys)
+ date = int(date)
+ except (KeyError, ValueError):
+ return bool(self._mode == "older")
+ # Make sure inequality is _strict_ to exclude tested package
+ if ((self._mode == "older" and date < self._date) \
+ or (self._mode == "newer" and date > self._date)):
+ return True
+ else:
+ return False
+
+ def singleBuilder(cls, options, settings, trees):
+ vardbapi = trees["vartree"].dbapi
+ mode = options.get("mode", "older")
+ if str(mode).lower() not in ["newer", "older"]:
+ raise SetConfigError(_("invalid 'mode' value %s (use either 'newer' or 'older')") % mode)
+
+ formats = []
+ if options.get("package") is not None:
+ formats.append("package")
+ if options.get("filestamp") is not None:
+ formats.append("filestamp")
+ if options.get("seconds") is not None:
+ formats.append("seconds")
+ if options.get("date") is not None:
+ formats.append("date")
+
+ if not formats:
+ raise SetConfigError(_("none of these options specified: 'package', 'filestamp', 'seconds', 'date'"))
+ elif len(formats) > 1:
+ raise SetConfigError(_("no more than one of these options is allowed: 'package', 'filestamp', 'seconds', 'date'"))
+
+ format = formats[0]
+
+ if (format == "package"):
+ package = options.get("package")
+ try:
+ cpv = vardbapi.match(package)[0]
+ date, = vardbapi.aux_get(cpv, ('BUILD_TIME',))
+ date = int(date)
+ except (KeyError, ValueError):
+ raise SetConfigError(_("cannot determine installation date of package %s") % package)
+ elif (format == "filestamp"):
+ filestamp = options.get("filestamp")
+ try:
+ date = int(os.stat(filestamp).st_mtime)
+ except (OSError, ValueError):
+ raise SetConfigError(_("cannot determine 'filestamp' of '%s'") % filestamp)
+ elif (format == "seconds"):
+ try:
+ date = int(options.get("seconds"))
+ except ValueError:
+ raise SetConfigError(_("option 'seconds' must be an integer"))
+ else:
+ dateopt = options.get("date")
+ try:
+ dateformat = options.get("dateformat", "%x %X")
+ date = int(time.mktime(time.strptime(dateopt, dateformat)))
+ except ValueError:
+ raise SetConfigError(_("'date=%s' does not match 'dateformat=%s'") % (dateopt, dateformat))
+ return DateSet(vardb=vardbapi, date=date, mode=mode)
+
+ singleBuilder = classmethod(singleBuilder)
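+
+	# Illustrative sets.conf entry (exactly one of 'package', 'filestamp',
+	# 'seconds' or 'date' may be given, as enforced above; values made up):
+	#
+	#     [older-than-last-profile-change]
+	#     class = portage.sets.dbapi.DateSet
+	#     mode = older
+	#     filestamp = /etc/portage/make.profile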
+
+class RebuiltBinaries(EverythingSet):
+ _operations = ('merge',)
+ _aux_keys = ('BUILD_TIME',)
+
+ def __init__(self, vardb, bindb=None):
+ super(RebuiltBinaries, self).__init__(vardb, bindb=bindb)
+ self._bindb = bindb
+
+ def _filter(self, atom):
+ cpv = self._db.match(atom)[0]
+ inst_build_time, = self._db.aux_get(cpv, self._aux_keys)
+ try:
+ bin_build_time, = self._bindb.aux_get(cpv, self._aux_keys)
+ except KeyError:
+ return False
+ return bool(bin_build_time and (inst_build_time != bin_build_time))
+
+ def singleBuilder(cls, options, settings, trees):
+ return RebuiltBinaries(trees["vartree"].dbapi,
+ bindb=trees["bintree"].dbapi)
+
+ singleBuilder = classmethod(singleBuilder)
+
+class ChangedDepsSet(PackageSet):
+
+ _operations = ["merge", "unmerge"]
+
+ description = "Package set which contains all installed " + \
+ "packages for which the vdb *DEPEND entries are outdated " + \
+ "compared to corresponding portdb entries."
+
+ def __init__(self, portdb=None, vardb=None):
+ super(ChangedDepsSet, self).__init__()
+ self._portdb = portdb
+ self._vardb = vardb
+
+ def load(self):
+ depvars = ('RDEPEND', 'PDEPEND')
+
+ # regexp used to match atoms using subslot operator :=
+ subslot_repl_re = re.compile(r':[^[]*=')
+
+ atoms = []
+ for cpv in self._vardb.cpv_all():
+ # no ebuild, no update :).
+ if not self._portdb.cpv_exists(cpv):
+ continue
+
+ # USE flags used to build the ebuild and EAPI
+ # (needed for Atom & use_reduce())
+ use, eapi = self._vardb.aux_get(cpv, ('USE', 'EAPI'))
+ usel = use.split()
+
+ # function used to recursively process atoms in nested lists.
+ def clean_subslots(depatom, usel=None):
+ if isinstance(depatom, list):
+ # process the nested list.
+ return [clean_subslots(x, usel) for x in depatom]
+ else:
+ try:
+ # this can be either an atom or some special operator.
+ # in the latter case, we get InvalidAtom and pass it as-is.
+ a = Atom(depatom)
+ except InvalidAtom:
+ return depatom
+ else:
+ # if we're processing portdb, we need to evaluate USE flag
+ # dependency conditionals to make them match vdb. this
+ # requires passing the list of USE flags, so we reuse it
+ # as conditional for the operation as well.
+ if usel is not None:
+ a = a.evaluate_conditionals(usel)
+
+ # replace slot operator := dependencies with plain :=
+ # since we can't properly compare expanded slots
+ # in vardb to abstract slots in portdb.
+ return subslot_repl_re.sub(':=', a)
+
+ # get all *DEPEND variables from vdb & portdb and compare them.
+ # we need to do some cleaning up & expansion to make matching
+ # meaningful since vdb dependencies are conditional-free.
+ vdbvars = [clean_subslots(use_reduce(x, uselist=usel, eapi=eapi))
+ for x in self._vardb.aux_get(cpv, depvars)]
+ pdbvars = [clean_subslots(use_reduce(x, uselist=usel, eapi=eapi), usel)
+ for x in self._portdb.aux_get(cpv, depvars)]
+
+ # if dependencies don't match, trigger the rebuild.
+ if vdbvars != pdbvars:
+ atoms.append('=%s' % cpv)
+
+ self._setAtoms(atoms)
+
+ def singleBuilder(cls, options, settings, trees):
+ return cls(portdb=trees["porttree"].dbapi,
+ vardb=trees["vartree"].dbapi)
+
+ singleBuilder = classmethod(singleBuilder)
diff --git a/lib/portage/_sets/files.py b/lib/portage/_sets/files.py
new file mode 100644
index 000000000..e045701ff
--- /dev/null
+++ b/lib/portage/_sets/files.py
@@ -0,0 +1,394 @@
+# Copyright 2007-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import re
+from itertools import chain
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.util import grabfile, write_atomic, ensure_dirs, normalize_path
+from portage.const import USER_CONFIG_PATH, VCS_DIRS, WORLD_FILE, WORLD_SETS_FILE
+from portage.localization import _
+from portage.locks import lockfile, unlockfile
+from portage import portage_gid
+from portage._sets.base import PackageSet, EditablePackageSet
+from portage._sets import SetConfigError, SETPREFIX, get_boolean
+from portage.env.loaders import ItemFileLoader, KeyListFileLoader
+from portage.env.validators import ValidAtomValidator
+from portage import cpv_getkey
+
+__all__ = ["StaticFileSet", "ConfigFileSet", "WorldSelectedSet",
+ "WorldSelectedPackagesSet", "WorldSelectedSetsSet"]
+
+class StaticFileSet(EditablePackageSet):
+ _operations = ["merge", "unmerge"]
+ _repopath_match = re.compile(r'.*\$\{repository:(?P<reponame>.+)\}.*')
+ _repopath_sub = re.compile(r'\$\{repository:(?P<reponame>.+)\}')
+
+ def __init__(self, filename, greedy=False, dbapi=None):
+ super(StaticFileSet, self).__init__(allow_repo=True)
+ self._filename = filename
+ self._mtime = None
+ self.description = "Package set loaded from file %s" % self._filename
+ self.loader = ItemFileLoader(self._filename, self._validate)
+ if greedy and not dbapi:
+ self.errors.append(_("%s configured as greedy set, but no dbapi instance passed in constructor") % self._filename)
+ greedy = False
+ self.greedy = greedy
+ self.dbapi = dbapi
+
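+		# The optional "<filename>.metadata" companion file is parsed by the
+		# loop below: a line ending in ":" starts a key, the lines that
+		# follow form its value, and a blank line (or end of file) ends the
+		# block. Illustrative example:
+		#
+		#     description:
+		#     Tools I want on every workstation
+		#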
+ metadata = grabfile(self._filename + ".metadata")
+ key = None
+ value = []
+ for line in metadata:
+ line = line.strip()
+ if len(line) == 0 and key != None:
+ setattr(self, key, " ".join(value))
+ key = None
+ elif line[-1] == ":" and key == None:
+ key = line[:-1].lower()
+ value = []
+ elif key != None:
+ value.append(line)
+ else:
+ pass
+ else:
+ if key != None:
+ setattr(self, key, " ".join(value))
+
+ def _validate(self, atom):
+ return bool(atom[:1] == SETPREFIX or ValidAtomValidator(atom, allow_repo=True))
+
+ def write(self):
+ write_atomic(self._filename, "".join("%s\n" % (atom,) \
+ for atom in sorted(chain(self._atoms, self._nonatoms))))
+
+ def load(self):
+ try:
+ mtime = os.stat(self._filename).st_mtime
+ except (OSError, IOError):
+ mtime = None
+ if (not self._loaded or self._mtime != mtime):
+ try:
+ data, errors = self.loader.load()
+ for fname in errors:
+ for e in errors[fname]:
+ self.errors.append(fname+": "+e)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ data = {}
+ if self.greedy:
+ atoms = []
+ for a in data:
+ matches = self.dbapi.match(a)
+ for cpv in matches:
+ pkg = self.dbapi._pkg_str(cpv, None)
+ atoms.append("%s:%s" % (pkg.cp, pkg.slot))
+ # In addition to any installed slots, also try to pull
+ # in the latest new slot that may be available.
+ atoms.append(a)
+ else:
+ atoms = iter(data)
+ self._setAtoms(atoms)
+ self._mtime = mtime
+
+ def singleBuilder(self, options, settings, trees):
+ if not "filename" in options:
+ raise SetConfigError(_("no filename specified"))
+ greedy = get_boolean(options, "greedy", False)
+ filename = options["filename"]
+ # look for repository path variables
+ match = self._repopath_match.match(filename)
+ if match:
+ try:
+ filename = self._repopath_sub.sub(trees["porttree"].dbapi.treemap[match.groupdict()["reponame"]], filename)
+ except KeyError:
+ raise SetConfigError(_("Could not find repository '%s'") % match.groupdict()["reponame"])
+ return StaticFileSet(filename, greedy=greedy, dbapi=trees["vartree"].dbapi)
+ singleBuilder = classmethod(singleBuilder)
+
+ def multiBuilder(self, options, settings, trees):
+ rValue = {}
+ directory = options.get("directory",
+ os.path.join(settings["PORTAGE_CONFIGROOT"],
+ USER_CONFIG_PATH, "sets"))
+ name_pattern = options.get("name_pattern", "${name}")
+ if not "$name" in name_pattern and not "${name}" in name_pattern:
+ raise SetConfigError(_("name_pattern doesn't include ${name} placeholder"))
+ greedy = get_boolean(options, "greedy", False)
+ # look for repository path variables
+ match = self._repopath_match.match(directory)
+ if match:
+ try:
+ directory = self._repopath_sub.sub(trees["porttree"].dbapi.treemap[match.groupdict()["reponame"]], directory)
+ except KeyError:
+ raise SetConfigError(_("Could not find repository '%s'") % match.groupdict()["reponame"])
+
+ try:
+ directory = _unicode_decode(directory,
+ encoding=_encodings['fs'], errors='strict')
+ # Now verify that we can also encode it.
+ _unicode_encode(directory,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeError:
+ directory = _unicode_decode(directory,
+ encoding=_encodings['fs'], errors='replace')
+ raise SetConfigError(
+ _("Directory path contains invalid character(s) for encoding '%s': '%s'") \
+ % (_encodings['fs'], directory))
+
+ vcs_dirs = [_unicode_encode(x, encoding=_encodings['fs']) for x in VCS_DIRS]
+ if os.path.isdir(directory):
+ directory = normalize_path(directory)
+
+ for parent, dirs, files in os.walk(directory):
+ try:
+ parent = _unicode_decode(parent,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ for d in dirs[:]:
+ if d in vcs_dirs or d.startswith(b".") or d.endswith(b"~"):
+ dirs.remove(d)
+ for filename in files:
+ try:
+ filename = _unicode_decode(filename,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ if filename.startswith(".") or filename.endswith("~"):
+ continue
+ if filename.endswith(".metadata"):
+ continue
+ filename = os.path.join(parent,
+ filename)[1 + len(directory):]
+ myname = name_pattern.replace("$name", filename)
+ myname = myname.replace("${name}", filename)
+ rValue[myname] = StaticFileSet(
+ os.path.join(directory, filename),
+ greedy=greedy, dbapi=trees["vartree"].dbapi)
+ return rValue
+ multiBuilder = classmethod(multiBuilder)
+
+class ConfigFileSet(PackageSet):
+ def __init__(self, filename):
+ super(ConfigFileSet, self).__init__()
+ self._filename = filename
+ self.description = "Package set generated from %s" % self._filename
+ self.loader = KeyListFileLoader(self._filename, ValidAtomValidator)
+
+ def load(self):
+ data, errors = self.loader.load()
+ self._setAtoms(iter(data))
+
+ def singleBuilder(self, options, settings, trees):
+ if not "filename" in options:
+ raise SetConfigError(_("no filename specified"))
+ return ConfigFileSet(options["filename"])
+ singleBuilder = classmethod(singleBuilder)
+
+ def multiBuilder(self, options, settings, trees):
+ rValue = {}
+ directory = options.get("directory",
+ os.path.join(settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH))
+ name_pattern = options.get("name_pattern", "sets/package_$suffix")
+ if not "$suffix" in name_pattern and not "${suffix}" in name_pattern:
+ raise SetConfigError(_("name_pattern doesn't include $suffix placeholder"))
+ for suffix in ["keywords", "use", "mask", "unmask"]:
+ myname = name_pattern.replace("$suffix", suffix)
+ myname = myname.replace("${suffix}", suffix)
+ rValue[myname] = ConfigFileSet(os.path.join(directory, "package."+suffix))
+ return rValue
+ multiBuilder = classmethod(multiBuilder)
+
+class WorldSelectedSet(EditablePackageSet):
+ description = "Set of packages and subsets that were directly installed by the user"
+
+ def __init__(self, eroot):
+ super(WorldSelectedSet, self).__init__(allow_repo=True)
+ self._pkgset = WorldSelectedPackagesSet(eroot)
+ self._setset = WorldSelectedSetsSet(eroot)
+
+ def write(self):
+ self._pkgset._atoms = self._atoms.copy()
+ self._pkgset.write()
+ self._setset._nonatoms = self._nonatoms.copy()
+ self._setset.write()
+
+ def load(self):
+ # Iterating over these sets does not force them to load if they
+ # have been loaded previously.
+ self._pkgset.load()
+ self._setset.load()
+ self._setAtoms(chain(self._pkgset, self._setset))
+
+ def lock(self):
+ self._pkgset.lock()
+ self._setset.lock()
+
+ def unlock(self):
+ self._pkgset.unlock()
+ self._setset.unlock()
+
+ def cleanPackage(self, vardb, cpv):
+ self._pkgset.cleanPackage(vardb, cpv)
+
+ def singleBuilder(self, options, settings, trees):
+ return WorldSelectedSet(settings["EROOT"])
+ singleBuilder = classmethod(singleBuilder)
+
+class WorldSelectedPackagesSet(EditablePackageSet):
+ description = "Set of packages that were directly installed by the user"
+
+ def __init__(self, eroot):
+ super(WorldSelectedPackagesSet, self).__init__(allow_repo=True)
+ self._lock = None
+ self._filename = os.path.join(eroot, WORLD_FILE)
+ self.loader = ItemFileLoader(self._filename, self._validate)
+ self._mtime = None
+
+ def _validate(self, atom):
+ return ValidAtomValidator(atom, allow_repo=True)
+
+ def write(self):
+ write_atomic(self._filename,
+ "".join(sorted("%s\n" % x for x in self._atoms)))
+
+ def load(self):
+ atoms = []
+ atoms_changed = False
+ try:
+ mtime = os.stat(self._filename).st_mtime
+ except (OSError, IOError):
+ mtime = None
+ if (not self._loaded or self._mtime != mtime):
+ try:
+ data, errors = self.loader.load()
+ for fname in errors:
+ for e in errors[fname]:
+ self.errors.append(fname+": "+e)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ data = {}
+ atoms = list(data)
+ self._mtime = mtime
+ atoms_changed = True
+ else:
+ atoms.extend(self._atoms)
+
+ if atoms_changed:
+ self._setAtoms(atoms)
+
+ def _ensure_dirs(self):
+ ensure_dirs(os.path.dirname(self._filename), gid=portage_gid, mode=0o2750, mask=0o2)
+
+ def lock(self):
+ if self._lock is not None:
+ raise AssertionError("already locked")
+ self._ensure_dirs()
+ self._lock = lockfile(self._filename, wantnewlockfile=1)
+
+ def unlock(self):
+ if self._lock is None:
+ raise AssertionError("not locked")
+ unlockfile(self._lock)
+ self._lock = None
+
+ def cleanPackage(self, vardb, cpv):
+ '''
+ Before calling this function you should call lock and load.
+ After calling this function you should call unlock.
+ '''
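+		# Illustrative calling sequence (a sketch):
+		#     world = WorldSelectedPackagesSet(settings["EROOT"])
+		#     world.lock()
+		#     world.load()
+		#     world.cleanPackage(vardb, cpv)
+		#     world.unlock()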
+ if not self._lock:
+ raise AssertionError('cleanPackage needs the set to be locked')
+
+ worldlist = list(self._atoms)
+ mykey = cpv_getkey(cpv)
+ newworldlist = []
+ for x in worldlist:
+ if x.cp == mykey:
+ matches = vardb.match(x, use_cache=0)
+ if not matches:
+ #zap our world entry
+ pass
+ elif len(matches) == 1 and matches[0] == cpv:
+ #zap our world entry
+ pass
+ else:
+ #others are around; keep it.
+ newworldlist.append(x)
+ else:
+ #this doesn't match the package we're unmerging; keep it.
+ newworldlist.append(x)
+
+ newworldlist.extend(self._nonatoms)
+ self.replace(newworldlist)
+
+ def singleBuilder(self, options, settings, trees):
+ return WorldSelectedPackagesSet(settings["EROOT"])
+ singleBuilder = classmethod(singleBuilder)
+
+class WorldSelectedSetsSet(EditablePackageSet):
+ description = "Set of sets that were directly installed by the user"
+
+ def __init__(self, eroot):
+ super(WorldSelectedSetsSet, self).__init__(allow_repo=True)
+ self._lock = None
+ self._filename = os.path.join(eroot, WORLD_SETS_FILE)
+ self.loader = ItemFileLoader(self._filename, self._validate)
+ self._mtime = None
+
+ def _validate(self, setname):
+ return setname.startswith(SETPREFIX)
+
+ def write(self):
+ write_atomic(self._filename,
+ "".join(sorted("%s\n" % x for x in self._nonatoms)))
+
+ def load(self):
+ atoms_changed = False
+ try:
+ mtime = os.stat(self._filename).st_mtime
+ except (OSError, IOError):
+ mtime = None
+ if (not self._loaded or self._mtime != mtime):
+ try:
+ data, errors = self.loader.load()
+ for fname in errors:
+ for e in errors[fname]:
+ self.errors.append(fname+": "+e)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ data = {}
+ nonatoms = list(data)
+ self._mtime = mtime
+ atoms_changed = True
+ else:
+ nonatoms = list(self._nonatoms)
+
+ if atoms_changed:
+ self._setAtoms(nonatoms)
+
+ def lock(self):
+ if self._lock is not None:
+ raise AssertionError("already locked")
+ self._lock = lockfile(self._filename, wantnewlockfile=1)
+
+ def unlock(self):
+ if self._lock is None:
+ raise AssertionError("not locked")
+ unlockfile(self._lock)
+ self._lock = None
+
+ def singleBuilder(self, options, settings, trees):
+ return WorldSelectedSetsSet(settings["EROOT"])
+ singleBuilder = classmethod(singleBuilder)
diff --git a/lib/portage/_sets/libs.py b/lib/portage/_sets/libs.py
new file mode 100644
index 000000000..022e076f5
--- /dev/null
+++ b/lib/portage/_sets/libs.py
@@ -0,0 +1,99 @@
+# Copyright 2007-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+from portage.exception import InvalidData
+from portage.localization import _
+from portage._sets.base import PackageSet
+from portage._sets import get_boolean, SetConfigError
+import portage
+
+class LibraryConsumerSet(PackageSet):
+ _operations = ["merge", "unmerge"]
+
+ def __init__(self, vardbapi, debug=False):
+ super(LibraryConsumerSet, self).__init__()
+ self.dbapi = vardbapi
+ self.debug = debug
+
+ def mapPathsToAtoms(self, paths):
+ rValue = set()
+ for p in paths:
+ for cpv in self.dbapi._linkmap.getOwners(p):
+ try:
+ pkg = self.dbapi._pkg_str(cpv, None)
+ except (KeyError, InvalidData):
+ # This is expected for preserved libraries
+ # of packages that have been uninstalled
+ # without replacement.
+ pass
+ else:
+ rValue.add("%s:%s" % (pkg.cp, pkg.slot))
+ return rValue
+
+class LibraryFileConsumerSet(LibraryConsumerSet):
+
+ """
+ Note: This does not detect libtool archive (*.la) files that consume the
+ specified files (revdep-rebuild is able to detect them).
+ """
+
+ description = "Package set which contains all packages " + \
+ "that consume the specified library file(s)."
+
+ def __init__(self, vardbapi, files, **kargs):
+ super(LibraryFileConsumerSet, self).__init__(vardbapi, **kargs)
+ self.files = files
+
+ def load(self):
+ consumers = set()
+ for lib in self.files:
+ consumers.update(
+ self.dbapi._linkmap.findConsumers(lib, greedy=False))
+
+ if not consumers:
+ return
+ self._setAtoms(self.mapPathsToAtoms(consumers))
+
+ def singleBuilder(cls, options, settings, trees):
+ files = tuple(portage.util.shlex_split(options.get("files", "")))
+ if not files:
+ raise SetConfigError(_("no files given"))
+ debug = get_boolean(options, "debug", False)
+ return LibraryFileConsumerSet(trees["vartree"].dbapi,
+ files, debug=debug)
+ singleBuilder = classmethod(singleBuilder)
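+
+	# Illustrative sets.conf entry (options as parsed above; the library
+	# path is made up):
+	#
+	#     [libssl-consumers]
+	#     class = portage.sets.libs.LibraryFileConsumerSet
+	#     files = /usr/lib64/libssl.so.3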
+
+class PreservedLibraryConsumerSet(LibraryConsumerSet):
+ def load(self):
+ reg = self.dbapi._plib_registry
+ if reg is None:
+ # preserve-libs is entirely disabled
+ return
+ consumers = set()
+ if reg:
+ plib_dict = reg.getPreservedLibs()
+ for libs in plib_dict.values():
+ for lib in libs:
+ if self.debug:
+ print(lib)
+ for x in sorted(self.dbapi._linkmap.findConsumers(lib, greedy=False)):
+ print(" ", x)
+ print("-"*40)
+ consumers.update(self.dbapi._linkmap.findConsumers(lib, greedy=False))
+ # Don't rebuild packages just because they contain preserved
+ # libs that happen to be consumers of other preserved libs.
+ for libs in plib_dict.values():
+ consumers.difference_update(libs)
+ else:
+ return
+ if not consumers:
+ return
+ self._setAtoms(self.mapPathsToAtoms(consumers))
+
+ def singleBuilder(cls, options, settings, trees):
+ debug = get_boolean(options, "debug", False)
+ return PreservedLibraryConsumerSet(trees["vartree"].dbapi,
+ debug=debug)
+ singleBuilder = classmethod(singleBuilder)
diff --git a/lib/portage/_sets/profiles.py b/lib/portage/_sets/profiles.py
new file mode 100644
index 000000000..bccc02e7c
--- /dev/null
+++ b/lib/portage/_sets/profiles.py
@@ -0,0 +1,57 @@
+# Copyright 2007-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+
+from portage import os
+from portage.util import grabfile_package, stack_lists
+from portage._sets.base import PackageSet
+from portage._sets import get_boolean
+from portage.util import writemsg_level
+
+__all__ = ["PackagesSystemSet"]
+
+class PackagesSystemSet(PackageSet):
+ _operations = ["merge"]
+
+ def __init__(self, profiles, debug=False):
+ super(PackagesSystemSet, self).__init__()
+ self._profiles = profiles
+ self._debug = debug
+ if profiles:
+ desc_profile = profiles[-1]
+ if desc_profile.user_config and len(profiles) > 1:
+ desc_profile = profiles[-2]
+ description = desc_profile.location
+ else:
+ description = None
+ self.description = "System packages for profile %s" % description
+
+ def load(self):
+ debug = self._debug
+ if debug:
+ writemsg_level("\nPackagesSystemSet: profiles: %s\n" %
+ (self._profiles,), level=logging.DEBUG, noiselevel=-1)
+
+ mylist = [grabfile_package(os.path.join(x.location, "packages"),
+ verify_eapi=True, eapi=x.eapi, eapi_default=None,
+ allow_build_id=x.allow_build_id)
+ for x in self._profiles]
+
+ if debug:
+ writemsg_level("\nPackagesSystemSet: raw packages: %s\n" % \
+ (mylist,), level=logging.DEBUG, noiselevel=-1)
+
+ mylist = stack_lists(mylist, incremental=1)
+
+ if debug:
+ writemsg_level("\nPackagesSystemSet: stacked packages: %s\n" % \
+ (mylist,), level=logging.DEBUG, noiselevel=-1)
+
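+		# Only entries marked with a leading "*" in the profiles' "packages"
+		# files belong to the system set; the "*" prefix is stripped here.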
+ self._setAtoms([x[1:] for x in mylist if x[0] == "*"])
+
+ def singleBuilder(self, options, settings, trees):
+ debug = get_boolean(options, "debug", False)
+ return PackagesSystemSet(
+ settings._locations_manager.profiles_complex, debug=debug)
+ singleBuilder = classmethod(singleBuilder)
diff --git a/lib/portage/_sets/security.py b/lib/portage/_sets/security.py
new file mode 100644
index 000000000..f8dbef2be
--- /dev/null
+++ b/lib/portage/_sets/security.py
@@ -0,0 +1,86 @@
+# Copyright 2007-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage.glsa as glsa
+from portage._sets.base import PackageSet
+from portage.versions import vercmp
+from portage._sets import get_boolean
+
+__all__ = ["SecuritySet", "NewGlsaSet", "NewAffectedSet", "AffectedSet"]
+
+class SecuritySet(PackageSet):
+ _operations = ["merge"]
+ _skip_applied = False
+
+ description = "package set that includes all packages possibly affected by a GLSA"
+
+ def __init__(self, settings, vardbapi, portdbapi, least_change=True):
+ super(SecuritySet, self).__init__()
+ self._settings = settings
+ self._vardbapi = vardbapi
+ self._portdbapi = portdbapi
+ self._least_change = least_change
+
+ def getGlsaList(self, skip_applied):
+ glsaindexlist = glsa.get_glsa_list(self._settings)
+ if skip_applied:
+ applied_list = glsa.get_applied_glsas(self._settings)
+ glsaindexlist = set(glsaindexlist).difference(applied_list)
+ glsaindexlist = list(glsaindexlist)
+ glsaindexlist.sort()
+ return glsaindexlist
+
+ def load(self):
+ glsaindexlist = self.getGlsaList(self._skip_applied)
+ atomlist = []
+ for glsaid in glsaindexlist:
+ myglsa = glsa.Glsa(glsaid, self._settings, self._vardbapi, self._portdbapi)
+ #print glsaid, myglsa.isVulnerable(), myglsa.isApplied(), myglsa.getMergeList()
+ if self.useGlsa(myglsa):
+ atomlist += ["="+x for x in myglsa.getMergeList(least_change=self._least_change)]
+ self._setAtoms(self._reduce(atomlist))
+
+ def _reduce(self, atomlist):
+ mydict = {}
+ for atom in atomlist[:]:
+ cpv = self._portdbapi.xmatch("match-all", atom)[0]
+ pkg = self._portdbapi._pkg_str(cpv, None)
+ cps = "%s:%s" % (pkg.cp, pkg.slot)
+ if not cps in mydict:
+ mydict[cps] = (atom, cpv)
+ else:
+ other_cpv = mydict[cps][1]
+ if vercmp(cpv.version, other_cpv.version) > 0:
+ atomlist.remove(mydict[cps][0])
+ mydict[cps] = (atom, cpv)
+ return atomlist
+
+ def useGlsa(self, myglsa):
+ return True
+
+ def updateAppliedList(self):
+ glsaindexlist = self.getGlsaList(True)
+ applied_list = glsa.get_applied_glsas(self._settings)
+ for glsaid in glsaindexlist:
+ myglsa = glsa.Glsa(glsaid, self._settings, self._vardbapi, self._portdbapi)
+ if not myglsa.isVulnerable() and not myglsa.nr in applied_list:
+ myglsa.inject()
+
+ def singleBuilder(cls, options, settings, trees):
+ least_change = not get_boolean(options, "use_emerge_resolver", False)
+ return cls(settings, trees["vartree"].dbapi, trees["porttree"].dbapi, least_change=least_change)
+ singleBuilder = classmethod(singleBuilder)
+
+class NewGlsaSet(SecuritySet):
+ _skip_applied = True
+ description = "Package set that includes all packages possibly affected by an unapplied GLSA"
+
+class AffectedSet(SecuritySet):
+	description = "Package set that includes all packages affected by a GLSA"
+
+ def useGlsa(self, myglsa):
+ return myglsa.isVulnerable()
+
+class NewAffectedSet(AffectedSet):
+ _skip_applied = True
+ description = "Package set that includes all packages affected by an unapplied GLSA"
diff --git a/lib/portage/_sets/shell.py b/lib/portage/_sets/shell.py
new file mode 100644
index 000000000..2c95845c8
--- /dev/null
+++ b/lib/portage/_sets/shell.py
@@ -0,0 +1,44 @@
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+
+from portage import os
+from portage import _unicode_decode
+from portage._sets.base import PackageSet
+from portage._sets import SetConfigError
+
+__all__ = ["CommandOutputSet"]
+
+class CommandOutputSet(PackageSet):
+ """This class creates a PackageSet from the output of a shell command.
+ The shell command should produce one atom per line, that is:
+
+	atom1
+ atom2
+ ...
+ atomN
+
+ Args:
+ name: A string that identifies the set.
+ command: A string or sequence identifying the command to run
+	  (see the subprocess.Popen documentation for the format)
+ """
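+	# Illustrative sets.conf entry (the "command" option is read by
+	# singleBuilder below; the section name and script are made up):
+	#
+	#     [live-rebuild-candidates]
+	#     class = portage.sets.shell.CommandOutputSet
+	#     command = /usr/local/bin/list-live-packages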
+ _operations = ["merge", "unmerge"]
+
+ def __init__(self, command):
+ super(CommandOutputSet, self).__init__()
+ self._command = command
+ self.description = "Package set generated from output of '%s'" % self._command
+
+ def load(self):
+ pipe = subprocess.Popen(self._command, stdout=subprocess.PIPE, shell=True)
+ stdout, stderr = pipe.communicate()
+ if pipe.wait() == os.EX_OK:
+ self._setAtoms(_unicode_decode(stdout).splitlines())
+
+ def singleBuilder(self, options, settings, trees):
+ if not "command" in options:
+ raise SetConfigError("no command specified")
+ return CommandOutputSet(options["command"])
+ singleBuilder = classmethod(singleBuilder)
diff --git a/lib/portage/cache/__init__.py b/lib/portage/cache/__init__.py
new file mode 100644
index 000000000..e7fe599f0
--- /dev/null
+++ b/lib/portage/cache/__init__.py
@@ -0,0 +1,4 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
diff --git a/lib/portage/cache/anydbm.py b/lib/portage/cache/anydbm.py
new file mode 100644
index 000000000..88d85b0da
--- /dev/null
+++ b/lib/portage/cache/anydbm.py
@@ -0,0 +1,116 @@
+# Copyright 2005-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author(s): Brian Harring (ferringb@gentoo.org)
+
+from __future__ import absolute_import
+
+try:
+ import anydbm as anydbm_module
+except ImportError:
+ # python 3.x
+ import dbm as anydbm_module
+
+try:
+ import dbm.gnu as gdbm
+except ImportError:
+ try:
+ import gdbm
+ except ImportError:
+ gdbm = None
+
+try:
+ from dbm import whichdb
+except ImportError:
+ from whichdb import whichdb
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+from portage import _unicode_encode
+from portage import os
+import sys
+from portage.cache import fs_template
+from portage.cache import cache_errors
+
+
+class database(fs_template.FsBased):
+
+ validation_chf = 'md5'
+ chf_types = ('md5', 'mtime')
+
+ autocommits = True
+ cleanse_keys = True
+ serialize_eclasses = False
+
+ def __init__(self, *args, **config):
+ super(database,self).__init__(*args, **config)
+
+ default_db = config.get("dbtype","anydbm")
+ if not default_db.startswith("."):
+ default_db = '.' + default_db
+
+ self._db_path = os.path.join(self.location, fs_template.gen_label(self.location, self.label)+default_db)
+ self.__db = None
+ mode = "w"
+ if whichdb(self._db_path) in ("dbm.gnu", "gdbm"):
+ # Allow multiple concurrent writers (see bug #53607).
+ mode += "u"
+ try:
+ # dbm.open() will not work with bytes in python-3.1:
+ # TypeError: can't concat bytes to str
+ self.__db = anydbm_module.open(self._db_path,
+ mode, self._perms)
+ except anydbm_module.error:
+ # XXX handle this at some point
+ try:
+ self._ensure_dirs()
+ self._ensure_dirs(self._db_path)
+ except (OSError, IOError) as e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+ # try again if failed
+ try:
+ if self.__db == None:
+ # dbm.open() will not work with bytes in python-3.1:
+ # TypeError: can't concat bytes to str
+ if gdbm is None:
+ self.__db = anydbm_module.open(self._db_path,
+ "c", self._perms)
+ else:
+ # Prefer gdbm type if available, since it allows
+ # multiple concurrent writers (see bug #53607).
+ self.__db = gdbm.open(self._db_path,
+ "cu", self._perms)
+ except anydbm_module.error as e:
+ raise cache_errors.InitializationError(self.__class__, e)
+ self._ensure_access(self._db_path)
+
+ def iteritems(self):
+ # dbm doesn't implement items()
+ for k in self.__db.keys():
+ yield (k, self[k])
+
+ def _getitem(self, cpv):
+		# We override _getitem because the stored value is just a pickled
+		# copy of the data handed in.
+ return pickle.loads(self.__db[_unicode_encode(cpv)])
+
+ def _setitem(self, cpv, values):
+ self.__db[_unicode_encode(cpv)] = pickle.dumps(values,pickle.HIGHEST_PROTOCOL)
+
+ def _delitem(self, cpv):
+ del self.__db[cpv]
+
+ def __iter__(self):
+ return iter(list(self.__db.keys()))
+
+ def __contains__(self, cpv):
+ return cpv in self.__db
+
+	def __del__(self):
+		# Note: because of name mangling, self.__db is stored in __dict__
+		# as "_database__db", so check for that key rather than "__db".
+		if "_database__db" in self.__dict__ and self.__db is not None:
+			self.__db.sync()
+			self.__db.close()
+
+ if sys.hexversion >= 0x3000000:
+ items = iteritems
diff --git a/lib/portage/cache/cache_errors.py b/lib/portage/cache/cache_errors.py
new file mode 100644
index 000000000..3c1f2397e
--- /dev/null
+++ b/lib/portage/cache/cache_errors.py
@@ -0,0 +1,62 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
+class CacheError(Exception): pass
+
+class InitializationError(CacheError):
+ def __init__(self, class_name, error):
+ self.error, self.class_name = error, class_name
+ def __str__(self):
+ return "Creation of instance %s failed due to %s" % \
+ (self.class_name, str(self.error))
+
+
+class CacheCorruption(CacheError):
+ def __init__(self, key, ex):
+ self.key, self.ex = key, ex
+ def __str__(self):
+ return "%s is corrupt: %s" % (self.key, str(self.ex))
+
+
+class GeneralCacheCorruption(CacheError):
+ def __init__(self,ex): self.ex = ex
+ def __str__(self): return "corruption detected: %s" % str(self.ex)
+
+
+class InvalidRestriction(CacheError):
+ def __init__(self, key, restriction, exception=None):
+ if exception == None: exception = ''
+		self.key, self.restriction, self.ex = key, restriction, exception
+ def __str__(self):
+ return "%s:%s is not valid: %s" % \
+ (self.key, self.restriction, str(self.ex))
+
+
+class ReadOnlyRestriction(CacheError):
+ def __init__(self, info=''):
+ self.info = info
+ def __str__(self):
+ return "cache is non-modifiable"+str(self.info)
+
+class StatCollision(CacheError):
+ """
+ If the content of a cache entry changes and neither the file mtime nor
+ size changes, it will prevent rsync from detecting changes. Cache backends
+ may raise this exception from _setitem() if they detect this type of stat
+ collision. See bug #139134.
+ """
+ def __init__(self, key, filename, mtime, size):
+ self.key = key
+ self.filename = filename
+ self.mtime = mtime
+ self.size = size
+
+ def __str__(self):
+ return "%s has stat collision with size %s and mtime %s" % \
+ (self.key, self.size, self.mtime)
+
+ def __repr__(self):
+ return "portage.cache.cache_errors.StatCollision(%s)" % \
+ (', '.join((repr(self.key), repr(self.filename),
+ repr(self.mtime), repr(self.size))),)
diff --git a/lib/portage/cache/ebuild_xattr.py b/lib/portage/cache/ebuild_xattr.py
new file mode 100644
index 000000000..cc6b06246
--- /dev/null
+++ b/lib/portage/cache/ebuild_xattr.py
@@ -0,0 +1,172 @@
+# -*- coding: utf-8 -*-
+# Copyright: 2009-2011 Gentoo Foundation
+# Author(s): Petteri Räty (betelgeuse@gentoo.org)
+# License: GPL2
+
+__all__ = ['database']
+
+import errno
+
+import portage
+from portage.cache import fs_template
+from portage.versions import catsplit
+from portage import cpv_getkey
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'xattr')
+
+class NoValueException(Exception):
+ pass
+
+class database(fs_template.FsBased):
+
+ autocommits = True
+
+ def __init__(self, *args, **config):
+ super(database,self).__init__(*args, **config)
+ self.portdir = self.label
+ self.ns = xattr.NS_USER + '.gentoo.cache'
+ self.keys = set(self._known_keys)
+ self.keys.add('_mtime_')
+ self.keys.add('_eclasses_')
+		# xattrs have an upper length limit
+ self.max_len = self.__get_max()
+
+ def __get_max(self):
+ path = os.path.join(self.portdir,'profiles/repo_name')
+ try:
+ return int(self.__get(path,'value_max_len'))
+ except NoValueException as e:
+ max = self.__calc_max(path)
+ self.__set(path,'value_max_len',str(max))
+ return max
+
+ def __calc_max(self,path):
+ """ Find out max attribute length supported by the file system """
+
+ hundred = ''
+ for i in range(100):
+ hundred+='a'
+
+ s=hundred
+
+ # Could use finally but needs python 2.5 then
+ try:
+ while True:
+ self.__set(path,'test_max',s)
+ s+=hundred
+ except IOError as e:
+			# ext-based filesystems report the wrong errno here
+ # https://bugzilla.kernel.org/show_bug.cgi?id=12793
+ if e.errno in (errno.E2BIG, errno.ENOSPC):
+ result = len(s)-100
+ else:
+ raise
+
+ try:
+ self.__remove(path,'test_max')
+ except IOError as e:
+ if e.errno != errno.ENODATA:
+ raise
+
+ return result
+
+ def __get_path(self,cpv):
+ cat,pn = catsplit(cpv_getkey(cpv))
+ return os.path.join(self.portdir,cat,pn,os.path.basename(cpv) + ".ebuild")
+
+ def __has_cache(self,path):
+ try:
+ self.__get(path,'_mtime_')
+ except NoValueException as e:
+ return False
+
+ return True
+
+ def __get(self,path,key,default=None):
+ try:
+ return xattr.get(path,key,namespace=self.ns)
+ except IOError as e:
+ if not default is None and errno.ENODATA == e.errno:
+ return default
+ else:
+ raise NoValueException()
+
+ def __remove(self,path,key):
+ xattr.remove(path,key,namespace=self.ns)
+
+ def __set(self,path,key,value):
+ xattr.set(path,key,value,namespace=self.ns)
+
+ def _getitem(self, cpv):
+ values = {}
+ path = self.__get_path(cpv)
+ all = {}
+ for tuple in xattr.get_all(path,namespace=self.ns):
+ key,value = tuple
+ all[key] = value
+
+ if not '_mtime_' in all:
+ raise KeyError(cpv)
+
+ # We default to '' like other caches
+ for key in self.keys:
+ attr_value = all.get(key,'1:')
+ parts,sep,value = attr_value.partition(':')
+ parts = int(parts)
+ if parts > 1:
+ for i in range(1,parts):
+ value += all.get(key+str(i))
+ values[key] = value
+
+ return values
+
+ def _setitem(self, cpv, values):
+ path = self.__get_path(cpv)
+ max = self.max_len
+ for key,value in values.items():
+			# mtime comes in as a long, so convert each value to a string
+ s = str(value)
+ # We need to split long values
+ value_len = len(s)
+ parts = 0
+ if value_len > max:
+				# Find out how many parts we need (integer division, so that
+				# range() below receives an int)
+				parts = value_len // max
+ if value_len % max > 0:
+ parts += 1
+
+ # Only the first entry carries the number of parts
+ self.__set(path,key,'%s:%s'%(parts,s[0:max]))
+
+ # Write out the rest
+ for i in range(1,parts):
+ start = i * max
+ val = s[start:start+max]
+ self.__set(path,key+str(i),val)
+ else:
+ self.__set(path,key,"%s:%s"%(1,s))
+
+ def _delitem(self, cpv):
+ pass # Will be gone with the ebuild
+
+ def __contains__(self, cpv):
+ return os.path.exists(self.__get_path(cpv))
+
+ def __iter__(self):
+
+ for root, dirs, files in os.walk(self.portdir):
+ for file in files:
+ try:
+ file = _unicode_decode(file,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ if file[-7:] == '.ebuild':
+ cat = os.path.basename(os.path.dirname(root))
+ pn_pv = file[:-7]
+ path = os.path.join(root,file)
+ if self.__has_cache(path):
+ yield "%s/%s/%s" % (cat,os.path.basename(root),file[:-7])
diff --git a/lib/portage/cache/flat_hash.py b/lib/portage/cache/flat_hash.py
new file mode 100644
index 000000000..79783245b
--- /dev/null
+++ b/lib/portage/cache/flat_hash.py
@@ -0,0 +1,166 @@
+# Copyright 2005-2016 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author(s): Brian Harring (ferringb@gentoo.org)
+
+from __future__ import unicode_literals
+
+from portage.cache import fs_template
+from portage.cache import cache_errors
+import errno
+import io
+import stat
+import sys
+import tempfile
+import os as _os
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage.exception import InvalidData
+from portage.versions import _pkg_str
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+class database(fs_template.FsBased):
+
+ autocommits = True
+
+ def __init__(self, *args, **config):
+ super(database,self).__init__(*args, **config)
+ self.location = os.path.join(self.location,
+ self.label.lstrip(os.path.sep).rstrip(os.path.sep))
+ write_keys = set(self._known_keys)
+ write_keys.add("_eclasses_")
+ write_keys.add("_%s_" % (self.validation_chf,))
+ self._write_keys = sorted(write_keys)
+ if not self.readonly and not os.path.exists(self.location):
+ self._ensure_dirs()
+
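+	# On-disk layout (as read and written by _getitem/_setitem below): one
+	# flat file per cpv under self.location, each line holding "KEY=value".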
+ def _getitem(self, cpv):
+ # Don't use os.path.join, for better performance.
+ fp = self.location + _os.sep + cpv
+ try:
+ with io.open(_unicode_encode(fp,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as myf:
+ lines = myf.read().split("\n")
+ if not lines[-1]:
+ lines.pop()
+ d = self._parse_data(lines, cpv)
+ if '_mtime_' not in d:
+ # Backward compatibility with old cache
+ # that uses mtime mangling.
+ d['_mtime_'] = _os.fstat(myf.fileno())[stat.ST_MTIME]
+ return d
+ except (IOError, OSError) as e:
+ if e.errno != errno.ENOENT:
+ raise cache_errors.CacheCorruption(cpv, e)
+ raise KeyError(cpv, e)
+
+ def _parse_data(self, data, cpv):
+ try:
+ return dict( x.split("=", 1) for x in data )
+ except ValueError as e:
+ # If a line is missing an "=", the split length is 1 instead of 2.
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ def _setitem(self, cpv, values):
+ try:
+ fd, fp = tempfile.mkstemp(dir=self.location)
+ except EnvironmentError as e:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ with io.open(fd, mode='w',
+ encoding=_encodings['repo.content'],
+ errors='backslashreplace') as myf:
+ for k in self._write_keys:
+ v = values.get(k)
+ if not v:
+ continue
+ # NOTE: This format string requires unicode_literals, so that
+ # k and v are coerced to unicode, in order to prevent TypeError
+ # when writing raw bytes to TextIOWrapper with Python 2.
+ myf.write("%s=%s\n" % (k, v))
+
+ self._ensure_access(fp)
+
+ # update written; now we move it into place.
+
+ new_fp = os.path.join(self.location,cpv)
+ try:
+ os.rename(fp, new_fp)
+ except EnvironmentError as e:
+ success = False
+ try:
+ if errno.ENOENT == e.errno:
+ try:
+ self._ensure_dirs(cpv)
+ os.rename(fp, new_fp)
+ success = True
+ except EnvironmentError as e:
+ raise cache_errors.CacheCorruption(cpv, e)
+ else:
+ raise cache_errors.CacheCorruption(cpv, e)
+ finally:
+ if not success:
+ os.remove(fp)
+
+ def _delitem(self, cpv):
+# import pdb;pdb.set_trace()
+ try:
+ os.remove(os.path.join(self.location,cpv))
+ except OSError as e:
+ if errno.ENOENT == e.errno:
+ raise KeyError(cpv)
+ else:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ def __contains__(self, cpv):
+ return os.path.exists(os.path.join(self.location, cpv))
+
+ def __iter__(self):
+ """generator for walking the dir struct"""
+ dirs = [(0, self.location)]
+ len_base = len(self.location)
+ while dirs:
+ depth, dir_path = dirs.pop()
+ try:
+ dir_list = os.listdir(dir_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ continue
+ for l in dir_list:
+ p = os.path.join(dir_path, l)
+ try:
+ st = os.lstat(p)
+ except OSError:
+ # Cache entry disappeared.
+ continue
+ if stat.S_ISDIR(st.st_mode):
+ # Only recurse 1 deep, in order to avoid iteration over
+ # entries from another nested cache instance. This can
+ # happen if the user nests an overlay inside
+ # /usr/portage/local as in bug #302764.
+ if depth < 1:
+ dirs.append((depth+1, p))
+ continue
+
+ try:
+ yield _pkg_str(p[len_base+1:])
+ except InvalidData:
+ continue
+
+
+class md5_database(database):
+
+ validation_chf = 'md5'
+ store_eclass_paths = False
+
+
+class mtime_md5_database(database):
+ validation_chf = 'md5'
+ chf_types = ('md5', 'mtime')
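As a rough usage sketch of this backend (the location, label and key set below are hypothetical; the constructor signature comes from template.database later in this commit), each cpv is stored as a plain file of KEY=value lines:

    from portage.cache import flat_hash

    auxdbkeys = ('DEPEND', 'RDEPEND', 'SLOT', 'EAPI')       # hypothetical key set
    db = flat_hash.database('/tmp/edb-demo', 'gentoo', auxdbkeys)

    # Entries are written atomically via tempfile.mkstemp() + os.rename().
    db['app-misc/hello-1.0'] = {'EAPI': '7', 'SLOT': '0', '_mtime_': 1234567890}
    print(db['app-misc/hello-1.0']['SLOT'])                 # '0'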
diff --git a/lib/portage/cache/fs_template.py b/lib/portage/cache/fs_template.py
new file mode 100644
index 000000000..e3c3c12c2
--- /dev/null
+++ b/lib/portage/cache/fs_template.py
@@ -0,0 +1,93 @@
+# Copyright 2005-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author(s): Brian Harring (ferringb@gentoo.org)
+
+import os as _os
+import sys
+from portage.cache import template
+from portage import os
+
+from portage.proxy.lazyimport import lazyimport
+lazyimport(globals(),
+ 'portage.exception:PortageException',
+ 'portage.util:apply_permissions,ensure_dirs',
+)
+del lazyimport
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+class FsBased(template.database):
+ """template wrapping fs needed options, and providing _ensure_access as a way to
+ attempt to ensure files have the specified owners/perms"""
+
+ def __init__(self, *args, **config):
+
+ for x, y in (("gid", -1), ("perms", 0o644)):
+ if x in config:
+ # Since Python 3.4, chown requires int type (no proxies).
+ setattr(self, "_" + x, int(config[x]))
+ del config[x]
+ else:
+ setattr(self, "_"+x, y)
+ super(FsBased, self).__init__(*args, **config)
+
+ if self.label.startswith(os.path.sep):
+ # normpath.
+ self.label = os.path.sep + os.path.normpath(self.label).lstrip(os.path.sep)
+
+
+ def _ensure_access(self, path, mtime=-1):
+ """returns true or false if it's able to ensure that path is properly chmod'd and chowned.
+ if mtime is specified, attempts to ensure that's correct also"""
+ try:
+ apply_permissions(path, gid=self._gid, mode=self._perms)
+ if mtime != -1:
+ mtime=long(mtime)
+ os.utime(path, (mtime, mtime))
+ except (PortageException, EnvironmentError):
+ return False
+ return True
+
+ def _ensure_dirs(self, path=None):
+ """with path!=None, ensure beyond self.location. otherwise, ensure self.location"""
+ if path:
+ path = os.path.dirname(path)
+ base = self.location
+ else:
+ path = self.location
+ base='/'
+
+ for dir in path.lstrip(os.path.sep).rstrip(os.path.sep).split(os.path.sep):
+ base = os.path.join(base,dir)
+ if ensure_dirs(base):
+ # We only call apply_permissions if ensure_dirs created
+ # a new directory, so as not to interfere with
+ # permissions of existing directories.
+ mode = self._perms
+ if mode == -1:
+ mode = 0
+ mode |= 0o755
+ apply_permissions(base, mode=mode, gid=self._gid)
+
+ def _prune_empty_dirs(self):
+ all_dirs = []
+ for parent, dirs, files in os.walk(self.location):
+ for x in dirs:
+ all_dirs.append(_os.path.join(parent, x))
+ while all_dirs:
+ try:
+ _os.rmdir(all_dirs.pop())
+ except OSError:
+ pass
+
+def gen_label(base, label):
+ """if supplied label is a path, generate a unique label based upon label, and supplied base path"""
+ if label.find(os.path.sep) == -1:
+ return label
+ label = label.strip("\"").strip("'")
+ label = os.path.join(*(label.rstrip(os.path.sep).split(os.path.sep)))
+ tail = os.path.split(label)[1]
+ return "%s-%X" % (tail, abs(label.__hash__()))
+
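For illustration, gen_label() leaves plain labels untouched and turns path-like labels into a unique, filesystem-safe name; the hex suffix is derived from the string hash, so it varies between Python runs:

    from portage.cache.fs_template import gen_label

    print(gen_label('/var/cache/edb', 'gentoo'))        # 'gentoo' (no path separator)
    print(gen_label('/var/cache/edb', '/usr/portage'))  # e.g. 'portage-1A2B3C4D' (hash-derived suffix)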
diff --git a/lib/portage/cache/index/IndexStreamIterator.py b/lib/portage/cache/index/IndexStreamIterator.py
new file mode 100644
index 000000000..972aee116
--- /dev/null
+++ b/lib/portage/cache/index/IndexStreamIterator.py
@@ -0,0 +1,27 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+class IndexStreamIterator(object):
+
+ def __init__(self, f, parser):
+
+ self.parser = parser
+ self._file = f
+
+ def close(self):
+
+ if self._file is not None:
+ self._file.close()
+ self._file = None
+
+ def __iter__(self):
+
+ try:
+
+ for line in self._file:
+ node = self.parser(line)
+ if node is not None:
+ yield node
+
+ finally:
+ self.close()
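A minimal sketch of the iterator contract (any file-like object and parser callable work; parser results of None are skipped, and the wrapped file is closed once iteration finishes):

    import io
    from portage.cache.index.IndexStreamIterator import IndexStreamIterator

    it = IndexStreamIterator(io.StringIO('1\n\n2\n'),
        lambda line: line.strip() or None)
    print(list(it))     # ['1', '2']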
diff --git a/lib/portage/cache/index/__init__.py b/lib/portage/cache/index/__init__.py
new file mode 100644
index 000000000..7cd880e11
--- /dev/null
+++ b/lib/portage/cache/index/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/cache/index/pkg_desc_index.py b/lib/portage/cache/index/pkg_desc_index.py
new file mode 100644
index 000000000..dbcbb8313
--- /dev/null
+++ b/lib/portage/cache/index/pkg_desc_index.py
@@ -0,0 +1,60 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import collections
+import sys
+
+from portage.versions import _pkg_str
+
+if sys.hexversion >= 0x3000000:
+ _unicode = str
+else:
+ _unicode = unicode
+
+pkg_desc_index_node = collections.namedtuple("pkg_desc_index_node",
+ ["cp", "cpv_list", "desc"])
+
+class pkg_node(_unicode):
+ """
+ A minimal package node class. For performance reasons, inputs
+ are not validated.
+ """
+
+ def __init__(self, cp, version, repo=None):
+ self.__dict__['cp'] = cp
+ self.__dict__['repo'] = repo
+ self.__dict__['version'] = version
+ self.__dict__['build_time'] = None
+
+ def __new__(cls, cp, version, repo=None):
+ return _unicode.__new__(cls, cp + "-" + version)
+
+ def __setattr__(self, name, value):
+ raise AttributeError("pkg_node instances are immutable",
+ self.__class__, name, value)
+
+def pkg_desc_index_line_format(cp, pkgs, desc):
+ return "%s %s: %s\n" % (cp,
+ " ".join(_pkg_str(cpv).version
+ for cpv in pkgs), desc)
+
+def pkg_desc_index_line_read(line, repo=None):
+
+ try:
+ pkgs, desc = line.split(":", 1)
+ except ValueError:
+ return None
+ desc = desc.strip()
+
+ try:
+ cp, pkgs = pkgs.split(" ", 1)
+ except ValueError:
+ return None
+
+ cp_list = []
+ for ver in pkgs.split():
+ cp_list.append(pkg_node(cp, ver, repo))
+
+ return pkg_desc_index_node(cp, tuple(cp_list), desc)
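A round-trip sketch of the index line format (the package name, versions and description are made up; pkg_desc_index_line_read() is the natural parser to pair with IndexStreamIterator above):

    from portage.cache.index.pkg_desc_index import (
        pkg_desc_index_line_format, pkg_desc_index_line_read)

    line = pkg_desc_index_line_format('app-misc/hello',
        ['app-misc/hello-1.0', 'app-misc/hello-1.1'], 'Greets you')
    print(repr(line))   # 'app-misc/hello 1.0 1.1: Greets you\n'
    node = pkg_desc_index_line_read(line)
    print(node.cp, list(node.cpv_list), node.desc)
    # app-misc/hello ['app-misc/hello-1.0', 'app-misc/hello-1.1'] Greets you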
diff --git a/lib/portage/cache/mappings.py b/lib/portage/cache/mappings.py
new file mode 100644
index 000000000..921fdaf1b
--- /dev/null
+++ b/lib/portage/cache/mappings.py
@@ -0,0 +1,485 @@
+# Copyright: 2005-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author(s): Brian Harring (ferringb@gentoo.org)
+
+__all__ = ["Mapping", "MutableMapping", "UserDict", "ProtectedDict",
+ "LazyLoad", "slot_dict_class"]
+
+import sys
+import weakref
+
+class Mapping(object):
+ """
+ In python-3.0, the UserDict.DictMixin class has been replaced by
+ Mapping and MutableMapping from the collections module, but 2to3
+ doesn't currently account for this change:
+
+ https://bugs.python.org/issue2876
+
+ As a workaround for the above issue, use this class as a substitute
+ for UserDict.DictMixin so that code converted via 2to3 will run.
+ """
+
+ __slots__ = ()
+
+ def __iter__(self):
+ return iter(self.keys())
+
+ def keys(self):
+ return list(self.__iter__())
+
+ def __contains__(self, key):
+ try:
+ value = self[key]
+ except KeyError:
+ return False
+ return True
+
+ def iteritems(self):
+ for k in self:
+ yield (k, self[k])
+
+ def iterkeys(self):
+ return self.__iter__()
+
+ def itervalues(self):
+ for _, v in self.items():
+ yield v
+
+ def values(self):
+ return [v for _, v in self.iteritems()]
+
+ def items(self):
+ return list(self.iteritems())
+
+ def get(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def __repr__(self):
+ return repr(dict(self.items()))
+
+ def __len__(self):
+ return len(list(self))
+
+ if sys.hexversion >= 0x3000000:
+ items = iteritems
+ keys = __iter__
+ values = itervalues
+
+class MutableMapping(Mapping):
+ """
+ A mutable version of the Mapping class.
+ """
+
+ __slots__ = ()
+
+ def clear(self):
+ for key in list(self):
+ del self[key]
+
+ def setdefault(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ self[key] = default
+ return default
+
+ def pop(self, key, *args):
+ if len(args) > 1:
+ raise TypeError("pop expected at most 2 arguments, got " + \
+ repr(1 + len(args)))
+ try:
+ value = self[key]
+ except KeyError:
+ if args:
+ return args[0]
+ raise
+ del self[key]
+ return value
+
+ def popitem(self):
+ try:
+ k, v = next(iter(self.items()))
+ except StopIteration:
+ raise KeyError('container is empty')
+ del self[k]
+ return (k, v)
+
+ def update(self, *args, **kwargs):
+ if len(args) > 1:
+ raise TypeError(
+ "expected at most 1 positional argument, got " + \
+ repr(len(args)))
+ other = None
+ if args:
+ other = args[0]
+ if other is None:
+ pass
+ elif hasattr(other, 'iteritems'):
+ # Use getattr to avoid interference from 2to3.
+ for k, v in getattr(other, 'iteritems')():
+ self[k] = v
+ elif hasattr(other, 'items'):
+ # Use getattr to avoid interference from 2to3.
+ for k, v in getattr(other, 'items')():
+ self[k] = v
+ elif hasattr(other, 'keys'):
+ for k in other.keys():
+ self[k] = other[k]
+ else:
+ for k, v in other:
+ self[k] = v
+ if kwargs:
+ self.update(kwargs)
+
+class UserDict(MutableMapping):
+ """
+ Use this class as a substitute for UserDict.UserDict so that
+ code converted via 2to3 will run:
+
+ https://bugs.python.org/issue2876
+ """
+
+ __slots__ = ('data',)
+
+ def __init__(self, *args, **kwargs):
+
+ self.data = {}
+
+ if len(args) > 1:
+ raise TypeError(
+ "expected at most 1 positional argument, got " + \
+ repr(len(args)))
+
+ if args:
+ self.update(args[0])
+
+ if kwargs:
+ self.update(kwargs)
+
+ def __repr__(self):
+ return repr(self.data)
+
+ def __contains__(self, key):
+ return key in self.data
+
+ def __iter__(self):
+ return iter(self.data)
+
+ def __len__(self):
+ return len(self.data)
+
+ def __getitem__(self, key):
+ return self.data[key]
+
+ def __setitem__(self, key, item):
+ self.data[key] = item
+
+ def __delitem__(self, key):
+ del self.data[key]
+
+ def clear(self):
+ self.data.clear()
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+
+class OrderedDict(UserDict):
+
+ __slots__ = ('_order',)
+
+ def __init__(self, *args, **kwargs):
+ self._order = []
+ UserDict.__init__(self, *args, **kwargs)
+
+ def __iter__(self):
+ return iter(self._order)
+
+ def __setitem__(self, key, item):
+ new_key = key not in self
+ UserDict.__setitem__(self, key, item)
+ if new_key:
+ self._order.append(key)
+
+ def __delitem__(self, key):
+ UserDict.__delitem__(self, key)
+ self._order.remove(key)
+
+ def clear(self):
+ UserDict.clear(self)
+ del self._order[:]
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+
+class ProtectedDict(MutableMapping):
+ """
+ given an initial dict, this wraps that dict storing changes in a secondary dict, protecting
+ the underlying dict from changes
+ """
+ __slots__=("orig","new","blacklist")
+
+ def __init__(self, orig):
+ self.orig = orig
+ self.new = {}
+ self.blacklist = {}
+
+
+ def __setitem__(self, key, val):
+ self.new[key] = val
+ if key in self.blacklist:
+ del self.blacklist[key]
+
+
+ def __getitem__(self, key):
+ if key in self.new:
+ return self.new[key]
+ if key in self.blacklist:
+ raise KeyError(key)
+ return self.orig[key]
+
+
+ def __delitem__(self, key):
+ if key in self.new:
+ del self.new[key]
+ elif key in self.orig:
+ if key not in self.blacklist:
+ self.blacklist[key] = True
+ return
+ raise KeyError(key)
+
+
+ def __iter__(self):
+ for k in self.new:
+ yield k
+ for k in self.orig:
+ if k not in self.blacklist and k not in self.new:
+ yield k
+
+ def __contains__(self, key):
+ return key in self.new or (key not in self.blacklist and key in self.orig)
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+
+class LazyLoad(Mapping):
+ """
+ Lazy loading of values for a dict
+ """
+ __slots__=("pull", "d")
+
+ def __init__(self, pull_items_func, initial_items=[]):
+ self.d = {}
+ for k, v in initial_items:
+ self.d[k] = v
+ self.pull = pull_items_func
+
+ def __getitem__(self, key):
+ if key in self.d:
+ return self.d[key]
+ elif self.pull != None:
+ self.d.update(self.pull())
+ self.pull = None
+ return self.d[key]
+
+ def __iter__(self):
+ if self.pull is not None:
+ self.d.update(self.pull())
+ self.pull = None
+ return iter(self.d)
+
+ def __contains__(self, key):
+ if key in self.d:
+ return True
+ elif self.pull != None:
+ self.d.update(self.pull())
+ self.pull = None
+ return key in self.d
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+
+_slot_dict_classes = weakref.WeakValueDictionary()
+
+def slot_dict_class(keys, prefix="_val_"):
+ """
+ Generates mapping classes that behave similar to a dict but store values
+ as object attributes that are allocated via __slots__. Instances of these
+ objects have a smaller memory footprint than a normal dict object.
+
+ @param keys: Fixed set of allowed keys
+ @type keys: Iterable
+ @param prefix: a prefix to use when mapping
+ attribute names from keys
+ @type prefix: String
+ @rtype: SlotDict
+ @return: A class that constructs SlotDict instances
+ having the specified keys.
+ """
+ if isinstance(keys, frozenset):
+ keys_set = keys
+ else:
+ keys_set = frozenset(keys)
+ v = _slot_dict_classes.get((keys_set, prefix))
+ if v is None:
+
+ class SlotDict(object):
+
+ allowed_keys = keys_set
+ _prefix = prefix
+ __slots__ = ("__weakref__",) + \
+ tuple(prefix + k for k in allowed_keys)
+
+ def __init__(self, *args, **kwargs):
+
+ if len(args) > 1:
+ raise TypeError(
+ "expected at most 1 positional argument, got " + \
+ repr(len(args)))
+
+ if args:
+ self.update(args[0])
+
+ if kwargs:
+ self.update(kwargs)
+
+ def __iter__(self):
+ for k, v in self.iteritems():
+ yield k
+
+ def __len__(self):
+ l = 0
+ for i in self.iteritems():
+ l += 1
+ return l
+
+ def keys(self):
+ return list(self)
+
+ def iteritems(self):
+ prefix = self._prefix
+ for k in self.allowed_keys:
+ try:
+ yield (k, getattr(self, prefix + k))
+ except AttributeError:
+ pass
+
+ def items(self):
+ return list(self.iteritems())
+
+ def itervalues(self):
+ for k, v in self.iteritems():
+ yield v
+
+ def values(self):
+ return list(self.itervalues())
+
+ def __delitem__(self, k):
+ try:
+ delattr(self, self._prefix + k)
+ except AttributeError:
+ raise KeyError(k)
+
+ def __setitem__(self, k, v):
+ setattr(self, self._prefix + k, v)
+
+ def setdefault(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ self[key] = default
+ return default
+
+ def update(self, *args, **kwargs):
+ if len(args) > 1:
+ raise TypeError(
+ "expected at most 1 positional argument, got " + \
+ repr(len(args)))
+ other = None
+ if args:
+ other = args[0]
+ if other is None:
+ pass
+ elif hasattr(other, 'iteritems'):
+ # Use getattr to avoid interference from 2to3.
+ for k, v in getattr(other, 'iteritems')():
+ self[k] = v
+ elif hasattr(other, 'items'):
+ # Use getattr to avoid interference from 2to3.
+ for k, v in getattr(other, 'items')():
+ self[k] = v
+ elif hasattr(other, 'keys'):
+ for k in other.keys():
+ self[k] = other[k]
+ else:
+ for k, v in other:
+ self[k] = v
+ if kwargs:
+ self.update(kwargs)
+
+ def __getitem__(self, k):
+ try:
+ return getattr(self, self._prefix + k)
+ except AttributeError:
+ raise KeyError(k)
+
+ def get(self, key, default=None):
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def __contains__(self, k):
+ return hasattr(self, self._prefix + k)
+
+ def pop(self, key, *args):
+ if len(args) > 1:
+ raise TypeError(
+ "pop expected at most 2 arguments, got " + \
+ repr(1 + len(args)))
+ try:
+ value = self[key]
+ except KeyError:
+ if args:
+ return args[0]
+ raise
+ del self[key]
+ return value
+
+ def popitem(self):
+ try:
+ k, v = self.iteritems().next()
+ except StopIteration:
+ raise KeyError('container is empty')
+ del self[k]
+ return (k, v)
+
+ def copy(self):
+ c = self.__class__()
+ c.update(self)
+ return c
+
+ def clear(self):
+ for k in self.allowed_keys:
+ try:
+ delattr(self, self._prefix + k)
+ except AttributeError:
+ pass
+
+ def __str__(self):
+ return str(dict(self.iteritems()))
+
+ def __repr__(self):
+ return repr(dict(self.iteritems()))
+
+ if sys.hexversion >= 0x3000000:
+ items = iteritems
+ keys = __iter__
+ values = itervalues
+
+ v = SlotDict
+ _slot_dict_classes[v.allowed_keys] = v
+ return v
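A brief sketch of slot_dict_class() in use (the key names are hypothetical); instances accept only the fixed key set, which is what keeps their memory footprint below that of a regular dict:

    from portage.cache.mappings import slot_dict_class

    PkgData = slot_dict_class(('cp', 'slot'), prefix='_val_')
    d = PkgData(cp='app-misc/hello', slot='0')
    print(d['cp'])              # app-misc/hello
    print(sorted(d.keys()))     # ['cp', 'slot']
    # d['other'] = 'x' would raise AttributeError, since 'other' is not among the slots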
diff --git a/lib/portage/cache/metadata.py b/lib/portage/cache/metadata.py
new file mode 100644
index 000000000..59b25b606
--- /dev/null
+++ b/lib/portage/cache/metadata.py
@@ -0,0 +1,158 @@
+# Copyright 2005-2018 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
+import errno
+import re
+import stat
+import sys
+from operator import attrgetter
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage.cache import cache_errors, flat_hash
+import portage.eclass_cache
+from portage.cache.template import reconstruct_eclasses
+from portage.cache.mappings import ProtectedDict
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+ long = int
+
+# this is the old cache format, flat_list. The line count is maintained here.
+magic_line_count = 22
+
+# store the current key order *here*.
+class database(flat_hash.database):
+ complete_eclass_entries = False
+ auxdbkey_order=('DEPEND', 'RDEPEND', 'SLOT', 'SRC_URI',
+ 'RESTRICT', 'HOMEPAGE', 'LICENSE', 'DESCRIPTION',
+ 'KEYWORDS', 'INHERITED', 'IUSE', 'REQUIRED_USE',
+ 'PDEPEND', 'BDEPEND', 'EAPI', 'PROPERTIES',
+ 'DEFINED_PHASES', 'HDEPEND')
+
+ autocommits = True
+ serialize_eclasses = False
+
+ _hashed_re = re.compile('^(\\w+)=([^\n]*)')
+
+ def __init__(self, location, *args, **config):
+ loc = location
+ super(database, self).__init__(location, *args, **config)
+ self.location = os.path.join(loc, "metadata","cache")
+ self.ec = None
+ self.raise_stat_collision = False
+
+ def _parse_data(self, data, cpv):
+ _hashed_re_match = self._hashed_re.match
+ d = {}
+
+ for line in data:
+ hashed = False
+ hashed_match = _hashed_re_match(line)
+ if hashed_match is None:
+ d.clear()
+ try:
+ for i, key in enumerate(self.auxdbkey_order):
+ d[key] = data[i]
+ except IndexError:
+ pass
+ break
+ else:
+ d[hashed_match.group(1)] = hashed_match.group(2)
+
+ if "_eclasses_" not in d:
+ if "INHERITED" in d:
+ if self.ec is None:
+ self.ec = portage.eclass_cache.cache(self.location[:-15])
+ getter = attrgetter(self.validation_chf)
+ try:
+ ec_data = self.ec.get_eclass_data(d["INHERITED"].split())
+ d["_eclasses_"] = dict((k, (v.eclass_dir, getter(v)))
+ for k,v in ec_data.items())
+ except KeyError as e:
+ # INHERITED contains a non-existent eclass.
+ raise cache_errors.CacheCorruption(cpv, e)
+ else:
+ d["_eclasses_"] = {}
+ elif isinstance(d["_eclasses_"], basestring):
+ # We skip this if flat_hash.database._parse_data() was called above
+ # because it calls reconstruct_eclasses() internally.
+ d["_eclasses_"] = reconstruct_eclasses(None, d["_eclasses_"])
+
+ return d
+
+ def _setitem(self, cpv, values):
+ if "_eclasses_" in values:
+ values = ProtectedDict(values)
+ values["INHERITED"] = ' '.join(sorted(values["_eclasses_"]))
+
+ new_content = []
+ for k in self.auxdbkey_order:
+ new_content.append(values.get(k, ''))
+ new_content.append('\n')
+ for i in range(magic_line_count - len(self.auxdbkey_order)):
+ new_content.append('\n')
+ new_content = ''.join(new_content)
+ new_content = _unicode_encode(new_content,
+ _encodings['repo.content'], errors='backslashreplace')
+
+ new_fp = os.path.join(self.location, cpv)
+ try:
+ f = open(_unicode_encode(new_fp,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ except EnvironmentError:
+ pass
+ else:
+ try:
+ try:
+ existing_st = os.fstat(f.fileno())
+ existing_content = f.read()
+ finally:
+ f.close()
+ except EnvironmentError:
+ pass
+ else:
+ existing_mtime = existing_st[stat.ST_MTIME]
+ if values['_mtime_'] == existing_mtime and \
+ existing_content == new_content:
+ return
+
+ if self.raise_stat_collision and \
+ values['_mtime_'] == existing_mtime and \
+ len(new_content) == existing_st.st_size:
+ raise cache_errors.StatCollision(cpv, new_fp,
+ existing_mtime, existing_st.st_size)
+
+ s = cpv.rfind("/")
+ fp = os.path.join(self.location,cpv[:s],
+ ".update.%i.%s" % (os.getpid(), cpv[s+1:]))
+ try:
+ myf = open(_unicode_encode(fp,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ except EnvironmentError as e:
+ if errno.ENOENT == e.errno:
+ try:
+ self._ensure_dirs(cpv)
+ myf = open(_unicode_encode(fp,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ except EnvironmentError as e:
+ raise cache_errors.CacheCorruption(cpv, e)
+ else:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ try:
+ myf.write(new_content)
+ finally:
+ myf.close()
+ self._ensure_access(fp, mtime=values["_mtime_"])
+
+ try:
+ os.rename(fp, new_fp)
+ except EnvironmentError as e:
+ try:
+ os.unlink(fp)
+ except EnvironmentError:
+ pass
+ raise cache_errors.CacheCorruption(cpv, e)
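To make the flat_list layout written by _setitem() above concrete, here is a standalone recomputation within this module's namespace (the values dict is hypothetical); every auxdb key occupies a fixed line position and the file is padded to magic_line_count lines:

    values = {'SLOT': '0', 'EAPI': '7', 'DESCRIPTION': 'demo'}
    lines = [values.get(k, '') for k in database.auxdbkey_order]
    lines += [''] * (magic_line_count - len(database.auxdbkey_order))
    print(len(lines))   # 22; SLOT lands on line 3 and EAPI on line 15 (1-indexed)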
diff --git a/lib/portage/cache/sql_template.py b/lib/portage/cache/sql_template.py
new file mode 100644
index 000000000..d023b1b5d
--- /dev/null
+++ b/lib/portage/cache/sql_template.py
@@ -0,0 +1,301 @@
+# Copyright: 2005 Gentoo Foundation
+# Author(s): Brian Harring (ferringb@gentoo.org)
+# License: GPL2
+
+import sys
+from portage.cache import template, cache_errors
+from portage.cache.template import reconstruct_eclasses
+
+class SQLDatabase(template.database):
+ """template class for RDBM based caches
+
+ This class is designed such that derivatives don't have to change much code, mostly constant strings.
+ _BaseError must be an exception class that all Exceptions thrown from the derived RDBMS are derived
+ from.
+
+ SCHEMA_INSERT_CPV_INTO_PACKAGE should be modified depending on the RDBMS, as should SCHEMA_PACKAGE_CREATE;
+ basically you need to deal with creation of a unique pkgid. If the dbapi2 rdbms class has a method of
+ recovering that id, then modify _insert_cpv to remove the extra select.
+
+ Creation of a derived class involves supplying _initdb_con and _table_exists.
+ Additionally, the default schemas may have to be modified.
+ """
+
+ SCHEMA_PACKAGE_NAME = "package_cache"
+ SCHEMA_PACKAGE_CREATE = "CREATE TABLE %s (\
+ pkgid INTEGER PRIMARY KEY, label VARCHAR(255), cpv VARCHAR(255), UNIQUE(label, cpv))" % SCHEMA_PACKAGE_NAME
+ SCHEMA_PACKAGE_DROP = "DROP TABLE %s" % SCHEMA_PACKAGE_NAME
+
+ SCHEMA_VALUES_NAME = "values_cache"
+ SCHEMA_VALUES_CREATE = "CREATE TABLE %s ( pkgid integer references %s (pkgid) on delete cascade, \
+ key varchar(255), value text, UNIQUE(pkgid, key))" % (SCHEMA_VALUES_NAME, SCHEMA_PACKAGE_NAME)
+ SCHEMA_VALUES_DROP = "DROP TABLE %s" % SCHEMA_VALUES_NAME
+ SCHEMA_INSERT_CPV_INTO_PACKAGE = "INSERT INTO %s (label, cpv) VALUES(%%s, %%s)" % SCHEMA_PACKAGE_NAME
+
+ _BaseError = ()
+ _dbClass = None
+
+ autocommits = False
+# cleanse_keys = True
+
+ # boolean indicating if the derived RDBMS class supports replace syntax
+ _supports_replace = False
+
+ def __init__(self, location, label, auxdbkeys, *args, **config):
+ """initialize the instance.
+ derived classes shouldn't need to override this"""
+
+ super(SQLDatabase, self).__init__(location, label, auxdbkeys, *args, **config)
+
+ config.setdefault("host","127.0.0.1")
+ config.setdefault("autocommit", self.autocommits)
+ self._initdb_con(config)
+
+ self.label = self._sfilter(self.label)
+
+
+ def _dbconnect(self, config):
+ """should be overridden if the derived class needs special parameters for initializing
+ the db connection, or cursor"""
+ self.db = self._dbClass(**config)
+ self.con = self.db.cursor()
+
+
+ def _initdb_con(self,config):
+ """ensure needed tables are in place.
+ If the derived class needs a different set of table creation commands, overload the appropriate
+ SCHEMA_ attributes. If it needs additional execution beyond, override"""
+
+ self._dbconnect(config)
+ if not self._table_exists(self.SCHEMA_PACKAGE_NAME):
+ if self.readonly:
+ raise cache_errors.ReadOnlyRestriction("table %s doesn't exist" % \
+ self.SCHEMA_PACKAGE_NAME)
+ try:
+ self.con.execute(self.SCHEMA_PACKAGE_CREATE)
+ except self._BaseError as e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+ if not self._table_exists(self.SCHEMA_VALUES_NAME):
+ if self.readonly:
+ raise cache_errors.ReadOnlyRestriction("table %s doesn't exist" % \
+ self.SCHEMA_VALUES_NAME)
+ try:
+ self.con.execute(self.SCHEMA_VALUES_CREATE)
+ except self._BaseError as e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+
+ def _table_exists(self, tbl):
+ """return true if a table exists
+ derived classes must override this"""
+ raise NotImplementedError
+
+
+ def _sfilter(self, s):
+ """meta escaping, returns quoted string for use in sql statements"""
+ return "\"%s\"" % s.replace("\\","\\\\").replace("\"","\\\"")
+
+
+ def _getitem(self, cpv):
+ try:
+ self.con.execute("SELECT key, value FROM %s NATURAL JOIN %s "
+ "WHERE label=%s AND cpv=%s" % (self.SCHEMA_PACKAGE_NAME, self.SCHEMA_VALUES_NAME,
+ self.label, self._sfilter(cpv)))
+ except self._BaseError as e:
+ raise cache_errors.CacheCorruption(self, cpv, e)
+
+ rows = self.con.fetchall()
+
+ if len(rows) == 0:
+ raise KeyError(cpv)
+
+ vals = dict([(k,"") for k in self._known_keys])
+ vals.update(dict(rows))
+ return vals
+
+
+ def _delitem(self, cpv):
+ """delete a cpv cache entry
+ derived RDBM classes for this *must* either support cascaded deletes, or
+ override this method"""
+ try:
+ try:
+ self.con.execute("DELETE FROM %s WHERE label=%s AND cpv=%s" % \
+ (self.SCHEMA_PACKAGE_NAME, self.label, self._sfilter(cpv)))
+ if self.autocommits:
+ self.commit()
+ except self._BaseError as e:
+ raise cache_errors.CacheCorruption(self, cpv, e)
+ if self.con.rowcount <= 0:
+ raise KeyError(cpv)
+ except SystemExit:
+ raise
+ except Exception:
+ if not self.autocommits:
+ self.db.rollback()
+ # yes, this can roll back a lot more than just the delete. deal.
+ raise
+
+ def __del__(self):
+ # just to be safe.
+ if "db" in self.__dict__ and self.db != None:
+ self.commit()
+ self.db.close()
+
+ def _setitem(self, cpv, values):
+
+ try:
+ # insert.
+ try:
+ pkgid = self._insert_cpv(cpv)
+ except self._BaseError as e:
+ raise cache_errors.CacheCorruption(cpv, e)
+
+ # __getitem__ fills out missing values,
+ # so we store only what's handed to us and is a known key
+ db_values = []
+ for key in self._known_keys:
+ if key in values and values[key]:
+ db_values.append({"key":key, "value":values[key]})
+
+ if len(db_values) > 0:
+ try:
+ self.con.executemany("INSERT INTO %s (pkgid, key, value) VALUES(\"%s\", %%(key)s, %%(value)s)" % \
+ (self.SCHEMA_VALUES_NAME, str(pkgid)), db_values)
+ except self._BaseError as e:
+ raise cache_errors.CacheCorruption(cpv, e)
+ if self.autocommits:
+ self.commit()
+
+ except SystemExit:
+ raise
+ except Exception:
+ if not self.autocommits:
+ try:
+ self.db.rollback()
+ except self._BaseError:
+ pass
+ raise
+
+
+ def _insert_cpv(self, cpv):
+ """uses SCHEMA_INSERT_CPV_INTO_PACKAGE, which must be overloaded if the table definition
+ doesn't support auto-increment columns for pkgid.
+ returns the cpv's new pkgid.
+ note this doesn't commit the transaction. The caller is expected to."""
+
+ cpv = self._sfilter(cpv)
+ if self._supports_replace:
+ query_str = self.SCHEMA_INSERT_CPV_INTO_PACKAGE.replace("INSERT","REPLACE",1)
+ else:
+ # just delete it.
+ try:
+ del self[cpv]
+ except (cache_errors.CacheCorruption, KeyError):
+ pass
+ query_str = self.SCHEMA_INSERT_CPV_INTO_PACKAGE
+ try:
+ self.con.execute(query_str % (self.label, cpv))
+ except self._BaseError:
+ self.db.rollback()
+ raise
+ self.con.execute("SELECT pkgid FROM %s WHERE label=%s AND cpv=%s" % \
+ (self.SCHEMA_PACKAGE_NAME, self.label, cpv))
+
+ if self.con.rowcount != 1:
+ raise cache_errors.CacheCorruption(cpv, "Tried to insert the cpv, but found "
+ " %i matches upon the following select!" % self.con.rowcount)
+ return self.con.fetchone()[0]
+
+
+ def __contains__(self, cpv):
+ if not self.autocommits:
+ try:
+ self.commit()
+ except self._BaseError as e:
+ raise cache_errors.GeneralCacheCorruption(e)
+
+ try:
+ self.con.execute("SELECT cpv FROM %s WHERE label=%s AND cpv=%s" % \
+ (self.SCHEMA_PACKAGE_NAME, self.label, self._sfilter(cpv)))
+ except self._BaseError as e:
+ raise cache_errors.GeneralCacheCorruption(e)
+ return self.con.rowcount > 0
+
+
+ def __iter__(self):
+ if not self.autocommits:
+ try:
+ self.commit()
+ except self._BaseError as e:
+ raise cache_errors.GeneralCacheCorruption(e)
+
+ try:
+ self.con.execute("SELECT cpv FROM %s WHERE label=%s" %
+ (self.SCHEMA_PACKAGE_NAME, self.label))
+ except self._BaseError as e:
+ raise cache_errors.GeneralCacheCorruption(e)
+# return [ row[0] for row in self.con.fetchall() ]
+ for x in self.con.fetchall():
+ yield x[0]
+
+ def iteritems(self):
+ try:
+ self.con.execute("SELECT cpv, key, value FROM %s NATURAL JOIN %s "
+ "WHERE label=%s" % (self.SCHEMA_PACKAGE_NAME, self.SCHEMA_VALUES_NAME,
+ self.label))
+ except self._BaseError as e:
+ raise cache_errors.GeneralCacheCorruption(e)
+
+ oldcpv = None
+ l = []
+ for x, y, v in self.con.fetchall():
+ if oldcpv != x:
+ if oldcpv != None:
+ d = dict(l)
+ if "_eclasses_" in d:
+ d["_eclasses_"] = reconstruct_eclasses(oldcpv, d["_eclasses_"])
+ else:
+ d["_eclasses_"] = {}
+ yield oldcpv, d
+ del l[:]
+ oldcpv = x
+ l.append((y,v))
+ if oldcpv != None:
+ d = dict(l)
+ if "_eclasses_" in d:
+ d["_eclasses_"] = reconstruct_eclasses(oldcpv, d["_eclasses_"])
+ else:
+ d["_eclasses_"] = {}
+ yield oldcpv, d
+
+ def commit(self):
+ self.db.commit()
+
+ def get_matches(self,match_dict):
+ query_list = []
+ for k,v in match_dict.items():
+ if k not in self._known_keys:
+ raise cache_errors.InvalidRestriction(k, v, "key isn't known to this cache instance")
+ v = v.replace("%","\\%")
+ v = v.replace(".*","%")
+ query_list.append("(key=%s AND value LIKE %s)" % (self._sfilter(k), self._sfilter(v)))
+
+ if len(query_list):
+ query = " AND "+" AND ".join(query_list)
+ else:
+ query = ''
+
+ print("query = SELECT cpv from package_cache natural join values_cache WHERE label=%s %s" % (self.label, query))
+ try:
+ self.con.execute("SELECT cpv from package_cache natural join values_cache WHERE label=%s %s" % \
+ (self.label, query))
+ except self._BaseError as e:
+ raise cache_errors.GeneralCacheCorruption(e)
+
+ return [ row[0] for row in self.con.fetchall() ]
+
+ if sys.hexversion >= 0x3000000:
+ items = iteritems
+ keys = __iter__
diff --git a/lib/portage/cache/sqlite.py b/lib/portage/cache/sqlite.py
new file mode 100644
index 000000000..69150f679
--- /dev/null
+++ b/lib/portage/cache/sqlite.py
@@ -0,0 +1,285 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division, unicode_literals
+
+import re
+import sys
+from portage.cache import fs_template
+from portage.cache import cache_errors
+from portage import os
+from portage import _unicode_decode
+from portage.util import writemsg
+from portage.localization import _
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+class database(fs_template.FsBased):
+
+ validation_chf = 'md5'
+ chf_types = ('md5', 'mtime')
+
+ autocommits = False
+ synchronous = False
+ # cache_bytes is used together with page_size (set at sqlite build time)
+ # to calculate the number of pages requested, according to the following
+ # equation: cache_bytes = page_bytes * page_count
+ cache_bytes = 1024 * 1024 * 10
+
+ def __init__(self, *args, **config):
+ super(database, self).__init__(*args, **config)
+ self._import_sqlite()
+ self._allowed_keys = ["_eclasses_"]
+ self._allowed_keys.extend(self._known_keys)
+ self._allowed_keys.extend('_%s_' % k for k in self.chf_types)
+ self._allowed_keys_set = frozenset(self._allowed_keys)
+ self._allowed_keys = sorted(self._allowed_keys_set)
+
+ self.location = os.path.join(self.location,
+ self.label.lstrip(os.path.sep).rstrip(os.path.sep))
+
+ if not self.readonly and not os.path.exists(self.location):
+ self._ensure_dirs()
+
+ config.setdefault("autocommit", self.autocommits)
+ config.setdefault("cache_bytes", self.cache_bytes)
+ config.setdefault("synchronous", self.synchronous)
+ # Set longer timeout for throwing a "database is locked" exception.
+ # Default timeout in sqlite3 module is 5.0 seconds.
+ config.setdefault("timeout", 15)
+ self._db_init_connection(config)
+ self._db_init_structures()
+
+ def _import_sqlite(self):
+ # sqlite3 is optional with >=python-2.5
+ try:
+ import sqlite3 as db_module
+ except ImportError as e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+ self._db_module = db_module
+ self._db_error = db_module.Error
+
+ def _db_escape_string(self, s):
+ """meta escaping, returns quoted string for use in sql statements"""
+ if not isinstance(s, basestring):
+ # Avoid potential UnicodeEncodeError in python-2.x by
+ # only calling str() when it's absolutely necessary.
+ s = str(s)
+ return "'%s'" % s.replace("'", "''")
+
+ def _db_init_connection(self, config):
+ self._dbpath = self.location + ".sqlite"
+ #if os.path.exists(self._dbpath):
+ # os.unlink(self._dbpath)
+ connection_kwargs = {}
+ connection_kwargs["timeout"] = config["timeout"]
+ try:
+ if not self.readonly:
+ self._ensure_dirs()
+ self._db_connection = self._db_module.connect(
+ database=_unicode_decode(self._dbpath), **connection_kwargs)
+ self._db_cursor = self._db_connection.cursor()
+ self._db_cursor.execute("PRAGMA encoding = %s" % self._db_escape_string("UTF-8"))
+ if not self.readonly and not self._ensure_access(self._dbpath):
+ raise cache_errors.InitializationError(self.__class__, "can't ensure perms on %s" % self._dbpath)
+ self._db_init_cache_size(config["cache_bytes"])
+ self._db_init_synchronous(config["synchronous"])
+ except self._db_error as e:
+ raise cache_errors.InitializationError(self.__class__, e)
+
+ def _db_init_structures(self):
+ self._db_table = {}
+ self._db_table["packages"] = {}
+ mytable = "portage_packages"
+ self._db_table["packages"]["table_name"] = mytable
+ self._db_table["packages"]["package_id"] = "internal_db_package_id"
+ self._db_table["packages"]["package_key"] = "portage_package_key"
+ create_statement = []
+ create_statement.append("CREATE TABLE")
+ create_statement.append(mytable)
+ create_statement.append("(")
+ table_parameters = []
+ table_parameters.append("%s INTEGER PRIMARY KEY AUTOINCREMENT" % self._db_table["packages"]["package_id"])
+ table_parameters.append("%s TEXT" % self._db_table["packages"]["package_key"])
+ for k in self._allowed_keys:
+ table_parameters.append("%s TEXT" % k)
+ table_parameters.append("UNIQUE(%s)" % self._db_table["packages"]["package_key"])
+ create_statement.append(",".join(table_parameters))
+ create_statement.append(")")
+
+ self._db_table["packages"]["create"] = " ".join(create_statement)
+
+ cursor = self._db_cursor
+ for k, v in self._db_table.items():
+ if self._db_table_exists(v["table_name"]):
+ create_statement = self._db_table_get_create(v["table_name"])
+ table_ok, missing_keys = self._db_validate_create_statement(create_statement)
+ if table_ok:
+ if missing_keys:
+ for k in sorted(missing_keys):
+ cursor.execute("ALTER TABLE %s ADD COLUMN %s TEXT" %
+ (self._db_table["packages"]["table_name"], k))
+ else:
+ writemsg(_("sqlite: dropping old table: %s\n") % v["table_name"])
+ cursor.execute("DROP TABLE %s" % v["table_name"])
+ cursor.execute(v["create"])
+ else:
+ cursor.execute(v["create"])
+
+ def _db_table_exists(self, table_name):
+ """return true/false dependant on a tbl existing"""
+ cursor = self._db_cursor
+ cursor.execute("SELECT name FROM sqlite_master WHERE type=\"table\" AND name=%s" % \
+ self._db_escape_string(table_name))
+ return len(cursor.fetchall()) == 1
+
+ def _db_table_get_create(self, table_name):
+ """return true/false dependant on a tbl existing"""
+ cursor = self._db_cursor
+ cursor.execute("SELECT sql FROM sqlite_master WHERE name=%s" % \
+ self._db_escape_string(table_name))
+ return cursor.fetchall()[0][0]
+
+ def _db_validate_create_statement(self, statement):
+ missing_keys = None
+ if statement == self._db_table["packages"]["create"]:
+ return True, missing_keys
+
+ m = re.match(r'^\s*CREATE\s*TABLE\s*%s\s*\(\s*%s\s*INTEGER\s*PRIMARY\s*KEY\s*AUTOINCREMENT\s*,(.*)\)\s*$' %
+ (self._db_table["packages"]["table_name"],
+ self._db_table["packages"]["package_id"]),
+ statement)
+ if m is None:
+ return False, missing_keys
+
+ unique_constraints = set([self._db_table["packages"]["package_key"]])
+ missing_keys = set(self._allowed_keys)
+ unique_re = re.compile(r'^\s*UNIQUE\s*\(\s*(\w*)\s*\)\s*$')
+ column_re = re.compile(r'^\s*(\w*)\s*TEXT\s*$')
+ for x in m.group(1).split(","):
+ m = column_re.match(x)
+ if m is not None:
+ missing_keys.discard(m.group(1))
+ continue
+ m = unique_re.match(x)
+ if m is not None:
+ unique_constraints.discard(m.group(1))
+ continue
+
+ if unique_constraints:
+ return False, missing_keys
+
+ return True, missing_keys
+
+ def _db_init_cache_size(self, cache_bytes):
+ cursor = self._db_cursor
+ cursor.execute("PRAGMA page_size")
+ page_size=int(cursor.fetchone()[0])
+ # number of pages, sqlite default is 2000
+ cache_size = cache_bytes // page_size
+ cursor.execute("PRAGMA cache_size = %d" % cache_size)
+ cursor.execute("PRAGMA cache_size")
+ actual_cache_size = int(cursor.fetchone()[0])
+ del cursor
+ if actual_cache_size != cache_size:
+ raise cache_errors.InitializationError(self.__class__,"actual cache_size = "+actual_cache_size+" does does not match requested size of "+cache_size)
+
+ def _db_init_synchronous(self, synchronous):
+ cursor = self._db_cursor
+ cursor.execute("PRAGMA synchronous = %d" % synchronous)
+ cursor.execute("PRAGMA synchronous")
+ actual_synchronous=int(cursor.fetchone()[0])
+ del cursor
+ if actual_synchronous!=synchronous:
+ raise cache_errors.InitializationError(self.__class__,"actual synchronous = "+actual_synchronous+" does does not match requested value of "+synchronous)
+
+ def _getitem(self, cpv):
+ cursor = self._db_cursor
+ cursor.execute("select * from %s where %s=%s" % \
+ (self._db_table["packages"]["table_name"],
+ self._db_table["packages"]["package_key"],
+ self._db_escape_string(cpv)))
+ result = cursor.fetchall()
+ if len(result) == 1:
+ pass
+ elif len(result) == 0:
+ raise KeyError(cpv)
+ else:
+ raise cache_errors.CacheCorruption(cpv, "key is not unique")
+ result = result[0]
+ d = {}
+ allowed_keys_set = self._allowed_keys_set
+ for column_index, column_info in enumerate(cursor.description):
+ k = column_info[0]
+ if k in allowed_keys_set:
+ v = result[column_index]
+ if v is None:
+ # This happens after a new empty column has been added.
+ v = ""
+ d[k] = v
+
+ return d
+
+ def _setitem(self, cpv, values):
+ update_statement = []
+ update_statement.append("REPLACE INTO %s" % self._db_table["packages"]["table_name"])
+ update_statement.append("(")
+ update_statement.append(','.join([self._db_table["packages"]["package_key"]] + self._allowed_keys))
+ update_statement.append(")")
+ update_statement.append("VALUES")
+ update_statement.append("(")
+ values_parameters = []
+ values_parameters.append(self._db_escape_string(cpv))
+ for k in self._allowed_keys:
+ values_parameters.append(self._db_escape_string(values.get(k, '')))
+ update_statement.append(",".join(values_parameters))
+ update_statement.append(")")
+ cursor = self._db_cursor
+ try:
+ s = " ".join(update_statement)
+ cursor.execute(s)
+ except self._db_error as e:
+ writemsg("%s: %s\n" % (cpv, str(e)))
+ raise
+
+ def commit(self):
+ self._db_connection.commit()
+
+ def _delitem(self, cpv):
+ cursor = self._db_cursor
+ cursor.execute("DELETE FROM %s WHERE %s=%s" % \
+ (self._db_table["packages"]["table_name"],
+ self._db_table["packages"]["package_key"],
+ self._db_escape_string(cpv)))
+
+ def __contains__(self, cpv):
+ cursor = self._db_cursor
+ cursor.execute(" ".join(
+ ["SELECT %s FROM %s" %
+ (self._db_table["packages"]["package_id"],
+ self._db_table["packages"]["table_name"]),
+ "WHERE %s=%s" % (
+ self._db_table["packages"]["package_key"],
+ self._db_escape_string(cpv))]))
+ result = cursor.fetchall()
+ if len(result) == 0:
+ return False
+ elif len(result) == 1:
+ return True
+ else:
+ raise cache_errors.CacheCorruption(cpv, "key is not unique")
+
+ def __iter__(self):
+ """generator for walking the dir struct"""
+ cursor = self._db_cursor
+ cursor.execute("SELECT %s FROM %s" % \
+ (self._db_table["packages"]["package_key"],
+ self._db_table["packages"]["table_name"]))
+ result = cursor.fetchall()
+ key_list = [x[0] for x in result]
+ del result
+ while key_list:
+ yield key_list.pop()
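As a worked example of the cache sizing comment near the top of this file (the 4 KiB page size is only an assumption; the real value is reported by PRAGMA page_size):

    cache_bytes = 1024 * 1024 * 10        # default from the class above
    page_bytes = 4096                     # assumed SQLite page size
    page_count = cache_bytes // page_bytes
    print(page_count)                     # 2560 pages passed to PRAGMA cache_size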
diff --git a/lib/portage/cache/template.py b/lib/portage/cache/template.py
new file mode 100644
index 000000000..8662d859f
--- /dev/null
+++ b/lib/portage/cache/template.py
@@ -0,0 +1,373 @@
+# Copyright 2005-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author(s): Brian Harring (ferringb@gentoo.org)
+
+from portage.cache import cache_errors
+from portage.cache.cache_errors import InvalidRestriction
+from portage.cache.mappings import ProtectedDict
+import sys
+import warnings
+import operator
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ _unicode = str
+ basestring = str
+ long = int
+else:
+ _unicode = unicode
+
+class database(object):
+ # this is for metadata/cache transfer.
+ # basically flags whether the cache needs to be updated when transferring cache to cache.
+ # leave this.
+
+ complete_eclass_entries = True
+ autocommits = False
+ cleanse_keys = False
+ serialize_eclasses = True
+ validation_chf = 'mtime'
+ store_eclass_paths = True
+
+ def __init__(self, location, label, auxdbkeys, readonly=False):
+ """ initialize the derived class; specifically, store label/keys"""
+ self._known_keys = auxdbkeys
+ self.location = location
+ self.label = label
+ self.readonly = readonly
+ self.sync_rate = 0
+ self.updates = 0
+
+ def __getitem__(self, cpv):
+ """set a cpv to values
+ This shouldn't be overriden in derived classes since it handles the __eclasses__ conversion.
+ that said, if the class handles it, they can override it."""
+ if self.updates > self.sync_rate:
+ self.commit()
+ self.updates = 0
+ d=self._getitem(cpv)
+
+ try:
+ chf_types = self.chf_types
+ except AttributeError:
+ chf_types = (self.validation_chf,)
+
+ if self.serialize_eclasses and "_eclasses_" in d:
+ for chf_type in chf_types:
+ if '_%s_' % chf_type not in d:
+ # Skip the reconstruct_eclasses call, since it's
+ # a waste of time if it contains a different chf_type
+ # than the current one. In the past, it was possible
+ # for reconstruct_eclasses called with chf_type='md5'
+ # to "successfully" return invalid data here, because
+ # it was unable to distinguish between md5 data and
+ # mtime data.
+ continue
+ try:
+ d["_eclasses_"] = reconstruct_eclasses(cpv, d["_eclasses_"],
+ chf_type, paths=self.store_eclass_paths)
+ except cache_errors.CacheCorruption:
+ if chf_type is chf_types[-1]:
+ raise
+ else:
+ break
+ else:
+ raise cache_errors.CacheCorruption(cpv,
+ 'entry does not contain a recognized chf_type')
+
+ elif "_eclasses_" not in d:
+ d["_eclasses_"] = {}
+ # Never return INHERITED, since portdbapi.aux_get() will
+ # generate it automatically from _eclasses_, and we want
+ # to omit it in comparisons between cache entries like
+ # those that egencache uses to avoid redundant writes.
+ d.pop("INHERITED", None)
+
+ mtime_required = not any(d.get('_%s_' % x)
+ for x in chf_types if x != 'mtime')
+
+ mtime = d.get('_mtime_')
+ if not mtime:
+ if mtime_required:
+ raise cache_errors.CacheCorruption(cpv,
+ '_mtime_ field is missing')
+ d.pop('_mtime_', None)
+ else:
+ try:
+ mtime = long(mtime)
+ except ValueError:
+ raise cache_errors.CacheCorruption(cpv,
+ '_mtime_ conversion to long failed: %s' % (mtime,))
+ d['_mtime_'] = mtime
+ return d
+
+ def _getitem(self, cpv):
+ """get cpv's values.
+ override this in derived classes"""
+ raise NotImplementedError
+
+ @staticmethod
+ def _internal_eclasses(extern_ec_dict, chf_type, paths):
+ """
+ When serialize_eclasses is False, we have to convert an external
+ eclass dict containing hashed_path objects into an appropriate
+ internal dict containing values of chf_type (and eclass dirs
+ if store_eclass_paths is True).
+ """
+ if not extern_ec_dict:
+ return extern_ec_dict
+ chf_getter = operator.attrgetter(chf_type)
+ if paths:
+ intern_ec_dict = dict((k, (v.eclass_dir, chf_getter(v)))
+ for k, v in extern_ec_dict.items())
+ else:
+ intern_ec_dict = dict((k, chf_getter(v))
+ for k, v in extern_ec_dict.items())
+ return intern_ec_dict
+
+ def __setitem__(self, cpv, values):
+ """set a cpv to values
+ This shouldn't be overridden in derived classes since it handles the readonly checks"""
+ if self.readonly:
+ raise cache_errors.ReadOnlyRestriction()
+ d = None
+ if self.cleanse_keys:
+ d=ProtectedDict(values)
+ for k, v in list(d.items()):
+ if not v:
+ del d[k]
+ if "_eclasses_" in values:
+ if d is None:
+ d = ProtectedDict(values)
+ if self.serialize_eclasses:
+ d["_eclasses_"] = serialize_eclasses(d["_eclasses_"],
+ self.validation_chf, paths=self.store_eclass_paths)
+ else:
+ d["_eclasses_"] = self._internal_eclasses(d["_eclasses_"],
+ self.validation_chf, self.store_eclass_paths)
+ elif d is None:
+ d = values
+ self._setitem(cpv, d)
+ if not self.autocommits:
+ self.updates += 1
+ if self.updates > self.sync_rate:
+ self.commit()
+ self.updates = 0
+
+ def _setitem(self, name, values):
+ """__setitem__ calls this after readonly checks. override it in derived classes
+ note the _eclasses_ key *must* be handled"""
+ raise NotImplementedError
+
+ def __delitem__(self, cpv):
+ """delete a key from the cache.
+ This shouldn't be overridden in derived classes since it handles the readonly checks"""
+ if self.readonly:
+ raise cache_errors.ReadOnlyRestriction()
+ if not self.autocommits:
+ self.updates += 1
+ self._delitem(cpv)
+ if self.updates > self.sync_rate:
+ self.commit()
+ self.updates = 0
+
+ def _delitem(self,cpv):
+ """__delitem__ calls this after readonly checks. override it in derived classes"""
+ raise NotImplementedError
+
+ def has_key(self, cpv):
+ return cpv in self
+
+ def keys(self):
+ return list(self)
+
+ def iterkeys(self):
+ return iter(self)
+
+ def iteritems(self):
+ for x in self:
+ yield (x, self[x])
+
+ def items(self):
+ return list(self.iteritems())
+
+ def sync(self, rate=0):
+ self.sync_rate = rate
+ if rate == 0:
+ self.commit()
+
+ def commit(self):
+ if not self.autocommits:
+ raise NotImplementedError(self)
+
+ def __del__(self):
+ # This used to be handled by an atexit hook that called
+ # close_portdbapi_caches() for all portdbapi instances, but that was
+ # prone to memory leaks for API consumers that needed to create/destroy
+ # many portdbapi instances. So, instead we rely on __del__.
+ self.sync()
+
+ def __contains__(self, cpv):
+ """This method should always be overridden. It is provided only for
+ backward compatibility with modules that override has_key instead. It
+ will automatically raise a NotImplementedError if has_key has not been
+ overridden."""
+ if self.has_key is database.has_key:
+ # prevent a possible recursive loop
+ raise NotImplementedError
+ warnings.warn("portage.cache.template.database.has_key() is "
+ "deprecated, override __contains__ instead",
+ DeprecationWarning)
+ return self.has_key(cpv)
+
+ def __iter__(self):
+ """This method should always be overridden. It is provided only for
+ backward compatibility with modules that override iterkeys instead. It
+ will automatically raise a NotImplementedError if iterkeys has not been
+ overridden."""
+ if self.iterkeys is database.iterkeys:
+ # prevent a possible recursive loop
+ raise NotImplementedError(self)
+ return iter(self.keys())
+
+ def get(self, k, x=None):
+ try:
+ return self[k]
+ except KeyError:
+ return x
+
+ def validate_entry(self, entry, ebuild_hash, eclass_db):
+ try:
+ chf_types = self.chf_types
+ except AttributeError:
+ chf_types = (self.validation_chf,)
+
+ for chf_type in chf_types:
+ if self._validate_entry(chf_type, entry, ebuild_hash, eclass_db):
+ return True
+
+ return False
+
+ def _validate_entry(self, chf_type, entry, ebuild_hash, eclass_db):
+ hash_key = '_%s_' % chf_type
+ try:
+ entry_hash = entry[hash_key]
+ except KeyError:
+ return False
+ else:
+ if entry_hash != getattr(ebuild_hash, chf_type):
+ return False
+ update = eclass_db.validate_and_rewrite_cache(entry['_eclasses_'], chf_type,
+ self.store_eclass_paths)
+ if update is None:
+ return False
+ if update:
+ entry['_eclasses_'] = update
+ return True
+
+ def get_matches(self, match_dict):
+ """generic function for walking the entire cache db, matching restrictions to
+ filter what cpv's are returned. Derived classes should override this if they
+ can implement a faster method then pulling each cpv:values, and checking it.
+
+ For example, RDBMS derived classes should push the matching logic down to the
+ actual RDBM."""
+
+ import re
+ restricts = {}
+ for key,match in match_dict.items():
+ # XXX this sucks.
+ try:
+ if isinstance(match, basestring):
+ restricts[key] = re.compile(match).match
+ else:
+ restricts[key] = re.compile(match[0],match[1]).match
+ except re.error as e:
+ raise InvalidRestriction(key, match, e)
+ if key not in self._known_keys:
+ raise InvalidRestriction(key, match, "Key isn't valid")
+
+ for cpv in self:
+ cont = True
+ vals = self[cpv]
+ for key, match in restricts.items():
+ if not match(vals[key]):
+ cont = False
+ break
+ if cont:
+ yield cpv
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+ items = iteritems
+
+_keysorter = operator.itemgetter(0)
+
+def serialize_eclasses(eclass_dict, chf_type='mtime', paths=True):
+ """takes a dict, returns a string representing said dict"""
+ """The "new format", which causes older versions of <portage-2.1.2 to
+ traceback with a ValueError due to failed long() conversion. This format
+ isn't currently written, but the capability to read it is already built
+ in.
+ return "\t".join(["%s\t%s" % (k, str(v)) \
+ for k, v in eclass_dict.iteritems()])
+ """
+ if not eclass_dict:
+ return ""
+ getter = operator.attrgetter(chf_type)
+ if paths:
+ return "\t".join("%s\t%s\t%s" % (k, v.eclass_dir, getter(v))
+ for k, v in sorted(eclass_dict.items(), key=_keysorter))
+ return "\t".join("%s\t%s" % (k, getter(v))
+ for k, v in sorted(eclass_dict.items(), key=_keysorter))
+
+
+def _md5_deserializer(md5):
+ """
+ Without this validation, it's possible for reconstruct_eclasses to
+ mistakenly interpret mtime data as md5 data, and return an invalid
+ data structure containing strings where ints are expected.
+ """
+ if len(md5) != 32:
+ raise ValueError('expected 32 hex digits')
+ return md5
+
+
+_chf_deserializers = {
+ 'md5': _md5_deserializer,
+ 'mtime': long,
+}
+
+
+def reconstruct_eclasses(cpv, eclass_string, chf_type='mtime', paths=True):
+ """returns a dict when handed a string generated by serialize_eclasses"""
+ eclasses = eclass_string.rstrip().lstrip().split("\t")
+ if eclasses == [""]:
+ # occasionally this occurs in the fs backends. they suck.
+ return {}
+
+ converter = _chf_deserializers.get(chf_type, lambda x: x)
+
+ if paths:
+ if len(eclasses) % 3 != 0:
+ raise cache_errors.CacheCorruption(cpv, "_eclasses_ was of invalid len %i" % len(eclasses))
+ elif len(eclasses) % 2 != 0:
+ raise cache_errors.CacheCorruption(cpv, "_eclasses_ was of invalid len %i" % len(eclasses))
+ d={}
+ try:
+ i = iter(eclasses)
+ if paths:
+ # The old format contains paths that will be discarded.
+ for name, path, val in zip(i, i, i):
+ d[name] = (path, converter(val))
+ else:
+ for name, val in zip(i, i):
+ d[name] = converter(val)
+ except IndexError:
+ raise cache_errors.CacheCorruption(cpv,
+ "_eclasses_ was of invalid len %i" % len(eclasses))
+ except ValueError:
+ raise cache_errors.CacheCorruption(cpv,
+ "_eclasses_ not valid for chf_type {}".format(chf_type))
+ del eclasses
+ return d
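A round-trip sketch of the two helpers above (the hashed_path stand-in is a hypothetical namedtuple exposing the eclass_dir and mtime attributes that serialize_eclasses() reads):

    import collections

    HashedPath = collections.namedtuple('HashedPath', ('eclass_dir', 'mtime'))
    ec = {'eutils': HashedPath('/usr/portage/eclass', 1234567890)}

    s = serialize_eclasses(ec, chf_type='mtime', paths=True)
    print(repr(s))      # 'eutils\t/usr/portage/eclass\t1234567890'
    print(reconstruct_eclasses('app-misc/hello-1.0', s, chf_type='mtime', paths=True))
    # {'eutils': ('/usr/portage/eclass', 1234567890)}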
diff --git a/lib/portage/cache/volatile.py b/lib/portage/cache/volatile.py
new file mode 100644
index 000000000..55167451b
--- /dev/null
+++ b/lib/portage/cache/volatile.py
@@ -0,0 +1,30 @@
+# Copyright 1999-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import copy
+from portage.cache import template
+
+class database(template.database):
+
+ autocommits = True
+ serialize_eclasses = False
+ store_eclass_paths = False
+
+ def __init__(self, *args, **config):
+ config.pop("gid", None)
+ config.pop("perms", None)
+ super(database, self).__init__(*args, **config)
+ self._data = {}
+ self._delitem = self._data.__delitem__
+
+ def _setitem(self, name, values):
+ self._data[name] = copy.deepcopy(values)
+
+ def __getitem__(self, cpv):
+ return copy.deepcopy(self._data[cpv])
+
+ def __iter__(self):
+ return iter(self._data)
+
+ def __contains__(self, key):
+ return key in self._data
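A usage sketch of this in-memory backend (location, label and keys are hypothetical); unlike the filesystem backends nothing is persisted, and lookups hand back deep copies so callers cannot mutate the cached entry:

    from portage.cache import volatile

    db = volatile.database('/unused', 'demo', ('SLOT', 'EAPI'))
    db['app-misc/hello-1.0'] = {'EAPI': '7', 'SLOT': '0'}
    entry = db['app-misc/hello-1.0']
    entry['EAPI'] = '8'                       # mutates only the copy
    print(db['app-misc/hello-1.0']['EAPI'])   # still '7'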
diff --git a/lib/portage/checksum.py b/lib/portage/checksum.py
new file mode 100644
index 000000000..4174638e6
--- /dev/null
+++ b/lib/portage/checksum.py
@@ -0,0 +1,583 @@
+# checksum.py -- core Portage functionality
+# Copyright 1998-2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage.const import PRELINK_BINARY, HASHING_BLOCKSIZE
+from portage.localization import _
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode, _unicode_encode
+import errno
+import functools
+import hashlib
+import stat
+import sys
+import subprocess
+import tempfile
+
+
+# Summary of all available hashes and their implementations,
+# most preferred first. Please keep this in sync with logic below.
+# ================================================================
+#
+# MD5: hashlib
+# SHA1: hashlib
+# SHA256: hashlib
+# SHA512: hashlib
+# RMD160: hashlib, pycrypto, mhash
+# WHIRLPOOL: hashlib, mhash, bundled
+# BLAKE2B (512): hashlib (3.6+), pyblake2, pycrypto
+# BLAKE2S (512): hashlib (3.6+), pyblake2, pycrypto
+# SHA3_256: hashlib (3.6+), pysha3, pycrypto
+# SHA3_512: hashlib (3.6+), pysha3, pycrypto
+
+
+#dict of all available hash functions
+hashfunc_map = {}
+hashorigin_map = {}
+
+def _open_file(filename):
+ try:
+ return open(_unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ except IOError as e:
+ func_call = "open('%s')" % _unicode_decode(filename)
+ if e.errno == errno.EPERM:
+ raise portage.exception.OperationNotPermitted(func_call)
+ elif e.errno == errno.EACCES:
+ raise portage.exception.PermissionDenied(func_call)
+ elif e.errno == errno.ENOENT:
+ raise portage.exception.FileNotFound(filename)
+ else:
+ raise
+
+class _generate_hash_function(object):
+
+ __slots__ = ("_hashobject",)
+
+ def __init__(self, hashtype, hashobject, origin="unknown"):
+ self._hashobject = hashobject
+ hashfunc_map[hashtype] = self
+ hashorigin_map[hashtype] = origin
+
+ def checksum_str(self, data):
+ """
+ Obtain a checksum of a byte-string.
+
+ @param data: Data to hash
+ @type data: bytes
+ @return: The hash of the data (hex-digest)
+ """
+ checksum = self._hashobject()
+ checksum.update(data)
+ return checksum.hexdigest()
+
+ def checksum_file(self, filename):
+ """
+ Run a checksum against a file.
+
+ @param filename: File to run the checksum against
+ @type filename: String
+ @return: The hash and size of the data
+ """
+ with _open_file(filename) as f:
+ blocksize = HASHING_BLOCKSIZE
+ size = 0
+ checksum = self._hashobject()
+ data = f.read(blocksize)
+ while data:
+ checksum.update(data)
+ size = size + len(data)
+ data = f.read(blocksize)
+
+ return (checksum.hexdigest(), size)
+
+
+# Define hash functions, try to use the best module available. Preferred
+# modules should go first, latter ones should check if the hashes aren't
+# already defined.
+
+
+# Use hashlib whenever possible and prefer it over pycrypto and internal fallbacks.
+# RMD160/WHIRLPOOL need special handling since they may not always be provided by hashlib.
+_generate_hash_function("MD5", hashlib.md5, origin="hashlib")
+_generate_hash_function("SHA1", hashlib.sha1, origin="hashlib")
+_generate_hash_function("SHA256", hashlib.sha256, origin="hashlib")
+_generate_hash_function("SHA512", hashlib.sha512, origin="hashlib")
+for local_name, hash_name in (
+ ("RMD160", "ripemd160"),
+ ("WHIRLPOOL", "whirlpool"),
+ # available since Python 3.6
+ ("BLAKE2B", "blake2b"),
+ ("BLAKE2S", "blake2s"),
+ ("SHA3_256", "sha3_256"),
+ ("SHA3_512", "sha3_512"),
+ ):
+ try:
+ hashlib.new(hash_name)
+ except ValueError:
+ pass
+ else:
+ _generate_hash_function(local_name,
+ functools.partial(hashlib.new, hash_name),
+ origin='hashlib')
+
+
+# Support using pyblake2 as fallback for python<3.6
+if "BLAKE2B" not in hashfunc_map or "BLAKE2S" not in hashfunc_map:
+ try:
+ import pyblake2
+
+ _generate_hash_function("BLAKE2B", pyblake2.blake2b, origin="pyblake2")
+ _generate_hash_function("BLAKE2S", pyblake2.blake2s, origin="pyblake2")
+ except ImportError:
+ pass
+
+
+# Support using pysha3 as fallback for python<3.6
+if "SHA3_256" not in hashfunc_map or "SHA3_512" not in hashfunc_map:
+ try:
+ import sha3
+
+ _generate_hash_function("SHA3_256", sha3.sha3_256, origin="pysha3")
+ _generate_hash_function("SHA3_512", sha3.sha3_512, origin="pysha3")
+ except ImportError:
+ pass
+
+
+# Support pygcrypt as fallback using optimized routines from libgcrypt
+# (GnuPG).
+gcrypt_algos = frozenset(('RMD160', 'WHIRLPOOL', 'SHA3_256', 'SHA3_512',
+ 'STREEBOG256', 'STREEBOG512'))
+# Note: currently disabled due to resource exhaustion bugs in pygcrypt.
+# Please do not reenable until upstream has a fix.
+# https://bugs.gentoo.org/615620
+if False:
+#if gcrypt_algos.difference(hashfunc_map):
+ try:
+ import binascii
+ import pygcrypt.hashcontext
+
+ class GCryptHashWrapper(object):
+ def __init__(self, algo):
+ self._obj = pygcrypt.hashcontext.HashContext(algo=algo,
+ secure=False)
+
+ def update(self, data):
+ self._obj.write(data)
+
+ def hexdigest(self):
+ return binascii.b2a_hex(self._obj.read()).decode()
+
+ name_mapping = {
+ 'RMD160': 'ripemd160',
+ 'WHIRLPOOL': 'whirlpool',
+ 'SHA3_256': 'sha3-256',
+ 'SHA3_512': 'sha3-512',
+ 'STREEBOG256': 'stribog256',
+ 'STREEBOG512': 'stribog512',
+ }
+
+ for local_name, gcry_name in name_mapping.items():
+ try:
+ pygcrypt.hashcontext.HashContext(algo=gcry_name)
+ except Exception: # yes, it throws Exception...
+ pass
+ else:
+ _generate_hash_function(local_name,
+ functools.partial(GCryptHashWrapper, gcry_name),
+ origin="pygcrypt")
+ except ImportError:
+ pass
+
+
+# Use pycrypto when available, prefer it over the internal fallbacks
+# Check for 'new' attributes, since they can be missing if the module
+# is broken somehow.
+if 'RMD160' not in hashfunc_map:
+ try:
+ from Crypto.Hash import RIPEMD
+ rmd160hash_ = getattr(RIPEMD, 'new', None)
+ if rmd160hash_ is not None:
+ _generate_hash_function("RMD160",
+ rmd160hash_, origin="pycrypto")
+ except ImportError:
+ pass
+
+# The following hashes were added in pycryptodome (pycrypto fork)
+if 'BLAKE2B' not in hashfunc_map:
+ try:
+ from Crypto.Hash import BLAKE2b
+ blake2bhash_ = getattr(BLAKE2b, 'new', None)
+ if blake2bhash_ is not None:
+ _generate_hash_function("BLAKE2B",
+ functools.partial(blake2bhash_, digest_bytes=64), origin="pycrypto")
+ except ImportError:
+ pass
+
+if 'BLAKE2S' not in hashfunc_map:
+ try:
+ from Crypto.Hash import BLAKE2s
+ blake2shash_ = getattr(BLAKE2s, 'new', None)
+ if blake2shash_ is not None:
+ _generate_hash_function("BLAKE2S",
+ functools.partial(blake2shash_, digest_bytes=32), origin="pycrypto")
+ except ImportError:
+ pass
+
+if 'SHA3_256' not in hashfunc_map:
+ try:
+ from Crypto.Hash import SHA3_256
+ sha3_256hash_ = getattr(SHA3_256, 'new', None)
+ if sha3_256hash_ is not None:
+ _generate_hash_function("SHA3_256",
+ sha3_256hash_, origin="pycrypto")
+ except ImportError:
+ pass
+
+if 'SHA3_512' not in hashfunc_map:
+ try:
+ from Crypto.Hash import SHA3_512
+ sha3_512hash_ = getattr(SHA3_512, 'new', None)
+ if sha3_512hash_ is not None:
+ _generate_hash_function("SHA3_512",
+ sha3_512hash_, origin="pycrypto")
+ except ImportError:
+ pass
+
+
+# Try to use mhash if available
+# mhash does not release the GIL at present, so it gets lower priority than
+# hashlib and pycrypto. However, it might be the only accelerated
+# implementation of WHIRLPOOL available.
+if 'RMD160' not in hashfunc_map or 'WHIRLPOOL' not in hashfunc_map:
+ try:
+ import mhash
+ for local_name, hash_name in (("RMD160", "RIPEMD160"), ("WHIRLPOOL", "WHIRLPOOL")):
+ if local_name not in hashfunc_map and hasattr(mhash, 'MHASH_%s' % hash_name):
+ _generate_hash_function(local_name,
+ functools.partial(mhash.MHASH, getattr(mhash, 'MHASH_%s' % hash_name)),
+ origin='mhash')
+ except ImportError:
+ pass
+
+
+# Support pygost as fallback streebog provider
+# It's mostly provided as a reference implementation; it's pure Python,
+# slow, and it reads all data into memory (i.e. it does not hash incrementally
+# on update()).
+if 'STREEBOG256' not in hashfunc_map or 'STREEBOG512' not in hashfunc_map:
+ try:
+ import pygost.gost34112012
+
+ _generate_hash_function("STREEBOG256",
+ functools.partial(pygost.gost34112012.GOST34112012, digest_size=32), origin="pygost")
+ _generate_hash_function("STREEBOG512",
+ functools.partial(pygost.gost34112012.GOST34112012, digest_size=64), origin="pygost")
+ except ImportError:
+ pass
+
+
+_whirlpool_unaccelerated = False
+if "WHIRLPOOL" not in hashfunc_map:
+ # Bundled WHIRLPOOL implementation
+ _whirlpool_unaccelerated = True
+ from portage.util.whirlpool import new as _new_whirlpool
+ _generate_hash_function("WHIRLPOOL", _new_whirlpool, origin="bundled")
+
+
+# There is only one implementation for size
+class SizeHash(object):
+ def checksum_file(self, filename):
+ size = os.stat(filename).st_size
+ return (size, size)
+
+hashfunc_map["size"] = SizeHash()
+
+# cache all supported hash methods in a frozenset
+hashfunc_keys = frozenset(hashfunc_map)
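+
+# For illustration: a registered hash is used through hashfunc_map, e.g.
+# hashfunc_map["SHA256"].checksum_str(b"data") returns a hex digest string and
+# hashfunc_map["SHA256"].checksum_file(path) returns a (hexdigest, size) tuple,
+# as implemented by _generate_hash_function above.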
+
+# end actual hash functions
+
+
+prelink_capable = False
+if os.path.exists(PRELINK_BINARY):
+ cmd = [PRELINK_BINARY, "--version"]
+ cmd = [_unicode_encode(x, encoding=_encodings['fs'], errors='strict')
+ for x in cmd]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ proc.communicate()
+ status = proc.wait()
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
+  prelink_capable = True
+ del cmd, proc, status
+
+def is_prelinkable_elf(filename):
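+ # Read the start of the ELF header: verify the magic and check byte 16
+ # (start of e_type) for ET_EXEC (2) or ET_DYN (3), the object types that
+ # prelink can process.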
+ f = _open_file(filename)
+ try:
+ magic = f.read(17)
+ finally:
+ f.close()
+ return (len(magic) == 17 and magic.startswith(b'\x7fELF') and
+ magic[16:17] in (b'\x02', b'\x03')) # 2=ET_EXEC, 3=ET_DYN
+
+def perform_md5(x, calc_prelink=0):
+ return perform_checksum(x, "MD5", calc_prelink)[0]
+
+def _perform_md5_merge(x, **kwargs):
+ return perform_md5(_unicode_encode(x,
+ encoding=_encodings['merge'], errors='strict'), **kwargs)
+
+def perform_all(x, calc_prelink=0):
+ mydict = {}
+ for k in hashfunc_keys:
+ mydict[k] = perform_checksum(x, k, calc_prelink)[0]
+ return mydict
+
+def get_valid_checksum_keys():
+ return hashfunc_keys
+
+def get_hash_origin(hashtype):
+ if hashtype not in hashfunc_keys:
+ raise KeyError(hashtype)
+ return hashorigin_map.get(hashtype, "unknown")
+
+def _filter_unaccelarated_hashes(digests):
+ """
+ If multiple digests are available and some are unaccelerated,
+ then return a new dict that omits the unaccelerated ones. This
+ allows extreme performance problems like bug #425046 to be
+ avoided whenever practical, especially for cases like stage
+ builds where acceleration may not be available for some hashes
+ due to minimization of dependencies.
+ """
+ if _whirlpool_unaccelerated and "WHIRLPOOL" in digests:
+ verifiable_hash_types = set(digests).intersection(hashfunc_keys)
+ verifiable_hash_types.discard("size")
+ if len(verifiable_hash_types) > 1:
+ digests = dict(digests)
+ digests.pop("WHIRLPOOL")
+
+ return digests
+
+class _hash_filter(object):
+ """
+ Implements filtering for PORTAGE_CHECKSUM_FILTER.
+ """
+
+ __slots__ = ('transparent', '_tokens',)
+
+ def __init__(self, filter_str):
+ tokens = filter_str.upper().split()
+ if not tokens or tokens[-1] == "*":
+ del tokens[:]
+ self.transparent = not tokens
+ tokens.reverse()
+ self._tokens = tuple(tokens)
+
+ def __call__(self, hash_name):
+ if self.transparent:
+ return True
+ matches = ("*", hash_name)
+ for token in self._tokens:
+ if token in matches:
+ return True
+ elif token[:1] == "-":
+ if token[1:] in matches:
+ return False
+ return False
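+
+ # Illustrative semantics: tokens are evaluated right-to-left, so with
+ # PORTAGE_CHECKSUM_FILTER="-* SHA512", __call__("SHA512") returns True while
+ # __call__("MD5") matches the trailing "-*" token and returns False.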
+
+def _apply_hash_filter(digests, hash_filter):
+ """
+ Return a new dict containing the filtered digests, or the same
+ dict if no changes are necessary. This will always preserve at
+ least one digest, in order to ensure that they are not all
+ discarded.
+ @param digests: dictionary of digests
+ @type digests: dict
+ @param hash_filter: A callable that takes a single hash name
+ argument, and returns True if the hash is to be used or
+ False otherwise
+ @type hash_filter: callable
+ """
+
+ verifiable_hash_types = set(digests).intersection(hashfunc_keys)
+ verifiable_hash_types.discard("size")
+ modified = False
+ if len(verifiable_hash_types) > 1:
+ for k in list(verifiable_hash_types):
+ if not hash_filter(k):
+ modified = True
+ verifiable_hash_types.remove(k)
+ if len(verifiable_hash_types) == 1:
+ break
+
+ if modified:
+ digests = dict((k, v) for (k, v) in digests.items()
+ if k == "size" or k in verifiable_hash_types)
+
+ return digests
+
+def verify_all(filename, mydict, calc_prelink=0, strict=0):
+ """
+ Verify all checksums against a file.
+
+ @param filename: File to run the checksums against
+ @type filename: String
+ @param mydict: Dictionary of expected digests (and optionally "size")
+ @type mydict: Dict
+ @param calc_prelink: Whether or not to reverse prelink before running the checksum
+ @type calc_prelink: Integer
+ @param strict: Enable/Disable strict checking (which stops exactly at a checksum failure and throws an exception)
+ @type strict: Integer
+ @rtype: Tuple
+ @return: Result of the checks and possible message:
+  1) If size fails, False, and a tuple containing a message, the actual size, and the recorded size
+  2) If there is an os error, False, and a tuple containing the system error followed by 2 nulls
+  3) If a checksum fails, False and a tuple containing a message, the computed hash, and the recorded hash
+ 4) If all checks succeed, return True and a fake reason
+ """
+ # Dict relates to single file only.
+ # returns: (passed,reason)
+ file_is_ok = True
+ reason = "Reason unknown"
+ try:
+ mysize = os.stat(filename)[stat.ST_SIZE]
+ if mydict.get("size") is not None and mydict["size"] != mysize:
+   return False, (_("Filesize does not match recorded size"), mysize, mydict["size"])
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ raise portage.exception.FileNotFound(filename)
+ return False, (str(e), None, None)
+
+ verifiable_hash_types = set(mydict).intersection(hashfunc_keys)
+ verifiable_hash_types.discard("size")
+ if not verifiable_hash_types:
+ expected = set(hashfunc_keys)
+ expected.discard("size")
+ expected = list(expected)
+ expected.sort()
+ expected = " ".join(expected)
+ got = set(mydict)
+ got.discard("size")
+ got = list(got)
+ got.sort()
+ got = " ".join(got)
+ return False, (_("Insufficient data for checksum verification"), got, expected)
+
+ for x in sorted(mydict):
+ if x == "size":
+ continue
+ elif x in hashfunc_keys:
+ myhash = perform_checksum(filename, x, calc_prelink=calc_prelink)[0]
+ if mydict[x] != myhash:
+ if strict:
+ raise portage.exception.DigestException(
+      ("Failed to verify '%(file)s' on " + \
+ "checksum type '%(type)s'") % \
+ {"file" : filename, "type" : x})
+ else:
+ file_is_ok = False
+ reason = (("Failed on %s verification" % x), myhash, mydict[x])
+ break
+
+ return file_is_ok, reason
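+
+# For illustration (hypothetical values): verify_all("/path/to/distfile",
+# {"size": 12345, "SHA512": "<hex>", "BLAKE2B": "<hex>"}) returns
+# (True, "Reason unknown") when everything matches, or
+# (False, (message, computed_value, recorded_value)) on the first failure.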
+
+def perform_checksum(filename, hashname="MD5", calc_prelink=0):
+ """
+ Run a specific checksum against a file. The filename can
+ be either unicode or an encoded byte string. If filename
+ is unicode then a UnicodeDecodeError will be raised if
+ necessary.
+
+ @param filename: File to run the checksum against
+ @type filename: String
+ @param hashname: The type of hash function to run
+ @type hashname: String
+ @param calc_prelink: Whether or not to reverse prelink before running the checksum
+ @type calc_prelink: Integer
+ @rtype: Tuple
+ @return: The hash and size of the data
+ """
+ global prelink_capable
+ # Make sure filename is encoded with the correct encoding before
+ # it is passed to spawn (for prelink) and/or the hash function.
+ filename = _unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict')
+ myfilename = filename
+ prelink_tmpfile = None
+ try:
+ if (calc_prelink and prelink_capable and
+ is_prelinkable_elf(filename)):
+ # Create non-prelinked temporary file to checksum.
+ # Files rejected by prelink are summed in place.
+ try:
+ tmpfile_fd, prelink_tmpfile = tempfile.mkstemp()
+ try:
+ retval = portage.process.spawn([PRELINK_BINARY,
+ "--verify", filename], fd_pipes={1:tmpfile_fd})
+ finally:
+ os.close(tmpfile_fd)
+ if retval == os.EX_OK:
+ myfilename = prelink_tmpfile
+ except portage.exception.CommandNotFound:
+ # This happens during uninstallation of prelink.
+ prelink_capable = False
+ try:
+ if hashname not in hashfunc_keys:
+ raise portage.exception.DigestException(hashname + \
+ " hash function not available (needs dev-python/pycrypto)")
+ myhash, mysize = hashfunc_map[hashname].checksum_file(myfilename)
+ except (OSError, IOError) as e:
+ if e.errno in (errno.ENOENT, errno.ESTALE):
+ raise portage.exception.FileNotFound(myfilename)
+ elif e.errno == portage.exception.PermissionDenied.errno:
+ raise portage.exception.PermissionDenied(myfilename)
+ raise
+ return myhash, mysize
+ finally:
+ if prelink_tmpfile:
+ try:
+ os.unlink(prelink_tmpfile)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+
+def perform_multiple_checksums(filename, hashes=["MD5"], calc_prelink=0):
+ """
+ Run a group of checksums against a file.
+
+ @param filename: File to run the checksums against
+ @type filename: String
+ @param hashes: A list of checksum functions to run against the file
+ @type hashes: List
+ @param calc_prelink: Whether or not to reverse prelink before running the checksum
+ @type calc_prelink: Integer
+ @rtype: Dict
+ @return: A dictionary mapping each requested hash name to its hex digest:
+  return_value[hash_name] = hash_result
+ """
+ rVal = {}
+ for x in hashes:
+ if x not in hashfunc_keys:
+ raise portage.exception.DigestException(x+" hash function not available (needs dev-python/pycrypto or >=dev-lang/python-2.5)")
+ rVal[x] = perform_checksum(filename, x, calc_prelink)[0]
+ return rVal
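+
+# For illustration (hypothetical path): perform_multiple_checksums(
+# "/path/to/file", hashes=["SHA512", "BLAKE2B"]) returns a dict such as
+# {"SHA512": "<hexdigest>", "BLAKE2B": "<hexdigest>"}.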
+
+
+def checksum_str(data, hashname="MD5"):
+ """
+ Run a specific checksum against a byte string.
+
+ @param data: Data to checksum
+ @type data: Bytes
+ @param hashname: The type of hash function to run
+ @type hashname: String
+ @rtype: String
+ @return: The hash (hex-digest) of the data
+ """
+ if hashname not in hashfunc_keys:
+ raise portage.exception.DigestException(hashname + \
+ " hash function not available (needs dev-python/pycrypto)")
+ return hashfunc_map[hashname].checksum_str(data)
diff --git a/lib/portage/const.py b/lib/portage/const.py
new file mode 100644
index 000000000..7f84bf0e9
--- /dev/null
+++ b/lib/portage/const.py
@@ -0,0 +1,265 @@
+# portage: Constants
+# Copyright 1998-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import os
+
+# ===========================================================================
+# START OF CONSTANTS -- START OF CONSTANTS -- START OF CONSTANTS -- START OF
+# ===========================================================================
+
+# There are two types of variables here which can easily be confused,
+# resulting in arbitrary bugs, mainly exposed with an offset
+# installation (Prefix). The two types relate to the usage of
+# config_root or target_root.
+# The first, config_root (PORTAGE_CONFIGROOT), can be a path somewhere,
+# from which all derived paths need to be relative (e.g.
+# USER_CONFIG_PATH) without EPREFIX prepended in Prefix. This means
+# config_root can for instance be set to "$HOME/my/config". Obviously,
+# in such case it is not appropriate to prepend EPREFIX to derived
+# constants. The default value of config_root is EPREFIX (in non-Prefix
+# the empty string) -- overriding the value loses the EPREFIX as one
+# would expect.
+# Second, there is target_root (ROOT), which is used to install to a
+# completely different location; it is of limited use in Prefix. Because
+# this offset is always given, EPREFIX should always be applied within it,
+# hence the code always prefixes such paths with EROOT.
+# The variables in this file are grouped by config_root, target_root.
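+#
+# For illustration (hypothetical values): with PORTAGE_CONFIGROOT=/home/me/cfg
+# the derived USER_CONFIG_PATH ("etc/portage") is resolved against
+# /home/me/cfg, while with ROOT=/mnt/gentoo and an empty EPREFIX the derived
+# VDB_PATH ("var/db/pkg") is resolved against /mnt/gentoo.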
+
+# variables used with config_root (these need to be relative)
+USER_CONFIG_PATH = "etc/portage"
+MAKE_CONF_FILE = USER_CONFIG_PATH + "/make.conf"
+MODULES_FILE_PATH = USER_CONFIG_PATH + "/modules"
+CUSTOM_PROFILE_PATH = USER_CONFIG_PATH + "/profile"
+USER_VIRTUALS_FILE = USER_CONFIG_PATH + "/virtuals"
+EBUILD_SH_ENV_FILE = USER_CONFIG_PATH + "/bashrc"
+EBUILD_SH_ENV_DIR = USER_CONFIG_PATH + "/env"
+CUSTOM_MIRRORS_FILE = USER_CONFIG_PATH + "/mirrors"
+COLOR_MAP_FILE = USER_CONFIG_PATH + "/color.map"
+PROFILE_PATH = USER_CONFIG_PATH + "/make.profile"
+MAKE_DEFAULTS_FILE = PROFILE_PATH + "/make.defaults" # FIXME: not used
+DEPRECATED_PROFILE_FILE = PROFILE_PATH + "/deprecated"
+
+# variables used with target_root (these are joined directly onto EROOT with
+# os.path.join, so they must not have a leading '/')
+VDB_PATH = "var/db/pkg"
+CACHE_PATH = "var/cache/edb"
+PRIVATE_PATH = "var/lib/portage"
+WORLD_FILE = PRIVATE_PATH + "/world"
+WORLD_SETS_FILE = PRIVATE_PATH + "/world_sets"
+CONFIG_MEMORY_FILE = PRIVATE_PATH + "/config"
+NEWS_LIB_PATH = "var/lib/gentoo"
+
+# these variables get EPREFIX prepended automagically when they are
+# translated into their lowercase variants
+DEPCACHE_PATH = "/var/cache/edb/dep"
+GLOBAL_CONFIG_PATH = "/usr/share/portage/config"
+
+# these variables are not used with target_root or config_root
+# NOTE: Use realpath(__file__) so that python module symlinks in site-packages
+# are followed back to the real location of the whole portage installation.
+# NOTE: Please keep PORTAGE_BASE_PATH in one line to help substitutions.
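+# NOTE: The rstrip("co") below strips a trailing "c" or "o" so that a compiled
+# __file__ such as const.pyc resolves back to const.py before realpath is
+# applied.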
+PORTAGE_BASE_PATH = os.path.join(os.sep, os.sep.join(os.path.realpath(__file__.rstrip("co")).split(os.sep)[:-3]))
+PORTAGE_BIN_PATH = PORTAGE_BASE_PATH + "/bin"
+PORTAGE_PYM_PATH = os.path.realpath(os.path.join(__file__, '../..'))
+LOCALE_DATA_PATH = PORTAGE_BASE_PATH + "/locale" # FIXME: not used
+EBUILD_SH_BINARY = PORTAGE_BIN_PATH + "/ebuild.sh"
+MISC_SH_BINARY = PORTAGE_BIN_PATH + "/misc-functions.sh"
+SANDBOX_BINARY = "/usr/bin/sandbox"
+FAKEROOT_BINARY = "/usr/bin/fakeroot"
+BASH_BINARY = "/bin/bash"
+MOVE_BINARY = "/bin/mv"
+PRELINK_BINARY = "/usr/sbin/prelink"
+
+INVALID_ENV_FILE = "/etc/spork/is/not/valid/profile.env"
+MERGING_IDENTIFIER = "-MERGING-"
+REPO_NAME_FILE = "repo_name"
+REPO_NAME_LOC = "profiles" + "/" + REPO_NAME_FILE
+
+PORTAGE_PACKAGE_ATOM = "sys-apps/portage"
+LIBC_PACKAGE_ATOM = "virtual/libc"
+OS_HEADERS_PACKAGE_ATOM = "virtual/os-headers"
+CVS_PACKAGE_ATOM = "dev-vcs/cvs"
+GIT_PACKAGE_ATOM = "dev-vcs/git"
+RSYNC_PACKAGE_ATOM = "net-misc/rsync"
+
+INCREMENTALS = (
+ "ACCEPT_KEYWORDS",
+ "CONFIG_PROTECT",
+ "CONFIG_PROTECT_MASK",
+ "ENV_UNSET",
+ "FEATURES",
+ "IUSE_IMPLICIT",
+ "PRELINK_PATH",
+ "PRELINK_PATH_MASK",
+ "PROFILE_ONLY_VARIABLES",
+ "USE",
+ "USE_EXPAND",
+ "USE_EXPAND_HIDDEN",
+ "USE_EXPAND_IMPLICIT",
+ "USE_EXPAND_UNPREFIXED",
+)
+EBUILD_PHASES = (
+ "pretend",
+ "setup",
+ "unpack",
+ "prepare",
+ "configure",
+ "compile",
+ "test",
+ "install",
+ "package",
+ "preinst",
+ "postinst",
+ "prerm",
+ "postrm",
+ "nofetch",
+ "config",
+ "info",
+ "other",
+)
+SUPPORTED_FEATURES = frozenset([
+ "assume-digests",
+ "binpkg-logs",
+ "binpkg-multi-instance",
+ "buildpkg",
+ "buildsyspkg",
+ "candy",
+ "case-insensitive-fs",
+ "ccache",
+ "cgroup",
+ "chflags",
+ "clean-logs",
+ "collision-protect",
+ "compress-build-logs",
+ "compressdebug",
+ "compress-index",
+ "config-protect-if-modified",
+ "digest",
+ "distcc",
+ "distcc-pump",
+ "distlocks",
+ "downgrade-backup",
+ "ebuild-locks",
+ "fail-clean",
+ "fakeroot",
+ "fixlafiles",
+ "force-mirror",
+ "force-prefix",
+ "getbinpkg",
+ "icecream",
+ "installsources",
+ "ipc-sandbox",
+ "keeptemp",
+ "keepwork",
+ "lmirror",
+ "merge-sync",
+ "metadata-transfer",
+ "mirror",
+ "multilib-strict",
+ "network-sandbox",
+ "network-sandbox-proxy",
+ "news",
+ "noauto",
+ "noclean",
+ "nodoc",
+ "noinfo",
+ "noman",
+ "nostrip",
+ "notitles",
+ "parallel-fetch",
+ "parallel-install",
+ "prelink-checksums",
+ "preserve-libs",
+ "protect-owned",
+ "python-trace",
+ "sandbox",
+ "selinux",
+ "sesandbox",
+ "sfperms",
+ "sign",
+ "skiprocheck",
+ "splitdebug",
+ "split-elog",
+ "split-log",
+ "strict",
+ "strict-keepdir",
+ "stricter",
+ "suidctl",
+ "test",
+ "test-fail-continue",
+ "unknown-features-filter",
+ "unknown-features-warn",
+ "unmerge-backup",
+ "unmerge-logs",
+ "unmerge-orphans",
+ "unprivileged",
+ "userfetch",
+ "userpriv",
+ "usersandbox",
+ "usersync",
+ "webrsync-gpg",
+ "xattr",
+])
+
+EAPI = 7
+
+HASHING_BLOCKSIZE = 32768
+
+MANIFEST2_HASH_DEFAULTS = frozenset(["BLAKE2B", "SHA512"])
+MANIFEST2_HASH_DEFAULT = "BLAKE2B"
+
+MANIFEST2_IDENTIFIERS = ("AUX", "MISC", "DIST", "EBUILD")
+
+# The EPREFIX for the current install is hardcoded here, but access to this
+# constant should be minimal, in favor of access via the EPREFIX setting of
+# a config instance (since it's possible to construct a config instance with
+# a different EPREFIX). Therefore, the EPREFIX constant should *NOT* be used
+# in the definition of any other constants within this file.
+EPREFIX = ""
+
+# pick up EPREFIX from the environment if set
+if "PORTAGE_OVERRIDE_EPREFIX" in os.environ:
+ EPREFIX = os.environ["PORTAGE_OVERRIDE_EPREFIX"]
+ if EPREFIX:
+ EPREFIX = os.path.normpath(EPREFIX)
+ if EPREFIX == os.sep:
+ EPREFIX = ""
+
+VCS_DIRS = ("CVS", "RCS", "SCCS", ".bzr", ".git", ".hg", ".svn")
+
+# List of known live eclasses. Keep it in sync with cnf/sets/portage.conf
+LIVE_ECLASSES = frozenset([
+ "bzr",
+ "cvs",
+ "darcs",
+ "git",
+ "git-2",
+ "git-r3",
+ "golang-vcs",
+ "mercurial",
+ "subversion",
+ "tla",
+])
+
+SUPPORTED_BINPKG_FORMATS = ("tar", "rpm")
+SUPPORTED_XPAK_EXTENSIONS = (".tbz2", ".xpak")
+
+# Time formats used in various places like metadata.chk.
+TIMESTAMP_FORMAT = "%a, %d %b %Y %H:%M:%S +0000" # to be used with time.gmtime()
+
+# Top-level names of Python packages installed by Portage.
+PORTAGE_PYM_PACKAGES = ("_emerge", "portage")
+
+RETURNCODE_POSTINST_FAILURE = 5
+
+# ===========================================================================
+# END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANTS -- END OF CONSTANT
+# ===========================================================================
+
+# Private constants for use in conditional code in order to minimize the diff
+# between branches.
+_DEPCLEAN_LIB_CHECK_DEFAULT = True
+_ENABLE_SET_CONFIG = True
diff --git a/lib/portage/cvstree.py b/lib/portage/cvstree.py
new file mode 100644
index 000000000..87bbed8bb
--- /dev/null
+++ b/lib/portage/cvstree.py
@@ -0,0 +1,315 @@
+# cvstree.py -- cvs tree utilities
+# Copyright 1998-2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import io
+import re
+import stat
+import sys
+import time
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+# [D]/Name/Version/Date/Flags/Tags
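+#
+# The entry trees built below are nested dicts of the (illustrative) form:
+#   {"dirs": {<dirname>: <entries dict>, ...},
+#    "files": {<filename>: {"revision": ..., "date": ..., "flags": ...,
+#                           "tags": ..., "status": ["cvs", "exists", ...]}}}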
+
+def pathdata(entries, path):
+ """Returns the data(dict) for a specific file/dir at the path specified."""
+ mysplit = path.split("/")
+ myentries = entries
+ mytarget = mysplit[-1]
+ mysplit = mysplit[:-1]
+ for mys in mysplit:
+ if mys in myentries["dirs"]:
+ myentries = myentries["dirs"][mys]
+ else:
+ return None
+ if mytarget in myentries["dirs"]:
+ return myentries["dirs"][mytarget]
+ elif mytarget in myentries["files"]:
+ return myentries["files"][mytarget]
+ else:
+ return None
+
+def fileat(entries, path):
+ return pathdata(entries, path)
+
+def isadded(entries, path):
+ """Returns True if the path exists and is added to the cvs tree."""
+ mytarget = pathdata(entries, path)
+ if mytarget:
+ if "cvs" in mytarget["status"]:
+ return 1
+
+ basedir = os.path.dirname(path)
+ filename = os.path.basename(path)
+
+ try:
+ myfile = io.open(
+ _unicode_encode(os.path.join(basedir, 'CVS', 'Entries'),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='strict')
+ except IOError:
+ return 0
+ mylines = myfile.readlines()
+ myfile.close()
+
+ rep = re.compile(r"^\/%s\/" % re.escape(filename))
+ for x in mylines:
+ if rep.search(x):
+ return 1
+
+ return 0
+
+def findnew(entries, recursive=0, basedir=""):
+ """Recurses the entries tree to find all elements that have been added but
+ have not yet been committed. Returns a list of paths, optionally prepended
+ with a basedir.
+ """
+ if basedir and basedir[-1] != "/":
+ basedir += "/"
+
+ mylist = []
+ for myfile in entries["files"]:
+ if "cvs" in entries["files"][myfile]["status"]:
+ if "0" == entries["files"][myfile]["revision"]:
+ mylist.append(basedir + myfile)
+
+ if recursive:
+ for mydir in entries["dirs"]:
+ mylist += findnew(entries["dirs"][mydir], recursive, basedir + mydir)
+
+ return mylist
+
+def findoption(entries, pattern, recursive=0, basedir=""):
+ """Iterate over paths of cvs entries for which the pattern.search() method
+ finds a match. Yields paths, optionally prepended with a basedir.
+ """
+ if not basedir.endswith("/"):
+ basedir += "/"
+
+ for myfile, mydata in entries["files"].items():
+ if "cvs" in mydata["status"]:
+ if pattern.search(mydata["flags"]):
+ yield basedir + myfile
+
+ if recursive:
+ for mydir, mydata in entries["dirs"].items():
+ for x in findoption(mydata, pattern,
+ recursive, basedir + mydir):
+ yield x
+
+def findchanged(entries, recursive=0, basedir=""):
+ """Recurses the entries tree to find all elements that exist in the cvs tree
+ and differ from the committed version. Returns a list of paths, optionally
+ prepended with a basedir.
+ """
+ if basedir and basedir[-1] != "/":
+ basedir += "/"
+
+ mylist = []
+ for myfile in entries["files"]:
+ if "cvs" in entries["files"][myfile]["status"]:
+ if "current" not in entries["files"][myfile]["status"]:
+ if "exists" in entries["files"][myfile]["status"]:
+ if entries["files"][myfile]["revision"] != "0":
+ mylist.append(basedir + myfile)
+
+ if recursive:
+ for mydir in entries["dirs"]:
+ mylist += findchanged(entries["dirs"][mydir], recursive, basedir + mydir)
+
+ return mylist
+
+def findmissing(entries, recursive=0, basedir=""):
+ """Recurses the entries tree to find all elements that are listed in the cvs
+ tree but do not exist on the filesystem. Returns a list of paths,
+ optionally prepended with a basedir.
+ """
+ if basedir and basedir[-1] != "/":
+ basedir += "/"
+
+ mylist = []
+ for myfile in entries["files"]:
+ if "cvs" in entries["files"][myfile]["status"]:
+ if "exists" not in entries["files"][myfile]["status"]:
+ if "removed" not in entries["files"][myfile]["status"]:
+ mylist.append(basedir + myfile)
+
+ if recursive:
+ for mydir in entries["dirs"]:
+ mylist += findmissing(entries["dirs"][mydir], recursive, basedir + mydir)
+
+ return mylist
+
+def findunadded(entries, recursive=0, basedir=""):
+ """Recurses the entries tree to find all elements that are in valid cvs
+ directories but are not part of the cvs tree. Returns a list of paths,
+ optionally prepended with a basedir.
+ """
+ if basedir and basedir[-1] != "/":
+ basedir += "/"
+
+ # Ignore what cvs ignores.
+ mylist = []
+ for myfile in entries["files"]:
+ if "cvs" not in entries["files"][myfile]["status"]:
+ mylist.append(basedir + myfile)
+
+ if recursive:
+ for mydir in entries["dirs"]:
+ mylist += findunadded(entries["dirs"][mydir], recursive, basedir + mydir)
+
+ return mylist
+
+def findremoved(entries, recursive=0, basedir=""):
+ """Recurses the entries tree to find all elements that are flagged for cvs
+ deletion. Returns a list of paths, optionally prepended with a basedir.
+ """
+ if basedir and basedir[-1] != "/":
+ basedir += "/"
+
+ mylist = []
+ for myfile in entries["files"]:
+ if "removed" in entries["files"][myfile]["status"]:
+ mylist.append(basedir + myfile)
+
+ if recursive:
+ for mydir in entries["dirs"]:
+ mylist += findremoved(entries["dirs"][mydir], recursive, basedir + mydir)
+
+ return mylist
+
+def findall(entries, recursive=0, basedir=""):
+ """Recurses the entries tree to find all new, changed, missing, unadded, and
+ removed entities. Returns a 5 element list of lists as returned from each find*().
+ """
+ if basedir and basedir[-1] != "/":
+ basedir += "/"
+ mynew = findnew(entries, recursive, basedir)
+ mychanged = findchanged(entries, recursive, basedir)
+ mymissing = findmissing(entries, recursive, basedir)
+ myunadded = findunadded(entries, recursive, basedir)
+ myremoved = findremoved(entries, recursive, basedir)
+ return [mynew, mychanged, mymissing, myunadded, myremoved]
+
+ignore_list = re.compile(r"(^|/)(RCS(|LOG)|SCCS|CVS(|\.adm)|cvslog\..*|tags|TAGS|\.(make\.state|nse_depinfo)|.*~|(\.|)#.*|,.*|_$.*|.*\$|\.del-.*|.*\.(old|BAK|bak|orig|rej|a|olb|o|obj|so|exe|Z|elc|ln)|core)$")
+def apply_cvsignore_filter(list):
+ x = 0
+ while x < len(list):
+ if ignore_list.match(list[x].split("/")[-1]):
+ list.pop(x)
+ else:
+ x += 1
+ return list
+
+def getentries(mydir, recursive=0):
+ """Scans the given directory and returns a datadict of all the entries in
+ the directory separated as a dirs dict and a files dict.
+ """
+ myfn = mydir + "/CVS/Entries"
+ # entries=[dirs, files]
+ entries = {"dirs":{}, "files":{}}
+ if not os.path.exists(mydir):
+ return entries
+ try:
+ myfile = io.open(_unicode_encode(myfn,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='strict')
+ mylines = myfile.readlines()
+ myfile.close()
+ except SystemExit:
+ raise
+ except:
+ mylines = []
+
+ for line in mylines:
+ if line and line[-1] == "\n":
+ line = line[:-1]
+ if not line:
+ continue
+ if line == "D": # End of entries file
+ break
+ mysplit = line.split("/")
+ if len(mysplit) != 6:
+ print("Confused:", mysplit)
+ continue
+ if mysplit[0] == "D":
+ entries["dirs"][mysplit[1]] = {"dirs":{}, "files":{}, "status":[]}
+ entries["dirs"][mysplit[1]]["status"] = ["cvs"]
+ if os.path.isdir(mydir+"/"+mysplit[1]):
+ entries["dirs"][mysplit[1]]["status"] += ["exists"]
+ entries["dirs"][mysplit[1]]["flags"] = mysplit[2:]
+ if recursive:
+ rentries = getentries(mydir + "/" + mysplit[1], recursive)
+ entries["dirs"][mysplit[1]]["dirs"] = rentries["dirs"]
+ entries["dirs"][mysplit[1]]["files"] = rentries["files"]
+ else:
+ # [D]/Name/revision/Date/Flags/Tags
+ entries["files"][mysplit[1]] = {}
+ entries["files"][mysplit[1]]["revision"] = mysplit[2]
+ entries["files"][mysplit[1]]["date"] = mysplit[3]
+ entries["files"][mysplit[1]]["flags"] = mysplit[4]
+ entries["files"][mysplit[1]]["tags"] = mysplit[5]
+ entries["files"][mysplit[1]]["status"] = ["cvs"]
+ if entries["files"][mysplit[1]]["revision"][0] == "-":
+ entries["files"][mysplit[1]]["status"] += ["removed"]
+
+ for file in os.listdir(mydir):
+ if file == "CVS":
+ continue
+ if os.path.isdir(mydir + "/" + file):
+ if file not in entries["dirs"]:
+ if ignore_list.match(file) is not None:
+ continue
+ entries["dirs"][file] = {"dirs":{}, "files":{}}
+ # It's normal for a directory to be unlisted in Entries
+ # when checked out without -P (see bug #257660).
+ rentries = getentries(mydir + "/" + file, recursive)
+ entries["dirs"][file]["dirs"] = rentries["dirs"]
+ entries["dirs"][file]["files"] = rentries["files"]
+ if "status" in entries["dirs"][file]:
+ if "exists" not in entries["dirs"][file]["status"]:
+ entries["dirs"][file]["status"] += ["exists"]
+ else:
+ entries["dirs"][file]["status"] = ["exists"]
+ elif os.path.isfile(mydir + "/" + file):
+ if file not in entries["files"]:
+ if ignore_list.match(file) is not None:
+ continue
+ entries["files"][file] = {"revision":"", "date":"", "flags":"", "tags":""}
+ if "status" in entries["files"][file]:
+ if "exists" not in entries["files"][file]["status"]:
+ entries["files"][file]["status"] += ["exists"]
+ else:
+ entries["files"][file]["status"] = ["exists"]
+ try:
+ mystat = os.stat(mydir + "/" + file)
+ mytime = time.asctime(time.gmtime(mystat[stat.ST_MTIME]))
+ if "status" not in entries["files"][file]:
+ entries["files"][file]["status"] = []
+ if mytime == entries["files"][file]["date"]:
+ entries["files"][file]["status"] += ["current"]
+   except SystemExit:
+ raise
+ except Exception as e:
+ print("failed to stat", file)
+ print(e)
+ return
+
+ elif ignore_list.match(file) is not None:
+ pass
+ else:
+ print()
+ print("File of unknown type:", mydir + "/" + file)
+ print()
+
+ return entries
diff --git a/lib/portage/data.py b/lib/portage/data.py
new file mode 100644
index 000000000..28d6eb79d
--- /dev/null
+++ b/lib/portage/data.py
@@ -0,0 +1,322 @@
+# data.py -- Calculated/Discovered Data Values
+# Copyright 1998-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os, pwd, grp, platform, sys
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.output:colorize',
+ 'portage.util:writemsg',
+ 'portage.util.path:first_existing',
+ 'subprocess'
+)
+from portage.localization import _
+
+ostype = platform.system()
+userland = None
+if ostype == "DragonFly" or ostype.endswith("BSD"):
+ userland = "BSD"
+else:
+ userland = "GNU"
+
+lchown = getattr(os, "lchown", None)
+
+if not lchown:
+ if ostype == "Darwin":
+ def lchown(*_args, **_kwargs):
+ pass
+ else:
+ def lchown(*_args, **_kwargs):
+ writemsg(colorize("BAD", "!!!") + _(
+ " It seems that os.lchown does not"
+ " exist. Please rebuild python.\n"), noiselevel=-1)
+ lchown()
+
+lchown = portage._unicode_func_wrapper(lchown)
+
+def _target_eprefix():
+ """
+ Calculate the target EPREFIX, which may be different from
+ portage.const.EPREFIX due to cross-prefix support. The result
+ is equivalent to portage.settings["EPREFIX"], but the calculation
+ is done without the expense of instantiating portage.settings.
+ @rtype: str
+ @return: the target EPREFIX
+ """
+ eprefix = os.environ.get("EPREFIX", portage.const.EPREFIX)
+ if eprefix:
+ eprefix = portage.util.normalize_path(eprefix)
+ return eprefix
+
+def _target_root():
+ """
+ Calculate the target ROOT. The result is equivalent to
+ portage.settings["ROOT"], but the calculation
+ is done without the expense of instantiating portage.settings.
+ @rtype: str
+ @return: the target ROOT (always ends with a slash)
+ """
+ root = os.environ.get("ROOT")
+ if not root:
+ # Handle either empty or unset ROOT.
+ root = os.sep
+ root = portage.util.normalize_path(root)
+ return root.rstrip(os.sep) + os.sep
+
+def portage_group_warning():
+ warn_prefix = colorize("BAD", "*** WARNING *** ")
+ mylines = [
+ "For security reasons, only system administrators should be",
+ "allowed in the portage group. Untrusted users or processes",
+ "can potentially exploit the portage group for attacks such as",
+ "local privilege escalation."
+ ]
+ for x in mylines:
+ writemsg(warn_prefix, noiselevel=-1)
+ writemsg(x, noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+
+# Portage has 3 security levels that depend on the uid and gid of the main
+# process and are assigned according to the following table:
+#
+# Privileges secpass uid gid
+# normal 0 any any
+# group 1 any portage_gid
+# super 2 0 any
+#
+# If the "wheel" group does not exist then wheelgid falls back to 0.
+# If the "portage" group does not exist then portage_uid falls back to wheelgid.
+
+# If the current user is not root, but has write access to the
+# EROOT directory (not due to the 0002 bit), then use "unprivileged"
+# mode which sets secpass = 2 and uses the UID and GID of the EROOT
+# directory to generate default PORTAGE_INST_GID, PORTAGE_INST_UID,
+# PORTAGE_USERNAME, and PORTAGE_GRPNAME settings.
+def _unprivileged_mode(eroot, eroot_st):
+ return os.getuid() != 0 and os.access(eroot, os.W_OK) and \
+ not eroot_st.st_mode & 0o0002
+
+uid = os.getuid()
+wheelgid = 0
+try:
+ wheelgid = grp.getgrnam("wheel")[2]
+except KeyError:
+ pass
+
+# The portage_uid and portage_gid global constants, and others that
+# depend on them are initialized lazily, in order to allow configuration
+# via make.conf. Eventually, these constants may be deprecated in favor
+# of config attributes, since it's conceivable that multiple
+# configurations with different constants could be used simultaneously.
+_initialized_globals = set()
+
+def _get_global(k):
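+ """
+ Lazily compute one of the module-level globals (secpass, portage_gid,
+ portage_uid, userpriv_groups, _portage_grpname, _portage_username), cache
+ the result in globals(), and return it.
+ """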
+ if k in _initialized_globals:
+ return globals()[k]
+
+ if k == 'secpass':
+
+ unprivileged = False
+ if hasattr(portage, 'settings'):
+ unprivileged = "unprivileged" in portage.settings.features
+ else:
+ # The config class has equivalent code, but we also need to
+ # do it here if _disable_legacy_globals() has been called.
+ eroot_or_parent = first_existing(os.path.join(
+ _target_root(), _target_eprefix().lstrip(os.sep)))
+ try:
+ eroot_st = os.stat(eroot_or_parent)
+ except OSError:
+ pass
+ else:
+ unprivileged = _unprivileged_mode(
+ eroot_or_parent, eroot_st)
+
+ v = 0
+ if uid == 0:
+ v = 2
+ elif unprivileged:
+ v = 2
+ elif _get_global('portage_gid') in os.getgroups():
+ v = 1
+
+ elif k in ('portage_gid', 'portage_uid'):
+
+  # Discover the uid and gid of the portage user/group
+ keyerror = False
+ try:
+ portage_uid = pwd.getpwnam(_get_global('_portage_username')).pw_uid
+ except KeyError:
+ keyerror = True
+ portage_uid = 0
+
+ try:
+ portage_gid = grp.getgrnam(_get_global('_portage_grpname')).gr_gid
+ except KeyError:
+ keyerror = True
+ portage_gid = 0
+
+ # Suppress this error message if both PORTAGE_GRPNAME and
+ # PORTAGE_USERNAME are set to "root", for things like
+ # Android (see bug #454060).
+ if keyerror and not (_get_global('_portage_username') == "root" and
+ _get_global('_portage_grpname') == "root"):
+ writemsg(colorize("BAD",
+ _("portage: 'portage' user or group missing.")) + "\n", noiselevel=-1)
+ writemsg(_(
+ " For the defaults, line 1 goes into passwd, "
+ "and 2 into group.\n"), noiselevel=-1)
+ writemsg(colorize("GOOD",
+ " portage:x:250:250:portage:/var/tmp/portage:/bin/false") \
+ + "\n", noiselevel=-1)
+ writemsg(colorize("GOOD", " portage::250:portage") + "\n",
+ noiselevel=-1)
+ portage_group_warning()
+
+ globals()['portage_gid'] = portage_gid
+ _initialized_globals.add('portage_gid')
+ globals()['portage_uid'] = portage_uid
+ _initialized_globals.add('portage_uid')
+
+ if k == 'portage_gid':
+ return portage_gid
+ elif k == 'portage_uid':
+ return portage_uid
+ else:
+ raise AssertionError('unknown name: %s' % k)
+
+ elif k == 'userpriv_groups':
+ v = [_get_global('portage_gid')]
+ if secpass >= 2:
+ # Get a list of group IDs for the portage user. Do not use
+ # grp.getgrall() since it is known to trigger spurious
+ # SIGPIPE problems with nss_ldap.
+ cmd = ["id", "-G", _portage_username]
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000:
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see https://bugs.python.org/issue8513).
+ fullname = portage.process.find_binary(cmd[0])
+ if fullname is None:
+ globals()[k] = v
+ _initialized_globals.add(k)
+ return v
+ cmd[0] = fullname
+
+ encoding = portage._encodings['content']
+ cmd = [portage._unicode_encode(x,
+ encoding=encoding, errors='strict') for x in cmd]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ myoutput = proc.communicate()[0]
+ status = proc.wait()
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
+ for x in portage._unicode_decode(myoutput,
+ encoding=encoding, errors='strict').split():
+ try:
+ v.append(int(x))
+ except ValueError:
+ pass
+ v = sorted(set(v))
+
+ # Avoid instantiating portage.settings when the desired
+ # variable is set in os.environ.
+ elif k in ('_portage_grpname', '_portage_username'):
+ v = None
+ if k == '_portage_grpname':
+ env_key = 'PORTAGE_GRPNAME'
+ else:
+ env_key = 'PORTAGE_USERNAME'
+
+ if env_key in os.environ:
+ v = os.environ[env_key]
+ elif hasattr(portage, 'settings'):
+ v = portage.settings.get(env_key)
+ else:
+ # The config class has equivalent code, but we also need to
+ # do it here if _disable_legacy_globals() has been called.
+ eroot_or_parent = first_existing(os.path.join(
+ _target_root(), _target_eprefix().lstrip(os.sep)))
+ try:
+ eroot_st = os.stat(eroot_or_parent)
+ except OSError:
+ pass
+ else:
+ if _unprivileged_mode(eroot_or_parent, eroot_st):
+ if k == '_portage_grpname':
+ try:
+ grp_struct = grp.getgrgid(eroot_st.st_gid)
+ except KeyError:
+ pass
+ else:
+ v = grp_struct.gr_name
+ else:
+ try:
+ pwd_struct = pwd.getpwuid(eroot_st.st_uid)
+ except KeyError:
+ pass
+ else:
+ v = pwd_struct.pw_name
+
+ if v is None:
+ v = 'portage'
+ else:
+ raise AssertionError('unknown name: %s' % k)
+
+ globals()[k] = v
+ _initialized_globals.add(k)
+ return v
+
+class _GlobalProxy(portage.proxy.objectproxy.ObjectProxy):
+
+ __slots__ = ('_name',)
+
+ def __init__(self, name):
+ portage.proxy.objectproxy.ObjectProxy.__init__(self)
+ object.__setattr__(self, '_name', name)
+
+ def _get_target(self):
+ return _get_global(object.__getattribute__(self, '_name'))
+
+for k in ('portage_gid', 'portage_uid', 'secpass', 'userpriv_groups',
+ '_portage_grpname', '_portage_username'):
+ globals()[k] = _GlobalProxy(k)
+del k
+
+def _init(settings):
+ """
+ Use config variables like PORTAGE_GRPNAME and PORTAGE_USERNAME to
+ initialize global variables. This allows settings to come from make.conf
+ instead of requiring them to be set in the calling environment.
+ """
+ if '_portage_grpname' not in _initialized_globals and \
+ '_portage_username' not in _initialized_globals:
+
+ # Prevents "TypeError: expected string" errors
+ # from grp.getgrnam() with PyPy
+ native_string = platform.python_implementation() == 'PyPy'
+
+ v = settings.get('PORTAGE_GRPNAME', 'portage')
+ if native_string:
+ v = portage._native_string(v)
+ globals()['_portage_grpname'] = v
+ _initialized_globals.add('_portage_grpname')
+
+ v = settings.get('PORTAGE_USERNAME', 'portage')
+ if native_string:
+ v = portage._native_string(v)
+ globals()['_portage_username'] = v
+ _initialized_globals.add('_portage_username')
+
+ if 'secpass' not in _initialized_globals:
+ v = 0
+ if uid == 0:
+ v = 2
+ elif "unprivileged" in settings.features:
+ v = 2
+ elif portage_gid in os.getgroups():
+ v = 1
+ globals()['secpass'] = v
+ _initialized_globals.add('secpass')
diff --git a/lib/portage/dbapi/DummyTree.py b/lib/portage/dbapi/DummyTree.py
new file mode 100644
index 000000000..6579e88e2
--- /dev/null
+++ b/lib/portage/dbapi/DummyTree.py
@@ -0,0 +1,16 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+class DummyTree(object):
+ """
+ Most internal code only accesses the "dbapi" attribute of the
+ binarytree, portagetree, and vartree classes. DummyTree is useful
+ in cases where alternative dbapi implementations (or wrappers that
+ modify or extend behavior of existing dbapi implementations) are
+ needed, since it allows these implementations to be exposed through
+ an interface which is minimally compatible with the *tree classes.
+ """
+ __slots__ = ("dbapi",)
+
+ def __init__(self, dbapi):
+ self.dbapi = dbapi
diff --git a/lib/portage/dbapi/IndexedPortdb.py b/lib/portage/dbapi/IndexedPortdb.py
new file mode 100644
index 000000000..510e0278c
--- /dev/null
+++ b/lib/portage/dbapi/IndexedPortdb.py
@@ -0,0 +1,171 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import io
+import functools
+import operator
+import os
+
+import portage
+from portage import _encodings
+from portage.dep import Atom
+from portage.exception import FileNotFound
+from portage.cache.index.IndexStreamIterator import IndexStreamIterator
+from portage.cache.index.pkg_desc_index import \
+ pkg_desc_index_line_read, pkg_desc_index_node
+from portage.util.iterators.MultiIterGroupBy import MultiIterGroupBy
+from portage.versions import _pkg_str
+
+class IndexedPortdb(object):
+ """
+ A portdbapi interface that uses a package description index to
+ improve performance. If the description index is missing for a
+ particular repository, then all metadata for that repository is
+ obtained using the normal portdbapi.aux_get method.
+
+ For performance reasons, the match method only supports package
+ name and version constraints. For the same reason, the xmatch
+ method is not implemented.
+ """
+
+ # Match returns unordered results.
+ match_unordered = True
+
+ _copy_attrs = ('cpv_exists', 'findname', 'getFetchMap',
+ '_aux_cache_keys', '_cpv_sort_ascending',
+ '_have_root_eclass_dir')
+
+ def __init__(self, portdb):
+
+ self._portdb = portdb
+
+ for k in self._copy_attrs:
+ setattr(self, k, getattr(portdb, k))
+
+ self._desc_cache = None
+ self._cp_map = None
+ self._unindexed_cp_map = None
+
+ def _init_index(self):
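+  # Build self._cp_map (cp -> cpv list) and self._desc_cache
+  # (cpv -> DESCRIPTION) from each repository's metadata/pkg_desc_index,
+  # falling back to a plain cp_all() stream for repositories whose index is
+  # missing. This is a generator: each cp is yielded the first time it is
+  # seen, so cp_all() can display results incrementally.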
+
+ cp_map = {}
+ desc_cache = {}
+ self._desc_cache = desc_cache
+ self._cp_map = cp_map
+ index_missing = []
+
+ streams = []
+ for repo_path in self._portdb.porttrees:
+ outside_repo = os.path.join(self._portdb.depcachedir,
+ repo_path.lstrip(os.sep))
+ filenames = []
+ for parent_dir in (repo_path, outside_repo):
+ filenames.append(os.path.join(parent_dir,
+ "metadata", "pkg_desc_index"))
+
+ repo_name = self._portdb.getRepositoryName(repo_path)
+
+ try:
+ f = None
+ for filename in filenames:
+ try:
+ f = io.open(filename,
+ encoding=_encodings["repo.content"])
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ else:
+ break
+
+ if f is None:
+ raise FileNotFound(filename)
+
+ streams.append(iter(IndexStreamIterator(f,
+ functools.partial(pkg_desc_index_line_read,
+ repo = repo_name))))
+ except FileNotFound:
+ index_missing.append(repo_path)
+
+ if index_missing:
+ self._unindexed_cp_map = {}
+
+ class _NonIndexedStream(object):
+ def __iter__(self_):
+ for cp in self._portdb.cp_all(
+ trees = index_missing):
+ # Don't call cp_list yet, since it's a waste
+ # if the package name does not match the current
+ # search.
+ self._unindexed_cp_map[cp] = index_missing
+ yield pkg_desc_index_node(cp, (), None)
+
+ streams.append(iter(_NonIndexedStream()))
+
+ if streams:
+ if len(streams) == 1:
+ cp_group_iter = ([node] for node in streams[0])
+ else:
+ cp_group_iter = MultiIterGroupBy(streams,
+ key = operator.attrgetter("cp"))
+
+ for cp_group in cp_group_iter:
+
+ new_cp = None
+ cp_list = cp_map.get(cp_group[0].cp)
+ if cp_list is None:
+ new_cp = cp_group[0].cp
+ cp_list = []
+ cp_map[cp_group[0].cp] = cp_list
+
+ for entry in cp_group:
+ cp_list.extend(entry.cpv_list)
+ if entry.desc is not None:
+ for cpv in entry.cpv_list:
+ desc_cache[cpv] = entry.desc
+
+ if new_cp is not None:
+ yield cp_group[0].cp
+
+ def cp_all(self, sort=True):
+ """
+ Returns an ordered iterator instead of a list, so that search
+ results can be displayed incrementally.
+ """
+ if self._cp_map is None:
+ return self._init_index()
+ return iter(sorted(self._cp_map)) if sort else iter(self._cp_map)
+
+ def match(self, atom):
+ """
+ For performance reasons, only package name and version
+ constraints are supported, and the returned list is
+ unordered.
+ """
+ if not isinstance(atom, Atom):
+ atom = Atom(atom)
+ cp_list = self._cp_map.get(atom.cp)
+ if cp_list is None:
+ return []
+
+ if self._unindexed_cp_map is not None:
+ try:
+ unindexed = self._unindexed_cp_map.pop(atom.cp)
+ except KeyError:
+ pass
+ else:
+ cp_list.extend(self._portdb.cp_list(atom.cp,
+ mytree=unindexed))
+
+ if atom == atom.cp:
+ return cp_list[:]
+ else:
+ return portage.match_from_list(atom, cp_list)
+
+ def aux_get(self, cpv, attrs, myrepo=None):
+ if len(attrs) == 1 and attrs[0] == "DESCRIPTION":
+ try:
+ return [self._desc_cache[cpv]]
+ except KeyError:
+ pass
+ return self._portdb.aux_get(cpv, attrs)
diff --git a/lib/portage/dbapi/IndexedVardb.py b/lib/portage/dbapi/IndexedVardb.py
new file mode 100644
index 000000000..e2910b27f
--- /dev/null
+++ b/lib/portage/dbapi/IndexedVardb.py
@@ -0,0 +1,114 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage.dep import Atom
+from portage.exception import InvalidData
+from portage.versions import _pkg_str
+
+class IndexedVardb(object):
+ """
+ A vardbapi interface that sacrifices validation in order to
+ improve performance. It takes advantage of vardbapi._aux_cache,
+ which is backed by vdb_metadata.pickle. Since _aux_cache is
+ not updated for every single merge/unmerge (see
+ _aux_cache_threshold), the list of packages is obtained directly
+ from the real vardbapi instance. If a package is missing from
+ _aux_cache, then its metadata is obtained using the normal
+ (validated) vardbapi.aux_get method.
+
+ For performance reasons, the match method only supports package
+ name and version constraints.
+ """
+
+ # Match returns unordered results.
+ match_unordered = True
+
+ _copy_attrs = ('cpv_exists',
+ '_aux_cache_keys', '_cpv_sort_ascending')
+
+ def __init__(self, vardb):
+ self._vardb = vardb
+
+ for k in self._copy_attrs:
+ setattr(self, k, getattr(vardb, k))
+
+ self._cp_map = None
+
+ def cp_all(self, sort=True):
+ """
+ Returns an ordered iterator instead of a list, so that search
+ results can be displayed incrementally.
+ """
+ if self._cp_map is not None:
+ return iter(sorted(self._cp_map)) if sort else iter(self._cp_map)
+
+ delta_data = self._vardb._cache_delta.loadRace()
+ if delta_data is None:
+ return self._iter_cp_all()
+
+ self._vardb._cache_delta.applyDelta(delta_data)
+
+ self._cp_map = cp_map = {}
+ for cpv in self._vardb._aux_cache["packages"]:
+ try:
+ cpv = _pkg_str(cpv, db=self._vardb)
+ except InvalidData:
+ continue
+
+ cp_list = cp_map.get(cpv.cp)
+ if cp_list is None:
+ cp_list = []
+ cp_map[cpv.cp] = cp_list
+ cp_list.append(cpv)
+
+ return iter(sorted(self._cp_map)) if sort else iter(self._cp_map)
+
+ def _iter_cp_all(self):
+ self._cp_map = cp_map = {}
+ previous_cp = None
+ for cpv in self._vardb._iter_cpv_all(sort = True):
+ cp = portage.cpv_getkey(cpv)
+ if cp is not None:
+ cp_list = cp_map.get(cp)
+ if cp_list is None:
+ cp_list = []
+ cp_map[cp] = cp_list
+ cp_list.append(cpv)
+ if previous_cp is not None and \
+ previous_cp != cp:
+ yield previous_cp
+ previous_cp = cp
+
+ if previous_cp is not None:
+ yield previous_cp
+
+ def match(self, atom):
+ """
+ For performance reasons, only package name and version
+ constraints are supported, and the returned list is
+ unordered.
+ """
+ if not isinstance(atom, Atom):
+ atom = Atom(atom)
+ cp_list = self._cp_map.get(atom.cp)
+ if cp_list is None:
+ return []
+
+ if atom == atom.cp:
+ return cp_list[:]
+ else:
+ return portage.match_from_list(atom, cp_list)
+
+ def aux_get(self, cpv, attrs, myrepo=None):
+ pkg_data = self._vardb._aux_cache["packages"].get(cpv)
+ if not isinstance(pkg_data, tuple) or \
+ len(pkg_data) != 2 or \
+ not isinstance(pkg_data[1], dict):
+ pkg_data = None
+ if pkg_data is None:
+ # It may be missing from _aux_cache due to
+ # _aux_cache_threshold.
+ return self._vardb.aux_get(cpv, attrs)
+ metadata = pkg_data[1]
+ return [metadata.get(k, "") for k in attrs]
diff --git a/lib/portage/dbapi/_ContentsCaseSensitivityManager.py b/lib/portage/dbapi/_ContentsCaseSensitivityManager.py
new file mode 100644
index 000000000..c479ec971
--- /dev/null
+++ b/lib/portage/dbapi/_ContentsCaseSensitivityManager.py
@@ -0,0 +1,93 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+class ContentsCaseSensitivityManager(object):
+ """
+ Implicitly handles case transformations that are needed for
+ case-insensitive support.
+ """
+
+ def __init__(self, db):
+ """
+ @param db: A dblink instance
+ @type db: vartree.dblink
+ """
+ self.getcontents = db.getcontents
+
+ if "case-insensitive-fs" in db.settings.features:
+ self.unmap_key = self._unmap_key_case_insensitive
+ self.contains = self._contains_case_insensitive
+ self.keys = self._keys_case_insensitive
+
+ self._contents_insensitive = None
+ self._reverse_key_map = None
+
+ def clear_cache(self):
+ """
+ Clear all cached contents data.
+ """
+ self._contents_insensitive = None
+ self._reverse_key_map = None
+
+ def keys(self):
+ """
+ Iterate over all contents keys, which are transformed to
+ lowercase when appropriate, for use in case-insensitive
+ comparisons.
+ @rtype: iterator
+ @return: An iterator over all the contents keys
+ """
+ return iter(self.getcontents())
+
+ def contains(self, key):
+ """
+ Check if the given key is contained in the contents, using
+ case-insensitive comparison when appropriate.
+ @param key: A filesystem path (including ROOT and EPREFIX)
+ @type key: str
+ @rtype: bool
+ @return: True if the given key is contained in the contents,
+ False otherwise
+ """
+ return key in self.getcontents()
+
+ def unmap_key(self, key):
+ """
+ Map a key (from the keys method) back to its case-preserved
+ form.
+ @param key: A filesystem path (including ROOT and EPREFIX)
+ @type key: str
+ @rtype: str
+ @return: The case-preserved form of key
+ """
+ return key
+
+ def _case_insensitive_init(self):
+ """
+ Initialize data structures for case-insensitive support.
+ """
+ self._contents_insensitive = dict(
+ (k.lower(), v) for k, v in self.getcontents().items())
+ self._reverse_key_map = dict(
+ (k.lower(), k) for k in self.getcontents())
+
+ def _keys_case_insensitive(self):
+ if self._contents_insensitive is None:
+ self._case_insensitive_init()
+ return iter(self._contents_insensitive)
+
+ _keys_case_insensitive.__doc__ = keys.__doc__
+
+ def _contains_case_insensitive(self, key):
+ if self._contents_insensitive is None:
+ self._case_insensitive_init()
+ return key.lower() in self._contents_insensitive
+
+ _contains_case_insensitive.__doc__ = contains.__doc__
+
+ def _unmap_key_case_insensitive(self, key):
+ if self._reverse_key_map is None:
+ self._case_insensitive_init()
+ return self._reverse_key_map[key]
+
+ _unmap_key_case_insensitive.__doc__ = unmap_key.__doc__
diff --git a/lib/portage/dbapi/_MergeProcess.py b/lib/portage/dbapi/_MergeProcess.py
new file mode 100644
index 000000000..371550079
--- /dev/null
+++ b/lib/portage/dbapi/_MergeProcess.py
@@ -0,0 +1,287 @@
+# Copyright 2010-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import platform
+import signal
+import sys
+import traceback
+
+import fcntl
+import portage
+from portage import os, _unicode_decode
+from portage.util._ctypes import find_library
+import portage.elog.messages
+from portage.util._async.ForkProcess import ForkProcess
+
+class MergeProcess(ForkProcess):
+ """
+ Merge packages in a subprocess, so the Scheduler can run in the main
+ thread while files are moved or copied asynchronously.
+ """
+
+ __slots__ = ('mycat', 'mypkg', 'settings', 'treetype',
+ 'vartree', 'blockers', 'pkgloc', 'infloc', 'myebuild',
+ 'mydbapi', 'postinst_failure', 'prev_mtimes', 'unmerge',
+ '_elog_reader_fd',
+ '_buf', '_elog_keys', '_locked_vdb')
+
+ def _start(self):
+ # Portage should always call setcpv prior to this
+ # point, but here we have a fallback as a convenience
+ # for external API consumers. It's important that
+ # this metadata access happens in the parent process,
+ # since closing of file descriptors in the subprocess
+ # can prevent access to open database connections such
+ # as that used by the sqlite metadata cache module.
+ cpv = "%s/%s" % (self.mycat, self.mypkg)
+ settings = self.settings
+ if cpv != settings.mycpv or \
+ "EAPI" not in settings.configdict["pkg"]:
+ settings.reload()
+ settings.reset()
+ settings.setcpv(cpv, mydb=self.mydbapi)
+
+ # This caches the libc library lookup in the current
+ # process, so that it's only done once rather than
+ # for each child process.
+ if platform.system() == "Linux" and \
+ "merge-sync" in settings.features:
+ find_library("c")
+
+ # Inherit stdin by default, so that the pdb SIGUSR1
+ # handler is usable for the subprocess.
+ if self.fd_pipes is None:
+ self.fd_pipes = {}
+ else:
+ self.fd_pipes = self.fd_pipes.copy()
+ self.fd_pipes.setdefault(0, portage._get_stdin().fileno())
+
+ super(MergeProcess, self)._start()
+
+ def _lock_vdb(self):
+ """
+ Lock the vdb if FEATURES=parallel-install is NOT enabled,
+ otherwise do nothing. This is implemented with
+ vardbapi.lock(), which supports reentrance by the
+ subprocess that we spawn.
+ """
+ if "parallel-install" not in self.settings.features:
+ self.vartree.dbapi.lock()
+ self._locked_vdb = True
+
+ def _unlock_vdb(self):
+ """
+ Unlock the vdb if we hold a lock, otherwise do nothing.
+ """
+ if self._locked_vdb:
+ self.vartree.dbapi.unlock()
+ self._locked_vdb = False
+
+ def _elog_output_handler(self):
+ output = self._read_buf(self._elog_reader_fd)
+ if output:
+ lines = _unicode_decode(output).split('\n')
+ if len(lines) == 1:
+ self._buf += lines[0]
+ else:
+ lines[0] = self._buf + lines[0]
+ self._buf = lines.pop()
+ out = io.StringIO()
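+ # Each complete line carries one elog message of the form
+ # "<funcname> <phase> <key> <msg>", where <funcname> names a
+ # reporter function in portage.elog.messages (e.g. einfo, eerror).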
+ for line in lines:
+ funcname, phase, key, msg = line.split(' ', 3)
+ self._elog_keys.add(key)
+ reporter = getattr(portage.elog.messages, funcname)
+ reporter(msg, phase=phase, key=key, out=out)
+
+ elif output is not None: # EIO/POLLHUP
+ self.scheduler.remove_reader(self._elog_reader_fd)
+ os.close(self._elog_reader_fd)
+ self._elog_reader_fd = None
+ return False
+
+ def _spawn(self, args, fd_pipes, **kwargs):
+ """
+ Fork a subprocess, apply local settings, and call
+ dblink.merge(). TODO: Share code with ForkProcess.
+ """
+
+ elog_reader_fd, elog_writer_fd = os.pipe()
+
+ fcntl.fcntl(elog_reader_fd, fcntl.F_SETFL,
+ fcntl.fcntl(elog_reader_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(elog_reader_fd, fcntl.F_SETFD,
+ fcntl.fcntl(elog_reader_fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ blockers = None
+ if self.blockers is not None:
+ # Query blockers in the main process, since closing
+ # of file descriptors in the subprocess can prevent
+ # access to open database connections such as that
+ # used by the sqlite metadata cache module.
+ blockers = self.blockers()
+ mylink = portage.dblink(self.mycat, self.mypkg, settings=self.settings,
+ treetype=self.treetype, vartree=self.vartree,
+ blockers=blockers, pipe=elog_writer_fd)
+ fd_pipes[elog_writer_fd] = elog_writer_fd
+ self.scheduler.add_reader(elog_reader_fd, self._elog_output_handler)
+
+ # If a concurrent emerge process tries to install a package
+ # in the same SLOT as this one at the same time, there is an
+ # extremely unlikely chance that the COUNTER values will not be
+ # ordered correctly unless we lock the vdb here.
+ # FEATURES=parallel-install skips this lock in order to
+ # improve performance, and the risk is practically negligible.
+ self._lock_vdb()
+ counter = None
+ if not self.unmerge:
+ counter = self.vartree.dbapi.counter_tick()
+
+ parent_pid = os.getpid()
+ pid = None
+ try:
+ pid = os.fork()
+
+ if pid != 0:
+ if not isinstance(pid, int):
+ raise AssertionError(
+ "fork returned non-integer: %s" % (repr(pid),))
+
+ os.close(elog_writer_fd)
+ self._elog_reader_fd = elog_reader_fd
+ self._buf = ""
+ self._elog_keys = set()
+ # Discard messages which will be collected by the subprocess,
+ # in order to avoid duplicates (bug #446136).
+ portage.elog.messages.collect_messages(key=mylink.mycpv)
+
+ # invalidate relevant vardbapi caches
+ if self.vartree.dbapi._categories is not None:
+ self.vartree.dbapi._categories = None
+ self.vartree.dbapi._pkgs_changed = True
+ self.vartree.dbapi._clear_pkg_cache(mylink)
+
+ return [pid]
+
+ os.close(elog_reader_fd)
+
+ # Use default signal handlers in order to avoid problems
+ # killing subprocesses as reported in bug #353239.
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ # Unregister SIGCHLD handler and wakeup_fd for the parent
+ # process's event loop (bug 655656).
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+ try:
+ wakeup_fd = signal.set_wakeup_fd(-1)
+ if wakeup_fd > 0:
+ os.close(wakeup_fd)
+ except (ValueError, OSError):
+ pass
+
+ portage.locks._close_fds()
+ # We don't exec, so use close_fds=False
+ # (see _setup_pipes docstring).
+ portage.process._setup_pipes(fd_pipes, close_fds=False)
+
+ portage.output.havecolor = self.settings.get('NOCOLOR') \
+ not in ('yes', 'true')
+
+ # Avoid wasteful updates of the vdb cache.
+ self.vartree.dbapi._flush_cache_enabled = False
+
+ # In this subprocess we don't want PORTAGE_BACKGROUND to
+ # suppress stdout/stderr output since they are pipes. We
+ # also don't want to open PORTAGE_LOG_FILE, since it will
+ # already be opened by the parent process, so we set the
+ # "subprocess" value for use in conditional logging code
+ # involving PORTAGE_LOG_FILE.
+ if not self.unmerge:
+ # unmerge phases have separate logs
+ if self.settings.get("PORTAGE_BACKGROUND") == "1":
+ self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "1"
+ else:
+ self.settings["PORTAGE_BACKGROUND_UNMERGE"] = "0"
+ self.settings.backup_changes("PORTAGE_BACKGROUND_UNMERGE")
+ self.settings["PORTAGE_BACKGROUND"] = "subprocess"
+ self.settings.backup_changes("PORTAGE_BACKGROUND")
+
+ rval = 1
+ try:
+ if self.unmerge:
+ if not mylink.exists():
+ rval = os.EX_OK
+ elif mylink.unmerge(
+ ldpath_mtimes=self.prev_mtimes) == os.EX_OK:
+ mylink.lockdb()
+ try:
+ mylink.delete()
+ finally:
+ mylink.unlockdb()
+ rval = os.EX_OK
+ else:
+ rval = mylink.merge(self.pkgloc, self.infloc,
+ myebuild=self.myebuild, mydbapi=self.mydbapi,
+ prev_mtimes=self.prev_mtimes, counter=counter)
+ except SystemExit:
+ raise
+ except:
+ traceback.print_exc()
+ # os._exit() skips stderr flush!
+ sys.stderr.flush()
+ finally:
+ os._exit(rval)
+
+ finally:
+ if pid == 0 or (pid is None and os.getpid() != parent_pid):
+ # Call os._exit() from a finally block in order
+ # to suppress any finally blocks from earlier
+ # in the call stack (see bug #345289). This
+ # finally block has to be setup before the fork
+ # in order to avoid a race condition.
+ os._exit(1)
+
+ def _async_waitpid_cb(self, *args, **kwargs):
+ """
+ Override _async_waitpid_cb to perform cleanup that is
+ not necessarily idempotent.
+ """
+ ForkProcess._async_waitpid_cb(self, *args, **kwargs)
+ if self.returncode == portage.const.RETURNCODE_POSTINST_FAILURE:
+ self.postinst_failure = True
+ self.returncode = os.EX_OK
+
+ def _unregister(self):
+ """
+ Unregister from the scheduler and close open files.
+ """
+
+ if not self.unmerge:
+ # Populate the vardbapi cache for the new package
+ # while its inodes are still hot.
+ try:
+ self.vartree.dbapi.aux_get(self.settings.mycpv, ["EAPI"])
+ except KeyError:
+ pass
+
+ self._unlock_vdb()
+ if self._elog_reader_fd is not None:
+ self.scheduler.remove_reader(self._elog_reader_fd)
+ os.close(self._elog_reader_fd)
+ self._elog_reader_fd = None
+ if self._elog_keys is not None:
+ for key in self._elog_keys:
+ portage.elog.elog_process(key, self.settings,
+ phasefilter=("prerm", "postrm"))
+ self._elog_keys = None
+
+ super(MergeProcess, self)._unregister()
diff --git a/lib/portage/dbapi/_SyncfsProcess.py b/lib/portage/dbapi/_SyncfsProcess.py
new file mode 100644
index 000000000..767dc2061
--- /dev/null
+++ b/lib/portage/dbapi/_SyncfsProcess.py
@@ -0,0 +1,53 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.util._ctypes import find_library, LoadLibrary
+from portage.util._async.ForkProcess import ForkProcess
+
+class SyncfsProcess(ForkProcess):
+ """
+ Isolate ctypes usage in a subprocess, in order to avoid
+ potential problems with stale cached libraries as
+ described in bug #448858, comment #14 (also see
+ https://bugs.python.org/issue14597).
+ """
+
+ __slots__ = ('paths',)
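+ # A rough usage sketch (hypothetical caller; "loop" stands for the
+ # caller's event loop, passed as the scheduler):
+ #
+ # proc = SyncfsProcess(paths=["/usr", "/var"], scheduler=loop)
+ # proc.start()
+ # returncode = proc.wait()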
+
+ @staticmethod
+ def _get_syncfs():
+
+ filename = find_library("c")
+ if filename is not None:
+ library = LoadLibrary(filename)
+ if library is not None:
+ try:
+ return library.syncfs
+ except AttributeError:
+ pass
+
+ return None
+
+ def _run(self):
+
+ syncfs_failed = False
+ syncfs = self._get_syncfs()
+
+ if syncfs is not None:
+ for path in self.paths:
+ try:
+ fd = os.open(path, os.O_RDONLY)
+ except OSError:
+ pass
+ else:
+ try:
+ if syncfs(fd) != 0:
+ # Happens with PyPy (bug #446610)
+ syncfs_failed = True
+ finally:
+ os.close(fd)
+
+ if syncfs is None or syncfs_failed:
+ return 1
+ return os.EX_OK
diff --git a/lib/portage/dbapi/_VdbMetadataDelta.py b/lib/portage/dbapi/_VdbMetadataDelta.py
new file mode 100644
index 000000000..7461f87c5
--- /dev/null
+++ b/lib/portage/dbapi/_VdbMetadataDelta.py
@@ -0,0 +1,176 @@
+# Copyright 2014-2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import io
+import json
+import os
+
+from portage import _encodings
+from portage.util import atomic_ofstream
+from portage.versions import cpv_getkey
+
+class VdbMetadataDelta(object):
+
+ _format_version = "1"
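+ # The delta cache is a single JSON object on disk, e.g.
+ # (hypothetical contents):
+ # {"version": "1", "timestamp": 1514764800.0,
+ #  "deltas": [{"event": "add", "package": "app-misc/foo",
+ #              "version": "1.0", "slot": "0", "counter": "42"}]}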
+
+ def __init__(self, vardb):
+ self._vardb = vardb
+
+ def initialize(self, timestamp):
+ f = atomic_ofstream(self._vardb._cache_delta_filename, 'w',
+ encoding=_encodings['repo.content'], errors='strict')
+ json.dump({
+ "version": self._format_version,
+ "timestamp": timestamp
+ }, f, ensure_ascii=False)
+ f.close()
+
+ def load(self):
+
+ if not os.path.exists(self._vardb._aux_cache_filename):
+ # If the primary cache doesn't exist yet, then
+ # we can't record a delta against it.
+ return None
+
+ try:
+ with io.open(self._vardb._cache_delta_filename, 'r',
+ encoding=_encodings['repo.content'],
+ errors='strict') as f:
+ cache_obj = json.load(f)
+ except EnvironmentError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except Exception:
+ # Corrupt, or not json format.
+ pass
+ else:
+ try:
+ version = cache_obj["version"]
+ except KeyError:
+ pass
+ else:
+ # Verify that the format version is compatible,
+ # since a newer version of portage may have
+ # written an incompatible file.
+ if version == self._format_version:
+ try:
+ deltas = cache_obj["deltas"]
+ except KeyError:
+ cache_obj["deltas"] = deltas = []
+
+ if isinstance(deltas, list):
+ return cache_obj
+
+ return None
+
+ def loadRace(self):
+ """
+ This calls self.load() and validates the timestamp
+ against the currently loaded self._vardb._aux_cache. If a
+ concurrent update causes the timestamps to be inconsistent,
+ then it reloads the caches and tries one more time before
+ it aborts. In practice, the race is very unlikely, so
+ this will usually succeed on the first try.
+ """
+
+ tries = 2
+ while tries:
+ tries -= 1
+ cache_delta = self.load()
+ if cache_delta is not None and \
+ cache_delta.get("timestamp") != \
+ self._vardb._aux_cache.get("timestamp", False):
+ self._vardb._aux_cache_obj = None
+ else:
+ return cache_delta
+
+ return None
+
+ def recordEvent(self, event, cpv, slot, counter):
+
+ self._vardb.lock()
+ try:
+ deltas_obj = self.load()
+
+ if deltas_obj is None:
+ # We can't record meaningful deltas without
+ # a pre-existing state.
+ return
+
+ delta_node = {
+ "event": event,
+ "package": cpv.cp,
+ "version": cpv.version,
+ "slot": slot,
+ "counter": "%s" % counter
+ }
+
+ deltas_obj["deltas"].append(delta_node)
+
+ # Eliminate earlier nodes cancelled out by later nodes
+ # that have identical package and slot attributes.
+ filtered_list = []
+ slot_keys = set()
+ version_keys = set()
+ for delta_node in reversed(deltas_obj["deltas"]):
+ slot_key = (delta_node["package"],
+ delta_node["slot"])
+ version_key = (delta_node["package"],
+ delta_node["version"])
+ if not (slot_key in slot_keys or \
+ version_key in version_keys):
+ filtered_list.append(delta_node)
+ slot_keys.add(slot_key)
+ version_keys.add(version_key)
+
+ filtered_list.reverse()
+ deltas_obj["deltas"] = filtered_list
+
+ f = atomic_ofstream(self._vardb._cache_delta_filename,
+ mode='w', encoding=_encodings['repo.content'])
+ json.dump(deltas_obj, f, ensure_ascii=False)
+ f.close()
+
+ finally:
+ self._vardb.unlock()
+
+ def applyDelta(self, data):
+ packages = self._vardb._aux_cache["packages"]
+ deltas = {}
+ for delta in data["deltas"]:
+ cpv = delta["package"] + "-" + delta["version"]
+ deltas[cpv] = delta
+ event = delta["event"]
+ if event == "add":
+ # Use aux_get to populate the cache
+ # for this cpv.
+ if cpv not in packages:
+ try:
+ self._vardb.aux_get(cpv, ["DESCRIPTION"])
+ except KeyError:
+ pass
+ elif event == "remove":
+ packages.pop(cpv, None)
+
+ if deltas:
+ # Delete removed or replaced versions from affected slots
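+ # A cached entry is dropped when some delta refers to the same
+ # package name and SLOT but a different version, since that
+ # delta describes the instance which replaced it.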
+ for cached_cpv, (mtime, metadata) in list(packages.items()):
+ if cached_cpv in deltas:
+ continue
+
+ removed = False
+ for cpv, delta in deltas.items():
+ if (cached_cpv.startswith(delta["package"]) and
+ metadata.get("SLOT") == delta["slot"] and
+ cpv_getkey(cached_cpv) == delta["package"]):
+ removed = True
+ break
+
+ if removed:
+ del packages[cached_cpv]
+ del deltas[cpv]
+ if not deltas:
+ break
diff --git a/lib/portage/dbapi/__init__.py b/lib/portage/dbapi/__init__.py
new file mode 100644
index 000000000..6fca6090c
--- /dev/null
+++ b/lib/portage/dbapi/__init__.py
@@ -0,0 +1,443 @@
+# Copyright 1998-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = ["dbapi"]
+
+import functools
+import re
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.dbapi.dep_expand:dep_expand@_dep_expand',
+ 'portage.dep:Atom,match_from_list,_match_slot',
+ 'portage.output:colorize',
+ 'portage.util:cmp_sort_key,writemsg',
+ 'portage.versions:catsplit,catpkgsplit,vercmp,_pkg_str',
+)
+
+from portage.const import MERGING_IDENTIFIER
+
+from portage import os
+from portage import auxdbkeys
+from portage.eapi import _get_eapi_attrs
+from portage.exception import InvalidData
+from portage.localization import _
+from _emerge.Package import Package
+
+class dbapi(object):
+ _category_re = re.compile(r'^\w[-.+\w]*$', re.UNICODE)
+ _categories = None
+ _use_mutable = False
+ _known_keys = frozenset(x for x in auxdbkeys
+ if not x.startswith("UNUSED_0"))
+ _pkg_str_aux_keys = ("BUILD_TIME", "EAPI", "BUILD_ID",
+ "KEYWORDS", "SLOT", "repository")
+
+ def __init__(self):
+ pass
+
+ @property
+ def categories(self):
+ """
+ Use self.cp_all() to generate a category list. Mutable instances
+ can delete the self._categories attribute in cases when the cached
+ categories become invalid and need to be regenerated.
+ """
+ if self._categories is not None:
+ return self._categories
+ self._categories = tuple(sorted(set(catsplit(x)[0] \
+ for x in self.cp_all())))
+ return self._categories
+
+ def close_caches(self):
+ pass
+
+ def cp_list(self, cp, use_cache=1):
+ raise NotImplementedError(self)
+
+ @staticmethod
+ def _cmp_cpv(cpv1, cpv2):
+ result = vercmp(cpv1.version, cpv2.version)
+ if (result == 0 and cpv1.build_time is not None and
+ cpv2.build_time is not None):
+ result = ((cpv1.build_time > cpv2.build_time) -
+ (cpv1.build_time < cpv2.build_time))
+ return result
+
+ @staticmethod
+ def _cpv_sort_ascending(cpv_list):
+ """
+ Use this to sort self.cp_list() results in ascending
+ order. It sorts in place and returns None.
+ """
+ if len(cpv_list) > 1:
+ # If the cpv includes explicit -r0, it has to be preserved
+ # for consistency in findname and aux_get calls, so use a
+ # dict to map strings back to their original values.
+ cpv_list.sort(key=cmp_sort_key(dbapi._cmp_cpv))
+
+ def cpv_all(self):
+ """Return all CPVs in the db
+ Args:
+ None
+ Returns:
+ A list of Strings, 1 per CPV
+
+ This function relies on a subclass implementing cp_all(); that is why the hasattr check is there.
+ """
+
+ if not hasattr(self, "cp_all"):
+ raise NotImplementedError
+ cpv_list = []
+ for cp in self.cp_all():
+ cpv_list.extend(self.cp_list(cp))
+ return cpv_list
+
+ def cp_all(self, sort=False):
+ """ Implement this in a child class
+ Args:
+ sort - return sorted results
+ Returns:
+ A list of strings, 1 per CP in the datastore
+ """
+ raise NotImplementedError(self)
+
+ def aux_get(self, mycpv, mylist, myrepo=None):
+ """Return the metadata keys in mylist for mycpv
+ Args:
+ mycpv - "sys-apps/foo-1.0"
+ mylist - ["SLOT","DEPEND","HOMEPAGE"]
+ myrepo - The repository name.
+ Returns:
+ a list of results, in order of keys in mylist, such as:
+ ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or [] if mycpv not found'
+ """
+ raise NotImplementedError
+
+ def aux_update(self, cpv, metadata_updates):
+ """
+ Args:
+ cpv - "sys-apps/foo-1.0"
+ metadata_updates = { key : newvalue }
+ Returns:
+ None
+ """
+ raise NotImplementedError
+
+ def match(self, origdep, use_cache=1):
+ """Given a dependency, try to find packages that match
+ Args:
+ origdep - Depend atom
+ use_cache - Boolean indicating if we should use the cache or not
+ NOTE: Do we ever not want the cache?
+ Returns:
+ a list of packages that match origdep
+ """
+ mydep = _dep_expand(origdep, mydb=self, settings=self.settings)
+ return list(self._iter_match(mydep,
+ self.cp_list(mydep.cp, use_cache=use_cache)))
+
+ def _iter_match(self, atom, cpv_iter):
+ cpv_iter = iter(match_from_list(atom, cpv_iter))
+ if atom.repo:
+ cpv_iter = self._iter_match_repo(atom, cpv_iter)
+ if atom.slot:
+ cpv_iter = self._iter_match_slot(atom, cpv_iter)
+ if atom.unevaluated_atom.use:
+ cpv_iter = self._iter_match_use(atom, cpv_iter)
+ return cpv_iter
+
+ def _pkg_str(self, cpv, repo):
+ """
+ This is used to construct _pkg_str instances on-demand during
+ matching. If cpv is a _pkg_str instance with slot attribute,
+ then simply return it. Otherwise, fetch metadata and construct
+ a _pkg_str instance. This may raise KeyError or InvalidData.
+ """
+ try:
+ cpv.slot
+ except AttributeError:
+ pass
+ else:
+ return cpv
+
+ metadata = dict(zip(self._pkg_str_aux_keys,
+ self.aux_get(cpv, self._pkg_str_aux_keys, myrepo=repo)))
+
+ return _pkg_str(cpv, metadata=metadata, settings=self.settings, db=self)
+
+ def _iter_match_repo(self, atom, cpv_iter):
+ for cpv in cpv_iter:
+ try:
+ pkg_str = self._pkg_str(cpv, atom.repo)
+ except (KeyError, InvalidData):
+ pass
+ else:
+ if pkg_str.repo == atom.repo:
+ yield pkg_str
+
+ def _iter_match_slot(self, atom, cpv_iter):
+ for cpv in cpv_iter:
+ try:
+ pkg_str = self._pkg_str(cpv, atom.repo)
+ except (KeyError, InvalidData):
+ pass
+ else:
+ if _match_slot(atom, pkg_str):
+ yield pkg_str
+
+ def _iter_match_use(self, atom, cpv_iter):
+ """
+ 1) Check for required IUSE intersection (need implicit IUSE here).
+ 2) Check enabled/disabled flag states.
+ """
+
+ aux_keys = ["EAPI", "IUSE", "KEYWORDS", "SLOT", "USE", "repository"]
+ for cpv in cpv_iter:
+ try:
+ metadata = dict(zip(aux_keys,
+ self.aux_get(cpv, aux_keys, myrepo=atom.repo)))
+ except KeyError:
+ continue
+
+ try:
+ cpv.slot
+ except AttributeError:
+ try:
+ cpv = _pkg_str(cpv, metadata=metadata,
+ settings=self.settings)
+ except InvalidData:
+ continue
+
+ if not self._match_use(atom, cpv, metadata):
+ continue
+
+ yield cpv
+
+ def _repoman_iuse_implicit_cnstr(self, pkg, metadata):
+ """
+ In repoman's version of _iuse_implicit_cnstr, account for modifications
+ of the self.settings reference between calls.
+ """
+ eapi_attrs = _get_eapi_attrs(metadata["EAPI"])
+ if eapi_attrs.iuse_effective:
+ iuse_implicit_match = lambda flag: self.settings._iuse_effective_match(flag)
+ else:
+ iuse_implicit_match = lambda flag: self.settings._iuse_implicit_match(flag)
+ return iuse_implicit_match
+
+ def _iuse_implicit_cnstr(self, pkg, metadata):
+ """
+ Construct a callable that checks if a given USE flag should
+ be considered to be a member of the implicit IUSE for the
+ given package.
+
+ @param pkg: package
+ @type pkg: _pkg_str
+ @param metadata: package metadata
+ @type metadata: Mapping
+ @return: a callable that accepts a single USE flag argument,
+ and returns True only if the USE flag should be considered
+ to be a member of the implicit IUSE for the given package.
+ @rtype: callable
+ """
+ eapi_attrs = _get_eapi_attrs(metadata["EAPI"])
+ if eapi_attrs.iuse_effective:
+ iuse_implicit_match = self.settings._iuse_effective_match
+ else:
+ iuse_implicit_match = self.settings._iuse_implicit_match
+
+ if not self._use_mutable and eapi_attrs.iuse_effective:
+ # For built packages, it is desirable for the built USE setting to
+ # be independent of the profile's current IUSE_IMPLICIT state, since
+ # the profile's IUSE_IMPLICIT setting may have diverged. Therefore,
+ # any member of the built USE setting is considered to be a valid
+ # member of IUSE_EFFECTIVE. Note that the binary package may be
+ # remote, so it's only possible to rely on metadata that is available
+ # in the remote Packages file, and the IUSE_IMPLICIT header in the
+ # Packages file is vulnerable to mutation (see bug 640318).
+ #
+ # This behavior is only used for EAPIs that support IUSE_EFFECTIVE,
+ # since built USE settings for earlier EAPIs may contain a large
+ # number of irrelevant flags.
+ prof_iuse = iuse_implicit_match
+ enabled = frozenset(metadata["USE"].split()).__contains__
+ iuse_implicit_match = lambda flag: prof_iuse(flag) or enabled(flag)
+
+ return iuse_implicit_match
+
+ def _match_use(self, atom, pkg, metadata, ignore_profile=False):
+ iuse_implicit_match = self._iuse_implicit_cnstr(pkg, metadata)
+ usealiases = self.settings._use_manager.getUseAliases(pkg)
+ iuse = Package._iuse(None, metadata["IUSE"].split(), iuse_implicit_match, usealiases, metadata["EAPI"])
+
+ for x in atom.unevaluated_atom.use.required:
+ if iuse.get_real_flag(x) is None:
+ return False
+
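+ # Beyond the required-IUSE check above, the remaining checks
+ # depend on the kind of dbapi: for built packages
+ # (self._use_mutable is False) the recorded USE setting is
+ # validated against the atom's USE deps, while for global-config
+ # use (settings.local_config is False, e.g. repoman) profile
+ # usemask/useforce and use-default deps are checked instead.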
+ if atom.use is None:
+ pass
+
+ elif not self._use_mutable:
+ # Use IUSE to validate USE settings for built packages,
+ # in case the package manager that built this package
+ # failed to do that for some reason (or in case of
+ # data corruption). The enabled flags must be consistent
+ # with implicit IUSE, in order to avoid potential
+ # inconsistencies in USE dep matching (see bug #453400).
+ use = frozenset(x for x in metadata["USE"].split() if iuse.get_real_flag(x) is not None)
+ missing_enabled = frozenset(x for x in atom.use.missing_enabled if iuse.get_real_flag(x) is None)
+ missing_disabled = frozenset(x for x in atom.use.missing_disabled if iuse.get_real_flag(x) is None)
+ enabled = frozenset((iuse.get_real_flag(x) or x) for x in atom.use.enabled)
+ disabled = frozenset((iuse.get_real_flag(x) or x) for x in atom.use.disabled)
+
+ if enabled:
+ if any(x in enabled for x in missing_disabled):
+ return False
+ need_enabled = enabled.difference(use)
+ if need_enabled:
+ if any(x not in missing_enabled for x in need_enabled):
+ return False
+
+ if disabled:
+ if any(x in disabled for x in missing_enabled):
+ return False
+ need_disabled = disabled.intersection(use)
+ if need_disabled:
+ if any(x not in missing_disabled for x in need_disabled):
+ return False
+
+ elif not self.settings.local_config:
+ if not ignore_profile:
+ # Check masked and forced flags for repoman.
+ usemask = self.settings._getUseMask(pkg,
+ stable=self.settings._parent_stable)
+ if any(x in usemask for x in atom.use.enabled):
+ return False
+
+ useforce = self.settings._getUseForce(pkg,
+ stable=self.settings._parent_stable)
+ if any(x in useforce and x not in usemask
+ for x in atom.use.disabled):
+ return False
+
+ # Check unsatisfied use-default deps
+ if atom.use.enabled:
+ missing_disabled = frozenset(x for x in atom.use.missing_disabled if iuse.get_real_flag(x) is None)
+ if any(x in atom.use.enabled for x in missing_disabled):
+ return False
+ if atom.use.disabled:
+ missing_enabled = frozenset(x for x in atom.use.missing_enabled if iuse.get_real_flag(x) is None)
+ if any(x in atom.use.disabled for x in missing_enabled):
+ return False
+
+ return True
+
+ def invalidentry(self, mypath):
+ if "/" + MERGING_IDENTIFIER in mypath:
+ if os.path.exists(mypath):
+ writemsg(colorize("BAD", _("INCOMPLETE MERGE:"))+" %s\n" % mypath,
+ noiselevel=-1)
+ else:
+ writemsg("!!! Invalid db entry: %s\n" % mypath, noiselevel=-1)
+
+ def update_ents(self, updates, onProgress=None, onUpdate=None):
+ """
+ Update metadata of all packages for package moves.
+ @param updates: A list of move commands, or dict of {repo_name: list}
+ @type updates: list or dict
+ @param onProgress: A progress callback function
+ @type onProgress: a callable that takes 2 integer arguments: maxval and curval
+ @param onUpdate: A progress callback function called only
+ for packages that are modified by updates.
+ @type onUpdate: a callable that takes 2 integer arguments:
+ maxval and curval
+ """
+ cpv_all = self.cpv_all()
+ cpv_all.sort()
+ maxval = len(cpv_all)
+ aux_get = self.aux_get
+ aux_update = self.aux_update
+ update_keys = Package._dep_keys
+ meta_keys = update_keys + self._pkg_str_aux_keys
+ repo_dict = None
+ if isinstance(updates, dict):
+ repo_dict = updates
+ if onUpdate:
+ onUpdate(maxval, 0)
+ if onProgress:
+ onProgress(maxval, 0)
+ for i, cpv in enumerate(cpv_all):
+ try:
+ metadata = dict(zip(meta_keys, aux_get(cpv, meta_keys)))
+ except KeyError:
+ continue
+ try:
+ pkg = _pkg_str(cpv, metadata=metadata, settings=self.settings)
+ except InvalidData:
+ continue
+ metadata = dict((k, metadata[k]) for k in update_keys)
+ if repo_dict is None:
+ updates_list = updates
+ else:
+ try:
+ updates_list = repo_dict[pkg.repo]
+ except KeyError:
+ try:
+ updates_list = repo_dict['DEFAULT']
+ except KeyError:
+ continue
+
+ if not updates_list:
+ continue
+
+ metadata_updates = \
+ portage.update_dbentries(updates_list, metadata, parent=pkg)
+ if metadata_updates:
+ aux_update(cpv, metadata_updates)
+ if onUpdate:
+ onUpdate(maxval, i+1)
+ if onProgress:
+ onProgress(maxval, i+1)
+
+ def move_slot_ent(self, mylist, repo_match=None):
+ """This function takes a sequence:
+ Args:
+ mylist: a slotmove command sequence of the form (command, atom, originalslot, newslot)
+ repo_match: callable that takes single repo_name argument
+ and returns True if the update should be applied
+ Returns:
+ The number of slotmoves performed by this function
+ """
+ atom = mylist[1]
+ origslot = mylist[2]
+ newslot = mylist[3]
+
+ try:
+ atom.with_slot
+ except AttributeError:
+ atom = Atom(atom).with_slot(origslot)
+ else:
+ atom = atom.with_slot(origslot)
+
+ origmatches = self.match(atom)
+ moves = 0
+ if not origmatches:
+ return moves
+ for mycpv in origmatches:
+ try:
+ mycpv = self._pkg_str(mycpv, atom.repo)
+ except (KeyError, InvalidData):
+ continue
+ if repo_match is not None and not repo_match(mycpv.repo):
+ continue
+ moves += 1
+ if "/" not in newslot and \
+ mycpv.sub_slot and \
+ mycpv.sub_slot not in (mycpv.slot, newslot):
+ newslot = "%s/%s" % (newslot, mycpv.sub_slot)
+ mydata = {"SLOT": newslot+"\n"}
+ self.aux_update(mycpv, mydata)
+ return moves
diff --git a/lib/portage/dbapi/_expand_new_virt.py b/lib/portage/dbapi/_expand_new_virt.py
new file mode 100644
index 000000000..9aa603d11
--- /dev/null
+++ b/lib/portage/dbapi/_expand_new_virt.py
@@ -0,0 +1,81 @@
+# Copyright 2011-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import portage
+from portage.dep import Atom, _get_useflag_re
+from portage.eapi import _get_eapi_attrs
+
+def expand_new_virt(vardb, atom):
+ """
+ Iterate over the recursively expanded RDEPEND atoms of
+ a new-style virtual. If atom is not a new-style virtual
+ or it does not match an installed package then it is
+ yielded without any expansion.
+ """
+ if not isinstance(atom, Atom):
+ atom = Atom(atom)
+
+ if not atom.cp.startswith("virtual/"):
+ yield atom
+ return
+
+ traversed = set()
+ stack = [atom]
+
+ while stack:
+ atom = stack.pop()
+ if atom.blocker or \
+ not atom.cp.startswith("virtual/"):
+ yield atom
+ continue
+
+ matches = vardb.match(atom)
+ if not (matches and matches[-1].startswith("virtual/")):
+ yield atom
+ continue
+
+ virt_cpv = matches[-1]
+ if virt_cpv in traversed:
+ continue
+
+ traversed.add(virt_cpv)
+ eapi, iuse, rdepend, use = vardb.aux_get(virt_cpv,
+ ["EAPI", "IUSE", "RDEPEND", "USE"])
+ if not portage.eapi_is_supported(eapi):
+ yield atom
+ continue
+
+ eapi_attrs = _get_eapi_attrs(eapi)
+ # Validate IUSE and USE, for early detection of vardb corruption.
+ useflag_re = _get_useflag_re(eapi)
+ valid_iuse = []
+ for x in iuse.split():
+ if x[:1] in ("+", "-"):
+ x = x[1:]
+ if useflag_re.match(x) is not None:
+ valid_iuse.append(x)
+ valid_iuse = frozenset(valid_iuse)
+
+ if eapi_attrs.iuse_effective:
+ iuse_implicit_match = vardb.settings._iuse_effective_match
+ else:
+ iuse_implicit_match = vardb.settings._iuse_implicit_match
+
+ valid_use = []
+ for x in use.split():
+ if x in valid_iuse or iuse_implicit_match(x):
+ valid_use.append(x)
+ valid_use = frozenset(valid_use)
+
+ success, atoms = portage.dep_check(rdepend,
+ None, vardb.settings, myuse=valid_use,
+ myroot=vardb.settings['EROOT'],
+ trees={vardb.settings['EROOT']:{"porttree":vardb.vartree,
+ "vartree":vardb.vartree}})
+
+ if success:
+ stack.extend(atoms)
+ else:
+ yield atom
diff --git a/lib/portage/dbapi/_similar_name_search.py b/lib/portage/dbapi/_similar_name_search.py
new file mode 100644
index 000000000..b6e4a1fbe
--- /dev/null
+++ b/lib/portage/dbapi/_similar_name_search.py
@@ -0,0 +1,57 @@
+# Copyright 2011-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import difflib
+
+from portage.versions import catsplit
+
+def similar_name_search(dbs, atom):
+
+ cp_lower = atom.cp.lower()
+ cat, pkg = catsplit(cp_lower)
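+ # The "null" category is a placeholder meaning that the original
+ # atom had no category part, so fall back to matching on the
+ # package name alone.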
+ if cat == "null":
+ cat = None
+
+ all_cp = set()
+ for db in dbs:
+ all_cp.update(db.cp_all())
+
+ # discard dir containing no ebuilds
+ all_cp.discard(atom.cp)
+
+ orig_cp_map = {}
+ for cp_orig in all_cp:
+ orig_cp_map.setdefault(cp_orig.lower(), []).append(cp_orig)
+ all_cp = set(orig_cp_map)
+
+ if cat:
+ matches = difflib.get_close_matches(cp_lower, all_cp)
+ else:
+ pkg_to_cp = {}
+ for other_cp in list(all_cp):
+ other_pkg = catsplit(other_cp)[1]
+ if other_pkg == pkg:
+ # Check for non-identical package that
+ # differs only by upper/lower case.
+ identical = True
+ for cp_orig in orig_cp_map[other_cp]:
+ if catsplit(cp_orig)[1] != \
+ catsplit(atom.cp)[1]:
+ identical = False
+ break
+ if identical:
+ # discard dir containing no ebuilds
+ all_cp.discard(other_cp)
+ continue
+ pkg_to_cp.setdefault(other_pkg, set()).add(other_cp)
+
+ pkg_matches = difflib.get_close_matches(pkg, pkg_to_cp)
+ matches = []
+ for pkg_match in pkg_matches:
+ matches.extend(pkg_to_cp[pkg_match])
+
+ matches_orig_case = []
+ for cp in matches:
+ matches_orig_case.extend(orig_cp_map[cp])
+
+ return matches_orig_case
diff --git a/lib/portage/dbapi/bintree.py b/lib/portage/dbapi/bintree.py
new file mode 100644
index 000000000..9c2d877e7
--- /dev/null
+++ b/lib/portage/dbapi/bintree.py
@@ -0,0 +1,1710 @@
+# Copyright 1998-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = ["bindbapi", "binarytree"]
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.checksum:get_valid_checksum_keys,perform_multiple_checksums,' + \
+ 'verify_all,_apply_hash_filter,_hash_filter',
+ 'portage.dbapi.dep_expand:dep_expand',
+ 'portage.dep:dep_getkey,isjustname,isvalidatom,match_from_list',
+ 'portage.output:EOutput,colorize',
+ 'portage.locks:lockfile,unlockfile',
+ 'portage.package.ebuild.fetch:_check_distfile,_hide_url_passwd',
+ 'portage.update:update_dbentries',
+ 'portage.util:atomic_ofstream,ensure_dirs,normalize_path,' + \
+ 'writemsg,writemsg_stdout',
+ 'portage.util.path:first_existing',
+ 'portage.util._urlopen:urlopen@_urlopen,have_pep_476@_have_pep_476',
+ 'portage.versions:best,catpkgsplit,catsplit,_pkg_str',
+)
+
+from portage.cache.mappings import slot_dict_class
+from portage.const import CACHE_PATH, SUPPORTED_XPAK_EXTENSIONS
+from portage.dbapi.virtual import fakedbapi
+from portage.dep import Atom, use_reduce, paren_enclose
+from portage.exception import AlarmSignal, InvalidData, InvalidPackageName, \
+ ParseError, PermissionDenied, PortageException
+from portage.localization import _
+from portage.package.ebuild.profile_iuse import iter_iuse_vars
+from portage import _movefile
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+
+import codecs
+import errno
+import io
+import stat
+import subprocess
+import sys
+import tempfile
+import textwrap
+import time
+import traceback
+import warnings
+from gzip import GzipFile
+from itertools import chain
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ _unicode = str
+ basestring = str
+ long = int
+else:
+ _unicode = unicode
+
+class UseCachedCopyOfRemoteIndex(Exception):
+ # If the local copy is recent enough
+ # then fetching the remote index can be skipped.
+ pass
+
+class bindbapi(fakedbapi):
+ _known_keys = frozenset(list(fakedbapi._known_keys) + \
+ ["CHOST", "repository", "USE"])
+ def __init__(self, mybintree=None, **kwargs):
+ # Always enable multi_instance mode for bindbapi indexing. This
+ # does not affect the local PKGDIR file layout, since that is
+ # controlled independently by FEATURES=binpkg-multi-instance.
+ # The multi_instance mode is useful for the following reasons:
+ # * binary packages with the same cpv from multiple binhosts
+ # can be considered simultaneously
+ # * if binpkg-multi-instance is disabled, it's still possible
+ # to properly access a PKGDIR which has binpkg-multi-instance
+ # layout (or mixed layout)
+ fakedbapi.__init__(self, exclusive_slots=False,
+ multi_instance=True, **kwargs)
+ self.bintree = mybintree
+ self.move_ent = mybintree.move_ent
+ # Selectively cache metadata in order to optimize dep matching.
+ self._aux_cache_keys = set(
+ ["BDEPEND", "BUILD_ID", "BUILD_TIME", "CHOST", "DEFINED_PHASES",
+ "DEPEND", "EAPI", "HDEPEND", "IUSE", "KEYWORDS",
+ "LICENSE", "MD5", "PDEPEND", "PROPERTIES",
+ "PROVIDES", "RDEPEND", "repository", "REQUIRES", "RESTRICT",
+ "SIZE", "SLOT", "USE", "_mtime_"
+ ])
+ self._aux_cache_slot_dict = slot_dict_class(self._aux_cache_keys)
+ self._aux_cache = {}
+
+ @property
+ def writable(self):
+ """
+ Check if PKGDIR is writable, or permissions are sufficient
+ to create it if it does not exist yet.
+ @rtype: bool
+ @return: True if PKGDIR is writable or can be created,
+ False otherwise
+ """
+ return os.access(first_existing(self.bintree.pkgdir), os.W_OK)
+
+ def match(self, *pargs, **kwargs):
+ if self.bintree and not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.match(self, *pargs, **kwargs)
+
+ def cpv_exists(self, cpv, myrepo=None):
+ if self.bintree and not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.cpv_exists(self, cpv)
+
+ def cpv_inject(self, cpv, **kwargs):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ fakedbapi.cpv_inject(self, cpv,
+ metadata=cpv._metadata, **kwargs)
+
+ def cpv_remove(self, cpv):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ fakedbapi.cpv_remove(self, cpv)
+
+ def aux_get(self, mycpv, wants, myrepo=None):
+ if self.bintree and not self.bintree.populated:
+ self.bintree.populate()
+ # Support plain string for backward compatibility with API
+ # consumers (including portageq, which passes in a cpv from
+ # a command-line argument).
+ instance_key = self._instance_key(mycpv,
+ support_string=True)
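+ # If every requested (known) key is covered by the aux cache,
+ # the in-memory index can answer the query without reading the
+ # binary package's xpak data.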
+ if not self._known_keys.intersection(
+ wants).difference(self._aux_cache_keys):
+ aux_cache = self.cpvdict[instance_key]
+ if aux_cache is not None:
+ return [aux_cache.get(x, "") for x in wants]
+ mysplit = mycpv.split("/")
+ mylist = []
+ if not self.bintree._remotepkgs or \
+ not self.bintree.isremote(mycpv):
+ try:
+ tbz2_path = self.bintree._pkg_paths[instance_key]
+ except KeyError:
+ raise KeyError(mycpv)
+ tbz2_path = os.path.join(self.bintree.pkgdir, tbz2_path)
+ try:
+ st = os.lstat(tbz2_path)
+ except OSError:
+ raise KeyError(mycpv)
+ metadata_bytes = portage.xpak.tbz2(tbz2_path).get_data()
+ def getitem(k):
+ if k == "_mtime_":
+ return _unicode(st[stat.ST_MTIME])
+ elif k == "SIZE":
+ return _unicode(st.st_size)
+ v = metadata_bytes.get(_unicode_encode(k,
+ encoding=_encodings['repo.content'],
+ errors='backslashreplace'))
+ if v is not None:
+ v = _unicode_decode(v,
+ encoding=_encodings['repo.content'], errors='replace')
+ return v
+ else:
+ getitem = self.cpvdict[instance_key].get
+ mydata = {}
+ mykeys = wants
+ for x in mykeys:
+ myval = getitem(x)
+ # myval is None if the key doesn't exist
+ # or the tbz2 is corrupt.
+ if myval:
+ mydata[x] = " ".join(myval.split())
+
+ if not mydata.setdefault('EAPI', '0'):
+ mydata['EAPI'] = '0'
+
+ return [mydata.get(x, '') for x in wants]
+
+ def aux_update(self, cpv, values):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ build_id = None
+ try:
+ build_id = cpv.build_id
+ except AttributeError:
+ if self.bintree._multi_instance:
+ # The cpv.build_id attribute is required if we are in
+ # multi-instance mode, since otherwise we won't know
+ # which instance to update.
+ raise
+ else:
+ cpv = self._instance_key(cpv, support_string=True)[0]
+ build_id = cpv.build_id
+
+ tbz2path = self.bintree.getname(cpv)
+ if not os.path.exists(tbz2path):
+ raise KeyError(cpv)
+ mytbz2 = portage.xpak.tbz2(tbz2path)
+ mydata = mytbz2.get_data()
+
+ for k, v in values.items():
+ k = _unicode_encode(k,
+ encoding=_encodings['repo.content'], errors='backslashreplace')
+ v = _unicode_encode(v,
+ encoding=_encodings['repo.content'], errors='backslashreplace')
+ mydata[k] = v
+
+ for k, v in list(mydata.items()):
+ if not v:
+ del mydata[k]
+ mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
+ # inject will clear stale caches via cpv_inject.
+ self.bintree.inject(cpv, filename=tbz2path)
+
+ def cp_list(self, *pargs, **kwargs):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.cp_list(self, *pargs, **kwargs)
+
+ def cp_all(self, sort=False):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.cp_all(self, sort=sort)
+
+ def cpv_all(self):
+ if not self.bintree.populated:
+ self.bintree.populate()
+ return fakedbapi.cpv_all(self)
+
+ def getfetchsizes(self, pkg):
+ """
+ This will raise MissingSignature if SIZE signature is not available,
+ or InvalidSignature if SIZE signature is invalid.
+ """
+
+ if not self.bintree.populated:
+ self.bintree.populate()
+
+ pkg = getattr(pkg, 'cpv', pkg)
+
+ filesdict = {}
+ if not self.bintree.isremote(pkg):
+ pass
+ else:
+ metadata = self.bintree._remotepkgs[self._instance_key(pkg)]
+ try:
+ size = int(metadata["SIZE"])
+ except KeyError:
+ raise portage.exception.MissingSignature("SIZE")
+ except ValueError:
+ raise portage.exception.InvalidSignature(
+ "SIZE: %s" % metadata["SIZE"])
+ else:
+ filesdict[os.path.basename(self.bintree.getname(pkg))] = size
+
+ return filesdict
+
+class binarytree(object):
+ "this tree scans for a list of all packages available in PKGDIR"
+ def __init__(self, _unused=DeprecationWarning, pkgdir=None,
+ virtual=DeprecationWarning, settings=None):
+
+ if pkgdir is None:
+ raise TypeError("pkgdir parameter is required")
+
+ if settings is None:
+ raise TypeError("settings parameter is required")
+
+ if _unused is not DeprecationWarning:
+ warnings.warn("The first parameter of the "
+ "portage.dbapi.bintree.binarytree"
+ " constructor is now unused. Instead "
+ "settings['ROOT'] is used.",
+ DeprecationWarning, stacklevel=2)
+
+ if virtual is not DeprecationWarning:
+ warnings.warn("The 'virtual' parameter of the "
+ "portage.dbapi.bintree.binarytree"
+ " constructor is unused",
+ DeprecationWarning, stacklevel=2)
+
+ if True:
+ self.pkgdir = normalize_path(pkgdir)
+ # NOTE: Even if binpkg-multi-instance is disabled, it's
+ # still possible to access a PKGDIR which uses the
+ # binpkg-multi-instance layout (or mixed layout).
+ self._multi_instance = ("binpkg-multi-instance" in
+ settings.features)
+ if self._multi_instance:
+ self._allocate_filename = self._allocate_filename_multi
+ self.dbapi = bindbapi(self, settings=settings)
+ self.update_ents = self.dbapi.update_ents
+ self.move_slot_ent = self.dbapi.move_slot_ent
+ self.populated = 0
+ self.tree = {}
+ self._remote_has_index = False
+ self._remotepkgs = None # remote metadata indexed by cpv
+ self.invalids = []
+ self.settings = settings
+ self._pkg_paths = {}
+ self._populating = False
+ self._all_directory = os.path.isdir(
+ os.path.join(self.pkgdir, "All"))
+ self._pkgindex_version = 0
+ self._pkgindex_hashes = ["MD5","SHA1"]
+ self._pkgindex_file = os.path.join(self.pkgdir, "Packages")
+ self._pkgindex_keys = self.dbapi._aux_cache_keys.copy()
+ self._pkgindex_keys.update(["CPV", "SIZE"])
+ self._pkgindex_aux_keys = \
+ ["BASE_URI", "BDEPEND", "BUILD_ID", "BUILD_TIME", "CHOST",
+ "DEFINED_PHASES", "DEPEND", "DESCRIPTION", "EAPI",
+ "HDEPEND", "IUSE", "KEYWORDS", "LICENSE", "PDEPEND",
+ "PKGINDEX_URI", "PROPERTIES", "PROVIDES",
+ "RDEPEND", "repository", "REQUIRES", "RESTRICT",
+ "SIZE", "SLOT", "USE"]
+ self._pkgindex_aux_keys = list(self._pkgindex_aux_keys)
+ self._pkgindex_use_evaluated_keys = \
+ ("BDEPEND", "DEPEND", "HDEPEND", "LICENSE", "RDEPEND",
+ "PDEPEND", "PROPERTIES", "RESTRICT")
+ self._pkgindex_header = None
+ self._pkgindex_header_keys = set([
+ "ACCEPT_KEYWORDS", "ACCEPT_LICENSE",
+ "ACCEPT_PROPERTIES", "ACCEPT_RESTRICT", "CBUILD",
+ "CONFIG_PROTECT", "CONFIG_PROTECT_MASK", "FEATURES",
+ "GENTOO_MIRRORS", "INSTALL_MASK", "IUSE_IMPLICIT", "USE",
+ "USE_EXPAND", "USE_EXPAND_HIDDEN", "USE_EXPAND_IMPLICIT",
+ "USE_EXPAND_UNPREFIXED"])
+ self._pkgindex_default_pkg_data = {
+ "BDEPEND" : "",
+ "BUILD_ID" : "",
+ "BUILD_TIME" : "",
+ "DEFINED_PHASES" : "",
+ "DEPEND" : "",
+ "EAPI" : "0",
+ "HDEPEND" : "",
+ "IUSE" : "",
+ "KEYWORDS": "",
+ "LICENSE" : "",
+ "PATH" : "",
+ "PDEPEND" : "",
+ "PROPERTIES" : "",
+ "PROVIDES": "",
+ "RDEPEND" : "",
+ "REQUIRES": "",
+ "RESTRICT": "",
+ "SLOT" : "0",
+ "USE" : "",
+ }
+ self._pkgindex_inherited_keys = ["CHOST", "repository"]
+
+ # Populate the header with appropriate defaults.
+ self._pkgindex_default_header_data = {
+ "CHOST" : self.settings.get("CHOST", ""),
+ "repository" : "",
+ }
+
+ self._pkgindex_translated_keys = (
+ ("DESCRIPTION" , "DESC"),
+ ("_mtime_" , "MTIME"),
+ ("repository" , "REPO"),
+ )
+
+ self._pkgindex_allowed_pkg_keys = set(chain(
+ self._pkgindex_keys,
+ self._pkgindex_aux_keys,
+ self._pkgindex_hashes,
+ self._pkgindex_default_pkg_data,
+ self._pkgindex_inherited_keys,
+ chain(*self._pkgindex_translated_keys)
+ ))
+
+ @property
+ def root(self):
+ warnings.warn("The root attribute of "
+ "portage.dbapi.bintree.binarytree"
+ " is deprecated. Use "
+ "settings['ROOT'] instead.",
+ DeprecationWarning, stacklevel=3)
+ return self.settings['ROOT']
+
+ def move_ent(self, mylist, repo_match=None):
+ if not self.populated:
+ self.populate()
+ origcp = mylist[1]
+ newcp = mylist[2]
+ # sanity check
+ for atom in (origcp, newcp):
+ if not isjustname(atom):
+ raise InvalidPackageName(_unicode(atom))
+ mynewcat = catsplit(newcp)[0]
+ origmatches=self.dbapi.cp_list(origcp)
+ moves = 0
+ if not origmatches:
+ return moves
+ for mycpv in origmatches:
+ try:
+ mycpv = self.dbapi._pkg_str(mycpv, None)
+ except (KeyError, InvalidData):
+ continue
+ mycpv_cp = portage.cpv_getkey(mycpv)
+ if mycpv_cp != origcp:
+ # Ignore PROVIDE virtual match.
+ continue
+ if repo_match is not None \
+ and not repo_match(mycpv.repo):
+ continue
+
+ # Use isvalidatom() to check if this move is valid for the
+ # EAPI (characters allowed in package names may vary).
+ if not isvalidatom(newcp, eapi=mycpv.eapi):
+ continue
+
+ mynewcpv = mycpv.replace(mycpv_cp, _unicode(newcp), 1)
+ myoldpkg = catsplit(mycpv)[1]
+ mynewpkg = catsplit(mynewcpv)[1]
+
+ if (mynewpkg != myoldpkg) and os.path.exists(self.getname(mynewcpv)):
+ writemsg(_("!!! Cannot update binary: Destination exists.\n"),
+ noiselevel=-1)
+ writemsg("!!! "+mycpv+" -> "+mynewcpv+"\n", noiselevel=-1)
+ continue
+
+ tbz2path = self.getname(mycpv)
+ if os.path.exists(tbz2path) and not os.access(tbz2path,os.W_OK):
+ writemsg(_("!!! Cannot update readonly binary: %s\n") % mycpv,
+ noiselevel=-1)
+ continue
+
+ moves += 1
+ mytbz2 = portage.xpak.tbz2(tbz2path)
+ mydata = mytbz2.get_data()
+ updated_items = update_dbentries([mylist], mydata, parent=mycpv)
+ mydata.update(updated_items)
+ mydata[b'PF'] = \
+ _unicode_encode(mynewpkg + "\n",
+ encoding=_encodings['repo.content'])
+ mydata[b'CATEGORY'] = \
+ _unicode_encode(mynewcat + "\n",
+ encoding=_encodings['repo.content'])
+ if mynewpkg != myoldpkg:
+ ebuild_data = mydata.pop(_unicode_encode(myoldpkg + '.ebuild',
+ encoding=_encodings['repo.content']), None)
+ if ebuild_data is not None:
+ mydata[_unicode_encode(mynewpkg + '.ebuild',
+ encoding=_encodings['repo.content'])] = ebuild_data
+
+ mytbz2.recompose_mem(portage.xpak.xpak_mem(mydata))
+
+ self.dbapi.cpv_remove(mycpv)
+ del self._pkg_paths[self.dbapi._instance_key(mycpv)]
+ metadata = self.dbapi._aux_cache_slot_dict()
+ for k in self.dbapi._aux_cache_keys:
+ v = mydata.get(_unicode_encode(k))
+ if v is not None:
+ v = _unicode_decode(v)
+ metadata[k] = " ".join(v.split())
+ mynewcpv = _pkg_str(mynewcpv, metadata=metadata, db=self.dbapi)
+ new_path = self.getname(mynewcpv)
+ self._pkg_paths[
+ self.dbapi._instance_key(mynewcpv)] = new_path[len(self.pkgdir)+1:]
+ if new_path != tbz2path:
+ self._ensure_dir(os.path.dirname(new_path))
+ _movefile(tbz2path, new_path, mysettings=self.settings)
+ self.inject(mynewcpv)
+
+ return moves
+
+ def prevent_collision(self, cpv):
+ warnings.warn("The "
+ "portage.dbapi.bintree.binarytree.prevent_collision "
+ "method is deprecated.",
+ DeprecationWarning, stacklevel=2)
+
+ def _ensure_dir(self, path):
+ """
+ Create the specified directory. Also, copy gid and group mode
+ bits from self.pkgdir if possible.
+ @param path: Absolute path of the directory to be created.
+ @type path: String
+ """
+ try:
+ pkgdir_st = os.stat(self.pkgdir)
+ except OSError:
+ ensure_dirs(path)
+ return
+ pkgdir_gid = pkgdir_st.st_gid
+ pkgdir_grp_mode = 0o2070 & pkgdir_st.st_mode
+ try:
+ ensure_dirs(path, gid=pkgdir_gid, mode=pkgdir_grp_mode, mask=0)
+ except PortageException:
+ if not os.path.isdir(path):
+ raise
+
+ def _file_permissions(self, path):
+ try:
+ pkgdir_st = os.stat(self.pkgdir)
+ except OSError:
+ pass
+ else:
+ pkgdir_gid = pkgdir_st.st_gid
+ pkgdir_grp_mode = 0o0060 & pkgdir_st.st_mode
+ try:
+ portage.util.apply_permissions(path, gid=pkgdir_gid,
+ mode=pkgdir_grp_mode, mask=0)
+ except PortageException:
+ pass
+
+ def populate(self, getbinpkgs=False, getbinpkg_refresh=True):
+ """
+ Populates the binarytree with package metadata.
+
+ @param getbinpkgs: include remote packages
+ @type getbinpkgs: bool
+ @param getbinpkg_refresh: attempt to refresh the cache
+ of remote package metadata if getbinpkgs is also True
+ @type getbinpkg_refresh: bool
+ """
+
+ if self._populating:
+ return
+
+ if not os.path.isdir(self.pkgdir) and not getbinpkgs:
+ self.populated = True
+ return
+
+ # Clear all caches in case populate is called multiple times
+ # as may be the case when _global_updates calls populate()
+ # prior to performing package moves since it only wants to
+ # operate on local packages (getbinpkgs=0).
+ self._remotepkgs = None
+
+ self._populating = True
+ try:
+ update_pkgindex = self._populate_local()
+
+ if update_pkgindex and self.dbapi.writable:
+ # If the Packages file needs to be updated, then _populate_local
+ # needs to be called once again while the file is locked, so
+ # that changes made by a concurrent process cannot be lost. This
+ # case is avoided when possible, in order to minimize lock
+ # contention.
+ pkgindex_lock = None
+ try:
+ pkgindex_lock = lockfile(self._pkgindex_file,
+ wantnewlockfile=True)
+ update_pkgindex = self._populate_local()
+ if update_pkgindex:
+ self._pkgindex_write(update_pkgindex)
+ finally:
+ if pkgindex_lock:
+ unlockfile(pkgindex_lock)
+
+ if getbinpkgs:
+ if not self.settings.get("PORTAGE_BINHOST"):
+ writemsg(_("!!! PORTAGE_BINHOST unset, but use is requested.\n"),
+ noiselevel=-1)
+ else:
+ self._populate_remote(getbinpkg_refresh=getbinpkg_refresh)
+
+ finally:
+ self._populating = False
+
+ self.populated = True
+
+ def _populate_local(self):
+ self.dbapi.clear()
+ _instance_key = self.dbapi._instance_key
+ # In order to minimize disk I/O, we never compute digests here.
+ # Therefore we exclude hashes from the minimum_keys, so that
+ # the Packages file will not be needlessly re-written due to
+ # missing digests.
+ minimum_keys = self._pkgindex_keys.difference(self._pkgindex_hashes)
+ if True:
+ pkg_paths = {}
+ self._pkg_paths = pkg_paths
+ dir_files = {}
+ for parent, dir_names, file_names in os.walk(self.pkgdir):
+ relative_parent = parent[len(self.pkgdir)+1:]
+ dir_files[relative_parent] = file_names
+
+ pkgindex = self._load_pkgindex()
+ if not self._pkgindex_version_supported(pkgindex):
+ pkgindex = self._new_pkgindex()
+ metadata = {}
+ basename_index = {}
+ for d in pkgindex.packages:
+ cpv = _pkg_str(d["CPV"], metadata=d,
+ settings=self.settings, db=self.dbapi)
+ d["CPV"] = cpv
+ metadata[_instance_key(cpv)] = d
+ path = d.get("PATH")
+ if not path:
+ path = cpv + ".tbz2"
+ basename = os.path.basename(path)
+ basename_index.setdefault(basename, []).append(d)
+
+ update_pkgindex = False
+ for mydir, file_names in dir_files.items():
+ try:
+ mydir = _unicode_decode(mydir,
+ encoding=_encodings["fs"], errors="strict")
+ except UnicodeDecodeError:
+ continue
+ for myfile in file_names:
+ try:
+ myfile = _unicode_decode(myfile,
+ encoding=_encodings["fs"], errors="strict")
+ except UnicodeDecodeError:
+ continue
+ if not myfile.endswith(SUPPORTED_XPAK_EXTENSIONS):
+ continue
+ mypath = os.path.join(mydir, myfile)
+ full_path = os.path.join(self.pkgdir, mypath)
+ s = os.lstat(full_path)
+
+ if not stat.S_ISREG(s.st_mode):
+ continue
+
+ # Validate data from the package index and try to avoid
+ # reading the xpak if possible.
+ possibilities = basename_index.get(myfile)
+ if possibilities:
+ match = None
+ for d in possibilities:
+ try:
+ if long(d["_mtime_"]) != s[stat.ST_MTIME]:
+ continue
+ except (KeyError, ValueError):
+ continue
+ try:
+ if long(d["SIZE"]) != long(s.st_size):
+ continue
+ except (KeyError, ValueError):
+ continue
+ if not minimum_keys.difference(d):
+ match = d
+ break
+ if match:
+ mycpv = match["CPV"]
+ instance_key = _instance_key(mycpv)
+ pkg_paths[instance_key] = mypath
+ # update the path if the package has been moved
+ oldpath = d.get("PATH")
+ if oldpath and oldpath != mypath:
+ update_pkgindex = True
+ # Omit PATH if it is the default path for
+ # the current Packages format version.
+ if mypath != mycpv + ".tbz2":
+ d["PATH"] = mypath
+ if not oldpath:
+ update_pkgindex = True
+ else:
+ d.pop("PATH", None)
+ if oldpath:
+ update_pkgindex = True
+ self.dbapi.cpv_inject(mycpv)
+ continue
+ if not os.access(full_path, os.R_OK):
+ writemsg(_("!!! Permission denied to read " \
+ "binary package: '%s'\n") % full_path,
+ noiselevel=-1)
+ self.invalids.append(myfile[:-5])
+ continue
+ pkg_metadata = self._read_metadata(full_path, s,
+ keys=chain(self.dbapi._aux_cache_keys,
+ ("PF", "CATEGORY")))
+ mycat = pkg_metadata.get("CATEGORY", "")
+ mypf = pkg_metadata.get("PF", "")
+ slot = pkg_metadata.get("SLOT", "")
+ mypkg = myfile[:-5]
+ if not mycat or not mypf or not slot:
+ #old-style or corrupt package
+ writemsg(_("\n!!! Invalid binary package: '%s'\n") % full_path,
+ noiselevel=-1)
+ missing_keys = []
+ if not mycat:
+ missing_keys.append("CATEGORY")
+ if not mypf:
+ missing_keys.append("PF")
+ if not slot:
+ missing_keys.append("SLOT")
+ msg = []
+ if missing_keys:
+ missing_keys.sort()
+ msg.append(_("Missing metadata key(s): %s.") % \
+ ", ".join(missing_keys))
+ msg.append(_(" This binary package is not " \
+ "recoverable and should be deleted."))
+ for line in textwrap.wrap("".join(msg), 72):
+ writemsg("!!! %s\n" % line, noiselevel=-1)
+ self.invalids.append(mypkg)
+ continue
+
+ multi_instance = False
+ invalid_name = False
+ build_id = None
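+ # Two on-disk layouts are recognized here: multi-instance
+ # packages named "<PF>-<BUILD_ID>.xpak" (for example,
+ # foo-1.0-1.xpak) and traditional packages named "<PF>.tbz2".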
+ if myfile.endswith(".xpak"):
+ multi_instance = True
+ build_id = self._parse_build_id(myfile)
+ if build_id < 1:
+ invalid_name = True
+ elif myfile != "%s-%s.xpak" % (
+ mypf, build_id):
+ invalid_name = True
+ else:
+ mypkg = mypkg[:-len(str(build_id))-1]
+ elif myfile != mypf + ".tbz2":
+ invalid_name = True
+
+ if invalid_name:
+ writemsg(_("\n!!! Binary package name is "
+ "invalid: '%s'\n") % full_path,
+ noiselevel=-1)
+ continue
+
+ if pkg_metadata.get("BUILD_ID"):
+ try:
+ build_id = long(pkg_metadata["BUILD_ID"])
+ except ValueError:
+ writemsg(_("!!! Binary package has "
+ "invalid BUILD_ID: '%s'\n") %
+ full_path, noiselevel=-1)
+ continue
+ else:
+ build_id = None
+
+ if multi_instance:
+ name_split = catpkgsplit("%s/%s" %
+ (mycat, mypf))
+ if (name_split is None or
+ tuple(catsplit(mydir)) != name_split[:2]):
+ continue
+ elif mycat != mydir and mydir != "All":
+ continue
+ if mypkg != mypf.strip():
+ continue
+ mycpv = mycat + "/" + mypkg
+ if not self.dbapi._category_re.match(mycat):
+ writemsg(_("!!! Binary package has an " \
+ "unrecognized category: '%s'\n") % full_path,
+ noiselevel=-1)
+ writemsg(_("!!! '%s' has a category that is not" \
+ " listed in %setc/portage/categories\n") % \
+ (mycpv, self.settings["PORTAGE_CONFIGROOT"]),
+ noiselevel=-1)
+ continue
+ if build_id is not None:
+ pkg_metadata["BUILD_ID"] = _unicode(build_id)
+ pkg_metadata["SIZE"] = _unicode(s.st_size)
+ # Discard items used only for validation above.
+ pkg_metadata.pop("CATEGORY")
+ pkg_metadata.pop("PF")
+ mycpv = _pkg_str(mycpv,
+ metadata=self.dbapi._aux_cache_slot_dict(pkg_metadata),
+ db=self.dbapi)
+ pkg_paths[_instance_key(mycpv)] = mypath
+ self.dbapi.cpv_inject(mycpv)
+ update_pkgindex = True
+ d = metadata.get(_instance_key(mycpv),
+ pkgindex._pkg_slot_dict())
+ if d:
+ try:
+ if long(d["_mtime_"]) != s[stat.ST_MTIME]:
+ d.clear()
+ except (KeyError, ValueError):
+ d.clear()
+ if d:
+ try:
+ if long(d["SIZE"]) != long(s.st_size):
+ d.clear()
+ except (KeyError, ValueError):
+ d.clear()
+
+ for k in self._pkgindex_allowed_pkg_keys:
+ v = pkg_metadata.get(k)
+ if v:
+ d[k] = v
+ d["CPV"] = mycpv
+
+ try:
+ self._eval_use_flags(mycpv, d)
+ except portage.exception.InvalidDependString:
+ writemsg(_("!!! Invalid binary package: '%s'\n") % \
+ self.getname(mycpv), noiselevel=-1)
+ self.dbapi.cpv_remove(mycpv)
+ del pkg_paths[_instance_key(mycpv)]
+
+ # record location if it's non-default
+ if mypath != mycpv + ".tbz2":
+ d["PATH"] = mypath
+ else:
+ d.pop("PATH", None)
+ metadata[_instance_key(mycpv)] = d
+
+ for instance_key in list(metadata):
+ if instance_key not in pkg_paths:
+ del metadata[instance_key]
+
+ if update_pkgindex:
+ del pkgindex.packages[:]
+ pkgindex.packages.extend(iter(metadata.values()))
+ self._update_pkgindex_header(pkgindex.header)
+
+ self._pkgindex_header = {}
+ self._merge_pkgindex_header(pkgindex.header,
+ self._pkgindex_header)
+
+ return pkgindex if update_pkgindex else None
+
+ def _populate_remote(self, getbinpkg_refresh=True):
+
+ self._remote_has_index = False
+ self._remotepkgs = {}
+ for base_url in self.settings["PORTAGE_BINHOST"].split():
+ parsed_url = urlparse(base_url)
+ host = parsed_url.netloc
+ port = parsed_url.port
+ user = None
+ passwd = None
+ user_passwd = ""
+ if "@" in host:
+ user, host = host.split("@", 1)
+ user_passwd = user + "@"
+ if ":" in user:
+ user, passwd = user.split(":", 1)
+
+ if port is not None:
+ port_str = ":%s" % (port,)
+ if host.endswith(port_str):
+ host = host[:-len(port_str)]
+ pkgindex_file = os.path.join(self.settings["EROOT"], CACHE_PATH, "binhost",
+ host, parsed_url.path.lstrip("/"), "Packages")
+ pkgindex = self._new_pkgindex()
+ try:
+ f = io.open(_unicode_encode(pkgindex_file,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ try:
+ pkgindex.read(f)
+ finally:
+ f.close()
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ local_timestamp = pkgindex.header.get("TIMESTAMP", None)
+ try:
+ download_timestamp = \
+ float(pkgindex.header.get("DOWNLOAD_TIMESTAMP", 0))
+ except ValueError:
+ download_timestamp = 0
+ remote_timestamp = None
+ rmt_idx = self._new_pkgindex()
+ proc = None
+ tmp_filename = None
+ try:
+ # urlparse.urljoin() only works correctly with recognized
+ # protocols and requires the base url to have a trailing
+ # slash, so join manually...
+ url = base_url.rstrip("/") + "/Packages"
+ f = None
+
+ if not getbinpkg_refresh and local_timestamp:
+ raise UseCachedCopyOfRemoteIndex()
+
+ try:
+ ttl = float(pkgindex.header.get("TTL", 0))
+ except ValueError:
+ pass
+ else:
+ if download_timestamp and ttl and \
+ download_timestamp + ttl > time.time():
+ raise UseCachedCopyOfRemoteIndex()
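+ # For example (hypothetical values): with TTL=3600 in the cached
+ # index header and a DOWNLOAD_TIMESTAMP less than an hour old, the
+ # cached Packages file is reused and the remote fetch below is skipped.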
+
+ # Don't use urlopen for https, unless
+ # PEP 476 is supported (bug #469888).
+ if parsed_url.scheme not in ('https',) or _have_pep_476():
+ try:
+ f = _urlopen(url, if_modified_since=local_timestamp)
+ if hasattr(f, 'headers') and f.headers.get('timestamp', ''):
+ remote_timestamp = f.headers.get('timestamp')
+ except IOError as err:
+ if hasattr(err, 'code') and err.code == 304: # not modified (since local_timestamp)
+ raise UseCachedCopyOfRemoteIndex()
+
+ if parsed_url.scheme in ('ftp', 'http', 'https'):
+ # This protocol is supposedly supported by urlopen,
+ # so apparently there's a problem with the url
+ # or a bug in urlopen.
+ if self.settings.get("PORTAGE_DEBUG", "0") != "0":
+ traceback.print_exc()
+
+ raise
+ except ValueError:
+ raise ParseError("Invalid Portage BINHOST value '%s'"
+ % url.lstrip())
+
+ if f is None:
+
+ path = parsed_url.path.rstrip("/") + "/Packages"
+
+ if parsed_url.scheme == 'ssh':
+ # Use a pipe so that we can terminate the download
+ # early if we detect that the TIMESTAMP header
+ # matches that of the cached Packages file.
+ ssh_args = ['ssh']
+ if port is not None:
+ ssh_args.append("-p%s" % (port,))
+ # NOTE: shlex evaluates embedded quotes
+ ssh_args.extend(portage.util.shlex_split(
+ self.settings.get("PORTAGE_SSH_OPTS", "")))
+ ssh_args.append(user_passwd + host)
+ ssh_args.append('--')
+ ssh_args.append('cat')
+ ssh_args.append(path)
+
+ proc = subprocess.Popen(ssh_args,
+ stdout=subprocess.PIPE)
+ f = proc.stdout
+ else:
+ setting = 'FETCHCOMMAND_' + parsed_url.scheme.upper()
+ fcmd = self.settings.get(setting)
+ if not fcmd:
+ fcmd = self.settings.get('FETCHCOMMAND')
+ if not fcmd:
+ raise EnvironmentError("FETCHCOMMAND is unset")
+
+ fd, tmp_filename = tempfile.mkstemp()
+ tmp_dirname, tmp_basename = os.path.split(tmp_filename)
+ os.close(fd)
+
+ fcmd_vars = {
+ "DISTDIR": tmp_dirname,
+ "FILE": tmp_basename,
+ "URI": url
+ }
+
+ for k in ("PORTAGE_SSH_OPTS",):
+ v = self.settings.get(k)
+ if v is not None:
+ fcmd_vars[k] = v
+
+ success = portage.getbinpkg.file_get(
+ fcmd=fcmd, fcmd_vars=fcmd_vars)
+ if not success:
+ raise EnvironmentError("%s failed" % (setting,))
+ f = open(tmp_filename, 'rb')
+
+ f_dec = codecs.iterdecode(f,
+ _encodings['repo.content'], errors='replace')
+ try:
+ rmt_idx.readHeader(f_dec)
+ if not remote_timestamp: # in case it had not been read from HTTP header
+ remote_timestamp = rmt_idx.header.get("TIMESTAMP", None)
+ if not remote_timestamp:
+ # no timestamp in the header, something's wrong
+ pkgindex = None
+ writemsg(_("\n\n!!! Binhost package index " \
+ " has no TIMESTAMP field.\n"), noiselevel=-1)
+ else:
+ if not self._pkgindex_version_supported(rmt_idx):
+ writemsg(_("\n\n!!! Binhost package index version" \
+ " is not supported: '%s'\n") % \
+ rmt_idx.header.get("VERSION"), noiselevel=-1)
+ pkgindex = None
+ elif local_timestamp != remote_timestamp:
+ rmt_idx.readBody(f_dec)
+ pkgindex = rmt_idx
+ finally:
+ # Timeout after 5 seconds, in case close() blocks
+ # indefinitely (see bug #350139).
+ try:
+ try:
+ AlarmSignal.register(5)
+ f.close()
+ finally:
+ AlarmSignal.unregister()
+ except AlarmSignal:
+ writemsg("\n\n!!! %s\n" % \
+ _("Timed out while closing connection to binhost"),
+ noiselevel=-1)
+ except UseCachedCopyOfRemoteIndex:
+ writemsg_stdout("\n")
+ writemsg_stdout(
+ colorize("GOOD", _("Local copy of remote index is up-to-date and will be used.")) + \
+ "\n")
+ rmt_idx = pkgindex
+ except EnvironmentError as e:
+ # This includes URLError which is raised for SSL
+ # certificate errors when PEP 476 is supported.
+ writemsg(_("\n\n!!! Error fetching binhost package" \
+ " info from '%s'\n") % _hide_url_passwd(base_url))
+ # With Python 2, the EnvironmentError message may
+ # contain bytes or unicode, so use _unicode to ensure
+ # safety with all locales (bug #532784).
+ try:
+ error_msg = _unicode(e)
+ except UnicodeDecodeError as uerror:
+ error_msg = _unicode(uerror.object,
+ encoding='utf_8', errors='replace')
+ writemsg("!!! %s\n\n" % error_msg)
+ del e
+ pkgindex = None
+ if proc is not None:
+ if proc.poll() is None:
+ proc.kill()
+ proc.wait()
+ proc = None
+ if tmp_filename is not None:
+ try:
+ os.unlink(tmp_filename)
+ except OSError:
+ pass
+ if pkgindex is rmt_idx:
+ pkgindex.modified = False # don't update the header
+ pkgindex.header["DOWNLOAD_TIMESTAMP"] = "%d" % time.time()
+ try:
+ ensure_dirs(os.path.dirname(pkgindex_file))
+ f = atomic_ofstream(pkgindex_file)
+ pkgindex.write(f)
+ f.close()
+ except (IOError, PortageException):
+ if os.access(os.path.dirname(pkgindex_file), os.W_OK):
+ raise
+ # The current user doesn't have permission to cache the
+ # file, but that's alright.
+ if pkgindex:
+ remote_base_uri = pkgindex.header.get("URI", base_url)
+ for d in pkgindex.packages:
+ cpv = _pkg_str(d["CPV"], metadata=d,
+ settings=self.settings, db=self.dbapi)
+ # Local package instances override remote instances
+ # with the same instance_key.
+ if self.dbapi.cpv_exists(cpv):
+ continue
+
+ d["CPV"] = cpv
+ d["BASE_URI"] = remote_base_uri
+ d["PKGINDEX_URI"] = url
+ self._remotepkgs[self.dbapi._instance_key(cpv)] = d
+ self.dbapi.cpv_inject(cpv)
+
+ self._remote_has_index = True
+ self._merge_pkgindex_header(pkgindex.header,
+ self._pkgindex_header)
+
+ def inject(self, cpv, filename=None):
+ """Add a freshly built package to the database. This updates
+ $PKGDIR/Packages with the new package metadata (including MD5).
+ @param cpv: The cpv of the new package to inject
+ @type cpv: string
+ @param filename: File path of the package to inject, or None if it's
+ already in the location returned by getname()
+ @type filename: string
+ @rtype: _pkg_str or None
+ @return: A _pkg_str instance on success, or None on failure.
+ """
+ mycat, mypkg = catsplit(cpv)
+ if not self.populated:
+ self.populate()
+ if filename is None:
+ full_path = self.getname(cpv)
+ else:
+ full_path = filename
+ try:
+ s = os.stat(full_path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ writemsg(_("!!! Binary package does not exist: '%s'\n") % full_path,
+ noiselevel=-1)
+ return
+ metadata = self._read_metadata(full_path, s)
+ invalid_depend = False
+ try:
+ self._eval_use_flags(cpv, metadata)
+ except portage.exception.InvalidDependString:
+ invalid_depend = True
+ if invalid_depend or not metadata.get("SLOT"):
+ writemsg(_("!!! Invalid binary package: '%s'\n") % full_path,
+ noiselevel=-1)
+ return
+
+ fetched = False
+ try:
+ build_id = cpv.build_id
+ except AttributeError:
+ build_id = None
+ else:
+ instance_key = self.dbapi._instance_key(cpv)
+ if instance_key in self.dbapi.cpvdict:
+ # This means we've been called by aux_update (or
+ # similar). The instance key typically changes (due to
+ # file modification), so we need to discard existing
+ # instance key references.
+ self.dbapi.cpv_remove(cpv)
+ self._pkg_paths.pop(instance_key, None)
+ if self._remotepkgs is not None:
+ fetched = self._remotepkgs.pop(instance_key, None)
+
+ cpv = _pkg_str(cpv, metadata=metadata, settings=self.settings,
+ db=self.dbapi)
+
+ # Reread the Packages index (in case it's been changed by another
+ # process) and then update it, all while holding a lock.
+ pkgindex_lock = None
+ try:
+ pkgindex_lock = lockfile(self._pkgindex_file,
+ wantnewlockfile=1)
+ if filename is not None:
+ new_filename = self.getname(cpv, allocate_new=True)
+ try:
+ samefile = os.path.samefile(filename, new_filename)
+ except OSError:
+ samefile = False
+ if not samefile:
+ self._ensure_dir(os.path.dirname(new_filename))
+ _movefile(filename, new_filename, mysettings=self.settings)
+ full_path = new_filename
+
+ basename = os.path.basename(full_path)
+ pf = catsplit(cpv)[1]
+ if (build_id is None and not fetched and
+ basename.endswith(".xpak")):
+ # Apply the newly assigned BUILD_ID. This is intended
+ # to occur only for locally built packages. If the
+ # package was fetched, we want to preserve its
+ # attributes, so that we can later distinguish that it
+ # is identical to its remote counterpart.
+ build_id = self._parse_build_id(basename)
+ metadata["BUILD_ID"] = _unicode(build_id)
+ cpv = _pkg_str(cpv, metadata=metadata,
+ settings=self.settings, db=self.dbapi)
+ binpkg = portage.xpak.tbz2(full_path)
+ binary_data = binpkg.get_data()
+ binary_data[b"BUILD_ID"] = _unicode_encode(
+ metadata["BUILD_ID"])
+ binpkg.recompose_mem(portage.xpak.xpak_mem(binary_data))
+
+ self._file_permissions(full_path)
+ pkgindex = self._load_pkgindex()
+ if not self._pkgindex_version_supported(pkgindex):
+ pkgindex = self._new_pkgindex()
+
+ d = self._inject_file(pkgindex, cpv, full_path)
+ self._update_pkgindex_header(pkgindex.header)
+ self._pkgindex_write(pkgindex)
+
+ finally:
+ if pkgindex_lock:
+ unlockfile(pkgindex_lock)
+
+ # This is used to record BINPKGMD5 in the installed package
+ # database, for a package that has just been built.
+ cpv._metadata["MD5"] = d["MD5"]
+
+ return cpv
+
+ def _read_metadata(self, filename, st, keys=None):
+ """
+ Read metadata from a binary package. The returned metadata
+ dictionary will contain empty strings for any values that
+ are undefined (this is important because the _pkg_str class
+ distinguishes between missing and undefined values).
+
+ @param filename: File path of the binary package
+ @type filename: string
+ @param st: stat result for the binary package
+ @type st: os.stat_result
+ @param keys: optional list of specific metadata keys to retrieve
+ @type keys: iterable
+ @rtype: dict
+ @return: package metadata
+ """
+ if keys is None:
+ keys = self.dbapi._aux_cache_keys
+ metadata = self.dbapi._aux_cache_slot_dict()
+ else:
+ metadata = {}
+ binary_metadata = portage.xpak.tbz2(filename).get_data()
+ for k in keys:
+ if k == "_mtime_":
+ metadata[k] = _unicode(st[stat.ST_MTIME])
+ elif k == "SIZE":
+ metadata[k] = _unicode(st.st_size)
+ else:
+ v = binary_metadata.get(_unicode_encode(k))
+ if v is None:
+ if k == "EAPI":
+ metadata[k] = "0"
+ else:
+ metadata[k] = ""
+ else:
+ v = _unicode_decode(v)
+ metadata[k] = " ".join(v.split())
+ return metadata
+
+ def _inject_file(self, pkgindex, cpv, filename):
+ """
+ Add a package to internal data structures, and add an
+ entry to the given pkgindex.
+ @param pkgindex: The PackageIndex instance to which an entry
+ will be added.
+ @type pkgindex: PackageIndex
+ @param cpv: A _pkg_str instance corresponding to the package
+ being injected.
+ @type cpv: _pkg_str
+ @param filename: Absolute file path of the package to inject.
+ @type filename: string
+ @rtype: dict
+ @return: A dict corresponding to the new entry which has been
+ added to pkgindex. This may be used to access the checksums
+ which have just been generated.
+ """
+ # Update state for future isremote calls.
+ instance_key = self.dbapi._instance_key(cpv)
+ if self._remotepkgs is not None:
+ self._remotepkgs.pop(instance_key, None)
+
+ self.dbapi.cpv_inject(cpv)
+ self._pkg_paths[instance_key] = filename[len(self.pkgdir)+1:]
+ d = self._pkgindex_entry(cpv)
+
+ # If found, remove package(s) with duplicate path.
+ path = d.get("PATH", "")
+ for i in range(len(pkgindex.packages) - 1, -1, -1):
+ d2 = pkgindex.packages[i]
+ if path and path == d2.get("PATH"):
+ # Handle path collisions in $PKGDIR/All
+ # when CPV is not identical.
+ del pkgindex.packages[i]
+ elif cpv == d2.get("CPV"):
+ if path == d2.get("PATH", ""):
+ del pkgindex.packages[i]
+
+ pkgindex.packages.append(d)
+ return d
+
+ def _pkgindex_write(self, pkgindex):
+ contents = codecs.getwriter(_encodings['repo.content'])(io.BytesIO())
+ pkgindex.write(contents)
+ contents = contents.getvalue()
+ atime = mtime = long(pkgindex.header["TIMESTAMP"])
+ output_files = [(atomic_ofstream(self._pkgindex_file, mode="wb"),
+ self._pkgindex_file, None)]
+
+ if "compress-index" in self.settings.features:
+ gz_fname = self._pkgindex_file + ".gz"
+ fileobj = atomic_ofstream(gz_fname, mode="wb")
+ output_files.append((GzipFile(filename='', mode="wb",
+ fileobj=fileobj, mtime=mtime), gz_fname, fileobj))
+
+ for f, fname, f_close in output_files:
+ f.write(contents)
+ f.close()
+ if f_close is not None:
+ f_close.close()
+ self._file_permissions(fname)
+ # some seconds might have elapsed since TIMESTAMP
+ os.utime(fname, (atime, mtime))
+
+ def _pkgindex_entry(self, cpv):
+ """
+ Performs checksums, and gets size and mtime via lstat.
+ Raises InvalidDependString if necessary.
+ @rtype: dict
+ @return: a dict containing the entry for the given cpv.
+ """
+
+ pkg_path = self.getname(cpv)
+
+ d = dict(cpv._metadata.items())
+ d.update(perform_multiple_checksums(
+ pkg_path, hashes=self._pkgindex_hashes))
+
+ d["CPV"] = cpv
+ st = os.lstat(pkg_path)
+ d["_mtime_"] = _unicode(st[stat.ST_MTIME])
+ d["SIZE"] = _unicode(st.st_size)
+
+ rel_path = pkg_path[len(self.pkgdir)+1:]
+ # record location if it's non-default
+ if rel_path != cpv + ".tbz2":
+ d["PATH"] = rel_path
+
+ return d
+
+ def _new_pkgindex(self):
+ return portage.getbinpkg.PackageIndex(
+ allowed_pkg_keys=self._pkgindex_allowed_pkg_keys,
+ default_header_data=self._pkgindex_default_header_data,
+ default_pkg_data=self._pkgindex_default_pkg_data,
+ inherited_keys=self._pkgindex_inherited_keys,
+ translated_keys=self._pkgindex_translated_keys)
+
+ @staticmethod
+ def _merge_pkgindex_header(src, dest):
+ """
+ Merge Packages header settings from src to dest, in order to
+ propagate implicit IUSE and USE_EXPAND settings for use with
+ binary and installed packages. Values are appended, so the
+ result is a union of elements from src and dest.
+
+ Pull in ARCH if it's not defined, since it's used for validation
+ by emerge's profile_check function, and also for KEYWORDS logic
+ in the _getmaskingstatus function.
+
+ @param src: source mapping (read only)
+ @type src: Mapping
+ @param dest: destination mapping
+ @type dest: MutableMapping
+ """
+ for k, v in iter_iuse_vars(src):
+ v_before = dest.get(k)
+ if v_before is not None:
+ merged_values = set(v_before.split())
+ merged_values.update(v.split())
+ v = ' '.join(sorted(merged_values))
+ dest[k] = v
+
+ if 'ARCH' not in dest and 'ARCH' in src:
+ dest['ARCH'] = src['ARCH']
+
+ def _propagate_config(self, config):
+ """
+ Propagate implicit IUSE and USE_EXPAND settings from the binary
+ package database to a config instance. If settings are not
+ available to propagate, then this will do nothing and return
+ False.
+
+ @param config: config instance
+ @type config: portage.config
+ @rtype: bool
+ @return: True if settings successfully propagated, False if settings
+ were not available to propagate.
+ """
+ if self._pkgindex_header is None:
+ return False
+
+ self._merge_pkgindex_header(self._pkgindex_header,
+ config.configdict['defaults'])
+ config.regenerate()
+ config._init_iuse()
+ return True
+
+ def _update_pkgindex_header(self, header):
+ """
+ Add useful settings to the Packages file header, for use by
+ binhost clients.
+
+ This will return silently if the current profile is invalid or
+ does not have an IUSE_IMPLICIT variable, since it's useful to
+ maintain a cache of implicit IUSE settings for use with binary
+ packages.
+ """
+ if not (self.settings.profile_path and
+ "IUSE_IMPLICIT" in self.settings):
+ header.setdefault("VERSION", _unicode(self._pkgindex_version))
+ return
+
+ portdir = normalize_path(os.path.realpath(self.settings["PORTDIR"]))
+ profiles_base = os.path.join(portdir, "profiles") + os.path.sep
+ if self.settings.profile_path:
+ profile_path = normalize_path(
+ os.path.realpath(self.settings.profile_path))
+ if profile_path.startswith(profiles_base):
+ profile_path = profile_path[len(profiles_base):]
+ header["PROFILE"] = profile_path
+ header["VERSION"] = _unicode(self._pkgindex_version)
+ base_uri = self.settings.get("PORTAGE_BINHOST_HEADER_URI")
+ if base_uri:
+ header["URI"] = base_uri
+ else:
+ header.pop("URI", None)
+ for k in list(self._pkgindex_header_keys) + \
+ self.settings.get("USE_EXPAND_IMPLICIT", "").split() + \
+ self.settings.get("USE_EXPAND_UNPREFIXED", "").split():
+ v = self.settings.get(k, None)
+ if v:
+ header[k] = v
+ else:
+ header.pop(k, None)
+
+ # These values may be useful for using a binhost without
+ # having a local copy of the profile (bug #470006).
+ for k in self.settings.get("USE_EXPAND_IMPLICIT", "").split():
+ k = "USE_EXPAND_VALUES_" + k
+ v = self.settings.get(k)
+ if v:
+ header[k] = v
+ else:
+ header.pop(k, None)
+
+ def _pkgindex_version_supported(self, pkgindex):
+ version = pkgindex.header.get("VERSION")
+ if version:
+ try:
+ if int(version) <= self._pkgindex_version:
+ return True
+ except ValueError:
+ pass
+ return False
+
+ def _eval_use_flags(self, cpv, metadata):
+ use = frozenset(metadata.get("USE", "").split())
+ for k in self._pkgindex_use_evaluated_keys:
+ if k.endswith('DEPEND'):
+ token_class = Atom
+ else:
+ token_class = None
+
+ deps = metadata.get(k)
+ if deps is None:
+ continue
+ try:
+ deps = use_reduce(deps, uselist=use, token_class=token_class)
+ deps = paren_enclose(deps)
+ except portage.exception.InvalidDependString as e:
+ writemsg("%s: %s\n" % (k, e), noiselevel=-1)
+ raise
+ metadata[k] = deps
+
+ def exists_specific(self, cpv):
+ if not self.populated:
+ self.populate()
+ return self.dbapi.match(
+ dep_expand("="+cpv, mydb=self.dbapi, settings=self.settings))
+
+ def dep_bestmatch(self, mydep):
+ "compatibility method -- all matches, not just visible ones"
+ if not self.populated:
+ self.populate()
+ writemsg("\n\n", 1)
+ writemsg("mydep: %s\n" % mydep, 1)
+ mydep = dep_expand(mydep, mydb=self.dbapi, settings=self.settings)
+ writemsg("mydep: %s\n" % mydep, 1)
+ mykey = dep_getkey(mydep)
+ writemsg("mykey: %s\n" % mykey, 1)
+ mymatch = best(match_from_list(mydep,self.dbapi.cp_list(mykey)))
+ writemsg("mymatch: %s\n" % mymatch, 1)
+ if mymatch is None:
+ return ""
+ return mymatch
+
+ def getname(self, cpv, allocate_new=None):
+ """Returns a file location for this package.
+ If cpv has both build_time and build_id attributes, then the
+ path to the specific corresponding instance is returned.
+ Otherwise, allocate a new path and return that. When allocating
+ a new path, behavior depends on the binpkg-multi-instance
+ FEATURES setting.
+ """
+ if not self.populated:
+ self.populate()
+
+ try:
+ cpv.cp
+ except AttributeError:
+ cpv = _pkg_str(cpv)
+
+ filename = None
+ if allocate_new:
+ filename = self._allocate_filename(cpv)
+ elif self._is_specific_instance(cpv):
+ instance_key = self.dbapi._instance_key(cpv)
+ path = self._pkg_paths.get(instance_key)
+ if path is not None:
+ filename = os.path.join(self.pkgdir, path)
+
+ if filename is None and not allocate_new:
+ try:
+ instance_key = self.dbapi._instance_key(cpv,
+ support_string=True)
+ except KeyError:
+ pass
+ else:
+ filename = self._pkg_paths.get(instance_key)
+ if filename is not None:
+ filename = os.path.join(self.pkgdir, filename)
+
+ if filename is None:
+ if self._multi_instance:
+ pf = catsplit(cpv)[1]
+ filename = "%s-%s.xpak" % (
+ os.path.join(self.pkgdir, cpv.cp, pf), "1")
+ else:
+ filename = os.path.join(self.pkgdir, cpv + ".tbz2")
+
+ return filename
+
+ def _is_specific_instance(self, cpv):
+ specific = True
+ try:
+ build_time = cpv.build_time
+ build_id = cpv.build_id
+ except AttributeError:
+ specific = False
+ else:
+ if build_time is None or build_id is None:
+ specific = False
+ return specific
+
+ def _max_build_id(self, cpv):
+ max_build_id = 0
+ for x in self.dbapi.cp_list(cpv.cp):
+ if (x == cpv and x.build_id is not None and
+ x.build_id > max_build_id):
+ max_build_id = x.build_id
+ return max_build_id
+
+ def _allocate_filename(self, cpv):
+ return os.path.join(self.pkgdir, cpv + ".tbz2")
+
+ def _allocate_filename_multi(self, cpv):
+
+ # First, get the max build_id found when _populate was
+ # called.
+ max_build_id = self._max_build_id(cpv)
+
+ # A new package may have been added concurrently since the
+ # last _populate call, so increment build_id until
+ # we locate an unused id.
+ pf = catsplit(cpv)[1]
+ build_id = max_build_id + 1
+
+ while True:
+ filename = "%s-%s.xpak" % (
+ os.path.join(self.pkgdir, cpv.cp, pf), build_id)
+ if os.path.exists(filename):
+ build_id += 1
+ else:
+ return filename
+
+ @staticmethod
+ def _parse_build_id(filename):
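+ # Illustrative example (hypothetical filename): for a multi-instance
+ # binary package named "foo-1.0-3.xpak" this returns 3; names without
+ # a parseable numeric build-id suffix fall through without raising.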
+ build_id = -1
+ suffixlen = len(".xpak")
+ hyphen = filename.rfind("-", 0, -(suffixlen + 1))
+ if hyphen != -1:
+ build_id = filename[hyphen+1:-suffixlen]
+ try:
+ build_id = long(build_id)
+ except ValueError:
+ pass
+ return build_id
+
+ def isremote(self, pkgname):
+ """Returns true if the package is kept remotely and it has not been
+ downloaded (or it is only partially downloaded)."""
+ if (self._remotepkgs is None or
+ self.dbapi._instance_key(pkgname) not in self._remotepkgs):
+ return False
+ # Presence in self._remotepkgs implies that it's remote. When a
+ # package is downloaded, state is updated by self.inject().
+ return True
+
+ def get_pkgindex_uri(self, cpv):
+ """Returns the URI to the Packages file for a given package."""
+ uri = None
+ if self._remotepkgs is not None:
+ metadata = self._remotepkgs.get(self.dbapi._instance_key(cpv))
+ if metadata is not None:
+ uri = metadata["PKGINDEX_URI"]
+ return uri
+
+ def gettbz2(self, pkgname):
+ """Fetches the package from a remote site, if necessary. Attempts to
+ resume if the file appears to be partially downloaded."""
+ instance_key = self.dbapi._instance_key(pkgname)
+ tbz2_path = self.getname(pkgname)
+ tbz2name = os.path.basename(tbz2_path)
+ resume = False
+ if os.path.exists(tbz2_path):
+ if tbz2name[:-5] not in self.invalids:
+ return
+ else:
+ resume = True
+ writemsg(_("Resuming download of this tbz2, but it is possible that it is corrupt.\n"),
+ noiselevel=-1)
+
+ mydest = os.path.dirname(self.getname(pkgname))
+ self._ensure_dir(mydest)
+ # urljoin doesn't work correctly with unrecognized protocols like sftp
+ if self._remote_has_index:
+ rel_url = self._remotepkgs[instance_key].get("PATH")
+ if not rel_url:
+ rel_url = pkgname + ".tbz2"
+ remote_base_uri = self._remotepkgs[instance_key]["BASE_URI"]
+ url = remote_base_uri.rstrip("/") + "/" + rel_url.lstrip("/")
+ else:
+ url = self.settings["PORTAGE_BINHOST"].rstrip("/") + "/" + tbz2name
+ protocol = urlparse(url)[0]
+ fcmd_prefix = "FETCHCOMMAND"
+ if resume:
+ fcmd_prefix = "RESUMECOMMAND"
+ fcmd = self.settings.get(fcmd_prefix + "_" + protocol.upper())
+ if not fcmd:
+ fcmd = self.settings.get(fcmd_prefix)
+ success = portage.getbinpkg.file_get(url, mydest, fcmd=fcmd)
+ if not success:
+ try:
+ os.unlink(self.getname(pkgname))
+ except OSError:
+ pass
+ raise portage.exception.FileNotFound(mydest)
+ self.inject(pkgname)
+
+ def _load_pkgindex(self):
+ pkgindex = self._new_pkgindex()
+ try:
+ f = io.open(_unicode_encode(self._pkgindex_file,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ except EnvironmentError:
+ pass
+ else:
+ try:
+ pkgindex.read(f)
+ finally:
+ f.close()
+ return pkgindex
+
+ def _get_digests(self, pkg):
+
+ try:
+ cpv = pkg.cpv
+ except AttributeError:
+ cpv = pkg
+
+ _instance_key = self.dbapi._instance_key
+ instance_key = _instance_key(cpv)
+ digests = {}
+ metadata = (None if self._remotepkgs is None else
+ self._remotepkgs.get(instance_key))
+ if metadata is None:
+ for d in self._load_pkgindex().packages:
+ if (d["CPV"] == cpv and
+ instance_key == _instance_key(_pkg_str(d["CPV"],
+ metadata=d, settings=self.settings))):
+ metadata = d
+ break
+
+ if metadata is None:
+ return digests
+
+ for k in get_valid_checksum_keys():
+ v = metadata.get(k)
+ if not v:
+ continue
+ digests[k] = v
+
+ if "SIZE" in metadata:
+ try:
+ digests["size"] = int(metadata["SIZE"])
+ except ValueError:
+ writemsg(_("!!! Malformed SIZE attribute in remote " \
+ "metadata for '%s'\n") % cpv)
+
+ return digests
+
+ def digestCheck(self, pkg):
+ """
+ Verify digests for the given package and raise DigestException
+ if verification fails.
+ @rtype: bool
+ @return: True if digests could be located, False otherwise.
+ """
+
+ digests = self._get_digests(pkg)
+
+ if not digests:
+ return False
+
+ try:
+ cpv = pkg.cpv
+ except AttributeError:
+ cpv = pkg
+
+ pkg_path = self.getname(cpv)
+ hash_filter = _hash_filter(
+ self.settings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if not hash_filter.transparent:
+ digests = _apply_hash_filter(digests, hash_filter)
+ eout = EOutput()
+ eout.quiet = self.settings.get("PORTAGE_QUIET") == "1"
+ ok, st = _check_distfile(pkg_path, digests, eout, show_errors=0)
+ if not ok:
+ ok, reason = verify_all(pkg_path, digests)
+ if not ok:
+ raise portage.exception.DigestException(
+ (pkg_path,) + tuple(reason))
+
+ return True
+
+ def getslot(self, mycatpkg):
+ "Get a slot for a catpkg; assume it exists."
+ myslot = ""
+ try:
+ myslot = self.dbapi._pkg_str(mycatpkg, None).slot
+ except KeyError:
+ pass
+ return myslot
diff --git a/lib/portage/dbapi/cpv_expand.py b/lib/portage/dbapi/cpv_expand.py
new file mode 100644
index 000000000..70ee78245
--- /dev/null
+++ b/lib/portage/dbapi/cpv_expand.py
@@ -0,0 +1,108 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = ["cpv_expand"]
+
+import portage
+from portage.exception import AmbiguousPackageName
+from portage.localization import _
+from portage.util import writemsg
+from portage.versions import _pkgsplit
+
+def cpv_expand(mycpv, mydb=None, use_cache=1, settings=None):
+ """Given a string (packagename or virtual) expand it into a valid
+ cat/package string. Virtuals use the mydb to determine which provided
+ virtual is a valid choice and defaults to the first element when there
+ are no installed/available candidates."""
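+ # Illustrative behavior (hypothetical categories/packages, assuming
+ # "app-misc/foo" is the only match in mydb):
+ #   cpv_expand("foo-1.0", mydb=portdb) -> "app-misc/foo-1.0"
+ #   cpv_expand("bar", mydb=portdb)     -> "null/bar" when nothing
+ #   matches and no virtual provides it.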
+ myslash=mycpv.split("/")
+ mysplit = _pkgsplit(myslash[-1])
+ if settings is None:
+ try:
+ settings = mydb.settings
+ except AttributeError:
+ settings = portage.settings
+ if len(myslash)>2:
+ # this is an illegal case.
+ mysplit=[]
+ mykey=mycpv
+ elif len(myslash)==2:
+ if mysplit:
+ mykey=myslash[0]+"/"+mysplit[0]
+ else:
+ mykey=mycpv
+
+ # Since Gentoo stopped using old-style virtuals in
+ # 2011, typically it's possible to avoid getvirtuals()
+ # calls entirely. Therefore, only call getvirtuals()
+ # if the atom category is "virtual" and cp_list()
+ # returns nothing.
+ if mykey.startswith("virtual/") and \
+ hasattr(mydb, "cp_list") and \
+ not mydb.cp_list(mykey, use_cache=use_cache):
+ if hasattr(mydb, "vartree"):
+ settings._populate_treeVirtuals_if_needed(mydb.vartree)
+ virts = settings.getvirtuals().get(mykey)
+ if virts:
+ mykey_orig = mykey
+ for vkey in virts:
+ # The virtuals file can contain a versioned atom, so
+ # it may be necessary to remove the operator and
+ # version from the atom before it is passed into
+ # dbapi.cp_list().
+ if mydb.cp_list(vkey.cp):
+ mykey = str(vkey)
+ break
+ if mykey == mykey_orig:
+ mykey = str(virts[0])
+ #we only perform virtual expansion if we are passed a dbapi
+ else:
+ # specific cpv, no category, i.e. "foo-1.0"
+ if mysplit:
+ myp=mysplit[0]
+ else:
+ # "foo" ?
+ myp=mycpv
+ mykey=None
+ matches=[]
+ if mydb and hasattr(mydb, "categories"):
+ for x in mydb.categories:
+ if mydb.cp_list(x+"/"+myp,use_cache=use_cache):
+ matches.append(x+"/"+myp)
+ if len(matches) > 1:
+ virtual_name_collision = False
+ if len(matches) == 2:
+ for x in matches:
+ if not x.startswith("virtual/"):
+ # Assume that the non-virtual is desired. This helps
+ # avoid the ValueError for invalid deps that come from
+ # installed packages (during reverse blocker detection,
+ # for example).
+ mykey = x
+ else:
+ virtual_name_collision = True
+ if not virtual_name_collision:
+ # AmbiguousPackageName inherits from ValueError,
+ # for backward compatibility with calling code
+ # that already handles ValueError.
+ raise AmbiguousPackageName(matches)
+ elif matches:
+ mykey=matches[0]
+
+ if not mykey and not isinstance(mydb, list):
+ if hasattr(mydb, "vartree"):
+ settings._populate_treeVirtuals_if_needed(mydb.vartree)
+ virts_p = settings.get_virts_p().get(myp)
+ if virts_p:
+ mykey = virts_p[0]
+ #again, we only perform virtual expansion if we have a dbapi (not a list)
+ if not mykey:
+ mykey="null/"+myp
+ if mysplit:
+ if mysplit[2]=="r0":
+ return mykey+"-"+mysplit[1]
+ else:
+ return mykey+"-"+mysplit[1]+"-"+mysplit[2]
+ else:
+ return mykey
diff --git a/lib/portage/dbapi/dep_expand.py b/lib/portage/dbapi/dep_expand.py
new file mode 100644
index 000000000..9515b7dec
--- /dev/null
+++ b/lib/portage/dbapi/dep_expand.py
@@ -0,0 +1,58 @@
+# Copyright 2010-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = ["dep_expand"]
+
+import re
+
+from portage.dbapi.cpv_expand import cpv_expand
+from portage.dep import Atom, isvalidatom
+from portage.exception import InvalidAtom
+from portage.versions import catsplit
+
+def dep_expand(mydep, mydb=None, use_cache=1, settings=None):
+ '''
+ @rtype: Atom
+ '''
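+ # Illustrative behavior (hypothetical atom, assuming "dev-lang/python"
+ # is the only match in mydb):
+ #   dep_expand(">=python-3.6", mydb=portdb) -> Atom(">=dev-lang/python-3.6")
+ # Atoms that already carry a category are returned unchanged unless
+ # they refer to an old-style virtual.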
+ orig_dep = mydep
+ if isinstance(orig_dep, Atom):
+ has_cat = True
+ else:
+ if not mydep:
+ return mydep
+ if mydep[0] == "*":
+ mydep = mydep[1:]
+ orig_dep = mydep
+ has_cat = '/' in orig_dep.split(':')[0]
+ if not has_cat:
+ alphanum = re.search(r'\w', orig_dep)
+ if alphanum:
+ mydep = orig_dep[:alphanum.start()] + "null/" + \
+ orig_dep[alphanum.start():]
+ try:
+ mydep = Atom(mydep, allow_repo=True)
+ except InvalidAtom:
+ # Missing '=' prefix is allowed for backward compatibility.
+ if not isvalidatom("=" + mydep, allow_repo=True):
+ raise
+ mydep = Atom('=' + mydep, allow_repo=True)
+ orig_dep = '=' + orig_dep
+ if not has_cat:
+ null_cat, pn = catsplit(mydep.cp)
+ mydep = pn
+
+ if has_cat:
+ # Optimize most common cases to avoid calling cpv_expand.
+ if not mydep.cp.startswith("virtual/"):
+ return mydep
+ if not hasattr(mydb, "cp_list") or \
+ mydb.cp_list(mydep.cp):
+ return mydep
+ # Fallback to legacy cpv_expand for old-style PROVIDE virtuals.
+ mydep = mydep.cp
+
+ expanded = cpv_expand(mydep, mydb=mydb,
+ use_cache=use_cache, settings=settings)
+ return Atom(orig_dep.replace(mydep, expanded, 1), allow_repo=True)
diff --git a/lib/portage/dbapi/porttree.py b/lib/portage/dbapi/porttree.py
new file mode 100644
index 000000000..677452273
--- /dev/null
+++ b/lib/portage/dbapi/porttree.py
@@ -0,0 +1,1526 @@
+# Copyright 1998-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = [
+ "close_portdbapi_caches", "FetchlistDict", "portagetree", "portdbapi"
+]
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.checksum',
+ 'portage.data:portage_gid,secpass',
+ 'portage.dbapi.dep_expand:dep_expand',
+ 'portage.dep:Atom,dep_getkey,match_from_list,use_reduce,_match_slot',
+ 'portage.package.ebuild.doebuild:doebuild',
+ 'portage.util:ensure_dirs,shlex_split,writemsg,writemsg_level',
+ 'portage.util.listdir:listdir',
+ 'portage.versions:best,catsplit,catpkgsplit,_pkgsplit@pkgsplit,ver_regexp,_pkg_str',
+)
+
+from portage.cache import volatile
+from portage.cache.cache_errors import CacheError
+from portage.cache.mappings import Mapping
+from portage.dbapi import dbapi
+from portage.exception import PortageException, PortageKeyError, \
+ FileNotFound, InvalidAtom, InvalidData, \
+ InvalidDependString, InvalidPackageName
+from portage.localization import _
+
+from portage import eclass_cache, \
+ eapi_is_supported, \
+ _eapi_is_deprecated
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage import OrderedDict
+from portage.util._eventloop.EventLoop import EventLoop
+from portage.util.futures import asyncio
+from portage.util.futures.iter_completed import iter_gather
+from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
+
+import os as _os
+import sys
+import traceback
+import warnings
+import errno
+import collections
+import functools
+
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+ long = int
+
+def close_portdbapi_caches():
+ # The python interpreter does _not_ guarantee that destructors are
+ # called for objects that remain when the interpreter exits, so we
+ # use an atexit hook to call destructors for any global portdbapi
+ # instances that may have been constructed.
+ try:
+ portage._legacy_globals_constructed
+ except AttributeError:
+ pass
+ else:
+ if "db" in portage._legacy_globals_constructed:
+ try:
+ db = portage.db
+ except AttributeError:
+ pass
+ else:
+ if isinstance(db, dict):
+ for x in db.values():
+ try:
+ if "porttree" in x.lazy_items:
+ continue
+ except (AttributeError, TypeError):
+ continue
+ try:
+ x = x.pop("porttree").dbapi
+ except (AttributeError, KeyError):
+ continue
+ if not isinstance(x, portdbapi):
+ continue
+ x.close_caches()
+
+portage.process.atexit_register(close_portdbapi_caches)
+
+# It used to be necessary for API consumers to remove portdbapi instances
+# from portdbapi_instances, in order to avoid having accumulated instances
+# consume memory. Now, portdbapi_instances is just an empty dummy list, so
+# for backward compatibility, ignore ValueError for removal on non-existent
+# items.
+class _dummy_list(list):
+ def remove(self, item):
+ # TODO: Trigger a DeprecationWarning here, after stable portage
+ # has dummy portdbapi_instances.
+ try:
+ list.remove(self, item)
+ except ValueError:
+ pass
+
+
+class _better_cache(object):
+
+ """
+ The purpose of better_cache is to locate catpkgs in repositories using ``os.listdir()`` as much as possible, which
+ is less expensive IO-wise than exhaustively doing a stat on each repo for a particular catpkg. better_cache stores a
+ list of repos in which particular catpkgs appear. Various dbapi methods use better_cache to locate repositories of
+ interest related to particular catpkg rather than performing an exhaustive scan of all repos/overlays.
+
+ Better_cache.items data may look like this::
+
+ { "sys-apps/portage" : [ repo1, repo2 ] }
+
+ Without better_cache, Portage will get slower and slower (due to excessive IO) as more overlays are added.
+
+ Also note that it is OK if this cache has some 'false positive' catpkgs in it. We use it to search for specific
+ catpkgs listed in ebuilds. The likelihood of a false positive catpkg in our cache causing a problem is extremely
+ low, because the user of our cache is passing us a catpkg that came from somewhere and has already undergone some
+ validation, and even then will further interrogate the short-list of repos we return to gather more information
+ on the catpkg.
+
+ Thus, the code below is optimized for speed rather than painstaking correctness. I have added a note to
+ ``dbapi.getRepositories()`` to ensure that developers are aware of this just in case.
+
+ The better_cache has been redesigned to perform on-demand scans -- it will only scan a category at a time, as
+ needed. This should further optimize IO performance by not scanning category directories that are not needed by
+ Portage.
+ """
+
+ def __init__(self, repositories):
+ self._items = collections.defaultdict(list)
+ self._scanned_cats = set()
+
+ # ordered list of all portree locations we'll scan:
+ self._repo_list = [repo for repo in reversed(list(repositories))
+ if repo.location is not None]
+
+ def __getitem__(self, catpkg):
+ result = self._items.get(catpkg)
+ if result is not None:
+ return result
+
+ cat, pkg = catsplit(catpkg)
+ if cat not in self._scanned_cats:
+ self._scan_cat(cat)
+ return self._items[catpkg]
+
+ def _scan_cat(self, cat):
+ for repo in self._repo_list:
+ cat_dir = repo.location + "/" + cat
+ try:
+ pkg_list = os.listdir(cat_dir)
+ except OSError as e:
+ if e.errno not in (errno.ENOTDIR, errno.ENOENT, errno.ESTALE):
+ raise
+ continue
+ for p in pkg_list:
+ if os.path.isdir(cat_dir + "/" + p):
+ self._items[cat + "/" + p].append(repo)
+ self._scanned_cats.add(cat)
+
+
+class portdbapi(dbapi):
+ """this tree will scan a portage directory located at root (passed to init)"""
+ portdbapi_instances = _dummy_list()
+ _use_mutable = True
+
+ @property
+ def _categories(self):
+ return self.settings.categories
+
+ @property
+ def porttree_root(self):
+ warnings.warn("portage.dbapi.porttree.portdbapi.porttree_root is deprecated in favor of portage.repository.config.RepoConfig.location "
+ "(available as repositories[repo_name].location attribute of instances of portage.dbapi.porttree.portdbapi class)",
+ DeprecationWarning, stacklevel=2)
+ return self.settings.repositories.mainRepoLocation()
+
+ @property
+ def eclassdb(self):
+ warnings.warn("portage.dbapi.porttree.portdbapi.eclassdb is deprecated in favor of portage.repository.config.RepoConfig.eclass_db "
+ "(available as repositories[repo_name].eclass_db attribute of instances of portage.dbapi.porttree.portdbapi class)",
+ DeprecationWarning, stacklevel=2)
+ main_repo = self.repositories.mainRepo()
+ if main_repo is None:
+ return None
+ return main_repo.eclass_db
+
+ def __init__(self, _unused_param=DeprecationWarning, mysettings=None):
+ """
+ @param _unused_param: deprecated, use mysettings['PORTDIR'] instead
+ @type _unused_param: None
+ @param mysettings: an immutable config instance
+ @type mysettings: portage.config
+ """
+
+ from portage import config
+ if mysettings:
+ self.settings = mysettings
+ else:
+ from portage import settings
+ self.settings = config(clone=settings)
+
+ if _unused_param is not DeprecationWarning:
+ warnings.warn("The first parameter of the " + \
+ "portage.dbapi.porttree.portdbapi" + \
+ " constructor is unused since portage-2.1.8. " + \
+ "mysettings['PORTDIR'] is used instead.",
+ DeprecationWarning, stacklevel=2)
+
+ self.repositories = self.settings.repositories
+ self.treemap = self.repositories.treemap
+
+ # This is strictly for use in aux_get() doebuild calls when metadata
+ # is generated by the depend phase. It's safest to use a clone for
+ # this purpose because doebuild makes many changes to the config
+ # instance that is passed in.
+ self.doebuild_settings = config(clone=self.settings)
+ self.depcachedir = os.path.realpath(self.settings.depcachedir)
+
+ if os.environ.get("SANDBOX_ON") == "1":
+ # Make api consumers exempt from sandbox violations
+ # when doing metadata cache updates.
+ sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":")
+ if self.depcachedir not in sandbox_write:
+ sandbox_write.append(self.depcachedir)
+ os.environ["SANDBOX_WRITE"] = \
+ ":".join(filter(None, sandbox_write))
+
+ self.porttrees = list(self.settings.repositories.repoLocationList())
+
+ # This is used as sanity check for aux_get(). If there is no
+ # root eclass dir, we assume that PORTDIR is invalid or
+ # missing. This check allows aux_get() to detect a missing
+ # portage tree and return early by raising a KeyError.
+ self._have_root_eclass_dir = os.path.isdir(
+ os.path.join(self.settings.repositories.mainRepoLocation(), "eclass"))
+
+ #if the portdbapi is "frozen", then we assume that we can cache everything (that no updates to it are happening)
+ self.xcache = {}
+ self.frozen = 0
+
+ #Keep a list of repo names, sorted by priority (highest priority first).
+ self._ordered_repo_name_list = tuple(reversed(self.repositories.prepos_order))
+
+ self.auxdbmodule = self.settings.load_best_module("portdbapi.auxdbmodule")
+ self.auxdb = {}
+ self._pregen_auxdb = {}
+ # If the current user doesn't have depcachedir write permission,
+ # then the depcachedir cache is kept here read-only access.
+ self._ro_auxdb = {}
+ self._init_cache_dirs()
+ try:
+ depcachedir_st = os.stat(self.depcachedir)
+ depcachedir_w_ok = os.access(self.depcachedir, os.W_OK)
+ except OSError:
+ depcachedir_st = None
+ depcachedir_w_ok = False
+
+ cache_kwargs = {}
+
+ depcachedir_unshared = False
+ if portage.data.secpass < 1 and \
+ depcachedir_w_ok and \
+ depcachedir_st is not None and \
+ os.getuid() == depcachedir_st.st_uid and \
+ os.getgid() == depcachedir_st.st_gid:
+ # If this user owns depcachedir and is not in the
+ # portage group, then don't bother to set permissions
+ # on cache entries. This makes it possible to run
+ # egencache without any need to be a member of the
+ # portage group.
+ depcachedir_unshared = True
+ else:
+ cache_kwargs.update({
+ 'gid' : portage_gid,
+ 'perms' : 0o664
+ })
+
+ # If secpass < 1, we don't want to write to the cache
+ # since then we won't be able to apply group permissions
+ # to the cache entries/directories.
+ if (secpass < 1 and not depcachedir_unshared) or not depcachedir_w_ok:
+ for x in self.porttrees:
+ self.auxdb[x] = volatile.database(
+ self.depcachedir, x, self._known_keys,
+ **cache_kwargs)
+ try:
+ self._ro_auxdb[x] = self.auxdbmodule(self.depcachedir, x,
+ self._known_keys, readonly=True, **cache_kwargs)
+ except CacheError:
+ pass
+ else:
+ for x in self.porttrees:
+ if x in self.auxdb:
+ continue
+ # location, label, auxdbkeys
+ self.auxdb[x] = self.auxdbmodule(
+ self.depcachedir, x, self._known_keys, **cache_kwargs)
+ if "metadata-transfer" not in self.settings.features:
+ for x in self.porttrees:
+ if x in self._pregen_auxdb:
+ continue
+ cache = self._create_pregen_cache(x)
+ if cache is not None:
+ self._pregen_auxdb[x] = cache
+ # Selectively cache metadata in order to optimize dep matching.
+ self._aux_cache_keys = set(
+ ["BDEPEND", "DEPEND", "EAPI", "HDEPEND",
+ "INHERITED", "IUSE", "KEYWORDS", "LICENSE",
+ "PDEPEND", "PROPERTIES", "RDEPEND", "repository",
+ "RESTRICT", "SLOT", "DEFINED_PHASES", "REQUIRED_USE"])
+
+ self._aux_cache = {}
+ self._better_cache = None
+ self._broken_ebuilds = set()
+
+ def _set_porttrees(self, porttrees):
+ """
+ Consumers, such as repoman and emirrordist, may modify the porttrees
+ attribute in order to modify the effective set of repositories for
+ all portdbapi operations.
+
+ @param porttrees: list of repo locations, in ascending order by
+ repo priority
+ @type porttrees: list
+ """
+ self._porttrees_repos = portage.OrderedDict((repo.name, repo)
+ for repo in (self.repositories.get_repo_for_location(location)
+ for location in porttrees))
+ self._porttrees = tuple(porttrees)
+
+ def _get_porttrees(self):
+ return self._porttrees
+
+ porttrees = property(_get_porttrees, _set_porttrees)
+
+ @property
+ def _event_loop(self):
+ if portage._internal_caller:
+ # For internal portage usage, asyncio._wrap_loop() is safe.
+ return asyncio._wrap_loop()
+ else:
+ # For external API consumers, use a local EventLoop, since
+ # we don't want to assume that it's safe to override the
+ # global SIGCHLD handler.
+ return EventLoop(main=False)
+
+ def _create_pregen_cache(self, tree):
+ conf = self.repositories.get_repo_for_location(tree)
+ cache = conf.get_pregenerated_cache(
+ self._known_keys, readonly=True)
+ if cache is not None:
+ try:
+ cache.ec = self.repositories.get_repo_for_location(tree).eclass_db
+ except AttributeError:
+ pass
+
+ if not cache.complete_eclass_entries:
+ warnings.warn(
+ ("Repository '%s' used deprecated 'pms' cache format. "
+ "Please migrate to 'md5-dict' format.") % (conf.name,),
+ DeprecationWarning)
+
+ return cache
+
+ def _init_cache_dirs(self):
+ """Create /var/cache/edb/dep and adjust permissions for the portage
+ group."""
+
+ dirmode = 0o2070
+ modemask = 0o2
+
+ try:
+ ensure_dirs(self.depcachedir, gid=portage_gid,
+ mode=dirmode, mask=modemask)
+ except PortageException:
+ pass
+
+ def close_caches(self):
+ if not hasattr(self, "auxdb"):
+ # unhandled exception thrown from constructor
+ return
+ for x in self.auxdb:
+ self.auxdb[x].sync()
+ self.auxdb.clear()
+
+ def flush_cache(self):
+ for x in self.auxdb.values():
+ x.sync()
+
+ def findLicensePath(self, license_name):
+ for x in reversed(self.porttrees):
+ license_path = os.path.join(x, "licenses", license_name)
+ if os.access(license_path, os.R_OK):
+ return license_path
+ return None
+
+ def findname(self,mycpv, mytree = None, myrepo = None):
+ return self.findname2(mycpv, mytree, myrepo)[0]
+
+ def getRepositoryPath(self, repository_id):
+ """
+ This function is required for GLEP 42 compliance; given a valid repository ID
+ it must return a path to the repository
+ TreeMap = { id:path }
+ """
+ return self.treemap.get(repository_id)
+
+ def getRepositoryName(self, canonical_repo_path):
+ """
+ This is the inverse of getRepositoryPath().
+ @param canonical_repo_path: the canonical path of a repository, as
+ resolved by os.path.realpath()
+ @type canonical_repo_path: String
+ @return: The repo_name for the corresponding repository, or None
+ if the path does not correspond to a known repository
+ @rtype: String or None
+ """
+ try:
+ return self.repositories.get_name_for_location(canonical_repo_path)
+ except KeyError:
+ return None
+
+ def getRepositories(self, catpkg=None):
+
+ """
+ With catpkg=None, this will return a complete list of repositories in this dbapi. With catpkg set to a value,
+ this method will return a short-list of repositories that contain this catpkg. Use this second approach if
+ possible, to avoid exhaustively searching all repos for a particular catpkg. It's faster for this method to
+ find the catpkg than for you to do it yourself. When specifying catpkg, you should have reasonable assurance that
+ the category is valid and PMS-compliant as the caching mechanism we use does not perform validation checks for
+ categories.
+
+ This function is required for GLEP 42 compliance.
+
+ @param catpkg: catpkg for which we want a list of repositories; we'll get a list of all repos containing this
+ catpkg; if None, return a list of all repositories in this dbapi.
+ @return: a list of repositories.
+ """
+
+ if catpkg is not None and self._better_cache is not None:
+ return [repo.name for repo in self._better_cache[catpkg]]
+ return self._ordered_repo_name_list
+
+ def getMissingRepoNames(self):
+ """
+ Returns a list of repository paths that lack profiles/repo_name.
+ """
+ return self.settings.repositories.missing_repo_names
+
+ def getIgnoredRepos(self):
+ """
+ Returns a list of repository paths that have been ignored, because
+ another repo with the same name exists.
+ """
+ return self.settings.repositories.ignored_repos
+
+ def findname2(self, mycpv, mytree=None, myrepo=None):
+ """
+ Returns the location of the CPV, and what overlay it was in.
+ Searches overlays first, then PORTDIR; this allows us to return the first
+ matching file. As opposed to starting in portdir and then doing overlays
+ second, we would have to exhaustively search the overlays until we found
+ the file we wanted.
+ If myrepo is not None it will find packages from this repository(overlay)
+ """
+ if not mycpv:
+ return (None, 0)
+
+ if myrepo is not None:
+ mytree = self.treemap.get(myrepo)
+ if mytree is None:
+ return (None, 0)
+ elif mytree is not None:
+ # myrepo enables cached results when available
+ myrepo = self.repositories.location_map.get(mytree)
+
+ mysplit = mycpv.split("/")
+ psplit = pkgsplit(mysplit[1])
+ if psplit is None or len(mysplit) != 2:
+ raise InvalidPackageName(mycpv)
+
+ try:
+ cp = mycpv.cp
+ except AttributeError:
+ cp = mysplit[0] + "/" + psplit[0]
+
+ if self._better_cache is None:
+ if mytree:
+ mytrees = [mytree]
+ else:
+ mytrees = reversed(self.porttrees)
+ else:
+ try:
+ repos = self._better_cache[cp]
+ except KeyError:
+ return (None, 0)
+
+ mytrees = []
+ for repo in repos:
+ if mytree is not None and mytree != repo.location:
+ continue
+ mytrees.append(repo.location)
+
+ # For optimal performance in this hot spot, we do manual unicode
+ # handling here instead of using the wrapped os module.
+ encoding = _encodings['fs']
+ errors = 'strict'
+
+ relative_path = mysplit[0] + _os.sep + psplit[0] + _os.sep + \
+ mysplit[1] + ".ebuild"
+
+ # There is no need to access the filesystem when the package
+ # comes from this db and the package repo attribute corresponds
+ # to the desired repo, since the file was previously found by
+ # the cp_list method.
+ if (myrepo is not None and myrepo == getattr(mycpv, 'repo', None)
+ and self is getattr(mycpv, '_db', None)):
+ return (mytree + _os.sep + relative_path, mytree)
+
+ for x in mytrees:
+ filename = x + _os.sep + relative_path
+ if _os.access(_unicode_encode(filename,
+ encoding=encoding, errors=errors), _os.R_OK):
+ return (filename, x)
+ return (None, 0)
+
+ def _write_cache(self, cpv, repo_path, metadata, ebuild_hash):
+
+ try:
+ cache = self.auxdb[repo_path]
+ chf = cache.validation_chf
+ metadata['_%s_' % chf] = getattr(ebuild_hash, chf)
+ except CacheError:
+ # Normally this shouldn't happen, so we'll show
+ # a traceback for debugging purposes.
+ traceback.print_exc()
+ cache = None
+
+ if cache is not None:
+ try:
+ cache[cpv] = metadata
+ except CacheError:
+ # Normally this shouldn't happen, so we'll show
+ # a traceback for debugging purposes.
+ traceback.print_exc()
+
+ def _pull_valid_cache(self, cpv, ebuild_path, repo_path):
+ try:
+ ebuild_hash = eclass_cache.hashed_path(ebuild_path)
+ # snag mtime since we use it later, and to trigger stat failure
+ # if it doesn't exist
+ ebuild_hash.mtime
+ except FileNotFound:
+ writemsg(_("!!! aux_get(): ebuild for " \
+ "'%s' does not exist at:\n") % (cpv,), noiselevel=-1)
+ writemsg("!!! %s\n" % ebuild_path, noiselevel=-1)
+ raise PortageKeyError(cpv)
+
+ # Pull pre-generated metadata from the metadata/cache/
+ # directory if it exists and is valid, otherwise fall
+ # back to the normal writable cache.
+ auxdbs = []
+ pregen_auxdb = self._pregen_auxdb.get(repo_path)
+ if pregen_auxdb is not None:
+ auxdbs.append(pregen_auxdb)
+ ro_auxdb = self._ro_auxdb.get(repo_path)
+ if ro_auxdb is not None:
+ auxdbs.append(ro_auxdb)
+ auxdbs.append(self.auxdb[repo_path])
+ eclass_db = self.repositories.get_repo_for_location(repo_path).eclass_db
+
+ for auxdb in auxdbs:
+ try:
+ metadata = auxdb[cpv]
+ except KeyError:
+ continue
+ except CacheError:
+ if not auxdb.readonly:
+ try:
+ del auxdb[cpv]
+ except (KeyError, CacheError):
+ pass
+ continue
+ eapi = metadata.get('EAPI', '').strip()
+ if not eapi:
+ eapi = '0'
+ metadata['EAPI'] = eapi
+ if not eapi_is_supported(eapi):
+ # Since we're supposed to be able to efficiently obtain the
+ # EAPI from _parse_eapi_ebuild_head, we disregard cache entries
+ # for unsupported EAPIs.
+ continue
+ if auxdb.validate_entry(metadata, ebuild_hash, eclass_db):
+ break
+ else:
+ metadata = None
+
+ return (metadata, ebuild_hash)
+
+ def aux_get(self, mycpv, mylist, mytree=None, myrepo=None):
+ "stub code for returning auxilliary db information, such as SLOT, DEPEND, etc."
+ 'input: "sys-apps/foo-1.0",["SLOT","DEPEND","HOMEPAGE"]'
+ 'return: ["0",">=sys-libs/bar-1.0","http://www.foo.com"] or raise PortageKeyError if error'
+ # For external API consumers, self._event_loop returns a new event
+ # loop on each access, so a local reference is needed in order
+ # to avoid instantiating more than one.
+ loop = self._event_loop
+ return loop.run_until_complete(
+ self.async_aux_get(mycpv, mylist, mytree=mytree,
+ myrepo=myrepo, loop=loop))
+
+ def async_aux_get(self, mycpv, mylist, mytree=None, myrepo=None, loop=None):
+ """
+ Asynchronous form of aux_get.
+
+ @param mycpv: cpv for an ebuild
+ @type mycpv: str
+ @param mylist: list of metadata keys
+ @type mylist: list
+ @param mytree: The canonical path of the tree in which the ebuild
+ is located, or None for automatic lookup
+ @type mytree: str
+ @param myrepo: name of the repo in which the ebuild is located,
+ or None for automatic lookup
+ @type myrepo: str
+ @param loop: event loop (defaults to global event loop)
+ @type loop: EventLoop
+ @return: list of metadata values
+ @rtype: asyncio.Future (or compatible)
+ """
+ # Don't default to self._event_loop here, since that creates a
+ # local event loop for thread safety, and that could easily lead
+ # to simultaneous instantiation of multiple event loops here.
+ # Callers of this method certainly want the same event loop to
+ # be used for all calls.
+ loop = asyncio._wrap_loop(loop)
+ future = loop.create_future()
+ cache_me = False
+ if myrepo is not None:
+ mytree = self.treemap.get(myrepo)
+ if mytree is None:
+ future.set_exception(PortageKeyError(myrepo))
+ return future
+
+ if mytree is not None and len(self.porttrees) == 1 \
+ and mytree == self.porttrees[0]:
+ # mytree matches our only tree, so it's safe to
+ # ignore mytree and cache the result
+ mytree = None
+ myrepo = None
+
+ if mytree is None:
+ cache_me = True
+ if mytree is None and not self._known_keys.intersection(
+ mylist).difference(self._aux_cache_keys):
+ aux_cache = self._aux_cache.get(mycpv)
+ if aux_cache is not None:
+ future.set_result([aux_cache.get(x, "") for x in mylist])
+ return future
+ cache_me = True
+
+ try:
+ cat, pkg = mycpv.split("/", 1)
+ except ValueError:
+ # Missing slash. Can't find ebuild so raise PortageKeyError.
+ future.set_exception(PortageKeyError(mycpv))
+ return future
+
+ myebuild, mylocation = self.findname2(mycpv, mytree)
+
+ if not myebuild:
+ writemsg("!!! aux_get(): %s\n" % \
+ _("ebuild not found for '%s'") % mycpv, noiselevel=1)
+ future.set_exception(PortageKeyError(mycpv))
+ return future
+
+ mydata, ebuild_hash = self._pull_valid_cache(mycpv, myebuild, mylocation)
+
+ if mydata is not None:
+ self._aux_get_return(
+ future, mycpv, mylist, myebuild, ebuild_hash,
+ mydata, mylocation, cache_me, None)
+ return future
+
+ if myebuild in self._broken_ebuilds:
+ future.set_exception(PortageKeyError(mycpv))
+ return future
+
+ proc = EbuildMetadataPhase(cpv=mycpv,
+ ebuild_hash=ebuild_hash, portdb=self,
+ repo_path=mylocation, scheduler=loop,
+ settings=self.doebuild_settings)
+
+ proc.addExitListener(functools.partial(self._aux_get_return,
+ future, mycpv, mylist, myebuild, ebuild_hash, mydata, mylocation,
+ cache_me))
+ future.add_done_callback(functools.partial(self._aux_get_cancel, proc))
+ proc.start()
+ return future
+
+ @staticmethod
+ def _aux_get_cancel(proc, future):
+ if future.cancelled() and proc.returncode is None:
+ proc.cancel()
+
+ def _aux_get_return(self, future, mycpv, mylist, myebuild, ebuild_hash,
+ mydata, mylocation, cache_me, proc):
+ if future.cancelled():
+ return
+ if proc is not None:
+ if proc.returncode != os.EX_OK:
+ self._broken_ebuilds.add(myebuild)
+ future.set_exception(PortageKeyError(mycpv))
+ return
+ mydata = proc.metadata
+ mydata["repository"] = self.repositories.get_name_for_location(mylocation)
+ mydata["_mtime_"] = ebuild_hash.mtime
+ eapi = mydata.get("EAPI")
+ if not eapi:
+ eapi = "0"
+ mydata["EAPI"] = eapi
+ if eapi_is_supported(eapi):
+ mydata["INHERITED"] = " ".join(mydata.get("_eclasses_", []))
+
+ #finally, we look at our internal cache entry and return the requested data.
+ returnme = [mydata.get(x, "") for x in mylist]
+
+ if cache_me and self.frozen:
+ aux_cache = {}
+ for x in self._aux_cache_keys:
+ aux_cache[x] = mydata.get(x, "")
+ self._aux_cache[mycpv] = aux_cache
+
+ future.set_result(returnme)
+
+ def getFetchMap(self, mypkg, useflags=None, mytree=None):
+ """
+ Get the SRC_URI metadata as a dict which maps each file name to a
+ set of alternative URIs.
+
+ @param mypkg: cpv for an ebuild
+ @type mypkg: String
+ @param useflags: a collection of enabled USE flags, for evaluation of
+ conditionals
+ @type useflags: set, or None to enable all conditionals
+ @param mytree: The canonical path of the tree in which the ebuild
+ is located, or None for automatic lookup
+ @type mytree: String
+ @return: A dict which maps each file name to a set of alternative
+ URIs.
+ @rtype: dict
+ """
+ loop = self._event_loop
+ return loop.run_until_complete(
+ self.async_fetch_map(mypkg, useflags=useflags,
+ mytree=mytree, loop=loop))
+
+ def async_fetch_map(self, mypkg, useflags=None, mytree=None, loop=None):
+ """
+ Asynchronous form of getFetchMap.
+
+ @param mypkg: cpv for an ebuild
+ @type mypkg: String
+ @param useflags: a collection of enabled USE flags, for evaluation of
+ conditionals
+ @type useflags: set, or None to enable all conditionals
+ @param mytree: The canonical path of the tree in which the ebuild
+ is located, or None for automatic lookup
+ @type mytree: String
+ @param loop: event loop (defaults to global event loop)
+ @type loop: EventLoop
+ @return: A future that results in a dict which maps each file name to
+ a set of alternative URIs.
+ @rtype: asyncio.Future (or compatible)
+ """
+ loop = asyncio._wrap_loop(loop)
+ result = loop.create_future()
+
+ def aux_get_done(aux_get_future):
+ if result.cancelled():
+ return
+ if aux_get_future.exception() is not None:
+ if isinstance(aux_get_future.exception(), PortageKeyError):
+ # Convert this to an InvalidDependString exception since
+ # callers already handle it.
+ result.set_exception(portage.exception.InvalidDependString(
+ "getFetchMap(): aux_get() error reading "
+ + mypkg + "; aborting."))
+ else:
+ result.set_exception(aux_get_future.exception())
+ return
+
+ eapi, myuris = aux_get_future.result()
+
+ if not eapi_is_supported(eapi):
+ # Convert this to an InvalidDependString exception
+ # since callers already handle it.
+ result.set_exception(portage.exception.InvalidDependString(
+ "getFetchMap(): '%s' has unsupported EAPI: '%s'" % \
+ (mypkg, eapi)))
+ return
+
+ result.set_result(_parse_uri_map(mypkg,
+ {'EAPI':eapi,'SRC_URI':myuris}, use=useflags))
+
+ aux_get_future = self.async_aux_get(
+ mypkg, ["EAPI", "SRC_URI"], mytree=mytree, loop=loop)
+ result.add_done_callback(lambda result:
+ aux_get_future.cancel() if result.cancelled() else None)
+ aux_get_future.add_done_callback(aux_get_done)
+ return result
+
+ def getfetchsizes(self, mypkg, useflags=None, debug=0, myrepo=None):
+ # returns a filename:size dictionary of remaining downloads
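+ # Illustrative call (hypothetical cpv): files already present in
+ # DISTDIR with their full expected size are omitted from the result.
+ #
+ #     remaining = portdb.getfetchsizes("app-misc/foo-1.0")
+ #     total_bytes = sum(remaining.values())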
+ myebuild, mytree = self.findname2(mypkg, myrepo=myrepo)
+ if myebuild is None:
+ raise AssertionError(_("ebuild not found for '%s'") % mypkg)
+ pkgdir = os.path.dirname(myebuild)
+ mf = self.repositories.get_repo_for_location(
+ os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
+ pkgdir, self.settings["DISTDIR"])
+ checksums = mf.getDigests()
+ if not checksums:
+ if debug:
+ writemsg(_("[empty/missing/bad digest]: %s\n") % (mypkg,))
+ return {}
+ filesdict={}
+ myfiles = self.getFetchMap(mypkg, useflags=useflags, mytree=mytree)
+ #XXX: maybe this should be improved: take partial downloads
+ # into account? check checksums?
+ for myfile in myfiles:
+ try:
+ fetch_size = int(checksums[myfile]["size"])
+ except (KeyError, ValueError):
+ if debug:
+ writemsg(_("[bad digest]: missing %(file)s for %(pkg)s\n") % {"file":myfile, "pkg":mypkg})
+ continue
+ file_path = os.path.join(self.settings["DISTDIR"], myfile)
+ mystat = None
+ try:
+ mystat = os.stat(file_path)
+ except OSError:
+ pass
+ if mystat is None:
+ existing_size = 0
+ ro_distdirs = self.settings.get("PORTAGE_RO_DISTDIRS")
+ if ro_distdirs is not None:
+ for x in shlex_split(ro_distdirs):
+ try:
+ mystat = os.stat(os.path.join(x, myfile))
+ except OSError:
+ pass
+ else:
+ if mystat.st_size == fetch_size:
+ existing_size = fetch_size
+ break
+ else:
+ existing_size = mystat.st_size
+ remaining_size = fetch_size - existing_size
+ if remaining_size > 0:
+ # Assume the download is resumable.
+ filesdict[myfile] = remaining_size
+ elif remaining_size < 0:
+ # The existing file is too large and therefore corrupt.
+ filesdict[myfile] = int(checksums[myfile]["size"])
+ return filesdict
+
+ def fetch_check(self, mypkg, useflags=None, mysettings=None, all=False, myrepo=None):
+ """
+ TODO: account for PORTAGE_RO_DISTDIRS
+ """
+ if all:
+ useflags = None
+ elif useflags is None:
+ if mysettings:
+ useflags = mysettings["USE"].split()
+ if myrepo is not None:
+ mytree = self.treemap.get(myrepo)
+ if mytree is None:
+ return False
+ else:
+ mytree = None
+
+ myfiles = self.getFetchMap(mypkg, useflags=useflags, mytree=mytree)
+ myebuild = self.findname(mypkg, myrepo=myrepo)
+ if myebuild is None:
+ raise AssertionError(_("ebuild not found for '%s'") % mypkg)
+ pkgdir = os.path.dirname(myebuild)
+ mf = self.repositories.get_repo_for_location(
+ os.path.dirname(os.path.dirname(pkgdir)))
+ mf = mf.load_manifest(pkgdir, self.settings["DISTDIR"])
+ mysums = mf.getDigests()
+
+ failures = {}
+ for x in myfiles:
+ if not mysums or x not in mysums:
+ ok = False
+ reason = _("digest missing")
+ else:
+ try:
+ ok, reason = portage.checksum.verify_all(
+ os.path.join(self.settings["DISTDIR"], x), mysums[x])
+ except FileNotFound as e:
+ ok = False
+ reason = _("File Not Found: '%s'") % (e,)
+ if not ok:
+ failures[x] = reason
+ if failures:
+ return False
+ return True
+
+ def cpv_exists(self, mykey, myrepo=None):
+ "Tells us whether an actual ebuild exists on disk (no masking)"
+ cps2 = mykey.split("/")
+ cps = catpkgsplit(mykey, silent=0)
+ if not cps:
+ #invalid cat/pkg-v
+ return 0
+ if self.findname(cps[0] + "/" + cps2[1], myrepo=myrepo):
+ return 1
+ else:
+ return 0
+
+ def cp_all(self, categories=None, trees=None, reverse=False, sort=True):
+ """
+ This returns a list of all keys in our tree or trees
+ @param categories: optional list of categories to search or
+ defaults to self.settings.categories
+ @param trees: optional list of trees to search the categories in or
+ defaults to self.porttrees
+ @param reverse: reverse sort order (default is False)
+ @param sort: return sorted results (default is True)
+ @rtype: list of [cat/pkg,...]
+ """
+ d = {}
+ if categories is None:
+ categories = self.settings.categories
+ if trees is None:
+ trees = self.porttrees
+ for x in categories:
+ for oroot in trees:
+ for y in listdir(oroot+"/"+x, EmptyOnError=1, ignorecvs=1, dirsonly=1):
+ try:
+ atom = Atom("%s/%s" % (x, y))
+ except InvalidAtom:
+ continue
+ if atom != atom.cp:
+ continue
+ d[atom.cp] = None
+ l = list(d)
+ if sort:
+ l.sort(reverse=reverse)
+ return l
+
+ def cp_list(self, mycp, use_cache=1, mytree=None):
+ # NOTE: Cache can be safely shared with the match cache, since the
+ # match cache uses the result from dep_expand for the cache_key.
+ if self.frozen and mytree is not None \
+ and len(self.porttrees) == 1 \
+ and mytree == self.porttrees[0]:
+ # mytree matches our only tree, so it's safe to
+ # ignore mytree and cache the result
+ mytree = None
+
+ if self.frozen and mytree is None:
+ cachelist = self.xcache["cp-list"].get(mycp)
+ if cachelist is not None:
+ # Try to propagate this to the match-all cache here for
+ # repoman since it uses separate match-all caches for each
+ # profile (due to differences in _get_implicit_iuse).
+ self.xcache["match-all"][(mycp, mycp)] = cachelist
+ return cachelist[:]
+ mysplit = mycp.split("/")
+ invalid_category = mysplit[0] not in self._categories
+ # Process repos in ascending order by repo.priority, so that
+ # stable sort by version produces results ordered by
+ # (pkg.version, repo.priority).
+ if mytree is not None:
+ if isinstance(mytree, basestring):
+ repos = [self.repositories.get_repo_for_location(mytree)]
+ else:
+ # assume it's iterable
+ repos = [self.repositories.get_repo_for_location(location)
+ for location in mytree]
+ elif self._better_cache is None:
+ repos = self._porttrees_repos.values()
+ else:
+ repos = [repo for repo in reversed(self._better_cache[mycp])
+ if repo.name in self._porttrees_repos]
+ mylist = []
+ for repo in repos:
+ oroot = repo.location
+ try:
+ file_list = os.listdir(os.path.join(oroot, mycp))
+ except OSError:
+ continue
+ for x in file_list:
+ pf = None
+ if x[-7:] == '.ebuild':
+ pf = x[:-7]
+
+ if pf is not None:
+ ps = pkgsplit(pf)
+ if not ps:
+ writemsg(_("\nInvalid ebuild name: %s\n") % \
+ os.path.join(oroot, mycp, x), noiselevel=-1)
+ continue
+ if ps[0] != mysplit[1]:
+ writemsg(_("\nInvalid ebuild name: %s\n") % \
+ os.path.join(oroot, mycp, x), noiselevel=-1)
+ continue
+ ver_match = ver_regexp.match("-".join(ps[1:]))
+ if ver_match is None or not ver_match.groups():
+ writemsg(_("\nInvalid ebuild version: %s\n") % \
+ os.path.join(oroot, mycp, x), noiselevel=-1)
+ continue
+ mylist.append(_pkg_str(mysplit[0]+"/"+pf, db=self, repo=repo.name))
+ if invalid_category and mylist:
+ writemsg(_("\n!!! '%s' has a category that is not listed in " \
+ "%setc/portage/categories\n") % \
+ (mycp, self.settings["PORTAGE_CONFIGROOT"]), noiselevel=-1)
+ mylist = []
+ # Always sort in ascending order here since it's handy and
+ # the result can be easily cached and reused. Since mylist
+ # is initially in ascending order by repo.priority, stable
+ # sort by version produces results in ascending order by
+ # (pkg.version, repo.priority).
+ self._cpv_sort_ascending(mylist)
+ if self.frozen and mytree is None:
+ cachelist = mylist[:]
+ self.xcache["cp-list"][mycp] = cachelist
+ self.xcache["match-all"][(mycp, mycp)] = cachelist
+ return mylist
+
+ def freeze(self):
+ for x in ("bestmatch-visible", "cp-list", "match-all",
+ "match-all-cpv-only", "match-visible", "minimum-all",
+ "minimum-all-ignore-profile", "minimum-visible"):
+ self.xcache[x]={}
+ self.frozen=1
+ self._better_cache = _better_cache(self.repositories)
+
+ def melt(self):
+ self.xcache = {}
+ self._aux_cache = {}
+ self._better_cache = None
+ self.frozen = 0
+
+ def xmatch(self,level,origdep,mydep=None,mykey=None,mylist=None):
+ "caching match function; very trick stuff"
+ if level == "list-visible":
+ level = "match-visible"
+ warnings.warn("The 'list-visible' mode of "
+ "portage.dbapi.porttree.portdbapi.xmatch "
+ "has been renamed to match-visible",
+ DeprecationWarning, stacklevel=2)
+
+ if mydep is None:
+ #this stuff only runs on first call of xmatch()
+ #create mydep, mykey from origdep
+ mydep = dep_expand(origdep, mydb=self, settings=self.settings)
+ mykey = mydep.cp
+
+ #if no updates are being made to the tree, we can consult our xcache...
+ cache_key = None
+ if self.frozen:
+ cache_key = (mydep, mydep.unevaluated_atom)
+ try:
+ return self.xcache[level][cache_key][:]
+ except KeyError:
+ pass
+
+ myval = None
+ mytree = None
+ if mydep.repo is not None:
+ mytree = self.treemap.get(mydep.repo)
+ if mytree is None:
+ if level.startswith("match-"):
+ myval = []
+ else:
+ myval = ""
+
+ if myval is not None:
+ # Unknown repo, empty result.
+ pass
+ elif level == "match-all-cpv-only":
+ # match *all* packages, only against the cpv, in order
+ # to bypass unnecessary cache access for things like IUSE
+ # and SLOT.
+ if mydep == mykey:
+ # Share cache with match-all/cp_list when the result is the
+ # same. Note that this requires that mydep.repo is None and
+ # thus mytree is also None.
+ level = "match-all"
+ myval = self.cp_list(mykey, mytree=mytree)
+ else:
+ myval = match_from_list(mydep,
+ self.cp_list(mykey, mytree=mytree))
+
+ elif level in ("bestmatch-visible", "match-all",
+ "match-visible", "minimum-all", "minimum-all-ignore-profile",
+ "minimum-visible"):
+ # Find the minimum matching visible version. This is optimized to
+ # minimize the number of metadata accesses (improves performance
+ # especially in cases where metadata needs to be generated).
+ if mydep == mykey:
+ mylist = self.cp_list(mykey, mytree=mytree)
+ else:
+ mylist = match_from_list(mydep,
+ self.cp_list(mykey, mytree=mytree))
+
+ ignore_profile = level in ("minimum-all-ignore-profile",)
+ visibility_filter = level not in ("match-all",
+ "minimum-all", "minimum-all-ignore-profile")
+ single_match = level not in ("match-all", "match-visible")
+ myval = []
+ aux_keys = list(self._aux_cache_keys)
+ if level == "bestmatch-visible":
+ iterfunc = reversed
+ else:
+ iterfunc = iter
+
+ for cpv in iterfunc(mylist):
+ try:
+ metadata = dict(zip(aux_keys,
+ self.aux_get(cpv, aux_keys, myrepo=cpv.repo)))
+ except KeyError:
+ # ebuild not in this repo, or masked by corruption
+ continue
+
+ try:
+ pkg_str = _pkg_str(cpv, metadata=metadata,
+ settings=self.settings, db=self)
+ except InvalidData:
+ continue
+
+ if visibility_filter and not self._visible(pkg_str, metadata):
+ continue
+
+ if mydep.slot is not None and \
+ not _match_slot(mydep, pkg_str):
+ continue
+
+ if mydep.unevaluated_atom.use is not None and \
+ not self._match_use(mydep, pkg_str, metadata,
+ ignore_profile=ignore_profile):
+ continue
+
+ myval.append(pkg_str)
+ if single_match:
+ break
+
+ if single_match:
+ if myval:
+ myval = myval[0]
+ else:
+ myval = ""
+
+ elif level == "bestmatch-list":
+ #dep match -- find best match but restrict search to sublist
+ warnings.warn("The 'bestmatch-list' mode of "
+ "portage.dbapi.porttree.portdbapi.xmatch is deprecated",
+ DeprecationWarning, stacklevel=2)
+ myval = best(list(self._iter_match(mydep, mylist)))
+ elif level == "match-list":
+ #dep match -- find all matches but restrict search to sublist (used in 2nd half of visible())
+ warnings.warn("The 'match-list' mode of "
+ "portage.dbapi.porttree.portdbapi.xmatch is deprecated",
+ DeprecationWarning, stacklevel=2)
+ myval = list(self._iter_match(mydep, mylist))
+ else:
+ raise AssertionError(
+ "Invalid level argument: '%s'" % level)
+
+ if self.frozen:
+ xcache_this_level = self.xcache.get(level)
+ if xcache_this_level is not None:
+ xcache_this_level[cache_key] = myval
+ if not isinstance(myval, _pkg_str):
+ myval = myval[:]
+
+ return myval
+
+ def match(self, mydep, use_cache=1):
+ return self.xmatch("match-visible", mydep)
+
+ def gvisible(self, mylist):
+ warnings.warn("The 'gvisible' method of "
+ "portage.dbapi.porttree.portdbapi "
+ "is deprecated",
+ DeprecationWarning, stacklevel=2)
+ return list(self._iter_visible(iter(mylist)))
+
+ def visible(self, cpv_iter):
+ warnings.warn("The 'visible' method of "
+ "portage.dbapi.porttree.portdbapi "
+ "is deprecated",
+ DeprecationWarning, stacklevel=2)
+ if cpv_iter is None:
+ return []
+ return list(self._iter_visible(iter(cpv_iter)))
+
+ def _iter_visible(self, cpv_iter, myrepo=None):
+ """
+ Yield only the visible packages from cpv_iter.
+ """
+ aux_keys = list(self._aux_cache_keys)
+ metadata = {}
+
+ if myrepo is not None:
+ repos = [myrepo]
+ else:
+ # We iterate over self.porttrees, since it's common to
+ # tweak this attribute in order to adjust match behavior.
+ repos = []
+ for tree in reversed(self.porttrees):
+ repos.append(self.repositories.get_name_for_location(tree))
+
+ for mycpv in cpv_iter:
+ for repo in repos:
+ metadata.clear()
+ try:
+ metadata.update(zip(aux_keys,
+ self.aux_get(mycpv, aux_keys, myrepo=repo)))
+ except KeyError:
+ continue
+ except PortageException as e:
+ writemsg("!!! Error: aux_get('%s', %s)\n" %
+ (mycpv, aux_keys), noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
+ del e
+ continue
+
+ if not self._visible(mycpv, metadata):
+ continue
+
+ yield mycpv
+ # only yield a given cpv once
+ break
+
+ def _visible(self, cpv, metadata):
+ eapi = metadata["EAPI"]
+ if not eapi_is_supported(eapi):
+ return False
+ if _eapi_is_deprecated(eapi):
+ return False
+ if not metadata["SLOT"]:
+ return False
+
+ settings = self.settings
+ if settings._getMaskAtom(cpv, metadata):
+ return False
+ if settings._getMissingKeywords(cpv, metadata):
+ return False
+ if settings.local_config:
+ metadata['CHOST'] = settings.get('CHOST', '')
+ if not settings._accept_chost(cpv, metadata):
+ return False
+ metadata["USE"] = ""
+ if "?" in metadata["LICENSE"] or \
+ "?" in metadata["PROPERTIES"]:
+ self.doebuild_settings.setcpv(cpv, mydb=metadata)
+ metadata['USE'] = self.doebuild_settings['PORTAGE_USE']
+ try:
+ if settings._getMissingLicenses(cpv, metadata):
+ return False
+ if settings._getMissingProperties(cpv, metadata):
+ return False
+ if settings._getMissingRestrict(cpv, metadata):
+ return False
+ except InvalidDependString:
+ return False
+
+ return True
+
+class portagetree(object):
+ def __init__(self, root=DeprecationWarning, virtual=DeprecationWarning,
+ settings=None):
+ """
+ Constructor for a PortageTree
+
+ @param root: deprecated, defaults to settings['ROOT']
+ @type root: String/Path
+ @param virtual: UNUSED
+ @type virtual: No Idea
+ @param settings: Portage Configuration object (portage.settings)
+ @type settings: Instance of portage.config
+ """
+
+ if settings is None:
+ settings = portage.settings
+ self.settings = settings
+
+ if root is not DeprecationWarning:
+ warnings.warn("The root parameter of the " + \
+ "portage.dbapi.porttree.portagetree" + \
+ " constructor is now unused. Use " + \
+ "settings['ROOT'] instead.",
+ DeprecationWarning, stacklevel=2)
+
+ if virtual is not DeprecationWarning:
+ warnings.warn("The 'virtual' parameter of the "
+ "portage.dbapi.porttree.portagetree"
+ " constructor is unused",
+ DeprecationWarning, stacklevel=2)
+
+ self.portroot = settings["PORTDIR"]
+ self.__virtual = virtual
+ self.dbapi = portdbapi(mysettings=settings)
+
+ @property
+ def root(self):
+ warnings.warn("The root attribute of " + \
+ "portage.dbapi.porttree.portagetree" + \
+ " is deprecated. Use " + \
+ "settings['ROOT'] instead.",
+ DeprecationWarning, stacklevel=3)
+ return self.settings['ROOT']
+
+ @property
+ def virtual(self):
+ warnings.warn("The 'virtual' attribute of " + \
+ "portage.dbapi.porttree.portagetree" + \
+ " is deprecated.",
+ DeprecationWarning, stacklevel=3)
+ return self.__virtual
+
+ def dep_bestmatch(self,mydep):
+ "compatibility method"
+ mymatch = self.dbapi.xmatch("bestmatch-visible",mydep)
+ if mymatch is None:
+ return ""
+ return mymatch
+
+ def dep_match(self,mydep):
+ "compatibility method"
+ mymatch = self.dbapi.xmatch("match-visible",mydep)
+ if mymatch is None:
+ return []
+ return mymatch
+
+ def exists_specific(self,cpv):
+ return self.dbapi.cpv_exists(cpv)
+
+ def getallnodes(self):
+ """new behavior: these are all *unmasked* nodes. There may or may not be available
+ masked package for nodes in this nodes list."""
+ return self.dbapi.cp_all()
+
+ def getname(self, pkgname):
+ "returns file location for this particular package (DEPRECATED)"
+ if not pkgname:
+ return ""
+ mysplit = pkgname.split("/")
+ psplit = pkgsplit(mysplit[1])
+ return "/".join([self.portroot, mysplit[0], psplit[0], mysplit[1]])+".ebuild"
+
+ def getslot(self,mycatpkg):
+ "Get a slot for a catpkg; assume it exists."
+ myslot = ""
+ try:
+ myslot = self.dbapi._pkg_str(mycatpkg, None).slot
+ except KeyError:
+ pass
+ return myslot
+
+class FetchlistDict(Mapping):
+ """
+ This provides a mapping interface to retrieve fetch lists. It's used
+ to allow portage.manifest.Manifest to access fetch lists via a standard
+ mapping interface rather than using the dbapi directly.
+ """
+ def __init__(self, pkgdir, settings, mydbapi):
+ """pkgdir is a directory containing ebuilds and settings is passed into
+ portdbapi.getfetchlist for __getitem__ calls."""
+ self.pkgdir = pkgdir
+ self.cp = os.sep.join(pkgdir.split(os.sep)[-2:])
+ self.settings = settings
+ self.mytree = os.path.realpath(os.path.dirname(os.path.dirname(pkgdir)))
+ self.portdb = mydbapi
+
+ def __getitem__(self, pkg_key):
+ """Returns the complete fetch list for a given package."""
+ return list(self.portdb.getFetchMap(pkg_key, mytree=self.mytree))
+
+ def __contains__(self, cpv):
+ return cpv in self.__iter__()
+
+ def has_key(self, pkg_key):
+ """Returns true if the given package exists within pkgdir."""
+ warnings.warn("portage.dbapi.porttree.FetchlistDict.has_key() is "
+ "deprecated, use the 'in' operator instead",
+ DeprecationWarning, stacklevel=2)
+ return pkg_key in self
+
+ def __iter__(self):
+ return iter(self.portdb.cp_list(self.cp, mytree=self.mytree))
+
+ def __len__(self):
+ """This needs to be implemented in order to avoid
+ infinite recursion in some cases."""
+ return len(self.portdb.cp_list(self.cp, mytree=self.mytree))
+
+ def keys(self):
+ """Returns keys for all packages within pkgdir"""
+ return self.portdb.cp_list(self.cp, mytree=self.mytree)
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+
+
+def _async_manifest_fetchlist(portdb, repo_config, cp, cpv_list=None,
+ max_jobs=None, max_load=None, loop=None):
+ """
+ Asynchronous form of FetchlistDict, with max_jobs and max_load
+ parameters in order to control async_aux_get concurrency.
+
+ @param portdb: portdbapi instance
+ @type portdb: portdbapi
+ @param repo_config: repository configuration for a Manifest
+ @type repo_config: RepoConfig
+ @param cp: cp for a Manifest
+ @type cp: str
+ @param cpv_list: list of ebuild cpv values for a Manifest
+ @type cpv_list: list
+ @param max_jobs: max number of futures to process concurrently (default
+ is multiprocessing.cpu_count())
+ @type max_jobs: int
+ @param max_load: max load allowed when scheduling a new future,
+ otherwise schedule no more than 1 future at a time (default
+ is multiprocessing.cpu_count())
+ @type max_load: int or float
+ @param loop: event loop
+ @type loop: EventLoop
+ @return: a Future resulting in a Mapping compatible with FetchlistDict
+ @rtype: asyncio.Future (or compatible)
+ """
+ loop = asyncio._wrap_loop(loop)
+ result = loop.create_future()
+ cpv_list = (portdb.cp_list(cp, mytree=repo_config.location)
+ if cpv_list is None else cpv_list)
+
+ def gather_done(gather_result):
+ # All exceptions must be consumed from gather_result before this
+ # function returns, in order to avoid triggering the event loop's
+ # exception handler.
+ e = None
+ if not gather_result.cancelled():
+ for future in gather_result.result():
+ if (future.done() and not future.cancelled() and
+ future.exception() is not None):
+ e = future.exception()
+
+ if result.cancelled():
+ return
+ elif e is None:
+ result.set_result(dict((k, list(v.result()))
+ for k, v in zip(cpv_list, gather_result.result())))
+ else:
+ result.set_exception(e)
+
+ gather_result = iter_gather(
+ # Use a generator expression for lazy evaluation, so that iter_gather
+ # controls the number of concurrent async_fetch_map calls.
+ (portdb.async_fetch_map(cpv, mytree=repo_config.location, loop=loop)
+ for cpv in cpv_list),
+ max_jobs=max_jobs,
+ max_load=max_load,
+ loop=loop,
+ )
+
+ gather_result.add_done_callback(gather_done)
+ result.add_done_callback(lambda result:
+ gather_result.cancel() if result.cancelled() else None)
+
+ return result
+
+
+def _parse_uri_map(cpv, metadata, use=None):
+
+ myuris = use_reduce(metadata.get('SRC_URI', ''),
+ uselist=use, matchall=(use is None),
+ is_src_uri=True,
+ eapi=metadata['EAPI'])
+
+ uri_map = OrderedDict()
+
+ myuris.reverse()
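+ # Illustrative example of the SRC_URI rename operator handled below:
+ # "https://example.org/foo-1.0.tar.gz -> foo.tar.gz" yields
+ # uri_map["foo.tar.gz"] == ("https://example.org/foo-1.0.tar.gz",).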
+ while myuris:
+ uri = myuris.pop()
+ if myuris and myuris[-1] == "->":
+ myuris.pop()
+ distfile = myuris.pop()
+ else:
+ distfile = os.path.basename(uri)
+ if not distfile:
+ raise portage.exception.InvalidDependString(
+ ("getFetchMap(): '%s' SRC_URI has no file " + \
+ "name: '%s'") % (cpv, uri))
+
+ uri_set = uri_map.get(distfile)
+ if uri_set is None:
+ # Use OrderedDict to preserve order from SRC_URI
+ # while ensuring uniqueness.
+ uri_set = OrderedDict()
+ uri_map[distfile] = uri_set
+
+ # SRC_URI may contain a file name with no scheme, and in
+ # this case it does not belong in uri_set.
+ if urlparse(uri).scheme:
+ uri_set[uri] = True
+
+ # Convert OrderedDicts to tuples.
+ for k, v in uri_map.items():
+ uri_map[k] = tuple(v)
+
+ return uri_map
diff --git a/lib/portage/dbapi/vartree.py b/lib/portage/dbapi/vartree.py
new file mode 100644
index 000000000..a104306eb
--- /dev/null
+++ b/lib/portage/dbapi/vartree.py
@@ -0,0 +1,5559 @@
+# Copyright 1998-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division, unicode_literals
+
+__all__ = [
+ "vardbapi", "vartree", "dblink"] + \
+ ["write_contents", "tar_contents"]
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'hashlib:md5',
+ 'portage.checksum:_perform_md5_merge@perform_md5',
+ 'portage.data:portage_gid,portage_uid,secpass',
+ 'portage.dbapi.dep_expand:dep_expand',
+ 'portage.dbapi._MergeProcess:MergeProcess',
+ 'portage.dbapi._SyncfsProcess:SyncfsProcess',
+ 'portage.dep:dep_getkey,isjustname,isvalidatom,match_from_list,' + \
+ 'use_reduce,_slot_separator,_repo_separator',
+ 'portage.eapi:_get_eapi_attrs',
+ 'portage.elog:collect_ebuild_messages,collect_messages,' + \
+ 'elog_process,_merge_logentries',
+ 'portage.locks:lockdir,unlockdir,lockfile,unlockfile',
+ 'portage.output:bold,colorize',
+ 'portage.package.ebuild.doebuild:doebuild_environment,' + \
+ '_merge_unicode_error', '_spawn_phase',
+ 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
+ 'portage.package.ebuild._ipc.QueryCommand:QueryCommand',
+ 'portage.process:find_binary',
+ 'portage.util:apply_secpass_permissions,ConfigProtect,ensure_dirs,' + \
+ 'writemsg,writemsg_level,write_atomic,atomic_ofstream,writedict,' + \
+ 'grabdict,normalize_path,new_protect_filename',
+ 'portage.util.digraph:digraph',
+ 'portage.util.env_update:env_update',
+ 'portage.util.install_mask:install_mask_dir,InstallMask',
+ 'portage.util.listdir:dircache,listdir',
+ 'portage.util.movefile:movefile',
+ 'portage.util.path:first_existing,iter_parents',
+ 'portage.util.writeable_check:get_ro_checker',
+ 'portage.util._xattr:xattr',
+ 'portage.util._dyn_libs.PreservedLibsRegistry:PreservedLibsRegistry',
+ 'portage.util._dyn_libs.LinkageMapELF:LinkageMapELF@LinkageMap',
+ 'portage.util._dyn_libs.NeededEntry:NeededEntry',
+ 'portage.util._async.SchedulerInterface:SchedulerInterface',
+ 'portage.util._eventloop.EventLoop:EventLoop',
+ 'portage.util._eventloop.global_event_loop:global_event_loop',
+ 'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,vercmp,' + \
+ '_get_slot_re,_pkgsplit@pkgsplit,_pkg_str,_unknown_repo',
+ 'subprocess',
+ 'tarfile',
+)
+
+from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, \
+ MERGING_IDENTIFIER, PORTAGE_PACKAGE_ATOM, PRIVATE_PATH, VDB_PATH
+from portage.dbapi import dbapi
+from portage.exception import CommandNotFound, \
+ InvalidData, InvalidLocation, InvalidPackageName, \
+ FileNotFound, PermissionDenied, UnsupportedAPIException
+from portage.localization import _
+
+from portage import abssymlink, _movefile, bsd_chflags
+
+# This is a special version of the os module, wrapped for unicode support.
+from portage import os
+from portage import shutil
+from portage import _encodings
+from portage import _os_merge
+from portage import _selinux_merge
+from portage import _unicode_decode
+from portage import _unicode_encode
+from ._VdbMetadataDelta import VdbMetadataDelta
+
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.emergelog import emergelog
+from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+from _emerge.SpawnProcess import SpawnProcess
+from ._ContentsCaseSensitivityManager import ContentsCaseSensitivityManager
+
+import errno
+import fnmatch
+import gc
+import grp
+import io
+from itertools import chain
+import logging
+import os as _os
+import platform
+import pwd
+import re
+import stat
+import sys
+import tempfile
+import textwrap
+import time
+import warnings
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+ long = int
+ _unicode = str
+else:
+ _unicode = unicode
+
+class vardbapi(dbapi):
+
+ _excluded_dirs = ["CVS", "lost+found"]
+ _excluded_dirs = [re.escape(x) for x in _excluded_dirs]
+ _excluded_dirs = re.compile(r'^(\..*|' + MERGING_IDENTIFIER + '.*|' + \
+ "|".join(_excluded_dirs) + r')$')
+
+ _aux_cache_version = "1"
+ _owners_cache_version = "1"
+
+ # Number of uncached packages to trigger cache update, since
+ # it's wasteful to update it for every vdb change.
+ _aux_cache_threshold = 5
+
+ _aux_cache_keys_re = re.compile(r'^NEEDED\..*$')
+ _aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$')
+
+ def __init__(self, _unused_param=DeprecationWarning,
+ categories=None, settings=None, vartree=None):
+ """
+ The categories parameter is unused since the dbapi class
+ now has a categories property that is generated from the
+ available packages.
+ """
+
+ # Used by emerge to check whether any packages
+ # have been added or removed.
+ self._pkgs_changed = False
+
+ # The _aux_cache_threshold doesn't work as designed
+ # if the cache is flushed from a subprocess, so we
+ # use this to avoid wasteful vdb cache updates.
+ self._flush_cache_enabled = True
+
+ #cache for category directory mtimes
+ self.mtdircache = {}
+
+ #cache for dependency checks
+ self.matchcache = {}
+
+ #cache for cp_list results
+ self.cpcache = {}
+
+ self.blockers = None
+ if settings is None:
+ settings = portage.settings
+ self.settings = settings
+
+ if _unused_param is not DeprecationWarning:
+ warnings.warn("The first parameter of the "
+ "portage.dbapi.vartree.vardbapi"
+ " constructor is now unused. Instead "
+ "settings['ROOT'] is used.",
+ DeprecationWarning, stacklevel=2)
+
+ self._eroot = settings['EROOT']
+ self._dbroot = self._eroot + VDB_PATH
+ self._lock = None
+ self._lock_count = 0
+
+ self._conf_mem_file = self._eroot + CONFIG_MEMORY_FILE
+ self._fs_lock_obj = None
+ self._fs_lock_count = 0
+ self._slot_locks = {}
+
+ if vartree is None:
+ vartree = portage.db[settings['EROOT']]['vartree']
+ self.vartree = vartree
+ self._aux_cache_keys = set(
+ ["BDEPEND", "BUILD_TIME", "CHOST", "COUNTER", "DEPEND",
+ "DESCRIPTION", "EAPI", "HDEPEND", "HOMEPAGE",
+ "BUILD_ID", "IUSE", "KEYWORDS",
+ "LICENSE", "PDEPEND", "PROPERTIES", "RDEPEND",
+ "repository", "RESTRICT" , "SLOT", "USE", "DEFINED_PHASES",
+ "PROVIDES", "REQUIRES"
+ ])
+ self._aux_cache_obj = None
+ self._aux_cache_filename = os.path.join(self._eroot,
+ CACHE_PATH, "vdb_metadata.pickle")
+ self._cache_delta_filename = os.path.join(self._eroot,
+ CACHE_PATH, "vdb_metadata_delta.json")
+ self._cache_delta = VdbMetadataDelta(self)
+ self._counter_path = os.path.join(self._eroot,
+ CACHE_PATH, "counter")
+
+ self._plib_registry = PreservedLibsRegistry(settings["ROOT"],
+ os.path.join(self._eroot, PRIVATE_PATH, "preserved_libs_registry"))
+ self._linkmap = LinkageMap(self)
+ self._owners = self._owners_db(self)
+
+ self._cached_counter = None
+
+ @property
+ def writable(self):
+ """
+ Check if var/db/pkg is writable, or permissions are sufficient
+ to create it if it does not exist yet.
+ @rtype: bool
+ @return: True if var/db/pkg is writable or can be created,
+ False otherwise
+ """
+ return os.access(first_existing(self._dbroot), os.W_OK)
+
+ @property
+ def root(self):
+ warnings.warn("The root attribute of "
+ "portage.dbapi.vartree.vardbapi"
+ " is deprecated. Use "
+ "settings['ROOT'] instead.",
+ DeprecationWarning, stacklevel=3)
+ return self.settings['ROOT']
+
+ def getpath(self, mykey, filename=None):
+ # This is an optimized hotspot, so don't use unicode-wrapped
+ # os module and don't use os.path.join().
+ rValue = self._eroot + VDB_PATH + _os.sep + mykey
+ if filename is not None:
+ # If filename is always relative, we can do just
+ # rValue += _os.sep + filename
+ rValue = _os.path.join(rValue, filename)
+ return rValue
+
+ def lock(self):
+ """
+ Acquire a reentrant lock, blocking, for cooperation with concurrent
+ processes. State is inherited by subprocesses, allowing subprocesses
+ to reenter a lock that was acquired by a parent process. However,
+ a lock can be released only by the same process that acquired it.
+ """
+ if self._lock_count:
+ self._lock_count += 1
+ else:
+ if self._lock is not None:
+ raise AssertionError("already locked")
+ # At least the parent needs to exist for the lock file.
+ ensure_dirs(self._dbroot)
+ self._lock = lockdir(self._dbroot)
+ self._lock_count += 1
+
+ def unlock(self):
+ """
+ Release a lock, decrementing the recursion level. Each unlock() call
+ must be matched with a prior lock() call, or else an AssertionError
+ will be raised if unlock() is called while not locked.
+ """
+ if self._lock_count > 1:
+ self._lock_count -= 1
+ else:
+ if self._lock is None:
+ raise AssertionError("not locked")
+ self._lock_count = 0
+ unlockdir(self._lock)
+ self._lock = None
+
+ def _fs_lock(self):
+ """
+ Acquire a reentrant lock, blocking, for cooperation with concurrent
+ processes.
+ """
+ if self._fs_lock_count < 1:
+ if self._fs_lock_obj is not None:
+ raise AssertionError("already locked")
+ try:
+ self._fs_lock_obj = lockfile(self._conf_mem_file)
+ except InvalidLocation:
+ self.settings._init_dirs()
+ self._fs_lock_obj = lockfile(self._conf_mem_file)
+ self._fs_lock_count += 1
+
+ def _fs_unlock(self):
+ """
+ Release a lock, decrementing the recursion level.
+ """
+ if self._fs_lock_count <= 1:
+ if self._fs_lock_obj is None:
+ raise AssertionError("not locked")
+ unlockfile(self._fs_lock_obj)
+ self._fs_lock_obj = None
+ self._fs_lock_count -= 1
+
+ def _slot_lock(self, slot_atom):
+ """
+ Acquire a slot lock (reentrant).
+
+ WARNING: The vardbapi._slot_lock method is not safe to call
+ in the main process when that process is scheduling
+ install/uninstall tasks in parallel, since the locks would
+ be inherited by child processes. In order to avoid this sort
+ of problem, this method should be called in a subprocess
+ (typically spawned by the MergeProcess class).
+ """
+ lock, counter = self._slot_locks.get(slot_atom, (None, 0))
+ if lock is None:
+ lock_path = self.getpath("%s:%s" % (slot_atom.cp, slot_atom.slot))
+ ensure_dirs(os.path.dirname(lock_path))
+ lock = lockfile(lock_path, wantnewlockfile=True)
+ self._slot_locks[slot_atom] = (lock, counter + 1)
+
+ def _slot_unlock(self, slot_atom):
+ """
+ Release a slot lock (or decrement the recursion level).
+ """
+ lock, counter = self._slot_locks.get(slot_atom, (None, 0))
+ if lock is None:
+ raise AssertionError("not locked")
+ counter -= 1
+ if counter == 0:
+ unlockfile(lock)
+ del self._slot_locks[slot_atom]
+ else:
+ self._slot_locks[slot_atom] = (lock, counter)
+
+ def _bump_mtime(self, cpv):
+ """
+ This is called before and after any modifications, so that consumers
+ can use directory mtimes to validate caches. See bug #290428.
+ """
+ base = self._eroot + VDB_PATH
+ cat = catsplit(cpv)[0]
+ catdir = base + _os.sep + cat
+ t = time.time()
+ t = (t, t)
+ try:
+ for x in (catdir, base):
+ os.utime(x, t)
+ except OSError:
+ ensure_dirs(catdir)
+
+ def cpv_exists(self, mykey, myrepo=None):
+ "Tells us whether an actual ebuild exists on disk (no masking)"
+ return os.path.exists(self.getpath(mykey))
+
+ def cpv_counter(self, mycpv):
+ "This method will grab the COUNTER. Returns a counter value."
+ try:
+ return long(self.aux_get(mycpv, ["COUNTER"])[0])
+ except (KeyError, ValueError):
+ pass
+ writemsg_level(_("portage: COUNTER for %s was corrupted; " \
+ "resetting to value of 0\n") % (mycpv,),
+ level=logging.ERROR, noiselevel=-1)
+ return 0
+
+ def cpv_inject(self, mycpv):
+ "injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
+ ensure_dirs(self.getpath(mycpv))
+ counter = self.counter_tick(mycpv=mycpv)
+ # write local package counter so that emerge clean does the right thing
+ write_atomic(self.getpath(mycpv, filename="COUNTER"), str(counter))
+
+ def isInjected(self, mycpv):
+ if self.cpv_exists(mycpv):
+ if os.path.exists(self.getpath(mycpv, filename="INJECTED")):
+ return True
+ if not os.path.exists(self.getpath(mycpv, filename="CONTENTS")):
+ return True
+ return False
+
+ def move_ent(self, mylist, repo_match=None):
+ origcp = mylist[1]
+ newcp = mylist[2]
+
+ # sanity check
+ for atom in (origcp, newcp):
+ if not isjustname(atom):
+ raise InvalidPackageName(str(atom))
+ origmatches = self.match(origcp, use_cache=0)
+ moves = 0
+ if not origmatches:
+ return moves
+ for mycpv in origmatches:
+ try:
+ mycpv = self._pkg_str(mycpv, None)
+ except (KeyError, InvalidData):
+ continue
+ mycpv_cp = cpv_getkey(mycpv)
+ if mycpv_cp != origcp:
+ # Ignore PROVIDE virtual match.
+ continue
+ if repo_match is not None \
+ and not repo_match(mycpv.repo):
+ continue
+
+ # Use isvalidatom() to check if this move is valid for the
+ # EAPI (characters allowed in package names may vary).
+ if not isvalidatom(newcp, eapi=mycpv.eapi):
+ continue
+
+ mynewcpv = mycpv.replace(mycpv_cp, _unicode(newcp), 1)
+ mynewcat = catsplit(newcp)[0]
+ origpath = self.getpath(mycpv)
+ if not os.path.exists(origpath):
+ continue
+ moves += 1
+ if not os.path.exists(self.getpath(mynewcat)):
+ #create the directory
+ ensure_dirs(self.getpath(mynewcat))
+ newpath = self.getpath(mynewcpv)
+ if os.path.exists(newpath):
+ #dest already exists; keep this puppy where it is.
+ continue
+ _movefile(origpath, newpath, mysettings=self.settings)
+ self._clear_pkg_cache(self._dblink(mycpv))
+ self._clear_pkg_cache(self._dblink(mynewcpv))
+
+ # We need to rename the ebuild now.
+ old_pf = catsplit(mycpv)[1]
+ new_pf = catsplit(mynewcpv)[1]
+ if new_pf != old_pf:
+ try:
+ os.rename(os.path.join(newpath, old_pf + ".ebuild"),
+ os.path.join(newpath, new_pf + ".ebuild"))
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
+ write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
+
+ return moves
+
+ def cp_list(self, mycp, use_cache=1):
+ mysplit=catsplit(mycp)
+ if mysplit[0] == '*':
+ mysplit[0] = mysplit[0][1:]
+ try:
+ if sys.hexversion >= 0x3030000:
+ mystat = os.stat(self.getpath(mysplit[0])).st_mtime_ns
+ else:
+ mystat = os.stat(self.getpath(mysplit[0])).st_mtime
+ except OSError:
+ mystat = 0
+ if use_cache and mycp in self.cpcache:
+ cpc = self.cpcache[mycp]
+ if cpc[0] == mystat:
+ return cpc[1][:]
+ cat_dir = self.getpath(mysplit[0])
+ try:
+ dir_list = os.listdir(cat_dir)
+ except EnvironmentError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(cat_dir)
+ del e
+ dir_list = []
+
+ returnme = []
+ for x in dir_list:
+ if self._excluded_dirs.match(x) is not None:
+ continue
+ ps = pkgsplit(x)
+ if not ps:
+ self.invalidentry(os.path.join(self.getpath(mysplit[0]), x))
+ continue
+ if len(mysplit) > 1:
+ if ps[0] == mysplit[1]:
+ cpv = "%s/%s" % (mysplit[0], x)
+ metadata = dict(zip(self._aux_cache_keys,
+ self.aux_get(cpv, self._aux_cache_keys)))
+ returnme.append(_pkg_str(cpv, metadata=metadata,
+ settings=self.settings, db=self))
+ self._cpv_sort_ascending(returnme)
+ if use_cache:
+ self.cpcache[mycp] = [mystat, returnme[:]]
+ elif mycp in self.cpcache:
+ del self.cpcache[mycp]
+ return returnme
+
+ def cpv_all(self, use_cache=1):
+ """
+ Set use_cache=0 to bypass the portage.cachedir() cache in cases
+ when the accuracy of mtime staleness checks should not be trusted
+ (generally this is only necessary in critical sections that
+ involve merge or unmerge of packages).
+ """
+ return list(self._iter_cpv_all(use_cache=use_cache))
+
+ def _iter_cpv_all(self, use_cache=True, sort=False):
+ returnme = []
+ basepath = os.path.join(self._eroot, VDB_PATH) + os.path.sep
+
+ if use_cache:
+ from portage import listdir
+ else:
+ def listdir(p, **kwargs):
+ try:
+ return [x for x in os.listdir(p) \
+ if os.path.isdir(os.path.join(p, x))]
+ except EnvironmentError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(p)
+ del e
+ return []
+
+ catdirs = listdir(basepath, EmptyOnError=1, ignorecvs=1, dirsonly=1)
+ if sort:
+ catdirs.sort()
+
+ for x in catdirs:
+ if self._excluded_dirs.match(x) is not None:
+ continue
+ if not self._category_re.match(x):
+ continue
+
+ pkgdirs = listdir(basepath + x, EmptyOnError=1, dirsonly=1)
+ if sort:
+ pkgdirs.sort()
+
+ for y in pkgdirs:
+ if self._excluded_dirs.match(y) is not None:
+ continue
+ subpath = x + "/" + y
+ # -MERGING- should never be a cpv, nor should files.
+ try:
+ subpath = _pkg_str(subpath, db=self)
+ except InvalidData:
+ self.invalidentry(self.getpath(subpath))
+ continue
+
+ yield subpath
+
+ def cp_all(self, use_cache=1, sort=False):
+ mylist = self.cpv_all(use_cache=use_cache)
+ d={}
+ for y in mylist:
+ if y[0] == '*':
+ y = y[1:]
+ try:
+ mysplit = catpkgsplit(y)
+ except InvalidData:
+ self.invalidentry(self.getpath(y))
+ continue
+ if not mysplit:
+ self.invalidentry(self.getpath(y))
+ continue
+ d[mysplit[0]+"/"+mysplit[1]] = None
+ return sorted(d) if sort else list(d)
+
+ def checkblockers(self, origdep):
+ pass
+
+ def _clear_cache(self):
+ self.mtdircache.clear()
+ self.matchcache.clear()
+ self.cpcache.clear()
+ self._aux_cache_obj = None
+
+ def _add(self, pkg_dblink):
+ self._pkgs_changed = True
+ self._clear_pkg_cache(pkg_dblink)
+
+ def _remove(self, pkg_dblink):
+ self._pkgs_changed = True
+ self._clear_pkg_cache(pkg_dblink)
+
+ def _clear_pkg_cache(self, pkg_dblink):
+ # Due to 1 second mtime granularity in <python-2.5, mtime checks
+ # are not always sufficient to invalidate vardbapi caches. Therefore,
+ # the caches need to be actively invalidated here.
+ self.mtdircache.pop(pkg_dblink.cat, None)
+ self.matchcache.pop(pkg_dblink.cat, None)
+ self.cpcache.pop(pkg_dblink.mysplit[0], None)
+ dircache.pop(pkg_dblink.dbcatdir, None)
+
+ def match(self, origdep, use_cache=1):
+ "caching match function"
+ mydep = dep_expand(
+ origdep, mydb=self, use_cache=use_cache, settings=self.settings)
+ cache_key = (mydep, mydep.unevaluated_atom)
+ mykey = dep_getkey(mydep)
+ mycat = catsplit(mykey)[0]
+ if not use_cache:
+ if mycat in self.matchcache:
+ del self.mtdircache[mycat]
+ del self.matchcache[mycat]
+ return list(self._iter_match(mydep,
+ self.cp_list(mydep.cp, use_cache=use_cache)))
+ try:
+ if sys.hexversion >= 0x3030000:
+ curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime_ns
+ else:
+ curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime
+ except (IOError, OSError):
+ curmtime=0
+
+ if mycat not in self.matchcache or \
+ self.mtdircache[mycat] != curmtime:
+ # clear cache entry
+ self.mtdircache[mycat] = curmtime
+ self.matchcache[mycat] = {}
+ if mydep not in self.matchcache[mycat]:
+ mymatch = list(self._iter_match(mydep,
+ self.cp_list(mydep.cp, use_cache=use_cache)))
+ self.matchcache[mycat][cache_key] = mymatch
+ return self.matchcache[mycat][cache_key][:]
+
+ def findname(self, mycpv, myrepo=None):
+ return self.getpath(str(mycpv), filename=catsplit(mycpv)[1]+".ebuild")
+
+ def flush_cache(self):
+ """If the current user has permission and the internal aux_get cache has
+ been updated, save it to disk and mark it unmodified. This is called
+ by emerge after it has loaded the full vdb for use in dependency
+ calculations. Currently, the cache is only written if the user has
+ superuser privileges (since that's required to obtain a lock), but all
+ users have read access and benefit from faster metadata lookups (as
+ long as at least part of the cache is still valid)."""
+ if self._flush_cache_enabled and \
+ self._aux_cache is not None and \
+ secpass >= 2 and \
+ (len(self._aux_cache["modified"]) >= self._aux_cache_threshold or
+ not os.path.exists(self._cache_delta_filename)):
+
+ ensure_dirs(os.path.dirname(self._aux_cache_filename))
+
+ self._owners.populate() # index any unindexed contents
+ valid_nodes = set(self.cpv_all())
+ for cpv in list(self._aux_cache["packages"]):
+ if cpv not in valid_nodes:
+ del self._aux_cache["packages"][cpv]
+ del self._aux_cache["modified"]
+ timestamp = time.time()
+ self._aux_cache["timestamp"] = timestamp
+
+ f = atomic_ofstream(self._aux_cache_filename, 'wb')
+ pickle.dump(self._aux_cache, f, protocol=2)
+ f.close()
+ apply_secpass_permissions(
+ self._aux_cache_filename, mode=0o644)
+
+ self._cache_delta.initialize(timestamp)
+ apply_secpass_permissions(
+ self._cache_delta_filename, mode=0o644)
+
+ self._aux_cache["modified"] = set()
+
+ @property
+ def _aux_cache(self):
+ if self._aux_cache_obj is None:
+ self._aux_cache_init()
+ return self._aux_cache_obj
+
+ def _aux_cache_init(self):
+ aux_cache = None
+ open_kwargs = {}
+ if sys.hexversion >= 0x3000000 and sys.hexversion < 0x3020000:
+ # Buffered io triggers extreme performance issues in
+ # Unpickler.load() (problem observed with python-3.0.1).
+ # Unfortunately, performance is still poor relative to
+ # python-2.x, but buffering makes it much worse (problem
+ # appears to be solved in Python >=3.2 at least).
+ open_kwargs["buffering"] = 0
+ try:
+ with open(_unicode_encode(self._aux_cache_filename,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='rb', **open_kwargs) as f:
+ mypickle = pickle.Unpickler(f)
+ try:
+ mypickle.find_global = None
+ except AttributeError:
+ # TODO: If py3k, override Unpickler.find_class().
+ pass
+ aux_cache = mypickle.load()
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except Exception as e:
+ if isinstance(e, EnvironmentError) and \
+ getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES):
+ pass
+ else:
+ writemsg(_("!!! Error loading '%s': %s\n") % \
+ (self._aux_cache_filename, e), noiselevel=-1)
+ del e
+
+ if not aux_cache or \
+ not isinstance(aux_cache, dict) or \
+ aux_cache.get("version") != self._aux_cache_version or \
+ not aux_cache.get("packages"):
+ aux_cache = {"version": self._aux_cache_version}
+ aux_cache["packages"] = {}
+
+ owners = aux_cache.get("owners")
+ if owners is not None:
+ if not isinstance(owners, dict):
+ owners = None
+ elif "version" not in owners:
+ owners = None
+ elif owners["version"] != self._owners_cache_version:
+ owners = None
+ elif "base_names" not in owners:
+ owners = None
+ elif not isinstance(owners["base_names"], dict):
+ owners = None
+
+ if owners is None:
+ owners = {
+ "base_names" : {},
+ "version" : self._owners_cache_version
+ }
+ aux_cache["owners"] = owners
+
+ aux_cache["modified"] = set()
+ self._aux_cache_obj = aux_cache
+
+ def aux_get(self, mycpv, wants, myrepo = None):
+ """This automatically caches selected keys that are frequently needed
+ by emerge for dependency calculations. The cached metadata is
+ considered valid if the mtime of the package directory has not changed
+ since the data was cached. The cache is stored in a pickled dict
+ object with the following format:
+
+ {version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}
+
+ If an error occurs while loading the cache pickle or the version is
+ unrecognized, the cache will simply be recreated from scratch (it is
+ completely disposable).
+ """
+ cache_these_wants = self._aux_cache_keys.intersection(wants)
+ for x in wants:
+ if self._aux_cache_keys_re.match(x) is not None:
+ cache_these_wants.add(x)
+
+ if not cache_these_wants:
+ mydata = self._aux_get(mycpv, wants)
+ return [mydata[x] for x in wants]
+
+ cache_these = set(self._aux_cache_keys)
+ cache_these.update(cache_these_wants)
+
+ mydir = self.getpath(mycpv)
+ mydir_stat = None
+ try:
+ mydir_stat = os.stat(mydir)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ raise KeyError(mycpv)
+ # Use float mtime when available.
+ mydir_mtime = mydir_stat.st_mtime
+ pkg_data = self._aux_cache["packages"].get(mycpv)
+ pull_me = cache_these.union(wants)
+ mydata = {"_mtime_" : mydir_mtime}
+ cache_valid = False
+ cache_incomplete = False
+ cache_mtime = None
+ metadata = None
+ if pkg_data is not None:
+ if not isinstance(pkg_data, tuple) or len(pkg_data) != 2:
+ pkg_data = None
+ else:
+ cache_mtime, metadata = pkg_data
+ if not isinstance(cache_mtime, (float, long, int)) or \
+ not isinstance(metadata, dict):
+ pkg_data = None
+
+ if pkg_data:
+ cache_mtime, metadata = pkg_data
+ if isinstance(cache_mtime, float):
+ if cache_mtime == mydir_stat.st_mtime:
+ cache_valid = True
+
+ # Handle truncated mtime in order to avoid cache
+ # invalidation for livecd squashfs (bug 564222).
+ elif long(cache_mtime) == mydir_stat.st_mtime:
+ cache_valid = True
+ else:
+ # Cache may contain integer mtime.
+ cache_valid = cache_mtime == mydir_stat[stat.ST_MTIME]
+
+ if cache_valid:
+ # Migrate old metadata to unicode.
+ for k, v in metadata.items():
+ metadata[k] = _unicode_decode(v,
+ encoding=_encodings['repo.content'], errors='replace')
+
+ mydata.update(metadata)
+ pull_me.difference_update(mydata)
+
+ if pull_me:
+ # pull any needed data and cache it
+ aux_keys = list(pull_me)
+ mydata.update(self._aux_get(mycpv, aux_keys, st=mydir_stat))
+ if not cache_valid or cache_these.difference(metadata):
+ cache_data = {}
+ if cache_valid and metadata:
+ cache_data.update(metadata)
+ for aux_key in cache_these:
+ cache_data[aux_key] = mydata[aux_key]
+ self._aux_cache["packages"][_unicode(mycpv)] = \
+ (mydir_mtime, cache_data)
+ self._aux_cache["modified"].add(mycpv)
+
+ eapi_attrs = _get_eapi_attrs(mydata['EAPI'])
+ if _get_slot_re(eapi_attrs).match(mydata['SLOT']) is None:
+ # Empty or invalid slot triggers InvalidAtom exceptions when
+ # generating slot atoms for packages, so translate it to '0' here.
+ mydata['SLOT'] = '0'
+
+ return [mydata[x] for x in wants]
+
+ def _aux_get(self, mycpv, wants, st=None):
+ mydir = self.getpath(mycpv)
+ if st is None:
+ try:
+ st = os.stat(mydir)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ raise KeyError(mycpv)
+ elif e.errno == PermissionDenied.errno:
+ raise PermissionDenied(mydir)
+ else:
+ raise
+ if not stat.S_ISDIR(st.st_mode):
+ raise KeyError(mycpv)
+ results = {}
+ env_keys = []
+ for x in wants:
+ if x == "_mtime_":
+ results[x] = st[stat.ST_MTIME]
+ continue
+ try:
+ with io.open(
+ _unicode_encode(os.path.join(mydir, x),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ myd = f.read()
+ except IOError:
+ if x not in self._aux_cache_keys and \
+ self._aux_cache_keys_re.match(x) is None:
+ env_keys.append(x)
+ continue
+ myd = ''
+
+ # Preserve \n for metadata that is known to
+ # contain multiple lines.
+ if self._aux_multi_line_re.match(x) is None:
+ myd = " ".join(myd.split())
+
+ results[x] = myd
+
+ if env_keys:
+ env_results = self._aux_env_search(mycpv, env_keys)
+ for k in env_keys:
+ v = env_results.get(k)
+ if v is None:
+ v = ''
+ if self._aux_multi_line_re.match(k) is None:
+ v = " ".join(v.split())
+ results[k] = v
+
+ if results.get("EAPI") == "":
+ results["EAPI"] = '0'
+
+ return results
+
+ def _aux_env_search(self, cpv, variables):
+ """
+ Search environment.bz2 for the specified variables. Returns
+ a dict mapping variables to values, and any variables not
+ found in the environment will not be included in the dict.
+ This is useful for querying variables like ${SRC_URI} and
+ ${A}, which are not saved in separate files but are available
+ in environment.bz2 (see bug #395463).
+ """
+ env_file = self.getpath(cpv, filename="environment.bz2")
+ if not os.path.isfile(env_file):
+ return {}
+ bunzip2_cmd = portage.util.shlex_split(
+ self.settings.get("PORTAGE_BUNZIP2_COMMAND", ""))
+ if not bunzip2_cmd:
+ bunzip2_cmd = portage.util.shlex_split(
+ self.settings["PORTAGE_BZIP2_COMMAND"])
+ bunzip2_cmd.append("-d")
+ args = bunzip2_cmd + ["-c", env_file]
+ try:
+ proc = subprocess.Popen(args, stdout=subprocess.PIPE)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ raise portage.exception.CommandNotFound(args[0])
+
+ # Parts of the following code are borrowed from
+ # filter-bash-environment.py (keep them in sync).
+ var_assign_re = re.compile(r'(^|^declare\s+-\S+\s+|^declare\s+|^export\s+)([^=\s]+)=("|\')?(.*)$')
+ close_quote_re = re.compile(r'(\\"|"|\')\s*$')
+ def have_end_quote(quote, line):
+ close_quote_match = close_quote_re.search(line)
+ return close_quote_match is not None and \
+ close_quote_match.group(1) == quote
+
+ variables = frozenset(variables)
+ results = {}
+ for line in proc.stdout:
+ line = _unicode_decode(line,
+ encoding=_encodings['content'], errors='replace')
+ var_assign_match = var_assign_re.match(line)
+ if var_assign_match is not None:
+ key = var_assign_match.group(2)
+ quote = var_assign_match.group(3)
+ if quote is not None:
+ if have_end_quote(quote,
+ line[var_assign_match.end(2)+2:]):
+ value = var_assign_match.group(4)
+ else:
+ value = [var_assign_match.group(4)]
+ for line in proc.stdout:
+ line = _unicode_decode(line,
+ encoding=_encodings['content'],
+ errors='replace')
+ value.append(line)
+ if have_end_quote(quote, line):
+ break
+ value = ''.join(value)
+ # remove trailing quote and whitespace
+ value = value.rstrip()[:-1]
+ else:
+ value = var_assign_match.group(4).rstrip()
+
+ if key in variables:
+ results[key] = value
+
+ proc.wait()
+ proc.stdout.close()
+ return results
+
+ def aux_update(self, cpv, values):
+ mylink = self._dblink(cpv)
+ if not mylink.exists():
+ raise KeyError(cpv)
+ self._bump_mtime(cpv)
+ self._clear_pkg_cache(mylink)
+ for k, v in values.items():
+ if v:
+ mylink.setfile(k, v)
+ else:
+ try:
+ os.unlink(os.path.join(self.getpath(cpv), k))
+ except EnvironmentError:
+ pass
+ self._bump_mtime(cpv)
+
+ def counter_tick(self, myroot=None, mycpv=None):
+ """
+ @param myroot: ignored, self._eroot is used instead
+ """
+ return self.counter_tick_core(incrementing=1, mycpv=mycpv)
+
+ def get_counter_tick_core(self, myroot=None, mycpv=None):
+ """
+ Use this method to retrieve the counter instead
+ of having to trust the value of a global counter
+ file that can lead to invalid COUNTER
+ generation. When cache is valid, the package COUNTER
+ files are not read and we rely on the timestamp of
+ the package directory to validate cache. The stat
+ calls should only take a short time, so performance
+ is sufficient without having to rely on a potentially
+ corrupt global counter file.
+
+ The global counter file located at
+ $CACHE_PATH/counter serves to record the
+ counter of the last installed package and
+ it also corresponds to the total number of
+ installation actions that have occurred in
+ the history of this package database.
+
+ @param myroot: ignored, self._eroot is used instead
+ """
+ del myroot
+ counter = -1
+ try:
+ with io.open(
+ _unicode_encode(self._counter_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ try:
+ counter = long(f.readline().strip())
+ except (OverflowError, ValueError) as e:
+ writemsg(_("!!! COUNTER file is corrupt: '%s'\n") %
+ self._counter_path, noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
+ except EnvironmentError as e:
+ # Silently allow ENOENT since files under
+ # /var/cache/ are allowed to disappear.
+ if e.errno != errno.ENOENT:
+ writemsg(_("!!! Unable to read COUNTER file: '%s'\n") % \
+ self._counter_path, noiselevel=-1)
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ del e
+
+ if self._cached_counter == counter:
+ max_counter = counter
+ else:
+ # We must ensure that we return a counter
+ # value that is at least as large as the
+ # highest one from the installed packages,
+ # since having a corrupt value that is too low
+ # can trigger incorrect AUTOCLEAN behavior due
+ # to newly installed packages having lower
+ # COUNTERs than the previous version in the
+ # same slot.
+ max_counter = counter
+ for cpv in self.cpv_all():
+ try:
+ pkg_counter = int(self.aux_get(cpv, ["COUNTER"])[0])
+ except (KeyError, OverflowError, ValueError):
+ continue
+ if pkg_counter > max_counter:
+ max_counter = pkg_counter
+
+ return max_counter + 1
+
+ def counter_tick_core(self, myroot=None, incrementing=1, mycpv=None):
+ """
+ This method will grab the next COUNTER value and record it back
+ to the global file. Note that every package install must have
+ a unique counter, since a slotmove update can move two packages
+ into the same SLOT and in that case it's important that both
+ packages have different COUNTER metadata.
+
+ @param myroot: ignored, self._eroot is used instead
+ @param mycpv: ignored
+ @rtype: int
+ @return: new counter value
+ """
+ myroot = None
+ mycpv = None
+ self.lock()
+ try:
+ counter = self.get_counter_tick_core() - 1
+ if incrementing:
+ #increment counter
+ counter += 1
+ # update new global counter file
+ try:
+ write_atomic(self._counter_path, str(counter))
+ except InvalidLocation:
+ self.settings._init_dirs()
+ write_atomic(self._counter_path, str(counter))
+ self._cached_counter = counter
+
+ # Since we hold a lock, this is a good opportunity
+ # to flush the cache. Note that this will only
+ # flush the cache periodically in the main process
+ # when _aux_cache_threshold is exceeded.
+ self.flush_cache()
+ finally:
+ self.unlock()
+
+ return counter
+
+ def _dblink(self, cpv):
+ category, pf = catsplit(cpv)
+ return dblink(category, pf, settings=self.settings,
+ vartree=self.vartree, treetype="vartree")
+
+ def removeFromContents(self, pkg, paths, relative_paths=True):
+ """
+ @param pkg: cpv for an installed package
+ @type pkg: string
+ @param paths: paths of files to remove from contents
+ @type paths: iterable
+ """
+ if not hasattr(pkg, "getcontents"):
+ pkg = self._dblink(pkg)
+ root = self.settings['ROOT']
+ root_len = len(root) - 1
+ new_contents = pkg.getcontents().copy()
+ removed = 0
+
+ for filename in paths:
+ filename = _unicode_decode(filename,
+ encoding=_encodings['content'], errors='strict')
+ filename = normalize_path(filename)
+ if relative_paths:
+ relative_filename = filename
+ else:
+ relative_filename = filename[root_len:]
+ contents_key = pkg._match_contents(relative_filename)
+ if contents_key:
+ # It's possible for two different paths to refer to the same
+ # contents_key, due to directory symlinks. Therefore, pass a
+ # default value to pop, in order to avoid a KeyError which
+ # could otherwise be triggered (see bug #454400).
+ new_contents.pop(contents_key, None)
+ removed += 1
+
+ if removed:
+ # Also remove corresponding NEEDED lines, so that they do
+ # not corrupt LinkageMap data for preserve-libs.
+ needed_filename = os.path.join(pkg.dbdir, LinkageMap._needed_aux_key)
+ new_needed = None
+ try:
+ with io.open(_unicode_encode(needed_filename,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ needed_lines = f.readlines()
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ else:
+ new_needed = []
+ for l in needed_lines:
+ l = l.rstrip("\n")
+ if not l:
+ continue
+ try:
+ entry = NeededEntry.parse(needed_filename, l)
+ except InvalidData as e:
+ writemsg_level("\n%s\n\n" % (e,),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+
+ filename = os.path.join(root, entry.filename.lstrip(os.sep))
+ if filename in new_contents:
+ new_needed.append(entry)
+
+ self.writeContentsToContentsFile(pkg, new_contents, new_needed=new_needed)
+
+ def writeContentsToContentsFile(self, pkg, new_contents, new_needed=None):
+ """
+ @param pkg: package to write contents file for
+ @type pkg: dblink
+ @param new_contents: contents to write to CONTENTS file
+ @type new_contents: contents dictionary of the form
+ {u'/path/to/file' : (contents_attribute 1, ...), ...}
+ @param new_needed: new NEEDED entries
+ @type new_needed: list of NeededEntry
+ """
+ root = self.settings['ROOT']
+ self._bump_mtime(pkg.mycpv)
+ if new_needed is not None:
+ f = atomic_ofstream(os.path.join(pkg.dbdir, LinkageMap._needed_aux_key))
+ for entry in new_needed:
+ f.write(_unicode(entry))
+ f.close()
+ f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
+ write_contents(new_contents, root, f)
+ f.close()
+ self._bump_mtime(pkg.mycpv)
+ pkg._clear_contents_cache()
+
+ class _owners_cache(object):
+ """
+ This class maintains a hash table that serves to index package
+ contents by mapping the basename of a file to a list of possible
+ packages that own it. This is used to optimize owner lookups
+ by narrowing the search down to a smaller number of packages.
+ """
+ _new_hash = md5
+ _hash_bits = 16
+ _hex_chars = _hash_bits // 4
+
+ def __init__(self, vardb):
+ self._vardb = vardb
+
+ def add(self, cpv):
+ eroot_len = len(self._vardb._eroot)
+ pkg_hash = self._hash_pkg(cpv)
+ db = self._vardb._dblink(cpv)
+ if not db.getcontents():
+ # Empty path is a code used to represent empty contents.
+ self._add_path("", pkg_hash)
+
+ for x in db._contents.keys():
+ self._add_path(x[eroot_len:], pkg_hash)
+
+ self._vardb._aux_cache["modified"].add(cpv)
+
+ def _add_path(self, path, pkg_hash):
+ """
+ Empty path is a code that represents empty contents.
+ """
+ if path:
+ name = os.path.basename(path.rstrip(os.path.sep))
+ if not name:
+ return
+ else:
+ name = path
+ name_hash = self._hash_str(name)
+ base_names = self._vardb._aux_cache["owners"]["base_names"]
+ pkgs = base_names.get(name_hash)
+ if pkgs is None:
+ pkgs = {}
+ base_names[name_hash] = pkgs
+ pkgs[pkg_hash] = None
+
+ def _hash_str(self, s):
+ h = self._new_hash()
+ # Always use a constant utf_8 encoding here, since
+ # the "default" encoding can change.
+ h.update(_unicode_encode(s,
+ encoding=_encodings['repo.content'],
+ errors='backslashreplace'))
+ h = h.hexdigest()
+ h = h[-self._hex_chars:]
+ h = int(h, 16)
+ return h
+
+ def _hash_pkg(self, cpv):
+ counter, mtime = self._vardb.aux_get(
+ cpv, ["COUNTER", "_mtime_"])
+ try:
+ counter = int(counter)
+ except ValueError:
+ counter = 0
+ return (_unicode(cpv), counter, mtime)
+
+ class _owners_db(object):
+
+ def __init__(self, vardb):
+ self._vardb = vardb
+
+ def populate(self):
+ self._populate()
+
+ def _populate(self):
+ owners_cache = vardbapi._owners_cache(self._vardb)
+ cached_hashes = set()
+ base_names = self._vardb._aux_cache["owners"]["base_names"]
+
+ # Take inventory of all cached package hashes.
+ for name, hash_values in list(base_names.items()):
+ if not isinstance(hash_values, dict):
+ del base_names[name]
+ continue
+ cached_hashes.update(hash_values)
+
+ # Create sets of valid package hashes and uncached packages.
+ uncached_pkgs = set()
+ hash_pkg = owners_cache._hash_pkg
+ valid_pkg_hashes = set()
+ for cpv in self._vardb.cpv_all():
+ hash_value = hash_pkg(cpv)
+ valid_pkg_hashes.add(hash_value)
+ if hash_value not in cached_hashes:
+ uncached_pkgs.add(cpv)
+
+ # Cache any missing packages.
+ for cpv in uncached_pkgs:
+ owners_cache.add(cpv)
+
+ # Delete any stale cache.
+ stale_hashes = cached_hashes.difference(valid_pkg_hashes)
+ if stale_hashes:
+ for base_name_hash, bucket in list(base_names.items()):
+ for hash_value in stale_hashes.intersection(bucket):
+ del bucket[hash_value]
+ if not bucket:
+ del base_names[base_name_hash]
+
+ return owners_cache
+
+ def get_owners(self, path_iter):
+ """
+ @return: the owners as a dblink -> set(files) mapping.
+ """
+ owners = {}
+ for owner, f in self.iter_owners(path_iter):
+ owned_files = owners.get(owner)
+ if owned_files is None:
+ owned_files = set()
+ owners[owner] = owned_files
+ owned_files.add(f)
+ return owners
+
+ def getFileOwnerMap(self, path_iter):
+ owners = self.get_owners(path_iter)
+ file_owners = {}
+ for pkg_dblink, files in owners.items():
+ for f in files:
+ owner_set = file_owners.get(f)
+ if owner_set is None:
+ owner_set = set()
+ file_owners[f] = owner_set
+ owner_set.add(pkg_dblink)
+ return file_owners
+
+ def iter_owners(self, path_iter):
+ """
+ Iterate over tuples of (dblink, path). In order to avoid
+ consuming too many resources for too long, resources
+ are only allocated for the duration of a given iter_owners()
+ call. Therefore, to maximize reuse of resources when searching
+ for multiple files, it's best to search for them all in a single
+ call.
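+
+ A minimal usage sketch (hypothetical path):
+     for dblnk, relative_path in vardb._owners.iter_owners(["/usr/bin/foo"]):
+         print(dblnk.mycpv, relative_path)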
+ """
+
+ if not isinstance(path_iter, list):
+ path_iter = list(path_iter)
+ owners_cache = self._populate()
+ vardb = self._vardb
+ root = vardb._eroot
+ hash_pkg = owners_cache._hash_pkg
+ hash_str = owners_cache._hash_str
+ base_names = self._vardb._aux_cache["owners"]["base_names"]
+ case_insensitive = "case-insensitive-fs" \
+ in vardb.settings.features
+
+ dblink_cache = {}
+
+ def dblink(cpv):
+ x = dblink_cache.get(cpv)
+ if x is None:
+ if len(dblink_cache) > 20:
+ # Ensure that we don't run out of memory.
+ raise StopIteration()
+ x = self._vardb._dblink(cpv)
+ dblink_cache[cpv] = x
+ return x
+
+ while path_iter:
+
+ path = path_iter.pop()
+ if case_insensitive:
+ path = path.lower()
+ is_basename = os.sep != path[:1]
+ if is_basename:
+ name = path
+ else:
+ name = os.path.basename(path.rstrip(os.path.sep))
+
+ if not name:
+ continue
+
+ name_hash = hash_str(name)
+ pkgs = base_names.get(name_hash)
+ owners = []
+ if pkgs is not None:
+ try:
+ for hash_value in pkgs:
+ if not isinstance(hash_value, tuple) or \
+ len(hash_value) != 3:
+ continue
+ cpv, counter, mtime = hash_value
+ if not isinstance(cpv, basestring):
+ continue
+ try:
+ current_hash = hash_pkg(cpv)
+ except KeyError:
+ continue
+
+ if current_hash != hash_value:
+ continue
+
+ if is_basename:
+ for p in dblink(cpv)._contents.keys():
+ if os.path.basename(p) == name:
+ owners.append((cpv, dblink(cpv).
+ _contents.unmap_key(
+ p)[len(root):]))
+ else:
+ key = dblink(cpv)._match_contents(path)
+ if key is not False:
+ owners.append(
+ (cpv, key[len(root):]))
+
+ except StopIteration:
+ path_iter.append(path)
+ del owners[:]
+ dblink_cache.clear()
+ gc.collect()
+ for x in self._iter_owners_low_mem(path_iter):
+ yield x
+ return
+ else:
+ for cpv, p in owners:
+ yield (dblink(cpv), p)
+
+ def _iter_owners_low_mem(self, path_list):
+ """
+ This implementation will make a short-lived dblink instance (and
+ parse CONTENTS) for every single installed package. This is
+ slower but uses less memory than the method which uses the
+ basename cache.
+ """
+
+ if not path_list:
+ return
+
+ case_insensitive = "case-insensitive-fs" \
+ in self._vardb.settings.features
+ path_info_list = []
+ for path in path_list:
+ if case_insensitive:
+ path = path.lower()
+ is_basename = os.sep != path[:1]
+ if is_basename:
+ name = path
+ else:
+ name = os.path.basename(path.rstrip(os.path.sep))
+ path_info_list.append((path, name, is_basename))
+
+ # Do work via the global event loop, so that it can be used
+ # for indication of progress during the search (bug #461412).
+ event_loop = (portage._internal_caller and
+ global_event_loop() or EventLoop(main=False))
+ root = self._vardb._eroot
+
+ def search_pkg(cpv, search_future):
+ dblnk = self._vardb._dblink(cpv)
+ results = []
+ for path, name, is_basename in path_info_list:
+ if is_basename:
+ for p in dblnk._contents.keys():
+ if os.path.basename(p) == name:
+ results.append((dblnk,
+ dblnk._contents.unmap_key(
+ p)[len(root):]))
+ else:
+ key = dblnk._match_contents(path)
+ if key is not False:
+ results.append(
+ (dblnk, key[len(root):]))
+ search_future.set_result(results)
+
+ for cpv in self._vardb.cpv_all():
+ search_future = event_loop.create_future()
+ event_loop.call_soon(search_pkg, cpv, search_future)
+ event_loop.run_until_complete(search_future)
+ for result in search_future.result():
+ yield result
+
+class vartree(object):
+ "this tree will scan a var/db/pkg database located at root (passed to init)"
+ def __init__(self, root=None, virtual=DeprecationWarning, categories=None,
+ settings=None):
+
+ if settings is None:
+ settings = portage.settings
+
+ if root is not None and root != settings['ROOT']:
+ warnings.warn("The 'root' parameter of the "
+ "portage.dbapi.vartree.vartree"
+ " constructor is now unused. Use "
+ "settings['ROOT'] instead.",
+ DeprecationWarning, stacklevel=2)
+
+ if virtual is not DeprecationWarning:
+ warnings.warn("The 'virtual' parameter of the "
+ "portage.dbapi.vartree.vartree"
+ " constructor is unused",
+ DeprecationWarning, stacklevel=2)
+
+ self.settings = settings
+ self.dbapi = vardbapi(settings=settings, vartree=self)
+ self.populated = 1
+
+ @property
+ def root(self):
+ warnings.warn("The root attribute of "
+ "portage.dbapi.vartree.vartree"
+ " is deprecated. Use "
+ "settings['ROOT'] instead.",
+ DeprecationWarning, stacklevel=3)
+ return self.settings['ROOT']
+
+ def getpath(self, mykey, filename=None):
+ return self.dbapi.getpath(mykey, filename=filename)
+
+ def zap(self, mycpv):
+ return
+
+ def inject(self, mycpv):
+ return
+
+ def get_provide(self, mycpv):
+ return []
+
+ def get_all_provides(self):
+ return {}
+
+ def dep_bestmatch(self, mydep, use_cache=1):
+ "compatibility method -- all matches, not just visible ones"
+ #mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
+ mymatch = best(self.dbapi.match(
+ dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
+ use_cache=use_cache))
+ if mymatch is None:
+ return ""
+ else:
+ return mymatch
+
+ def dep_match(self, mydep, use_cache=1):
+ "compatibility method -- we want to see all matches, not just visible ones"
+ #mymatch = match(mydep,self.dbapi)
+ mymatch = self.dbapi.match(mydep, use_cache=use_cache)
+ if mymatch is None:
+ return []
+ else:
+ return mymatch
+
+ def exists_specific(self, cpv):
+ return self.dbapi.cpv_exists(cpv)
+
+ def getallcpv(self):
+ """temporary function, probably to be renamed --- Gets a list of all
+ category/package-versions installed on the system."""
+ return self.dbapi.cpv_all()
+
+ def getallnodes(self):
+ """new behavior: these are all *unmasked* nodes. There may or may not be available
+ masked package for nodes in this nodes list."""
+ return self.dbapi.cp_all()
+
+ def getebuildpath(self, fullpackage):
+ cat, package = catsplit(fullpackage)
+ return self.getpath(fullpackage, filename=package+".ebuild")
+
+ def getslot(self, mycatpkg):
+ "Get a slot for a catpkg; assume it exists."
+ try:
+ return self.dbapi._pkg_str(mycatpkg, None).slot
+ except KeyError:
+ return ""
+
+ def populate(self):
+ self.populated=1
+
+class dblink(object):
+ """
+ This class provides an interface to the installed package database.
+ At present this is implemented as a text backend in /var/db/pkg.
+ """
+
+ import re
+ _normalize_needed = re.compile(r'//|^[^/]|./$|(^|/)\.\.?(/|$)')
+
+ _contents_re = re.compile(r'^(' + \
+ r'(?P<dir>(dev|dir|fif) (.+))|' + \
+ r'(?P<obj>(obj) (.+) (\S+) (\d+))|' + \
+ r'(?P<sym>(sym) (.+) -> (.+) ((\d+)|(?P<oldsym>(' + \
+ r'\(\d+, \d+L, \d+L, \d+, \d+, \d+, \d+L, \d+, (\d+), \d+\)))))' + \
+ r')$'
+ )
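+ # Hypothetical CONTENTS lines matched by the groups above:
+ #   dir /usr/share/doc/foo-1.0
+ #   obj /usr/bin/foo d41d8cd98f00b204e9800998ecf8427e 1577836800
+ #   sym /usr/lib/libfoo.so -> libfoo.so.1 1577836800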
+
+ # These files are generated by emerge, so we need to remove
+ # them when they are the only thing left in a directory.
+ _infodir_cleanup = frozenset(["dir", "dir.old"])
+
+ _ignored_unlink_errnos = (
+ errno.EBUSY, errno.ENOENT,
+ errno.ENOTDIR, errno.EISDIR)
+
+ _ignored_rmdir_errnos = (
+ errno.EEXIST, errno.ENOTEMPTY,
+ errno.EBUSY, errno.ENOENT,
+ errno.ENOTDIR, errno.EISDIR,
+ errno.EPERM)
+
+ def __init__(self, cat, pkg, myroot=None, settings=None, treetype=None,
+ vartree=None, blockers=None, scheduler=None, pipe=None):
+ """
+ Creates a dblink object for a given CPV.
+ The given CPV may not be present in the database already.
+
+ @param cat: Category
+ @type cat: String
+ @param pkg: Package (PV)
+ @type pkg: String
+ @param myroot: ignored, settings['ROOT'] is used instead
+ @type myroot: String (Path)
+ @param settings: Typically portage.settings
+ @type settings: portage.config
+ @param treetype: one of ['porttree','bintree','vartree']
+ @type treetype: String
+ @param vartree: an instance of vartree corresponding to myroot.
+ @type vartree: vartree
+ """
+
+ if settings is None:
+ raise TypeError("settings argument is required")
+
+ mysettings = settings
+ self._eroot = mysettings['EROOT']
+ self.cat = cat
+ self.pkg = pkg
+ self.mycpv = self.cat + "/" + self.pkg
+ if self.mycpv == settings.mycpv and \
+ isinstance(settings.mycpv, _pkg_str):
+ self.mycpv = settings.mycpv
+ else:
+ self.mycpv = _pkg_str(self.mycpv)
+ self.mysplit = list(self.mycpv.cpv_split[1:])
+ self.mysplit[0] = self.mycpv.cp
+ self.treetype = treetype
+ if vartree is None:
+ vartree = portage.db[self._eroot]["vartree"]
+ self.vartree = vartree
+ self._blockers = blockers
+ self._scheduler = scheduler
+ self.dbroot = normalize_path(os.path.join(self._eroot, VDB_PATH))
+ self.dbcatdir = self.dbroot+"/"+cat
+ self.dbpkgdir = self.dbcatdir+"/"+pkg
+ self.dbtmpdir = self.dbcatdir+"/"+MERGING_IDENTIFIER+pkg
+ self.dbdir = self.dbpkgdir
+ self.settings = mysettings
+ self._verbose = self.settings.get("PORTAGE_VERBOSE") == "1"
+
+ self.myroot = self.settings['ROOT']
+ self._installed_instance = None
+ self.contentscache = None
+ self._contents_inodes = None
+ self._contents_basenames = None
+ self._linkmap_broken = False
+ self._device_path_map = {}
+ self._hardlink_merge_map = {}
+ self._hash_key = (self._eroot, self.mycpv)
+ self._protect_obj = None
+ self._pipe = pipe
+ self._postinst_failure = False
+
+ # When necessary, this attribute is modified for
+ # compliance with RESTRICT=preserve-libs.
+ self._preserve_libs = "preserve-libs" in mysettings.features
+ self._contents = ContentsCaseSensitivityManager(self)
+ self._slot_locks = []
+
+ def __hash__(self):
+ return hash(self._hash_key)
+
+ def __eq__(self, other):
+ return isinstance(other, dblink) and \
+ self._hash_key == other._hash_key
+
+ def _get_protect_obj(self):
+
+ if self._protect_obj is None:
+ self._protect_obj = ConfigProtect(self._eroot,
+ portage.util.shlex_split(
+ self.settings.get("CONFIG_PROTECT", "")),
+ portage.util.shlex_split(
+ self.settings.get("CONFIG_PROTECT_MASK", "")),
+ case_insensitive=("case-insensitive-fs"
+ in self.settings.features))
+
+ return self._protect_obj
+
+ def isprotected(self, obj):
+ return self._get_protect_obj().isprotected(obj)
+
+ def updateprotect(self):
+ self._get_protect_obj().updateprotect()
+
+ def lockdb(self):
+ self.vartree.dbapi.lock()
+
+ def unlockdb(self):
+ self.vartree.dbapi.unlock()
+
+ def _slot_locked(f):
+ """
+ A decorator function which, when parallel-install is enabled,
+ acquires and releases slot locks for the current package and
+ blocked packages. This is required in order to account for
+ interactions with blocked packages (involving resolution of
+ file collisions).
+ """
+ def wrapper(self, *args, **kwargs):
+ if "parallel-install" in self.settings.features:
+ self._acquire_slot_locks(
+ kwargs.get("mydbapi", self.vartree.dbapi))
+ try:
+ return f(self, *args, **kwargs)
+ finally:
+ self._release_slot_locks()
+ return wrapper
+
+ def _acquire_slot_locks(self, db):
+ """
+ Acquire slot locks for the current package and blocked packages.
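+ Slot atoms take the form "category/pkg:slot", e.g. the
+ hypothetical "dev-lang/python:3.7".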
+ """
+
+ slot_atoms = []
+
+ try:
+ slot = self.mycpv.slot
+ except AttributeError:
+ slot, = db.aux_get(self.mycpv, ["SLOT"])
+ slot = slot.partition("/")[0]
+
+ slot_atoms.append(portage.dep.Atom(
+ "%s:%s" % (self.mycpv.cp, slot)))
+
+ for blocker in self._blockers or []:
+ slot_atoms.append(blocker.slot_atom)
+
+ # Sort atoms so that locks are acquired in a predictable
+ # order, preventing deadlocks with competitors that may
+ # be trying to acquire overlapping locks.
+ slot_atoms.sort()
+ for slot_atom in slot_atoms:
+ self.vartree.dbapi._slot_lock(slot_atom)
+ self._slot_locks.append(slot_atom)
+
+ def _release_slot_locks(self):
+ """
+ Release all slot locks.
+ """
+ while self._slot_locks:
+ self.vartree.dbapi._slot_unlock(self._slot_locks.pop())
+
+ def getpath(self):
+ "return path to location of db information (for >>> informational display)"
+ return self.dbdir
+
+ def exists(self):
+ "does the db entry exist? boolean."
+ return os.path.exists(self.dbdir)
+
+ def delete(self):
+ """
+ Remove this entry from the database
+ """
+ try:
+ os.lstat(self.dbdir)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ENOTDIR, errno.ESTALE):
+ raise
+ return
+
+ # Check validity of self.dbdir before attempting to remove it.
+ if not self.dbdir.startswith(self.dbroot):
+ writemsg(_("portage.dblink.delete(): invalid dbdir: %s\n") % \
+ self.dbdir, noiselevel=-1)
+ return
+
+ if self.dbdir is self.dbpkgdir:
+ counter, = self.vartree.dbapi.aux_get(
+ self.mycpv, ["COUNTER"])
+ self.vartree.dbapi._cache_delta.recordEvent(
+ "remove", self.mycpv,
+ self.settings["SLOT"].split("/")[0], counter)
+
+ shutil.rmtree(self.dbdir)
+ # If empty, remove parent category directory.
+ try:
+ os.rmdir(os.path.dirname(self.dbdir))
+ except OSError:
+ pass
+ self.vartree.dbapi._remove(self)
+
+ # Use self.dbroot since we need an existing path for syncfs.
+ try:
+ self._merged_path(self.dbroot, os.lstat(self.dbroot))
+ except OSError:
+ pass
+
+ self._post_merge_sync()
+
+ def clearcontents(self):
+ """
+ For a given db entry (self), erase the CONTENTS values.
+ """
+ self.lockdb()
+ try:
+ if os.path.exists(self.dbdir+"/CONTENTS"):
+ os.unlink(self.dbdir+"/CONTENTS")
+ finally:
+ self.unlockdb()
+
+ def _clear_contents_cache(self):
+ self.contentscache = None
+ self._contents_inodes = None
+ self._contents_basenames = None
+ self._contents.clear_cache()
+
+ def getcontents(self):
+ """
+ Get the installed files of a given package (aka what that package installed)
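+
+ A sketch of the returned mapping (hypothetical values):
+     {'/usr/bin/foo': ('obj', '1577836800', 'd41d8cd98f00b204e9800998ecf8427e'),
+      '/usr/bin': ('dir',)}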
+ """
+ if self.contentscache is not None:
+ return self.contentscache
+ contents_file = os.path.join(self.dbdir, "CONTENTS")
+ pkgfiles = {}
+ try:
+ with io.open(_unicode_encode(contents_file,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ mylines = f.readlines()
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ self.contentscache = pkgfiles
+ return pkgfiles
+
+ null_byte = "\0"
+ normalize_needed = self._normalize_needed
+ contents_re = self._contents_re
+ obj_index = contents_re.groupindex['obj']
+ dir_index = contents_re.groupindex['dir']
+ sym_index = contents_re.groupindex['sym']
+ # The old symlink format may exist on systems that have packages
+ # which were installed many years ago (see bug #351814).
+ oldsym_index = contents_re.groupindex['oldsym']
+ # CONTENTS files already contain EPREFIX
+ myroot = self.settings['ROOT']
+ if myroot == os.path.sep:
+ myroot = None
+ # used to generate parent dir entries
+ dir_entry = ("dir",)
+ eroot_split_len = len(self.settings["EROOT"].split(os.sep)) - 1
+ pos = 0
+ errors = []
+ for pos, line in enumerate(mylines):
+ if null_byte in line:
+ # Null bytes are a common indication of corruption.
+ errors.append((pos + 1, _("Null byte found in CONTENTS entry")))
+ continue
+ line = line.rstrip("\n")
+ m = contents_re.match(line)
+ if m is None:
+ errors.append((pos + 1, _("Unrecognized CONTENTS entry")))
+ continue
+
+ if m.group(obj_index) is not None:
+ base = obj_index
+ #format: type, mtime, md5sum
+ data = (m.group(base+1), m.group(base+4), m.group(base+3))
+ elif m.group(dir_index) is not None:
+ base = dir_index
+ #format: type
+ data = (m.group(base+1),)
+ elif m.group(sym_index) is not None:
+ base = sym_index
+ if m.group(oldsym_index) is None:
+ mtime = m.group(base+5)
+ else:
+ mtime = m.group(base+8)
+ #format: type, mtime, dest
+ data = (m.group(base+1), mtime, m.group(base+3))
+ else:
+ # This won't happen as long as the regular expression
+ # is written to only match valid entries.
+ raise AssertionError(_("required group not found " + \
+ "in CONTENTS entry: '%s'") % line)
+
+ path = m.group(base+2)
+ if normalize_needed.search(path) is not None:
+ path = normalize_path(path)
+ if not path.startswith(os.path.sep):
+ path = os.path.sep + path
+
+ if myroot is not None:
+ path = os.path.join(myroot, path.lstrip(os.path.sep))
+
+ # Implicitly add parent directories, since we can't necessarily
+ # assume that they are explicitly listed in CONTENTS, and it's
+ # useful for callers if they can rely on parent directory entries
+ # being generated here (crucial for things like dblink.isowner()).
+ path_split = path.split(os.sep)
+ path_split.pop()
+ while len(path_split) > eroot_split_len:
+ parent = os.sep.join(path_split)
+ if parent in pkgfiles:
+ break
+ pkgfiles[parent] = dir_entry
+ path_split.pop()
+
+ pkgfiles[path] = data
+
+ if errors:
+ writemsg(_("!!! Parse error in '%s'\n") % contents_file, noiselevel=-1)
+ for pos, e in errors:
+ writemsg(_("!!! line %d: %s\n") % (pos, e), noiselevel=-1)
+ self.contentscache = pkgfiles
+ return pkgfiles
+
+ def _prune_plib_registry(self, unmerge=False,
+ needed=None, preserve_paths=None):
+ # remove preserved libraries that don't have any consumers left
+ if not (self._linkmap_broken or
+ self.vartree.dbapi._linkmap is None or
+ self.vartree.dbapi._plib_registry is None):
+ self.vartree.dbapi._fs_lock()
+ plib_registry = self.vartree.dbapi._plib_registry
+ plib_registry.lock()
+ try:
+ plib_registry.load()
+
+ unmerge_with_replacement = \
+ unmerge and preserve_paths is not None
+ if unmerge_with_replacement:
+ # If self.mycpv is about to be unmerged and we
+ # have a replacement package, we want to exclude
+ # the irrelevant NEEDED data that belongs to
+ # files which are being unmerged now.
+ exclude_pkgs = (self.mycpv,)
+ else:
+ exclude_pkgs = None
+
+ self._linkmap_rebuild(exclude_pkgs=exclude_pkgs,
+ include_file=needed, preserve_paths=preserve_paths)
+
+ if unmerge:
+ unmerge_preserve = None
+ if not unmerge_with_replacement:
+ unmerge_preserve = \
+ self._find_libs_to_preserve(unmerge=True)
+ counter = self.vartree.dbapi.cpv_counter(self.mycpv)
+ try:
+ slot = self.mycpv.slot
+ except AttributeError:
+ slot = _pkg_str(self.mycpv, slot=self.settings["SLOT"]).slot
+ plib_registry.unregister(self.mycpv, slot, counter)
+ if unmerge_preserve:
+ for path in sorted(unmerge_preserve):
+ contents_key = self._match_contents(path)
+ if not contents_key:
+ continue
+ obj_type = self.getcontents()[contents_key][0]
+ self._display_merge(_(">>> needed %s %s\n") % \
+ (obj_type, contents_key), noiselevel=-1)
+ plib_registry.register(self.mycpv,
+ slot, counter, unmerge_preserve)
+ # Remove the preserved files from our contents
+ # so that they won't be unmerged.
+ self.vartree.dbapi.removeFromContents(self,
+ unmerge_preserve)
+
+ unmerge_no_replacement = \
+ unmerge and not unmerge_with_replacement
+ cpv_lib_map = self._find_unused_preserved_libs(
+ unmerge_no_replacement)
+ if cpv_lib_map:
+ self._remove_preserved_libs(cpv_lib_map)
+ self.vartree.dbapi.lock()
+ try:
+ for cpv, removed in cpv_lib_map.items():
+ if not self.vartree.dbapi.cpv_exists(cpv):
+ continue
+ self.vartree.dbapi.removeFromContents(cpv, removed)
+ finally:
+ self.vartree.dbapi.unlock()
+
+ plib_registry.store()
+ finally:
+ plib_registry.unlock()
+ self.vartree.dbapi._fs_unlock()
+
+ @_slot_locked
+ def unmerge(self, pkgfiles=None, trimworld=None, cleanup=True,
+ ldpath_mtimes=None, others_in_slot=None, needed=None,
+ preserve_paths=None):
+ """
+ Calls prerm
+ Unmerges a given package (CPV)
+ calls postrm
+ calls cleanrm
+ calls env_update
+
+ @param pkgfiles: files to unmerge (generally self.getcontents() )
+ @type pkgfiles: Dictionary
+ @param trimworld: Unused
+ @type trimworld: Boolean
+ @param cleanup: cleanup to pass to doebuild (see doebuild)
+ @type cleanup: Boolean
+ @param ldpath_mtimes: mtimes to pass to env_update (see env_update)
+ @type ldpath_mtimes: Dictionary
+ @param others_in_slot: all dblink instances in this slot, excluding self
+ @type others_in_slot: list
+ @param needed: Filename containing libraries needed after unmerge.
+ @type needed: String
+ @param preserve_paths: Libraries preserved by a package instance that
+ is currently being merged. They need to be explicitly passed to the
+ LinkageMap, since they are not registered in the
+ PreservedLibsRegistry yet.
+ @type preserve_paths: set
+ @rtype: Integer
+ @return:
+ 1. os.EX_OK if everything went well.
+ 2. return code of the failed phase (for prerm, postrm, cleanrm)
+ """
+
+ if trimworld is not None:
+ warnings.warn("The trimworld parameter of the " + \
+ "portage.dbapi.vartree.dblink.unmerge()" + \
+ " method is now unused.",
+ DeprecationWarning, stacklevel=2)
+
+ background = False
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+ if self._scheduler is None:
+ # We create a scheduler instance and use it to
+ # log unmerge output separately from merge output.
+ self._scheduler = SchedulerInterface(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False))
+ if self.settings.get("PORTAGE_BACKGROUND") == "subprocess":
+ if self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "1":
+ self.settings["PORTAGE_BACKGROUND"] = "1"
+ self.settings.backup_changes("PORTAGE_BACKGROUND")
+ background = True
+ elif self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "0":
+ self.settings["PORTAGE_BACKGROUND"] = "0"
+ self.settings.backup_changes("PORTAGE_BACKGROUND")
+ elif self.settings.get("PORTAGE_BACKGROUND") == "1":
+ background = True
+
+ self.vartree.dbapi._bump_mtime(self.mycpv)
+ showMessage = self._display_merge
+ if self.vartree.dbapi._categories is not None:
+ self.vartree.dbapi._categories = None
+
+ # When others_in_slot is not None, the backup has already been
+ # handled by the caller.
+ caller_handles_backup = others_in_slot is not None
+
+ # When others_in_slot is supplied, the security check has already been
+ # done for this slot, so it shouldn't be repeated until the next
+ # replacement or unmerge operation.
+ if others_in_slot is None:
+ slot = self.vartree.dbapi._pkg_str(self.mycpv, None).slot
+ slot_matches = self.vartree.dbapi.match(
+ "%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
+ others_in_slot = []
+ for cur_cpv in slot_matches:
+ if cur_cpv == self.mycpv:
+ continue
+ others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
+ settings=self.settings, vartree=self.vartree,
+ treetype="vartree", pipe=self._pipe))
+
+ retval = self._security_check([self] + others_in_slot)
+ if retval:
+ return retval
+
+ contents = self.getcontents()
+ # Now, don't assume that the name of the ebuild is the same as the
+ # name of the dir; the package may have been moved.
+ myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
+ failures = 0
+ ebuild_phase = "prerm"
+ mystuff = os.listdir(self.dbdir)
+ for x in mystuff:
+ if x.endswith(".ebuild"):
+ if x[:-7] != self.pkg:
+ # Clean up after vardbapi.move_ent() breakage in
+ # portage versions before 2.1.2
+ os.rename(os.path.join(self.dbdir, x), myebuildpath)
+ write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
+ break
+
+ if self.mycpv != self.settings.mycpv or \
+ "EAPI" not in self.settings.configdict["pkg"]:
+ # We avoid a redundant setcpv call here when
+ # the caller has already taken care of it.
+ self.settings.setcpv(self.mycpv, mydb=self.vartree.dbapi)
+
+ eapi_unsupported = False
+ try:
+ doebuild_environment(myebuildpath, "prerm",
+ settings=self.settings, db=self.vartree.dbapi)
+ except UnsupportedAPIException as e:
+ eapi_unsupported = e
+
+ if self._preserve_libs and "preserve-libs" in \
+ self.settings["PORTAGE_RESTRICT"].split():
+ self._preserve_libs = False
+
+ builddir_lock = None
+ scheduler = self._scheduler
+ retval = os.EX_OK
+ try:
+ # Only create builddir_lock if the caller
+ # has not already acquired the lock.
+ if "PORTAGE_BUILDDIR_LOCKED" not in self.settings:
+ builddir_lock = EbuildBuildDir(
+ scheduler=scheduler,
+ settings=self.settings)
+ scheduler.run_until_complete(builddir_lock.async_lock())
+ prepare_build_dirs(settings=self.settings, cleanup=True)
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+
+ # Do this before the following _prune_plib_registry call, since
+ # that removes preserved libraries from our CONTENTS, and we
+ # may want to backup those libraries first.
+ if not caller_handles_backup:
+ retval = self._pre_unmerge_backup(background)
+ if retval != os.EX_OK:
+ showMessage(_("!!! FAILED prerm: quickpkg: %s\n") % retval,
+ level=logging.ERROR, noiselevel=-1)
+ return retval
+
+ self._prune_plib_registry(unmerge=True, needed=needed,
+ preserve_paths=preserve_paths)
+
+ # Log the error after PORTAGE_LOG_FILE is initialized
+ # by prepare_build_dirs above.
+ if eapi_unsupported:
+ # Sometimes this happens due to corruption of the EAPI file.
+ failures += 1
+ showMessage(_("!!! FAILED prerm: %s\n") % \
+ os.path.join(self.dbdir, "EAPI"),
+ level=logging.ERROR, noiselevel=-1)
+ showMessage("%s\n" % (eapi_unsupported,),
+ level=logging.ERROR, noiselevel=-1)
+ elif os.path.isfile(myebuildpath):
+ phase = EbuildPhase(background=background,
+ phase=ebuild_phase, scheduler=scheduler,
+ settings=self.settings)
+ phase.start()
+ retval = phase.wait()
+
+ # XXX: Decide how to handle failures here.
+ if retval != os.EX_OK:
+ failures += 1
+ showMessage(_("!!! FAILED prerm: %s\n") % retval,
+ level=logging.ERROR, noiselevel=-1)
+
+ self.vartree.dbapi._fs_lock()
+ try:
+ self._unmerge_pkgfiles(pkgfiles, others_in_slot)
+ finally:
+ self.vartree.dbapi._fs_unlock()
+ self._clear_contents_cache()
+
+ if not eapi_unsupported and os.path.isfile(myebuildpath):
+ ebuild_phase = "postrm"
+ phase = EbuildPhase(background=background,
+ phase=ebuild_phase, scheduler=scheduler,
+ settings=self.settings)
+ phase.start()
+ retval = phase.wait()
+
+ # XXX: Decide how to handle failures here.
+ if retval != os.EX_OK:
+ failures += 1
+ showMessage(_("!!! FAILED postrm: %s\n") % retval,
+ level=logging.ERROR, noiselevel=-1)
+
+ finally:
+ self.vartree.dbapi._bump_mtime(self.mycpv)
+ try:
+ if not eapi_unsupported and os.path.isfile(myebuildpath):
+ if retval != os.EX_OK:
+ msg_lines = []
+ msg = _("The '%(ebuild_phase)s' "
+ "phase of the '%(cpv)s' package "
+ "has failed with exit value %(retval)s.") % \
+ {"ebuild_phase":ebuild_phase, "cpv":self.mycpv,
+ "retval":retval}
+ from textwrap import wrap
+ msg_lines.extend(wrap(msg, 72))
+ msg_lines.append("")
+
+ ebuild_name = os.path.basename(myebuildpath)
+ ebuild_dir = os.path.dirname(myebuildpath)
+ msg = _("The problem occurred while executing "
+ "the ebuild file named '%(ebuild_name)s' "
+ "located in the '%(ebuild_dir)s' directory. "
+ "If necessary, manually remove "
+ "the environment.bz2 file and/or the "
+ "ebuild file located in that directory.") % \
+ {"ebuild_name":ebuild_name, "ebuild_dir":ebuild_dir}
+ msg_lines.extend(wrap(msg, 72))
+ msg_lines.append("")
+
+ msg = _("Removal "
+ "of the environment.bz2 file is "
+ "preferred since it may allow the "
+ "removal phases to execute successfully. "
+ "The ebuild will be "
+ "sourced and the eclasses "
+ "from the current portage tree will be used "
+ "when necessary. Removal of "
+ "the ebuild file will cause the "
+ "pkg_prerm() and pkg_postrm() removal "
+ "phases to be skipped entirely.")
+ msg_lines.extend(wrap(msg, 72))
+
+ self._eerror(ebuild_phase, msg_lines)
+
+ self._elog_process(phasefilter=("prerm", "postrm"))
+
+ if retval == os.EX_OK:
+ try:
+ doebuild_environment(myebuildpath, "cleanrm",
+ settings=self.settings, db=self.vartree.dbapi)
+ except UnsupportedAPIException:
+ pass
+ phase = EbuildPhase(background=background,
+ phase="cleanrm", scheduler=scheduler,
+ settings=self.settings)
+ phase.start()
+ retval = phase.wait()
+ finally:
+ if builddir_lock is not None:
+ scheduler.run_until_complete(
+ builddir_lock.async_unlock())
+
+ if log_path is not None:
+
+ if not failures and 'unmerge-logs' not in self.settings.features:
+ try:
+ os.unlink(log_path)
+ except OSError:
+ pass
+
+ try:
+ st = os.stat(log_path)
+ except OSError:
+ pass
+ else:
+ if st.st_size == 0:
+ try:
+ os.unlink(log_path)
+ except OSError:
+ pass
+
+ if log_path is not None and os.path.exists(log_path):
+ # Restore this since it gets lost somewhere above and it
+ # needs to be set for _display_merge() to be able to log.
+ # Note that the log isn't necessarily supposed to exist
+ # since if PORT_LOGDIR is unset then it's a temp file
+ # so it gets cleaned above.
+ self.settings["PORTAGE_LOG_FILE"] = log_path
+ else:
+ self.settings.pop("PORTAGE_LOG_FILE", None)
+
+ env_update(target_root=self.settings['ROOT'],
+ prev_mtimes=ldpath_mtimes,
+ contents=contents, env=self.settings,
+ writemsg_level=self._display_merge, vardbapi=self.vartree.dbapi)
+
+ unmerge_with_replacement = preserve_paths is not None
+ if not unmerge_with_replacement:
+ # When there's a replacement package which calls us via treewalk,
+ # treewalk will automatically call _prune_plib_registry for us.
+ # Otherwise, we need to call _prune_plib_registry ourselves.
+ # Don't pass in the "unmerge=True" flag here, since that flag
+ # is intended to be used _prior_ to unmerge, not after.
+ self._prune_plib_registry()
+
+ return os.EX_OK
+
+ def _display_merge(self, msg, level=0, noiselevel=0):
+ if not self._verbose and noiselevel >= 0 and level < logging.WARN:
+ return
+ if self._scheduler is None:
+ writemsg_level(msg, level=level, noiselevel=noiselevel)
+ else:
+ log_path = None
+ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+ background = self.settings.get("PORTAGE_BACKGROUND") == "1"
+
+ if background and log_path is None:
+ if level >= logging.WARN:
+ writemsg_level(msg, level=level, noiselevel=noiselevel)
+ else:
+ self._scheduler.output(msg,
+ log_path=log_path, background=background,
+ level=level, noiselevel=noiselevel)
+
+ def _show_unmerge(self, zing, desc, file_type, file_name):
+ self._display_merge("%s %s %s %s\n" % \
+ (zing, desc.ljust(8), file_type, file_name))
+
+ def _unmerge_pkgfiles(self, pkgfiles, others_in_slot):
+ """
+
+ Unmerges the contents of a package from the liveFS
+ Removes the VDB entry for self
+
+ @param pkgfiles: typically self.getcontents()
+ @type pkgfiles: Dictionary { filename: [ 'type', 'mtime', 'md5sum' ] }
+ @param others_in_slot: all dblink instances in this slot, excluding self
+ @type others_in_slot: list
+ @rtype: None
+ """
+
+ os = _os_merge
+ perf_md5 = perform_md5
+ showMessage = self._display_merge
+ show_unmerge = self._show_unmerge
+ ignored_unlink_errnos = self._ignored_unlink_errnos
+ ignored_rmdir_errnos = self._ignored_rmdir_errnos
+
+ if not pkgfiles:
+ showMessage(_("No package files given... Grabbing a set.\n"))
+ pkgfiles = self.getcontents()
+
+ if others_in_slot is None:
+ others_in_slot = []
+ slot = self.vartree.dbapi._pkg_str(self.mycpv, None).slot
+ slot_matches = self.vartree.dbapi.match(
+ "%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
+ for cur_cpv in slot_matches:
+ if cur_cpv == self.mycpv:
+ continue
+ others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
+ settings=self.settings,
+ vartree=self.vartree, treetype="vartree", pipe=self._pipe))
+
+ cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file)
+ stale_confmem = []
+ protected_symlinks = {}
+
+ unmerge_orphans = "unmerge-orphans" in self.settings.features
+ calc_prelink = "prelink-checksums" in self.settings.features
+
+ if pkgfiles:
+ self.updateprotect()
+ mykeys = list(pkgfiles)
+ mykeys.sort()
+ mykeys.reverse()
+
+ #process symlinks second-to-last, directories last.
+ mydirs = set()
+
+ uninstall_ignore = portage.util.shlex_split(
+ self.settings.get("UNINSTALL_IGNORE", ""))
+
+ def unlink(file_name, lstatobj):
+ if bsd_chflags:
+ if lstatobj.st_flags != 0:
+ bsd_chflags.lchflags(file_name, 0)
+ parent_name = os.path.dirname(file_name)
+ # Use normal stat/chflags for the parent since we want to
+ # follow any symlinks to the real parent directory.
+ pflags = os.stat(parent_name).st_flags
+ if pflags != 0:
+ bsd_chflags.chflags(parent_name, 0)
+ try:
+ if not stat.S_ISLNK(lstatobj.st_mode):
+ # Remove permissions to ensure that any hardlinks to
+ # suid/sgid files are rendered harmless.
+ os.chmod(file_name, 0)
+ os.unlink(file_name)
+ except OSError as ose:
+ # If the chmod or unlink fails, you are in trouble.
+ # With Prefix this can be because the file is owned
+ # by someone else (a screwup by root?), on a normal
+ # system maybe filesystem corruption. In any case,
+ # if we backtrace and die here, we leave the system
+ # in a totally undefined state, hence we just bleed
+ # like hell and continue to hopefully finish all our
+ # administrative and pkg_postinst stuff.
+ self._eerror("postrm",
+ ["Could not chmod or unlink '%s': %s" % \
+ (file_name, ose)])
+ else:
+
+ # Even though the file no longer exists, we log it
+ # here so that _unmerge_dirs can see that we've
+ # removed a file from this device, and will record
+ # the parent directory for a syncfs call.
+ self._merged_path(file_name, lstatobj, exists=False)
+
+ finally:
+ if bsd_chflags and pflags != 0:
+ # Restore the parent flags we saved before unlinking
+ bsd_chflags.chflags(parent_name, pflags)
+
+ unmerge_desc = {}
+ unmerge_desc["cfgpro"] = _("cfgpro")
+ unmerge_desc["replaced"] = _("replaced")
+ unmerge_desc["!dir"] = _("!dir")
+ unmerge_desc["!empty"] = _("!empty")
+ unmerge_desc["!fif"] = _("!fif")
+ unmerge_desc["!found"] = _("!found")
+ unmerge_desc["!md5"] = _("!md5")
+ unmerge_desc["!mtime"] = _("!mtime")
+ unmerge_desc["!obj"] = _("!obj")
+ unmerge_desc["!sym"] = _("!sym")
+ unmerge_desc["!prefix"] = _("!prefix")
+
+ real_root = self.settings['ROOT']
+ real_root_len = len(real_root) - 1
+ eroot = self.settings["EROOT"]
+
+ infodirs = frozenset(infodir for infodir in chain(
+ self.settings.get("INFOPATH", "").split(":"),
+ self.settings.get("INFODIR", "").split(":")) if infodir)
+ infodirs_inodes = set()
+ for infodir in infodirs:
+ infodir = os.path.join(real_root, infodir.lstrip(os.sep))
+ try:
+ statobj = os.stat(infodir)
+ except OSError:
+ pass
+ else:
+ infodirs_inodes.add((statobj.st_dev, statobj.st_ino))
+
+ for i, objkey in enumerate(mykeys):
+
+ obj = normalize_path(objkey)
+ if os is _os_merge:
+ try:
+ _unicode_encode(obj,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ _unicode_encode(obj,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+ perf_md5 = portage.checksum.perform_md5
+
+ file_data = pkgfiles[objkey]
+ file_type = file_data[0]
+
+ # don't try to unmerge the prefix offset itself
+ if len(obj) <= len(eroot) or not obj.startswith(eroot):
+ show_unmerge("---", unmerge_desc["!prefix"], file_type, obj)
+ continue
+
+ statobj = None
+ try:
+ statobj = os.stat(obj)
+ except OSError:
+ pass
+ lstatobj = None
+ try:
+ lstatobj = os.lstat(obj)
+ except (OSError, AttributeError):
+ pass
+ islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
+ if lstatobj is None:
+ show_unmerge("---", unmerge_desc["!found"], file_type, obj)
+ continue
+
+ f_match = obj[len(eroot)-1:]
+ ignore = False
+ for pattern in uninstall_ignore:
+ if fnmatch.fnmatch(f_match, pattern):
+ ignore = True
+ break
+
+ if not ignore:
+ if islink and f_match in \
+ ("/lib", "/usr/lib", "/usr/local/lib"):
+ # Ignore libdir symlinks for bug #423127.
+ ignore = True
+
+ if ignore:
+ show_unmerge("---", unmerge_desc["cfgpro"], file_type, obj)
+ continue
+
+ # don't use EROOT, CONTENTS entries already contain EPREFIX
+ if obj.startswith(real_root):
+ relative_path = obj[real_root_len:]
+ is_owned = False
+ for dblnk in others_in_slot:
+ if dblnk.isowner(relative_path):
+ is_owned = True
+ break
+
+ if is_owned and islink and \
+ file_type in ("sym", "dir") and \
+ statobj and stat.S_ISDIR(statobj.st_mode):
+ # A new instance of this package claims the file, so
+ # don't unmerge it. If the file is symlink to a
+ # directory and the unmerging package installed it as
+ # a symlink, but the new owner has it listed as a
+ # directory, then we'll produce a warning since the
+ # symlink is a sort of orphan in this case (see
+ # bug #326685).
+ symlink_orphan = False
+ for dblnk in others_in_slot:
+ parent_contents_key = \
+ dblnk._match_contents(relative_path)
+ if not parent_contents_key:
+ continue
+ if not parent_contents_key.startswith(
+ real_root):
+ continue
+ if dblnk.getcontents()[
+ parent_contents_key][0] == "dir":
+ symlink_orphan = True
+ break
+
+ if symlink_orphan:
+ protected_symlinks.setdefault(
+ (statobj.st_dev, statobj.st_ino),
+ []).append(relative_path)
+
+ if is_owned:
+ show_unmerge("---", unmerge_desc["replaced"], file_type, obj)
+ continue
+ elif relative_path in cfgfiledict:
+ stale_confmem.append(relative_path)
+
+ # Don't unlink symlinks to directories here since that can
+ # remove /lib and /usr/lib symlinks.
+ if unmerge_orphans and \
+ lstatobj and not stat.S_ISDIR(lstatobj.st_mode) and \
+ not (islink and statobj and stat.S_ISDIR(statobj.st_mode)) and \
+ not self.isprotected(obj):
+ try:
+ unlink(obj, lstatobj)
+ except EnvironmentError as e:
+ if e.errno not in ignored_unlink_errnos:
+ raise
+ del e
+ show_unmerge("<<<", "", file_type, obj)
+ continue
+
+ lmtime = str(lstatobj[stat.ST_MTIME])
+ if (pkgfiles[objkey][0] not in ("dir", "fif", "dev")) and (lmtime != pkgfiles[objkey][1]):
+ show_unmerge("---", unmerge_desc["!mtime"], file_type, obj)
+ continue
+
+ if file_type == "dir" and not islink:
+ if lstatobj is None or not stat.S_ISDIR(lstatobj.st_mode):
+ show_unmerge("---", unmerge_desc["!dir"], file_type, obj)
+ continue
+ mydirs.add((obj, (lstatobj.st_dev, lstatobj.st_ino)))
+ elif file_type == "sym" or (file_type == "dir" and islink):
+ if not islink:
+ show_unmerge("---", unmerge_desc["!sym"], file_type, obj)
+ continue
+
+ # If this symlink points to a directory then we don't want
+ # to unmerge it if there are any other packages that
+ # installed files into the directory via this symlink
+ # (see bug #326685).
+ # TODO: Resolving a symlink to a directory will require
+ # simulation if $ROOT != / and the link is not relative.
+ if islink and statobj and stat.S_ISDIR(statobj.st_mode) \
+ and obj.startswith(real_root):
+
+ relative_path = obj[real_root_len:]
+ try:
+ target_dir_contents = os.listdir(obj)
+ except OSError:
+ pass
+ else:
+ if target_dir_contents:
+ # If all the children are regular files owned
+ # by this package, then the symlink should be
+ # safe to unmerge.
+ all_owned = True
+ for child in target_dir_contents:
+ child = os.path.join(relative_path, child)
+ if not self.isowner(child):
+ all_owned = False
+ break
+ try:
+ child_lstat = os.lstat(os.path.join(
+ real_root, child.lstrip(os.sep)))
+ except OSError:
+ continue
+
+ if not stat.S_ISREG(child_lstat.st_mode):
+ # Nested symlinks or directories make
+ # the issue very complex, so just
+ # preserve the symlink in order to be
+ # on the safe side.
+ all_owned = False
+ break
+
+ if not all_owned:
+ protected_symlinks.setdefault(
+ (statobj.st_dev, statobj.st_ino),
+ []).append(relative_path)
+ show_unmerge("---", unmerge_desc["!empty"],
+ file_type, obj)
+ continue
+
+ # Go ahead and unlink symlinks to directories here when
+ # they're actually recorded as symlinks in the contents.
+ # Normally, symlinks such as /lib -> lib64 are not recorded
+ # as symlinks in the contents of a package. If a package
+ # installs something into ${D}/lib/, it is recorded in the
+ # contents as a directory even if it happens to correspond
+ # to a symlink when it's merged to the live filesystem.
+ try:
+ unlink(obj, lstatobj)
+ show_unmerge("<<<", "", file_type, obj)
+ except (OSError, IOError) as e:
+ if e.errno not in ignored_unlink_errnos:
+ raise
+ del e
+ show_unmerge("!!!", "", file_type, obj)
+ elif pkgfiles[objkey][0] == "obj":
+ if statobj is None or not stat.S_ISREG(statobj.st_mode):
+ show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
+ continue
+ mymd5 = None
+ try:
+ mymd5 = perf_md5(obj, calc_prelink=calc_prelink)
+ except FileNotFound as e:
+ # the file has disappeared between now and our stat call
+ show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
+ continue
+
+ # The lower() call is needed because db entries used to be in
+ # upper-case; lower-casing the stored value preserves backwards
+ # compatibility.
+ if mymd5 != pkgfiles[objkey][2].lower():
+ show_unmerge("---", unmerge_desc["!md5"], file_type, obj)
+ continue
+ try:
+ unlink(obj, lstatobj)
+ except (OSError, IOError) as e:
+ if e.errno not in ignored_unlink_errnos:
+ raise
+ del e
+ show_unmerge("<<<", "", file_type, obj)
+ elif pkgfiles[objkey][0] == "fif":
+ if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
+ show_unmerge("---", unmerge_desc["!fif"], file_type, obj)
+ continue
+ show_unmerge("---", "", file_type, obj)
+ elif pkgfiles[objkey][0] == "dev":
+ show_unmerge("---", "", file_type, obj)
+
+ self._unmerge_dirs(mydirs, infodirs_inodes,
+ protected_symlinks, unmerge_desc, unlink, os)
+ mydirs.clear()
+
+ if protected_symlinks:
+ self._unmerge_protected_symlinks(others_in_slot, infodirs_inodes,
+ protected_symlinks, unmerge_desc, unlink, os)
+
+ if protected_symlinks:
+ msg = "One or more symlinks to directories have been " + \
+ "preserved in order to ensure that files installed " + \
+ "via these symlinks remain accessible. " + \
+ "This indicates that the mentioned symlink(s) may " + \
+ "be obsolete remnants of an old install, and it " + \
+ "may be appropriate to replace a given symlink " + \
+ "with the directory that it points to."
+ lines = textwrap.wrap(msg, 72)
+ lines.append("")
+ flat_list = set()
+ flat_list.update(*protected_symlinks.values())
+ flat_list = sorted(flat_list)
+ for f in flat_list:
+ lines.append("\t%s" % (os.path.join(real_root,
+ f.lstrip(os.sep))))
+ lines.append("")
+ self._elog("elog", "postrm", lines)
+
+ # Remove stale entries from config memory.
+ if stale_confmem:
+ for filename in stale_confmem:
+ del cfgfiledict[filename]
+ writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
+
+ #remove self from vartree database so that our own virtual gets zapped if we're the last node
+ self.vartree.zap(self.mycpv)
+
+ def _unmerge_protected_symlinks(self, others_in_slot, infodirs_inodes,
+ protected_symlinks, unmerge_desc, unlink, os):
+
+ real_root = self.settings['ROOT']
+ show_unmerge = self._show_unmerge
+ ignored_unlink_errnos = self._ignored_unlink_errnos
+
+ flat_list = set()
+ flat_list.update(*protected_symlinks.values())
+ flat_list = sorted(flat_list)
+
+ for f in flat_list:
+ for dblnk in others_in_slot:
+ if dblnk.isowner(f):
+ # If another package in the same slot installed
+ # a file via a protected symlink, return early
+ # and don't bother searching for any other owners.
+ return
+
+ msg = []
+ msg.append("")
+ msg.append(_("Directory symlink(s) may need protection:"))
+ msg.append("")
+
+ for f in flat_list:
+ msg.append("\t%s" % \
+ os.path.join(real_root, f.lstrip(os.path.sep)))
+
+ msg.append("")
+ msg.append("Use the UNINSTALL_IGNORE variable to exempt specific symlinks")
+ msg.append("from the following search (see the make.conf man page).")
+ msg.append("")
+ msg.append(_("Searching all installed"
+ " packages for files installed via above symlink(s)..."))
+ msg.append("")
+ self._elog("elog", "postrm", msg)
+
+ self.lockdb()
+ try:
+ owners = self.vartree.dbapi._owners.get_owners(flat_list)
+ self.vartree.dbapi.flush_cache()
+ finally:
+ self.unlockdb()
+
+ for owner in list(owners):
+ if owner.mycpv == self.mycpv:
+ owners.pop(owner, None)
+
+ if not owners:
+ msg = []
+ msg.append(_("The above directory symlink(s) are all "
+ "safe to remove. Removing them now..."))
+ msg.append("")
+ self._elog("elog", "postrm", msg)
+ dirs = set()
+ for unmerge_syms in protected_symlinks.values():
+ for relative_path in unmerge_syms:
+ obj = os.path.join(real_root,
+ relative_path.lstrip(os.sep))
+ parent = os.path.dirname(obj)
+ while len(parent) > len(self._eroot):
+ try:
+ lstatobj = os.lstat(parent)
+ except OSError:
+ break
+ else:
+ dirs.add((parent,
+ (lstatobj.st_dev, lstatobj.st_ino)))
+ parent = os.path.dirname(parent)
+ try:
+ unlink(obj, os.lstat(obj))
+ show_unmerge("<<<", "", "sym", obj)
+ except (OSError, IOError) as e:
+ if e.errno not in ignored_unlink_errnos:
+ raise
+ del e
+ show_unmerge("!!!", "", "sym", obj)
+
+ protected_symlinks.clear()
+ self._unmerge_dirs(dirs, infodirs_inodes,
+ protected_symlinks, unmerge_desc, unlink, os)
+ dirs.clear()
+
+ def _unmerge_dirs(self, dirs, infodirs_inodes,
+ protected_symlinks, unmerge_desc, unlink, os):
+
+ show_unmerge = self._show_unmerge
+ infodir_cleanup = self._infodir_cleanup
+ ignored_unlink_errnos = self._ignored_unlink_errnos
+ ignored_rmdir_errnos = self._ignored_rmdir_errnos
+ real_root = self.settings['ROOT']
+
+ dirs = sorted(dirs)
+ revisit = {}
+
+ while True:
+ try:
+ obj, inode_key = dirs.pop()
+ except IndexError:
+ break
+ # Treat any directory named "info" as a candidate here,
+ # since it might have been in INFOPATH previously even
+ # though it may not be there now.
+ if inode_key in infodirs_inodes or \
+ os.path.basename(obj) == "info":
+ try:
+ remaining = os.listdir(obj)
+ except OSError:
+ pass
+ else:
+ cleanup_info_dir = ()
+ if remaining and \
+ len(remaining) <= len(infodir_cleanup):
+ if not set(remaining).difference(infodir_cleanup):
+ cleanup_info_dir = remaining
+
+ for child in cleanup_info_dir:
+ child = os.path.join(obj, child)
+ try:
+ lstatobj = os.lstat(child)
+ if stat.S_ISREG(lstatobj.st_mode):
+ unlink(child, lstatobj)
+ show_unmerge("<<<", "", "obj", child)
+ except EnvironmentError as e:
+ if e.errno not in ignored_unlink_errnos:
+ raise
+ del e
+ show_unmerge("!!!", "", "obj", child)
+
+ try:
+ parent_name = os.path.dirname(obj)
+ parent_stat = os.stat(parent_name)
+
+ if bsd_chflags:
+ lstatobj = os.lstat(obj)
+ if lstatobj.st_flags != 0:
+ bsd_chflags.lchflags(obj, 0)
+
+ # Use normal stat/chflags for the parent since we want to
+ # follow any symlinks to the real parent directory.
+ pflags = parent_stat.st_flags
+ if pflags != 0:
+ bsd_chflags.chflags(parent_name, 0)
+ try:
+ os.rmdir(obj)
+ finally:
+ if bsd_chflags and pflags != 0:
+ # Restore the parent flags we saved before unlinking
+ bsd_chflags.chflags(parent_name, pflags)
+
+ # Record the parent directory for use in syncfs calls.
+ # Note that we use a realpath and a regular stat here, since
+ # we want to follow any symlinks back to the real device where
+ # the real parent directory resides.
+ self._merged_path(os.path.realpath(parent_name), parent_stat)
+
+ show_unmerge("<<<", "", "dir", obj)
+ except EnvironmentError as e:
+ if e.errno not in ignored_rmdir_errnos:
+ raise
+ if e.errno != errno.ENOENT:
+ show_unmerge("---", unmerge_desc["!empty"], "dir", obj)
+ revisit[obj] = inode_key
+
+ # Since we didn't remove this directory, record the directory
+ # itself for use in syncfs calls, if we have removed another
+ # file from the same device.
+ # Note that we use a realpath and a regular stat here, since
+ # we want to follow any symlinks back to the real device where
+ # the real directory resides.
+ try:
+ dir_stat = os.stat(obj)
+ except OSError:
+ pass
+ else:
+ if dir_stat.st_dev in self._device_path_map:
+ self._merged_path(os.path.realpath(obj), dir_stat)
+
+ else:
+ # When a directory is successfully removed, there's
+ # no need to protect symlinks that point to it.
+ unmerge_syms = protected_symlinks.pop(inode_key, None)
+ if unmerge_syms is not None:
+ parents = []
+ for relative_path in unmerge_syms:
+ obj = os.path.join(real_root,
+ relative_path.lstrip(os.sep))
+ try:
+ unlink(obj, os.lstat(obj))
+ show_unmerge("<<<", "", "sym", obj)
+ except (OSError, IOError) as e:
+ if e.errno not in ignored_unlink_errnos:
+ raise
+ del e
+ show_unmerge("!!!", "", "sym", obj)
+ else:
+ parents.append(os.path.dirname(obj))
+
+ if parents:
+ # Revisit parents recursively (bug 640058).
+ recursive_parents = []
+ for parent in set(parents):
+ while parent in revisit:
+ recursive_parents.append(parent)
+ parent = os.path.dirname(parent)
+ if parent == '/':
+ break
+
+ for parent in sorted(set(recursive_parents)):
+ dirs.append((parent, revisit.pop(parent)))
+
+ def isowner(self, filename, destroot=None):
+ """
+ Check if a file belongs to this package. This may
+ result in a stat call for the parent directory of
+ every installed file, since the inode numbers are
+ used to work around the problem of ambiguous paths
+ caused by symlinked directories. The results of
+ stat calls are cached to optimize multiple calls
+ to this method.
+
+ @param filename:
+ @type filename:
+ @param destroot:
+ @type destroot:
+ @rtype: Boolean
+ @return:
+ 1. True if this package owns the file.
+ 2. False if this package does not own the file.
+ """
+
+ if destroot is not None and destroot != self._eroot:
+ warnings.warn("The second parameter of the " + \
+ "portage.dbapi.vartree.dblink.isowner()" + \
+ " is now unused. Instead " + \
+ "self.settings['EROOT'] will be used.",
+ DeprecationWarning, stacklevel=2)
+
+ return bool(self._match_contents(filename))
+
+ def _match_contents(self, filename, destroot=None):
+ """
+ The matching contents entry is returned, which is useful
+ since the path may differ from the one given by the caller,
+ due to symlinks.
+
+ @rtype: String
+ @return: the contents entry corresponding to the given path, or False
+ if the file is not owned by this package.
+ """
+
+ filename = _unicode_decode(filename,
+ encoding=_encodings['content'], errors='strict')
+
+ if destroot is not None and destroot != self._eroot:
+ warnings.warn("The second parameter of the " + \
+ "portage.dbapi.vartree.dblink._match_contents()" + \
+ " is now unused. Instead " + \
+ "self.settings['ROOT'] will be used.",
+ DeprecationWarning, stacklevel=2)
+
+ # don't use EROOT here, image already contains EPREFIX
+ destroot = self.settings['ROOT']
+
+ # The given filename argument might have a different encoding than
+ # the filenames contained in the contents, so use separate wrapped os
+ # modules for each. The basename is more likely to contain non-ascii
+ # characters than the directory path, so use os_filename_arg for all
+ # operations involving the basename of the filename arg.
+ os_filename_arg = _os_merge
+ os = _os_merge
+
+ try:
+ _unicode_encode(filename,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ _unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os_filename_arg = portage.os
+
+ destfile = normalize_path(
+ os_filename_arg.path.join(destroot,
+ filename.lstrip(os_filename_arg.path.sep)))
+
+ if "case-insensitive-fs" in self.settings.features:
+ destfile = destfile.lower()
+
+ if self._contents.contains(destfile):
+ return self._contents.unmap_key(destfile)
+
+ if self.getcontents():
+ basename = os_filename_arg.path.basename(destfile)
+ if self._contents_basenames is None:
+
+ try:
+ for x in self._contents.keys():
+ _unicode_encode(x,
+ encoding=_encodings['merge'],
+ errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ for x in self._contents.keys():
+ _unicode_encode(x,
+ encoding=_encodings['fs'],
+ errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+
+ self._contents_basenames = set(
+ os.path.basename(x) for x in self._contents.keys())
+ if basename not in self._contents_basenames:
+ # This is a shortcut that, in most cases, allows us to
+ # eliminate this package as an owner without the need
+ # to examine inode numbers of parent directories.
+ return False
+
+ # Use stat rather than lstat since we want to follow
+ # any symlinks to the real parent directory.
+ parent_path = os_filename_arg.path.dirname(destfile)
+ try:
+ parent_stat = os_filename_arg.stat(parent_path)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ return False
+ if self._contents_inodes is None:
+
+ if os is _os_merge:
+ try:
+ for x in self._contents.keys():
+ _unicode_encode(x,
+ encoding=_encodings['merge'],
+ errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ for x in self._contents.keys():
+ _unicode_encode(x,
+ encoding=_encodings['fs'],
+ errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+
+ self._contents_inodes = {}
+ parent_paths = set()
+ for x in self._contents.keys():
+ p_path = os.path.dirname(x)
+ if p_path in parent_paths:
+ continue
+ parent_paths.add(p_path)
+ try:
+ s = os.stat(p_path)
+ except OSError:
+ pass
+ else:
+ inode_key = (s.st_dev, s.st_ino)
+ # Use lists of paths in case multiple
+ # paths reference the same inode.
+ p_path_list = self._contents_inodes.get(inode_key)
+ if p_path_list is None:
+ p_path_list = []
+ self._contents_inodes[inode_key] = p_path_list
+ if p_path not in p_path_list:
+ p_path_list.append(p_path)
+
+ p_path_list = self._contents_inodes.get(
+ (parent_stat.st_dev, parent_stat.st_ino))
+ if p_path_list:
+ for p_path in p_path_list:
+ x = os_filename_arg.path.join(p_path, basename)
+ if self._contents.contains(x):
+ return self._contents.unmap_key(x)
+
+ return False
+
+ def _linkmap_rebuild(self, **kwargs):
+ """
+ Rebuild the self._linkmap if it's not broken due to missing
+ scanelf binary. Also, return early if preserve-libs is disabled
+ and the preserve-libs registry is empty.
+ """
+ if self._linkmap_broken or \
+ self.vartree.dbapi._linkmap is None or \
+ self.vartree.dbapi._plib_registry is None or \
+ ("preserve-libs" not in self.settings.features and \
+ not self.vartree.dbapi._plib_registry.hasEntries()):
+ return
+ try:
+ self.vartree.dbapi._linkmap.rebuild(**kwargs)
+ except CommandNotFound as e:
+ self._linkmap_broken = True
+ self._display_merge(_("!!! Disabling preserve-libs " \
+ "due to error: Command Not Found: %s\n") % (e,),
+ level=logging.ERROR, noiselevel=-1)
+
+ def _find_libs_to_preserve(self, unmerge=False):
+ """
+ Get set of relative paths for libraries to be preserved. When
+ unmerge is False, file paths to preserve are selected from
+ self._installed_instance. Otherwise, paths are selected from
+ self.
+ """
+ if self._linkmap_broken or \
+ self.vartree.dbapi._linkmap is None or \
+ self.vartree.dbapi._plib_registry is None or \
+ (not unmerge and self._installed_instance is None) or \
+ not self._preserve_libs:
+ return set()
+
+ os = _os_merge
+ linkmap = self.vartree.dbapi._linkmap
+ if unmerge:
+ installed_instance = self
+ else:
+ installed_instance = self._installed_instance
+ old_contents = installed_instance.getcontents()
+ root = self.settings['ROOT']
+ root_len = len(root) - 1
+ lib_graph = digraph()
+ path_node_map = {}
+
+ def path_to_node(path):
+ node = path_node_map.get(path)
+ if node is None:
+ node = LinkageMap._LibGraphNode(linkmap._obj_key(path))
+ alt_path_node = lib_graph.get(node)
+ if alt_path_node is not None:
+ node = alt_path_node
+ node.alt_paths.add(path)
+ path_node_map[path] = node
+ return node
+
+ consumer_map = {}
+ provider_nodes = set()
+ # Create provider nodes and add them to the graph.
+ for f_abs in old_contents:
+
+ if os is _os_merge:
+ try:
+ _unicode_encode(f_abs,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ _unicode_encode(f_abs,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+
+ f = f_abs[root_len:]
+ if not unmerge and self.isowner(f):
+ # We have an identically named replacement file,
+ # so we don't try to preserve the old copy.
+ continue
+ try:
+ consumers = linkmap.findConsumers(f,
+ exclude_providers=(installed_instance.isowner,))
+ except KeyError:
+ continue
+ if not consumers:
+ continue
+ provider_node = path_to_node(f)
+ lib_graph.add(provider_node, None)
+ provider_nodes.add(provider_node)
+ consumer_map[provider_node] = consumers
+
+ # Create consumer nodes and add them to the graph.
+ # Note that consumers can also be providers.
+ for provider_node, consumers in consumer_map.items():
+ for c in consumers:
+ consumer_node = path_to_node(c)
+ if installed_instance.isowner(c) and \
+ consumer_node not in provider_nodes:
+ # This is not a provider, so it will be uninstalled.
+ continue
+ lib_graph.add(provider_node, consumer_node)
+
+ # Locate nodes which should be preserved. They consist of all
+ # providers that are reachable from consumers that are not
+ # providers themselves.
+ preserve_nodes = set()
+ for consumer_node in lib_graph.root_nodes():
+ if consumer_node in provider_nodes:
+ continue
+ # Preserve all providers that are reachable from this consumer.
+ node_stack = lib_graph.child_nodes(consumer_node)
+ while node_stack:
+ provider_node = node_stack.pop()
+ if provider_node in preserve_nodes:
+ continue
+ preserve_nodes.add(provider_node)
+ node_stack.extend(lib_graph.child_nodes(provider_node))
+
+ preserve_paths = set()
+ for preserve_node in preserve_nodes:
+ # Preserve the library itself, and also preserve the
+ # soname symlink which is the only symlink that is
+ # strictly required.
+ hardlinks = set()
+ soname_symlinks = set()
+ soname = linkmap.getSoname(next(iter(preserve_node.alt_paths)))
+ for f in preserve_node.alt_paths:
+ f_abs = os.path.join(root, f.lstrip(os.sep))
+ try:
+ if stat.S_ISREG(os.lstat(f_abs).st_mode):
+ hardlinks.add(f)
+ elif os.path.basename(f) == soname:
+ soname_symlinks.add(f)
+ except OSError:
+ pass
+
+ if hardlinks:
+ preserve_paths.update(hardlinks)
+ preserve_paths.update(soname_symlinks)
+
+ return preserve_paths
+
+ def _add_preserve_libs_to_contents(self, preserve_paths):
+ """
+ Preserve libs returned from _find_libs_to_preserve().
+ """
+
+ if not preserve_paths:
+ return
+
+ os = _os_merge
+ showMessage = self._display_merge
+ root = self.settings['ROOT']
+
+ # Copy contents entries from the old package to the new one.
+ new_contents = self.getcontents().copy()
+ old_contents = self._installed_instance.getcontents()
+ for f in sorted(preserve_paths):
+ f = _unicode_decode(f,
+ encoding=_encodings['content'], errors='strict')
+ f_abs = os.path.join(root, f.lstrip(os.sep))
+ contents_entry = old_contents.get(f_abs)
+ if contents_entry is None:
+ # This will probably never happen, but it might if one of the
+ # paths returned from findConsumers() refers to one of the libs
+ # that should be preserved yet the path is not listed in the
+ # contents. Such a path might belong to some other package, so
+ # it shouldn't be preserved here.
+ showMessage(_("!!! File '%s' will not be preserved "
+ "due to missing contents entry\n") % (f_abs,),
+ level=logging.ERROR, noiselevel=-1)
+ preserve_paths.remove(f)
+ continue
+ new_contents[f_abs] = contents_entry
+ obj_type = contents_entry[0]
+ showMessage(_(">>> needed %s %s\n") % (obj_type, f_abs),
+ noiselevel=-1)
+ # Add parent directories to contents if necessary.
+ parent_dir = os.path.dirname(f_abs)
+ while len(parent_dir) > len(root):
+ new_contents[parent_dir] = ["dir"]
+ prev = parent_dir
+ parent_dir = os.path.dirname(parent_dir)
+ if prev == parent_dir:
+ break
+ outfile = atomic_ofstream(os.path.join(self.dbtmpdir, "CONTENTS"))
+ write_contents(new_contents, root, outfile)
+ outfile.close()
+ self._clear_contents_cache()
+
+ def _find_unused_preserved_libs(self, unmerge_no_replacement):
+ """
+ Find preserved libraries that don't have any consumers left.
+ """
+
+ if self._linkmap_broken or \
+ self.vartree.dbapi._linkmap is None or \
+ self.vartree.dbapi._plib_registry is None or \
+ not self.vartree.dbapi._plib_registry.hasEntries():
+ return {}
+
+ # Since preserved libraries can be consumers of other preserved
+ # libraries, use a graph to track consumer relationships.
+ plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
+ linkmap = self.vartree.dbapi._linkmap
+ lib_graph = digraph()
+ preserved_nodes = set()
+ preserved_paths = set()
+ path_cpv_map = {}
+ path_node_map = {}
+ root = self.settings['ROOT']
+
+ def path_to_node(path):
+ node = path_node_map.get(path)
+ if node is None:
+ node = LinkageMap._LibGraphNode(linkmap._obj_key(path))
+ alt_path_node = lib_graph.get(node)
+ if alt_path_node is not None:
+ node = alt_path_node
+ node.alt_paths.add(path)
+ path_node_map[path] = node
+ return node
+
+ for cpv, plibs in plib_dict.items():
+ for f in plibs:
+ path_cpv_map[f] = cpv
+ preserved_node = path_to_node(f)
+ if not preserved_node.file_exists():
+ continue
+ lib_graph.add(preserved_node, None)
+ preserved_paths.add(f)
+ preserved_nodes.add(preserved_node)
+ for c in self.vartree.dbapi._linkmap.findConsumers(f):
+ consumer_node = path_to_node(c)
+ if not consumer_node.file_exists():
+ continue
+ # Note that consumers may also be providers.
+ lib_graph.add(preserved_node, consumer_node)
+
+ # Eliminate consumers having providers with the same soname as an
+ # installed library that is not preserved. This eliminates
+ # libraries that are erroneously preserved due to a move from one
+ # directory to another.
+ # Also eliminate consumers that are going to be unmerged if
+ # unmerge_no_replacement is True.
+ provider_cache = {}
+ for preserved_node in preserved_nodes:
+ soname = linkmap.getSoname(preserved_node)
+ for consumer_node in lib_graph.parent_nodes(preserved_node):
+ if consumer_node in preserved_nodes:
+ continue
+ if unmerge_no_replacement:
+ will_be_unmerged = True
+ for path in consumer_node.alt_paths:
+ if not self.isowner(path):
+ will_be_unmerged = False
+ break
+ if will_be_unmerged:
+ # This consumer is not preserved and it is
+ # being unmerged, so drop this edge.
+ lib_graph.remove_edge(preserved_node, consumer_node)
+ continue
+
+ providers = provider_cache.get(consumer_node)
+ if providers is None:
+ providers = linkmap.findProviders(consumer_node)
+ provider_cache[consumer_node] = providers
+ providers = providers.get(soname)
+ if providers is None:
+ continue
+ for provider in providers:
+ if provider in preserved_paths:
+ continue
+ provider_node = path_to_node(provider)
+ if not provider_node.file_exists():
+ continue
+ if provider_node in preserved_nodes:
+ continue
+ # An alternative provider seems to be
+ # installed, so drop this edge.
+ lib_graph.remove_edge(preserved_node, consumer_node)
+ break
+
+ cpv_lib_map = {}
+ while lib_graph:
+ root_nodes = preserved_nodes.intersection(lib_graph.root_nodes())
+ if not root_nodes:
+ break
+ lib_graph.difference_update(root_nodes)
+ unlink_list = set()
+ for node in root_nodes:
+ unlink_list.update(node.alt_paths)
+ unlink_list = sorted(unlink_list)
+ for obj in unlink_list:
+ cpv = path_cpv_map.get(obj)
+ if cpv is None:
+ # This means that a symlink is in the preserved libs
+ # registry, but the actual lib it points to is not.
+ self._display_merge(_("!!! symlink to lib is preserved, "
+ "but not the lib itself:\n!!! '%s'\n") % (obj,),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+ removed = cpv_lib_map.get(cpv)
+ if removed is None:
+ removed = set()
+ cpv_lib_map[cpv] = removed
+ removed.add(obj)
+
+ return cpv_lib_map
+
+ def _remove_preserved_libs(self, cpv_lib_map):
+ """
+ Remove files returned from _find_unused_preserved_libs().
+ """
+
+ os = _os_merge
+
+ files_to_remove = set()
+ for files in cpv_lib_map.values():
+ files_to_remove.update(files)
+ files_to_remove = sorted(files_to_remove)
+ showMessage = self._display_merge
+ root = self.settings['ROOT']
+
+ parent_dirs = set()
+ for obj in files_to_remove:
+ obj = os.path.join(root, obj.lstrip(os.sep))
+ parent_dirs.add(os.path.dirname(obj))
+ if os.path.islink(obj):
+ obj_type = _("sym")
+ else:
+ obj_type = _("obj")
+ try:
+ os.unlink(obj)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ else:
+ showMessage(_("<<< !needed %s %s\n") % (obj_type, obj),
+ noiselevel=-1)
+
+ # Remove empty parent directories if possible.
+ while parent_dirs:
+ x = parent_dirs.pop()
+ while True:
+ try:
+ os.rmdir(x)
+ except OSError:
+ break
+ prev = x
+ x = os.path.dirname(x)
+ if x == prev:
+ break
+
+ self.vartree.dbapi._plib_registry.pruneNonExisting()
+
+ def _collision_protect(self, srcroot, destroot, mypkglist,
+ file_list, symlink_list):
+
+ os = _os_merge
+
+ collision_ignore = []
+ for x in portage.util.shlex_split(
+ self.settings.get("COLLISION_IGNORE", "")):
+ if os.path.isdir(os.path.join(self._eroot, x.lstrip(os.sep))):
+ x = normalize_path(x)
+ x += "/*"
+ collision_ignore.append(x)
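+ # Illustrative (assumed setting): COLLISION_IGNORE="/lib/modules" yields
+ # the pattern "/lib/modules/*" when that path is an existing directory;
+ # collision candidates are matched against it with fnmatch further below.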
+
+ # For collisions with preserved libraries, the current package
+ # will assume ownership and the libraries will be unregistered.
+ if self.vartree.dbapi._plib_registry is None:
+ # preserve-libs is entirely disabled
+ plib_cpv_map = None
+ plib_paths = None
+ plib_inodes = {}
+ else:
+ plib_dict = self.vartree.dbapi._plib_registry.getPreservedLibs()
+ plib_cpv_map = {}
+ plib_paths = set()
+ for cpv, paths in plib_dict.items():
+ plib_paths.update(paths)
+ for f in paths:
+ plib_cpv_map[f] = cpv
+ plib_inodes = self._lstat_inode_map(plib_paths)
+
+ plib_collisions = {}
+
+ showMessage = self._display_merge
+ stopmerge = False
+ collisions = []
+ dirs = set()
+ dirs_ro = set()
+ symlink_collisions = []
+ destroot = self.settings['ROOT']
+ totfiles = len(file_list) + len(symlink_list)
+ showMessage(_(" %s checking %d files for package collisions\n") % \
+ (colorize("GOOD", "*"), totfiles))
+ for i, (f, f_type) in enumerate(chain(
+ ((f, "reg") for f in file_list),
+ ((f, "sym") for f in symlink_list))):
+ if i % 1000 == 0 and i != 0:
+ showMessage(_("%d files remaining ...\n") % (totfiles - i))
+
+ dest_path = normalize_path(
+ os.path.join(destroot, f.lstrip(os.path.sep)))
+
+ parent = os.path.dirname(dest_path)
+ if parent not in dirs:
+ for x in iter_parents(parent):
+ if x in dirs:
+ break
+ dirs.add(x)
+ if os.path.isdir(x):
+ if not os.access(x, os.W_OK):
+ dirs_ro.add(x)
+ break
+
+ try:
+ dest_lstat = os.lstat(dest_path)
+ except EnvironmentError as e:
+ if e.errno == errno.ENOENT:
+ del e
+ continue
+ elif e.errno == errno.ENOTDIR:
+ del e
+ # A non-directory is in a location where this package
+ # expects to have a directory.
+ dest_lstat = None
+ parent_path = dest_path
+ while len(parent_path) > len(destroot):
+ parent_path = os.path.dirname(parent_path)
+ try:
+ dest_lstat = os.lstat(parent_path)
+ break
+ except EnvironmentError as e:
+ if e.errno != errno.ENOTDIR:
+ raise
+ del e
+ if not dest_lstat:
+ raise AssertionError(
+ "unable to find non-directory " + \
+ "parent for '%s'" % dest_path)
+ dest_path = parent_path
+ f = os.path.sep + dest_path[len(destroot):]
+ if f in collisions:
+ continue
+ else:
+ raise
+ if f[0] != "/":
+ f="/"+f
+
+ if stat.S_ISDIR(dest_lstat.st_mode):
+ if f_type == "sym":
+ # This case is explicitly banned
+ # by PMS (see bug #326685).
+ symlink_collisions.append(f)
+ collisions.append(f)
+ continue
+
+ plibs = plib_inodes.get((dest_lstat.st_dev, dest_lstat.st_ino))
+ if plibs:
+ for path in plibs:
+ cpv = plib_cpv_map[path]
+ paths = plib_collisions.get(cpv)
+ if paths is None:
+ paths = set()
+ plib_collisions[cpv] = paths
+ paths.add(path)
+ # The current package will assume ownership and the
+ # libraries will be unregistered, so exclude this
+ # path from the normal collisions.
+ continue
+
+ isowned = False
+ full_path = os.path.join(destroot, f.lstrip(os.path.sep))
+ for ver in mypkglist:
+ if ver.isowner(f):
+ isowned = True
+ break
+ if not isowned and self.isprotected(full_path):
+ isowned = True
+ if not isowned:
+ f_match = full_path[len(self._eroot)-1:]
+ stopmerge = True
+ for pattern in collision_ignore:
+ if fnmatch.fnmatch(f_match, pattern):
+ stopmerge = False
+ break
+ if stopmerge:
+ collisions.append(f)
+ return collisions, dirs_ro, symlink_collisions, plib_collisions
+
+ def _lstat_inode_map(self, path_iter):
+ """
+ Use lstat to create a map of the form:
+ {(st_dev, st_ino) : set([path1, path2, ...])}
+ Multiple paths may reference the same inode due to hardlinks.
+ All lstat() calls are relative to self.settings['ROOT'].
+ """
+
+ os = _os_merge
+
+ root = self.settings['ROOT']
+ inode_map = {}
+ for f in path_iter:
+ path = os.path.join(root, f.lstrip(os.sep))
+ try:
+ st = os.lstat(path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ENOTDIR):
+ raise
+ del e
+ continue
+ key = (st.st_dev, st.st_ino)
+ paths = inode_map.get(key)
+ if paths is None:
+ paths = set()
+ inode_map[key] = paths
+ paths.add(f)
+ return inode_map
+
+ def _security_check(self, installed_instances):
+ if not installed_instances:
+ return 0
+
+ os = _os_merge
+
+ showMessage = self._display_merge
+
+ file_paths = set()
+ for dblnk in installed_instances:
+ file_paths.update(dblnk.getcontents())
+ inode_map = {}
+ real_paths = set()
+ for i, path in enumerate(file_paths):
+
+ if os is _os_merge:
+ try:
+ _unicode_encode(path,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ _unicode_encode(path,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+
+ try:
+ s = os.lstat(path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ENOTDIR):
+ raise
+ del e
+ continue
+ if not stat.S_ISREG(s.st_mode):
+ continue
+ path = os.path.realpath(path)
+ if path in real_paths:
+ continue
+ real_paths.add(path)
+ if s.st_nlink > 1 and \
+ s.st_mode & (stat.S_ISUID | stat.S_ISGID):
+ k = (s.st_dev, s.st_ino)
+ inode_map.setdefault(k, []).append((path, s))
+ suspicious_hardlinks = []
+ for path_list in inode_map.values():
+ path, s = path_list[0]
+ if len(path_list) == s.st_nlink:
+ # All hardlinks seem to be owned by this package.
+ continue
+ suspicious_hardlinks.append(path_list)
+ if not suspicious_hardlinks:
+ return 0
+
+ msg = []
+ msg.append(_("suid/sgid file(s) "
+ "with suspicious hardlink(s):"))
+ msg.append("")
+ for path_list in suspicious_hardlinks:
+ for path, s in path_list:
+ msg.append("\t%s" % path)
+ msg.append("")
+ msg.append(_("See the Gentoo Security Handbook "
+ "guide for advice on how to proceed."))
+
+ self._eerror("preinst", msg)
+
+ return 1
+
+ def _eqawarn(self, phase, lines):
+ self._elog("eqawarn", phase, lines)
+
+ def _eerror(self, phase, lines):
+ self._elog("eerror", phase, lines)
+
+ def _elog(self, funcname, phase, lines):
+ func = getattr(portage.elog.messages, funcname)
+ if self._scheduler is None:
+ for l in lines:
+ func(l, phase=phase, key=self.mycpv)
+ else:
+ background = self.settings.get("PORTAGE_BACKGROUND") == "1"
+ log_path = None
+ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ log_path = self.settings.get("PORTAGE_LOG_FILE")
+ out = io.StringIO()
+ for line in lines:
+ func(line, phase=phase, key=self.mycpv, out=out)
+ msg = out.getvalue()
+ self._scheduler.output(msg,
+ background=background, log_path=log_path)
+
+ def _elog_process(self, phasefilter=None):
+ cpv = self.mycpv
+ if self._pipe is None:
+ elog_process(cpv, self.settings, phasefilter=phasefilter)
+ else:
+ logdir = os.path.join(self.settings["T"], "logging")
+ ebuild_logentries = collect_ebuild_messages(logdir)
+ # phasefilter is irrelevant for the above collect_ebuild_messages
+ # call, since this package instance has a private logdir. However,
+ # it may be relevant for the following collect_messages call.
+ py_logentries = collect_messages(key=cpv, phasefilter=phasefilter).get(cpv, {})
+ logentries = _merge_logentries(py_logentries, ebuild_logentries)
+ funcnames = {
+ "INFO": "einfo",
+ "LOG": "elog",
+ "WARN": "ewarn",
+ "QA": "eqawarn",
+ "ERROR": "eerror"
+ }
+ str_buffer = []
+ for phase, messages in logentries.items():
+ for key, lines in messages:
+ funcname = funcnames[key]
+ if isinstance(lines, basestring):
+ lines = [lines]
+ for line in lines:
+ for line in line.split('\n'):
+ fields = (funcname, phase, cpv, line)
+ str_buffer.append(' '.join(fields))
+ str_buffer.append('\n')
+ if str_buffer:
+ str_buffer = _unicode_encode(''.join(str_buffer))
+ while str_buffer:
+ str_buffer = str_buffer[os.write(self._pipe, str_buffer):]
+
+ def _emerge_log(self, msg):
+ emergelog(False, msg)
+
+ def treewalk(self, srcroot, destroot, inforoot, myebuild, cleanup=0,
+ mydbapi=None, prev_mtimes=None, counter=None):
+ """
+
+ This function does the following:
+
+ calls get_ro_checker to retrieve a function for checking whether Portage
+ will write to a read-only filesystem, then runs it against the directory list
+ calls self._preserve_libs if FEATURES=preserve-libs
+ calls self._collision_protect if FEATURES=collision-protect
+ calls doebuild(mydo=pkg_preinst)
+ Merges the package to the livefs
+ unmerges old version (if required)
+ calls doebuild(mydo=pkg_postinst)
+ calls env_update
+
+ @param srcroot: Typically this is ${D}
+ @type srcroot: String (Path)
+ @param destroot: ignored, self.settings['ROOT'] is used instead
+ @type destroot: String (Path)
+ @param inforoot: root of the vardb entry ?
+ @type inforoot: String (Path)
+ @param myebuild: path to the ebuild that we are processing
+ @type myebuild: String (Path)
+ @param mydbapi: dbapi which is handed to doebuild.
+ @type mydbapi: portdbapi instance
+ @param prev_mtimes: { Filename:mtime } mapping for env_update
+ @type prev_mtimes: Dictionary
+ @rtype: Boolean
+ @return:
+ 1. 0 on success
+ 2. 1 on failure
+
+ secondhand is a list of symlinks that have been skipped due to their target
+ not existing; we will merge these symlinks at a later time.
+ """
+
+ os = _os_merge
+
+ srcroot = _unicode_decode(srcroot,
+ encoding=_encodings['content'], errors='strict')
+ destroot = self.settings['ROOT']
+ inforoot = _unicode_decode(inforoot,
+ encoding=_encodings['content'], errors='strict')
+ myebuild = _unicode_decode(myebuild,
+ encoding=_encodings['content'], errors='strict')
+
+ showMessage = self._display_merge
+ srcroot = normalize_path(srcroot).rstrip(os.path.sep) + os.path.sep
+
+ if not os.path.isdir(srcroot):
+ showMessage(_("!!! Directory Not Found: D='%s'\n") % srcroot,
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ is_binpkg = self.settings.get("EMERGE_FROM") == "binary"
+ slot = ''
+ for var_name in ('CHOST', 'SLOT'):
+ try:
+ with io.open(_unicode_encode(
+ os.path.join(inforoot, var_name),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ val = f.readline().strip()
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ val = ''
+
+ if var_name == 'SLOT':
+ slot = val
+
+ if not slot.strip():
+ slot = self.settings.get(var_name, '')
+ if not slot.strip():
+ showMessage(_("!!! SLOT is undefined\n"),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+ write_atomic(os.path.join(inforoot, var_name), slot + '\n')
+
+ # This check only applies when built from source, since
+ # inforoot values are written just after src_install.
+ if not is_binpkg and val != self.settings.get(var_name, ''):
+ self._eqawarn('preinst',
+ [_("QA Notice: Expected %(var_name)s='%(expected_value)s', got '%(actual_value)s'\n") % \
+ {"var_name":var_name, "expected_value":self.settings.get(var_name, ''), "actual_value":val}])
+
+ def eerror(lines):
+ self._eerror("preinst", lines)
+
+ if not os.path.exists(self.dbcatdir):
+ ensure_dirs(self.dbcatdir)
+
+ # NOTE: We use SLOT obtained from the inforoot
+ # directory, in order to support USE=multislot.
+ # Use _pkg_str to discard the sub-slot part if necessary.
+ slot = _pkg_str(self.mycpv, slot=slot).slot
+ cp = self.mysplit[0]
+ slot_atom = "%s:%s" % (cp, slot)
+
+ self.lockdb()
+ try:
+ # filter any old-style virtual matches
+ slot_matches = [cpv for cpv in self.vartree.dbapi.match(slot_atom)
+ if cpv_getkey(cpv) == cp]
+
+ if self.mycpv not in slot_matches and \
+ self.vartree.dbapi.cpv_exists(self.mycpv):
+ # handle multislot or unapplied slotmove
+ slot_matches.append(self.mycpv)
+
+ others_in_slot = []
+ for cur_cpv in slot_matches:
+ # Clone the config in case one of these has to be unmerged,
+ # since we need it to have private ${T} etc... for things
+ # like elog.
+ settings_clone = portage.config(clone=self.settings)
+ settings_clone.pop("PORTAGE_BUILDDIR_LOCKED", None)
+ settings_clone.setcpv(cur_cpv, mydb=self.vartree.dbapi)
+ if self._preserve_libs and "preserve-libs" in \
+ settings_clone["PORTAGE_RESTRICT"].split():
+ self._preserve_libs = False
+ others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
+ settings=settings_clone,
+ vartree=self.vartree, treetype="vartree",
+ scheduler=self._scheduler, pipe=self._pipe))
+ finally:
+ self.unlockdb()
+
+ # If any instance has RESTRICT=preserve-libs, then
+ # restrict it for all instances.
+ if not self._preserve_libs:
+ for dblnk in others_in_slot:
+ dblnk._preserve_libs = False
+
+ retval = self._security_check(others_in_slot)
+ if retval:
+ return retval
+
+ if slot_matches:
+ # Used by self.isprotected().
+ max_dblnk = None
+ max_counter = -1
+ for dblnk in others_in_slot:
+ cur_counter = self.vartree.dbapi.cpv_counter(dblnk.mycpv)
+ if cur_counter > max_counter:
+ max_counter = cur_counter
+ max_dblnk = dblnk
+ self._installed_instance = max_dblnk
+
+ # Apply INSTALL_MASK before collision-protect, since it may
+ # be useful to avoid collisions in some scenarios.
+ # We cannot detect if this is needed or not here as INSTALL_MASK can be
+ # modified by bashrc files.
+ phase = MiscFunctionsProcess(background=False,
+ commands=["preinst_mask"], phase="preinst",
+ scheduler=self._scheduler, settings=self.settings)
+ phase.start()
+ phase.wait()
+ try:
+ with io.open(_unicode_encode(os.path.join(inforoot, "INSTALL_MASK"),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ install_mask = InstallMask(f.read())
+ except EnvironmentError:
+ install_mask = None
+
+ if install_mask:
+ install_mask_dir(self.settings["ED"], install_mask)
+ if any(x in self.settings.features for x in ('nodoc', 'noman', 'noinfo')):
+ try:
+ os.rmdir(os.path.join(self.settings["ED"], 'usr', 'share'))
+ except OSError:
+ pass
+
+ # We check for unicode encoding issues after src_install. However,
+ # the check must be repeated here for binary packages (it's
+ # inexpensive since we call os.walk() here anyway).
+ unicode_errors = []
+ line_ending_re = re.compile('[\n\r]')
+ srcroot_len = len(srcroot)
+ ed_len = len(self.settings["ED"])
+ eprefix_len = len(self.settings["EPREFIX"])
+
+ while True:
+
+ unicode_error = False
+ eagain_error = False
+
+ filelist = []
+ linklist = []
+ paths_with_newlines = []
+ def onerror(e):
+ raise
+ walk_iter = os.walk(srcroot, onerror=onerror)
+ while True:
+ try:
+ parent, dirs, files = next(walk_iter)
+ except StopIteration:
+ break
+ except OSError as e:
+ if e.errno != errno.EAGAIN:
+ raise
+ # Observed with PyPy 1.8.
+ eagain_error = True
+ break
+
+ try:
+ parent = _unicode_decode(parent,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeDecodeError:
+ new_parent = _unicode_decode(parent,
+ encoding=_encodings['merge'], errors='replace')
+ new_parent = _unicode_encode(new_parent,
+ encoding='ascii', errors='backslashreplace')
+ new_parent = _unicode_decode(new_parent,
+ encoding=_encodings['merge'], errors='replace')
+ os.rename(parent, new_parent)
+ unicode_error = True
+ unicode_errors.append(new_parent[ed_len:])
+ break
+
+ for fname in files:
+ try:
+ fname = _unicode_decode(fname,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeDecodeError:
+ fpath = portage._os.path.join(
+ parent.encode(_encodings['merge']), fname)
+ new_fname = _unicode_decode(fname,
+ encoding=_encodings['merge'], errors='replace')
+ new_fname = _unicode_encode(new_fname,
+ encoding='ascii', errors='backslashreplace')
+ new_fname = _unicode_decode(new_fname,
+ encoding=_encodings['merge'], errors='replace')
+ new_fpath = os.path.join(parent, new_fname)
+ os.rename(fpath, new_fpath)
+ unicode_error = True
+ unicode_errors.append(new_fpath[ed_len:])
+ fname = new_fname
+ fpath = new_fpath
+ else:
+ fpath = os.path.join(parent, fname)
+
+ relative_path = fpath[srcroot_len:]
+
+ if line_ending_re.search(relative_path) is not None:
+ paths_with_newlines.append(relative_path)
+
+ file_mode = os.lstat(fpath).st_mode
+ if stat.S_ISREG(file_mode):
+ filelist.append(relative_path)
+ elif stat.S_ISLNK(file_mode):
+ # Note: os.walk puts symlinks to directories in the "dirs"
+ # list and it does not traverse them since that could lead
+ # to an infinite recursion loop.
+ linklist.append(relative_path)
+
+ myto = _unicode_decode(
+ _os.readlink(_unicode_encode(fpath,
+ encoding=_encodings['merge'], errors='strict')),
+ encoding=_encodings['merge'], errors='replace')
+ if line_ending_re.search(myto) is not None:
+ paths_with_newlines.append(relative_path)
+
+ if unicode_error:
+ break
+
+ if not (unicode_error or eagain_error):
+ break
+
+ if unicode_errors:
+ self._elog("eqawarn", "preinst",
+ _merge_unicode_error(unicode_errors))
+
+ if paths_with_newlines:
+ msg = []
+ msg.append(_("This package installs one or more files containing line ending characters:"))
+ msg.append("")
+ paths_with_newlines.sort()
+ for f in paths_with_newlines:
+ msg.append("\t/%s" % (f.replace("\n", "\\n").replace("\r", "\\r")))
+ msg.append("")
+ msg.append(_("package %s NOT merged") % self.mycpv)
+ msg.append("")
+ eerror(msg)
+ return 1
+
+ # If there are no files to merge, and an installed package in the same
+ # slot has files, it probably means that something went wrong.
+ if self.settings.get("PORTAGE_PACKAGE_EMPTY_ABORT") == "1" and \
+ not filelist and not linklist and others_in_slot:
+ installed_files = None
+ for other_dblink in others_in_slot:
+ installed_files = other_dblink.getcontents()
+ if not installed_files:
+ continue
+ from textwrap import wrap
+ wrap_width = 72
+ msg = []
+ d = {
+ "new_cpv":self.mycpv,
+ "old_cpv":other_dblink.mycpv
+ }
+ msg.extend(wrap(_("The '%(new_cpv)s' package will not install "
+ "any files, but the currently installed '%(old_cpv)s'"
+ " package has the following files: ") % d, wrap_width))
+ msg.append("")
+ msg.extend(sorted(installed_files))
+ msg.append("")
+ msg.append(_("package %s NOT merged") % self.mycpv)
+ msg.append("")
+ msg.extend(wrap(
+ _("Manually run `emerge --unmerge =%s` if you "
+ "really want to remove the above files. Set "
+ "PORTAGE_PACKAGE_EMPTY_ABORT=\"0\" in "
+ "/etc/portage/make.conf if you do not want to "
+ "abort in cases like this.") % other_dblink.mycpv,
+ wrap_width))
+ eerror(msg)
+ if installed_files:
+ return 1
+
+ # Make sure the ebuild environment is initialized and that ${T}/elog
+ # exists for logging of collision-protect eerror messages.
+ if myebuild is None:
+ myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
+ doebuild_environment(myebuild, "preinst",
+ settings=self.settings, db=mydbapi)
+ self.settings["REPLACING_VERSIONS"] = " ".join(
+ [portage.versions.cpv_getversion(other.mycpv)
+ for other in others_in_slot])
+ prepare_build_dirs(settings=self.settings, cleanup=cleanup)
+
+ # check for package collisions
+ blockers = []
+ for blocker in self._blockers or []:
+ blocker = self.vartree.dbapi._dblink(blocker.cpv)
+ # It may have been unmerged before lock(s)
+ # were acquired.
+ if blocker.exists():
+ blockers.append(blocker)
+
+ collisions, dirs_ro, symlink_collisions, plib_collisions = \
+ self._collision_protect(srcroot, destroot,
+ others_in_slot + blockers, filelist, linklist)
+
+ # Check for read-only filesystems.
+ ro_checker = get_ro_checker()
+ rofilesystems = ro_checker(dirs_ro)
+
+ if rofilesystems:
+ msg = _("One or more files installed to this package are "
+ "set to be installed to read-only filesystems. "
+ "Please mount the following filesystems as read-write "
+ "and retry.")
+ msg = textwrap.wrap(msg, 70)
+ msg.append("")
+ for f in rofilesystems:
+ msg.append("\t%s" % f)
+ msg.append("")
+ self._elog("eerror", "preinst", msg)
+
+ msg = _("Package '%s' NOT merged due to read-only file systems.") % \
+ self.settings.mycpv
+ msg += _(" If necessary, refer to your elog "
+ "messages for the whole content of the above message.")
+ msg = textwrap.wrap(msg, 70)
+ eerror(msg)
+ return 1
+
+ if symlink_collisions:
+ # Symlink collisions need to be distinguished from other types
+ # of collisions, in order to avoid confusion (see bug #409359).
+ msg = _("Package '%s' has one or more collisions "
+ "between symlinks and directories, which is explicitly "
+ "forbidden by PMS section 13.4 (see bug #326685):") % \
+ (self.settings.mycpv,)
+ msg = textwrap.wrap(msg, 70)
+ msg.append("")
+ for f in symlink_collisions:
+ msg.append("\t%s" % os.path.join(destroot,
+ f.lstrip(os.path.sep)))
+ msg.append("")
+ self._elog("eerror", "preinst", msg)
+
+ if collisions:
+ collision_protect = "collision-protect" in self.settings.features
+ protect_owned = "protect-owned" in self.settings.features
+ msg = _("This package will overwrite one or more files that"
+ " may belong to other packages (see list below).")
+ if not (collision_protect or protect_owned):
+ msg += _(" Add either \"collision-protect\" or"
+ " \"protect-owned\" to FEATURES in"
+ " make.conf if you would like the merge to abort"
+ " in cases like this. See the make.conf man page for"
+ " more information about these features.")
+ if self.settings.get("PORTAGE_QUIET") != "1":
+ msg += _(" You can use a command such as"
+ " `portageq owners / <filename>` to identify the"
+ " installed package that owns a file. If portageq"
+ " reports that only one package owns a file then do NOT"
+ " file a bug report. A bug report is only useful if it"
+ " identifies at least two or more packages that are known"
+ " to install the same file(s)."
+ " If a collision occurs and you"
+ " can not explain where the file came from then you"
+ " should simply ignore the collision since there is not"
+ " enough information to determine if a real problem"
+ " exists. Please do NOT file a bug report at"
+ " https://bugs.gentoo.org/ unless you report exactly which"
+ " two packages install the same file(s). See"
+ " https://wiki.gentoo.org/wiki/Knowledge_Base:Blockers"
+ " for tips on how to solve the problem. And once again,"
+ " please do NOT file a bug report unless you have"
+ " completely understood the above message.")
+
+ self.settings["EBUILD_PHASE"] = "preinst"
+ from textwrap import wrap
+ msg = wrap(msg, 70)
+ if collision_protect:
+ msg.append("")
+ msg.append(_("package %s NOT merged") % self.settings.mycpv)
+ msg.append("")
+ msg.append(_("Detected file collision(s):"))
+ msg.append("")
+
+ for f in collisions:
+ msg.append("\t%s" % \
+ os.path.join(destroot, f.lstrip(os.path.sep)))
+
+ eerror(msg)
+
+ owners = None
+ if collision_protect or protect_owned or symlink_collisions:
+ msg = []
+ msg.append("")
+ msg.append(_("Searching all installed"
+ " packages for file collisions..."))
+ msg.append("")
+ msg.append(_("Press Ctrl-C to Stop"))
+ msg.append("")
+ eerror(msg)
+
+ if len(collisions) > 20:
+ # get_owners is slow for large numbers of files, so
+ # don't look them all up.
+ collisions = collisions[:20]
+
+ pkg_info_strs = {}
+ self.lockdb()
+ try:
+ owners = self.vartree.dbapi._owners.get_owners(collisions)
+ self.vartree.dbapi.flush_cache()
+
+ for pkg in owners:
+ pkg = self.vartree.dbapi._pkg_str(pkg.mycpv, None)
+ pkg_info_str = "%s%s%s" % (pkg,
+ _slot_separator, pkg.slot)
+ if pkg.repo != _unknown_repo:
+ pkg_info_str += "%s%s" % (_repo_separator,
+ pkg.repo)
+ pkg_info_strs[pkg] = pkg_info_str
+
+ finally:
+ self.unlockdb()
+
+ for pkg, owned_files in owners.items():
+ msg = []
+ msg.append(pkg_info_strs[pkg.mycpv])
+ for f in sorted(owned_files):
+ msg.append("\t%s" % os.path.join(destroot,
+ f.lstrip(os.path.sep)))
+ msg.append("")
+ eerror(msg)
+
+ if not owners:
+ eerror([_("None of the installed"
+ " packages claim the file(s)."), ""])
+
+ symlink_abort_msg = _("Package '%s' NOT merged since it has "
+ "one or more collisions between symlinks and directories, "
+ "which is explicitly forbidden by PMS section 13.4 "
+ "(see bug #326685).")
+
+ # The explanation about the collision and how to solve
+ # it may not be visible via a scrollback buffer, especially
+ # if the number of file collisions is large. Therefore,
+ # show a summary at the end.
+ abort = False
+ if symlink_collisions:
+ abort = True
+ msg = symlink_abort_msg % (self.settings.mycpv,)
+ elif collision_protect:
+ abort = True
+ msg = _("Package '%s' NOT merged due to file collisions.") % \
+ self.settings.mycpv
+ elif protect_owned and owners:
+ abort = True
+ msg = _("Package '%s' NOT merged due to file collisions.") % \
+ self.settings.mycpv
+ else:
+ msg = _("Package '%s' merged despite file collisions.") % \
+ self.settings.mycpv
+ msg += _(" If necessary, refer to your elog "
+ "messages for the whole content of the above message.")
+ eerror(wrap(msg, 70))
+
+ if abort:
+ return 1
+
+ # The merge process may move files out of the image directory,
+ # which causes invalidation of the .installed flag.
+ try:
+ os.unlink(os.path.join(
+ os.path.dirname(normalize_path(srcroot)), ".installed"))
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+
+ self.dbdir = self.dbtmpdir
+ self.delete()
+ ensure_dirs(self.dbtmpdir)
+
+ downgrade = False
+ if self._installed_instance is not None and \
+ vercmp(self.mycpv.version,
+ self._installed_instance.mycpv.version) < 0:
+ downgrade = True
+
+ if self._installed_instance is not None:
+ rval = self._pre_merge_backup(self._installed_instance, downgrade)
+ if rval != os.EX_OK:
+ showMessage(_("!!! FAILED preinst: ") +
+ "quickpkg: %s\n" % rval,
+ level=logging.ERROR, noiselevel=-1)
+ return rval
+
+ # run preinst script
+ showMessage(_(">>> Merging %(cpv)s to %(destroot)s\n") % \
+ {"cpv":self.mycpv, "destroot":destroot})
+ phase = EbuildPhase(background=False, phase="preinst",
+ scheduler=self._scheduler, settings=self.settings)
+ phase.start()
+ a = phase.wait()
+
+ # XXX: Decide how to handle failures here.
+ if a != os.EX_OK:
+ showMessage(_("!!! FAILED preinst: ")+str(a)+"\n",
+ level=logging.ERROR, noiselevel=-1)
+ return a
+
+ # copy "info" files (like SLOT, CFLAGS, etc.) into the database
+ for x in os.listdir(inforoot):
+ self.copyfile(inforoot+"/"+x)
+
+ # write local package counter for recording
+ if counter is None:
+ counter = self.vartree.dbapi.counter_tick(mycpv=self.mycpv)
+ with io.open(_unicode_encode(os.path.join(self.dbtmpdir, 'COUNTER'),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace') as f:
+ f.write("%s" % counter)
+
+ self.updateprotect()
+
+ #if we have a file containing previously-merged config file md5sums, grab it.
+ self.vartree.dbapi._fs_lock()
+ try:
+ # This prunes any libraries from the registry that no longer
+ # exist on disk, in case they have been manually removed.
+ # This has to be done prior to merge, since after merge it
+ # is non-trivial to distinguish these files from files
+ # that have just been merged.
+ plib_registry = self.vartree.dbapi._plib_registry
+ if plib_registry:
+ plib_registry.lock()
+ try:
+ plib_registry.load()
+ plib_registry.store()
+ finally:
+ plib_registry.unlock()
+
+ # Always behave like --noconfmem is enabled for downgrades
+ # so that people who don't know about this option are less
+ # likely to get confused when doing upgrade/downgrade cycles.
+ cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file)
+ if "NOCONFMEM" in self.settings or downgrade:
+ cfgfiledict["IGNORE"]=1
+ else:
+ cfgfiledict["IGNORE"]=0
+
+ rval = self._merge_contents(srcroot, destroot, cfgfiledict)
+ if rval != os.EX_OK:
+ return rval
+ finally:
+ self.vartree.dbapi._fs_unlock()
+
+ # These caches are populated during collision-protect and the data
+ # they contain is now invalid. It's very important to invalidate
+ # the contents_inodes cache so that FEATURES=unmerge-orphans
+ # doesn't unmerge anything that belongs to this package that has
+ # just been merged.
+ for dblnk in others_in_slot:
+ dblnk._clear_contents_cache()
+ self._clear_contents_cache()
+
+ linkmap = self.vartree.dbapi._linkmap
+ plib_registry = self.vartree.dbapi._plib_registry
+ # We initialize preserve_paths to an empty set rather
+ # than None here because it plays an important role
+ # in prune_plib_registry logic by serving to indicate
+ # that we have a replacement for a package that's
+ # being unmerged.
+
+ preserve_paths = set()
+ needed = None
+ if not (self._linkmap_broken or linkmap is None or
+ plib_registry is None):
+ self.vartree.dbapi._fs_lock()
+ plib_registry.lock()
+ try:
+ plib_registry.load()
+ needed = os.path.join(inforoot, linkmap._needed_aux_key)
+ self._linkmap_rebuild(include_file=needed)
+
+ # Preserve old libs if they are still in use
+ # TODO: Handle cases where the previous instance
+ # has already been uninstalled but it still has some
+ # preserved libraries in the registry that we may
+ # want to preserve here.
+ preserve_paths = self._find_libs_to_preserve()
+ finally:
+ plib_registry.unlock()
+ self.vartree.dbapi._fs_unlock()
+
+ if preserve_paths:
+ self._add_preserve_libs_to_contents(preserve_paths)
+
+ # If portage is reinstalling itself, remove the old
+ # version now since we want to use the temporary
+ # PORTAGE_BIN_PATH that will be removed when we return.
+ reinstall_self = False
+ if self.myroot == "/" and \
+ match_from_list(PORTAGE_PACKAGE_ATOM, [self.mycpv]):
+ reinstall_self = True
+
+ emerge_log = self._emerge_log
+
+ # If we have any preserved libraries then autoclean
+ # is forced so that preserve-libs logic doesn't have
+ # to account for the additional complexity of the
+ # AUTOCLEAN=no mode.
+ autoclean = self.settings.get("AUTOCLEAN", "yes") == "yes" \
+ or preserve_paths
+
+ if autoclean:
+ emerge_log(_(" >>> AUTOCLEAN: %s") % (slot_atom,))
+
+ others_in_slot.append(self) # self has just been merged
+ for dblnk in list(others_in_slot):
+ if dblnk is self:
+ continue
+ if not (autoclean or dblnk.mycpv == self.mycpv or reinstall_self):
+ continue
+ showMessage(_(">>> Safely unmerging already-installed instance...\n"))
+ emerge_log(_(" === Unmerging... (%s)") % (dblnk.mycpv,))
+ others_in_slot.remove(dblnk) # dblnk will unmerge itself now
+ dblnk._linkmap_broken = self._linkmap_broken
+ dblnk.settings["REPLACED_BY_VERSION"] = portage.versions.cpv_getversion(self.mycpv)
+ dblnk.settings.backup_changes("REPLACED_BY_VERSION")
+ unmerge_rval = dblnk.unmerge(ldpath_mtimes=prev_mtimes,
+ others_in_slot=others_in_slot, needed=needed,
+ preserve_paths=preserve_paths)
+ dblnk.settings.pop("REPLACED_BY_VERSION", None)
+
+ if unmerge_rval == os.EX_OK:
+ emerge_log(_(" >>> unmerge success: %s") % (dblnk.mycpv,))
+ else:
+ emerge_log(_(" !!! unmerge FAILURE: %s") % (dblnk.mycpv,))
+
+ self.lockdb()
+ try:
+ # TODO: Check status and abort if necessary.
+ dblnk.delete()
+ finally:
+ self.unlockdb()
+ showMessage(_(">>> Original instance of package unmerged safely.\n"))
+
+ if len(others_in_slot) > 1:
+ showMessage(colorize("WARN", _("WARNING:"))
+ + _(" AUTOCLEAN is disabled. This can cause serious"
+ " problems due to overlapping packages.\n"),
+ level=logging.WARN, noiselevel=-1)
+
+ # We hold both directory locks.
+ self.dbdir = self.dbpkgdir
+ self.lockdb()
+ try:
+ self.delete()
+ _movefile(self.dbtmpdir, self.dbpkgdir, mysettings=self.settings)
+ self._merged_path(self.dbpkgdir, os.lstat(self.dbpkgdir))
+ self.vartree.dbapi._cache_delta.recordEvent(
+ "add", self.mycpv, slot, counter)
+ finally:
+ self.unlockdb()
+
+ # Check for file collisions with blocking packages
+ # and remove any colliding files from their CONTENTS
+ # since they now belong to this package.
+ self._clear_contents_cache()
+ contents = self.getcontents()
+ destroot_len = len(destroot) - 1
+ self.lockdb()
+ try:
+ for blocker in blockers:
+ self.vartree.dbapi.removeFromContents(blocker, iter(contents),
+ relative_paths=False)
+ finally:
+ self.unlockdb()
+
+ plib_registry = self.vartree.dbapi._plib_registry
+ if plib_registry:
+ self.vartree.dbapi._fs_lock()
+ plib_registry.lock()
+ try:
+ plib_registry.load()
+
+ if preserve_paths:
+ # keep track of the libs we preserved
+ plib_registry.register(self.mycpv, slot, counter,
+ sorted(preserve_paths))
+
+ # Unregister any preserved libs that this package has overwritten
+ # and update the contents of the packages that owned them.
+ plib_dict = plib_registry.getPreservedLibs()
+ for cpv, paths in plib_collisions.items():
+ if cpv not in plib_dict:
+ continue
+ has_vdb_entry = False
+ if cpv != self.mycpv:
+ # If we've replaced another instance with the
+ # same cpv then the vdb entry no longer belongs
+ # to it, so we'll have to get the slot and counter
+ # from plib_registry._data instead.
+ self.vartree.dbapi.lock()
+ try:
+ try:
+ slot = self.vartree.dbapi._pkg_str(cpv, None).slot
+ counter = self.vartree.dbapi.cpv_counter(cpv)
+ except (KeyError, InvalidData):
+ pass
+ else:
+ has_vdb_entry = True
+ self.vartree.dbapi.removeFromContents(
+ cpv, paths)
+ finally:
+ self.vartree.dbapi.unlock()
+
+ if not has_vdb_entry:
+ # It's possible for previously unmerged packages
+ # to have preserved libs in the registry, so try
+ # to retrieve the slot and counter from there.
+ has_registry_entry = False
+ for plib_cps, (plib_cpv, plib_counter, plib_paths) in \
+ plib_registry._data.items():
+ if plib_cpv != cpv:
+ continue
+ try:
+ cp, slot = plib_cps.split(":", 1)
+ except ValueError:
+ continue
+ counter = plib_counter
+ has_registry_entry = True
+ break
+
+ if not has_registry_entry:
+ continue
+
+ remaining = [f for f in plib_dict[cpv] if f not in paths]
+ plib_registry.register(cpv, slot, counter, remaining)
+
+ plib_registry.store()
+ finally:
+ plib_registry.unlock()
+ self.vartree.dbapi._fs_unlock()
+
+ self.vartree.dbapi._add(self)
+ contents = self.getcontents()
+
+ #do postinst script
+ self.settings["PORTAGE_UPDATE_ENV"] = \
+ os.path.join(self.dbpkgdir, "environment.bz2")
+ self.settings.backup_changes("PORTAGE_UPDATE_ENV")
+ try:
+ phase = EbuildPhase(background=False, phase="postinst",
+ scheduler=self._scheduler, settings=self.settings)
+ phase.start()
+ a = phase.wait()
+ if a == os.EX_OK:
+ showMessage(_(">>> %s merged.\n") % self.mycpv)
+ finally:
+ self.settings.pop("PORTAGE_UPDATE_ENV", None)
+
+ if a != os.EX_OK:
+ # It's stupid to bail out here, so keep going regardless of
+ # phase return code.
+ self._postinst_failure = True
+ self._elog("eerror", "postinst", [
+ _("FAILED postinst: %s") % (a,),
+ ])
+
+ #update environment settings, library paths. DO NOT change symlinks.
+ env_update(
+ target_root=self.settings['ROOT'], prev_mtimes=prev_mtimes,
+ contents=contents, env=self.settings,
+ writemsg_level=self._display_merge, vardbapi=self.vartree.dbapi)
+
+ # For gcc upgrades, preserved libs have to be removed after the
+ # library path has been updated.
+ self._prune_plib_registry()
+ self._post_merge_sync()
+
+ return os.EX_OK
+
+ def _new_backup_path(self, p):
+ """
+ This works for any type of path, such as a regular file, symlink,
+ or directory. The parent directory is assumed to exist.
+ The returned filename is of the form p + '.backup.' + x, where
+ x guarantees that the returned path does not exist yet.
+ """
+ os = _os_merge
+
+ x = -1
+ while True:
+ x += 1
+ backup_p = '%s.backup.%04d' % (p, x)
+ try:
+ os.lstat(backup_p)
+ except OSError:
+ break
+
+ return backup_p
+
+ def _merge_contents(self, srcroot, destroot, cfgfiledict):
+
+ cfgfiledict_orig = cfgfiledict.copy()
+
+ # open CONTENTS file (possibly overwriting old one) for recording
+ # Use atomic_ofstream for automatic coercion of raw bytes to
+ # unicode, in order to prevent TypeError when writing raw bytes
+ # to TextIOWrapper with python2.
+ outfile = atomic_ofstream(_unicode_encode(
+ os.path.join(self.dbtmpdir, 'CONTENTS'),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace')
+
+ # Don't bump mtimes on merge since some applications require
+ # preservation of timestamps. This means that the unmerge phase must
+ # check to see if a file belongs to an installed instance in the same
+ # slot.
+ mymtime = None
+
+ # set umask to 0 for merging; save the old umask in prevmask (since this is a global change)
+ prevmask = os.umask(0)
+ secondhand = []
+
+ # we do a first merge; this will recurse through all files in our srcroot but also build up a
+ # "second hand" of symlinks to merge later
+ if self.mergeme(srcroot, destroot, outfile, secondhand,
+ self.settings["EPREFIX"].lstrip(os.sep), cfgfiledict, mymtime):
+ return 1
+
+ # now it's time to deal with our second hand; we'll loop until we can't merge any more. The rest are
+ # broken symlinks. We'll merge them too.
+ lastlen = 0
+ while len(secondhand) and len(secondhand)!=lastlen:
+ # clear the thirdhand. Anything from our second hand that
+ # couldn't get merged will be added to thirdhand.
+
+ thirdhand = []
+ if self.mergeme(srcroot, destroot, outfile, thirdhand,
+ secondhand, cfgfiledict, mymtime):
+ return 1
+
+ #swap hands
+ lastlen = len(secondhand)
+
+ # our thirdhand now becomes our secondhand. It's ok to throw
+ # away secondhand since thirdhand contains all the stuff that
+ # couldn't be merged.
+ secondhand = thirdhand
+
+ if len(secondhand):
+ # force merge of remaining symlinks (broken or circular; oh well)
+ if self.mergeme(srcroot, destroot, outfile, None,
+ secondhand, cfgfiledict, mymtime):
+ return 1
+
+ #restore umask
+ os.umask(prevmask)
+
+ #if we opened it, close it
+ outfile.flush()
+ outfile.close()
+
+ # write out our collection of md5sums
+ if cfgfiledict != cfgfiledict_orig:
+ cfgfiledict.pop("IGNORE", None)
+ try:
+ writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
+ except InvalidLocation:
+ self.settings._init_dirs()
+ writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
+
+ return os.EX_OK
+
+ def mergeme(self, srcroot, destroot, outfile, secondhand, stufftomerge, cfgfiledict, thismtime):
+ """
+
+ This function handles actual merging of the package contents to the livefs.
+ It also handles config protection.
+
+ @param srcroot: Where are we copying files from (usually ${D})
+ @type srcroot: String (Path)
+ @param destroot: Typically ${ROOT}
+ @type destroot: String (Path)
+ @param outfile: File to log operations to
+ @type outfile: File Object
+ @param secondhand: A list of items to merge in pass two (usually
+ symlinks that point to non-existing files and may get merged later)
+ @type secondhand: List
+ @param stufftomerge: Either a directory to merge, or a list of items.
+ @type stufftomerge: String or List
+ @param cfgfiledict: { File:mtime } mapping for config_protected files
+ @type cfgfiledict: Dictionary
+ @param thismtime: None or new mtime for merged files (expressed in seconds
+ in Python <3.3 and nanoseconds in Python >=3.3)
+ @type thismtime: None or Int
+ @rtype: None or Boolean
+ @return:
+ 1. True on failure
+ 2. None otherwise
+
+ """
+
+ showMessage = self._display_merge
+ writemsg = self._display_merge
+
+ os = _os_merge
+ sep = os.sep
+ join = os.path.join
+ srcroot = normalize_path(srcroot).rstrip(sep) + sep
+ destroot = normalize_path(destroot).rstrip(sep) + sep
+ calc_prelink = "prelink-checksums" in self.settings.features
+
+ protect_if_modified = \
+ "config-protect-if-modified" in self.settings.features and \
+ self._installed_instance is not None
+
+ # this is supposed to merge a list of files. There will be 2 forms of argument passing.
+ if isinstance(stufftomerge, basestring):
+ #A directory is specified. Figure out protection paths, listdir() it and process it.
+ mergelist = [join(stufftomerge, child) for child in \
+ os.listdir(join(srcroot, stufftomerge))]
+ else:
+ mergelist = stufftomerge[:]
+
+ while mergelist:
+
+ relative_path = mergelist.pop()
+ mysrc = join(srcroot, relative_path)
+ mydest = join(destroot, relative_path)
+ # myrealdest is mydest without the $ROOT prefix (makes a difference if ROOT!="/")
+ myrealdest = join(sep, relative_path)
+ # stat file once, test using S_* macros many times (faster that way)
+ mystat = os.lstat(mysrc)
+ mymode = mystat[stat.ST_MODE]
+ mymd5 = None
+ myto = None
+
+ if sys.hexversion >= 0x3030000:
+ mymtime = mystat.st_mtime_ns
+ else:
+ mymtime = mystat[stat.ST_MTIME]
+
+ if stat.S_ISREG(mymode):
+ mymd5 = perform_md5(mysrc, calc_prelink=calc_prelink)
+ elif stat.S_ISLNK(mymode):
+ # The file name of mysrc and the actual file that it points to
+ # will have earlier been forcefully converted to the 'merge'
+ # encoding if necessary, but the content of the symbolic link
+ # may need to be forcefully converted here.
+ myto = _os.readlink(_unicode_encode(mysrc,
+ encoding=_encodings['merge'], errors='strict'))
+ try:
+ myto = _unicode_decode(myto,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeDecodeError:
+ myto = _unicode_decode(myto, encoding=_encodings['merge'],
+ errors='replace')
+ myto = _unicode_encode(myto, encoding='ascii',
+ errors='backslashreplace')
+ myto = _unicode_decode(myto, encoding=_encodings['merge'],
+ errors='replace')
+ os.unlink(mysrc)
+ os.symlink(myto, mysrc)
+
+ mymd5 = md5(_unicode_encode(myto)).hexdigest()
+
+ protected = False
+ if stat.S_ISLNK(mymode) or stat.S_ISREG(mymode):
+ protected = self.isprotected(mydest)
+
+ if stat.S_ISREG(mymode) and \
+ mystat.st_size == 0 and \
+ os.path.basename(mydest).startswith(".keep"):
+ protected = False
+
+ destmd5 = None
+ mydest_link = None
+ # handy variables; mydest is the target object on the live filesystems;
+ # mysrc is the source object in the temporary install dir
+ try:
+ mydstat = os.lstat(mydest)
+ mydmode = mydstat.st_mode
+ if protected:
+ if stat.S_ISLNK(mydmode):
+ # Read symlink target as bytes, in case the
+ # target path has a bad encoding.
+ mydest_link = _os.readlink(
+ _unicode_encode(mydest,
+ encoding=_encodings['merge'],
+ errors='strict'))
+ mydest_link = _unicode_decode(mydest_link,
+ encoding=_encodings['merge'],
+ errors='replace')
+
+ # For protection of symlinks, the md5
+ # of the link target path string is used
+ # for cfgfiledict (symlinks are
+ # protected since bug #485598).
+ destmd5 = md5(_unicode_encode(mydest_link)).hexdigest()
+
+ elif stat.S_ISREG(mydmode):
+ destmd5 = perform_md5(mydest,
+ calc_prelink=calc_prelink)
+ except (FileNotFound, OSError) as e:
+ if isinstance(e, OSError) and e.errno != errno.ENOENT:
+ raise
+ #dest file doesn't exist
+ mydstat = None
+ mydmode = None
+ mydest_link = None
+ destmd5 = None
+
+ moveme = True
+ if protected:
+ mydest, protected, moveme = self._protect(cfgfiledict,
+ protect_if_modified, mymd5, myto, mydest,
+ myrealdest, mydmode, destmd5, mydest_link)
+
+ zing = "!!!"
+ if not moveme:
+ # confmem rejected this update
+ zing = "---"
+
+ if stat.S_ISLNK(mymode):
+ # we are merging a symbolic link
+ # Pass in the symlink target in order to bypass the
+ # os.readlink() call inside abssymlink(), since that
+ # call is unsafe if the merge encoding is not ascii
+ # or utf_8 (see bug #382021).
+ myabsto = abssymlink(mysrc, target=myto)
+
+ if myabsto.startswith(srcroot):
+ myabsto = myabsto[len(srcroot):]
+ myabsto = myabsto.lstrip(sep)
+ if self.settings and self.settings["D"]:
+ if myto.startswith(self.settings["D"]):
+ myto = myto[len(self.settings["D"])-1:]
+ # myrealto contains the path of the real file to which this symlink points.
+ # we can simply test for existence of this file to see if the target has been merged yet
+ myrealto = normalize_path(os.path.join(destroot, myabsto))
+ if mydmode is not None and stat.S_ISDIR(mydmode):
+ if not protected:
+ # we can't merge a symlink over a directory
+ newdest = self._new_backup_path(mydest)
+ msg = []
+ msg.append("")
+ msg.append(_("Installation of a symlink is blocked by a directory:"))
+ msg.append(" '%s'" % mydest)
+ msg.append(_("This symlink will be merged with a different name:"))
+ msg.append(" '%s'" % newdest)
+ msg.append("")
+ self._eerror("preinst", msg)
+ mydest = newdest
+
+ # if secondhand is None it means we're operating in "force" mode and should not create a second hand.
+ if (secondhand != None) and (not os.path.exists(myrealto)):
+ # either the target directory doesn't exist yet or the target file doesn't exist -- or
+ # the target is a broken symlink. We will add this file to our "second hand" and merge
+ # it later.
+ secondhand.append(mysrc[len(srcroot):])
+ continue
+ # unlinking no longer necessary; "movefile" will overwrite symlinks atomically and correctly
+ if moveme:
+ zing = ">>>"
+ mymtime = movefile(mysrc, mydest, newmtime=thismtime,
+ sstat=mystat, mysettings=self.settings,
+ encoding=_encodings['merge'])
+
+ try:
+ self._merged_path(mydest, os.lstat(mydest))
+ except OSError:
+ pass
+
+ if mymtime != None:
+ # Use lexists, since if the target happens to be a broken
+ # symlink then that should trigger an independent warning.
+ if not (os.path.lexists(myrealto) or
+ os.path.lexists(join(srcroot, myabsto))):
+ self._eqawarn('preinst',
+ [_("QA Notice: Symbolic link /%s points to /%s which does not exist.")
+ % (relative_path, myabsto)])
+
+ showMessage("%s %s -> %s\n" % (zing, mydest, myto))
+ if sys.hexversion >= 0x3030000:
+ outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime // 1000000000)+"\n")
+ else:
+ outfile.write("sym "+myrealdest+" -> "+myto+" "+str(mymtime)+"\n")
+ else:
+ showMessage(_("!!! Failed to move file.\n"),
+ level=logging.ERROR, noiselevel=-1)
+ showMessage("!!! %s -> %s\n" % (mydest, myto),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+ elif stat.S_ISDIR(mymode):
+ # we are merging a directory
+ if mydmode != None:
+ # destination exists
+
+ if bsd_chflags:
+ # Save then clear flags on dest.
+ dflags = mydstat.st_flags
+ if dflags != 0:
+ bsd_chflags.lchflags(mydest, 0)
+
+ if not stat.S_ISLNK(mydmode) and \
+ not os.access(mydest, os.W_OK):
+ pkgstuff = pkgsplit(self.pkg)
+ writemsg(_("\n!!! Cannot write to '%s'.\n") % mydest, noiselevel=-1)
+ writemsg(_("!!! Please check permissions and directories for broken symlinks.\n"))
+ writemsg(_("!!! You may start the merge process again by using ebuild:\n"))
+ writemsg("!!! ebuild "+self.settings["PORTDIR"]+"/"+self.cat+"/"+pkgstuff[0]+"/"+self.pkg+".ebuild merge\n")
+ writemsg(_("!!! And finish by running this: env-update\n\n"))
+ return 1
+
+ if stat.S_ISDIR(mydmode) or \
+ (stat.S_ISLNK(mydmode) and os.path.isdir(mydest)):
+ # a symlink to an existing directory will work for us; keep it:
+ showMessage("--- %s/\n" % mydest)
+ if bsd_chflags:
+ bsd_chflags.lchflags(mydest, dflags)
+ else:
+ # a non-directory and non-symlink-to-directory. Won't work for us. Move out of the way.
+ backup_dest = self._new_backup_path(mydest)
+ msg = []
+ msg.append("")
+ msg.append(_("Installation of a directory is blocked by a file:"))
+ msg.append(" '%s'" % mydest)
+ msg.append(_("This file will be renamed to a different name:"))
+ msg.append(" '%s'" % backup_dest)
+ msg.append("")
+ self._eerror("preinst", msg)
+ if movefile(mydest, backup_dest,
+ mysettings=self.settings,
+ encoding=_encodings['merge']) is None:
+ return 1
+ showMessage(_("bak %s %s.backup\n") % (mydest, mydest),
+ level=logging.ERROR, noiselevel=-1)
+ #now create our directory
+ try:
+ if self.settings.selinux_enabled():
+ _selinux_merge.mkdir(mydest, mysrc)
+ else:
+ os.mkdir(mydest)
+ except OSError as e:
+ # Error handling should be equivalent to
+ # portage.util.ensure_dirs() for cases
+ # like bug #187518.
+ if e.errno in (errno.EEXIST,):
+ pass
+ elif os.path.isdir(mydest):
+ pass
+ else:
+ raise
+ del e
+
+ if bsd_chflags:
+ bsd_chflags.lchflags(mydest, dflags)
+ os.chmod(mydest, mystat[0])
+ os.chown(mydest, mystat[4], mystat[5])
+ showMessage(">>> %s/\n" % mydest)
+ else:
+ try:
+ #destination doesn't exist
+ if self.settings.selinux_enabled():
+ _selinux_merge.mkdir(mydest, mysrc)
+ else:
+ os.mkdir(mydest)
+ except OSError as e:
+ # Error handling should be equivalent to
+ # portage.util.ensure_dirs() for cases
+ # like bug #187518.
+ if e.errno in (errno.EEXIST,):
+ pass
+ elif os.path.isdir(mydest):
+ pass
+ else:
+ raise
+ del e
+ os.chmod(mydest, mystat[0])
+ os.chown(mydest, mystat[4], mystat[5])
+ showMessage(">>> %s/\n" % mydest)
+
+ try:
+ self._merged_path(mydest, os.lstat(mydest))
+ except OSError:
+ pass
+
+ outfile.write("dir "+myrealdest+"\n")
+ # recurse and merge this directory
+ mergelist.extend(join(relative_path, child) for child in
+ os.listdir(join(srcroot, relative_path)))
+
+ elif stat.S_ISREG(mymode):
+ # we are merging a regular file
+ if not protected and \
+ mydmode is not None and stat.S_ISDIR(mydmode):
+ # install of destination is blocked by an existing directory with the same name
+ newdest = self._new_backup_path(mydest)
+ msg = []
+ msg.append("")
+ msg.append(_("Installation of a regular file is blocked by a directory:"))
+ msg.append(" '%s'" % mydest)
+ msg.append(_("This file will be merged with a different name:"))
+ msg.append(" '%s'" % newdest)
+ msg.append("")
+ self._eerror("preinst", msg)
+ mydest = newdest
+
+ # Whether config protection applies or not, we merge the new file the
+ # same way, unless moveme=0 (blocking directory).
+ if moveme:
+ # Create hardlinks only for source files that already exist
+ # as hardlinks (having identical st_dev and st_ino).
+ hardlink_key = (mystat.st_dev, mystat.st_ino)
+
+ hardlink_candidates = self._hardlink_merge_map.get(hardlink_key)
+ if hardlink_candidates is None:
+ hardlink_candidates = []
+ self._hardlink_merge_map[hardlink_key] = hardlink_candidates
+
+ mymtime = movefile(mysrc, mydest, newmtime=thismtime,
+ sstat=mystat, mysettings=self.settings,
+ hardlink_candidates=hardlink_candidates,
+ encoding=_encodings['merge'])
+ if mymtime is None:
+ return 1
+ hardlink_candidates.append(mydest)
+ zing = ">>>"
+
+ try:
+ self._merged_path(mydest, os.lstat(mydest))
+ except OSError:
+ pass
+
+ if mymtime != None:
+ if sys.hexversion >= 0x3030000:
+ outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime // 1000000000)+"\n")
+ else:
+ outfile.write("obj "+myrealdest+" "+mymd5+" "+str(mymtime)+"\n")
+ showMessage("%s %s\n" % (zing,mydest))
+ else:
+ # we are merging a fifo or device node
+ zing = "!!!"
+ if mydmode is None:
+ # destination doesn't exist
+ if movefile(mysrc, mydest, newmtime=thismtime,
+ sstat=mystat, mysettings=self.settings,
+ encoding=_encodings['merge']) is not None:
+ zing = ">>>"
+
+ try:
+ self._merged_path(mydest, os.lstat(mydest))
+ except OSError:
+ pass
+
+ else:
+ return 1
+ if stat.S_ISFIFO(mymode):
+ outfile.write("fif %s\n" % myrealdest)
+ else:
+ outfile.write("dev %s\n" % myrealdest)
+ showMessage(zing + " " + mydest + "\n")
+
+ def _protect(self, cfgfiledict, protect_if_modified, src_md5,
+ src_link, dest, dest_real, dest_mode, dest_md5, dest_link):
+
+ move_me = True
+ protected = True
+ force = False
+ k = False
+ if self._installed_instance is not None:
+ k = self._installed_instance._match_contents(dest_real)
+ if k is not False:
+ if dest_mode is None:
+ # If the file doesn't exist, then it may
+ # have been deleted or renamed by the
+ # admin. Therefore, force the file to be
+ # merged with a ._cfg name, so that the
+ # admin will be prompted for this update
+ # (see bug #523684).
+ force = True
+
+ elif protect_if_modified:
+ data = self._installed_instance.getcontents()[k]
+ if data[0] == "obj" and data[2] == dest_md5:
+ protected = False
+ elif data[0] == "sym" and data[2] == dest_link:
+ protected = False
+
+ if protected and dest_mode is not None:
+ # we have a protection path; enable config file management.
+ if src_md5 == dest_md5:
+ protected = False
+
+ elif src_md5 == cfgfiledict.get(dest_real, [None])[0]:
+ # An identical update has previously been
+ # merged. Skip it unless the user has chosen
+ # --noconfmem.
+ move_me = protected = bool(cfgfiledict["IGNORE"])
+
+ if protected and \
+ (dest_link is not None or src_link is not None) and \
+ dest_link != src_link:
+ # If either one is a symlink, and they are not
+ # identical symlinks, then force config protection.
+ force = True
+
+ if move_me:
+ # Merging a new file, so update confmem.
+ cfgfiledict[dest_real] = [src_md5]
+ elif dest_md5 == cfgfiledict.get(dest_real, [None])[0]:
+ # A previously remembered update has been
+ # accepted, so it is removed from confmem.
+ del cfgfiledict[dest_real]
+
+ if protected and move_me:
+ dest = new_protect_filename(dest,
+ newmd5=(dest_link or src_md5),
+ force=force)
+
+ return dest, protected, move_me
+
+ def _merged_path(self, path, lstatobj, exists=True):
+ previous_path = self._device_path_map.get(lstatobj.st_dev)
+ if previous_path is None or previous_path is False or \
+ (exists and len(path) < len(previous_path)):
+ if exists:
+ self._device_path_map[lstatobj.st_dev] = path
+ else:
+ # This entry is used to indicate that we've unmerged
+ # a file from this device, and later, this entry is
+ # replaced by a parent directory.
+ self._device_path_map[lstatobj.st_dev] = False
+
+ def _post_merge_sync(self):
+ """
+ Call this after merge or unmerge, in order to sync relevant files to
+ disk and avoid data-loss in the event of a power failure. This method
+ does nothing if FEATURES=merge-sync is disabled.
+ """
+ if not self._device_path_map or \
+ "merge-sync" not in self.settings.features:
+ return
+
+ returncode = None
+ if platform.system() == "Linux":
+
+ paths = []
+ for path in self._device_path_map.values():
+ if path is not False:
+ paths.append(path)
+ paths = tuple(paths)
+
+ proc = SyncfsProcess(paths=paths,
+ scheduler=(self._scheduler or
+ portage._internal_caller and global_event_loop() or
+ EventLoop(main=False)))
+ proc.start()
+ returncode = proc.wait()
+
+ if returncode is None or returncode != os.EX_OK:
+ try:
+ proc = subprocess.Popen(["sync"])
+ except EnvironmentError:
+ pass
+ else:
+ proc.wait()
+
+ @_slot_locked
+ def merge(self, mergeroot, inforoot, myroot=None, myebuild=None, cleanup=0,
+ mydbapi=None, prev_mtimes=None, counter=None):
+ """
+ @param myroot: ignored, self._eroot is used instead
+ """
+ myroot = None
+ retval = -1
+ parallel_install = "parallel-install" in self.settings.features
+ if not parallel_install:
+ self.lockdb()
+ self.vartree.dbapi._bump_mtime(self.mycpv)
+ if self._scheduler is None:
+ self._scheduler = SchedulerInterface(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False))
+ try:
+ retval = self.treewalk(mergeroot, myroot, inforoot, myebuild,
+ cleanup=cleanup, mydbapi=mydbapi, prev_mtimes=prev_mtimes,
+ counter=counter)
+
+ # If PORTAGE_BUILDDIR doesn't exist, then it probably means
+ # fail-clean is enabled, and the success/die hooks have
+ # already been called by EbuildPhase.
+ if os.path.isdir(self.settings['PORTAGE_BUILDDIR']):
+
+ if retval == os.EX_OK:
+ phase = 'success_hooks'
+ else:
+ phase = 'die_hooks'
+
+ ebuild_phase = MiscFunctionsProcess(
+ background=False, commands=[phase],
+ scheduler=self._scheduler, settings=self.settings)
+ ebuild_phase.start()
+ ebuild_phase.wait()
+ self._elog_process()
+
+ if 'noclean' not in self.settings.features and \
+ (retval == os.EX_OK or \
+ 'fail-clean' in self.settings.features):
+ if myebuild is None:
+ myebuild = os.path.join(inforoot, self.pkg + ".ebuild")
+
+ doebuild_environment(myebuild, "clean",
+ settings=self.settings, db=mydbapi)
+ phase = EbuildPhase(background=False, phase="clean",
+ scheduler=self._scheduler, settings=self.settings)
+ phase.start()
+ phase.wait()
+ finally:
+ self.settings.pop('REPLACING_VERSIONS', None)
+ if self.vartree.dbapi._linkmap is None:
+ # preserve-libs is entirely disabled
+ pass
+ else:
+ self.vartree.dbapi._linkmap._clear_cache()
+ self.vartree.dbapi._bump_mtime(self.mycpv)
+ if not parallel_install:
+ self.unlockdb()
+
+ if retval == os.EX_OK and self._postinst_failure:
+ retval = portage.const.RETURNCODE_POSTINST_FAILURE
+
+ return retval
+
+ def getstring(self,name):
+ "returns contents of a file with whitespace converted to spaces"
+ if not os.path.exists(self.dbdir+"/"+name):
+ return ""
+ with io.open(
+ _unicode_encode(os.path.join(self.dbdir, name),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace'
+ ) as f:
+ mydata = f.read().split()
+ return " ".join(mydata)
+
+ def copyfile(self,fname):
+ shutil.copyfile(fname,self.dbdir+"/"+os.path.basename(fname))
+
+ def getfile(self,fname):
+ if not os.path.exists(self.dbdir+"/"+fname):
+ return ""
+ with io.open(_unicode_encode(os.path.join(self.dbdir, fname),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace'
+ ) as f:
+ return f.read()
+
+ def setfile(self,fname,data):
+ kwargs = {}
+ if fname == 'environment.bz2' or not isinstance(data, basestring):
+ kwargs['mode'] = 'wb'
+ else:
+ kwargs['mode'] = 'w'
+ kwargs['encoding'] = _encodings['repo.content']
+ write_atomic(os.path.join(self.dbdir, fname), data, **kwargs)
+
+ def getelements(self,ename):
+ if not os.path.exists(self.dbdir+"/"+ename):
+ return []
+ with io.open(_unicode_encode(
+ os.path.join(self.dbdir, ename),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace'
+ ) as f:
+ mylines = f.readlines()
+ myreturn = []
+ for x in mylines:
+ for y in x[:-1].split():
+ myreturn.append(y)
+ return myreturn
+
+ def setelements(self,mylist,ename):
+ with io.open(_unicode_encode(
+ os.path.join(self.dbdir, ename),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='backslashreplace') as f:
+ for x in mylist:
+ f.write("%s\n" % x)
+
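+ # A minimal round-trip sketch for setelements()/getelements() (illustrative
+ # only; "mylink" is a hypothetical dblink instance and "PROVIDES" a
+ # hypothetical vardb file name):
+ #
+ #   >>> mylink.setelements(["x11-libs/libX11", "x11-libs/libXext"], "PROVIDES")
+ #   >>> mylink.getelements("PROVIDES")
+ #   ['x11-libs/libX11', 'x11-libs/libXext']
+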
+ def isregular(self):
+ "Is this a regular package (does it have a CATEGORY file? A dblink can be virtual *and* regular)"
+ return os.path.exists(os.path.join(self.dbdir, "CATEGORY"))
+
+ def _pre_merge_backup(self, backup_dblink, downgrade):
+
+ if ("unmerge-backup" in self.settings.features or
+ (downgrade and "downgrade-backup" in self.settings.features)):
+ return self._quickpkg_dblink(backup_dblink, False, None)
+
+ return os.EX_OK
+
+ def _pre_unmerge_backup(self, background):
+
+ if "unmerge-backup" in self.settings.features:
+ logfile = None
+ if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
+ logfile = self.settings.get("PORTAGE_LOG_FILE")
+ return self._quickpkg_dblink(self, background, logfile)
+
+ return os.EX_OK
+
+ def _quickpkg_dblink(self, backup_dblink, background, logfile):
+
+ build_time = backup_dblink.getfile('BUILD_TIME')
+ try:
+ build_time = long(build_time.strip())
+ except ValueError:
+ build_time = 0
+
+ trees = QueryCommand.get_db()[self.settings["EROOT"]]
+ bintree = trees["bintree"]
+
+ for binpkg in reversed(
+ bintree.dbapi.match('={}'.format(backup_dblink.mycpv))):
+ if binpkg.build_time == build_time:
+ return os.EX_OK
+
+ self.lockdb()
+ try:
+
+ if not backup_dblink.exists():
+ # It got unmerged by a concurrent process.
+ return os.EX_OK
+
+ # Call quickpkg for support of QUICKPKG_DEFAULT_OPTS and related settings.
+ quickpkg_binary = os.path.join(self.settings["PORTAGE_BIN_PATH"],
+ "quickpkg")
+
+ if not os.access(quickpkg_binary, os.X_OK):
+ # If not running from the source tree, use PATH.
+ quickpkg_binary = find_binary("quickpkg")
+ if quickpkg_binary is None:
+ self._display_merge(
+ _("%s: command not found") % "quickpkg",
+ level=logging.ERROR, noiselevel=-1)
+ return 127
+
+ # Let quickpkg inherit the global vartree config's env.
+ env = dict(self.vartree.settings.items())
+ env["__PORTAGE_INHERIT_VARDB_LOCK"] = "1"
+
+ pythonpath = [x for x in env.get('PYTHONPATH', '').split(":") if x]
+ if not pythonpath or \
+ not os.path.samefile(pythonpath[0], portage._pym_path):
+ pythonpath.insert(0, portage._pym_path)
+ env['PYTHONPATH'] = ":".join(pythonpath)
+
+ quickpkg_proc = SpawnProcess(
+ args=[portage._python_interpreter, quickpkg_binary,
+ "=%s" % (backup_dblink.mycpv,)],
+ background=background, env=env,
+ scheduler=self._scheduler, logfile=logfile)
+ quickpkg_proc.start()
+
+ return quickpkg_proc.wait()
+
+ finally:
+ self.unlockdb()
+
+def merge(mycat, mypkg, pkgloc, infloc,
+ myroot=None, settings=None, myebuild=None,
+ mytree=None, mydbapi=None, vartree=None, prev_mtimes=None, blockers=None,
+ scheduler=None, fd_pipes=None):
+ """
+ @param myroot: ignored, settings['EROOT'] is used instead
+ """
+ myroot = None
+ if settings is None:
+ raise TypeError("settings argument is required")
+ if not os.access(settings['EROOT'], os.W_OK):
+ writemsg(_("Permission denied: access('%s', W_OK)\n") % settings['EROOT'],
+ noiselevel=-1)
+ return errno.EACCES
+ background = (settings.get('PORTAGE_BACKGROUND') == '1')
+ merge_task = MergeProcess(
+ mycat=mycat, mypkg=mypkg, settings=settings,
+ treetype=mytree, vartree=vartree,
+ scheduler=(scheduler or portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
+ background=background, blockers=blockers, pkgloc=pkgloc,
+ infloc=infloc, myebuild=myebuild, mydbapi=mydbapi,
+ prev_mtimes=prev_mtimes, logfile=settings.get('PORTAGE_LOG_FILE'),
+ fd_pipes=fd_pipes)
+ merge_task.start()
+ retcode = merge_task.wait()
+ return retcode
+
+def unmerge(cat, pkg, myroot=None, settings=None,
+ mytrimworld=None, vartree=None,
+ ldpath_mtimes=None, scheduler=None):
+ """
+ @param myroot: ignored, settings['EROOT'] is used instead
+ @param mytrimworld: ignored
+ """
+ myroot = None
+ if settings is None:
+ raise TypeError("settings argument is required")
+ mylink = dblink(cat, pkg, settings=settings, treetype="vartree",
+ vartree=vartree, scheduler=scheduler)
+ vartree = mylink.vartree
+ parallel_install = "parallel-install" in settings.features
+ if not parallel_install:
+ mylink.lockdb()
+ try:
+ if mylink.exists():
+ retval = mylink.unmerge(ldpath_mtimes=ldpath_mtimes)
+ if retval == os.EX_OK:
+ mylink.lockdb()
+ try:
+ mylink.delete()
+ finally:
+ mylink.unlockdb()
+ return retval
+ return os.EX_OK
+ finally:
+ if vartree.dbapi._linkmap is None:
+ # preserve-libs is entirely disabled
+ pass
+ else:
+ vartree.dbapi._linkmap._clear_cache()
+ if not parallel_install:
+ mylink.unlockdb()
+
+def write_contents(contents, root, f):
+ """
+ Write contents to any file-like object. The file will be left open.
+ """
+ root_len = len(root) - 1
+ for filename in sorted(contents):
+ entry_data = contents[filename]
+ entry_type = entry_data[0]
+ relative_filename = filename[root_len:]
+ if entry_type == "obj":
+ entry_type, mtime, md5sum = entry_data
+ line = "%s %s %s %s\n" % \
+ (entry_type, relative_filename, md5sum, mtime)
+ elif entry_type == "sym":
+ entry_type, mtime, link = entry_data
+ line = "%s %s -> %s %s\n" % \
+ (entry_type, relative_filename, link, mtime)
+ else: # dir, dev, fif
+ line = "%s %s\n" % (entry_type, relative_filename)
+ f.write(line)
+
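+# A minimal usage sketch for write_contents() (illustrative only; paths, md5
+# and mtime values are hypothetical, and entries are shaped exactly as this
+# function unpacks them):
+#
+#   >>> import sys
+#   >>> contents = {
+#   ...     "/usr/bin": ("dir",),
+#   ...     "/usr/bin/foo": ("obj", "1577836800", "d41d8cd98f00b204e9800998ecf8427e"),
+#   ... }
+#   >>> write_contents(contents, "/", sys.stdout)
+#   dir /usr/bin
+#   obj /usr/bin/foo d41d8cd98f00b204e9800998ecf8427e 1577836800
+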
+def tar_contents(contents, root, tar, protect=None, onProgress=None,
+ xattrs=False):
+ os = _os_merge
+ encoding = _encodings['merge']
+
+ try:
+ for x in contents:
+ _unicode_encode(x,
+ encoding=_encodings['merge'],
+ errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ for x in contents:
+ _unicode_encode(x,
+ encoding=_encodings['fs'],
+ errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+ encoding = _encodings['fs']
+
+ tar.encoding = encoding
+ root = normalize_path(root).rstrip(os.path.sep) + os.path.sep
+ id_strings = {}
+ maxval = len(contents)
+ curval = 0
+ if onProgress:
+ onProgress(maxval, 0)
+ paths = list(contents)
+ paths.sort()
+ for path in paths:
+ curval += 1
+ try:
+ lst = os.lstat(path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ if onProgress:
+ onProgress(maxval, curval)
+ continue
+ contents_type = contents[path][0]
+ if path.startswith(root):
+ arcname = "./" + path[len(root):]
+ else:
+ raise ValueError("invalid root argument: '%s'" % root)
+ live_path = path
+ if 'dir' == contents_type and \
+ not stat.S_ISDIR(lst.st_mode) and \
+ os.path.isdir(live_path):
+ # Even though this was a directory in the original ${D}, it exists
+ # as a symlink to a directory in the live filesystem. It must be
+ # recorded as a real directory in the tar file to ensure that tar
+ # can properly extract its children.
+ live_path = os.path.realpath(live_path)
+ lst = os.lstat(live_path)
+
+ # Since os.lstat() inside TarFile.gettarinfo() can trigger a
+ # UnicodeEncodeError when sys.getfilesystemencoding() returns
+ # something other than utf_8 (as in bug #388773),
+ # we implement the needed functionality here, using the result
+ # of our successful lstat call. An alternative to this would be
+ # to pass in the fileobj argument to TarFile.gettarinfo(), so
+ # that it could use fstat instead of lstat. However, that would
+ # have the unwanted effect of dereferencing symlinks.
+
+ tarinfo = tar.tarinfo()
+ tarinfo.name = arcname
+ tarinfo.mode = lst.st_mode
+ tarinfo.uid = lst.st_uid
+ tarinfo.gid = lst.st_gid
+ tarinfo.size = 0
+ tarinfo.mtime = lst.st_mtime
+ tarinfo.linkname = ""
+ if stat.S_ISREG(lst.st_mode):
+ inode = (lst.st_ino, lst.st_dev)
+ if (lst.st_nlink > 1 and
+ inode in tar.inodes and
+ arcname != tar.inodes[inode]):
+ tarinfo.type = tarfile.LNKTYPE
+ tarinfo.linkname = tar.inodes[inode]
+ else:
+ tar.inodes[inode] = arcname
+ tarinfo.type = tarfile.REGTYPE
+ tarinfo.size = lst.st_size
+ elif stat.S_ISDIR(lst.st_mode):
+ tarinfo.type = tarfile.DIRTYPE
+ elif stat.S_ISLNK(lst.st_mode):
+ tarinfo.type = tarfile.SYMTYPE
+ tarinfo.linkname = os.readlink(live_path)
+ else:
+ continue
+ try:
+ tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
+ except KeyError:
+ pass
+ try:
+ tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
+ except KeyError:
+ pass
+
+ if stat.S_ISREG(lst.st_mode):
+ if protect and protect(path):
+ # Create an empty file as a placeholder in order to avoid
+ # potential collision-protect issues.
+ f = tempfile.TemporaryFile()
+ f.write(_unicode_encode(
+ "# empty file because --include-config=n " + \
+ "when `quickpkg` was used\n"))
+ f.flush()
+ f.seek(0)
+ tarinfo.size = os.fstat(f.fileno()).st_size
+ tar.addfile(tarinfo, f)
+ f.close()
+ else:
+ path_bytes = _unicode_encode(path,
+ encoding=encoding,
+ errors='strict')
+
+ if xattrs:
+ # Compatible with GNU tar, which saves the xattrs
+ # under the SCHILY.xattr namespace.
+ for k in xattr.list(path_bytes):
+ tarinfo.pax_headers['SCHILY.xattr.' +
+ _unicode_decode(k)] = _unicode_decode(
+ xattr.get(path_bytes, _unicode_encode(k)))
+
+ with open(path_bytes, 'rb') as f:
+ tar.addfile(tarinfo, f)
+
+ else:
+ tar.addfile(tarinfo)
+ if onProgress:
+ onProgress(maxval, curval)
diff --git a/lib/portage/dbapi/virtual.py b/lib/portage/dbapi/virtual.py
new file mode 100644
index 000000000..3f7e6c221
--- /dev/null
+++ b/lib/portage/dbapi/virtual.py
@@ -0,0 +1,232 @@
+# Copyright 1998-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+from portage.dbapi import dbapi
+from portage.dbapi.dep_expand import dep_expand
+from portage.versions import cpv_getkey, _pkg_str
+
+class fakedbapi(dbapi):
+ """A fake dbapi that allows consumers to inject/remove packages to/from it.
+ portage.settings is required to maintain the dbAPI.
+ """
+ def __init__(self, settings=None, exclusive_slots=True,
+ multi_instance=False):
+ """
+ @param exclusive_slots: When True, injecting a package with SLOT
+ metadata causes an existing package in the same slot to be
+ automatically removed (default is True).
+ @type exclusive_slots: Boolean
+ @param multi_instance: When True, multiple instances with the
+ same cpv may be stored simultaneously, as long as they are
+ distinguishable (default is False).
+ @type multi_instance: Boolean
+ """
+ self._exclusive_slots = exclusive_slots
+ self.cpvdict = {}
+ self.cpdict = {}
+ if settings is None:
+ from portage import settings
+ self.settings = settings
+ self._match_cache = {}
+ self._set_multi_instance(multi_instance)
+
+ def _set_multi_instance(self, multi_instance):
+ """
+ Enable or disable multi_instance mode. This should be done before any
+ packages are injected, so that all packages are indexed with
+ the same implementation of self._instance_key.
+ """
+ if self.cpvdict:
+ raise AssertionError("_set_multi_instance called after "
+ "packages have already been added")
+ self._multi_instance = multi_instance
+ if multi_instance:
+ self._instance_key = self._instance_key_multi_instance
+ else:
+ self._instance_key = self._instance_key_cpv
+
+ def _instance_key_cpv(self, cpv, support_string=False):
+ return cpv
+
+ def _instance_key_multi_instance(self, cpv, support_string=False):
+ try:
+ return (cpv, cpv.build_id, cpv.file_size, cpv.build_time,
+ cpv.mtime)
+ except AttributeError:
+ if not support_string:
+ raise
+
+ # Fallback for interfaces such as aux_get where API consumers
+ # may pass in a plain string.
+ latest = None
+ for pkg in self.cp_list(cpv_getkey(cpv)):
+ if pkg == cpv and (
+ latest is None or
+ latest.build_time < pkg.build_time):
+ latest = pkg
+
+ if latest is not None:
+ return (latest, latest.build_id, latest.file_size,
+ latest.build_time, latest.mtime)
+
+ raise KeyError(cpv)
+
+ def clear(self):
+ """
+ Remove all packages.
+ """
+ self._clear_cache()
+ self.cpvdict.clear()
+ self.cpdict.clear()
+
+ def _clear_cache(self):
+ if self._categories is not None:
+ self._categories = None
+ if self._match_cache:
+ self._match_cache = {}
+
+ def match(self, origdep, use_cache=1):
+ atom = dep_expand(origdep, mydb=self, settings=self.settings)
+ cache_key = (atom, atom.unevaluated_atom)
+ result = self._match_cache.get(cache_key)
+ if result is not None:
+ return result[:]
+ result = list(self._iter_match(atom, self.cp_list(atom.cp)))
+ self._match_cache[cache_key] = result
+ return result[:]
+
+ def cpv_exists(self, mycpv, myrepo=None):
+ try:
+ return self._instance_key(mycpv,
+ support_string=True) in self.cpvdict
+ except KeyError:
+ # _instance_key failure
+ return False
+
+ def cp_list(self, mycp, use_cache=1, myrepo=None):
+ # NOTE: Cache can be safely shared with the match cache, since the
+ # match cache uses the result from dep_expand for the cache_key.
+ cache_key = (mycp, mycp)
+ cachelist = self._match_cache.get(cache_key)
+ if cachelist is not None:
+ return cachelist[:]
+ cpv_list = self.cpdict.get(mycp)
+ if cpv_list is None:
+ cpv_list = []
+ self._cpv_sort_ascending(cpv_list)
+ self._match_cache[cache_key] = cpv_list
+ return cpv_list[:]
+
+ def cp_all(self, sort=False):
+ return sorted(self.cpdict) if sort else list(self.cpdict)
+
+ def cpv_all(self):
+ if self._multi_instance:
+ return [x[0] for x in self.cpvdict]
+ else:
+ return list(self.cpvdict)
+
+ def cpv_inject(self, mycpv, metadata=None):
+ """Adds a cpv to the list of available packages. See the
+ exclusive_slots constructor parameter for behavior with
+ respect to SLOT metadata.
+ @param mycpv: cpv for the package to inject
+ @type mycpv: str
+ @param metadata: dictionary of raw metadata for aux_get() calls
+ @type metadata: dict
+ """
+ self._clear_cache()
+
+ try:
+ mycp = mycpv.cp
+ except AttributeError:
+ mycp = None
+ try:
+ myslot = mycpv.slot
+ except AttributeError:
+ myslot = None
+
+ if mycp is None or \
+ (myslot is None and metadata is not None and metadata.get('SLOT')):
+ if metadata is None:
+ mycpv = _pkg_str(mycpv, db=self)
+ else:
+ mycpv = _pkg_str(mycpv, metadata=metadata,
+ settings=self.settings, db=self)
+
+ mycp = mycpv.cp
+ try:
+ myslot = mycpv.slot
+ except AttributeError:
+ pass
+
+ instance_key = self._instance_key(mycpv)
+ self.cpvdict[instance_key] = metadata
+ if not self._exclusive_slots:
+ myslot = None
+ if myslot and mycp in self.cpdict:
+ # If necessary, remove another package in the same SLOT.
+ for cpv in self.cpdict[mycp]:
+ if instance_key != self._instance_key(cpv):
+ try:
+ other_slot = cpv.slot
+ except AttributeError:
+ pass
+ else:
+ if myslot == other_slot:
+ self.cpv_remove(cpv)
+ break
+
+ cp_list = self.cpdict.get(mycp, [])
+ cp_list = [x for x in cp_list
+ if self._instance_key(x) != instance_key]
+ cp_list.append(mycpv)
+ self.cpdict[mycp] = cp_list
+
+ def cpv_remove(self,mycpv):
+ """Removes a cpv from the list of available packages."""
+ self._clear_cache()
+ mycp = cpv_getkey(mycpv)
+ instance_key = self._instance_key(mycpv)
+ self.cpvdict.pop(instance_key, None)
+ cp_list = self.cpdict.get(mycp)
+ if cp_list is not None:
+ cp_list = [x for x in cp_list
+ if self._instance_key(x) != instance_key]
+ if cp_list:
+ self.cpdict[mycp] = cp_list
+ else:
+ del self.cpdict[mycp]
+
+ def aux_get(self, mycpv, wants, myrepo=None):
+ metadata = self.cpvdict.get(
+ self._instance_key(mycpv, support_string=True))
+ if metadata is None:
+ raise KeyError(mycpv)
+ return [metadata.get(x, "") for x in wants]
+
+ def aux_update(self, cpv, values):
+ self._clear_cache()
+ metadata = self.cpvdict.get(
+ self._instance_key(cpv, support_string=True))
+ if metadata is None:
+ raise KeyError(cpv)
+ metadata.update(values)
+
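+# A minimal usage sketch for fakedbapi (illustrative only; the package is
+# hypothetical, and the default constructor falls back to the global
+# portage.settings as shown in __init__ above):
+#
+#   >>> fakedb = fakedbapi()
+#   >>> fakedb.cpv_inject("sys-apps/portage-2.1", metadata={"SLOT": "0"})
+#   >>> fakedb.match("sys-apps/portage")
+#   ['sys-apps/portage-2.1']
+#   >>> fakedb.aux_get("sys-apps/portage-2.1", ["SLOT"])
+#   ['0']
+#
+# Because exclusive_slots defaults to True, injecting another version with
+# the same SLOT would replace the entry above.
+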
+class testdbapi(object):
+ """A dbapi instance with completely fake functions to get by without hitting disk.
+ TODO(antarus):
+ This class really needs to be rewritten to have better stubs; but these work for now.
+ The dbapi classes themselves need unit tests...and that will be a lot of work.
+ """
+
+ def __init__(self):
+ self.cpvs = {}
+ def f(*args, **kwargs):
+ return True
+ fake_api = dir(dbapi)
+ for call in fake_api:
+ if not hasattr(self, call):
+ setattr(self, call, f)
diff --git a/lib/portage/debug.py b/lib/portage/debug.py
new file mode 100644
index 000000000..193e62291
--- /dev/null
+++ b/lib/portage/debug.py
@@ -0,0 +1,120 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+import sys
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+import portage.const
+from portage.util import writemsg
+
+def set_trace(on=True):
+ if on:
+ t = trace_handler()
+ threading.settrace(t.event_handler)
+ sys.settrace(t.event_handler)
+ else:
+ sys.settrace(None)
+ threading.settrace(None)
+
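+# A minimal usage sketch (illustrative): wrap a suspect code path with
+# set_trace() so that call/return/exception events are logged through
+# portage.util.writemsg.
+#
+#   >>> from portage.debug import set_trace
+#   >>> set_trace(True)
+#   >>> # ... run the code to be traced ...
+#   >>> set_trace(False)
+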
+class trace_handler(object):
+
+ def __init__(self):
+ python_system_paths = []
+ for x in sys.path:
+ if os.path.basename(x) == "python%s.%s" % sys.version_info[:2]:
+ python_system_paths.append(x)
+
+ self.ignore_prefixes = []
+ for x in python_system_paths:
+ self.ignore_prefixes.append(x + os.sep)
+
+ self.trim_filename = prefix_trimmer(os.path.join(portage.const.PORTAGE_BASE_PATH, "lib") + os.sep).trim
+ self.show_local_lines = False
+ self.max_repr_length = 200
+
+ def event_handler(self, *args):
+ frame, event, _arg = args
+ if "line" == event:
+ if self.show_local_lines:
+ self.trace_line(*args)
+ else:
+ if not self.ignore_filename(frame.f_code.co_filename):
+ self.trace_event(*args)
+ return self.event_handler
+
+ def trace_event(self, frame, event, arg):
+ writemsg("%s line=%d name=%s event=%s %slocals=%s\n" % \
+ (self.trim_filename(frame.f_code.co_filename),
+ frame.f_lineno,
+ frame.f_code.co_name,
+ event,
+ self.arg_repr(frame, event, arg),
+ self.locals_repr(frame, event, arg)))
+
+ def arg_repr(self, _frame, event, arg):
+ my_repr = None
+ if "return" == event:
+ my_repr = repr(arg)
+ if len(my_repr) > self.max_repr_length:
+ my_repr = "'omitted'"
+ return "value=%s " % my_repr
+ elif "exception" == event:
+ my_repr = repr(arg[1])
+ if len(my_repr) > self.max_repr_length:
+ my_repr = "'omitted'"
+ return "type=%s value=%s " % (arg[0], my_repr)
+
+ return ""
+
+ def trace_line(self, frame, _event, _arg):
+ writemsg("%s line=%d\n" % (self.trim_filename(frame.f_code.co_filename), frame.f_lineno))
+
+ def ignore_filename(self, filename):
+ if filename:
+ for x in self.ignore_prefixes:
+ if filename.startswith(x):
+ return True
+ return False
+
+ def locals_repr(self, frame, _event, _arg):
+ """Create a representation of the locals dict that is suitable for
+ tracing output."""
+
+ my_locals = frame.f_locals.copy()
+
+ # prevent unsafe __repr__ call on self when __init__ is called
+ # (method calls aren't safe until after __init__ has completed).
+ if frame.f_code.co_name == "__init__" and "self" in my_locals:
+ my_locals["self"] = "omitted"
+
+ # We omit items that will lead to unreasonable bloat of the trace
+ # output (and resulting log file).
+ for k, v in my_locals.items():
+ my_repr = repr(v)
+ if len(my_repr) > self.max_repr_length:
+ my_locals[k] = "omitted"
+ return my_locals
+
+class prefix_trimmer(object):
+ def __init__(self, prefix):
+ self.prefix = prefix
+ self.cut_index = len(prefix)
+ self.previous = None
+ self.previous_trimmed = None
+
+ def trim(self, s):
+ """Remove a prefix from the string and return the result.
+ The previous result is automatically cached."""
+ if s == self.previous:
+ return self.previous_trimmed
+ else:
+ if s.startswith(self.prefix):
+ self.previous_trimmed = s[self.cut_index:]
+ else:
+ self.previous_trimmed = s
+ return self.previous_trimmed
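+
+# A minimal usage sketch for prefix_trimmer (illustrative paths):
+#
+#   >>> trim = prefix_trimmer("/usr/lib/portage/lib/").trim
+#   >>> trim("/usr/lib/portage/lib/portage/debug.py")
+#   'portage/debug.py'
+#   >>> trim("/unrelated/path")
+#   '/unrelated/path'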
diff --git a/lib/portage/dep/__init__.py b/lib/portage/dep/__init__.py
new file mode 100644
index 000000000..26595da47
--- /dev/null
+++ b/lib/portage/dep/__init__.py
@@ -0,0 +1,2874 @@
+# deps.py -- Portage dependency resolution functions
+# Copyright 2003-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = [
+ 'Atom', 'best_match_to_list', 'cpvequal',
+ 'dep_getcpv', 'dep_getkey', 'dep_getslot',
+ 'dep_getusedeps', 'dep_opconvert', 'flatten',
+ 'get_operator', 'isjustname', 'isspecific',
+ 'isvalidatom', 'match_from_list', 'match_to_list',
+ 'paren_enclose', 'paren_normalize', 'paren_reduce',
+ 'remove_slot', 'strip_empty', 'use_reduce',
+ '_repo_separator', '_slot_separator',
+]
+
+import re, sys
+import warnings
+from itertools import chain
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.util:cmp_sort_key,writemsg',
+)
+
+from portage import _encodings, _unicode_decode, _unicode_encode
+from portage.eapi import _get_eapi_attrs
+from portage.exception import InvalidAtom, InvalidData, InvalidDependString
+from portage.localization import _
+from portage.versions import catpkgsplit, catsplit, \
+ vercmp, ververify, _cp, _cpv, _pkg_str, _slot, _unknown_repo, _vr
+import portage.cache.mappings
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+ _unicode = str
+else:
+ _unicode = unicode
+
+# \w is [a-zA-Z0-9_]
+
+# PMS 3.1.3: A slot name may contain any of the characters [A-Za-z0-9+_.-].
+# It must not begin with a hyphen or a dot.
+_slot_separator = ":"
+# loosely match SLOT, which may have an optional ABI part
+_slot_loose = r'([\w+./*=-]+)'
+
+_use = r'\[.*\]'
+_op = r'([=~]|[><]=?)'
+
+_repo_separator = "::"
+_repo_name = r'[\w][\w-]*'
+_repo_name_re = re.compile('^' + _repo_name + '$', re.UNICODE)
+_repo = r'(?:' + _repo_separator + '(' + _repo_name + ')' + ')?'
+
+_extended_cat = r'[\w+*][\w+.*-]*'
+
+_slot_dep_re_cache = {}
+
+def _get_slot_dep_re(eapi_attrs):
+ cache_key = eapi_attrs.slot_operator
+ slot_re = _slot_dep_re_cache.get(cache_key)
+ if slot_re is not None:
+ return slot_re
+
+ if eapi_attrs.slot_operator:
+ slot_re = _slot + r'?(\*|=|/' + _slot + r'=?)?'
+ else:
+ slot_re = _slot
+
+ slot_re = re.compile('^' + slot_re + '$', re.VERBOSE | re.UNICODE)
+
+ _slot_dep_re_cache[cache_key] = slot_re
+ return slot_re
+
+def _match_slot(atom, pkg):
+ if pkg.slot == atom.slot:
+ if not atom.sub_slot:
+ return True
+ elif atom.sub_slot == pkg.sub_slot:
+ return True
+ return False
+
+_atom_re_cache = {}
+
+def _get_atom_re(eapi_attrs):
+ cache_key = eapi_attrs.dots_in_PN
+ atom_re = _atom_re_cache.get(cache_key)
+ if atom_re is not None:
+ return atom_re
+
+ if eapi_attrs.dots_in_PN:
+ cp_re = _cp['dots_allowed_in_PN']
+ cpv_re = _cpv['dots_allowed_in_PN']
+ else:
+ cp_re = _cp['dots_disallowed_in_PN']
+ cpv_re = _cpv['dots_disallowed_in_PN']
+
+ atom_re = re.compile('^(?P<without_use>(?:' +
+ '(?P<op>' + _op + cpv_re + ')|' +
+ '(?P<star>=' + cpv_re + r'\*)|' +
+ '(?P<simple>' + cp_re + '))' +
+ '(' + _slot_separator + _slot_loose + ')?' +
+ _repo + ')(' + _use + ')?$', re.VERBOSE | re.UNICODE)
+
+ _atom_re_cache[cache_key] = atom_re
+ return atom_re
+
+_atom_wildcard_re_cache = {}
+
+def _get_atom_wildcard_re(eapi_attrs):
+ cache_key = eapi_attrs.dots_in_PN
+ atom_re = _atom_wildcard_re_cache.get(cache_key)
+ if atom_re is not None:
+ return atom_re
+
+ if eapi_attrs.dots_in_PN:
+ pkg_re = r'[\w+*][\w+.*-]*?'
+ else:
+ pkg_re = r'[\w+*][\w+*-]*?'
+
+ atom_re = re.compile(r'((?P<simple>(' +
+ _extended_cat + r')/(' + pkg_re + r'(-' + _vr + ')?))' + \
+ '|(?P<star>=((' + _extended_cat + r')/(' + pkg_re + r'))-(?P<version>\*\w+\*)))' + \
+ '(:(?P<slot>' + _slot_loose + r'))?(' +
+ _repo_separator + r'(?P<repo>' + _repo_name + r'))?$', re.UNICODE)
+
+ _atom_wildcard_re_cache[cache_key] = atom_re
+ return atom_re
+
+_usedep_re_cache = {}
+
+def _get_usedep_re(eapi_attrs):
+ """
+ @param eapi_attrs: The EAPI attributes from _get_eapi_attrs
+ @type eapi_attrs: _eapi_attrs
+ @rtype: regular expression object
+ @return: A regular expression object that matches valid USE deps for the
+ given eapi.
+ """
+ cache_key = eapi_attrs.dots_in_use_flags
+ usedep_re = _usedep_re_cache.get(cache_key)
+ if usedep_re is not None:
+ return usedep_re
+
+ if eapi_attrs.dots_in_use_flags:
+ _flag_re = r'[A-Za-z0-9][A-Za-z0-9+_@.-]*'
+ else:
+ _flag_re = r'[A-Za-z0-9][A-Za-z0-9+_@-]*'
+
+ usedep_re = re.compile(r'^(?P<prefix>[!-]?)(?P<flag>' +
+ _flag_re + r')(?P<default>(\(\+\)|\(\-\))?)(?P<suffix>[?=]?)$')
+
+ _usedep_re_cache[cache_key] = usedep_re
+ return usedep_re
+
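+# For illustration, the resulting pattern splits a USE dependency token such
+# as "abi_x86_32(-)?" into its prefix, flag, default and suffix groups (a
+# sketch only; eapi_attrs comes from _get_eapi_attrs):
+#
+#   >>> m = _get_usedep_re(_get_eapi_attrs(None)).match("abi_x86_32(-)?")
+#   >>> m.group("prefix"), m.group("flag"), m.group("default"), m.group("suffix")
+#   ('', 'abi_x86_32', '(-)', '?')
+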
+_useflag_re_cache = {}
+
+def _get_useflag_re(eapi):
+ """
+ When eapi is None, validation is not as strict, since we want the same
+ expression to work for multiple EAPIs that may have slightly different rules.
+ @param eapi: The EAPI
+ @type eapi: String or None
+ @rtype: regular expression object
+ @return: A regular expression object that matches valid USE flags for the
+ given eapi.
+ """
+ eapi_attrs = _get_eapi_attrs(eapi)
+ cache_key = eapi_attrs.dots_in_use_flags
+ useflag_re = _useflag_re_cache.get(cache_key)
+ if useflag_re is not None:
+ return useflag_re
+
+ if eapi_attrs.dots_in_use_flags:
+ flag_re = r'[A-Za-z0-9][A-Za-z0-9+_@.-]*'
+ else:
+ flag_re = r'[A-Za-z0-9][A-Za-z0-9+_@-]*'
+
+ useflag_re = re.compile(r'^' + flag_re + r'$')
+
+ _useflag_re_cache[cache_key] = useflag_re
+ return useflag_re
+
+def cpvequal(cpv1, cpv2):
+ """
+
+ @param cpv1: CategoryPackageVersion (no operators) Example: "sys-apps/portage-2.1"
+ @type cpv1: String
+ @param cpv2: CategoryPackageVersion (no operators) Example: "sys-apps/portage-2.1"
+ @type cpv2: String
+ @rtype: Boolean
+ @return:
+ 1. True if cpv1 = cpv2
+ 2. False Otherwise
+ 3. Throws PortageException if cpv1 or cpv2 is not a CPV
+
+ Example Usage:
+ >>> from portage.dep import cpvequal
+ >>> cpvequal("sys-apps/portage-2.1","sys-apps/portage-2.1")
+ True
+
+ """
+
+ try:
+ try:
+ split1 = cpv1.cpv_split
+ except AttributeError:
+ cpv1 = _pkg_str(cpv1)
+ split1 = cpv1.cpv_split
+
+ try:
+ split2 = cpv2.cpv_split
+ except AttributeError:
+ cpv2 = _pkg_str(cpv2)
+ split2 = cpv2.cpv_split
+
+ except InvalidData:
+ raise portage.exception.PortageException(_("Invalid data '%s, %s', parameter was not a CPV") % (cpv1, cpv2))
+
+ if split1[0] != split2[0] or \
+ split1[1] != split2[1]:
+ return False
+
+ return vercmp(cpv1.version, cpv2.version) == 0
+
+def strip_empty(myarr):
+ """
+ Strip all empty elements from an array
+
+ @param myarr: The list of elements
+ @type myarr: List
+ @rtype: Array
+ @return: The array with empty elements removed
+ """
+ warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
+ ('portage.dep.strip_empty',), DeprecationWarning, stacklevel=2)
+ return [x for x in myarr if x]
+
+def paren_reduce(mystr, _deprecation_warn=True):
+ """
+ Take a string and convert all paren enclosed entities into sublists and
+ split the list elements by spaces. All redundant brackets are removed.
+
+ Example usage:
+ >>> paren_reduce('foobar foo? ( bar baz )')
+ ['foobar', 'foo?', ['bar', 'baz']]
+
+ @param mystr: The string to reduce
+ @type mystr: String
+ @rtype: Array
+ @return: The reduced string in an array
+ """
+ if portage._internal_caller and _deprecation_warn:
+ warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
+ ('portage.dep.paren_reduce',), DeprecationWarning, stacklevel=2)
+ mysplit = mystr.split()
+ level = 0
+ stack = [[]]
+ need_bracket = False
+
+ for token in mysplit:
+ if token == "(":
+ need_bracket = False
+ stack.append([])
+ level += 1
+ elif token == ")":
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+ if level > 0:
+ level -= 1
+ l = stack.pop()
+ is_single = (len(l) == 1 or (len(l)==2 and (l[0] == "||" or l[0][-1] == "?")))
+
+ def ends_in_any_of_dep(k):
+ return k>=0 and stack[k] and stack[k][-1] == "||"
+
+ def ends_in_operator(k):
+ return k>=0 and stack[k] and (stack[k][-1] == "||" or stack[k][-1][-1] == "?")
+
+ def special_append():
+ """
+ Use extend instead of append if possible. This kills all redundant brackets.
+ """
+ if is_single and (not stack[level] or not stack[level][-1][-1] == "?"):
+ if len(l) == 1 and isinstance(l[0], list):
+ # l = [[...]]
+ stack[level].extend(l[0])
+ else:
+ stack[level].extend(l)
+ else:
+ stack[level].append(l)
+
+ if l:
+ if not ends_in_any_of_dep(level-1) and not ends_in_operator(level):
+ #Optimize: ( ( ... ) ) -> ( ... ). Make sure there is no '||' hanging around.
+ stack[level].extend(l)
+ elif not stack[level]:
+ #An '||' in the level above forces us to keep the brackets.
+ special_append()
+ elif len(l) == 1 and ends_in_any_of_dep(level):
+ #Optimize: || ( A ) -> A
+ stack[level].pop()
+ special_append()
+ elif len(l) == 2 and (l[0] == "||" or l[0][-1] == "?") and stack[level][-1] in (l[0], "||"):
+ #Optimize: || ( || ( ... ) ) -> || ( ... )
+ # foo? ( foo? ( ... ) ) -> foo? ( ... )
+ # || ( foo? ( ... ) ) -> foo? ( ... )
+ stack[level].pop()
+ special_append()
+ else:
+ special_append()
+ else:
+ if stack[level] and (stack[level][-1] == "||" or stack[level][-1][-1] == "?"):
+ stack[level].pop()
+ else:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+ elif token == "||":
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+ need_bracket = True
+ stack[level].append(token)
+ else:
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+
+ if token[-1] == "?":
+ need_bracket = True
+
+ stack[level].append(token)
+
+ if level != 0 or need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+
+ return stack[0]
+
+class paren_normalize(list):
+ """Take a dependency structure as returned by paren_reduce or use_reduce
+ and generate an equivalent structure that has no redundant lists."""
+ def __init__(self, src):
+ if portage._internal_caller:
+ warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
+ ('portage.dep.paren_normalize',), DeprecationWarning, stacklevel=2)
+ list.__init__(self)
+ self._zap_parens(src, self)
+
+ def _zap_parens(self, src, dest, disjunction=False):
+ if not src:
+ return dest
+ i = iter(src)
+ for x in i:
+ if isinstance(x, basestring):
+ if x in ('||', '^^'):
+ y = self._zap_parens(next(i), [], disjunction=True)
+ if len(y) == 1:
+ dest.append(y[0])
+ else:
+ dest.append(x)
+ dest.append(y)
+ elif x.endswith("?"):
+ dest.append(x)
+ dest.append(self._zap_parens(next(i), []))
+ else:
+ dest.append(x)
+ else:
+ if disjunction:
+ x = self._zap_parens(x, [])
+ if len(x) == 1:
+ dest.append(x[0])
+ else:
+ dest.append(x)
+ else:
+ self._zap_parens(x, dest)
+ return dest
+
+def paren_enclose(mylist, unevaluated_atom=False, opconvert=False):
+ """
+ Convert a list to a string with sublists enclosed with parens.
+
+ Example usage:
+ >>> test = ['foobar','foo',['bar','baz']]
+ >>> paren_enclose(test)
+ 'foobar foo ( bar baz )'
+
+ @param mylist: The list
+ @type mylist: List
+ @rtype: String
+ @return: The paren enclosed string
+ """
+ mystrparts = []
+ for x in mylist:
+ if isinstance(x, list):
+ if opconvert and x and x[0] == "||":
+ mystrparts.append("%s ( %s )" % (x[0], paren_enclose(x[1:])))
+ else:
+ mystrparts.append("( %s )" % paren_enclose(x))
+ else:
+ if unevaluated_atom:
+ x = getattr(x, 'unevaluated_atom', x)
+ mystrparts.append(x)
+ return " ".join(mystrparts)
+
+def use_reduce(depstr, uselist=[], masklist=[], matchall=False, excludeall=[], is_src_uri=False, \
+ eapi=None, opconvert=False, flat=False, is_valid_flag=None, token_class=None, matchnone=False):
+ """
+ Takes a dep string and reduces the use? conditionals out, leaving an array
+ with subarrays. All redundant brackets are removed.
+
+ @param depstr: depstring
+ @type depstr: String
+ @param uselist: List of use enabled flags
+ @type uselist: List
+ @param masklist: List of masked flags (always treated as disabled)
+ @type masklist: List
+ @param matchall: Treat all conditionals as active. Used by repoman.
+ @type matchall: Bool
+ @param excludeall: List of flags for which negated conditionals are always treated as inactive.
+ @type excludeall: List
+ @param is_src_uri: Indicates if depstr represents a SRC_URI
+ @type is_src_uri: Bool
+ @param eapi: Indicates the EAPI the dep string has to comply to
+ @type eapi: String
+ @param opconvert: Put every operator as first element into its argument list
+ @type opconvert: Bool
+ @param flat: Create a flat list of all tokens
+ @type flat: Bool
+ @param is_valid_flag: Function that decides if a given use flag might be used in use conditionals
+ @type is_valid_flag: Function
+ @param token_class: Convert all non operator tokens into this class
+ @type token_class: Class
+ @param matchnone: Treat all conditionals as inactive. Used by digestgen().
+ @type matchnone: Bool
+ @rtype: List
+ @return: The use reduced depend array
+ """
+ if isinstance(depstr, list):
+ if portage._internal_caller:
+ warnings.warn(_("Passing paren_reduced dep arrays to %s is deprecated. " + \
+ "Pass the original dep string instead.") % \
+ ('portage.dep.use_reduce',), DeprecationWarning, stacklevel=2)
+ depstr = paren_enclose(depstr)
+
+ if opconvert and flat:
+ raise ValueError("portage.dep.use_reduce: 'opconvert' and 'flat' are mutually exclusive")
+
+ if matchall and matchnone:
+ raise ValueError("portage.dep.use_reduce: 'matchall' and 'matchnone' are mutually exclusive")
+
+ eapi_attrs = _get_eapi_attrs(eapi)
+ useflag_re = _get_useflag_re(eapi)
+
+ def is_active(conditional):
+ """
+ Decides if a given use conditional is active.
+ """
+ if conditional.startswith("!"):
+ flag = conditional[1:-1]
+ is_negated = True
+ else:
+ flag = conditional[:-1]
+ is_negated = False
+
+ if is_valid_flag:
+ if not is_valid_flag(flag):
+ msg = _("USE flag '%s' referenced in " + \
+ "conditional '%s' is not in IUSE") \
+ % (flag, conditional)
+ e = InvalidData(msg, category='IUSE.missing')
+ raise InvalidDependString(msg, errors=(e,))
+ else:
+ if useflag_re.match(flag) is None:
+ raise InvalidDependString(
+ _("invalid use flag '%s' in conditional '%s'") % (flag, conditional))
+
+ if is_negated and flag in excludeall:
+ return False
+
+ if flag in masklist:
+ return is_negated
+
+ if matchall:
+ return True
+
+ if matchnone:
+ return False
+
+ return (flag in uselist and not is_negated) or \
+ (flag not in uselist and is_negated)
+
+ def missing_white_space_check(token, pos):
+ """
+ Used to generate good error messages for invalid tokens.
+ """
+ for x in (")", "(", "||"):
+ if token.startswith(x) or token.endswith(x):
+ raise InvalidDependString(
+ _("missing whitespace around '%s' at '%s', token %s") % (x, token, pos+1))
+
+ mysplit = depstr.split()
+ #Count the bracket level.
+ level = 0
+ #We parse into a stack. Every time we hit a '(', a new empty list is appended to the stack.
+ #When we hit a ')', the last list in the stack is merged with list one level up.
+ stack = [[]]
+ #Set need_bracket to True after use conditionals or ||. Other tokens need to ensure
+ #that need_bracket is not True.
+ need_bracket = False
+ #Set need_simple_token to True after a SRC_URI arrow. Other tokens need to ensure
+ #that need_simple_token is not True.
+ need_simple_token = False
+
+ for pos, token in enumerate(mysplit):
+ if token == "(":
+ if need_simple_token:
+ raise InvalidDependString(
+ _("expected: file name, got: '%s', token %s") % (token, pos+1))
+ if len(mysplit) >= pos+2 and mysplit[pos+1] == ")":
+ raise InvalidDependString(
+ _("expected: dependency string, got: ')', token %s") % (pos+1,))
+ need_bracket = False
+ stack.append([])
+ level += 1
+ elif token == ")":
+ if need_bracket:
+ raise InvalidDependString(
+ _("expected: '(', got: '%s', token %s") % (token, pos+1))
+ if need_simple_token:
+ raise InvalidDependString(
+ _("expected: file name, got: '%s', token %s") % (token, pos+1))
+ if level > 0:
+ level -= 1
+ l = stack.pop()
+
+ is_single = len(l) == 1 or \
+ (opconvert and l and l[0] == "||") or \
+ (not opconvert and len(l)==2 and l[0] == "||")
+ ignore = False
+
+ if flat:
+ #In 'flat' mode, we simply merge all lists into a single large one.
+ if stack[level] and stack[level][-1][-1] == "?":
+ #The last token before the '(' that matches the current ')'
+ #was a use conditional. The conditional is removed in any case.
+ #Merge the current list if needed.
+ if is_active(stack[level][-1]):
+ stack[level].pop()
+ stack[level].extend(l)
+ else:
+ stack[level].pop()
+ else:
+ stack[level].extend(l)
+ continue
+
+ if stack[level] and isinstance(stack[level][-1],
+ basestring):
+ if stack[level][-1] == "||" and not l:
+ #Optimize: || ( ) -> .
+ if not eapi_attrs.empty_groups_always_true:
+ # in EAPI 7+, we need to fail here
+ l.append((token_class or _unicode)("__const__/empty-any-of"))
+ stack[level].pop()
+ elif stack[level][-1][-1] == "?":
+ #The last token before the '(' that matches the current ')'
+ #was a use conditional, remove it and decide if we
+ #have to keep the current list.
+ if not is_active(stack[level][-1]):
+ ignore = True
+ stack[level].pop()
+
+ def ends_in_any_of_dep(k):
+ return k>=0 and stack[k] and stack[k][-1] == "||"
+
+ def starts_with_any_of_dep(k):
+ #'ends_in_any_of_dep' for opconvert
+ return k>=0 and stack[k] and stack[k][0] == "||"
+
+ def last_any_of_operator_level(k):
+ #Returns the level of the last || operator if it is in effect for
+ #the current level. It is not in effect if there is a level that
+ #ends in a non-operator. This is almost equivalent to stack[level][-1]=="||",
+ #except that it skips empty levels.
+ while k>=0:
+ if stack[k] and isinstance(stack[k][-1],
+ basestring):
+ if stack[k][-1] == "||":
+ return k
+ elif stack[k][-1][-1] != "?":
+ return -1
+ k -= 1
+ return -1
+
+ def special_append():
+ """
+ Use extend instead of append if possible. This kills all redundant brackets.
+ """
+ if is_single:
+ #Either [A], [[...]] or [|| [...]]
+ if l[0] == "||" and ends_in_any_of_dep(level-1):
+ if opconvert:
+ stack[level].extend(l[1:])
+ else:
+ stack[level].extend(l[1])
+ elif len(l) == 1 and isinstance(l[0], list):
+ # l = [[...]]
+ last = last_any_of_operator_level(level-1)
+ if last == -1:
+ if opconvert and isinstance(l[0], list) \
+ and l[0] and l[0][0] == '||':
+ stack[level].append(l[0])
+ else:
+ stack[level].extend(l[0])
+ else:
+ if opconvert and l[0] and l[0][0] == "||":
+ stack[level].extend(l[0][1:])
+ else:
+ stack[level].append(l[0])
+ else:
+ stack[level].extend(l)
+ else:
+ if opconvert and stack[level] and stack[level][-1] == '||':
+ stack[level][-1] = ['||'] + l
+ else:
+ stack[level].append(l)
+
+ if l and not ignore:
+ #The current list is not empty and we don't want to ignore it because
+ #of an inactive use conditional.
+ if not ends_in_any_of_dep(level-1) and not ends_in_any_of_dep(level):
+ #Optimize: ( ( ... ) ) -> ( ... ). Make sure there is no '||' hanging around.
+ stack[level].extend(l)
+ elif not stack[level]:
+ #An '||' in the level above forces us to keep the brackets.
+ special_append()
+ elif is_single and ends_in_any_of_dep(level):
+ #Optimize: || ( A ) -> A, || ( || ( ... ) ) -> || ( ... )
+ stack[level].pop()
+ special_append()
+ elif ends_in_any_of_dep(level) and ends_in_any_of_dep(level-1):
+ #Optimize: || ( A || ( B C ) ) -> || ( A B C )
+ stack[level].pop()
+ stack[level].extend(l)
+ else:
+ if opconvert and ends_in_any_of_dep(level):
+ #In opconvert mode, we have to move the operator from the level
+ #above into the current list.
+ stack[level].pop()
+ stack[level].append(["||"] + l)
+ else:
+ special_append()
+
+ else:
+ raise InvalidDependString(
+ _("no matching '%s' for '%s', token %s") % ("(", ")", pos+1))
+ elif token == "||":
+ if is_src_uri:
+ raise InvalidDependString(
+ _("any-of dependencies are not allowed in SRC_URI: token %s") % (pos+1,))
+ if need_bracket:
+ raise InvalidDependString(
+ _("expected: '(', got: '%s', token %s") % (token, pos+1))
+ need_bracket = True
+ stack[level].append(token)
+ elif token == "->":
+ if need_simple_token:
+ raise InvalidDependString(
+ _("expected: file name, got: '%s', token %s") % (token, pos+1))
+ if not is_src_uri:
+ raise InvalidDependString(
+ _("SRC_URI arrows are only allowed in SRC_URI: token %s") % (pos+1,))
+ if not eapi_attrs.src_uri_arrows:
+ raise InvalidDependString(
+ _("SRC_URI arrow not allowed in EAPI %s: token %s") % (eapi, pos+1))
+ need_simple_token = True
+ stack[level].append(token)
+ else:
+ if need_bracket:
+ raise InvalidDependString(
+ _("expected: '(', got: '%s', token %s") % (token, pos+1))
+
+ if need_simple_token and "/" in token:
+ #The last token was a SRC_URI arrow, make sure we have a simple file name.
+ raise InvalidDependString(
+ _("expected: file name, got: '%s', token %s") % (token, pos+1))
+
+ if token[-1] == "?":
+ need_bracket = True
+ else:
+ need_simple_token = False
+ if token_class and not is_src_uri:
+ #Add a hack for SRC_URI here, to avoid conditional code at the consumer level
+ try:
+ token = token_class(token, eapi=eapi,
+ is_valid_flag=is_valid_flag)
+ except InvalidAtom as e:
+ missing_white_space_check(token, pos)
+ raise InvalidDependString(
+ _("Invalid atom (%s), token %s") \
+ % (e, pos+1), errors=(e,))
+ except SystemExit:
+ raise
+ except Exception as e:
+ missing_white_space_check(token, pos)
+ raise InvalidDependString(
+ _("Invalid token '%s', token %s") % (token, pos+1))
+
+ if not matchall and \
+ hasattr(token, 'evaluate_conditionals'):
+ token = token.evaluate_conditionals(uselist)
+
+ stack[level].append(token)
+
+ if level != 0:
+ raise InvalidDependString(
+ _("Missing '%s' at end of string") % (")",))
+
+ if need_bracket:
+ raise InvalidDependString(
+ _("Missing '%s' at end of string") % ("(",))
+
+ if need_simple_token:
+ raise InvalidDependString(
+ _("Missing file name at end of string"))
+
+ return stack[0]
+
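+# A small worked example of the USE-conditional reduction described above
+# (illustrative; tokens stay plain strings because no token_class is given):
+#
+#   >>> use_reduce("a? ( x ) y", uselist=["a"])
+#   ['x', 'y']
+#   >>> use_reduce("a? ( x ) y", uselist=[])
+#   ['y']
+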
+def dep_opconvert(deplist):
+ """
+ Iterate recursively through a list of deps; if the
+ dep is a '||' or '&&' operator, combine it with the
+ list of deps that follows.
+
+ Example usage:
+ >>> test = ["blah", "||", ["foo", "bar", "baz"]]
+ >>> dep_opconvert(test)
+ ['blah', ['||', 'foo', 'bar', 'baz']]
+
+ @param deplist: A list of deps to format
+ @type deplist: List
+ @rtype: List
+ @return:
+ The new list with the new ordering
+ """
+ if portage._internal_caller:
+ warnings.warn(_("%s is deprecated. Use %s with the opconvert parameter set to True instead.") % \
+ ('portage.dep.dep_opconvert', 'portage.dep.use_reduce'), DeprecationWarning, stacklevel=2)
+
+ retlist = []
+ x = 0
+ while x != len(deplist):
+ if isinstance(deplist[x], list):
+ retlist.append(dep_opconvert(deplist[x]))
+ elif deplist[x] == "||":
+ retlist.append([deplist[x]] + dep_opconvert(deplist[x+1]))
+ x += 1
+ else:
+ retlist.append(deplist[x])
+ x += 1
+ return retlist
+
+def flatten(mylist):
+ """
+ Recursively traverse nested lists and return a single list containing
+ all non-list elements that are found.
+
+ Example usage:
+ >>> flatten([1, [2, 3, [4]]])
+ [1, 2, 3, 4]
+
+ @param mylist: A list containing nested lists and non-list elements.
+ @type mylist: List
+ @rtype: List
+ @return: A single list containing only non-list elements.
+ """
+ if portage._internal_caller:
+ warnings.warn(_("%s is deprecated and will be removed without replacement.") % \
+ ('portage.dep.flatten',), DeprecationWarning, stacklevel=2)
+
+ newlist = []
+ for x in mylist:
+ if isinstance(x, list):
+ newlist.extend(flatten(x))
+ else:
+ newlist.append(x)
+ return newlist
+
+class _use_dep(object):
+
+ __slots__ = ("_eapi_attrs", "conditional", "missing_enabled", "missing_disabled",
+ "disabled", "enabled", "tokens", "required")
+
+ class _conditionals_class(object):
+ __slots__ = ("enabled", "disabled", "equal", "not_equal")
+
+ def items(self):
+ for k in self.__slots__:
+ v = getattr(self, k, None)
+ if v:
+ yield (k, v)
+
+ def values(self):
+ for k in self.__slots__:
+ v = getattr(self, k, None)
+ if v:
+ yield v
+
+ # used in InvalidAtom messages
+ _conditional_strings = {
+ 'enabled' : '%s?',
+ 'disabled': '!%s?',
+ 'equal': '%s=',
+ 'not_equal': '!%s=',
+ }
+
+ def __init__(self, use, eapi_attrs, enabled_flags=None, disabled_flags=None, missing_enabled=None,
+ missing_disabled=None, conditional=None, required=None):
+
+ self._eapi_attrs = eapi_attrs
+
+ if enabled_flags is not None:
+ #A shortcut for the class's own methods.
+ self.tokens = use
+ if not isinstance(self.tokens, tuple):
+ self.tokens = tuple(self.tokens)
+
+ self.required = frozenset(required)
+ self.enabled = frozenset(enabled_flags)
+ self.disabled = frozenset(disabled_flags)
+ self.missing_enabled = frozenset(missing_enabled)
+ self.missing_disabled = frozenset(missing_disabled)
+ self.conditional = None
+
+ if conditional:
+ self.conditional = self._conditionals_class()
+ for k in "enabled", "disabled", "equal", "not_equal":
+ setattr(self.conditional, k, frozenset(conditional.get(k, [])))
+
+ return
+
+ enabled_flags = set()
+ disabled_flags = set()
+ missing_enabled = set()
+ missing_disabled = set()
+ no_default = set()
+
+ conditional = {}
+ usedep_re = _get_usedep_re(self._eapi_attrs)
+
+ for x in use:
+ m = usedep_re.match(x)
+ if m is None:
+ raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
+
+ operator = m.group("prefix") + m.group("suffix")
+ flag = m.group("flag")
+ default = m.group("default")
+
+ if not operator:
+ enabled_flags.add(flag)
+ elif operator == "-":
+ disabled_flags.add(flag)
+ elif operator == "?":
+ conditional.setdefault("enabled", set()).add(flag)
+ elif operator == "=":
+ conditional.setdefault("equal", set()).add(flag)
+ elif operator == "!=":
+ conditional.setdefault("not_equal", set()).add(flag)
+ elif operator == "!?":
+ conditional.setdefault("disabled", set()).add(flag)
+ else:
+ raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
+
+ if default:
+ if default == "(+)":
+ if flag in missing_disabled or flag in no_default:
+ raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
+ missing_enabled.add(flag)
+ else:
+ if flag in missing_enabled or flag in no_default:
+ raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
+ missing_disabled.add(flag)
+ else:
+ if flag in missing_enabled or flag in missing_disabled:
+ raise InvalidAtom(_("Invalid use dep: '%s'") % (x,))
+ no_default.add(flag)
+
+ self.tokens = use
+ if not isinstance(self.tokens, tuple):
+ self.tokens = tuple(self.tokens)
+
+ self.required = frozenset(no_default)
+
+ self.enabled = frozenset(enabled_flags)
+ self.disabled = frozenset(disabled_flags)
+ self.missing_enabled = frozenset(missing_enabled)
+ self.missing_disabled = frozenset(missing_disabled)
+ self.conditional = None
+
+ if conditional:
+ self.conditional = self._conditionals_class()
+ for k in "enabled", "disabled", "equal", "not_equal":
+ setattr(self.conditional, k, frozenset(conditional.get(k, [])))
+
+ def __bool__(self):
+ return bool(self.tokens)
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+ def __str__(self):
+ if not self.tokens:
+ return ""
+ return "[%s]" % (",".join(self.tokens),)
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'], errors='backslashreplace')
+
+ def __repr__(self):
+ return "portage.dep._use_dep(%s)" % repr(self.tokens)
+
+ def evaluate_conditionals(self, use):
+ """
+ Create a new instance with conditionals evaluated.
+
+ Conditional evaluation behavior:
+
+ parent state conditional result
+
+ x x? x
+ -x x?
+ x !x?
+ -x !x? -x
+
+ x x= x
+ -x x= -x
+ x !x= -x
+ -x !x= x
+
+ Conditional syntax examples:
+
+ Compact Form Equivalent Expanded Form
+
+ foo[bar?] bar? ( foo[bar] ) !bar? ( foo )
+ foo[!bar?] bar? ( foo ) !bar? ( foo[-bar] )
+ foo[bar=] bar? ( foo[bar] ) !bar? ( foo[-bar] )
+ foo[!bar=] bar? ( foo[-bar] ) !bar? ( foo[bar] )
+
+ """
+ enabled_flags = set(self.enabled)
+ disabled_flags = set(self.disabled)
+
+ tokens = []
+ usedep_re = _get_usedep_re(self._eapi_attrs)
+
+ for x in self.tokens:
+ m = usedep_re.match(x)
+
+ operator = m.group("prefix") + m.group("suffix")
+ flag = m.group("flag")
+ default = m.group("default")
+ if default is None:
+ default = ""
+
+ if operator == "?":
+ if flag in use:
+ enabled_flags.add(flag)
+ tokens.append(flag+default)
+ elif operator == "=":
+ if flag in use:
+ enabled_flags.add(flag)
+ tokens.append(flag+default)
+ else:
+ disabled_flags.add(flag)
+ tokens.append("-"+flag+default)
+ elif operator == "!=":
+ if flag in use:
+ disabled_flags.add(flag)
+ tokens.append("-"+flag+default)
+ else:
+ enabled_flags.add(flag)
+ tokens.append(flag+default)
+ elif operator == "!?":
+ if flag not in use:
+ disabled_flags.add(flag)
+ tokens.append("-"+flag+default)
+ else:
+ tokens.append(x)
+
+ return _use_dep(tokens, self._eapi_attrs, enabled_flags=enabled_flags, disabled_flags=disabled_flags,
+ missing_enabled=self.missing_enabled, missing_disabled=self.missing_disabled, required=self.required)
+
+ def violated_conditionals(self, other_use, is_valid_flag, parent_use=None):
+ """
+ Create a new instance with satisfied use deps removed.
+ """
+ if parent_use is None and self.conditional:
+ raise InvalidAtom("violated_conditionals needs 'parent_use'" + \
+ " parameter for conditional flags.")
+
+ enabled_flags = set()
+ disabled_flags = set()
+
+ conditional = {}
+ tokens = []
+
+ all_defaults = frozenset(chain(self.missing_enabled, self.missing_disabled))
+
+ def validate_flag(flag):
+ return is_valid_flag(flag) or flag in all_defaults
+
+ usedep_re = _get_usedep_re(self._eapi_attrs)
+
+ for x in self.tokens:
+ m = usedep_re.match(x)
+
+ operator = m.group("prefix") + m.group("suffix")
+ flag = m.group("flag")
+
+ if not validate_flag(flag):
+ tokens.append(x)
+ if not operator:
+ enabled_flags.add(flag)
+ elif operator == "-":
+ disabled_flags.add(flag)
+ elif operator == "?":
+ conditional.setdefault("enabled", set()).add(flag)
+ elif operator == "=":
+ conditional.setdefault("equal", set()).add(flag)
+ elif operator == "!=":
+ conditional.setdefault("not_equal", set()).add(flag)
+ elif operator == "!?":
+ conditional.setdefault("disabled", set()).add(flag)
+
+ continue
+
+ if not operator:
+ if flag not in other_use:
+ if is_valid_flag(flag) or flag in self.missing_disabled:
+ tokens.append(x)
+ enabled_flags.add(flag)
+ elif operator == "-":
+ if flag not in other_use:
+ if not is_valid_flag(flag):
+ if flag in self.missing_enabled:
+ tokens.append(x)
+ disabled_flags.add(flag)
+ else:
+ tokens.append(x)
+ disabled_flags.add(flag)
+ elif operator == "?":
+ if flag not in parent_use or flag in other_use:
+ continue
+
+ if is_valid_flag(flag) or flag in self.missing_disabled:
+ tokens.append(x)
+ conditional.setdefault("enabled", set()).add(flag)
+ elif operator == "=":
+ if flag in parent_use and flag not in other_use:
+ if is_valid_flag(flag):
+ tokens.append(x)
+ conditional.setdefault("equal", set()).add(flag)
+ else:
+ if flag in self.missing_disabled:
+ tokens.append(x)
+ conditional.setdefault("equal", set()).add(flag)
+ elif flag not in parent_use:
+ if flag not in other_use:
+ if not is_valid_flag(flag):
+ if flag in self.missing_enabled:
+ tokens.append(x)
+ conditional.setdefault("equal", set()).add(flag)
+ else:
+ tokens.append(x)
+ conditional.setdefault("equal", set()).add(flag)
+ elif operator == "!=":
+ if flag not in parent_use and flag not in other_use:
+ if is_valid_flag(flag):
+ tokens.append(x)
+ conditional.setdefault("not_equal", set()).add(flag)
+ else:
+ if flag in self.missing_disabled:
+ tokens.append(x)
+ conditional.setdefault("not_equal", set()).add(flag)
+ elif flag in parent_use:
+ if flag not in other_use:
+ if not is_valid_flag(flag):
+ if flag in self.missing_enabled:
+ tokens.append(x)
+ conditional.setdefault("not_equal", set()).add(flag)
+ else:
+ tokens.append(x)
+ conditional.setdefault("not_equal", set()).add(flag)
+ elif operator == "!?":
+ if flag not in parent_use:
+ if flag not in other_use:
+ if not is_valid_flag(flag) and flag in self.missing_enabled:
+ tokens.append(x)
+ conditional.setdefault("disabled", set()).add(flag)
+ else:
+ tokens.append(x)
+ conditional.setdefault("disabled", set()).add(flag)
+
+ return _use_dep(tokens, self._eapi_attrs, enabled_flags=enabled_flags, disabled_flags=disabled_flags,
+ missing_enabled=self.missing_enabled, missing_disabled=self.missing_disabled, \
+ conditional=conditional, required=self.required)
+
+ def _eval_qa_conditionals(self, use_mask, use_force):
+ """
+ For repoman, evaluate all possible combinations within the constraints
+ of the given use.force and use.mask settings. The result may seem
+ ambiguous in the sense that the same flag can be in both the enabled
+ and disabled sets, but this is useful within the context of how it's
+ intended to be used by repoman. It is assumed that the caller has
+ already ensured that there is no intersection between the given
+ use_mask and use_force sets when necessary.
+ """
+ enabled_flags = set(self.enabled)
+ disabled_flags = set(self.disabled)
+ missing_enabled = self.missing_enabled
+ missing_disabled = self.missing_disabled
+
+ tokens = []
+ usedep_re = _get_usedep_re(self._eapi_attrs)
+
+ for x in self.tokens:
+ m = usedep_re.match(x)
+
+ operator = m.group("prefix") + m.group("suffix")
+ flag = m.group("flag")
+ default = m.group("default")
+ if default is None:
+ default = ""
+
+ if operator == "?":
+ if flag not in use_mask:
+ enabled_flags.add(flag)
+ tokens.append(flag+default)
+ elif operator == "=":
+ if flag not in use_mask:
+ enabled_flags.add(flag)
+ tokens.append(flag+default)
+ if flag not in use_force:
+ disabled_flags.add(flag)
+ tokens.append("-"+flag+default)
+ elif operator == "!=":
+ if flag not in use_force:
+ enabled_flags.add(flag)
+ tokens.append(flag+default)
+ if flag not in use_mask:
+ disabled_flags.add(flag)
+ tokens.append("-"+flag+default)
+ elif operator == "!?":
+ if flag not in use_force:
+ disabled_flags.add(flag)
+ tokens.append("-"+flag+default)
+ else:
+ tokens.append(x)
+
+ return _use_dep(tokens, self._eapi_attrs, enabled_flags=enabled_flags, disabled_flags=disabled_flags,
+ missing_enabled=missing_enabled, missing_disabled=missing_disabled, required=self.required)
+
+class Atom(_unicode):
+
+ """
+ For compatibility with existing atom string manipulation code, this
+ class emulates most of the str methods that are useful with atoms.
+ """
+
+ # Distinguishes package atoms from other atom types
+ package = True
+
+ # Distinguishes soname atoms from other atom types
+ soname = False
+
+ class _blocker(object):
+ __slots__ = ("overlap",)
+
+ class _overlap(object):
+ __slots__ = ("forbid",)
+
+ def __init__(self, forbid=False):
+ self.forbid = forbid
+
+ def __init__(self, forbid_overlap=False):
+ self.overlap = self._overlap(forbid=forbid_overlap)
+
+ def __new__(cls, s, unevaluated_atom=None, allow_wildcard=False, allow_repo=None,
+ _use=None, eapi=None, is_valid_flag=None, allow_build_id=None):
+ return _unicode.__new__(cls, s)
+
+ def __init__(self, s, unevaluated_atom=None, allow_wildcard=False, allow_repo=None,
+ _use=None, eapi=None, is_valid_flag=None, allow_build_id=None):
+ if isinstance(s, Atom):
+ # This is an efficiency assertion, to ensure that the Atom
+ # constructor is not called redundantly.
+ raise TypeError(_("Expected %s, got %s") % \
+ (_unicode, type(s)))
+
+ if not isinstance(s, _unicode):
+ # Avoid TypeError from _unicode.__init__ with PyPy.
+ s = _unicode_decode(s)
+
+ _unicode.__init__(s)
+
+ eapi_attrs = _get_eapi_attrs(eapi)
+ atom_re = _get_atom_re(eapi_attrs)
+
+ self.__dict__['eapi'] = eapi
+ if eapi is not None:
+ # Ignore allow_repo when eapi is specified.
+ allow_repo = eapi_attrs.repo_deps
+ else:
+ # These parameters have "smart" defaults that are only
+ # applied when the caller does not explicitly pass in a
+ # True or False value.
+ if allow_repo is None:
+ allow_repo = True
+ if allow_build_id is None:
+ allow_build_id = True
+
+ blocker_prefix = ""
+ if "!" == s[:1]:
+ blocker = self._blocker(forbid_overlap=("!" == s[1:2]))
+ if blocker.overlap.forbid:
+ blocker_prefix = s[:2]
+ s = s[2:]
+ else:
+ blocker_prefix = s[:1]
+ s = s[1:]
+ else:
+ blocker = False
+ self.__dict__['blocker'] = blocker
+ m = atom_re.match(s)
+ build_id = None
+ extended_syntax = False
+ extended_version = None
+ if m is None:
+ if allow_wildcard:
+ atom_re = _get_atom_wildcard_re(eapi_attrs)
+ m = atom_re.match(s)
+ if m is None:
+ raise InvalidAtom(self)
+ gdict = m.groupdict()
+ if m.group('star') is not None:
+ op = '=*'
+ base = atom_re.groupindex['star']
+ cp = m.group(base + 1)
+ cpv = m.group('star')[1:]
+ extended_version = m.group(base + 4)
+ else:
+ op = None
+ cpv = cp = m.group('simple')
+ if m.group(atom_re.groupindex['simple'] + 3) is not None:
+ raise InvalidAtom(self)
+ if cpv.find("**") != -1:
+ raise InvalidAtom(self)
+ slot = m.group('slot')
+ repo = m.group('repo')
+ use_str = None
+ extended_syntax = True
+ else:
+ raise InvalidAtom(self)
+ elif m.group('op') is not None:
+ base = atom_re.groupindex['op']
+ op = m.group(base + 1)
+ cpv = m.group(base + 2)
+ cp = m.group(base + 3)
+ slot = m.group(atom_re.groups - 2)
+ repo = m.group(atom_re.groups - 1)
+ use_str = m.group(atom_re.groups)
+ version = m.group(base + 4)
+ if version is not None:
+ if allow_build_id:
+ cpv_build_id = cpv
+ cpv = cp
+ cp = cp[:-len(version)]
+ build_id = cpv_build_id[len(cpv)+1:]
+ if len(build_id) > 1 and build_id[:1] == "0":
+ # Leading zeros are not allowed.
+ raise InvalidAtom(self)
+ try:
+ build_id = int(build_id)
+ except ValueError:
+ raise InvalidAtom(self)
+ else:
+ raise InvalidAtom(self)
+ elif m.group('star') is not None:
+ base = atom_re.groupindex['star']
+ op = '=*'
+ cpv = m.group(base + 1)
+ cp = m.group(base + 2)
+ slot = m.group(atom_re.groups - 2)
+ repo = m.group(atom_re.groups - 1)
+ use_str = m.group(atom_re.groups)
+ if m.group(base + 3) is not None:
+ raise InvalidAtom(self)
+ elif m.group('simple') is not None:
+ op = None
+ cpv = cp = m.group(atom_re.groupindex['simple'] + 1)
+ slot = m.group(atom_re.groups - 2)
+ repo = m.group(atom_re.groups - 1)
+ use_str = m.group(atom_re.groups)
+ if m.group(atom_re.groupindex['simple'] + 2) is not None:
+ raise InvalidAtom(self)
+
+ else:
+ raise AssertionError(_("required group not found in atom: '%s'") % self)
+ self.__dict__['cp'] = cp
+ try:
+ self.__dict__['cpv'] = _pkg_str(cpv)
+ self.__dict__['version'] = self.cpv.version
+ except InvalidData:
+ # plain cp, wildcard, or something
+ self.__dict__['cpv'] = cpv
+ self.__dict__['version'] = extended_version
+ self.__dict__['repo'] = repo
+ if slot is None:
+ self.__dict__['slot'] = None
+ self.__dict__['sub_slot'] = None
+ self.__dict__['slot_operator'] = None
+ else:
+ slot_re = _get_slot_dep_re(eapi_attrs)
+ slot_match = slot_re.match(slot)
+ if slot_match is None:
+ raise InvalidAtom(self)
+ if eapi_attrs.slot_operator:
+ self.__dict__['slot'] = slot_match.group(1)
+ sub_slot = slot_match.group(2)
+ if sub_slot is not None:
+ sub_slot = sub_slot.lstrip("/")
+ if sub_slot in ("*", "="):
+ self.__dict__['sub_slot'] = None
+ self.__dict__['slot_operator'] = sub_slot
+ else:
+ slot_operator = None
+ if sub_slot is not None and sub_slot[-1:] == "=":
+ slot_operator = sub_slot[-1:]
+ sub_slot = sub_slot[:-1]
+ self.__dict__['sub_slot'] = sub_slot
+ self.__dict__['slot_operator'] = slot_operator
+ if self.slot is not None and self.slot_operator == "*":
+ raise InvalidAtom(self)
+ else:
+ self.__dict__['slot'] = slot
+ self.__dict__['sub_slot'] = None
+ self.__dict__['slot_operator'] = None
+ self.__dict__['operator'] = op
+ self.__dict__['extended_syntax'] = extended_syntax
+ self.__dict__['build_id'] = build_id
+
+ if not (repo is None or allow_repo):
+ raise InvalidAtom(self)
+
+ if use_str is not None:
+ if _use is not None:
+ use = _use
+ else:
+ use = _use_dep(use_str[1:-1].split(","), eapi_attrs)
+ without_use = Atom(blocker_prefix + m.group('without_use'),
+ allow_repo=allow_repo)
+ else:
+ use = None
+ if unevaluated_atom is not None and \
+ unevaluated_atom.use is not None:
+ # unevaluated_atom.use is used for IUSE checks when matching
+ # packages, so it must not propagate to without_use
+ without_use = Atom(_unicode(self),
+ allow_wildcard=allow_wildcard,
+ allow_repo=allow_repo,
+ eapi=eapi)
+ else:
+ without_use = self
+
+ self.__dict__['use'] = use
+ self.__dict__['without_use'] = without_use
+
+ if unevaluated_atom:
+ self.__dict__['unevaluated_atom'] = unevaluated_atom
+ else:
+ self.__dict__['unevaluated_atom'] = self
+
+ if eapi is not None:
+ if not isinstance(eapi, basestring):
+ raise TypeError('expected eapi argument of ' + \
+ '%s, got %s: %s' % (basestring, type(eapi), eapi,))
+ if self.slot and not eapi_attrs.slot_deps:
+ raise InvalidAtom(
+ _("Slot deps are not allowed in EAPI %s: '%s'") \
+ % (eapi, self), category='EAPI.incompatible')
+ if self.use:
+ if not eapi_attrs.use_deps:
+ raise InvalidAtom(
+ _("Use deps are not allowed in EAPI %s: '%s'") \
+ % (eapi, self), category='EAPI.incompatible')
+ elif not eapi_attrs.use_dep_defaults and \
+ (self.use.missing_enabled or self.use.missing_disabled):
+ raise InvalidAtom(
+ _("Use dep defaults are not allowed in EAPI %s: '%s'") \
+ % (eapi, self), category='EAPI.incompatible')
+ if is_valid_flag is not None and self.use.conditional:
+ invalid_flag = None
+ try:
+ for conditional_type, flags in \
+ self.use.conditional.items():
+ for flag in flags:
+ if not is_valid_flag(flag):
+ invalid_flag = (conditional_type, flag)
+ raise StopIteration()
+ except StopIteration:
+ pass
+ if invalid_flag is not None:
+ conditional_type, flag = invalid_flag
+ conditional_str = _use_dep._conditional_strings[conditional_type]
+ msg = _("USE flag '%s' referenced in " + \
+ "conditional '%s' in atom '%s' is not in IUSE") \
+ % (flag, conditional_str % flag, self)
+ raise InvalidAtom(msg, category='IUSE.missing')
+ if self.blocker and self.blocker.overlap.forbid and not eapi_attrs.strong_blocks:
+ raise InvalidAtom(
+ _("Strong blocks are not allowed in EAPI %s: '%s'") \
+ % (eapi, self), category='EAPI.incompatible')
+
+ @property
+ def slot_operator_built(self):
+ """
+ Returns True if slot_operator == "=" and sub_slot is not None.
+ NOTE: foo/bar:2= is unbuilt and returns False, whereas foo/bar:2/2=
+ is built and returns True.
+ """
+ return self.slot_operator == "=" and self.sub_slot is not None
+
+ @property
+ def without_repo(self):
+ if self.repo is None:
+ return self
+ return Atom(self.replace(_repo_separator + self.repo, '', 1),
+ allow_wildcard=True)
+
+ @property
+ def without_slot(self):
+ if self.slot is None and self.slot_operator is None:
+ return self
+ atom = remove_slot(self)
+ if self.repo is not None:
+ atom += _repo_separator + self.repo
+ if self.use is not None:
+ atom += _unicode(self.use)
+ return Atom(atom,
+ allow_repo=True, allow_wildcard=True)
+
+ def with_repo(self, repo):
+ atom = remove_slot(self)
+ if self.slot is not None or self.slot_operator is not None:
+ atom += _slot_separator
+ if self.slot is not None:
+ atom += self.slot
+ if self.sub_slot is not None:
+ atom += "/%s" % self.sub_slot
+ if self.slot_operator is not None:
+ atom += self.slot_operator
+ atom += _repo_separator + repo
+ if self.use is not None:
+ atom += _unicode(self.use)
+ return Atom(atom, allow_repo=True, allow_wildcard=True)
+
+ def with_slot(self, slot):
+ atom = remove_slot(self) + _slot_separator + slot
+ if self.repo is not None:
+ atom += _repo_separator + self.repo
+ if self.use is not None:
+ atom += _unicode(self.use)
+ return Atom(atom, allow_repo=True, allow_wildcard=True)
+
+ def __setattr__(self, name, value):
+ raise AttributeError("Atom instances are immutable",
+ self.__class__, name, value)
+
+ def intersects(self, other):
+ """
+ Atoms with different cpv, operator or use attributes cause this method
+ to return False even though there may actually be some intersection.
+ TODO: Detect more forms of intersection.
+ @param other: The package atom to match
+ @type other: Atom
+ @rtype: Boolean
+ @return: True if this atom and the other atom intersect,
+ False otherwise.
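+
+ Example usage (illustrative atoms):
+ >>> Atom('=dev-libs/foo-1.0:2').intersects(Atom('=dev-libs/foo-1.0'))
+ True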
+ """
+ if not isinstance(other, Atom):
+ raise TypeError("expected %s, got %s" % \
+ (Atom, type(other)))
+
+ if self == other:
+ return True
+
+ if self.cp != other.cp or \
+ self.use != other.use or \
+ self.operator != other.operator or \
+ self.cpv != other.cpv:
+ return False
+
+ if self.slot is None or \
+ other.slot is None or \
+ self.slot == other.slot:
+ return True
+
+ return False
+
+ def evaluate_conditionals(self, use):
+ """
+ Create an atom instance with any USE conditionals evaluated.
+ @param use: The set of enabled USE flags
+ @type use: set
+ @rtype: Atom
+ @return: an atom instance with any USE conditionals evaluated
+ """
+ if not (self.use and self.use.conditional):
+ return self
+ atom = remove_slot(self)
+ if self.slot is not None or self.slot_operator is not None:
+ atom += _slot_separator
+ if self.slot is not None:
+ atom += self.slot
+ if self.sub_slot is not None:
+ atom += "/%s" % self.sub_slot
+ if self.slot_operator is not None:
+ atom += self.slot_operator
+ use_dep = self.use.evaluate_conditionals(use)
+ atom += _unicode(use_dep)
+ return Atom(atom, unevaluated_atom=self, allow_repo=(self.repo is not None), _use=use_dep)
+
+ def violated_conditionals(self, other_use, is_valid_flag, parent_use=None):
+ """
+ Create an atom instance with any USE conditional removed, that is
+ satisfied by other_use.
+ @param other_use: The set of enabled USE flags
+ @type other_use: set
+ @param is_valid_flag: Function that decides if a use flag is referenceable in use deps
+ @type is_valid_flag: function
+ @param parent_use: Set of enabled use flags of the package requiring this atom
+ @type parent_use: set
+ @rtype: Atom
+ @return: an atom instance with any satisfied USE conditionals removed
+ """
+ if not self.use:
+ return self
+ atom = remove_slot(self)
+ if self.slot is not None or self.slot_operator is not None:
+ atom += _slot_separator
+ if self.slot is not None:
+ atom += self.slot
+ if self.sub_slot is not None:
+ atom += "/%s" % self.sub_slot
+ if self.slot_operator is not None:
+ atom += self.slot_operator
+ use_dep = self.use.violated_conditionals(other_use, is_valid_flag, parent_use)
+ atom += _unicode(use_dep)
+ return Atom(atom, unevaluated_atom=self, allow_repo=(self.repo is not None), _use=use_dep)
+
+ def _eval_qa_conditionals(self, use_mask, use_force):
+ if not (self.use and self.use.conditional):
+ return self
+ atom = remove_slot(self)
+ if self.slot is not None or self.slot_operator is not None:
+ atom += _slot_separator
+ if self.slot is not None:
+ atom += self.slot
+ if self.sub_slot is not None:
+ atom += "/%s" % self.sub_slot
+ if self.slot_operator is not None:
+ atom += self.slot_operator
+ use_dep = self.use._eval_qa_conditionals(use_mask, use_force)
+ atom += _unicode(use_dep)
+ return Atom(atom, unevaluated_atom=self, allow_repo=(self.repo is not None), _use=use_dep)
+
+ def __copy__(self):
+ """Immutable, so returns self."""
+ return self
+
+ def __deepcopy__(self, memo=None):
+ """Immutable, so returns self."""
+ memo[id(self)] = self
+ return self
+
+ def match(self, pkg):
+ """
+ Check if the given package instance matches this atom.
+
+ @param pkg: a Package instance
+ @type pkg: Package
+ @return: True if this atom matches pkg, otherwise False
+ @rtype: bool
+ """
+ return bool(match_from_list(self, [pkg]))
+
+_extended_cp_re_cache = {}
+
+def extended_cp_match(extended_cp, other_cp):
+ """
+ Checks if an extended syntax cp matches a non-extended cp.
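+
+ Example usage (illustrative):
+ >>> extended_cp_match('dev-libs/*', 'dev-libs/openssl')
+ True
+ >>> extended_cp_match('dev-*/glib', 'x11-libs/glib')
+ False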
+ """
+ # Escape special '+' and '.' characters which are allowed in atoms,
+ # and convert '*' to regex equivalent.
+ global _extended_cp_re_cache
+ extended_cp_re = _extended_cp_re_cache.get(extended_cp)
+ if extended_cp_re is None:
+ extended_cp_re = re.compile("^" + re.escape(extended_cp).replace(
+ r'\*', '[^/]*') + "$", re.UNICODE)
+ _extended_cp_re_cache[extended_cp] = extended_cp_re
+ return extended_cp_re.match(other_cp) is not None
+
+class ExtendedAtomDict(portage.cache.mappings.MutableMapping):
+ """
+ dict() wrapper that supports extended atoms as keys and allows lookup
+ of a normal cp against both normal cp and extended cp keys.
+ The value type has to be given to __init__ and is assumed to be the same
+ for all values.
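+
+ Example usage (illustrative, using set as the value class):
+ >>> d = ExtendedAtomDict(set)
+ >>> d['dev-libs/*'] = {'a'}
+ >>> d['dev-libs/foo'] = {'b'}
+ >>> sorted(d['dev-libs/foo'])
+ ['a', 'b']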
+ """
+
+ __slots__ = ('_extended', '_normal', '_value_class')
+
+ def __init__(self, value_class):
+ self._extended = {}
+ self._normal = {}
+ self._value_class = value_class
+
+ def copy(self):
+ result = self.__class__(self._value_class)
+ result._extended.update(self._extended)
+ result._normal.update(self._normal)
+ return result
+
+ def __iter__(self):
+ for k in self._normal:
+ yield k
+ for k in self._extended:
+ yield k
+
+ def iteritems(self):
+ try:
+ for item in self._normal.items():
+ yield item
+ for item in self._extended.items():
+ yield item
+ except AttributeError:
+ pass # FEATURES=python-trace
+
+ def __delitem__(self, cp):
+ if "*" in cp:
+ return self._extended.__delitem__(cp)
+ else:
+ return self._normal.__delitem__(cp)
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+ items = iteritems
+
+ def __len__(self):
+ return len(self._normal) + len(self._extended)
+
+ def setdefault(self, cp, default=None):
+ if "*" in cp:
+ return self._extended.setdefault(cp, default)
+ else:
+ return self._normal.setdefault(cp, default)
+
+ def __getitem__(self, cp):
+
+ if not isinstance(cp, basestring):
+ raise KeyError(cp)
+
+ if '*' in cp:
+ return self._extended[cp]
+
+ ret = self._value_class()
+ normal_match = self._normal.get(cp)
+ match = False
+
+ if normal_match is not None:
+ match = True
+ if hasattr(ret, "update"):
+ ret.update(normal_match)
+ elif hasattr(ret, "extend"):
+ ret.extend(normal_match)
+ else:
+ raise NotImplementedError()
+
+ for extended_cp in self._extended:
+ if extended_cp_match(extended_cp, cp):
+ match = True
+ if hasattr(ret, "update"):
+ ret.update(self._extended[extended_cp])
+ elif hasattr(ret, "extend"):
+ ret.extend(self._extended[extended_cp])
+ else:
+ raise NotImplementedError()
+
+ if not match:
+ raise KeyError(cp)
+
+ return ret
+
+ def __setitem__(self, cp, val):
+ if "*" in cp:
+ self._extended[cp] = val
+ else:
+ self._normal[cp] = val
+
+ def __eq__(self, other):
+ return self._value_class == other._value_class and \
+ self._extended == other._extended and \
+ self._normal == other._normal
+
+ def clear(self):
+ self._extended.clear()
+ self._normal.clear()
+
+
+def get_operator(mydep):
+ """
+ Return the operator used in a depstring.
+
+ Example usage:
+ >>> from portage.dep import *
+ >>> get_operator(">=test-1.0")
+ '>='
+
+ @param mydep: The dep string to check
+ @type mydep: String
+ @rtype: String
+ @return: The operator. One of:
+ '~', '=', '>', '<', '=*', '>=', or '<='
+ """
+ if not isinstance(mydep, Atom):
+ mydep = Atom(mydep)
+
+ return mydep.operator
+
+def dep_getcpv(mydep):
+ """
+ Return the category-package-version with any operators/slot specifications stripped off
+
+ Example usage:
+ >>> dep_getcpv('>=media-libs/test-3.0')
+ 'media-libs/test-3.0'
+
+ @param mydep: The depstring
+ @type mydep: String
+ @rtype: String
+ @return: The depstring with the operator removed
+ """
+ if not isinstance(mydep, Atom):
+ mydep = Atom(mydep)
+
+ return mydep.cpv
+
+def dep_getslot(mydep):
+ """
+ Retrieve the slot on a depend.
+
+ Example usage:
+ >>> dep_getslot('app-misc/test:3')
+ '3'
+
+ @param mydep: The depstring to retrieve the slot of
+ @type mydep: String
+ @rtype: String
+ @return: The slot
+ """
+ slot = getattr(mydep, "slot", False)
+ if slot is not False:
+ return slot
+
+ #remove repo_name if present
+ mydep = mydep.split(_repo_separator)[0]
+
+ colon = mydep.find(_slot_separator)
+ if colon != -1:
+ bracket = mydep.find("[", colon)
+ if bracket == -1:
+ return mydep[colon+1:]
+ else:
+ return mydep[colon+1:bracket]
+ return None
+
+def dep_getrepo(mydep):
+ """
+ Retrieve the repo on a depend.
+
+ Example usage:
+ >>> dep_getrepo('app-misc/test::repository')
+ 'repository'
+
+ @param mydep: The depstring to retrieve the repository of
+ @type mydep: String
+ @rtype: String
+ @return: The repository name
+ """
+ repo = getattr(mydep, "repo", False)
+ if repo is not False:
+ return repo
+
+ metadata = getattr(mydep, "metadata", False)
+ if metadata:
+ repo = metadata.get('repository', False)
+ if repo is not False:
+ return repo
+
+ colon = mydep.find(_repo_separator)
+ if colon != -1:
+ bracket = mydep.find("[", colon)
+ if bracket == -1:
+ return mydep[colon+2:]
+ else:
+ return mydep[colon+2:bracket]
+ return None
+
+def remove_slot(mydep):
+ """
+ Removes dep components from the right side of an atom:
+ * slot
+ * use
+ * repo
+ And repo_name from the left side.
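+
+ Example usage (illustrative):
+ >>> remove_slot('dev-libs/foo:2[bar]')
+ 'dev-libs/foo'
+ >>> remove_slot('dev-libs/foo[bar]')
+ 'dev-libs/foo'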
+ """
+ colon = mydep.find(_slot_separator)
+ if colon != -1:
+ mydep = mydep[:colon]
+ else:
+ bracket = mydep.find("[")
+ if bracket != -1:
+ mydep = mydep[:bracket]
+ return mydep
+
+def dep_getusedeps( depend ):
+ """
+ Pull a listing of USE Dependencies out of a dep atom.
+
+ Example usage:
+ >>> dep_getusedeps('app-misc/test:3[foo,-bar]')
+ ('foo', '-bar')
+
+ @param depend: The depstring to process
+ @type depend: String
+ @rtype: Tuple
+ @return: Tuple of use flags (or an empty tuple if no flags exist)
+ """
+ use_list = []
+ open_bracket = depend.find('[')
+ # -1 = failure (think c++ string::npos)
+ comma_separated = False
+ bracket_count = 0
+ while( open_bracket != -1 ):
+ bracket_count += 1
+ if bracket_count > 1:
+ raise InvalidAtom(_("USE Dependency with more "
+ "than one set of brackets: %s") % (depend,))
+ close_bracket = depend.find(']', open_bracket )
+ if close_bracket == -1:
+ raise InvalidAtom(_("USE Dependency with no closing bracket: %s") % depend )
+ use = depend[open_bracket + 1: close_bracket]
+ # foo[1:1] may return '' instead of None, we don't want '' in the result
+ if not use:
+ raise InvalidAtom(_("USE Dependency with "
+ "no use flag ([]): %s") % depend )
+ if not comma_separated:
+ comma_separated = "," in use
+
+ if comma_separated and bracket_count > 1:
+ raise InvalidAtom(_("USE Dependency contains a mixture of "
+ "comma and bracket separators: %s") % depend )
+
+ if comma_separated:
+ for x in use.split(","):
+ if x:
+ use_list.append(x)
+ else:
+ raise InvalidAtom(_("USE Dependency with no use "
+ "flag next to comma: %s") % depend )
+ else:
+ use_list.append(use)
+
+ # Find next use flag
+ open_bracket = depend.find( '[', open_bracket+1 )
+ return tuple(use_list)
+
+def isvalidatom(atom, allow_blockers=False, allow_wildcard=False,
+ allow_repo=False, eapi=None, allow_build_id=False):
+ """
+ Check to see if a depend atom is valid
+
+ Example usage:
+ >>> isvalidatom('media-libs/test-3.0')
+ False
+ >>> isvalidatom('>=media-libs/test-3.0')
+ True
+
+ @param atom: The depend atom to check against
+ @type atom: String or Atom
+ @rtype: Boolean
+ @return: One of the following:
+ 1) False if the atom is invalid
+ 2) True if the atom is valid
+ """
+
+ if eapi is not None and isinstance(atom, Atom) and atom.eapi != eapi:
+ # We'll construct a new atom with the given eapi.
+ atom = _unicode(atom)
+
+ try:
+ if not isinstance(atom, Atom):
+ atom = Atom(atom, allow_wildcard=allow_wildcard,
+ allow_repo=allow_repo, eapi=eapi,
+ allow_build_id=allow_build_id)
+ if not allow_blockers and atom.blocker:
+ return False
+ return True
+ except InvalidAtom:
+ return False
+
+def isjustname(mypkg):
+ """
+ Checks to see if the atom is only the package name (no version parts).
+
+ Example usage:
+ >>> isjustname('=media-libs/test-3.0')
+ False
+ >>> isjustname('media-libs/test')
+ True
+
+ @param mypkg: The package atom to check
+ @type mypkg: String or Atom
+ @rtype: Boolean
+ @return: One of the following:
+ 1) False if the package string is not just the package name
+ 2) True if it is
+ """
+ try:
+ if not isinstance(mypkg, Atom):
+ mypkg = Atom(mypkg)
+ return mypkg == mypkg.cp
+ except InvalidAtom:
+ pass
+
+ for x in mypkg.split('-')[-2:]:
+ if ververify(x):
+ return False
+ return True
+
+def isspecific(mypkg):
+ """
+ Checks to see if a package is in =category/package-version or
+ package-version format.
+
+ Example usage:
+ >>> isspecific('media-libs/test')
+ False
+ >>> isspecific('=media-libs/test-3.0')
+ True
+
+ @param mypkg: The package depstring to check against
+ @type mypkg: String
+ @rtype: Boolean
+ @return: One of the following:
+ 1) False if the package string is not specific
+ 2) True if it is
+ """
+ try:
+ if not isinstance(mypkg, Atom):
+ mypkg = Atom(mypkg)
+ return mypkg != mypkg.cp
+ except InvalidAtom:
+ pass
+
+ # Fall back to legacy code for backward compatibility.
+ return not isjustname(mypkg)
+
+def dep_getkey(mydep):
+ """
+ Return the category/package-name of a depstring.
+
+ Example usage:
+ >>> dep_getkey('=media-libs/test-3.0')
+ 'media-libs/test'
+
+ @param mydep: The depstring to retrieve the category/package-name of
+ @type mydep: String
+ @rtype: String
+ @return: The package category/package-name
+ """
+ if not isinstance(mydep, Atom):
+ mydep = Atom(mydep, allow_wildcard=True, allow_repo=True)
+
+ return mydep.cp
+
+def match_to_list(mypkg, mylist):
+ """
+ Searches the list for entries that match the package.
+
+ @param mypkg: The package atom to match
+ @type mypkg: String
+ @param mylist: The list of package atoms to compare against
+ @type mylist: List
+ @rtype: List
+ @return: A unique list of package atoms that match the given package atom
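+
+ Example usage (illustrative; note that duplicate entries are returned only once):
+ >>> match_to_list('dev-libs/foo-1.0', ['>=dev-libs/foo-1.0', 'dev-libs/bar', '>=dev-libs/foo-1.0'])
+ ['>=dev-libs/foo-1.0']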
+ """
+ matches = set()
+ result = []
+ pkgs = [mypkg]
+ for x in mylist:
+ if x not in matches and match_from_list(x, pkgs):
+ matches.add(x)
+ result.append(x)
+ return result
+
+def best_match_to_list(mypkg, mylist):
+ """
+ Returns the most specific entry that matches the package given.
+
+ @param mypkg: The package atom to check
+ @type mypkg: String
+ @param mylist: The list of package atoms to check against
+ @type mylist: List
+ @rtype: String
+ @return: The package atom which best matches given the following ordering:
+ - =cpv 6
+ - ~cpv 5
+ - =cpv* 4
+ - cp:slot 3
+ - >cpv 2
+ - <cpv 2
+ - >=cpv 2
+ - <=cpv 2
+ - cp 1
+ - cp:slot with extended syntax 0
+ - cp with extended syntax -1
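+
+ Example usage (illustrative; '=' is more specific than '>='):
+ >>> print(best_match_to_list('dev-libs/foo-1.2', [Atom('>=dev-libs/foo-1'), Atom('=dev-libs/foo-1.2')]))
+ =dev-libs/foo-1.2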
+ """
+ operator_values = {'=':6, '~':5, '=*':4,
+ '>':2, '<':2, '>=':2, '<=':2, None:1}
+ maxvalue = -99
+ bestm = None
+ mypkg_cpv = None
+ for x in match_to_list(mypkg, mylist):
+ if x.extended_syntax:
+ if x.operator == '=*':
+ if maxvalue < 0:
+ maxvalue = 0
+ bestm = x
+ elif x.slot is not None:
+ if maxvalue < -1:
+ maxvalue = -1
+ bestm = x
+ else:
+ if maxvalue < -2:
+ maxvalue = -2
+ bestm = x
+ continue
+ if dep_getslot(x) is not None:
+ if maxvalue < 3:
+ maxvalue = 3
+ bestm = x
+ op_val = operator_values[x.operator]
+ if op_val > maxvalue:
+ maxvalue = op_val
+ bestm = x
+ elif op_val == maxvalue and op_val == 2:
+ # For >, <, >=, and <=, the one with the version
+ # closest to mypkg is the best match.
+ if mypkg_cpv is None:
+ try:
+ mypkg_cpv = mypkg.cpv
+ except AttributeError:
+ mypkg_cpv = _pkg_str(remove_slot(mypkg))
+ if bestm.cpv == mypkg_cpv or bestm.cpv == x.cpv:
+ pass
+ elif x.cpv == mypkg_cpv:
+ bestm = x
+ else:
+ # Sort the cpvs to find the one closest to mypkg_cpv
+ cpv_list = [bestm.cpv, mypkg_cpv, x.cpv]
+ def cmp_cpv(cpv1, cpv2):
+ return vercmp(cpv1.version, cpv2.version)
+ cpv_list.sort(key=cmp_sort_key(cmp_cpv))
+ if cpv_list[0] is mypkg_cpv or cpv_list[-1] is mypkg_cpv:
+ if cpv_list[1] is x.cpv:
+ bestm = x
+ else:
+ # TODO: handle the case where mypkg_cpv is in the middle
+ pass
+
+ return bestm
+
+def match_from_list(mydep, candidate_list):
+ """
+ Searches the list for entries that match the package.
+
+ @param mydep: The package atom to match
+ @type mydep: String
+ @param candidate_list: The list of package atoms to compare against
+ @type candidate_list: List
+ @rtype: List
+ @return: A list of package atoms that match the given package atom
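+
+ Example usage (illustrative):
+ >>> match_from_list('>=dev-libs/foo-1.1', ['dev-libs/foo-1.0', 'dev-libs/foo-1.2'])
+ ['dev-libs/foo-1.2']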
+ """
+
+ if not candidate_list:
+ return []
+
+ if "!" == mydep[:1]:
+ if "!" == mydep[1:2]:
+ mydep = mydep[2:]
+ else:
+ mydep = mydep[1:]
+ if not isinstance(mydep, Atom):
+ mydep = Atom(mydep, allow_wildcard=True, allow_repo=True)
+
+ mycpv = mydep.cpv
+ mycpv_cps = catpkgsplit(mycpv) # Can be None if not specific
+ slot = mydep.slot
+ build_id = mydep.build_id
+
+ if not mycpv_cps:
+ cat, pkg = catsplit(mycpv)
+ ver = None
+ rev = None
+ else:
+ cat, pkg, ver, rev = mycpv_cps
+ if mydep == mycpv:
+ raise KeyError(_("Specific key requires an operator"
+ " (%s) (try adding an '=')") % (mydep))
+
+ if ver and rev:
+ operator = mydep.operator
+ if not operator:
+ writemsg(_("!!! Invalid atom: %s\n") % mydep, noiselevel=-1)
+ return []
+ else:
+ operator = None
+
+ mylist = []
+
+ if mydep.extended_syntax:
+
+ for x in candidate_list:
+ cp = getattr(x, "cp", None)
+ if cp is None:
+ mysplit = catpkgsplit(remove_slot(x))
+ if mysplit is not None:
+ cp = mysplit[0] + '/' + mysplit[1]
+
+ if cp is None:
+ continue
+
+ if cp == mycpv or extended_cp_match(mydep.cp, cp):
+ mylist.append(x)
+
+ if mylist and mydep.operator == "=*":
+
+ candidate_list = mylist
+ mylist = []
+ # Currently, only \*\w+\* is supported.
+ ver = mydep.version[1:-1]
+
+ for x in candidate_list:
+ x_ver = getattr(x, "version", None)
+ if x_ver is None:
+ xs = catpkgsplit(remove_slot(x))
+ if xs is None:
+ continue
+ x_ver = "-".join(xs[-2:])
+ if ver in x_ver:
+ mylist.append(x)
+
+ elif operator is None:
+ for x in candidate_list:
+ cp = getattr(x, "cp", None)
+ if cp is None:
+ mysplit = catpkgsplit(remove_slot(x))
+ if mysplit is not None:
+ cp = mysplit[0] + '/' + mysplit[1]
+
+ if cp is None:
+ continue
+
+ if cp == mydep.cp:
+ mylist.append(x)
+
+ elif operator == "=": # Exact match
+ for x in candidate_list:
+ xcpv = getattr(x, "cpv", None)
+ if xcpv is None:
+ xcpv = remove_slot(x)
+ if not cpvequal(xcpv, mycpv):
+ continue
+ if (build_id is not None and
+ getattr(xcpv, "build_id", None) != build_id):
+ continue
+ mylist.append(x)
+
+ elif operator == "=*": # glob match
+ # XXX: Nasty special casing for leading zeros
+ # Required as =* is a literal prefix match, so can't
+ # use vercmp
+ myver = mycpv_cps[2].lstrip("0")
+ if not myver or not myver[0].isdigit():
+ myver = "0"+myver
+ if myver == mycpv_cps[2]:
+ mycpv_cmp = mycpv
+ else:
+ # Use replace to preserve the revision part if it exists
+ # (mycpv_cps[3] can't be trusted because it contains r0
+ # even when the input has no revision part).
+ mycpv_cmp = mycpv.replace(
+ mydep.cp + "-" + mycpv_cps[2],
+ mydep.cp + "-" + myver, 1)
+ for x in candidate_list:
+ try:
+ x.cp
+ except AttributeError:
+ try:
+ pkg = _pkg_str(remove_slot(x))
+ except InvalidData:
+ continue
+ else:
+ pkg = x
+
+ xs = pkg.cpv_split
+ myver = xs[2].lstrip("0")
+ if not myver or not myver[0].isdigit():
+ myver = "0"+myver
+ if myver == xs[2]:
+ xcpv = pkg.cpv
+ else:
+ # Use replace to preserve the revision part if it exists.
+ xcpv = pkg.cpv.replace(
+ pkg.cp + "-" + xs[2],
+ pkg.cp + "-" + myver, 1)
+ if xcpv.startswith(mycpv_cmp):
+ # =* glob matches only on boundaries between version parts,
+ # so 1* does not match 10 (bug 560466).
+ next_char = xcpv[len(mycpv_cmp):len(mycpv_cmp)+1]
+ if (not next_char or next_char in '._-' or
+ mycpv_cmp[-1].isdigit() != next_char.isdigit()):
+ mylist.append(x)
+
+ elif operator == "~": # version, any revision, match
+ for x in candidate_list:
+ xs = getattr(x, "cpv_split", None)
+ if xs is None:
+ xs = catpkgsplit(remove_slot(x))
+ if xs is None:
+ raise InvalidData(x)
+ if not cpvequal(xs[0]+"/"+xs[1]+"-"+xs[2], mycpv_cps[0]+"/"+mycpv_cps[1]+"-"+mycpv_cps[2]):
+ continue
+ if xs[2] != ver:
+ continue
+ mylist.append(x)
+
+ elif operator in [">", ">=", "<", "<="]:
+ for x in candidate_list:
+ if hasattr(x, 'cp'):
+ pkg = x
+ else:
+ try:
+ pkg = _pkg_str(remove_slot(x))
+ except InvalidData:
+ continue
+
+ if pkg.cp != mydep.cp:
+ continue
+ try:
+ result = vercmp(pkg.version, mydep.version)
+ except ValueError: # pkgcmp may return ValueError during int() conversion
+ writemsg(_("\nInvalid package name: %s\n") % x, noiselevel=-1)
+ raise
+ if result is None:
+ continue
+ elif operator == ">":
+ if result > 0:
+ mylist.append(x)
+ elif operator == ">=":
+ if result >= 0:
+ mylist.append(x)
+ elif operator == "<":
+ if result < 0:
+ mylist.append(x)
+ elif operator == "<=":
+ if result <= 0:
+ mylist.append(x)
+ else:
+ raise KeyError(_("Unknown operator: %s") % mydep)
+ else:
+ raise KeyError(_("Unknown operator: %s") % mydep)
+
+ if mydep.slot is not None:
+ candidate_list = mylist
+ mylist = []
+ for x in candidate_list:
+ x_pkg = None
+ try:
+ x.cpv
+ except AttributeError:
+ xslot = dep_getslot(x)
+ if xslot is not None:
+ try:
+ x_pkg = _pkg_str(remove_slot(x), slot=xslot)
+ except InvalidData:
+ continue
+ else:
+ x_pkg = x
+
+ if x_pkg is None:
+ mylist.append(x)
+ else:
+ try:
+ x_pkg.slot
+ except AttributeError:
+ mylist.append(x)
+ else:
+ if _match_slot(mydep, x_pkg):
+ mylist.append(x)
+
+ if mydep.unevaluated_atom.use:
+ candidate_list = mylist
+ mylist = []
+ for x in candidate_list:
+ use = getattr(x, "use", None)
+ if use is not None:
+ if mydep.unevaluated_atom.use and \
+ not x.iuse.is_valid_flag(
+ mydep.unevaluated_atom.use.required):
+ continue
+
+ if mydep.use:
+ is_valid_flag = x.iuse.is_valid_flag
+ missing_enabled = frozenset(flag for flag in
+ mydep.use.missing_enabled if not is_valid_flag(flag))
+ missing_disabled = frozenset(flag for flag in
+ mydep.use.missing_disabled if not is_valid_flag(flag))
+
+ if mydep.use.enabled:
+ if any(f in mydep.use.enabled for f in missing_disabled):
+ continue
+ need_enabled = mydep.use.enabled.difference(use.enabled)
+ if need_enabled:
+ if any(f not in missing_enabled for f in need_enabled):
+ continue
+
+ if mydep.use.disabled:
+ if any(f in mydep.use.disabled for f in missing_enabled):
+ continue
+ need_disabled = mydep.use.disabled.intersection(use.enabled)
+ if need_disabled:
+ if any(f not in missing_disabled for f in need_disabled):
+ continue
+
+ mylist.append(x)
+
+ if mydep.repo:
+ candidate_list = mylist
+ mylist = []
+ for x in candidate_list:
+ repo = getattr(x, "repo", False)
+ if repo is False:
+ repo = dep_getrepo(x)
+ if repo is not None and repo != _unknown_repo and \
+ repo != mydep.repo:
+ continue
+ mylist.append(x)
+
+ return mylist
+
+def human_readable_required_use(required_use):
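+ """
+ Rewrite the operators in a REQUIRED_USE string in a more readable form,
+ e.g. (illustrative) '^^ ( foo bar )' becomes 'exactly-one-of ( foo bar )'.
+ """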
+ return required_use.replace("^^", "exactly-one-of").replace("||", "any-of").replace("??", "at-most-one-of")
+
+def get_required_use_flags(required_use, eapi=None):
+ """
+ Returns a set of use flags that are used in the given REQUIRED_USE string
+
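+ Example usage (illustrative):
+ >>> sorted(get_required_use_flags('foo? ( !bar ^^ ( baz qux ) )'))
+ ['bar', 'baz', 'foo', 'qux']
+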
+ @param required_use: REQUIRED_USE string
+ @type required_use: String
+ @rtype: Set
+ @return: Set of use flags that are used in the given REQUIRED_USE string
+ """
+
+ eapi_attrs = _get_eapi_attrs(eapi)
+ if eapi_attrs.required_use_at_most_one_of:
+ valid_operators = ("||", "^^", "??")
+ else:
+ valid_operators = ("||", "^^")
+
+ mysplit = required_use.split()
+ level = 0
+ stack = [[]]
+ need_bracket = False
+
+ used_flags = set()
+
+ def register_token(token):
+ if token.endswith("?"):
+ token = token[:-1]
+ if token.startswith("!"):
+ token = token[1:]
+ used_flags.add(token)
+
+ for token in mysplit:
+ if token == "(":
+ need_bracket = False
+ stack.append([])
+ level += 1
+ elif token == ")":
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+ if level > 0:
+ level -= 1
+ l = stack.pop()
+ ignore = False
+ if stack[level]:
+ if stack[level][-1] in valid_operators or \
+ (not isinstance(stack[level][-1], bool) and \
+ stack[level][-1][-1] == "?"):
+ ignore = True
+ stack[level].pop()
+ stack[level].append(True)
+
+ if l and not ignore:
+ stack[level].append(all(x for x in l))
+ else:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+ elif token in valid_operators:
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+ need_bracket = True
+ stack[level].append(token)
+ else:
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+
+ if token[-1] == "?":
+ need_bracket = True
+ stack[level].append(token)
+ else:
+ stack[level].append(True)
+
+ register_token(token)
+
+ if level != 0 or need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+
+ return frozenset(used_flags)
+
+class _RequiredUseLeaf(object):
+
+ __slots__ = ('_satisfied', '_token')
+
+ def __init__(self, token, satisfied):
+ self._token = token
+ self._satisfied = satisfied
+
+ def tounicode(self):
+ return self._token
+
+class _RequiredUseBranch(object):
+
+ __slots__ = ('_children', '_operator', '_parent', '_satisfied')
+
+ def __init__(self, operator=None, parent=None):
+ self._children = []
+ self._operator = operator
+ self._parent = parent
+ self._satisfied = False
+
+ def __bool__(self):
+ return self._satisfied
+
+ def tounicode(self):
+
+ include_parens = self._parent is not None
+ tokens = []
+ if self._operator is not None:
+ tokens.append(self._operator)
+
+ if include_parens:
+ tokens.append("(")
+
+ complex_nesting = False
+ node = self
+ while node is not None and not complex_nesting:
+ if node._operator in ("||", "^^", "??"):
+ complex_nesting = True
+ else:
+ node = node._parent
+
+ if complex_nesting:
+ for child in self._children:
+ tokens.append(child.tounicode())
+ else:
+ for child in self._children:
+ if not child._satisfied:
+ tokens.append(child.tounicode())
+
+ if include_parens:
+ tokens.append(")")
+
+ return " ".join(tokens)
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+def check_required_use(required_use, use, iuse_match, eapi=None):
+ """
+ Checks if the use flags listed in 'use' satisfy all
+ constraints specified in 'required_use'.
+
+ @param required_use: REQUIRED_USE string
+ @type required_use: String
+ @param use: Enabled use flags
+ @type use: List
+ @param iuse_match: Callable that takes a single flag argument and returns
+ True if the flag is matched, False otherwise.
+ @type iuse_match: Callable
+ @rtype: Bool
+ @return: Indicates if REQUIRED_USE constraints are satisfied
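+
+ Example usage (illustrative; assumes all referenced flags are in IUSE):
+ >>> bool(check_required_use('foo? ( bar )', ['foo', 'bar'], lambda flag: True))
+ True
+ >>> bool(check_required_use('^^ ( foo bar )', ['foo', 'bar'], lambda flag: True))
+ False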
+ """
+
+ eapi_attrs = _get_eapi_attrs(eapi)
+ if eapi_attrs.required_use_at_most_one_of:
+ valid_operators = ("||", "^^", "??")
+ else:
+ valid_operators = ("||", "^^")
+
+ def is_active(token):
+ if token.startswith("!"):
+ flag = token[1:]
+ is_negated = True
+ else:
+ flag = token
+ is_negated = False
+
+ if not flag or not iuse_match(flag):
+ if not eapi_attrs.required_use_at_most_one_of and flag == "?":
+ msg = _("Operator '??' is not supported with EAPI '%s'") \
+ % (eapi,)
+ e = InvalidData(msg, category='EAPI.incompatible')
+ raise InvalidDependString(msg, errors=(e,))
+ msg = _("USE flag '%s' is not in IUSE") \
+ % (flag,)
+ e = InvalidData(msg, category='IUSE.missing')
+ raise InvalidDependString(msg, errors=(e,))
+
+ return (flag in use and not is_negated) or \
+ (flag not in use and is_negated)
+
+ def is_satisfied(operator, argument):
+ if not argument and eapi_attrs.empty_groups_always_true:
+ #|| ( ) -> True
+ return True
+
+ if operator == "||":
+ return (True in argument)
+ elif operator == "^^":
+ return (argument.count(True) == 1)
+ elif operator == "??":
+ return (argument.count(True) <= 1)
+ elif operator[-1] == "?":
+ return (False not in argument)
+
+ mysplit = required_use.split()
+ level = 0
+ stack = [[]]
+ tree = _RequiredUseBranch()
+ node = tree
+ need_bracket = False
+
+ for token in mysplit:
+ if token == "(":
+ if not need_bracket:
+ child = _RequiredUseBranch(parent=node)
+ node._children.append(child)
+ node = child
+
+ need_bracket = False
+ stack.append([])
+ level += 1
+ elif token == ")":
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+ if level > 0:
+ level -= 1
+ l = stack.pop()
+ op = None
+ if stack[level]:
+ if stack[level][-1] in valid_operators:
+ op = stack[level].pop()
+ satisfied = is_satisfied(op, l)
+ stack[level].append(satisfied)
+ node._satisfied = satisfied
+
+ elif not isinstance(stack[level][-1], bool) and \
+ stack[level][-1][-1] == "?":
+ op = stack[level].pop()
+ if is_active(op[:-1]):
+ satisfied = is_satisfied(op, l)
+ stack[level].append(satisfied)
+ node._satisfied = satisfied
+ else:
+ node._satisfied = True
+ last_node = node._parent._children.pop()
+ if last_node is not node:
+ raise AssertionError(
+ "node is not last child of parent")
+ node = node._parent
+ continue
+
+ if op is None:
+ satisfied = False not in l
+ node._satisfied = satisfied
+ if l:
+ stack[level].append(satisfied)
+
+ if len(node._children) <= 1 or \
+ node._parent._operator not in valid_operators:
+ last_node = node._parent._children.pop()
+ if last_node is not node:
+ raise AssertionError(
+ "node is not last child of parent")
+ for child in node._children:
+ node._parent._children.append(child)
+ if isinstance(child, _RequiredUseBranch):
+ child._parent = node._parent
+
+ elif not node._children:
+ last_node = node._parent._children.pop()
+ if last_node is not node:
+ raise AssertionError(
+ "node is not last child of parent")
+
+ elif len(node._children) == 1 and op in valid_operators:
+ last_node = node._parent._children.pop()
+ if last_node is not node:
+ raise AssertionError(
+ "node is not last child of parent")
+ node._parent._children.append(node._children[0])
+ if isinstance(node._children[0], _RequiredUseBranch):
+ node._children[0]._parent = node._parent
+ node = node._children[0]
+ if node._operator is None and \
+ node._parent._operator not in valid_operators:
+ last_node = node._parent._children.pop()
+ if last_node is not node:
+ raise AssertionError(
+ "node is not last child of parent")
+ for child in node._children:
+ node._parent._children.append(child)
+ if isinstance(child, _RequiredUseBranch):
+ child._parent = node._parent
+
+ node = node._parent
+ else:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+ elif token in valid_operators:
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+ need_bracket = True
+ stack[level].append(token)
+ child = _RequiredUseBranch(operator=token, parent=node)
+ node._children.append(child)
+ node = child
+ else:
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+
+ if token[-1] == "?":
+ need_bracket = True
+ stack[level].append(token)
+ child = _RequiredUseBranch(operator=token, parent=node)
+ node._children.append(child)
+ node = child
+ else:
+ satisfied = is_active(token)
+ stack[level].append(satisfied)
+ node._children.append(_RequiredUseLeaf(token, satisfied))
+
+ if level != 0 or need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % required_use)
+
+ tree._satisfied = False not in stack[0]
+ return tree
+
+def extract_affecting_use(mystr, atom, eapi=None):
+ """
+ Take a dep string and an atom and return the use flags
+ that decide if the given atom is in effect.
+
+ Example usage:
+ >>> extract_affecting_use('sasl? ( dev-libs/cyrus-sasl ) \
+ !minimal? ( cxx? ( dev-libs/cyrus-sasl ) )', 'dev-libs/cyrus-sasl')
+ {'cxx', 'minimal', 'sasl'}
+
+ @param mystr: The dependency string
+ @type mystr: String
+ @param atom: The atom to get into effect
+ @type atom: String
+ @rtype: Set of strings
+ @return: Set of use flags affecting given atom
+ """
+ useflag_re = _get_useflag_re(eapi)
+ mysplit = mystr.split()
+ level = 0
+ stack = [[]]
+ need_bracket = False
+ affecting_use = set()
+
+ def flag(conditional):
+ if conditional[0] == "!":
+ flag = conditional[1:-1]
+ else:
+ flag = conditional[:-1]
+
+ if useflag_re.match(flag) is None:
+ raise InvalidDependString(
+ _("invalid use flag '%s' in conditional '%s'") % \
+ (flag, conditional))
+
+ return flag
+
+ for token in mysplit:
+ if token == "(":
+ need_bracket = False
+ stack.append([])
+ level += 1
+ elif token == ")":
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+ if level > 0:
+ level -= 1
+ l = stack.pop()
+ is_single = (len(l) == 1 or (len(l)==2 and (l[0] == "||" or l[0][-1] == "?")))
+
+ def ends_in_any_of_dep(k):
+ return k>=0 and stack[k] and stack[k][-1] == "||"
+
+ def ends_in_operator(k):
+ return k>=0 and stack[k] and (stack[k][-1] == "||" or stack[k][-1][-1] == "?")
+
+ def special_append():
+ """
+ Use extend instead of append if possible. This kills all redundant brackets.
+ """
+ if is_single and (not stack[level] or not stack[level][-1][-1] == "?"):
+ if len(l) == 1 and isinstance(l[0], list):
+ # l = [[...]]
+ stack[level].extend(l[0])
+ else:
+ stack[level].extend(l)
+ else:
+ stack[level].append(l)
+
+ if l:
+ if not ends_in_any_of_dep(level-1) and not ends_in_operator(level):
+ #Optimize: ( ( ... ) ) -> ( ... ). Make sure there is no '||' hanging around.
+ stack[level].extend(l)
+ elif not stack[level]:
+ #An '||' in the level above forces us to keep the brackets.
+ special_append()
+ elif len(l) == 1 and ends_in_any_of_dep(level):
+ #Optimize: || ( A ) -> A
+ stack[level].pop()
+ special_append()
+ elif len(l) == 2 and (l[0] == "||" or l[0][-1] == "?") and stack[level][-1] in (l[0], "||"):
+ #Optimize: || ( || ( ... ) ) -> || ( ... )
+ # foo? ( foo? ( ... ) ) -> foo? ( ... )
+ # || ( foo? ( ... ) ) -> foo? ( ... )
+ stack[level].pop()
+ special_append()
+ if l[0][-1] == "?":
+ affecting_use.add(flag(l[0]))
+ else:
+ if stack[level] and stack[level][-1][-1] == "?":
+ affecting_use.add(flag(stack[level][-1]))
+ special_append()
+ else:
+ if stack[level] and (stack[level][-1] == "||" or stack[level][-1][-1] == "?"):
+ stack[level].pop()
+ else:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+ elif token == "||":
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+ need_bracket = True
+ stack[level].append(token)
+ else:
+ if need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+
+ if token[-1] == "?":
+ need_bracket = True
+ stack[level].append(token)
+ elif token == atom:
+ stack[level].append(token)
+
+ if level != 0 or need_bracket:
+ raise InvalidDependString(
+ _("malformed syntax: '%s'") % mystr)
+
+ return affecting_use
+
+def extract_unpack_dependencies(src_uri, unpackers):
+ """
+ Return unpack dependencies string for given SRC_URI string.
+
+ @param src_uri: SRC_URI string
+ @type src_uri: String
+ @param unpackers: Dictionary mapping archive suffixes to dependency strings
+ @type unpackers: Dictionary
+ @rtype: String
+ @return: Dependency string specifying packages required to unpack archives.
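+
+ Example usage (illustrative; hypothetical unpackers mapping):
+ >>> extract_unpack_dependencies('mirror://foo/bar-1.0.tar.bz2', {'.tar.bz2': 'app-arch/bzip2'})
+ 'app-arch/bzip2'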
+ """
+ src_uri = src_uri.split()
+
+ depend = []
+ for i in range(len(src_uri)):
+ if src_uri[i][-1] == "?" or src_uri[i] in ("(", ")"):
+ depend.append(src_uri[i])
+ elif (i+1 < len(src_uri) and src_uri[i+1] == "->") or src_uri[i] == "->":
+ continue
+ else:
+ for suffix in sorted(unpackers, key=lambda x: len(x), reverse=True):
+ suffix = suffix.lower()
+ if src_uri[i].lower().endswith(suffix):
+ depend.append(unpackers[suffix])
+ break
+
+ while True:
+ cleaned_depend = depend[:]
+ for i in range(len(cleaned_depend)):
+ if cleaned_depend[i] is None:
+ continue
+ elif cleaned_depend[i] == "(" and cleaned_depend[i+1] == ")":
+ cleaned_depend[i] = None
+ cleaned_depend[i+1] = None
+ elif cleaned_depend[i][-1] == "?" and cleaned_depend[i+1] == "(" and cleaned_depend[i+2] == ")":
+ cleaned_depend[i] = None
+ cleaned_depend[i+1] = None
+ cleaned_depend[i+2] = None
+ if depend == cleaned_depend:
+ break
+ else:
+ depend = [x for x in cleaned_depend if x is not None]
+
+ return " ".join(depend)
diff --git a/lib/portage/dep/_dnf.py b/lib/portage/dep/_dnf.py
new file mode 100644
index 000000000..59657fd6a
--- /dev/null
+++ b/lib/portage/dep/_dnf.py
@@ -0,0 +1,90 @@
+# Copyright 2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import itertools
+
+
+def dnf_convert(dep_struct):
+ """
+ Convert dep_struct to disjunctive normal form (DNF), where dep_struct
+ is either a conjunction or disjunction of the form produced by
+ use_reduce(opconvert=True).
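+
+ Example (illustrative):
+ >>> dnf_convert(['a', ['||', 'b', 'c']])
+ [['||', ['a', 'b'], ['a', 'c']]]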
+ """
+ # Normalize input to have a top-level conjunction.
+	if isinstance(dep_struct, list):
+		if dep_struct and dep_struct[0] == '||':
+			dep_struct = [dep_struct]
+	else:
+		dep_struct = [dep_struct]
+
+ conjunction = []
+ disjunctions = []
+
+ for x in dep_struct:
+ if isinstance(x, list):
+ assert x and x[0] == '||', \
+ 'Normalization error, nested conjunction found in %s' % (dep_struct,)
+ if any(isinstance(element, list) for element in x):
+ x_dnf = ['||']
+ for element in x[1:]:
+ if isinstance(element, list):
+ # Due to normalization, a disjunction must not be
+ # nested directly in another disjunction, so this
+ # must be a conjunction.
+ assert element, 'Normalization error, empty conjunction found in %s' % (x,)
+ assert element[0] != '||', \
+ 'Normalization error, nested disjunction found in %s' % (x,)
+ element = dnf_convert(element)
+ if contains_disjunction(element):
+ assert (len(element) == 1 and
+ element[0] and element[0][0] == '||'), \
+ 'Normalization error, expected single disjunction in %s' % (element,)
+ x_dnf.extend(element[0][1:])
+ else:
+ x_dnf.append(element)
+ else:
+ x_dnf.append(element)
+ x = x_dnf
+ disjunctions.append(x)
+ else:
+ conjunction.append(x)
+
+ if disjunctions and (conjunction or len(disjunctions) > 1):
+ dnf_form = ['||']
+ for x in itertools.product(*[x[1:] for x in disjunctions]):
+ normalized = conjunction[:]
+ for element in x:
+ if isinstance(element, list):
+ normalized.extend(element)
+ else:
+ normalized.append(element)
+ dnf_form.append(normalized)
+ result = [dnf_form]
+ else:
+ result = conjunction + disjunctions
+
+ return result
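As a sketch of the conversion, using plain strings in place of Atom instances, a conjunction that contains two || groups is multiplied out into a single top-level disjunction:

    dep = ['a', ['||', 'b', 'c'], ['||', 'd', 'e']]
    # dnf_convert(dep) ->
    # [['||', ['a', 'b', 'd'], ['a', 'b', 'e'], ['a', 'c', 'd'], ['a', 'c', 'e']]]
    print(dnf_convert(dep))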
+
+
+def contains_disjunction(dep_struct):
+ """
+ Search for a disjunction contained in dep_struct, where dep_struct
+ is either a conjunction or disjunction of the form produced by
+ use_reduce(opconvert=True). If dep_struct is a disjunction, then
+ this only returns True if there is a nested disjunction. Due to
+ normalization, recursion is only needed when dep_struct is a
+ disjunction containing a conjunction. If dep_struct is a conjunction,
+ then it is assumed that normalization has elevated any nested
+ disjunctions to the top-level.
+ """
+ is_disjunction = dep_struct and dep_struct[0] == '||'
+ for x in dep_struct:
+ if isinstance(x, list):
+ assert x, 'Normalization error, empty conjunction found in %s' % (dep_struct,)
+ if x[0] == '||':
+ return True
+ elif is_disjunction and contains_disjunction(x):
+ return True
+ return False
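A corresponding sketch for contains_disjunction, again with plain strings standing in for atoms:

    print(contains_disjunction(['a', ['||', 'b', 'c']]))               # True
    print(contains_disjunction(['||', 'a', ['b', 'c']]))               # False, no nested disjunction
    print(contains_disjunction(['||', 'a', ['b', ['||', 'c', 'd']]]))  # True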
diff --git a/lib/portage/dep/_slot_operator.py b/lib/portage/dep/_slot_operator.py
new file mode 100644
index 000000000..bae94b30a
--- /dev/null
+++ b/lib/portage/dep/_slot_operator.py
@@ -0,0 +1,122 @@
+# Copyright 2012-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+from portage.dep import Atom, paren_enclose, use_reduce
+from portage.eapi import _get_eapi_attrs
+from portage.exception import InvalidData
+from _emerge.Package import Package
+
+def strip_slots(dep_struct):
+ """
+ Search dep_struct for any slot := operators and remove the
+ slot/sub-slot part, while preserving the operator. The result
+ is suitable for --changed-deps comparisons.
+ """
+ for i, x in enumerate(dep_struct):
+ if isinstance(x, list):
+ strip_slots(x)
+ elif (isinstance(x, Atom) and
+ x.slot_operator == "=" and x.slot is not None):
+ dep_struct[i] = x.with_slot("=")
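For example, a := atom carrying a concrete slot/sub-slot pair is reduced back to a bare := operator; a minimal sketch, assuming a working portage install so that portage.dep.Atom is importable:

    from portage.dep import Atom

    deps = [Atom('dev-libs/foo:2/2.30='), Atom('dev-libs/bar')]
    strip_slots(deps)
    # The slot/sub-slot pair is dropped while the := operator is kept.
    print(deps)  # ['dev-libs/foo:=', 'dev-libs/bar']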
+
+def find_built_slot_operator_atoms(pkg):
+ atoms = {}
+ for k in Package._dep_keys:
+ atom_list = list(_find_built_slot_operator(use_reduce(pkg._metadata[k],
+ uselist=pkg.use.enabled, eapi=pkg.eapi,
+ token_class=Atom)))
+ if atom_list:
+ atoms[k] = atom_list
+ return atoms
+
+def _find_built_slot_operator(dep_struct):
+ for x in dep_struct:
+ if isinstance(x, list):
+ for atom in _find_built_slot_operator(x):
+ yield atom
+ elif isinstance(x, Atom) and x.slot_operator_built:
+ yield x
+
+def ignore_built_slot_operator_deps(dep_struct):
+ for i, x in enumerate(dep_struct):
+ if isinstance(x, list):
+ ignore_built_slot_operator_deps(x)
+ elif isinstance(x, Atom) and x.slot_operator_built:
+ # There's no way of knowing here whether the SLOT
+ # part of the slot/sub-slot pair should be kept, so we
+ # ignore both parts.
+ dep_struct[i] = x.without_slot
+
+def evaluate_slot_operator_equal_deps(settings, use, trees):
+
+ metadata = settings.configdict['pkg']
+ eapi = metadata['EAPI']
+ eapi_attrs = _get_eapi_attrs(eapi)
+ running_vardb = trees[trees._running_eroot]["vartree"].dbapi
+ target_vardb = trees[trees._target_eroot]["vartree"].dbapi
+ vardbs = [target_vardb]
+ deps = {}
+ for k in Package._dep_keys:
+ deps[k] = use_reduce(metadata[k],
+ uselist=use, eapi=eapi, token_class=Atom)
+
+ for k in Package._runtime_keys:
+ _eval_deps(deps[k], vardbs)
+
+ if eapi_attrs.bdepend:
+ _eval_deps(deps["BDEPEND"], [running_vardb])
+ _eval_deps(deps["DEPEND"], [target_vardb])
+ elif eapi_attrs.hdepend:
+ _eval_deps(deps["HDEPEND"], [running_vardb])
+ _eval_deps(deps["DEPEND"], [target_vardb])
+ else:
+ if running_vardb is not target_vardb:
+ vardbs.append(running_vardb)
+ _eval_deps(deps["DEPEND"], vardbs)
+
+ result = {}
+ for k, v in deps.items():
+ result[k] = paren_enclose(v)
+
+ return result
+
+def _eval_deps(dep_struct, vardbs):
+ # TODO: we should use better || () handling, i.e. || ( A:= B:= ) with both A
+ # and B installed should record subslot on A only since the package is
+ # supposed to link against that anyway, and we have no guarantee that B
+ # has matching ABI.
+
+ for i, x in enumerate(dep_struct):
+ if isinstance(x, list):
+ _eval_deps(x, vardbs)
+ elif isinstance(x, Atom) and x.slot_operator == "=":
+ for vardb in vardbs:
+ best_version = vardb.match(x)
+ if best_version:
+ best_version = best_version[-1]
+ try:
+ best_version = \
+ vardb._pkg_str(best_version, None)
+ except (KeyError, InvalidData):
+ pass
+ else:
+ slot_part = "%s/%s=" % \
+ (best_version.slot, best_version.sub_slot)
+ x = x.with_slot(slot_part)
+ dep_struct[i] = x
+ break
+ else:
+ # this dep could not be resolved, possibilities include:
+ # 1. unsatisfied branch of || () dep,
+ # 2. package.provided,
+ # 3. --nodeps.
+ #
+ # just leave it as-is for now. this does not cause any special
+ # behavior while keeping the information in vdb -- necessary
+ # e.g. for @changed-deps to work properly.
+ #
+ # TODO: make it actually cause subslot rebuilds when switching
+ # || () branches.
+ pass
diff --git a/lib/portage/dep/dep_check.py b/lib/portage/dep/dep_check.py
new file mode 100644
index 000000000..2896e2389
--- /dev/null
+++ b/lib/portage/dep/dep_check.py
@@ -0,0 +1,961 @@
+# Copyright 2010-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = ['dep_check', 'dep_eval', 'dep_wordreduce', 'dep_zapdeps']
+
+import collections
+import itertools
+import logging
+import operator
+
+import portage
+from portage.dep import Atom, match_from_list, use_reduce
+from portage.dep._dnf import (
+ dnf_convert as _dnf_convert,
+ contains_disjunction as _contains_disjunction,
+)
+from portage.exception import InvalidDependString, ParseError
+from portage.localization import _
+from portage.util import writemsg, writemsg_level
+from portage.util.digraph import digraph
+from portage.util.SlotObject import SlotObject
+from portage.versions import vercmp, _pkg_str
+
+def _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings, myroot="/",
+ trees=None, use_mask=None, use_force=None, **kwargs):
+ """
+ In order to solve bug #141118, recursively expand new-style virtuals so
+ as to collapse one or more levels of indirection, generating an expanded
+ search space. In dep_zapdeps, new-style virtuals will be assigned
+ zero cost regardless of whether or not they are currently installed. Virtual
+ blockers are supported but only when the virtual expands to a single
+ atom because it wouldn't necessarily make sense to block all the components
+ of a compound virtual. When more than one new-style virtual is matched,
+ the matches are sorted from highest to lowest versions and the atom is
+ expanded to || ( highest match ... lowest match ).
+
+ The result is normalized in the same way as use_reduce, having a top-level
+ conjunction, and no redundant nested lists.
+ """
+ newsplit = []
+ mytrees = trees[myroot]
+ portdb = mytrees["porttree"].dbapi
+ pkg_use_enabled = mytrees.get("pkg_use_enabled")
+ # Atoms are stored in the graph as (atom, id(atom)) tuples
+ # since each atom is considered to be a unique entity. For
+ # example, atoms that appear identical may behave differently
+ # in USE matching, depending on their unevaluated form. Also,
+ # specially generated virtual atoms may appear identical while
+ # having different _orig_atom attributes.
+ atom_graph = mytrees.get("atom_graph")
+ parent = mytrees.get("parent")
+ virt_parent = mytrees.get("virt_parent")
+ graph_parent = None
+ if parent is not None:
+ if virt_parent is not None:
+ graph_parent = virt_parent
+ parent = virt_parent
+ else:
+ graph_parent = parent
+ repoman = not mysettings.local_config
+ if kwargs["use_binaries"]:
+ portdb = trees[myroot]["bintree"].dbapi
+ pprovideddict = mysettings.pprovideddict
+ myuse = kwargs["myuse"]
+ is_disjunction = mysplit and mysplit[0] == '||'
+ for x in mysplit:
+ if x == "||":
+ newsplit.append(x)
+ continue
+ elif isinstance(x, list):
+ assert x, 'Normalization error, empty conjunction found in %s' % (mysplit,)
+ if is_disjunction:
+ assert x[0] != '||', \
+ 'Normalization error, nested disjunction found in %s' % (mysplit,)
+ else:
+ assert x[0] == '||', \
+ 'Normalization error, nested conjunction found in %s' % (mysplit,)
+ x_exp = _expand_new_virtuals(x, edebug, mydbapi,
+ mysettings, myroot=myroot, trees=trees, use_mask=use_mask,
+ use_force=use_force, **kwargs)
+ if is_disjunction:
+ if len(x_exp) == 1:
+ x = x_exp[0]
+ if isinstance(x, list):
+ # Due to normalization, a conjunction must not be
+ # nested directly in another conjunction, so this
+ # must be a disjunction.
+ assert x and x[0] == '||', \
+ 'Normalization error, nested conjunction found in %s' % (x_exp,)
+ newsplit.extend(x[1:])
+ else:
+ newsplit.append(x)
+ else:
+ newsplit.append(x_exp)
+ else:
+ newsplit.extend(x_exp)
+ continue
+
+ if not isinstance(x, Atom):
+ raise ParseError(
+ _("invalid token: '%s'") % x)
+
+ if repoman:
+ x = x._eval_qa_conditionals(use_mask, use_force)
+
+ mykey = x.cp
+ if not mykey.startswith("virtual/"):
+ newsplit.append(x)
+ if atom_graph is not None:
+ atom_graph.add((x, id(x)), graph_parent)
+ continue
+
+ if x.blocker:
+ # Virtual blockers are no longer expanded here since
+ # the un-expanded virtual atom is more useful for
+ # maintaining a cache of blocker atoms.
+ newsplit.append(x)
+ if atom_graph is not None:
+ atom_graph.add((x, id(x)), graph_parent)
+ continue
+
+ if repoman or not hasattr(portdb, 'match_pkgs') or \
+ pkg_use_enabled is None:
+ if portdb.cp_list(x.cp):
+ newsplit.append(x)
+ else:
+ a = []
+ myvartree = mytrees.get("vartree")
+ if myvartree is not None:
+ mysettings._populate_treeVirtuals_if_needed(myvartree)
+ mychoices = mysettings.getvirtuals().get(mykey, [])
+ for y in mychoices:
+ a.append(Atom(x.replace(x.cp, y.cp, 1)))
+ if not a:
+ newsplit.append(x)
+ elif is_disjunction:
+ newsplit.extend(a)
+ elif len(a) == 1:
+ newsplit.append(a[0])
+ else:
+ newsplit.append(['||'] + a)
+ continue
+
+ pkgs = []
+ # Ignore USE deps here, since otherwise we might not
+ # get any matches. Choices with correct USE settings
+ # will be preferred in dep_zapdeps().
+ matches = portdb.match_pkgs(x.without_use)
+ # Use descending order to prefer higher versions.
+ matches.reverse()
+ for pkg in matches:
+ # only use new-style matches
+ if pkg.cp.startswith("virtual/"):
+ pkgs.append(pkg)
+
+ mychoices = []
+ if not pkgs and not portdb.cp_list(x.cp):
+ myvartree = mytrees.get("vartree")
+ if myvartree is not None:
+ mysettings._populate_treeVirtuals_if_needed(myvartree)
+ mychoices = mysettings.getvirtuals().get(mykey, [])
+
+ if not (pkgs or mychoices):
+ # This one couldn't be expanded as a new-style virtual. Old-style
+ # virtuals have already been expanded by dep_virtual, so this one
+ # is unavailable and dep_zapdeps will identify it as such. The
+ # atom is not eliminated here since it may still represent a
+ # dependency that needs to be satisfied.
+ newsplit.append(x)
+ if atom_graph is not None:
+ atom_graph.add((x, id(x)), graph_parent)
+ continue
+
+ a = []
+ for pkg in pkgs:
+ virt_atom = '=' + pkg.cpv
+ if x.unevaluated_atom.use:
+ virt_atom += str(x.unevaluated_atom.use)
+ virt_atom = Atom(virt_atom)
+ if parent is None:
+ if myuse is None:
+ virt_atom = virt_atom.evaluate_conditionals(
+ mysettings.get("PORTAGE_USE", "").split())
+ else:
+ virt_atom = virt_atom.evaluate_conditionals(myuse)
+ else:
+ virt_atom = virt_atom.evaluate_conditionals(
+ pkg_use_enabled(parent))
+ else:
+ virt_atom = Atom(virt_atom)
+
+ # Allow the depgraph to map this atom back to the
+ # original, in order to avoid distortion in places
+ # like display or conflict resolution code.
+ virt_atom.__dict__['_orig_atom'] = x
+
+ # According to GLEP 37, RDEPEND is the only dependency
+ # type that is valid for new-style virtuals. Repoman
+ # should enforce this.
+ depstring = pkg._metadata['RDEPEND']
+ pkg_kwargs = kwargs.copy()
+ pkg_kwargs["myuse"] = pkg_use_enabled(pkg)
+ if edebug:
+ writemsg_level(_("Virtual Parent: %s\n") \
+ % (pkg,), noiselevel=-1, level=logging.DEBUG)
+ writemsg_level(_("Virtual Depstring: %s\n") \
+ % (depstring,), noiselevel=-1, level=logging.DEBUG)
+
+ # Set EAPI used for validation in dep_check() recursion.
+ mytrees["virt_parent"] = pkg
+
+ try:
+ mycheck = dep_check(depstring, mydbapi, mysettings,
+ myroot=myroot, trees=trees, **pkg_kwargs)
+ finally:
+ # Restore previous EAPI after recursion.
+ if virt_parent is not None:
+ mytrees["virt_parent"] = virt_parent
+ else:
+ del mytrees["virt_parent"]
+
+ if not mycheck[0]:
+ raise ParseError("%s: %s '%s'" % \
+ (pkg, mycheck[1], depstring))
+
+ # Replace the original atom "x" with "virt_atom" which refers
+ # to the specific version of the virtual whose deps we're
+ # expanding. The virt_atom._orig_atom attribute is used
+ # by depgraph to map virt_atom back to the original atom.
+ # We specifically exclude the original atom "x" from the
+ # expanded output here, since otherwise it could trigger
+ # incorrect dep_zapdeps behavior (see bug #597752).
+ mycheck[1].append(virt_atom)
+ a.append(mycheck[1])
+ if atom_graph is not None:
+ virt_atom_node = (virt_atom, id(virt_atom))
+ atom_graph.add(virt_atom_node, graph_parent)
+ atom_graph.add(pkg, virt_atom_node)
+ atom_graph.add((x, id(x)), graph_parent)
+
+ if not a and mychoices:
+ # Check for a virtual package.provided match.
+ for y in mychoices:
+ new_atom = Atom(x.replace(x.cp, y.cp, 1))
+ if match_from_list(new_atom,
+ pprovideddict.get(new_atom.cp, [])):
+ a.append(new_atom)
+ if atom_graph is not None:
+ atom_graph.add((new_atom, id(new_atom)), graph_parent)
+
+ if not a:
+ newsplit.append(x)
+ if atom_graph is not None:
+ atom_graph.add((x, id(x)), graph_parent)
+ elif is_disjunction:
+ newsplit.extend(a)
+ elif len(a) == 1:
+ newsplit.extend(a[0])
+ else:
+ newsplit.append(['||'] + a)
+
+ # For consistency with related functions like use_reduce, always
+ # normalize the result to have a top-level conjunction.
+ if is_disjunction:
+ newsplit = [newsplit]
+
+ return newsplit
+
+def dep_eval(deplist):
+ if not deplist:
+ return 1
+ if deplist[0]=="||":
+ #or list; we just need one "1"
+ for x in deplist[1:]:
+ if isinstance(x, list):
+ if dep_eval(x)==1:
+ return 1
+ elif x==1:
+ return 1
+ #XXX: unless there are no available atoms in the list
+ #in which case we need to assume that everything is
+ #okay as some ebuilds are relying on an old bug.
+ if len(deplist) == 1:
+ return 1
+ return 0
+ else:
+ for x in deplist:
+ if isinstance(x, list):
+ if dep_eval(x)==0:
+ return 0
+ elif x==0 or x==2:
+ return 0
+ return 1
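dep_eval operates on the reduced form produced by dep_wordreduce, where atoms have already been collapsed to truth values; a minimal sketch:

    print(dep_eval(['||', 0, 1]))       # 1, an || group needs only one satisfied member
    print(dep_eval([1, ['||', 0, 0]]))  # 0, a conjunction requires every member
    print(dep_eval(['||']))             # 1, an empty || group counts as satisfied (old-bug compatibility)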
+
+class _dep_choice(SlotObject):
+ __slots__ = ('atoms', 'slot_map', 'cp_map', 'all_available',
+ 'all_installed_slots', 'new_slot_count')
+
+def dep_zapdeps(unreduced, reduced, myroot, use_binaries=0, trees=None,
+ minimize_slots=False):
+ """
+ Takes an unreduced and reduced deplist and removes satisfied dependencies.
+ Returned deplist contains steps that must be taken to satisfy dependencies.
+ """
+ if trees is None:
+ trees = portage.db
+ writemsg("ZapDeps -- %s\n" % (use_binaries), 2)
+ if not reduced or unreduced == ["||"] or dep_eval(reduced):
+ return []
+
+ if unreduced[0] != "||":
+ unresolved = []
+ for x, satisfied in zip(unreduced, reduced):
+ if isinstance(x, list):
+ unresolved += dep_zapdeps(x, satisfied, myroot,
+ use_binaries=use_binaries, trees=trees,
+ minimize_slots=minimize_slots)
+ elif not satisfied:
+ unresolved.append(x)
+ return unresolved
+
+ # We're at a ( || atom ... ) type level and need to make a choice
+ deps = unreduced[1:]
+ satisfieds = reduced[1:]
+
+ # Our preference order is for the first item that:
+ # a) contains all unmasked packages with the same key as installed packages
+ # b) contains all unmasked packages
+ # c) contains masked installed packages
+ # d) is the first item
+
+ preferred_installed = []
+ preferred_in_graph = []
+ preferred_any_slot = []
+ preferred_non_installed = []
+ unsat_use_in_graph = []
+ unsat_use_installed = []
+ unsat_use_non_installed = []
+ other_installed = []
+ other_installed_some = []
+ other_installed_any_slot = []
+ other = []
+
+ # unsat_use_* must come after preferred_non_installed
+ # for correct ordering in cases like || ( foo[a] foo[b] ).
+ choice_bins = (
+ preferred_in_graph,
+ preferred_installed,
+ preferred_any_slot,
+ preferred_non_installed,
+ unsat_use_in_graph,
+ unsat_use_installed,
+ unsat_use_non_installed,
+ other_installed,
+ other_installed_some,
+ other_installed_any_slot,
+ other,
+ )
+
+ # Alias the trees we'll be checking availability against
+ parent = trees[myroot].get("parent")
+ priority = trees[myroot].get("priority")
+ graph_db = trees[myroot].get("graph_db")
+ graph = trees[myroot].get("graph")
+ pkg_use_enabled = trees[myroot].get("pkg_use_enabled")
+ want_update_pkg = trees[myroot].get("want_update_pkg")
+ downgrade_probe = trees[myroot].get("downgrade_probe")
+ vardb = None
+ if "vartree" in trees[myroot]:
+ vardb = trees[myroot]["vartree"].dbapi
+ if use_binaries:
+ mydbapi = trees[myroot]["bintree"].dbapi
+ else:
+ mydbapi = trees[myroot]["porttree"].dbapi
+
+ try:
+ mydbapi_match_pkgs = mydbapi.match_pkgs
+ except AttributeError:
+ def mydbapi_match_pkgs(atom):
+ return [mydbapi._pkg_str(cpv, atom.repo)
+ for cpv in mydbapi.match(atom)]
+
+ # Sort the deps into bins: installed, not installed but already
+ # in the graph, and not installed and not in the graph,
+ # with values of [[required_atom], availability]
+ for x, satisfied in zip(deps, satisfieds):
+ if isinstance(x, list):
+ atoms = dep_zapdeps(x, satisfied, myroot,
+ use_binaries=use_binaries, trees=trees,
+ minimize_slots=minimize_slots)
+ else:
+ atoms = [x]
+ if vardb is None:
+ # When called by repoman, we can simply return the first choice
+ # because dep_eval() handles preference selection.
+ return atoms
+
+ all_available = True
+ all_use_satisfied = True
+ all_use_unmasked = True
+ conflict_downgrade = False
+ installed_downgrade = False
+ slot_atoms = collections.defaultdict(list)
+ slot_map = {}
+ cp_map = {}
+ for atom in atoms:
+ if atom.blocker:
+ continue
+ # Ignore USE dependencies here since we don't want USE
+ # settings to adversely affect || preference evaluation.
+ avail_pkg = mydbapi_match_pkgs(atom.without_use)
+ if avail_pkg:
+ avail_pkg = avail_pkg[-1] # highest (ascending order)
+ avail_slot = Atom("%s:%s" % (atom.cp, avail_pkg.slot))
+ if not avail_pkg:
+ all_available = False
+ all_use_satisfied = False
+ break
+
+ if graph_db is not None and downgrade_probe is not None:
+ slot_matches = graph_db.match_pkgs(avail_slot)
+ if (len(slot_matches) > 1 and
+ avail_pkg < slot_matches[-1] and
+ not downgrade_probe(avail_pkg)):
+ # If a downgrade is not desirable, then avoid a
+ # choice that pulls in a lower version involved
+ # in a slot conflict (bug #531656).
+ conflict_downgrade = True
+
+ if atom.use:
+ avail_pkg_use = mydbapi_match_pkgs(atom)
+ if not avail_pkg_use:
+ all_use_satisfied = False
+
+ if pkg_use_enabled is not None:
+ # Check which USE flags cause the match to fail,
+ # so we can prioritize choices that do not
+ # require changes to use.mask or use.force
+ # (see bug #515584).
+ violated_atom = atom.violated_conditionals(
+ pkg_use_enabled(avail_pkg),
+ avail_pkg.iuse.is_valid_flag)
+
+ # Note that violated_atom.use can be None here,
+ # since evaluation can collapse conditional USE
+ # deps that cause the match to fail due to
+ # missing IUSE (match uses atom.unevaluated_atom
+ # to detect such missing IUSE).
+ if violated_atom.use is not None:
+ for flag in violated_atom.use.enabled:
+ if flag in avail_pkg.use.mask:
+ all_use_unmasked = False
+ break
+ else:
+ for flag in violated_atom.use.disabled:
+ if flag in avail_pkg.use.force and \
+ flag not in avail_pkg.use.mask:
+ all_use_unmasked = False
+ break
+ else:
+ # highest (ascending order)
+ avail_pkg_use = avail_pkg_use[-1]
+ if avail_pkg_use != avail_pkg:
+ avail_pkg = avail_pkg_use
+ avail_slot = Atom("%s:%s" % (atom.cp, avail_pkg.slot))
+
+ if downgrade_probe is not None and graph is not None:
+ highest_in_slot = mydbapi_match_pkgs(avail_slot)
+ highest_in_slot = (highest_in_slot[-1]
+ if highest_in_slot else None)
+ if (avail_pkg and highest_in_slot and
+ avail_pkg < highest_in_slot and
+ not downgrade_probe(avail_pkg) and
+ (highest_in_slot.installed or
+ highest_in_slot in graph)):
+ installed_downgrade = True
+
+ slot_map[avail_slot] = avail_pkg
+ slot_atoms[avail_slot].append(atom)
+ highest_cpv = cp_map.get(avail_pkg.cp)
+ all_match_current = None
+ all_match_previous = None
+ if (highest_cpv is not None and
+ highest_cpv.slot == avail_pkg.slot):
+ # If possible, make the package selection internally
+ # consistent by choosing a package that satisfies all
+ # atoms which match a package in the same slot. Later on,
+ # the package version chosen here is used in the
+ # has_upgrade/has_downgrade logic to prefer choices with
+ # upgrades, and a package choice that is not internally
+ # consistent will lead the has_upgrade/has_downgrade logic
+ # to produce invalid results (see bug 600346).
+ all_match_current = all(a.match(avail_pkg)
+ for a in slot_atoms[avail_slot])
+ all_match_previous = all(a.match(highest_cpv)
+ for a in slot_atoms[avail_slot])
+ if all_match_previous and not all_match_current:
+ continue
+
+ current_higher = (highest_cpv is None or
+ vercmp(avail_pkg.version, highest_cpv.version) > 0)
+
+ if current_higher or (all_match_current and not all_match_previous):
+ cp_map[avail_pkg.cp] = avail_pkg
+
+ new_slot_count = (len(slot_map) if graph_db is None else
+ sum(not graph_db.match_pkgs(slot_atom) for slot_atom in slot_map
+ if not slot_atom.cp.startswith("virtual/")))
+
+ this_choice = _dep_choice(atoms=atoms, slot_map=slot_map,
+ cp_map=cp_map, all_available=all_available,
+ all_installed_slots=False,
+ new_slot_count=new_slot_count)
+ if all_available:
+ # The "all installed" criterion is not version or slot specific.
+ # If any version of a package is already in the graph then we
+ # assume that it is preferred over other possible package choices.
+ all_installed = True
+ for atom in set(Atom(atom.cp) for atom in atoms \
+ if not atom.blocker):
+ # New-style virtuals have zero cost to install.
+ if not vardb.match(atom) and not atom.startswith("virtual/"):
+ all_installed = False
+ break
+ all_installed_slots = False
+ if all_installed:
+ all_installed_slots = True
+ for slot_atom in slot_map:
+ # New-style virtuals have zero cost to install.
+ if not vardb.match(slot_atom) and \
+ not slot_atom.startswith("virtual/"):
+ all_installed_slots = False
+ break
+ this_choice.all_installed_slots = all_installed_slots
+ if graph_db is None:
+ if all_use_satisfied:
+ if all_installed:
+ if all_installed_slots:
+ preferred_installed.append(this_choice)
+ else:
+ preferred_any_slot.append(this_choice)
+ else:
+ preferred_non_installed.append(this_choice)
+ else:
+ if not all_use_unmasked:
+ other.append(this_choice)
+ elif all_installed_slots:
+ unsat_use_installed.append(this_choice)
+ else:
+ unsat_use_non_installed.append(this_choice)
+ elif conflict_downgrade or installed_downgrade:
+ other.append(this_choice)
+ else:
+ all_in_graph = True
+ for atom in atoms:
+ # New-style virtuals have zero cost to install.
+ if atom.blocker or atom.cp.startswith("virtual/"):
+ continue
+ # We check if the matched package has actually been
+ # added to the digraph, in order to distinguish between
+ # those packages and installed packages that may need
+ # to be uninstalled in order to resolve blockers.
+ if not any(pkg in graph for pkg in
+ graph_db.match_pkgs(atom)):
+ all_in_graph = False
+ break
+ circular_atom = None
+ if not (parent is None or priority is None) and \
+ (parent.onlydeps or
+ (all_in_graph and priority.buildtime and
+ not (priority.satisfied or priority.optional))):
+ # Check if the atom would result in a direct circular
+ # dependency and try to avoid that if it seems likely
+ # to be unresolvable. This is only relevant for
+ # buildtime deps that aren't already satisfied by an
+ # installed package.
+ cpv_slot_list = [parent]
+ for atom in atoms:
+ if atom.blocker:
+ continue
+ if vardb.match(atom):
+ # If the atom is satisfied by an installed
+ # version then it's not a circular dep.
+ continue
+ if atom.cp != parent.cp:
+ continue
+ if match_from_list(atom, cpv_slot_list):
+ circular_atom = atom
+ break
+ if circular_atom is not None:
+ other.append(this_choice)
+ else:
+ if all_use_satisfied:
+ if all_in_graph:
+ preferred_in_graph.append(this_choice)
+ elif all_installed:
+ if all_installed_slots:
+ preferred_installed.append(this_choice)
+ elif parent is None or want_update_pkg is None:
+ preferred_any_slot.append(this_choice)
+ else:
+ # When appropriate, prefer a slot that is not
+ # installed yet for bug #478188.
+ want_update = True
+ for slot_atom, avail_pkg in slot_map.items():
+ if avail_pkg in graph:
+ continue
+ # New-style virtuals have zero cost to install.
+ if slot_atom.startswith("virtual/") or \
+ vardb.match(slot_atom):
+ continue
+ if not want_update_pkg(parent, avail_pkg):
+ want_update = False
+ break
+
+ if want_update:
+ preferred_installed.append(this_choice)
+ else:
+ preferred_any_slot.append(this_choice)
+ else:
+ preferred_non_installed.append(this_choice)
+ else:
+ if not all_use_unmasked:
+ other.append(this_choice)
+ elif all_in_graph:
+ unsat_use_in_graph.append(this_choice)
+ elif all_installed_slots:
+ unsat_use_installed.append(this_choice)
+ else:
+ unsat_use_non_installed.append(this_choice)
+ else:
+ all_installed = True
+ some_installed = False
+ for atom in atoms:
+ if not atom.blocker:
+ if vardb.match(atom):
+ some_installed = True
+ else:
+ all_installed = False
+
+ if all_installed:
+ this_choice.all_installed_slots = True
+ other_installed.append(this_choice)
+ elif some_installed:
+ other_installed_some.append(this_choice)
+
+ # Use Atom(atom.cp) for a somewhat "fuzzy" match, since
+ # the whole atom may be too specific. For example, see
+ # bug #522652, where using the whole atom leads to an
+ # unsatisfiable choice.
+ elif any(vardb.match(Atom(atom.cp)) for atom in atoms
+ if not atom.blocker):
+ other_installed_any_slot.append(this_choice)
+ else:
+ other.append(this_choice)
+
+ # Prefer choices which contain upgrades to higher slots. This helps
+ # for deps such as || ( foo:1 foo:2 ), where we want to prefer the
+ # atom which matches the higher version rather than the atom furthest
+ # to the left. Sorting is done separately for each of choice_bins, so
+ # as not to interfere with the ordering of the bins. Because of the
+ # bin separation, the main function of this code is to allow
+ # --depclean to remove old slots (rather than to pull in new slots).
+ for choices in choice_bins:
+ if len(choices) < 2:
+ continue
+
+ sort_keys = []
+ # Prefer choices with all_installed_slots for bug #480736.
+ sort_keys.append(lambda x: not x.all_installed_slots)
+
+ if minimize_slots:
+ # Prefer choices having fewer new slots. When used with DNF form,
+ # this can eliminate unnecessary packages that depclean would
+ # ultimately eliminate (see bug 632026). Only use this behavior
+ # when deemed necessary by the caller, since this will discard the
+ # order specified in the ebuild, and the preferences specified
+ # there can serve as a crucial source of guidance (see bug 645002).
+
+ # NOTE: Under some conditions, new_slot_count value may have some
+ # variance from one calculation to the next because it depends on
+ # the order that packages are added to the graph. This variance can
+ # contribute to outcomes that appear to be random. Meanwhile,
+ # the order specified in the ebuild is without variance, so it
+ # does not have this problem.
+ sort_keys.append(lambda x: x.new_slot_count)
+
+ choices.sort(key=lambda x: tuple(f(x) for f in sort_keys))
+ for choice_1 in choices[1:]:
+ cps = set(choice_1.cp_map)
+ for choice_2 in choices:
+ if choice_1 is choice_2:
+ # choice_1 will not be promoted, so move on
+ break
+ intersecting_cps = cps.intersection(choice_2.cp_map)
+ if not intersecting_cps:
+ continue
+ has_upgrade = False
+ has_downgrade = False
+ for cp in intersecting_cps:
+ version_1 = choice_1.cp_map[cp]
+ version_2 = choice_2.cp_map[cp]
+ difference = vercmp(version_1.version, version_2.version)
+ if difference != 0:
+ if difference > 0:
+ has_upgrade = True
+ else:
+ has_downgrade = True
+ break
+ if has_upgrade and not has_downgrade:
+ # promote choice_1 in front of choice_2
+ choices.remove(choice_1)
+ index_2 = choices.index(choice_2)
+ choices.insert(index_2, choice_1)
+ break
+
+ for allow_masked in (False, True):
+ for choices in choice_bins:
+ for choice in choices:
+ if choice.all_available or allow_masked:
+ return choice.atoms
+
+ assert(False) # This point should not be reachable
+
+def dep_check(depstring, mydbapi, mysettings, use="yes", mode=None, myuse=None,
+ use_cache=1, use_binaries=0, myroot=None, trees=None):
+ """
+ Takes a depend string, parses it, and selects atoms.
+ The myroot parameter is unused (use mysettings['EROOT'] instead).
+ """
+ myroot = mysettings['EROOT']
+ edebug = mysettings.get("PORTAGE_DEBUG", None) == "1"
+ #check_config_instance(mysettings)
+ if trees is None:
+ trees = globals()["db"]
+ if use=="yes":
+ if myuse is None:
+ #default behavior
+ myusesplit = mysettings["PORTAGE_USE"].split()
+ else:
+ myusesplit = myuse
+ # We've been given useflags to use.
+ #print "USE FLAGS PASSED IN."
+ #print myuse
+ #if "bindist" in myusesplit:
+ # print "BINDIST is set!"
+ #else:
+ # print "BINDIST NOT set."
+ else:
+ #we are being run by autouse(), don't consult USE vars yet.
+ # WE ALSO CANNOT USE SETTINGS
+ myusesplit=[]
+
+ mymasks = set()
+ useforce = set()
+ if use == "all":
+ # This is only for repoman, in order to constrain the use_reduce
+ # matchall behavior to account for profile use.mask/force. The
+ # ARCH/archlist code here may be redundant, since the profile
+ # really should be handling ARCH masking/forcing itself.
+ arch = mysettings.get("ARCH")
+ mymasks.update(mysettings.usemask)
+ mymasks.update(mysettings.archlist())
+ if arch:
+ mymasks.discard(arch)
+ useforce.add(arch)
+ useforce.update(mysettings.useforce)
+ useforce.difference_update(mymasks)
+
+ # eapi code borrowed from _expand_new_virtuals()
+ mytrees = trees[myroot]
+ parent = mytrees.get("parent")
+ virt_parent = mytrees.get("virt_parent")
+ current_parent = None
+ eapi = None
+ if parent is not None:
+ if virt_parent is not None:
+ current_parent = virt_parent
+ else:
+ current_parent = parent
+
+ if current_parent is not None:
+ # Don't pass the eapi argument to use_reduce() for installed packages
+ # since previous validation will have already marked them as invalid
+ # when necessary and now we're more interested in evaluating
+ # dependencies so that things like --depclean work as well as possible
+ # in spite of partial invalidity.
+ if not current_parent.installed:
+ eapi = current_parent.eapi
+
+ if isinstance(depstring, list):
+ mysplit = depstring
+ else:
+ try:
+ mysplit = use_reduce(depstring, uselist=myusesplit,
+ masklist=mymasks, matchall=(use=="all"), excludeall=useforce,
+ opconvert=True, token_class=Atom, eapi=eapi)
+ except InvalidDependString as e:
+ return [0, "%s" % (e,)]
+
+ if mysplit == []:
+ #dependencies were reduced to nothing
+ return [1,[]]
+
+ # Recursively expand new-style virtuals so as to
+ # collapse one or more levels of indirection.
+ try:
+ mysplit = _expand_new_virtuals(mysplit, edebug, mydbapi, mysettings,
+ use=use, mode=mode, myuse=myuse,
+ use_force=useforce, use_mask=mymasks, use_cache=use_cache,
+ use_binaries=use_binaries, myroot=myroot, trees=trees)
+ except ParseError as e:
+ return [0, "%s" % (e,)]
+
+ dnf = False
+ if mysettings.local_config: # if not repoman
+ orig_split = mysplit
+ mysplit = _overlap_dnf(mysplit)
+ dnf = mysplit is not orig_split
+
+ mysplit2 = dep_wordreduce(mysplit,
+ mysettings, mydbapi, mode, use_cache=use_cache)
+ if mysplit2 is None:
+ return [0, _("Invalid token")]
+
+ writemsg("\n\n\n", 1)
+ writemsg("mysplit: %s\n" % (mysplit), 1)
+ writemsg("mysplit2: %s\n" % (mysplit2), 1)
+
+ selected_atoms = dep_zapdeps(mysplit, mysplit2, myroot,
+ use_binaries=use_binaries, trees=trees, minimize_slots=dnf)
+
+ return [1, selected_atoms]
+
+
+def _overlap_dnf(dep_struct):
+ """
+ Combine overlapping || groups using disjunctive normal form (DNF), in
+ order to minimize the number of packages chosen to satisfy cases like
+ "|| ( foo bar ) || ( bar baz )" as in bug #632026. Non-overlapping
+ groups are excluded from the conversion, since DNF leads to exponential
+ explosion of the formula.
+
+ When dep_struct does not contain any overlapping groups, no DNF
+ conversion will be performed, and dep_struct will be returned as-is.
+ Callers can detect this case by checking if the returned object has
+ the same identity as dep_struct. If the identity is different, then
+ DNF conversion was performed.
+ """
+ if not _contains_disjunction(dep_struct):
+ return dep_struct
+
+ # map atom.cp to disjunctions
+ cp_map = collections.defaultdict(list)
+ # graph atom.cp, with edges connecting atoms in the same disjunction
+ overlap_graph = digraph()
+ # map id(disjunction) to index in dep_struct, for deterministic output
+ order_map = {}
+ order_key = lambda x: order_map[id(x)]
+ result = []
+ for i, x in enumerate(dep_struct):
+ if isinstance(x, list):
+ assert x and x[0] == '||', \
+ 'Normalization error, nested conjunction found in %s' % (dep_struct,)
+ order_map[id(x)] = i
+ prev_cp = None
+ for atom in _iter_flatten(x):
+ if isinstance(atom, Atom) and not atom.blocker:
+ cp_map[atom.cp].append(x)
+ overlap_graph.add(atom.cp, parent=prev_cp)
+ prev_cp = atom.cp
+ if prev_cp is None: # only contains blockers
+ result.append(x)
+ else:
+ result.append(x)
+
+ # group together disjunctions having atom.cp overlap
+ traversed = set()
+ overlap = False
+ for cp in overlap_graph:
+ if cp in traversed:
+ continue
+ disjunctions = {}
+ stack = [cp]
+ while stack:
+ cp = stack.pop()
+ traversed.add(cp)
+ for x in cp_map[cp]:
+ disjunctions[id(x)] = x
+ for other_cp in itertools.chain(overlap_graph.child_nodes(cp),
+ overlap_graph.parent_nodes(cp)):
+ if other_cp not in traversed:
+ stack.append(other_cp)
+
+ if len(disjunctions) > 1:
+ overlap = True
+ # convert overlapping disjunctions to DNF
+ result.extend(_dnf_convert(
+ sorted(disjunctions.values(), key=order_key)))
+ else:
+ # pass through non-overlapping disjunctions
+ result.append(disjunctions.popitem()[1])
+
+ return result if overlap else dep_struct
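A sketch of the overlap handling, assuming a working portage install so that Atom and this module are importable: the first two groups share dev-libs/bar and are combined via DNF, while the unrelated group passes through unchanged.

    from portage.dep import Atom
    from portage.dep.dep_check import _overlap_dnf

    dep = [
        ['||', Atom('dev-libs/foo'), Atom('dev-libs/bar')],
        ['||', Atom('dev-libs/bar'), Atom('dev-libs/baz')],
        ['||', Atom('app-misc/other'), Atom('app-misc/extra')],
    ]
    result = _overlap_dnf(dep)
    # result is a new list: one top-level || of conjunctions for the first
    # two groups, followed by the untouched third group.
    print(result)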
+
+
+def _iter_flatten(dep_struct):
+ """
+ Yield nested elements of dep_struct.
+ """
+ for x in dep_struct:
+ if isinstance(x, list):
+ for x in _iter_flatten(x):
+ yield x
+ else:
+ yield x
+
+
+def dep_wordreduce(mydeplist,mysettings,mydbapi,mode,use_cache=1):
+ "Reduces the deplist to ones and zeros"
+ deplist=mydeplist[:]
+ for mypos, token in enumerate(deplist):
+ if isinstance(deplist[mypos], list):
+ #recurse
+ deplist[mypos]=dep_wordreduce(deplist[mypos],mysettings,mydbapi,mode,use_cache=use_cache)
+ elif deplist[mypos]=="||":
+ pass
+ elif token[:1] == "!":
+ deplist[mypos] = False
+ else:
+ mykey = deplist[mypos].cp
+ if mysettings and mykey in mysettings.pprovideddict and \
+ match_from_list(deplist[mypos], mysettings.pprovideddict[mykey]):
+ deplist[mypos]=True
+ elif mydbapi is None:
+ # Assume nothing is satisfied. This forces dep_zapdeps to
+ # return all of the deps that have been selected
+ # (excluding those satisfied by package.provided).
+ deplist[mypos] = False
+ else:
+ if mode:
+ x = mydbapi.xmatch(mode, deplist[mypos])
+ if mode.startswith("minimum-"):
+ mydep = []
+ if x:
+ mydep.append(x)
+ else:
+ mydep = x
+ else:
+ mydep=mydbapi.match(deplist[mypos],use_cache=use_cache)
+ if mydep!=None:
+ tmp=(len(mydep)>=1)
+ if deplist[mypos][0]=="!":
+ tmp=False
+ deplist[mypos]=tmp
+ else:
+ #encountered invalid string
+ return None
+ return deplist
diff --git a/lib/portage/dep/soname/SonameAtom.py b/lib/portage/dep/soname/SonameAtom.py
new file mode 100644
index 000000000..a7dad973d
--- /dev/null
+++ b/lib/portage/dep/soname/SonameAtom.py
@@ -0,0 +1,72 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import sys
+
+from portage import _encodings, _unicode_encode
+
+class SonameAtom(object):
+
+ __slots__ = ("multilib_category", "soname", "_hash_key",
+ "_hash_value")
+
+ # Distinguishes package atoms from other atom types
+ package = False
+
+ def __init__(self, multilib_category, soname):
+ object.__setattr__(self, "multilib_category", multilib_category)
+ object.__setattr__(self, "soname", soname)
+ object.__setattr__(self, "_hash_key",
+ (multilib_category, soname))
+ object.__setattr__(self, "_hash_value", hash(self._hash_key))
+
+ def __setattr__(self, name, value):
+ raise AttributeError("SonameAtom instances are immutable",
+ self.__class__, name, value)
+
+ def __hash__(self):
+ return self._hash_value
+
+ def __eq__(self, other):
+ try:
+ return self._hash_key == other._hash_key
+ except AttributeError:
+ return False
+
+ def __ne__(self, other):
+ try:
+ return self._hash_key != other._hash_key
+ except AttributeError:
+ return True
+
+ def __repr__(self):
+ return "%s('%s', '%s')" % (
+ self.__class__.__name__,
+ self.multilib_category,
+ self.soname
+ )
+
+ def __str__(self):
+ return "%s: %s" % (self.multilib_category, self.soname)
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'])
+
+ def match(self, pkg):
+ """
+ Check if the given package instance matches this atom. Unbuilt
+ ebuilds, which do not have soname metadata, will never match.
+
+ @param pkg: a Package instance
+ @type pkg: Package
+ @return: True if this atom matches pkg, otherwise False
+ @rtype: bool
+ """
+ return pkg.provides is not None and self in pkg.provides
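A short sketch of SonameAtom semantics, assuming portage is importable: instances are immutable and hash and compare by the (multilib_category, soname) pair.

    from portage.dep.soname.SonameAtom import SonameAtom

    a = SonameAtom('x86_64', 'libssl.so.1.1')
    b = SonameAtom('x86_64', 'libssl.so.1.1')
    print(a == b)    # True, equality uses the (category, soname) key
    print(a in {b})  # True, hashing uses the same key
    print(a)         # "x86_64: libssl.so.1.1"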
diff --git a/lib/portage/dep/soname/__init__.py b/lib/portage/dep/soname/__init__.py
new file mode 100644
index 000000000..4725d3317
--- /dev/null
+++ b/lib/portage/dep/soname/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/dep/soname/multilib_category.py b/lib/portage/dep/soname/multilib_category.py
new file mode 100644
index 000000000..84e018fb0
--- /dev/null
+++ b/lib/portage/dep/soname/multilib_category.py
@@ -0,0 +1,116 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+#
+# Compute a multilib category, as discussed here:
+#
+# https://bugs.gentoo.org/show_bug.cgi?id=534206
+#
+# Supported categories:
+#
+# alpha_{32,64}
+# arm_{32,64}
+# hppa_{32,64}
+# ia_{32,64}
+# m68k_{32,64}
+# mips_{eabi32,eabi64,n32,n64,o32,o64}
+# ppc_{32,64}
+# s390_{32,64}
+# sh_{32,64}
+# sparc_{32,64}
+# x86_{32,64,x32}
+#
+# NOTES:
+#
+# * The ABIs referenced by some of the above *_32 and *_64 categories
+# may be imaginary, but they are listed anyway, since the goal is to
+# establish a naming convention that is as consistent and uniform as
+# possible.
+#
+# * The Elf header's e_ident[EI_OSABI] byte is completely ignored,
+# since OS-independence is one of the goals. The assumption is that,
+# for a given installation, we are only interested in tracking multilib
+# ABIs for a single OS.
+
+from __future__ import unicode_literals
+
+from portage.util.elf.constants import (
+ EF_MIPS_ABI, EF_MIPS_ABI2, ELFCLASS32, ELFCLASS64,
+ EM_386, EM_68K, EM_AARCH64, EM_ALPHA, EM_ARM, EM_ALTERA_NIOS2,
+ EM_IA_64, EM_MIPS,
+ EM_PARISC, EM_PPC, EM_PPC64, EM_S390, EM_SH, EM_SPARC,
+ EM_SPARC32PLUS, EM_SPARCV9, EM_X86_64, E_MIPS_ABI_EABI32,
+ E_MIPS_ABI_EABI64, E_MIPS_ABI_O32, E_MIPS_ABI_O64)
+
+_machine_prefix_map = {
+ EM_386: "x86",
+ EM_68K: "m68k",
+ EM_AARCH64: "arm",
+ EM_ALPHA: "alpha",
+ EM_ALTERA_NIOS2: "nios2",
+ EM_ARM: "arm",
+ EM_IA_64: "ia64",
+ EM_MIPS: "mips",
+ EM_PARISC: "hppa",
+ EM_PPC: "ppc",
+ EM_PPC64: "ppc",
+ EM_S390: "s390",
+ EM_SH: "sh",
+ EM_SPARC: "sparc",
+ EM_SPARC32PLUS: "sparc",
+ EM_SPARCV9: "sparc",
+ EM_X86_64: "x86",
+}
+
+_mips_abi_map = {
+ E_MIPS_ABI_EABI32: "eabi32",
+ E_MIPS_ABI_EABI64: "eabi64",
+ E_MIPS_ABI_O32: "o32",
+ E_MIPS_ABI_O64: "o64",
+}
+
+def _compute_suffix_mips(elf_header):
+
+ name = None
+ mips_abi = elf_header.e_flags & EF_MIPS_ABI
+
+ if mips_abi:
+ name = _mips_abi_map.get(mips_abi)
+ elif elf_header.e_flags & EF_MIPS_ABI2:
+ name = "n32"
+ elif elf_header.ei_class == ELFCLASS64:
+ name = "n64"
+
+ return name
+
+def compute_multilib_category(elf_header):
+ """
+ Compute a multilib category from an ELF header.
+
+ @param elf_header: an ELFHeader instance
+ @type elf_header: ELFHeader
+ @rtype: str
+ @return: A multilib category, or None if elf_header does not fit
+ into a recognized category
+ """
+ category = None
+ if elf_header.e_machine is not None:
+
+ prefix = _machine_prefix_map.get(elf_header.e_machine)
+ suffix = None
+
+ if prefix == "mips":
+ suffix = _compute_suffix_mips(elf_header)
+ elif elf_header.ei_class == ELFCLASS64:
+ suffix = "64"
+ elif elf_header.ei_class == ELFCLASS32:
+ if elf_header.e_machine == EM_X86_64:
+ suffix = "x32"
+ else:
+ suffix = "32"
+
+ if prefix is None or suffix is None:
+ category = None
+ else:
+ category = "%s_%s" % (prefix, suffix)
+
+ return category
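A sketch of compute_multilib_category with a stand-in header object that provides only the fields consulted above; real callers pass an ELFHeader parsed from a binary:

    from types import SimpleNamespace
    from portage.util.elf.constants import ELFCLASS32, ELFCLASS64, EM_X86_64

    hdr64 = SimpleNamespace(e_machine=EM_X86_64, ei_class=ELFCLASS64, e_flags=0)
    print(compute_multilib_category(hdr64))   # "x86_64"

    hdr_x32 = SimpleNamespace(e_machine=EM_X86_64, ei_class=ELFCLASS32, e_flags=0)
    print(compute_multilib_category(hdr_x32))  # "x86_x32"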
diff --git a/lib/portage/dep/soname/parse.py b/lib/portage/dep/soname/parse.py
new file mode 100644
index 000000000..3f3757209
--- /dev/null
+++ b/lib/portage/dep/soname/parse.py
@@ -0,0 +1,47 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+from portage.exception import InvalidData
+from portage.localization import _
+from portage.dep.soname.SonameAtom import SonameAtom
+
+_error_empty_category = _("Multilib category empty: %s")
+_error_missing_category = _("Multilib category missing: %s")
+_error_duplicate_category = _("Multilib category occurs"
+ " more than once: %s")
+
+def parse_soname_deps(s):
+ """
+ Parse a REQUIRES or PROVIDES dependency string, and raise
+ InvalidData if necessary.
+
+ @param s: REQUIRES or PROVIDES string
+ @type s: str
+ @rtype: iter
+ @return: An iterator of SonameAtom instances
+ """
+
+ categories = set()
+ category = None
+ previous_soname = None
+ for soname in s.split():
+ if soname.endswith(":"):
+ if category is not None and previous_soname is None:
+ raise InvalidData(_error_empty_category % category)
+
+ category = soname[:-1]
+ previous_soname = None
+ if category in categories:
+ raise InvalidData(_error_duplicate_category % category)
+ categories.add(category)
+
+ elif category is None:
+ raise InvalidData(_error_missing_category % soname)
+ else:
+ previous_soname = soname
+ yield SonameAtom(category, soname)
+
+ if category is not None and previous_soname is None:
+ raise InvalidData(_error_empty_category % category)
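For example, a PROVIDES string with one multilib category yields one SonameAtom per listed soname; a sketch assuming portage is importable:

    provides = "x86_64: libssl.so.1.1 libcrypto.so.1.1"
    for atom in parse_soname_deps(provides):
        print(atom)
    # x86_64: libssl.so.1.1
    # x86_64: libcrypto.so.1.1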
diff --git a/lib/portage/dispatch_conf.py b/lib/portage/dispatch_conf.py
new file mode 100644
index 000000000..eaea59393
--- /dev/null
+++ b/lib/portage/dispatch_conf.py
@@ -0,0 +1,397 @@
+# archive_conf.py -- functionality common to archive-conf and dispatch-conf
+# Copyright 2003-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+
+# Library by Wayne Davison <gentoo@blorf.net>, derived from code
+# written by Jeremy Wohl (http://igmus.org)
+
+from __future__ import print_function, unicode_literals
+
+import errno
+import io
+import functools
+import stat
+import subprocess
+import sys
+import tempfile
+
+import portage
+from portage import _encodings, os, shutil
+from portage.env.loaders import KeyValuePairFileLoader
+from portage.localization import _
+from portage.util import shlex_split, varexpand
+from portage.util.path import iter_parents
+
+RCS_BRANCH = '1.1.1'
+RCS_LOCK = 'rcs -ko -M -l'
+RCS_PUT = 'ci -t-"Archived config file." -m"dispatch-conf update."'
+RCS_GET = 'co'
+RCS_MERGE = "rcsmerge -p -r" + RCS_BRANCH + " '%s' > '%s'"
+
+DIFF3_MERGE = "diff3 -mE '%s' '%s' '%s' > '%s'"
+_ARCHIVE_ROTATE_MAX = 9
+
+def diffstatusoutput(cmd, file1, file2):
+ """
+ Execute the string cmd in a shell with getstatusoutput() and return a
+ 2-tuple (status, output).
+ """
+ # Use Popen to emulate getstatusoutput(), since getstatusoutput() may
+ # raise a UnicodeDecodeError which makes the output inaccessible.
+ args = shlex_split(cmd % (file1, file2))
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000 and \
+ not os.path.isabs(args[0]):
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see https://bugs.python.org/issue8513).
+ fullname = portage.process.find_binary(args[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(args[0])
+ args[0] = fullname
+
+ args = [portage._unicode_encode(x, errors='strict') for x in args]
+ proc = subprocess.Popen(args,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ output = portage._unicode_decode(proc.communicate()[0])
+ if output and output[-1] == "\n":
+ # getstatusoutput strips one newline
+ output = output[:-1]
+ return (proc.wait(), output)
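A sketch of a typical call (the file paths are illustrative): the format string carries the diff command, and the two %s slots receive the file names.

    status, output = diffstatusoutput(
        "diff -u '%s' '%s'", "/etc/conf.d/foo", "/etc/conf.d/._cfg0000_foo")
    # diff exits non-zero when the files differ, and the captured
    # output can then be shown to the user.
    if status != 0:
        print(output)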
+
+def diff_mixed(func, file1, file2):
+ tempdir = None
+ try:
+ if os.path.islink(file1) and \
+ not os.path.islink(file2) and \
+ os.path.isfile(file1) and \
+ os.path.isfile(file2):
+ # If a regular file replaces a symlink to a regular
+ # file, then show the diff between the regular files
+ # (bug #330221).
+ diff_files = (file1, file2)
+ else:
+ files = [file1, file2]
+ diff_files = [file1, file2]
+ for i in range(len(diff_files)):
+ try:
+ st = os.lstat(diff_files[i])
+ except OSError:
+ st = None
+ if st is not None and stat.S_ISREG(st.st_mode):
+ continue
+
+ if tempdir is None:
+ tempdir = tempfile.mkdtemp()
+ diff_files[i] = os.path.join(tempdir, "%d" % i)
+ if st is None:
+ content = "/dev/null\n"
+ elif stat.S_ISLNK(st.st_mode):
+ link_dest = os.readlink(files[i])
+ content = "SYM: %s -> %s\n" % \
+ (file1, link_dest)
+ elif stat.S_ISDIR(st.st_mode):
+ content = "DIR: %s\n" % (file1,)
+ elif stat.S_ISFIFO(st.st_mode):
+ content = "FIF: %s\n" % (file1,)
+ else:
+ content = "DEV: %s\n" % (file1,)
+ with io.open(diff_files[i], mode='w',
+ encoding=_encodings['stdio']) as f:
+ f.write(content)
+
+ return func(diff_files[0], diff_files[1])
+
+ finally:
+ if tempdir is not None:
+ shutil.rmtree(tempdir)
+
+class diff_mixed_wrapper(object):
+
+ def __init__(self, f, *args):
+ self._func = f
+ self._args = args
+
+ def __call__(self, *args):
+ return diff_mixed(
+ functools.partial(self._func, *(self._args + args[:-2])),
+ *args[-2:])
+
+diffstatusoutput_mixed = diff_mixed_wrapper(diffstatusoutput)
+
+def read_config(mandatory_opts):
+ eprefix = portage.settings["EPREFIX"]
+ if portage._not_installed:
+ config_path = os.path.join(portage.PORTAGE_BASE_PATH, "cnf", "dispatch-conf.conf")
+ else:
+ config_path = os.path.join(eprefix or os.sep, "etc/dispatch-conf.conf")
+ loader = KeyValuePairFileLoader(config_path, None)
+ opts, _errors = loader.load()
+ if not opts:
+ print(_('dispatch-conf: Error reading /etc/dispatch-conf.conf; fatal'), file=sys.stderr)
+ sys.exit(1)
+
+ # Handle quote removal here, since KeyValuePairFileLoader doesn't do that.
+ quotes = "\"'"
+ for k, v in opts.items():
+ if v[:1] in quotes and v[:1] == v[-1:]:
+ opts[k] = v[1:-1]
+
+ for key in mandatory_opts:
+ if key not in opts:
+ if key == "merge":
+ opts["merge"] = "sdiff --suppress-common-lines --output='%s' '%s' '%s'"
+ else:
+ print(_('dispatch-conf: Missing option "%s" in /etc/dispatch-conf.conf; fatal') % (key,), file=sys.stderr)
+
+ # archive-dir supports ${EPREFIX} expansion, in order to avoid hardcoding the prefix
+ variables = {"EPREFIX": eprefix}
+ opts['archive-dir'] = varexpand(opts['archive-dir'], mydict=variables)
+
+ if not os.path.exists(opts['archive-dir']):
+ os.mkdir(opts['archive-dir'])
+ # Use restrictive permissions by default, in order to protect
+ # against vulnerabilities (like bug #315603 involving rcs).
+ os.chmod(opts['archive-dir'], 0o700)
+ elif not os.path.isdir(opts['archive-dir']):
+ print(_('dispatch-conf: Config archive dir [%s] must exist; fatal') % (opts['archive-dir'],), file=sys.stderr)
+ sys.exit(1)
+
+ return opts
+
+def _archive_copy(src_st, src_path, dest_path):
+ """
+ Copy file from src_path to dest_path. Regular files and symlinks
+ are supported. If an EnvironmentError occurs, then it is logged
+ to stderr.
+
+ @param src_st: source file lstat result
+ @type src_st: posix.stat_result
+ @param src_path: source file path
+ @type src_path: str
+ @param dest_path: destination file path
+ @type dest_path: str
+ """
+ # Remove destination file in order to ensure that the following
+ # symlink or copy2 call won't fail (see bug #535850).
+ try:
+ os.unlink(dest_path)
+ except OSError:
+ pass
+ try:
+ if stat.S_ISLNK(src_st.st_mode):
+ os.symlink(os.readlink(src_path), dest_path)
+ else:
+ shutil.copy2(src_path, dest_path)
+ except EnvironmentError as e:
+ portage.util.writemsg(
+ _('dispatch-conf: Error copying %(src_path)s to '
+ '%(dest_path)s: %(reason)s\n') % {
+ "src_path": src_path,
+ "dest_path": dest_path,
+ "reason": e
+ }, noiselevel=-1)
+
+def rcs_archive(archive, curconf, newconf, mrgconf):
+ """Archive existing config in rcs (on trunk). Then, if mrgconf is
+ specified and an old branch version exists, merge the user's changes
+ and the distributed changes and put the result into mrgconf. Lastly,
+ if newconf was specified, leave it in the archive dir with a .dist.new
+ suffix along with the last 1.1.1 branch version with a .dist suffix."""
+
+ try:
+ os.makedirs(os.path.dirname(archive))
+ except OSError:
+ pass
+
+ try:
+ curconf_st = os.lstat(curconf)
+ except OSError:
+ curconf_st = None
+
+ if curconf_st is not None and \
+ (stat.S_ISREG(curconf_st.st_mode) or
+ stat.S_ISLNK(curconf_st.st_mode)):
+ _archive_copy(curconf_st, curconf, archive)
+
+ if os.path.lexists(archive + ',v'):
+ os.system(RCS_LOCK + ' ' + archive)
+ os.system(RCS_PUT + ' ' + archive)
+
+ ret = 0
+ mystat = None
+ if newconf:
+ try:
+ mystat = os.lstat(newconf)
+ except OSError:
+ pass
+
+ if mystat is not None and \
+ (stat.S_ISREG(mystat.st_mode) or
+ stat.S_ISLNK(mystat.st_mode)):
+ os.system(RCS_GET + ' -r' + RCS_BRANCH + ' ' + archive)
+ has_branch = os.path.lexists(archive)
+ if has_branch:
+ os.rename(archive, archive + '.dist')
+
+ _archive_copy(mystat, newconf, archive)
+
+ if has_branch:
+ if mrgconf and os.path.isfile(archive) and \
+ os.path.isfile(mrgconf):
+ # This puts the results of the merge into mrgconf.
+ ret = os.system(RCS_MERGE % (archive, mrgconf))
+ os.chmod(mrgconf, mystat.st_mode)
+ os.chown(mrgconf, mystat.st_uid, mystat.st_gid)
+ os.rename(archive, archive + '.dist.new')
+
+ return ret
+
+def _file_archive_rotate(archive):
+ """
+ Rename archive to archive + '.1', and perform similar rotation
+ for files up to archive + '.9'.
+
+ @param archive: file path to archive
+ @type archive: str
+ """
+
+ max_suf = 0
+ try:
+ for max_suf, max_st, max_path in (
+ (suf, os.lstat(path), path) for suf, path in (
+ (suf, "%s.%s" % (archive, suf)) for suf in range(
+ 1, _ARCHIVE_ROTATE_MAX + 1))):
+ pass
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ # There's already an unused suffix.
+ else:
+ # Free the max suffix in order to avoid possible problems
+ # when we rename another file or directory to the same
+ # location (see bug 256376).
+ if stat.S_ISDIR(max_st.st_mode):
+ # Removing a directory might destroy something important,
+ # so rename it instead.
+ head, tail = os.path.split(archive)
+ placeholder = tempfile.NamedTemporaryFile(
+ prefix="%s." % tail,
+ dir=head)
+ placeholder.close()
+ os.rename(max_path, placeholder.name)
+ else:
+ os.unlink(max_path)
+
+ # The max suffix is now unused.
+ max_suf -= 1
+
+ for suf in range(max_suf + 1, 1, -1):
+ os.rename("%s.%s" % (archive, suf - 1), "%s.%s" % (archive, suf))
+
+ os.rename(archive, "%s.1" % (archive,))
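A small sketch of the rotation in an empty temporary directory (the file names are illustrative):

    import os
    import tempfile

    tmp = tempfile.mkdtemp()
    archive = os.path.join(tmp, "foo.conf")
    for name in ("foo.conf", "foo.conf.1"):
        with open(os.path.join(tmp, name), "w") as f:
            f.write(name + "\n")
    _file_archive_rotate(archive)
    # foo.conf.1 was renamed to foo.conf.2, then foo.conf to foo.conf.1
    print(sorted(os.listdir(tmp)))  # ['foo.conf.1', 'foo.conf.2']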
+
+def _file_archive_ensure_dir(parent_dir):
+ """
+ Ensure that the parent directory for an archive exists.
+ If a file exists where a directory is needed, then rename
+ it (see bug 256376).
+
+ @param parent_dir: path of parent directory
+ @type parent_dir: str
+ """
+
+ for parent in iter_parents(parent_dir):
+ # Use lstat because a symlink to a directory might point
+ # to a directory outside of the config archive, making
+ # it an unsuitable parent.
+ try:
+ parent_st = os.lstat(parent)
+ except OSError:
+ pass
+ else:
+ if not stat.S_ISDIR(parent_st.st_mode):
+ _file_archive_rotate(parent)
+ break
+
+ try:
+ os.makedirs(parent_dir)
+ except OSError:
+ pass
+
+def file_archive(archive, curconf, newconf, mrgconf):
+ """Archive existing config to the archive-dir, bumping old versions
+ out of the way into .# versions (log-rotate style). Then, if mrgconf
+ was specified and there is a .dist version, merge the user's changes
+ and the distributed changes and put the result into mrgconf. Lastly,
+ if newconf was specified, archive it as a .dist.new version (which
+ gets moved to the .dist version at the end of the processing)."""
+
+ _file_archive_ensure_dir(os.path.dirname(archive))
+
+ # Archive the current config file if it isn't already saved
+ if (os.path.lexists(archive) and
+ len(diffstatusoutput_mixed(
+ "diff -aq '%s' '%s'", curconf, archive)[1]) != 0):
+ _file_archive_rotate(archive)
+
+ try:
+ curconf_st = os.lstat(curconf)
+ except OSError:
+ curconf_st = None
+
+ if curconf_st is not None and \
+ (stat.S_ISREG(curconf_st.st_mode) or
+ stat.S_ISLNK(curconf_st.st_mode)):
+ _archive_copy(curconf_st, curconf, archive)
+
+ mystat = None
+ if newconf:
+ try:
+ mystat = os.lstat(newconf)
+ except OSError:
+ pass
+
+ if mystat is not None and \
+ (stat.S_ISREG(mystat.st_mode) or
+ stat.S_ISLNK(mystat.st_mode)):
+ # Save off new config file in the archive dir with .dist.new suffix
+ newconf_archive = archive + '.dist.new'
+ if os.path.isdir(newconf_archive
+ ) and not os.path.islink(newconf_archive):
+ _file_archive_rotate(newconf_archive)
+ _archive_copy(mystat, newconf, newconf_archive)
+
+ ret = 0
+ if mrgconf and os.path.isfile(curconf) and \
+ os.path.isfile(newconf) and \
+ os.path.isfile(archive + '.dist'):
+ # This puts the results of the merge into mrgconf.
+ ret = os.system(DIFF3_MERGE % (curconf, archive + '.dist', newconf, mrgconf))
+ os.chmod(mrgconf, mystat.st_mode)
+ os.chown(mrgconf, mystat.st_uid, mystat.st_gid)
+
+ return ret
+
+
+def rcs_archive_post_process(archive):
+ """Check in the archive file with the .dist.new suffix on the branch
+ and remove the one with the .dist suffix."""
+ os.rename(archive + '.dist.new', archive)
+ if os.path.lexists(archive + '.dist'):
+ # Commit the last-distributed version onto the branch.
+ os.system(RCS_LOCK + RCS_BRANCH + ' ' + archive)
+ os.system(RCS_PUT + ' -r' + RCS_BRANCH + ' ' + archive)
+ os.unlink(archive + '.dist')
+ else:
+ # Forcefully commit the last-distributed version onto the branch.
+ os.system(RCS_PUT + ' -f -r' + RCS_BRANCH + ' ' + archive)
+
+
+def file_archive_post_process(archive):
+ """Rename the archive file with the .dist.new suffix to a .dist suffix"""
+ if os.path.lexists(archive + '.dist.new'):
+ dest = "%s.dist" % archive
+ if os.path.isdir(dest) and not os.path.islink(dest):
+ _file_archive_rotate(dest)
+ os.rename(archive + '.dist.new', dest)
diff --git a/lib/portage/eapi.py b/lib/portage/eapi.py
new file mode 100644
index 000000000..158d58243
--- /dev/null
+++ b/lib/portage/eapi.py
@@ -0,0 +1,194 @@
+# Copyright 2010-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import collections
+
+from portage import eapi_is_supported
+
+def eapi_has_iuse_defaults(eapi):
+ return eapi != "0"
+
+def eapi_has_iuse_effective(eapi):
+ return eapi not in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi")
+
+def eapi_has_slot_deps(eapi):
+ return eapi != "0"
+
+def eapi_has_slot_operator(eapi):
+ return eapi not in ("0", "1", "2", "3", "4", "4-python")
+
+def eapi_has_src_uri_arrows(eapi):
+ return eapi not in ("0", "1")
+
+def eapi_has_use_deps(eapi):
+ return eapi not in ("0", "1")
+
+def eapi_has_strong_blocks(eapi):
+ return eapi not in ("0", "1")
+
+def eapi_has_src_prepare_and_src_configure(eapi):
+ return eapi not in ("0", "1")
+
+def eapi_supports_prefix(eapi):
+ return eapi not in ("0", "1", "2")
+
+def eapi_exports_AA(eapi):
+ return eapi in ("0", "1", "2", "3")
+
+def eapi_exports_KV(eapi):
+ return eapi in ("0", "1", "2", "3")
+
+def eapi_exports_merge_type(eapi):
+ return eapi not in ("0", "1", "2", "3")
+
+def eapi_exports_replace_vars(eapi):
+ return eapi not in ("0", "1", "2", "3")
+
+def eapi_exports_EBUILD_PHASE_FUNC(eapi):
+ return eapi not in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi")
+
+def eapi_exports_PORTDIR(eapi):
+ return eapi in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi",
+ "5", "5-progress", "6")
+
+def eapi_exports_ECLASSDIR(eapi):
+ return eapi in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi",
+ "5", "5-progress", "6")
+
+def eapi_exports_REPOSITORY(eapi):
+ return eapi in ("4-python", "5-progress")
+
+def eapi_has_pkg_pretend(eapi):
+ return eapi not in ("0", "1", "2", "3")
+
+def eapi_has_implicit_rdepend(eapi):
+ return eapi in ("0", "1", "2", "3")
+
+def eapi_has_dosed_dohard(eapi):
+ return eapi in ("0", "1", "2", "3")
+
+def eapi_has_required_use(eapi):
+ return eapi not in ("0", "1", "2", "3")
+
+def eapi_has_required_use_at_most_one_of(eapi):
+ return eapi not in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi")
+
+def eapi_has_use_dep_defaults(eapi):
+ return eapi not in ("0", "1", "2", "3")
+
+def eapi_requires_posixish_locale(eapi):
+ return eapi not in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi",
+ "5", "5-progress", "5-hdepend")
+
+def eapi_has_repo_deps(eapi):
+ return eapi in ("4-python", "5-progress")
+
+def eapi_allows_dots_in_PN(eapi):
+ return eapi in ("4-python", "5-progress")
+
+def eapi_allows_dots_in_use_flags(eapi):
+ return eapi in ("4-python", "5-progress")
+
+def eapi_supports_stable_use_forcing_and_masking(eapi):
+ return eapi not in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi")
+
+def eapi_allows_directories_on_profile_level_and_repository_level(eapi):
+ return eapi not in ("0", "1", "2", "3", "4", "4-slot-abi", "5", "6")
+
+def eapi_has_use_aliases(eapi):
+ return eapi in ("4-python", "5-progress")
+
+def eapi_has_automatic_unpack_dependencies(eapi):
+ return eapi in ("5-progress",)
+
+def eapi_has_hdepend(eapi):
+ return eapi in ("5-hdepend",)
+
+def eapi_allows_package_provided(eapi):
+ return eapi in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi",
+ "5", "5-progress", "6")
+
+def eapi_has_bdepend(eapi):
+ return eapi not in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi",
+ "5", "5-progress", "6")
+
+def eapi_has_targetroot(eapi):
+ return eapi in ("5-hdepend",)
+
+def eapi_empty_groups_always_true(eapi):
+ return eapi in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi",
+ "5", "5-progress", "6")
+
+def eapi_path_variables_end_with_trailing_slash(eapi):
+ return eapi in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi",
+ "5", "5-progress", "6")
+
+def eapi_has_broot(eapi):
+ return eapi not in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi",
+ "5", "5-progress", "5-hdepend", "6")
+
+def eapi_has_sysroot(eapi):
+ return eapi not in ("0", "1", "2", "3", "4", "4-python", "4-slot-abi",
+ "5", "5-progress", "5-hdepend", "6")
+
+_eapi_attrs = collections.namedtuple('_eapi_attrs',
+ 'allows_package_provided '
+ 'bdepend broot dots_in_PN dots_in_use_flags exports_EBUILD_PHASE_FUNC '
+ 'exports_PORTDIR exports_ECLASSDIR '
+ 'feature_flag_test feature_flag_targetroot '
+ 'hdepend iuse_defaults iuse_effective posixish_locale '
+ 'path_variables_end_with_trailing_slash '
+ 'repo_deps required_use required_use_at_most_one_of slot_operator slot_deps '
+ 'src_uri_arrows strong_blocks use_deps use_dep_defaults '
+ 'empty_groups_always_true sysroot')
+
+_eapi_attrs_cache = {}
+
+def _get_eapi_attrs(eapi):
+ """
+ When eapi is None, validation is not as strict, since we want the same
+ code to work for multiple EAPIs that may have slightly different rules.
+ An unsupported eapi is handled the same as when eapi is None, which may
+ be helpful for handling of corrupt EAPI metadata in essential functions
+ such as pkgsplit.
+ """
+ eapi_attrs = _eapi_attrs_cache.get(eapi)
+ if eapi_attrs is not None:
+ return eapi_attrs
+
+ orig_eapi = eapi
+ if eapi is not None and not eapi_is_supported(eapi):
+ eapi = None
+
+ eapi_attrs = _eapi_attrs(
+ allows_package_provided=(eapi is None or eapi_allows_package_provided(eapi)),
+ bdepend = (eapi is not None and eapi_has_bdepend(eapi)),
+ broot = (eapi is None or eapi_has_broot(eapi)),
+ dots_in_PN = (eapi is None or eapi_allows_dots_in_PN(eapi)),
+ dots_in_use_flags = (eapi is None or eapi_allows_dots_in_use_flags(eapi)),
+ empty_groups_always_true = (eapi is not None and eapi_empty_groups_always_true(eapi)),
+ exports_EBUILD_PHASE_FUNC = (eapi is None or eapi_exports_EBUILD_PHASE_FUNC(eapi)),
+ exports_PORTDIR = (eapi is None or eapi_exports_PORTDIR(eapi)),
+ exports_ECLASSDIR = (eapi is not None and eapi_exports_ECLASSDIR(eapi)),
+ feature_flag_test = True,
+ feature_flag_targetroot = (eapi is not None and eapi_has_targetroot(eapi)),
+ hdepend = (eapi is not None and eapi_has_hdepend(eapi)),
+ iuse_defaults = (eapi is None or eapi_has_iuse_defaults(eapi)),
+ iuse_effective = (eapi is not None and eapi_has_iuse_effective(eapi)),
+ path_variables_end_with_trailing_slash = (eapi is not None and
+ eapi_path_variables_end_with_trailing_slash(eapi)),
+ posixish_locale = (eapi is not None and eapi_requires_posixish_locale(eapi)),
+ repo_deps = (eapi is None or eapi_has_repo_deps(eapi)),
+ required_use = (eapi is None or eapi_has_required_use(eapi)),
+ required_use_at_most_one_of = (eapi is None or eapi_has_required_use_at_most_one_of(eapi)),
+ slot_deps = (eapi is None or eapi_has_slot_deps(eapi)),
+ slot_operator = (eapi is None or eapi_has_slot_operator(eapi)),
+ src_uri_arrows = (eapi is None or eapi_has_src_uri_arrows(eapi)),
+ strong_blocks = (eapi is None or eapi_has_strong_blocks(eapi)),
+ sysroot = (eapi is None or eapi_has_sysroot(eapi)),
+ use_deps = (eapi is None or eapi_has_use_deps(eapi)),
+ use_dep_defaults = (eapi is None or eapi_has_use_dep_defaults(eapi))
+ )
+
+ _eapi_attrs_cache[orig_eapi] = eapi_attrs
+ return eapi_attrs
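
The per-EAPI predicate functions above are collapsed by _get_eapi_attrs() into a cached namedtuple, so callers test attributes instead of comparing EAPI strings. A minimal sketch:

    attrs = _get_eapi_attrs("6")
    if attrs.slot_operator:
        print("EAPI 6 dependencies may use slot operators (:= and :*)")
    if not attrs.bdepend:
        print("EAPI 6 has no BDEPEND; build-time deps go in DEPEND")

    # Unsupported (or corrupt) EAPI strings fall back to the permissive
    # eapi=None behaviour, and results are cached per input string.
    assert _get_eapi_attrs("bogus") is _get_eapi_attrs("bogus")
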
diff --git a/lib/portage/eclass_cache.py b/lib/portage/eclass_cache.py
new file mode 100644
index 000000000..d2d9e2710
--- /dev/null
+++ b/lib/portage/eclass_cache.py
@@ -0,0 +1,187 @@
+# Copyright 2005-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+# Author(s): Nicholas Carpaski (carpaski@gentoo.org), Brian Harring (ferringb@gentoo.org)
+
+from __future__ import unicode_literals
+
+__all__ = ["cache"]
+
+import stat
+import sys
+import operator
+import warnings
+from portage.util import normalize_path
+import errno
+from portage.exception import FileNotFound, PermissionDenied
+from portage import os
+from portage import checksum
+from portage import _shell_quote
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+
+class hashed_path(object):
+
+ def __init__(self, location):
+ self.location = location
+
+ def __getattr__(self, attr):
+ if attr == 'mtime':
+ # Use stat.ST_MTIME: accessing .st_mtime returns a float on some
+ # Python versions, and long(float) introduces rounding issues that
+ # are not present when using the straight C API.
+ # Thus use the de facto Python compatibility workaround: access the
+ # stat result by index, which guarantees the raw long value.
+ try:
+ self.mtime = obj = os.stat(self.location)[stat.ST_MTIME]
+ except OSError as e:
+ if e.errno in (errno.ENOENT, errno.ESTALE):
+ raise FileNotFound(self.location)
+ elif e.errno == PermissionDenied.errno:
+ raise PermissionDenied(self.location)
+ raise
+ return obj
+ if not attr.islower():
+ # we don't care to allow .mD5 as an alias for .md5
+ raise AttributeError(attr)
+ hashname = attr.upper()
+ if hashname not in checksum.get_valid_checksum_keys():
+ raise AttributeError(attr)
+ val = checksum.perform_checksum(self.location, hashname)[0]
+ setattr(self, attr, val)
+ return val
+
+ def __repr__(self):
+ return "<portage.eclass_cache.hashed_path('%s')>" % (self.location,)
+
+class cache(object):
+ """
+ Maintains the cache information about eclasses used in ebuild.
+ """
+ def __init__(self, porttree_root, overlays=None):
+ if overlays is not None:
+ warnings.warn("overlays parameter of portage.eclass_cache.cache constructor is deprecated and no longer used",
+ DeprecationWarning, stacklevel=2)
+
+ self.eclasses = {} # {"Name": hashed_path}
+ self._eclass_locations = {}
+ self._eclass_locations_str = None
+
+ # screw with the porttree ordering, w/out having bash inherit match it, and I'll hurt you.
+ # ~harring
+ if porttree_root:
+ self.porttree_root = porttree_root
+ self.porttrees = (normalize_path(self.porttree_root),)
+ self._master_eclass_root = os.path.join(self.porttrees[0], "eclass")
+ self.update_eclasses()
+ else:
+ self.porttree_root = None
+ self.porttrees = ()
+ self._master_eclass_root = None
+
+ def copy(self):
+ return self.__copy__()
+
+ def __copy__(self):
+ result = self.__class__(None)
+ result.eclasses = self.eclasses.copy()
+ result._eclass_locations = self._eclass_locations.copy()
+ result.porttree_root = self.porttree_root
+ result.porttrees = self.porttrees
+ result._master_eclass_root = self._master_eclass_root
+ return result
+
+ def append(self, other):
+ """
+ Append another instance to this instance. This will cause eclasses
+ from the other instance to override any eclasses from this instance
+ that have the same name.
+ """
+ if not isinstance(other, self.__class__):
+ raise TypeError(
+ "expected type %s, got %s" % (self.__class__, type(other)))
+ self.porttrees = self.porttrees + other.porttrees
+ self.eclasses.update(other.eclasses)
+ self._eclass_locations.update(other._eclass_locations)
+ self._eclass_locations_str = None
+
+ def update_eclasses(self):
+ self.eclasses = {}
+ self._eclass_locations = {}
+ master_eclasses = {}
+ eclass_len = len(".eclass")
+ ignored_listdir_errnos = (errno.ENOENT, errno.ENOTDIR)
+ for x in [normalize_path(os.path.join(y,"eclass")) for y in self.porttrees]:
+ try:
+ eclass_filenames = os.listdir(x)
+ except OSError as e:
+ if e.errno in ignored_listdir_errnos:
+ del e
+ continue
+ elif e.errno == PermissionDenied.errno:
+ raise PermissionDenied(x)
+ raise
+ for y in eclass_filenames:
+ if not y.endswith(".eclass"):
+ continue
+ obj = hashed_path(os.path.join(x, y))
+ obj.eclass_dir = x
+ try:
+ mtime = obj.mtime
+ except FileNotFound:
+ continue
+ ys = y[:-eclass_len]
+ if x == self._master_eclass_root:
+ master_eclasses[ys] = mtime
+ self.eclasses[ys] = obj
+ self._eclass_locations[ys] = x
+ continue
+
+ master_mtime = master_eclasses.get(ys)
+ if master_mtime is not None:
+ if master_mtime == mtime:
+ # It appears to be identical to the master,
+ # so prefer the master entry.
+ continue
+
+ self.eclasses[ys] = obj
+ self._eclass_locations[ys] = x
+
+ def validate_and_rewrite_cache(self, ec_dict, chf_type, stores_paths):
+ """
+ This will return an empty dict if the ec_dict parameter happens
+ to be empty, therefore callers must take care to distinguish
+ between empty dict and None return values.
+ """
+ if not isinstance(ec_dict, dict):
+ return None
+ our_getter = operator.attrgetter(chf_type)
+ cache_getter = lambda x:x
+ if stores_paths:
+ cache_getter = operator.itemgetter(1)
+ d = {}
+ for eclass, ec_data in ec_dict.items():
+ cached_data = self.eclasses.get(eclass)
+ if cached_data is None:
+ return None
+ if cache_getter(ec_data) != our_getter(cached_data):
+ return None
+ d[eclass] = cached_data
+ return d
+
+ def get_eclass_data(self, inherits):
+ ec_dict = {}
+ for x in inherits:
+ ec_dict[x] = self.eclasses[x]
+
+ return ec_dict
+
+ @property
+ def eclass_locations_string(self):
+ if self._eclass_locations_str is None:
+ self._eclass_locations_str = " ".join(_shell_quote(x)
+ for x in reversed(self.porttrees))
+ return self._eclass_locations_str
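
A hedged usage sketch for the cache class: build one instance per repository and stack them with append(), so that same-named eclasses from the later tree win. The repository paths below are illustrative only.

    master = cache('/var/db/repos/gentoo')          # path is an assumption
    overlay = cache('/var/db/repos/my-overlay')

    combined = master.copy()
    combined.append(overlay)    # overlay eclasses override same-named ones

    ep = combined.eclasses.get('eutils')
    if ep is not None:
        # hashed_path lazily stats/hashes on attribute access
        print(ep.location, ep.mtime)
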
diff --git a/lib/portage/elog/__init__.py b/lib/portage/elog/__init__.py
new file mode 100644
index 000000000..cc086123f
--- /dev/null
+++ b/lib/portage/elog/__init__.py
@@ -0,0 +1,191 @@
+# elog/__init__.py - elog core functions
+# Copyright 2006-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.util:writemsg',
+)
+
+from portage.const import EBUILD_PHASES
+from portage.exception import AlarmSignal, PortageException
+from portage.process import atexit_register
+from portage.elog.messages import collect_ebuild_messages, collect_messages
+from portage.elog.filtering import filter_loglevels
+from portage.localization import _
+from portage import os
+
+def _preload_elog_modules(settings):
+ logsystems = settings.get("PORTAGE_ELOG_SYSTEM", "").split()
+ for s in logsystems:
+ # allow per module overrides of PORTAGE_ELOG_CLASSES
+ if ":" in s:
+ s, levels = s.split(":", 1)
+ levels = levels.split(",")
+ # - is nicer than _ for module names, so allow people to use it.
+ s = s.replace("-", "_")
+ try:
+ _load_mod("portage.elog.mod_" + s)
+ except ImportError:
+ pass
+
+def _merge_logentries(a, b):
+ rValue = {}
+ phases = set(a)
+ phases.update(b)
+ for p in phases:
+ merged_msgs = []
+ rValue[p] = merged_msgs
+ for d in a, b:
+ msgs = d.get(p)
+ if msgs:
+ merged_msgs.extend(msgs)
+ return rValue
+
+def _combine_logentries(logentries):
+ # generate a single string with all log messages
+ rValue = []
+ for phase in EBUILD_PHASES:
+ if not phase in logentries:
+ continue
+ previous_type = None
+ for msgtype, msgcontent in logentries[phase]:
+ if previous_type != msgtype:
+ previous_type = msgtype
+ rValue.append("%s: %s" % (msgtype, phase))
+ if isinstance(msgcontent, basestring):
+ rValue.append(msgcontent.rstrip("\n"))
+ else:
+ for line in msgcontent:
+ rValue.append(line.rstrip("\n"))
+ if rValue:
+ rValue.append("")
+ return "\n".join(rValue)
+
+_elog_mod_imports = {}
+def _load_mod(name):
+ global _elog_mod_imports
+ m = _elog_mod_imports.get(name)
+ if m is None:
+ m = __import__(name)
+ for comp in name.split(".")[1:]:
+ m = getattr(m, comp)
+ _elog_mod_imports[name] = m
+ return m
+
+_elog_listeners = []
+def add_listener(listener):
+ '''
+ Listeners should accept four arguments: settings, key, logentries and logtext
+ '''
+ _elog_listeners.append(listener)
+
+def remove_listener(listener):
+ '''
+ Remove previously added listener
+ '''
+ _elog_listeners.remove(listener)
+
+_elog_atexit_handlers = []
+
+def elog_process(cpv, mysettings, phasefilter=None):
+ global _elog_atexit_handlers
+
+ logsystems = mysettings.get("PORTAGE_ELOG_SYSTEM","").split()
+ for s in logsystems:
+ # allow per module overrides of PORTAGE_ELOG_CLASSES
+ if ":" in s:
+ s, levels = s.split(":", 1)
+ levels = levels.split(",")
+ # - is nicer than _ for module names, so allow people to use it.
+ s = s.replace("-", "_")
+ try:
+ _load_mod("portage.elog.mod_" + s)
+ except ImportError:
+ pass
+
+ if "T" in mysettings:
+ ebuild_logentries = collect_ebuild_messages(
+ os.path.join(mysettings["T"], "logging"))
+ else:
+ # A build dir isn't necessarily required since the messages.e*
+ # functions allow messages to be generated in-memory.
+ ebuild_logentries = {}
+ all_logentries = collect_messages(key=cpv, phasefilter=phasefilter)
+ if cpv in all_logentries:
+ # Messages generated by the python elog implementation are assumed
+ # to come first. For example, this ensures correct order for einfo
+ # messages that are generated prior to the setup phase.
+ all_logentries[cpv] = \
+ _merge_logentries(all_logentries[cpv], ebuild_logentries)
+ else:
+ all_logentries[cpv] = ebuild_logentries
+
+ my_elog_classes = set(mysettings.get("PORTAGE_ELOG_CLASSES", "").split())
+ logsystems = {}
+ for token in mysettings.get("PORTAGE_ELOG_SYSTEM", "").split():
+ if ":" in token:
+ s, levels = token.split(":", 1)
+ levels = levels.split(",")
+ else:
+ s = token
+ levels = ()
+ levels_set = logsystems.get(s)
+ if levels_set is None:
+ levels_set = set()
+ logsystems[s] = levels_set
+ levels_set.update(levels)
+
+ for key in all_logentries:
+ default_logentries = filter_loglevels(all_logentries[key], my_elog_classes)
+
+ # in case the filters matched all messages and no module overrides exist
+ if len(default_logentries) == 0 and (not ":" in mysettings.get("PORTAGE_ELOG_SYSTEM", "")):
+ continue
+
+ default_fulllog = _combine_logentries(default_logentries)
+
+ # call listeners
+ for listener in _elog_listeners:
+ listener(mysettings, str(key), default_logentries, default_fulllog)
+
+ # pass the processing to the individual modules
+ for s, levels in logsystems.items():
+ # allow per module overrides of PORTAGE_ELOG_CLASSES
+ if levels:
+ mod_logentries = filter_loglevels(all_logentries[key], levels)
+ mod_fulllog = _combine_logentries(mod_logentries)
+ else:
+ mod_logentries = default_logentries
+ mod_fulllog = default_fulllog
+ if len(mod_logentries) == 0:
+ continue
+ # - is nicer than _ for module names, so allow people to use it.
+ s = s.replace("-", "_")
+ try:
+ m = _load_mod("portage.elog.mod_" + s)
+ # Timeout after one minute (in case something like the mail
+ # module gets hung).
+ try:
+ AlarmSignal.register(60)
+ m.process(mysettings, str(key), mod_logentries, mod_fulllog)
+ finally:
+ AlarmSignal.unregister()
+ if hasattr(m, "finalize") and not m.finalize in _elog_atexit_handlers:
+ _elog_atexit_handlers.append(m.finalize)
+ atexit_register(m.finalize)
+ except (ImportError, AttributeError) as e:
+ writemsg(_("!!! Error while importing logging modules "
+ "while loading \"mod_%s\":\n") % str(s))
+ writemsg("%s\n" % str(e), noiselevel=-1)
+ except AlarmSignal:
+ writemsg("Timeout in elog_process for system '%s'\n" % s,
+ noiselevel=-1)
+ except PortageException as e:
+ writemsg("%s\n" % str(e), noiselevel=-1)
+
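
A sketch of the listener API: listeners receive the settings instance, the package key, the filtered log entries and the combined text, and are called once per key before the PORTAGE_ELOG_SYSTEM modules run. In this sketch, mysettings is assumed to be a portage config instance (or any mapping providing the PORTAGE_ELOG_* variables).

    def my_listener(settings, key, logentries, logtext):
        # logentries maps phase -> [(msgtype, messages), ...]
        print("elog output for %s:\n%s" % (key, logtext))

    add_listener(my_listener)
    try:
        elog_process("app-misc/example-1.0", mysettings)
    finally:
        remove_listener(my_listener)
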
diff --git a/lib/portage/elog/filtering.py b/lib/portage/elog/filtering.py
new file mode 100644
index 000000000..82181a4cb
--- /dev/null
+++ b/lib/portage/elog/filtering.py
@@ -0,0 +1,15 @@
+# elog/filtering.py - elog message filtering functions
+# Copyright 2006-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+def filter_loglevels(logentries, loglevels):
+ # remove unwanted entries from all logentries
+ rValue = {}
+ loglevels = [x.upper() for x in loglevels]
+ for phase in logentries:
+ for msgtype, msgcontent in logentries[phase]:
+ if msgtype.upper() in loglevels or "*" in loglevels:
+ if phase not in rValue:
+ rValue[phase] = []
+ rValue[phase].append((msgtype, msgcontent))
+ return rValue
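
For example, with levels ("warn", "error") only the WARN entry below survives; passing "*" keeps every level, and matching is case-insensitive:

    entries = {
        "compile": [("WARN", ["implicit declaration of foo()"])],
        "postinst": [("INFO", ["remember to run etc-update"])],
    }
    kept = filter_loglevels(entries, ["warn", "error"])
    # kept == {"compile": [("WARN", ["implicit declaration of foo()"])]}
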
diff --git a/lib/portage/elog/messages.py b/lib/portage/elog/messages.py
new file mode 100644
index 000000000..a4897d8d8
--- /dev/null
+++ b/lib/portage/elog/messages.py
@@ -0,0 +1,190 @@
+# elog/messages.py - elog core functions
+# Copyright 2006-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.output:colorize',
+ 'portage.util:writemsg',
+)
+
+from portage.const import EBUILD_PHASES
+from portage.localization import _
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage import _unicode_decode
+
+import io
+import sys
+
+_log_levels = frozenset([
+ "ERROR",
+ "INFO",
+ "LOG",
+ "QA",
+ "WARN",
+])
+
+def collect_ebuild_messages(path):
+ """ Collect elog messages generated by the bash logging function stored
+ at 'path'.
+ """
+ mylogfiles = None
+ try:
+ mylogfiles = os.listdir(path)
+ except OSError:
+ pass
+ # shortcut for packages without any messages
+ if not mylogfiles:
+ return {}
+ # exploit listdir() file order so we process log entries in chronological order
+ mylogfiles.reverse()
+ logentries = {}
+ for msgfunction in mylogfiles:
+ filename = os.path.join(path, msgfunction)
+ if msgfunction not in EBUILD_PHASES:
+ writemsg(_("!!! can't process invalid log file: %s\n") % filename,
+ noiselevel=-1)
+ continue
+ if not msgfunction in logentries:
+ logentries[msgfunction] = []
+ lastmsgtype = None
+ msgcontent = []
+ f = io.open(_unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace')
+ # Use split('\n') since normal line iteration or readlines() will
+ # split on \r characters as shown in bug #390833.
+ for l in f.read().split('\n'):
+ if not l:
+ continue
+ try:
+ msgtype, msg = l.split(" ", 1)
+ if msgtype not in _log_levels:
+ raise ValueError(msgtype)
+ except ValueError:
+ writemsg(_("!!! malformed entry in "
+ "log file: '%s': %s\n") % (filename, l), noiselevel=-1)
+ continue
+
+ if lastmsgtype is None:
+ lastmsgtype = msgtype
+
+ if msgtype == lastmsgtype:
+ msgcontent.append(msg)
+ else:
+ if msgcontent:
+ logentries[msgfunction].append((lastmsgtype, msgcontent))
+ msgcontent = [msg]
+ lastmsgtype = msgtype
+ f.close()
+ if msgcontent:
+ logentries[msgfunction].append((lastmsgtype, msgcontent))
+
+ # clean logfiles to avoid repetitions
+ for f in mylogfiles:
+ try:
+ os.unlink(os.path.join(path, f))
+ except OSError:
+ pass
+ return logentries
+
+_msgbuffer = {}
+def _elog_base(level, msg, phase="other", key=None, color=None, out=None):
+ """ Backend for the other messaging functions, should not be called
+ directly.
+ """
+
+ # TODO: Have callers pass in a more unique 'key' parameter than a plain
+ # cpv, in order to ensure that messages are properly grouped together
+ # for a given package instance, and also to ensure that each elog module's
+ # process() function is only called once for each unique package. This is
+ # needed not only when building packages in parallel, but also to preserve
+ # continuity in messages when a package is simply updated, since we don't
+ # want the elog_process() call from the uninstall of the old version to
+ # cause discontinuity in the elog messages of the new one being installed.
+
+ global _msgbuffer
+
+ if out is None:
+ out = sys.stdout
+
+ if color is None:
+ color = "GOOD"
+
+ msg = _unicode_decode(msg,
+ encoding=_encodings['content'], errors='replace')
+
+ formatted_msg = colorize(color, " * ") + msg + "\n"
+
+ # avoid potential UnicodeEncodeError
+ if out in (sys.stdout, sys.stderr):
+ formatted_msg = _unicode_encode(formatted_msg,
+ encoding=_encodings['stdio'], errors='backslashreplace')
+ if sys.hexversion >= 0x3000000:
+ out = out.buffer
+
+ out.write(formatted_msg)
+
+ if key not in _msgbuffer:
+ _msgbuffer[key] = {}
+ if phase not in _msgbuffer[key]:
+ _msgbuffer[key][phase] = []
+ _msgbuffer[key][phase].append((level, msg))
+
+ #raise NotImplementedError()
+
+def collect_messages(key=None, phasefilter=None):
+ global _msgbuffer
+
+ if key is None:
+ rValue = _msgbuffer
+ _reset_buffer()
+ else:
+ rValue = {}
+ if key in _msgbuffer:
+ if phasefilter is None:
+ rValue[key] = _msgbuffer.pop(key)
+ else:
+ rValue[key] = {}
+ for phase in phasefilter:
+ try:
+ rValue[key][phase] = _msgbuffer[key].pop(phase)
+ except KeyError:
+ pass
+ if not _msgbuffer[key]:
+ del _msgbuffer[key]
+ return rValue
+
+def _reset_buffer():
+ """ Reset the internal message buffer when it has been processed,
+ should not be called directly.
+ """
+ global _msgbuffer
+
+ _msgbuffer = {}
+
+# creating and exporting the actual messaging functions
+_functions = { "einfo": ("INFO", "GOOD"),
+ "elog": ("LOG", "GOOD"),
+ "ewarn": ("WARN", "WARN"),
+ "eqawarn": ("QA", "WARN"),
+ "eerror": ("ERROR", "BAD"),
+}
+
+class _make_msgfunction(object):
+ __slots__ = ('_color', '_level')
+ def __init__(self, level, color):
+ self._level = level
+ self._color = color
+ def __call__(self, msg, phase="other", key=None, out=None):
+ """
+ Display and log a message assigned to the given key/cpv.
+ """
+ _elog_base(self._level, msg, phase=phase,
+ key=key, color=self._color, out=out)
+
+for f in _functions:
+ setattr(sys.modules[__name__], f, _make_msgfunction(_functions[f][0], _functions[f][1]))
+del f, _functions
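
The loop above creates einfo, elog, ewarn, eqawarn and eerror as module-level functions at import time; each prints immediately and also buffers the message under its key/phase. A short sketch of the round trip through collect_messages():

    einfo("checking configuration", phase="setup", key="app-misc/example-1.0")
    ewarn("config file is world-writable", phase="setup", key="app-misc/example-1.0")

    buffered = collect_messages(key="app-misc/example-1.0")
    # buffered == {"app-misc/example-1.0":
    #     {"setup": [("INFO", "checking configuration"),
    #                ("WARN", "config file is world-writable")]}}
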
diff --git a/lib/portage/elog/mod_custom.py b/lib/portage/elog/mod_custom.py
new file mode 100644
index 000000000..e1a5223d6
--- /dev/null
+++ b/lib/portage/elog/mod_custom.py
@@ -0,0 +1,19 @@
+# elog/mod_custom.py - elog dispatch module
+# Copyright 2006-2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage.elog.mod_save, portage.process, portage.exception
+
+def process(mysettings, key, logentries, fulltext):
+ elogfilename = portage.elog.mod_save.process(mysettings, key, logentries, fulltext)
+
+ if not mysettings.get("PORTAGE_ELOG_COMMAND"):
+ raise portage.exception.MissingParameter("!!! Custom logging requested but PORTAGE_ELOG_COMMAND is not defined")
+ else:
+ mylogcmd = mysettings["PORTAGE_ELOG_COMMAND"]
+ mylogcmd = mylogcmd.replace("${LOGFILE}", elogfilename)
+ mylogcmd = mylogcmd.replace("${PACKAGE}", key)
+ retval = portage.process.spawn_bash(mylogcmd)
+ if retval != 0:
+ raise portage.exception.PortageException("!!! PORTAGE_ELOG_COMMAND failed with exitcode %d" % retval)
+ return
diff --git a/lib/portage/elog/mod_echo.py b/lib/portage/elog/mod_echo.py
new file mode 100644
index 000000000..fb86547a4
--- /dev/null
+++ b/lib/portage/elog/mod_echo.py
@@ -0,0 +1,69 @@
+# elog/mod_echo.py - elog dispatch module
+# Copyright 2007-2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import sys
+from portage.output import EOutput, colorize
+from portage.const import EBUILD_PHASES
+from portage.localization import _
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+_items = []
+def process(mysettings, key, logentries, fulltext):
+ global _items
+ logfile = None
+ # output logfile explicitly only if it isn't in tempdir, otherwise
+ # it will be removed anyway
+ if (key == mysettings.mycpv and
+ "PORT_LOGDIR" in mysettings and
+ "PORTAGE_LOG_FILE" in mysettings):
+ logfile = mysettings["PORTAGE_LOG_FILE"]
+ _items.append((mysettings["ROOT"], key, logentries, logfile))
+
+def finalize():
+ # For consistency, send all message types to stdout.
+ sys.stdout.flush()
+ sys.stderr.flush()
+ stderr = sys.stderr
+ try:
+ sys.stderr = sys.stdout
+ _finalize()
+ finally:
+ sys.stderr = stderr
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+def _finalize():
+ global _items
+ printer = EOutput()
+ for root, key, logentries, logfile in _items:
+ print()
+ if root == "/":
+ printer.einfo(_("Messages for package %s:") %
+ colorize("INFORM", key))
+ else:
+ printer.einfo(_("Messages for package %(pkg)s merged to %(root)s:") %
+ {"pkg": colorize("INFORM", key), "root": root})
+ if logfile is not None:
+ printer.einfo(_("Log file: %s") % colorize("INFORM", logfile))
+ print()
+ for phase in EBUILD_PHASES:
+ if phase not in logentries:
+ continue
+ for msgtype, msgcontent in logentries[phase]:
+ fmap = {"INFO": printer.einfo,
+ "WARN": printer.ewarn,
+ "ERROR": printer.eerror,
+ "LOG": printer.einfo,
+ "QA": printer.ewarn}
+ if isinstance(msgcontent, basestring):
+ msgcontent = [msgcontent]
+ for line in msgcontent:
+ fmap[msgtype](line.strip("\n"))
+ _items = []
+ return
diff --git a/lib/portage/elog/mod_mail.py b/lib/portage/elog/mod_mail.py
new file mode 100644
index 000000000..086c683a6
--- /dev/null
+++ b/lib/portage/elog/mod_mail.py
@@ -0,0 +1,43 @@
+# elog/mod_mail.py - elog dispatch module
+# Copyright 2006-2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage.mail, socket
+from portage.exception import PortageException
+from portage.localization import _
+from portage.util import writemsg
+
+def process(mysettings, key, logentries, fulltext):
+ if "PORTAGE_ELOG_MAILURI" in mysettings:
+ myrecipient = mysettings["PORTAGE_ELOG_MAILURI"].split()[0]
+ else:
+ myrecipient = "root@localhost"
+
+ myfrom = mysettings["PORTAGE_ELOG_MAILFROM"]
+ myfrom = myfrom.replace("${HOST}", socket.getfqdn())
+ mysubject = mysettings["PORTAGE_ELOG_MAILSUBJECT"]
+ mysubject = mysubject.replace("${PACKAGE}", key)
+ mysubject = mysubject.replace("${HOST}", socket.getfqdn())
+
+ # look at the phases listed in our logentries to figure out what action was performed
+ action = _("merged")
+ for phase in logentries:
+ # if we found a *rm phase assume that the package was unmerged
+ if phase in ["postrm", "prerm"]:
+ action = _("unmerged")
+ # if we think that the package was unmerged, make sure there was no unexpected
+ # phase recorded to avoid misinformation
+ if action == _("unmerged"):
+ for phase in logentries:
+ if phase not in ["postrm", "prerm", "other"]:
+ action = _("unknown")
+
+ mysubject = mysubject.replace("${ACTION}", action)
+
+ mymessage = portage.mail.create_message(myfrom, myrecipient, mysubject, fulltext)
+ try:
+ portage.mail.send_mail(mysettings, mymessage)
+ except PortageException as e:
+ writemsg("%s\n" % str(e), noiselevel=-1)
+
+ return
diff --git a/lib/portage/elog/mod_mail_summary.py b/lib/portage/elog/mod_mail_summary.py
new file mode 100644
index 000000000..0bd67f22b
--- /dev/null
+++ b/lib/portage/elog/mod_mail_summary.py
@@ -0,0 +1,89 @@
+# elog/mod_mail_summary.py - elog dispatch module
+# Copyright 2006-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage.exception import AlarmSignal, PortageException
+from portage.localization import _
+from portage.util import writemsg
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+
+import socket
+import time
+
+_config_keys = ('PORTAGE_ELOG_MAILURI', 'PORTAGE_ELOG_MAILFROM',
+ 'PORTAGE_ELOG_MAILSUBJECT',)
+_items = {}
+def process(mysettings, key, logentries, fulltext):
+ global _items
+ time_str = _unicode_decode(
+ time.strftime("%Y%m%d-%H%M%S %Z", time.localtime(time.time())),
+ encoding=_encodings['content'], errors='replace')
+ header = _(">>> Messages generated for package %(pkg)s by process %(pid)d on %(time)s:\n\n") % \
+ {"pkg": key, "pid": os.getpid(), "time": time_str}
+ config_root = mysettings["PORTAGE_CONFIGROOT"]
+
+ # Copy needed variables from the config instance,
+ # since we don't need to hold a reference for the
+ # whole thing. This also makes it possible to
+ # rely on per-package variable settings that may
+ # have come from /etc/portage/package.env, since
+ # we'll be isolated from any future mutations of
+ # mysettings.
+ config_dict = {}
+ for k in _config_keys:
+ v = mysettings.get(k)
+ if v is not None:
+ config_dict[k] = v
+
+ config_dict, items = _items.setdefault(config_root, (config_dict, {}))
+ items[key] = header + fulltext
+
+def finalize():
+ global _items
+ for mysettings, items in _items.values():
+ _finalize(mysettings, items)
+ _items.clear()
+
+def _finalize(mysettings, items):
+ if len(items) == 0:
+ return
+ elif len(items) == 1:
+ count = _("one package")
+ else:
+ count = _("multiple packages")
+ if "PORTAGE_ELOG_MAILURI" in mysettings:
+ myrecipient = mysettings["PORTAGE_ELOG_MAILURI"].split()[0]
+ else:
+ myrecipient = "root@localhost"
+
+ myfrom = mysettings.get("PORTAGE_ELOG_MAILFROM", "")
+ myfrom = myfrom.replace("${HOST}", socket.getfqdn())
+ mysubject = mysettings.get("PORTAGE_ELOG_MAILSUBJECT", "")
+ mysubject = mysubject.replace("${PACKAGE}", count)
+ mysubject = mysubject.replace("${HOST}", socket.getfqdn())
+
+ mybody = _("elog messages for the following packages generated by "
+ "process %(pid)d on host %(host)s:\n") % {"pid": os.getpid(), "host": socket.getfqdn()}
+ for key in items:
+ mybody += "- %s\n" % key
+
+ mymessage = portage.mail.create_message(myfrom, myrecipient, mysubject,
+ mybody, attachments=list(items.values()))
+
+ # Timeout after one minute in case send_mail() blocks indefinitely.
+ try:
+ try:
+ AlarmSignal.register(60)
+ portage.mail.send_mail(mysettings, mymessage)
+ finally:
+ AlarmSignal.unregister()
+ except AlarmSignal:
+ writemsg("Timeout in finalize() for elog system 'mail_summary'\n",
+ noiselevel=-1)
+ except PortageException as e:
+ writemsg("%s\n" % (e,), noiselevel=-1)
+
+ return
diff --git a/lib/portage/elog/mod_save.py b/lib/portage/elog/mod_save.py
new file mode 100644
index 000000000..829ec6c5e
--- /dev/null
+++ b/lib/portage/elog/mod_save.py
@@ -0,0 +1,84 @@
+# elog/mod_save.py - elog dispatch module
+# Copyright 2006-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import io
+import time
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.data import portage_gid, portage_uid
+from portage.package.ebuild.prepare_build_dirs import _ensure_log_subdirs
+from portage.util import apply_permissions, ensure_dirs, normalize_path
+
+def process(mysettings, key, logentries, fulltext):
+
+ if mysettings.get("PORT_LOGDIR"):
+ logdir = normalize_path(mysettings["PORT_LOGDIR"])
+ else:
+ logdir = os.path.join(os.sep, mysettings["EPREFIX"].lstrip(os.sep),
+ "var", "log", "portage")
+
+ if not os.path.isdir(logdir):
+ # Only initialize group/mode if the directory doesn't
+ # exist, so that we don't override permissions if they
+ # were previously set by the administrator.
+ # NOTE: These permissions should be compatible with our
+ # default logrotate config as discussed in bug 374287.
+ uid = -1
+ if portage.data.secpass >= 2:
+ uid = portage_uid
+ ensure_dirs(logdir, uid=uid, gid=portage_gid, mode=0o2770)
+
+ cat, pf = portage.catsplit(key)
+
+ elogfilename = pf + ":" + _unicode_decode(
+ time.strftime("%Y%m%d-%H%M%S", time.gmtime(time.time())),
+ encoding=_encodings['content'], errors='replace') + ".log"
+
+ if "split-elog" in mysettings.features:
+ log_subdir = os.path.join(logdir, "elog", cat)
+ elogfilename = os.path.join(log_subdir, elogfilename)
+ else:
+ log_subdir = os.path.join(logdir, "elog")
+ elogfilename = os.path.join(log_subdir, cat + ':' + elogfilename)
+ _ensure_log_subdirs(logdir, log_subdir)
+
+ try:
+ with io.open(_unicode_encode(elogfilename,
+ encoding=_encodings['fs'], errors='strict'), mode='w',
+ encoding=_encodings['content'],
+ errors='backslashreplace') as elogfile:
+ elogfile.write(_unicode_decode(fulltext))
+ except IOError as e:
+ func_call = "open('%s', 'w')" % elogfilename
+ if e.errno == errno.EACCES:
+ raise portage.exception.PermissionDenied(func_call)
+ elif e.errno == errno.EPERM:
+ raise portage.exception.OperationNotPermitted(func_call)
+ elif e.errno == errno.EROFS:
+ raise portage.exception.ReadOnlyFileSystem(func_call)
+ else:
+ raise
+
+ # Copy group permission bits from parent directory.
+ elogdir_st = os.stat(log_subdir)
+ elogdir_gid = elogdir_st.st_gid
+ elogdir_grp_mode = 0o060 & elogdir_st.st_mode
+
+ # Copy the uid from the parent directory if we have privileges
+ # to do so, for compatibility with our default logrotate
+ # config (see bug 378451). With the "su portage portage"
+ # directive and logrotate-3.8.0, logrotate's chown call during
+ # the compression phase will only succeed if the log file's uid
+ # is portage_uid.
+ logfile_uid = -1
+ if portage.data.secpass >= 2:
+ logfile_uid = elogdir_st.st_uid
+ apply_permissions(elogfilename, uid=logfile_uid, gid=elogdir_gid,
+ mode=elogdir_grp_mode, mask=0)
+
+ return elogfilename
diff --git a/lib/portage/elog/mod_save_summary.py b/lib/portage/elog/mod_save_summary.py
new file mode 100644
index 000000000..786f89454
--- /dev/null
+++ b/lib/portage/elog/mod_save_summary.py
@@ -0,0 +1,92 @@
+# elog/mod_save_summary.py - elog dispatch module
+# Copyright 2006-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import errno
+import io
+import sys
+import time
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.data import portage_gid, portage_uid
+from portage.localization import _
+from portage.package.ebuild.prepare_build_dirs import _ensure_log_subdirs
+from portage.util import apply_permissions, ensure_dirs, normalize_path
+
+def process(mysettings, key, logentries, fulltext):
+ if mysettings.get("PORT_LOGDIR"):
+ logdir = normalize_path(mysettings["PORT_LOGDIR"])
+ else:
+ logdir = os.path.join(os.sep, mysettings["EPREFIX"].lstrip(os.sep),
+ "var", "log", "portage")
+
+ if not os.path.isdir(logdir):
+ # Only initialize group/mode if the directory doesn't
+ # exist, so that we don't override permissions if they
+ # were previously set by the administrator.
+ # NOTE: These permissions should be compatible with our
+ # default logrotate config as discussed in bug 374287.
+ logdir_uid = -1
+ if portage.data.secpass >= 2:
+ logdir_uid = portage_uid
+ ensure_dirs(logdir, uid=logdir_uid, gid=portage_gid, mode=0o2770)
+
+ elogdir = os.path.join(logdir, "elog")
+ _ensure_log_subdirs(logdir, elogdir)
+
+ # TODO: Locking
+ elogfilename = elogdir+"/summary.log"
+ try:
+ elogfile = io.open(_unicode_encode(elogfilename,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='a', encoding=_encodings['content'],
+ errors='backslashreplace')
+ except IOError as e:
+ func_call = "open('%s', 'a')" % elogfilename
+ if e.errno == errno.EACCES:
+ raise portage.exception.PermissionDenied(func_call)
+ elif e.errno == errno.EPERM:
+ raise portage.exception.OperationNotPermitted(func_call)
+ elif e.errno == errno.EROFS:
+ raise portage.exception.ReadOnlyFileSystem(func_call)
+ else:
+ raise
+
+ # Copy group permission bits from parent directory.
+ elogdir_st = os.stat(elogdir)
+ elogdir_gid = elogdir_st.st_gid
+ elogdir_grp_mode = 0o060 & elogdir_st.st_mode
+
+ # Copy the uid from the parent directory if we have privileges
+ # to do so, for compatibility with our default logrotate
+ # config (see bug 378451). With the "su portage portage"
+ # directive and logrotate-3.8.0, logrotate's chown call during
+ # the compression phase will only succeed if the log file's uid
+ # is portage_uid.
+ logfile_uid = -1
+ if portage.data.secpass >= 2:
+ logfile_uid = elogdir_st.st_uid
+ apply_permissions(elogfilename, uid=logfile_uid, gid=elogdir_gid,
+ mode=elogdir_grp_mode, mask=0)
+
+ time_fmt = "%Y-%m-%d %H:%M:%S %Z"
+ if sys.hexversion < 0x3000000:
+ time_fmt = _unicode_encode(time_fmt)
+ time_str = time.strftime(time_fmt, time.localtime(time.time()))
+ # Avoid potential UnicodeDecodeError in Python 2, since strftime
+ # returns bytes in Python 2, and %Z may contain non-ascii chars.
+ time_str = _unicode_decode(time_str,
+ encoding=_encodings['content'], errors='replace')
+ elogfile.write(_(">>> Messages generated by process "
+ "%(pid)d on %(time)s for package %(pkg)s:\n\n") %
+ {"pid": os.getpid(), "time": time_str, "pkg": key})
+ elogfile.write(_unicode_decode(fulltext))
+ elogfile.write("\n")
+ elogfile.close()
+
+ return elogfilename
diff --git a/lib/portage/elog/mod_syslog.py b/lib/portage/elog/mod_syslog.py
new file mode 100644
index 000000000..8b26ffa1e
--- /dev/null
+++ b/lib/portage/elog/mod_syslog.py
@@ -0,0 +1,37 @@
+# elog/mod_syslog.py - elog dispatch module
+# Copyright 2006-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+import syslog
+from portage.const import EBUILD_PHASES
+from portage import _encodings
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+_pri = {
+ "INFO" : syslog.LOG_INFO,
+ "WARN" : syslog.LOG_WARNING,
+ "ERROR" : syslog.LOG_ERR,
+ "LOG" : syslog.LOG_NOTICE,
+ "QA" : syslog.LOG_WARNING
+}
+
+def process(mysettings, key, logentries, fulltext):
+ syslog.openlog("portage", syslog.LOG_ERR | syslog.LOG_WARNING | syslog.LOG_INFO | syslog.LOG_NOTICE, syslog.LOG_LOCAL5)
+ for phase in EBUILD_PHASES:
+ if not phase in logentries:
+ continue
+ for msgtype, msgcontent in logentries[phase]:
+ if isinstance(msgcontent, basestring):
+ msgcontent = [msgcontent]
+ for line in msgcontent:
+ line = "%s: %s: %s" % (key, phase, line)
+ if sys.hexversion < 0x3000000 and not isinstance(line, bytes):
+ # Avoid TypeError from syslog.syslog()
+ line = line.encode(_encodings['content'],
+ 'backslashreplace')
+ syslog.syslog(_pri[msgtype], line.rstrip("\n"))
+ syslog.closelog()
diff --git a/lib/portage/emaint/__init__.py b/lib/portage/emaint/__init__.py
new file mode 100644
index 000000000..48bc6e2ae
--- /dev/null
+++ b/lib/portage/emaint/__init__.py
@@ -0,0 +1,5 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""System health checks and maintenance utilities.
+"""
diff --git a/lib/portage/emaint/defaults.py b/lib/portage/emaint/defaults.py
new file mode 100644
index 000000000..30f36af50
--- /dev/null
+++ b/lib/portage/emaint/defaults.py
@@ -0,0 +1,25 @@
+# Copyright 2005-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# parser option data
+CHECK = {"short": "-c", "long": "--check",
+ "help": "Check for problems (a default option for most modules)",
+ 'status': "Checking %s for problems",
+ 'action': 'store_true',
+ 'func': 'check'
+ }
+
+FIX = {"short": "-f", "long": "--fix",
+ "help": "Attempt to fix problems (a default option for most modules)",
+ 'status': "Attempting to fix %s",
+ 'action': 'store_true',
+ 'func': 'fix'
+ }
+
+VERSION = {"long": "--version",
+ "help": "show program's version number and exit",
+ 'action': 'store_true',
+ }
+
+# parser options
+DEFAULT_OPTIONS = {'check': CHECK, 'fix': FIX, 'version': VERSION}
diff --git a/lib/portage/emaint/main.py b/lib/portage/emaint/main.py
new file mode 100644
index 000000000..f448d6baa
--- /dev/null
+++ b/lib/portage/emaint/main.py
@@ -0,0 +1,246 @@
+# Copyright 2005-2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import argparse
+import sys
+import textwrap
+
+import portage
+from portage import os
+from portage.module import Modules
+from portage.progress import ProgressBar
+from portage.emaint.defaults import DEFAULT_OPTIONS
+
+class OptionItem(object):
+ """class to hold module ArgumentParser options data
+ """
+
+ def __init__(self, opt):
+ """
+ @type opt: dictionary
+ @param opt: options parser options
+ """
+ self.short = opt.get('short')
+ self.long = opt.get('long')
+ # '-' are not allowed in python identifiers
+ # so store the sanitized target variable name
+ self.target = self.long[2:].replace('-','_')
+ self.help = opt.get('help')
+ self.status = opt.get('status')
+ self.func = opt.get('func')
+ self.action = opt.get('action')
+ self.type = opt.get('type')
+ self.dest = opt.get('dest')
+ self.choices = opt.get('choices')
+
+ @property
+ def pargs(self):
+ pargs = []
+ if self.short is not None:
+ pargs.append(self.short)
+ if self.long is not None:
+ pargs.append(self.long)
+ return pargs
+
+ @property
+ def kwargs(self):
+ # Support for keyword arguments varies depending on the action,
+ # so only pass in the keywords that are needed, in order
+ # to avoid a TypeError.
+ kwargs = {}
+ if self.help is not None:
+ kwargs['help'] = self.help
+ if self.action is not None:
+ kwargs['action'] = self.action
+ if self.type is not None:
+ kwargs['type'] = self.type
+ if self.dest is not None:
+ kwargs['dest'] = self.dest
+ if self.choices is not None:
+ kwargs['choices'] = self.choices
+ return kwargs
+
+def usage(module_controller):
+ _usage = "usage: emaint [options] COMMAND"
+
+ desc = "The emaint program provides an interface to system health " + \
+ "checks and maintenance. See the emaint(1) man page " + \
+ "for additional information about the following commands:"
+
+ _usage += "\n\n"
+ for line in textwrap.wrap(desc, 65):
+ _usage += "%s\n" % line
+ _usage += "\nCommands:\n"
+ _usage += " %s" % "all".ljust(15) + \
+ "Perform all supported commands\n"
+ textwrap.subsequent_indent = ' '.ljust(17)
+ for mod in module_controller.module_names:
+ desc = textwrap.wrap(module_controller.get_description(mod), 65)
+ _usage += " %s%s\n" % (mod.ljust(15), desc[0])
+ for d in desc[1:]:
+ _usage += " %s%s\n" % (' '.ljust(15), d)
+ return _usage
+
+
+def module_opts(module_controller, module):
+ _usage = " %s module options:\n" % module
+ opts = module_controller.get_func_descriptions(module)
+ if opts == {}:
+ opts = DEFAULT_OPTIONS
+ for opt in sorted(opts):
+ optd = opts[opt]
+ if 'short' in optd:
+ opto = " %s, %s" % (optd['short'], optd['long'])
+ else:
+ opto = " %s" % (optd['long'],)
+ _usage += '%s %s\n' % (opto.ljust(15), optd['help'])
+ _usage += '\n'
+ return _usage
+
+
+class TaskHandler(object):
+ """Handles the running of the tasks it is given"""
+
+ def __init__(self, show_progress_bar=True, verbose=True, callback=None, module_output=None):
+ self.show_progress_bar = show_progress_bar
+ self.verbose = verbose
+ self.callback = callback
+ self.module_output = module_output
+ self.isatty = os.environ.get('TERM') != 'dumb' and sys.stdout.isatty()
+ self.progress_bar = ProgressBar(self.isatty, title="Emaint", max_desc_length=27)
+
+ def run_tasks(self, tasks, func, status=None, verbose=True, options=None):
+ """Runs the module tasks"""
+ if tasks is None or func is None:
+ return
+ returncodes = []
+ for task in tasks:
+ inst = task()
+ show_progress = self.show_progress_bar and self.isatty
+ # check if the function is capable of progressbar
+ # and possibly override it off
+ if show_progress and hasattr(inst, 'can_progressbar'):
+ show_progress = inst.can_progressbar(func)
+ if show_progress:
+ self.progress_bar.reset()
+ self.progress_bar.set_label(func + " " + inst.name())
+ onProgress = self.progress_bar.start()
+ else:
+ onProgress = None
+ kwargs = {
+ 'onProgress': onProgress,
+ 'module_output': self.module_output,
+ # pass in a copy of the options so a module can not pollute or change
+ # them for other tasks if there is more to do.
+ 'options': options.copy()
+ }
+ returncode, msgs = getattr(inst, func)(**kwargs)
+ returncodes.append(returncode)
+ if show_progress:
+ # make sure the final progress is displayed
+ self.progress_bar.display()
+ print()
+ self.progress_bar.stop()
+ if self.callback:
+ self.callback(msgs)
+
+ return returncodes
+
+
+def print_results(results):
+ if results:
+ print()
+ print("\n".join(results))
+ print("\n")
+
+
+def emaint_main(myargv):
+
+ # Similar to emerge, emaint needs a default umask so that created
+ # files (such as the world file) have sane permissions.
+ os.umask(0o22)
+
+ module_path = os.path.join(
+ (os.path.dirname(
+ os.path.realpath(__file__))), "modules"
+ )
+ module_controller = Modules(
+ path=module_path,
+ namepath="portage.emaint.modules")
+ module_names = module_controller.module_names[:]
+ module_names.insert(0, "all")
+
+ parser = argparse.ArgumentParser(usage=usage(module_controller))
+ # add default options
+ parser_options = []
+ for opt in DEFAULT_OPTIONS:
+ parser_options.append(OptionItem(DEFAULT_OPTIONS[opt]))
+ for mod in module_names[1:]:
+ desc = module_controller.get_func_descriptions(mod)
+ if desc:
+ for opt in desc:
+ parser_options.append(OptionItem(desc[opt]))
+ desc = module_controller.get_opt_descriptions(mod)
+ if desc:
+ for opt in desc:
+ parser_options.append(OptionItem(desc[opt]))
+ for opt in parser_options:
+ parser.add_argument(*opt.pargs, **opt.kwargs)
+
+ options, args = parser.parse_known_args(args=myargv)
+
+ if options.version:
+ print(portage.VERSION)
+ return os.EX_OK
+
+ if len(args) != 1:
+ parser.error("Incorrect number of arguments")
+ if args[0] not in module_names:
+ parser.error("%s target is not a known target" % args[0])
+
+ check_opt = None
+ func = status = long_action = None
+ for opt in parser_options:
+ if opt.long == '--check':
+ # Default action
+ check_opt = opt
+ if opt.status and getattr(options, opt.target, False):
+ if long_action is not None:
+ parser.error("--%s and %s are exclusive options" %
+ (long_action, opt.long))
+ status = opt.status
+ func = opt.func
+ long_action = opt.long.lstrip('-')
+
+ if long_action is None:
+ #print("DEBUG: long_action is None: setting to 'check'")
+ long_action = 'check'
+ func = check_opt.func
+ status = check_opt.status
+
+ if args[0] == "all":
+ tasks = []
+ for m in module_names[1:]:
+ #print("DEBUG: module: %s, functions: " % (m, str(module_controller.get_functions(m))))
+ if long_action in module_controller.get_functions(m):
+ tasks.append(module_controller.get_class(m))
+ elif long_action in module_controller.get_functions(args[0]):
+ tasks = [module_controller.get_class(args[0] )]
+ else:
+ portage.util.writemsg(
+ "\nERROR: module '%s' does not have option '--%s'\n\n" %
+ (args[0], long_action), noiselevel=-1)
+ portage.util.writemsg(module_opts(module_controller, args[0]),
+ noiselevel=-1)
+ sys.exit(1)
+
+ # need to pass the parser options dict to the modules
+ # so they are available if needed.
+ task_opts = options.__dict__
+ task_opts['return-messages'] = True
+ taskmaster = TaskHandler(callback=print_results, module_output=sys.stdout)
+ returncodes = taskmaster.run_tasks(tasks, func, status, options=task_opts)
+
+ sys.exit(False in returncodes)
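
OptionItem is the glue between the module option dictionaries and argparse: pargs supplies the flag names, and kwargs passes only the keywords the chosen action accepts. A small sketch using the --check entry from defaults.py:

    import argparse

    parser = argparse.ArgumentParser()
    check = OptionItem(DEFAULT_OPTIONS['check'])
    parser.add_argument(*check.pargs, **check.kwargs)
    # equivalent to:
    #   parser.add_argument('-c', '--check', action='store_true',
    #       help='Check for problems (a default option for most modules)')

    options, args = parser.parse_known_args(['-c', 'world'])
    # options.check is True, args == ['world']
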
diff --git a/lib/portage/emaint/modules/__init__.py b/lib/portage/emaint/modules/__init__.py
new file mode 100644
index 000000000..f67197d9f
--- /dev/null
+++ b/lib/portage/emaint/modules/__init__.py
@@ -0,0 +1,5 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Plug-in modules for system health checks and maintenance.
+"""
diff --git a/lib/portage/emaint/modules/binhost/__init__.py b/lib/portage/emaint/modules/binhost/__init__.py
new file mode 100644
index 000000000..d535b47dd
--- /dev/null
+++ b/lib/portage/emaint/modules/binhost/__init__.py
@@ -0,0 +1,21 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+doc = """Scan and generate metadata indexes for binary packages."""
+__doc__ = doc
+
+
+module_spec = {
+ 'name': 'binhost',
+ 'description': doc,
+ 'provides':{
+ 'module1': {
+ 'name': "binhost",
+ 'sourcefile': "binhost",
+ 'class': "BinhostHandler",
+ 'description': doc,
+ 'functions': ['check', 'fix'],
+ 'func_desc': {}
+ }
+ }
+ }
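
The module_spec dictionary is the plugin registration record read by the Modules controller (portage.module, not shown in this diff). A hypothetical reader, for illustration only, showing the fields the controller relies on:

    for submodule in module_spec['provides'].values():
        print("%s (%s): %s" % (
            submodule['name'],
            ", ".join(submodule['functions']),
            submodule['description']))
    # -> binhost (check, fix): Scan and generate metadata indexes for binary packages.
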
diff --git a/lib/portage/emaint/modules/binhost/binhost.py b/lib/portage/emaint/modules/binhost/binhost.py
new file mode 100644
index 000000000..d3df0cbce
--- /dev/null
+++ b/lib/portage/emaint/modules/binhost/binhost.py
@@ -0,0 +1,183 @@
+# Copyright 2005-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import stat
+
+import portage
+from portage import os
+from portage.util import writemsg
+from portage.versions import _pkg_str
+
+import sys
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+class BinhostHandler(object):
+
+ short_desc = "Generate a metadata index for binary packages"
+
+ @staticmethod
+ def name():
+ return "binhost"
+
+ def __init__(self):
+ eroot = portage.settings['EROOT']
+ self._bintree = portage.db[eroot]["bintree"]
+ self._bintree.populate()
+ self._pkgindex_file = self._bintree._pkgindex_file
+ self._pkgindex = self._bintree._load_pkgindex()
+
+ def _need_update(self, cpv, data):
+
+ if "MD5" not in data:
+ return True
+
+ size = data.get("SIZE")
+ if size is None:
+ return True
+
+ mtime = data.get("_mtime_")
+ if mtime is None:
+ return True
+
+ pkg_path = self._bintree.getname(cpv)
+ try:
+ s = os.lstat(pkg_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ # We can't update the index for this one because
+ # it disappeared.
+ return False
+
+ try:
+ if long(mtime) != s[stat.ST_MTIME]:
+ return True
+ if long(size) != long(s.st_size):
+ return True
+ except ValueError:
+ return True
+
+ return False
+
+ def check(self, **kwargs):
+ onProgress = kwargs.get('onProgress', None)
+ missing = []
+ cpv_all = self._bintree.dbapi.cpv_all()
+ cpv_all.sort()
+ maxval = len(cpv_all)
+ if onProgress:
+ onProgress(maxval, 0)
+ pkgindex = self._pkgindex
+ missing = []
+ metadata = {}
+ for d in pkgindex.packages:
+ metadata[d["CPV"]] = d
+ for i, cpv in enumerate(cpv_all):
+ d = metadata.get(cpv)
+ if not d or self._need_update(cpv, d):
+ missing.append(cpv)
+ if onProgress:
+ onProgress(maxval, i+1)
+ errors = ["'%s' is not in Packages" % cpv for cpv in missing]
+ stale = set(metadata).difference(cpv_all)
+ for cpv in stale:
+ errors.append("'%s' is not in the repository" % cpv)
+ if errors:
+ return (False, errors)
+ return (True, None)
+
+ def fix(self, **kwargs):
+ onProgress = kwargs.get('onProgress', None)
+ bintree = self._bintree
+ _instance_key = bintree.dbapi._instance_key
+ cpv_all = self._bintree.dbapi.cpv_all()
+ cpv_all.sort()
+ missing = []
+ maxval = 0
+ if onProgress:
+ onProgress(maxval, 0)
+ pkgindex = self._pkgindex
+ missing = []
+ stale = []
+ metadata = {}
+ for d in pkgindex.packages:
+ cpv = _pkg_str(d["CPV"], metadata=d,
+ settings=bintree.settings)
+ d["CPV"] = cpv
+ metadata[_instance_key(cpv)] = d
+ if not bintree.dbapi.cpv_exists(cpv):
+ stale.append(cpv)
+
+ for cpv in cpv_all:
+ d = metadata.get(_instance_key(cpv))
+ if not d or self._need_update(cpv, d):
+ missing.append(cpv)
+
+ if missing or stale:
+ from portage import locks
+ pkgindex_lock = locks.lockfile(
+ self._pkgindex_file, wantnewlockfile=1)
+ try:
+ # Repopulate with lock held. If _populate_local returns
+ # data then use that, since _load_pkgindex would return
+ # stale data in this case.
+ self._pkgindex = pkgindex = (bintree._populate_local() or
+ bintree._load_pkgindex())
+ cpv_all = self._bintree.dbapi.cpv_all()
+ cpv_all.sort()
+
+ # Recount stale/missing packages, with lock held.
+ missing = []
+ stale = []
+ metadata = {}
+ for d in pkgindex.packages:
+ cpv = _pkg_str(d["CPV"], metadata=d,
+ settings=bintree.settings)
+ d["CPV"] = cpv
+ metadata[_instance_key(cpv)] = d
+ if not bintree.dbapi.cpv_exists(cpv):
+ stale.append(cpv)
+
+ for cpv in cpv_all:
+ d = metadata.get(_instance_key(cpv))
+ if not d or self._need_update(cpv, d):
+ missing.append(cpv)
+
+ maxval = len(missing)
+ for i, cpv in enumerate(missing):
+ d = bintree._pkgindex_entry(cpv)
+ try:
+ bintree._eval_use_flags(cpv, d)
+ except portage.exception.InvalidDependString:
+ writemsg("!!! Invalid binary package: '%s'\n" % \
+ bintree.getname(cpv), noiselevel=-1)
+ else:
+ metadata[_instance_key(cpv)] = d
+
+ if onProgress:
+ onProgress(maxval, i+1)
+
+ for cpv in stale:
+ del metadata[_instance_key(cpv)]
+
+ # We've updated the pkgindex, so set it to
+ # repopulate when necessary.
+ bintree.populated = False
+
+ del pkgindex.packages[:]
+ pkgindex.packages.extend(metadata.values())
+ bintree._update_pkgindex_header(self._pkgindex.header)
+ bintree._pkgindex_write(self._pkgindex)
+
+ finally:
+ locks.unlockfile(pkgindex_lock)
+
+ if onProgress:
+ if maxval == 0:
+ maxval = 1
+ onProgress(maxval, maxval)
+ return (True, None)
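
check() and fix() follow the contract that TaskHandler.run_tasks() expects: they accept keyword arguments (onProgress, options, module_output) and return a (status, messages) tuple, where a True status means success. A minimal sketch of another module honouring the same contract (all names are illustrative):

    class ExampleHandler(object):

        short_desc = "Illustrate the emaint module return contract"

        @staticmethod
        def name():
            return "example"

        def check(self, **kwargs):
            onProgress = kwargs.get('onProgress')
            if onProgress:
                onProgress(1, 1)       # maxval, current
            return (True, None)        # no problems found

        def fix(self, **kwargs):
            return (True, ["example: nothing needed fixing"])
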
diff --git a/lib/portage/emaint/modules/config/__init__.py b/lib/portage/emaint/modules/config/__init__.py
new file mode 100644
index 000000000..e94e767e0
--- /dev/null
+++ b/lib/portage/emaint/modules/config/__init__.py
@@ -0,0 +1,21 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+doc = """Check and clean the config tracker list for uninstalled packages."""
+__doc__ = doc
+
+
+module_spec = {
+ 'name': 'config',
+ 'description': doc,
+ 'provides':{
+ 'module1': {
+ 'name': "cleanconfmem",
+ 'sourcefile': "config",
+ 'class': "CleanConfig",
+ 'description': doc,
+ 'functions': ['check', 'fix'],
+ 'func_desc': {}
+ }
+ }
+ }
diff --git a/lib/portage/emaint/modules/config/config.py b/lib/portage/emaint/modules/config/config.py
new file mode 100644
index 000000000..a0d56992c
--- /dev/null
+++ b/lib/portage/emaint/modules/config/config.py
@@ -0,0 +1,81 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.const import PRIVATE_PATH
+from portage.util import grabdict, writedict
+
+class CleanConfig(object):
+
+ short_desc = "Discard any no longer installed configs from emerge's tracker list"
+
+ @staticmethod
+ def name():
+ return "cleanconfmem"
+
+ def __init__(self):
+ self._root = portage.settings["ROOT"]
+ self.target = os.path.join(portage.settings["EROOT"], PRIVATE_PATH, 'config')
+
+ def load_configlist(self):
+ return grabdict(self.target)
+
+ def check(self, **kwargs):
+ onProgress = kwargs.get('onProgress', None)
+ configs = self.load_configlist()
+ messages = []
+ maxval = len(configs)
+ if onProgress:
+ onProgress(maxval, 0)
+ i = 0
+ keys = sorted(configs)
+ for config in keys:
+ if not os.path.exists(config):
+ messages.append(" %s" % config)
+ if onProgress:
+ onProgress(maxval, i+1)
+ i += 1
+ msgs = self._format_output(messages)
+ return (True, msgs)
+
+ def fix(self, **kwargs):
+ onProgress = kwargs.get('onProgress', None)
+ configs = self.load_configlist()
+ messages = []
+ maxval = len(configs)
+ if onProgress:
+ onProgress(maxval, 0)
+ i = 0
+
+ root = self._root
+ if root == "/":
+ root = None
+ modified = False
+ for config in sorted(configs):
+ if root is None:
+ full_path = config
+ else:
+ full_path = os.path.join(root, config.lstrip(os.sep))
+ if not os.path.exists(full_path):
+ modified = True
+ configs.pop(config)
+ messages.append(" %s" % config)
+ if onProgress:
+ onProgress(maxval, i+1)
+ i += 1
+ if modified:
+ writedict(configs, self.target)
+ msgs = self._format_output(messages, True)
+ return (True, msgs)
+
+ def _format_output(self, messages=[], cleaned=False):
+ output = []
+ if messages:
+ output.append('Not Installed:')
+ output += messages
+ tot = '------------------------------------\n Total %i Not installed'
+ if cleaned:
+ tot += ' ...Cleaned'
+ output.append(tot % len(messages))
+ return output
diff --git a/lib/portage/emaint/modules/logs/__init__.py b/lib/portage/emaint/modules/logs/__init__.py
new file mode 100644
index 000000000..5b78d71ff
--- /dev/null
+++ b/lib/portage/emaint/modules/logs/__init__.py
@@ -0,0 +1,46 @@
+# Copyright 2005-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+doc = """Check and clean old logs in the PORT_LOGDIR."""
+__doc__ = doc
+
+
+module_spec = {
+ 'name': 'logs',
+ 'description': doc,
+ 'provides':{
+ 'module1': {
+ 'name': "logs",
+ 'sourcefile': "logs",
+ 'class': "CleanLogs",
+ 'description': doc,
+ 'functions': ['check','clean'],
+ 'func_desc': {
+ 'clean': {
+ "short": "-C", "long": "--clean",
+ "help": "Cleans out logs more than 7 days old (cleanlogs only)" + \
+ " module-options: -t, -p",
+ 'status': "Cleaning %s",
+ 'action': 'store_true',
+ 'func': 'clean',
+ },
+ 'time': {
+ "short": "-t", "long": "--time",
+					"help": "(cleanlogs only): -t, --time Delete logs older than NUM days",
+ 'status': "",
+ 'type': int,
+ 'dest': 'NUM',
+ 'func': 'clean'
+ },
+ 'pretend': {
+ "short": "-p", "long": "--pretend",
+ "help": "(cleanlogs only): -p, --pretend Output logs that would be deleted",
+ 'status': "",
+ 'action': 'store_true',
+ 'dest': 'pretend',
+ 'func': 'clean'
+ }
+ }
+ }
+ }
+ }
diff --git a/lib/portage/emaint/modules/logs/logs.py b/lib/portage/emaint/modules/logs/logs.py
new file mode 100644
index 000000000..97b45475d
--- /dev/null
+++ b/lib/portage/emaint/modules/logs/logs.py
@@ -0,0 +1,110 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.util import shlex_split, varexpand
+
+## default clean command from make.globals
+## PORT_LOGDIR_CLEAN = 'find "${PORT_LOGDIR}" -type f ! -name "summary.log*" -mtime +7 -delete'
+
+ERROR_MESSAGES = {
+ 78 : "PORT_LOGDIR variable not set or PORT_LOGDIR not a directory.",
+ 127 : "PORT_LOGDIR_CLEAN command not found."
+}
+
+
+class CleanLogs(object):
+
+ short_desc = "Clean PORT_LOGDIR logs"
+
+ @staticmethod
+ def name():
+ return "logs"
+
+
+ def can_progressbar(self, func):
+ return False
+
+
+ def check(self, **kwargs):
+ options = kwargs.get('options', None)
+ if options:
+ options['pretend'] = True
+ return self.clean(**kwargs)
+
+
+ def clean(self, **kwargs):
+ """Log directory cleaning function
+
+		@param **kwargs: optional dictionary of values; those used in this function are:
+ settings: portage settings instance: defaults to portage.settings
+ "PORT_LOGDIR": directory to clean
+ "PORT_LOGDIR_CLEAN": command for cleaning the logs.
+ options: dict:
+ 'NUM': int: number of days
+ 'pretend': boolean
+ """
+ num_of_days = None
+ pretend = False
+
+		# portage.settings may not exist yet when this is called from
+		# _emerge.main.clean_logs(), so fall back to an empty mapping.
+ settings = kwargs.get('settings', getattr(portage, 'settings', {}))
+
+ options = kwargs.get('options', None)
+ if options:
+ num_of_days = options.get('NUM', None)
+ pretend = options.get('pretend', False)
+
+ clean_cmd = settings.get("PORT_LOGDIR_CLEAN")
+ if clean_cmd:
+ clean_cmd = shlex_split(clean_cmd)
+ if '-mtime' in clean_cmd and num_of_days is not None:
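+				# A NUM of 0 means "all logs": drop both '-mtime' and its
+				# argument; otherwise replace the argument with '+NUM' days.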
+ if num_of_days == 0:
+ i = clean_cmd.index('-mtime')
+ clean_cmd.remove('-mtime')
+ clean_cmd.pop(i)
+ else:
+ clean_cmd[clean_cmd.index('-mtime') +1] = \
+ '+%s' % str(num_of_days)
+ if pretend:
+ if "-delete" in clean_cmd:
+ clean_cmd.remove("-delete")
+
+ if not clean_cmd:
+ return (True, None)
+ rval = self._clean_logs(clean_cmd, settings)
+ errors = self._convert_errors(rval)
+ if errors:
+ return (False, errors)
+ return (True, None)
+
+
+ @staticmethod
+ def _clean_logs(clean_cmd, settings):
+ logdir = settings.get("PORT_LOGDIR")
+ if logdir is None or not os.path.isdir(logdir):
+ return 78
+
+ variables = {"PORT_LOGDIR" : logdir}
+ cmd = [varexpand(x, mydict=variables) for x in clean_cmd]
+
+ try:
+ rval = portage.process.spawn(cmd, env=os.environ)
+ except portage.exception.CommandNotFound:
+ rval = 127
+ return rval
+
+
+ @staticmethod
+ def _convert_errors(rval):
+ msg = []
+ if rval != os.EX_OK:
+ if rval in ERROR_MESSAGES:
+ msg.append(ERROR_MESSAGES[rval])
+ else:
+ msg.append("PORT_LOGDIR_CLEAN command returned %s" % rval)
+ msg.append("See the make.conf(5) man page for "
+ "PORT_LOGDIR_CLEAN usage instructions.")
+ return msg
diff --git a/lib/portage/emaint/modules/merges/__init__.py b/lib/portage/emaint/modules/merges/__init__.py
new file mode 100644
index 000000000..89aa758a0
--- /dev/null
+++ b/lib/portage/emaint/modules/merges/__init__.py
@@ -0,0 +1,32 @@
+# Copyright 2005-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+doc = """Scan for failed merges and fix them."""
+__doc__ = doc
+
+
+module_spec = {
+ 'name': 'merges',
+ 'description': doc,
+ 'provides': {
+ 'merges': {
+ 'name': "merges",
+ 'sourcefile': "merges",
+ 'class': "MergesHandler",
+ 'description': doc,
+ 'functions': ['check', 'fix', 'purge'],
+ 'func_desc': {
+ 'purge': {
+ 'short': '-P', 'long': '--purge',
+ 'help': 'Removes the list of previously failed merges.' +
+ ' WARNING: Only use this option if you plan on' +
+ ' manually fixing them or do not want them'
+ ' re-installed.',
+ 'status': "Removing %s",
+ 'action': 'store_true',
+ 'func': 'purge'
+ }
+ }
+ }
+ }
+}
diff --git a/lib/portage/emaint/modules/merges/merges.py b/lib/portage/emaint/modules/merges/merges.py
new file mode 100644
index 000000000..416a725ff
--- /dev/null
+++ b/lib/portage/emaint/modules/merges/merges.py
@@ -0,0 +1,291 @@
+# Copyright 2005-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os, _unicode_encode
+from portage.const import MERGING_IDENTIFIER, EPREFIX, PRIVATE_PATH, VDB_PATH
+from portage.dep import isvalidatom
+
+import shutil
+import subprocess
+import sys
+import time
+
+class TrackingFile(object):
+ """File for keeping track of failed merges."""
+
+
+ def __init__(self, tracking_path):
+ """
+ Create a TrackingFile object.
+
+ @param tracking_path: file path used to keep track of failed merges
+ @type tracking_path: String
+ """
+ self._tracking_path = _unicode_encode(tracking_path)
+
+
+ def save(self, failed_pkgs):
+ """
+ Save the specified packages that failed to merge.
+
+ @param failed_pkgs: dictionary of failed packages
+ @type failed_pkgs: dict
+ """
+ tracking_path = self._tracking_path
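+		# One "<pkg> <mtime>" pair per line; write_atomic replaces the
+		# file atomically so readers never see a partially written list.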
+ lines = ['%s %s' % (pkg, mtime) for pkg, mtime in failed_pkgs.items()]
+ portage.util.write_atomic(tracking_path, '\n'.join(lines))
+
+
+ def load(self):
+ """
+ Load previously failed merges.
+
+ @rtype: dict
+ @return: dictionary of packages that failed to merge
+ """
+ tracking_path = self._tracking_path
+ if not self.exists():
+ return {}
+ failed_pkgs = {}
+ with open(tracking_path, 'r') as tracking_file:
+ for failed_merge in tracking_file:
+ pkg, mtime = failed_merge.strip().split()
+ failed_pkgs[pkg] = mtime
+ return failed_pkgs
+
+
+ def exists(self):
+ """
+ Check if tracking file exists.
+
+ @rtype: bool
+ @return: true if tracking file exists, false otherwise
+ """
+ return os.path.exists(self._tracking_path)
+
+
+ def purge(self):
+ """Delete previously saved tracking file if one exists."""
+ if self.exists():
+ os.remove(self._tracking_path)
+
+
+ def __iter__(self):
+ """
+		Provide an iterator over failed merges.
+
+ @return: iterator of packages that failed to merge
+ """
+ return self.load().items().__iter__()
+
+
+class MergesHandler(object):
+ """Handle failed package merges."""
+
+ short_desc = "Remove failed merges"
+
+ @staticmethod
+ def name():
+ return "merges"
+
+
+ def __init__(self):
+ """Create MergesHandler object."""
+ eroot = portage.settings['EROOT']
+		tracking_path = os.path.join(eroot, PRIVATE_PATH, 'failed-merges')
+ self._tracking_file = TrackingFile(tracking_path)
+ self._vardb_path = os.path.join(eroot, VDB_PATH)
+
+
+ def can_progressbar(self, func):
+ return func == 'check'
+
+
+ def _scan(self, onProgress=None):
+ """
+ Scan the file system for failed merges and return any found.
+
+ @param onProgress: function to call for updating progress
+ @type onProgress: Function
+ @rtype: dict
+		@return: dictionary of packages that failed to merge
+ """
+ failed_pkgs = {}
+ for cat in os.listdir(self._vardb_path):
+ pkgs_path = os.path.join(self._vardb_path, cat)
+ if not os.path.isdir(pkgs_path):
+ continue
+ pkgs = os.listdir(pkgs_path)
+ maxval = len(pkgs)
+ for i, pkg in enumerate(pkgs):
+ if onProgress:
+ onProgress(maxval, i+1)
+ if MERGING_IDENTIFIER in pkg:
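+					# Directory names containing the MERGING_IDENTIFIER marker
+					# belong to merges that never completed.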
+ mtime = int(os.stat(os.path.join(pkgs_path, pkg)).st_mtime)
+ pkg = os.path.join(cat, pkg)
+ failed_pkgs[pkg] = mtime
+ return failed_pkgs
+
+
+ def _failed_pkgs(self, onProgress=None):
+ """
+ Return failed packages from both the file system and tracking file.
+
+ @rtype: dict
+		@return: dictionary of packages that failed to merge
+ """
+ failed_pkgs = self._scan(onProgress)
+ for pkg, mtime in self._tracking_file:
+ if pkg not in failed_pkgs:
+ failed_pkgs[pkg] = mtime
+ return failed_pkgs
+
+
+ def _remove_failed_dirs(self, failed_pkgs):
+ """
+ Remove the directories of packages that failed to merge.
+
+ @param failed_pkgs: failed packages whose directories to remove
+		@type failed_pkgs: dict
+ """
+ for failed_pkg in failed_pkgs:
+ pkg_path = os.path.join(self._vardb_path, failed_pkg)
+ # delete failed merge directory if it exists (it might not exist
+ # if loaded from tracking file)
+ if os.path.exists(pkg_path):
+ shutil.rmtree(pkg_path)
+ # TODO: try removing package CONTENTS to prevent orphaned
+ # files
+
+
+ def _get_pkg_atoms(self, failed_pkgs, pkg_atoms, pkg_invalid_entries):
+ """
+ Get the package atoms for the specified failed packages.
+
+ @param failed_pkgs: failed packages to iterate
+ @type failed_pkgs: dict
+ @param pkg_atoms: add package atoms to this set
+ @type pkg_atoms: set
+ @param pkg_invalid_entries: add any packages that are invalid to this set
+ @type pkg_invalid_entries: set
+ """
+
+ portdb = portage.db[portage.root]['porttree'].dbapi
+ for failed_pkg in failed_pkgs:
+ # validate pkg name
+ pkg_name = '%s' % failed_pkg.replace(MERGING_IDENTIFIER, '')
+ pkg_atom = '=%s' % pkg_name
+
+ if not isvalidatom(pkg_atom):
+ pkg_invalid_entries.add("'%s' is an invalid package atom."
+ % pkg_atom)
+ if not portdb.cpv_exists(pkg_name):
+ pkg_invalid_entries.add(
+ "'%s' does not exist in the portage tree." % pkg_name)
+ pkg_atoms.add(pkg_atom)
+
+
+ def _emerge_pkg_atoms(self, module_output, pkg_atoms):
+ """
+		Emerge the specified package atoms.
+
+		@param module_output: object that output will be written to
+		@type module_output: Class
+		@param pkg_atoms: package atoms to emerge
+ @type pkg_atoms: set
+ @rtype: list
+ @return: List of results
+ """
+ # TODO: rewrite code to use portage's APIs instead of a subprocess
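+		# Collision protection is disabled so the re-emerge is not blocked
+		# by files left behind by the earlier failed merge.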
+ env = {
+ "FEATURES" : "-collision-protect -protect-owned",
+ "PATH" : os.environ["PATH"]
+ }
+ emerge_cmd = (
+ portage._python_interpreter,
+ '-b',
+ os.path.join(EPREFIX or '/', 'usr', 'bin', 'emerge'),
+ '--ask',
+ '--quiet',
+ '--oneshot',
+ '--complete-graph=y'
+ )
+ results = []
+ msg = 'Re-Emerging packages that failed to merge...\n'
+ if module_output:
+ module_output.write(msg)
+ else:
+ module_output = subprocess.PIPE
+ results.append(msg)
+ proc = subprocess.Popen(emerge_cmd + tuple(pkg_atoms), env=env,
+ stdout=module_output, stderr=sys.stderr)
+ output = proc.communicate()[0]
+ if output:
+ results.append(output)
+ if proc.returncode != os.EX_OK:
+ emerge_status = "Failed to emerge '%s'" % (' '.join(pkg_atoms))
+ else:
+ emerge_status = "Successfully emerged '%s'" % (' '.join(pkg_atoms))
+ results.append(emerge_status)
+ return results
+
+
+ def check(self, **kwargs):
+ """Check for failed merges."""
+ onProgress = kwargs.get('onProgress', None)
+ failed_pkgs = self._failed_pkgs(onProgress)
+ errors = []
+ for pkg, mtime in failed_pkgs.items():
+ mtime_str = time.ctime(int(mtime))
+ errors.append("'%s' failed to merge on '%s'" % (pkg, mtime_str))
+ if errors:
+ return (False, errors)
+ return (True, None)
+
+
+ def fix(self, **kwargs):
+ """Attempt to fix any failed merges."""
+ module_output = kwargs.get('module_output', None)
+ failed_pkgs = self._failed_pkgs()
+ if not failed_pkgs:
+ return (True, ['No failed merges found.'])
+
+ pkg_invalid_entries = set()
+ pkg_atoms = set()
+ self._get_pkg_atoms(failed_pkgs, pkg_atoms, pkg_invalid_entries)
+ if pkg_invalid_entries:
+ return (False, pkg_invalid_entries)
+
+ try:
+ self._tracking_file.save(failed_pkgs)
+ except IOError as ex:
+ errors = ['Unable to save failed merges to tracking file: %s\n'
+ % str(ex)]
+ errors.append(', '.join(sorted(failed_pkgs)))
+ return (False, errors)
+ self._remove_failed_dirs(failed_pkgs)
+ results = self._emerge_pkg_atoms(module_output, pkg_atoms)
+ # list any new failed merges
+ for pkg in sorted(self._scan()):
+ results.append("'%s' still found as a failed merge." % pkg)
+ # reload config and remove successful packages from tracking file
+ portage._reset_legacy_globals()
+ vardb = portage.db[portage.root]['vartree'].dbapi
+ still_failed_pkgs = {}
+ for pkg, mtime in failed_pkgs.items():
+ pkg_name = '%s' % pkg.replace(MERGING_IDENTIFIER, '')
+ if not vardb.cpv_exists(pkg_name):
+ still_failed_pkgs[pkg] = mtime
+ self._tracking_file.save(still_failed_pkgs)
+ if still_failed_pkgs:
+ return (False, results)
+ return (True, results)
+
+
+ def purge(self, **kwargs):
+ """Attempt to remove previously saved tracking file."""
+ if not self._tracking_file.exists():
+ return (True, ['Tracking file not found.'])
+ self._tracking_file.purge()
+ return (True, ['Removed tracking file.'])
diff --git a/lib/portage/emaint/modules/move/__init__.py b/lib/portage/emaint/modules/move/__init__.py
new file mode 100644
index 000000000..0dbd86b0d
--- /dev/null
+++ b/lib/portage/emaint/modules/move/__init__.py
@@ -0,0 +1,32 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+doc = """Perform package move updates for installed and binary packages."""
+__doc__ = doc
+
+
+module_spec = {
+ 'name': 'move',
+ 'description': doc,
+ 'provides':{
+ 'module1': {
+ 'name': "moveinst",
+ 'sourcefile': "move",
+ 'class': "MoveInstalled",
+ 'description': doc,
+ 'options': ['check', 'fix'],
+ 'functions': ['check', 'fix'],
+ 'func_desc': {
+ }
+ },
+ 'module2':{
+ 'name': "movebin",
+ 'sourcefile': "move",
+ 'class': "MoveBinary",
+ 'description': "Perform package move updates for binary packages",
+ 'functions': ['check', 'fix'],
+ 'func_desc': {
+ }
+ }
+ }
+ }
diff --git a/lib/portage/emaint/modules/move/move.py b/lib/portage/emaint/modules/move/move.py
new file mode 100644
index 000000000..e9a6acb6b
--- /dev/null
+++ b/lib/portage/emaint/modules/move/move.py
@@ -0,0 +1,188 @@
+# Copyright 2005-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.exception import InvalidData
+from _emerge.Package import Package
+from portage.versions import _pkg_str
+
+class MoveHandler(object):
+
+ def __init__(self, tree, porttree):
+ self._tree = tree
+ self._portdb = porttree.dbapi
+ self._update_keys = Package._dep_keys
+ self._master_repo = self._portdb.repositories.mainRepo()
+ if self._master_repo is not None:
+ self._master_repo = self._master_repo.name
+
+ def _grab_global_updates(self):
+ from portage.update import grab_updates, parse_updates
+ retupdates = {}
+ errors = []
+
+ for repo_name in self._portdb.getRepositories():
+ repo = self._portdb.getRepositoryPath(repo_name)
+ updpath = os.path.join(repo, "profiles", "updates")
+ if not os.path.isdir(updpath):
+ continue
+
+ try:
+ rawupdates = grab_updates(updpath)
+ except portage.exception.DirectoryNotFound:
+ rawupdates = []
+ upd_commands = []
+ for mykey, mystat, mycontent in rawupdates:
+				commands, cmd_errors = parse_updates(mycontent)
+				upd_commands.extend(commands)
+				errors.extend(cmd_errors)
+ retupdates[repo_name] = upd_commands
+
+ if self._master_repo in retupdates:
+ retupdates['DEFAULT'] = retupdates[self._master_repo]
+
+ return retupdates, errors
+
+ def check(self, **kwargs):
+ onProgress = kwargs.get('onProgress', None)
+ allupdates, errors = self._grab_global_updates()
+		# Matching packages is relatively fast, so the progress bar is
+		# updated in indeterminate mode.
+ match = self._tree.dbapi.match
+ aux_get = self._tree.dbapi.aux_get
+ pkg_str = self._tree.dbapi._pkg_str
+ settings = self._tree.dbapi.settings
+ if onProgress:
+ onProgress(0, 0)
+ for repo, updates in allupdates.items():
+ if repo == 'DEFAULT':
+ continue
+ if not updates:
+ continue
+
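+			# Updates from the master repository also apply to packages
+			# whose repo provides no update entries of its own.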
+ def repo_match(repository):
+ return repository == repo or \
+ (repo == self._master_repo and \
+ repository not in allupdates)
+
+ for i, update_cmd in enumerate(updates):
+ if update_cmd[0] == "move":
+ origcp, newcp = update_cmd[1:]
+ for cpv in match(origcp):
+ try:
+ cpv = pkg_str(cpv, origcp.repo)
+ except (KeyError, InvalidData):
+ continue
+ if repo_match(cpv.repo):
+ errors.append("'%s' moved to '%s'" % (cpv, newcp))
+ elif update_cmd[0] == "slotmove":
+ pkg, origslot, newslot = update_cmd[1:]
+ atom = pkg.with_slot(origslot)
+ for cpv in match(atom):
+ try:
+ cpv = pkg_str(cpv, atom.repo)
+ except (KeyError, InvalidData):
+ continue
+ if repo_match(cpv.repo):
+ errors.append("'%s' slot moved from '%s' to '%s'" % \
+ (cpv, origslot, newslot))
+ if onProgress:
+ onProgress(0, 0)
+
+ # Searching for updates in all the metadata is relatively slow, so this
+ # is where the progress bar comes out of indeterminate mode.
+ cpv_all = self._tree.dbapi.cpv_all()
+ cpv_all.sort()
+ maxval = len(cpv_all)
+ meta_keys = self._update_keys + self._portdb._pkg_str_aux_keys
+ if onProgress:
+ onProgress(maxval, 0)
+ for i, cpv in enumerate(cpv_all):
+ try:
+ metadata = dict(zip(meta_keys, aux_get(cpv, meta_keys)))
+ except KeyError:
+ continue
+ try:
+ pkg = _pkg_str(cpv, metadata=metadata, settings=settings)
+ except InvalidData:
+ continue
+ metadata = dict((k, metadata[k]) for k in self._update_keys)
+ try:
+ updates = allupdates[pkg.repo]
+ except KeyError:
+ try:
+ updates = allupdates['DEFAULT']
+ except KeyError:
+ continue
+ if not updates:
+ continue
+ metadata_updates = \
+ portage.update_dbentries(updates, metadata, parent=pkg)
+ if metadata_updates:
+ errors.append("'%s' has outdated metadata" % cpv)
+ if onProgress:
+ onProgress(maxval, i+1)
+
+ if errors:
+ return (False, errors)
+ return (True, None)
+
+ def fix(self, **kwargs):
+ onProgress = kwargs.get('onProgress', None)
+ allupdates, errors = self._grab_global_updates()
+ # Matching packages and moving them is relatively fast, so the
+ # progress bar is updated in indeterminate mode.
+ move = self._tree.dbapi.move_ent
+ slotmove = self._tree.dbapi.move_slot_ent
+ if onProgress:
+ onProgress(0, 0)
+ for repo, updates in allupdates.items():
+ if repo == 'DEFAULT':
+ continue
+ if not updates:
+ continue
+
+ def repo_match(repository):
+ return repository == repo or \
+ (repo == self._master_repo and \
+ repository not in allupdates)
+
+ for i, update_cmd in enumerate(updates):
+ if update_cmd[0] == "move":
+ move(update_cmd, repo_match=repo_match)
+ elif update_cmd[0] == "slotmove":
+ slotmove(update_cmd, repo_match=repo_match)
+ if onProgress:
+ onProgress(0, 0)
+
+ # Searching for updates in all the metadata is relatively slow, so this
+ # is where the progress bar comes out of indeterminate mode.
+ self._tree.dbapi.update_ents(allupdates, onProgress=onProgress)
+ if errors:
+ return (False, errors)
+ return (True, None)
+
+class MoveInstalled(MoveHandler):
+
+ short_desc = "Perform package move updates for installed packages"
+
+ @staticmethod
+ def name():
+ return "moveinst"
+
+ def __init__(self):
+ eroot = portage.settings['EROOT']
+ MoveHandler.__init__(self, portage.db[eroot]["vartree"], portage.db[eroot]["porttree"])
+
+class MoveBinary(MoveHandler):
+
+ short_desc = "Perform package move updates for binary packages"
+
+ @staticmethod
+ def name():
+ return "movebin"
+
+ def __init__(self):
+ eroot = portage.settings['EROOT']
+ MoveHandler.__init__(self, portage.db[eroot]["bintree"], portage.db[eroot]['porttree'])
diff --git a/lib/portage/emaint/modules/resume/__init__.py b/lib/portage/emaint/modules/resume/__init__.py
new file mode 100644
index 000000000..0c86f9536
--- /dev/null
+++ b/lib/portage/emaint/modules/resume/__init__.py
@@ -0,0 +1,21 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+doc = """Check and fix problems in the resume and/or resume_backup files."""
+__doc__ = doc
+
+
+module_spec = {
+ 'name': 'resume',
+ 'description': doc,
+ 'provides':{
+ 'module1': {
+ 'name': "cleanresume",
+ 'sourcefile': "resume",
+ 'class': "CleanResume",
+ 'description': "Discard emerge --resume merge lists",
+ 'functions': ['check', 'fix'],
+ 'func_desc': {}
+ }
+ }
+ }
diff --git a/lib/portage/emaint/modules/resume/resume.py b/lib/portage/emaint/modules/resume/resume.py
new file mode 100644
index 000000000..580643b26
--- /dev/null
+++ b/lib/portage/emaint/modules/resume/resume.py
@@ -0,0 +1,59 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+
+
+class CleanResume(object):
+
+ short_desc = "Discard emerge --resume merge lists"
+
+ @staticmethod
+ def name():
+ return "cleanresume"
+
+ def check(self, **kwargs):
+ onProgress = kwargs.get('onProgress', None)
+ messages = []
+ mtimedb = portage.mtimedb
+ resume_keys = ("resume", "resume_backup")
+ maxval = len(resume_keys)
+ if onProgress:
+ onProgress(maxval, 0)
+ for i, k in enumerate(resume_keys):
+ try:
+ d = mtimedb.get(k)
+ if d is None:
+ continue
+ if not isinstance(d, dict):
+ messages.append("unrecognized resume list: '%s'" % k)
+ continue
+ mergelist = d.get("mergelist")
+ if mergelist is None or not hasattr(mergelist, "__len__"):
+ messages.append("unrecognized resume list: '%s'" % k)
+ continue
+ messages.append("resume list '%s' contains %d packages" % \
+ (k, len(mergelist)))
+ finally:
+ if onProgress:
+ onProgress(maxval, i+1)
+ return (True, messages)
+
+ def fix(self, **kwargs):
+ onProgress = kwargs.get('onProgress', None)
+ delete_count = 0
+ mtimedb = portage.mtimedb
+ resume_keys = ("resume", "resume_backup")
+ maxval = len(resume_keys)
+ if onProgress:
+ onProgress(maxval, 0)
+ for i, k in enumerate(resume_keys):
+ try:
+ if mtimedb.pop(k, None) is not None:
+ delete_count += 1
+ finally:
+ if onProgress:
+ onProgress(maxval, i+1)
+ if delete_count:
+ mtimedb.commit()
+ return (True, None)
diff --git a/lib/portage/emaint/modules/sync/__init__.py b/lib/portage/emaint/modules/sync/__init__.py
new file mode 100644
index 000000000..23f3a2e37
--- /dev/null
+++ b/lib/portage/emaint/modules/sync/__init__.py
@@ -0,0 +1,56 @@
+# Copyright 2014-2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from ....sync import _SUBMODULE_PATH_MAP
+
+doc = """Check repos.conf settings and sync repositories."""
+__doc__ = doc[:]
+
+module_spec = {
+ 'name': 'sync',
+ 'description': doc,
+ 'provides':{
+ 'sync-module': {
+ 'name': "sync",
+ 'sourcefile': "sync",
+ 'class': "SyncRepos",
+ 'description': doc,
+ 'functions': ['allrepos', 'auto', 'repo'],
+ 'func_desc': {
+ 'repo': {
+ "short": "-r", "long": "--repo",
+ "help": "(sync module only): -r, --repo Sync the specified repo",
+ 'status': "Syncing %s",
+ 'action': 'store',
+ 'func': 'repo',
+ },
+ 'allrepos': {
+ "short": "-A", "long": "--allrepos",
+ "help": "(sync module only): -A, --allrepos Sync all repos that have a sync-url defined",
+ 'status': "Syncing %s",
+ 'action': 'store_true',
+ 'dest': 'allrepos',
+ 'func': 'all_repos',
+ },
+ 'auto': {
+ "short": "-a", "long": "--auto",
+ "help": "(sync module only): -a, --auto Sync auto-sync enabled repos only",
+ 'status': "Syncing %s",
+ 'action': 'store_true',
+ 'dest': 'auto',
+ 'func': 'auto_sync',
+ },
+ },
+ 'opt_desc': {
+ 'sync-submodule': {
+ "long": "--sync-submodule",
+ "help": ("(sync module only): Restrict sync "
+ "to the specified submodule(s)"),
+ "choices": tuple(_SUBMODULE_PATH_MAP),
+ "action": "append",
+ "dest": "sync_submodule",
+ },
+ }
+ }
+ }
+ }
diff --git a/lib/portage/emaint/modules/sync/sync.py b/lib/portage/emaint/modules/sync/sync.py
new file mode 100644
index 000000000..ebdc362e1
--- /dev/null
+++ b/lib/portage/emaint/modules/sync/sync.py
@@ -0,0 +1,462 @@
+# Copyright 2014-2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+import os
+import sys
+
+import portage
+portage._internal_caller = True
+portage._sync_mode = True
+from portage.localization import _
+from portage.output import bold, red, create_color_func
+from portage._global_updates import _global_updates
+from portage.sync.controller import SyncManager
+from portage.util import writemsg_level
+from portage.util.digraph import digraph
+from portage.util._async.AsyncScheduler import AsyncScheduler
+from portage.util._eventloop.global_event_loop import global_event_loop
+from portage.util._eventloop.EventLoop import EventLoop
+
+import _emerge
+from _emerge.emergelog import emergelog
+
+
+portage.proxy.lazyimport.lazyimport(globals(),
+ '_emerge.actions:adjust_configs,load_emerge_config',
+ '_emerge.chk_updated_cfg_files:chk_updated_cfg_files',
+ '_emerge.main:parse_opts',
+ '_emerge.post_emerge:display_news_notification',
+)
+
+warn = create_color_func("WARN")
+
+if sys.hexversion >= 0x3000000:
+ _basestring = str
+else:
+ _basestring = basestring
+
+
+class SyncRepos(object):
+
+ short_desc = "Check repos.conf settings and/or sync repositories"
+
+ @staticmethod
+ def name():
+ return "sync"
+
+
+ def can_progressbar(self, func):
+ return False
+
+
+ def __init__(self, emerge_config=None, emerge_logging=False):
+ '''Class init function
+
+		@param emerge_config: optional; an emerge_config instance to use
+ @param emerge_logging: boolean, defaults to False
+ '''
+ if emerge_config is None:
+ # need a basic options instance
+ actions, opts, _files = parse_opts([], silent=True)
+ emerge_config = load_emerge_config(
+ action='sync', args=_files, opts=opts)
+
+ # Parse EMERGE_DEFAULT_OPTS, for settings like
+ # --package-moves=n.
+ cmdline = portage.util.shlex_split(
+ emerge_config.target_config.settings.get(
+ "EMERGE_DEFAULT_OPTS", ""))
+ emerge_config.opts = parse_opts(cmdline, silent=True)[1]
+
+ if hasattr(portage, 'settings'):
+ # cleanly destroy global objects
+ portage._reset_legacy_globals()
+ # update redundant global variables, for consistency
+ # and in order to conserve memory
+ portage.settings = emerge_config.target_config.settings
+ portage.db = emerge_config.trees
+ portage.root = portage.db._target_eroot
+
+ self.emerge_config = emerge_config
+ if emerge_logging:
+ _emerge.emergelog._disable = False
+ self.xterm_titles = "notitles" not in \
+ self.emerge_config.target_config.settings.features
+ emergelog(self.xterm_titles, " === sync")
+
+
+ def auto_sync(self, **kwargs):
+ '''Sync auto-sync enabled repos'''
+ options = kwargs.get('options', None)
+ if options:
+ return_messages = options.get('return-messages', False)
+ else:
+ return_messages = False
+ success, repos, msgs = self._get_repos(auto_sync_only=True)
+ if not success:
+ if return_messages:
+ return (False, msgs)
+ return (False, None)
+ return self._sync(repos, return_messages, emaint_opts=options)
+
+
+ def all_repos(self, **kwargs):
+ '''Sync all repos defined in repos.conf'''
+ options = kwargs.get('options', None)
+ if options:
+ return_messages = options.get('return-messages', False)
+ else:
+ return_messages = False
+ success, repos, msgs = self._get_repos(auto_sync_only=False)
+ if not success:
+ if return_messages:
+ return (False, msgs)
+ return (False, None)
+ return self._sync(repos, return_messages, emaint_opts=options)
+
+
+ def repo(self, **kwargs):
+ '''Sync the specified repo'''
+ options = kwargs.get('options', None)
+ if options:
+ repo_names = options.get('repo', '')
+ return_messages = options.get('return-messages', False)
+		else:
+			repo_names = ''
+			return_messages = False
+ if isinstance(repo_names, _basestring):
+ repo_names = repo_names.split()
+ success, repos, msgs = self._get_repos(auto_sync_only=False,
+ match_repos=repo_names)
+ if not success:
+ if return_messages:
+ return (False, msgs)
+ return (False, None)
+ return self._sync(repos, return_messages, emaint_opts=options)
+
+
+ @staticmethod
+ def _match_repos(repos, available):
+ '''Internal search, matches up the repo name or alias in repos.
+
+ @param repos: list of repo names or aliases to match
+		@param available: list of repo objects to search
+ @return: list of repo objects that match
+ '''
+ selected = []
+ for repo in available:
+ if repo.name in repos:
+ selected.append(repo)
+ elif (repo.aliases is not None and
+ any(alias in repos for alias in repo.aliases)):
+ selected.append(repo)
+ return selected
+
+
+ def _get_repos(self, auto_sync_only=True, match_repos=None):
+ msgs = []
+ repos = self.emerge_config.target_config.settings.repositories
+ if match_repos is not None:
+ # Discard duplicate repository names or aliases.
+ match_repos = set(match_repos)
+ repos = self._match_repos(match_repos, repos)
+ if len(repos) < len(match_repos):
+ # Build a set of all the matched repos' names and aliases so we
+ # can do a set difference for names that are missing.
+ repo_names = set()
+ for repo in repos:
+ repo_names.add(repo.name)
+ if repo.aliases is not None:
+ repo_names.update(repo.aliases)
+ missing = match_repos - repo_names
+ if missing:
+ msgs.append(red(" * ") + "The specified repo(s) were not found: %s" %
+ (" ".join(repo_name for repo_name in missing)) + \
+ "\n ...returning")
+ return (False, repos, msgs)
+
+ if auto_sync_only:
+ repos = self._filter_auto(repos)
+
+ sync_disabled = [repo for repo in repos if repo.sync_type is None]
+ if sync_disabled:
+ repos = [repo for repo in repos if repo.sync_type is not None]
+ if match_repos is not None:
+ msgs.append(red(" * " ) + "The specified repo(s) have sync disabled: %s" %
+ " ".join(repo.name for repo in sync_disabled) + \
+ "\n ...returning")
+ return (False, repos, msgs)
+
+ missing_sync_uri = [repo for repo in repos if repo.sync_uri is None]
+ if missing_sync_uri:
+ repos = [repo for repo in repos if repo.sync_uri is not None]
+ msgs.append(red(" * ") + "The specified repo(s) are missing sync-uri: %s" %
+ " ".join(repo.name for repo in missing_sync_uri) + \
+ "\n ...returning")
+ return (False, repos, msgs)
+
+ return (True, repos, msgs)
+
+
+ def _filter_auto(self, repos):
+ selected = []
+ for repo in repos:
+ if repo.auto_sync in ['yes', 'true']:
+ selected.append(repo)
+ return selected
+
+
+ def _sync(self, selected_repos, return_messages, emaint_opts=None):
+ msgs = []
+ if not selected_repos:
+ if return_messages:
+ msgs.append("Nothing to sync... returning")
+ return (True, msgs)
+ return (True, None)
+
+ if emaint_opts is not None:
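+			# Translate emaint option names (e.g. sync_submodule) into
+			# their emerge-style equivalents (e.g. --sync-submodule).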
+ for k, v in emaint_opts.items():
+ if v is not None:
+ k = "--" + k.replace("_", "-")
+ self.emerge_config.opts[k] = v
+
+ # Portage needs to ensure a sane umask for the files it creates.
+ os.umask(0o22)
+
+ sync_manager = SyncManager(
+ self.emerge_config.target_config.settings, emergelog)
+
+ max_jobs = (self.emerge_config.opts.get('--jobs', 1)
+ if 'parallel-fetch' in self.emerge_config.
+ target_config.settings.features else 1)
+ sync_scheduler = SyncScheduler(emerge_config=self.emerge_config,
+ selected_repos=selected_repos, sync_manager=sync_manager,
+ max_jobs=max_jobs,
+ event_loop=global_event_loop() if portage._internal_caller else
+ EventLoop(main=False))
+
+ sync_scheduler.start()
+ sync_scheduler.wait()
+ retvals = sync_scheduler.retvals
+ msgs.extend(sync_scheduler.msgs)
+ returncode = True
+
+ if retvals:
+ msgs.extend(self.rmessage(retvals, 'sync'))
+ for repo, retval in retvals:
+ if retval != os.EX_OK:
+ returncode = False
+ break
+ else:
+ msgs.extend(self.rmessage([('None', os.EX_OK)], 'sync'))
+
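+		# Substitute ${PORT_LOGDIR} in the configured clean command before
+		# spawning it.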
+ # run the post_sync_hook one last time for
+ # run only at sync completion hooks
+ if sync_scheduler.global_hooks_enabled:
+ rcode = sync_manager.perform_post_sync_hook('')
+ if rcode:
+ msgs.extend(self.rmessage([('None', rcode)], 'post-sync'))
+ if rcode != os.EX_OK:
+ returncode = False
+
+ # Reload the whole config.
+ portage._sync_mode = False
+ self._reload_config()
+ self._do_pkg_moves()
+ msgs.extend(self._check_updates())
+ display_news_notification(self.emerge_config.target_config,
+ self.emerge_config.opts)
+
+ if return_messages:
+ return (returncode, msgs)
+ return (returncode, None)
+
+
+ def _do_pkg_moves(self):
+ if self.emerge_config.opts.get('--package-moves') != 'n' and \
+ _global_updates(self.emerge_config.trees,
+ self.emerge_config.target_config.mtimedb["updates"],
+ quiet=("--quiet" in self.emerge_config.opts)):
+ self.emerge_config.target_config.mtimedb.commit()
+ # Reload the whole config.
+ self._reload_config()
+
+
+ def _check_updates(self):
+ mybestpv = self.emerge_config.target_config.trees['porttree'].dbapi.xmatch(
+ "bestmatch-visible", portage.const.PORTAGE_PACKAGE_ATOM)
+ mypvs = portage.best(
+ self.emerge_config.target_config.trees['vartree'].dbapi.match(
+ portage.const.PORTAGE_PACKAGE_ATOM))
+
+ chk_updated_cfg_files(self.emerge_config.target_config.root,
+ portage.util.shlex_split(
+ self.emerge_config.target_config.settings.get("CONFIG_PROTECT", "")))
+
+ msgs = []
+ if mybestpv != mypvs and "--quiet" not in self.emerge_config.opts:
+ msgs.append('')
+ msgs.append(warn(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended")
+ msgs.append(warn(" * ")+"that you update portage now, before any other packages are updated.")
+ msgs.append('')
+ msgs.append(warn(" * ")+"To update portage, run 'emerge --oneshot portage' now.")
+ msgs.append('')
+ return msgs
+
+
+ def _reload_config(self):
+ '''Reload the whole config from scratch.'''
+ load_emerge_config(emerge_config=self.emerge_config)
+ adjust_configs(self.emerge_config.opts, self.emerge_config.trees)
+
+
+ def rmessage(self, rvals, action):
+ '''Creates emaint style messages to return to the task handler'''
+ messages = []
+ for rval in rvals:
+ messages.append("Action: %s for repo: %s, returned code = %s"
+ % (action, rval[0], rval[1]))
+ return messages
+
+
+class SyncScheduler(AsyncScheduler):
+ '''
+ Sync repos in parallel, but don't sync a given repo until all
+ of its masters have synced.
+ '''
+ def __init__(self, **kwargs):
+ '''
+ @param emerge_config: an emerge_config instance
+ @param selected_repos: list of RepoConfig instances
+		@param sync_manager: a SyncManager instance
+ '''
+ self._emerge_config = kwargs.pop('emerge_config')
+ self._selected_repos = kwargs.pop('selected_repos')
+ self._sync_manager = kwargs.pop('sync_manager')
+ AsyncScheduler.__init__(self, **kwargs)
+ self._init_graph()
+ self.retvals = []
+ self.msgs = []
+
+ def _init_graph(self):
+ '''
+ Graph relationships between repos and their masters.
+ '''
+ self._sync_graph = digraph()
+ self._leaf_nodes = []
+ self._repo_map = {}
+ self._running_repos = set()
+ selected_repo_names = frozenset(repo.name
+ for repo in self._selected_repos)
+ for repo in self._selected_repos:
+ self._repo_map[repo.name] = repo
+ self._sync_graph.add(repo.name, None)
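+			# Each selected master is added as a child of the repo, so
+			# masters show up as leaf nodes and are synced before the
+			# repos that depend on them.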
+ for master in repo.masters:
+ if master.name in selected_repo_names:
+ self._repo_map[master.name] = master
+ self._sync_graph.add(master.name, repo.name)
+ self._complete_graph = self._sync_graph.copy()
+ self._hooks_repos = set()
+ self._update_leaf_nodes()
+
+ def _task_exit(self, task):
+ '''
+ Remove the task from the graph, in order to expose
+ more leaf nodes.
+ '''
+ self._running_tasks.discard(task)
+ # Set hooks_enabled = True by default, in order to ensure
+ # that hooks will be called in a backward-compatible manner
+ # even if all sync tasks have failed.
+ hooks_enabled = True
+ returncode = task.returncode
+ if task.returncode == os.EX_OK:
+ returncode, message, updatecache_flg, hooks_enabled = task.result
+ if message:
+ self.msgs.append(message)
+ repo = task.kwargs['repo'].name
+ self._running_repos.remove(repo)
+ self.retvals.append((repo, returncode))
+ self._sync_graph.remove(repo)
+ self._update_leaf_nodes()
+ if hooks_enabled:
+ self._hooks_repos.add(repo)
+ super(SyncScheduler, self)._task_exit(self)
+
+ def _master_hooks(self, repo_name):
+ """
+ @param repo_name: a repo name
+ @type repo_name: str
+ @return: True if hooks would have been executed for any master
+ repositories of the given repo, False otherwise
+ @rtype: bool
+ """
+ traversed_nodes = set()
+ node_stack = [repo_name]
+ while node_stack:
+ node = node_stack.pop()
+ if node in self._hooks_repos:
+ return True
+ if node not in traversed_nodes:
+ traversed_nodes.add(node)
+ node_stack.extend(self._complete_graph.child_nodes(node))
+ return False
+
+ @property
+ def global_hooks_enabled(self):
+ """
+ @return: True if repo.postsync.d hooks would have been executed
+ for any repositories.
+ @rtype: bool
+ """
+ return bool(self._hooks_repos)
+
+ def _update_leaf_nodes(self):
+ '''
+ Populate self._leaf_nodes with current leaves from
+ self._sync_graph. If a circular master relationship
+ is discovered, choose a random node to break the cycle.
+ '''
+ if self._sync_graph and not self._leaf_nodes:
+ self._leaf_nodes = [obj for obj in
+ self._sync_graph.leaf_nodes()
+ if obj not in self._running_repos]
+
+ if not (self._leaf_nodes or self._running_repos):
+ # If there is a circular master relationship,
+ # choose a random node to break the cycle.
+ self._leaf_nodes = [next(iter(self._sync_graph))]
+
+ def _next_task(self):
+ '''
+ Return a task for the next available leaf node.
+ '''
+ if not self._sync_graph:
+ raise StopIteration()
+ # If self._sync_graph is non-empty, then self._leaf_nodes
+ # is guaranteed to be non-empty, since otherwise
+ # _can_add_job would have returned False and prevented
+ # _next_task from being immediately called.
+ node = self._leaf_nodes.pop()
+ self._running_repos.add(node)
+ self._update_leaf_nodes()
+
+ return self._sync_manager.sync_async(
+ emerge_config=self._emerge_config,
+ repo=self._repo_map[node],
+ master_hooks=self._master_hooks(node))
+
+ def _can_add_job(self):
+ '''
+ Returns False if there are no leaf nodes available.
+ '''
+ if not AsyncScheduler._can_add_job(self):
+ return False
+ return bool(self._leaf_nodes) and not self._terminated.is_set()
+
+ def _keep_scheduling(self):
+ '''
+ Schedule as long as the graph is non-empty, and we haven't
+ been terminated.
+ '''
+ return bool(self._sync_graph) and not self._terminated.is_set()
diff --git a/lib/portage/emaint/modules/world/__init__.py b/lib/portage/emaint/modules/world/__init__.py
new file mode 100644
index 000000000..eaf3e5eff
--- /dev/null
+++ b/lib/portage/emaint/modules/world/__init__.py
@@ -0,0 +1,21 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+doc = """Check and fix problems in the world file."""
+__doc__ = doc
+
+
+module_spec = {
+ 'name': 'world',
+ 'description': doc,
+ 'provides':{
+ 'module1':{
+ 'name': "world",
+ 'sourcefile': "world",
+ 'class': "WorldHandler",
+ 'description': doc,
+ 'functions': ['check', 'fix'],
+ 'func_desc': {}
+ }
+ }
+ }
diff --git a/lib/portage/emaint/modules/world/world.py b/lib/portage/emaint/modules/world/world.py
new file mode 100644
index 000000000..d142c3dda
--- /dev/null
+++ b/lib/portage/emaint/modules/world/world.py
@@ -0,0 +1,93 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+
+
+class WorldHandler(object):
+
+ short_desc = "Fix problems in the world file"
+
+ @staticmethod
+ def name():
+ return "world"
+
+ def __init__(self):
+ self.invalid = []
+ self.not_installed = []
+ self.okay = []
+ from portage._sets import load_default_config
+ setconfig = load_default_config(portage.settings,
+ portage.db[portage.settings['EROOT']])
+ self._sets = setconfig.getSets()
+
+ def _check_world(self, onProgress):
+ eroot = portage.settings['EROOT']
+ self.world_file = os.path.join(eroot, portage.const.WORLD_FILE)
+ self.found = os.access(self.world_file, os.R_OK)
+ vardb = portage.db[eroot]["vartree"].dbapi
+
+ from portage._sets import SETPREFIX
+ sets = self._sets
+ world_atoms = list(sets["selected"])
+ maxval = len(world_atoms)
+ if onProgress:
+ onProgress(maxval, 0)
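+		# Entries beginning with SETPREFIX refer to nested package sets
+		# rather than atoms; they are validated against the known sets
+		# instead of the installed-package database.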
+ for i, atom in enumerate(world_atoms):
+ if not isinstance(atom, portage.dep.Atom):
+ if atom.startswith(SETPREFIX):
+ s = atom[len(SETPREFIX):]
+ if s in sets:
+ self.okay.append(atom)
+ else:
+ self.not_installed.append(atom)
+ else:
+ self.invalid.append(atom)
+ if onProgress:
+ onProgress(maxval, i+1)
+ continue
+ okay = True
+ if not vardb.match(atom):
+ self.not_installed.append(atom)
+ okay = False
+ if okay:
+ self.okay.append(atom)
+ if onProgress:
+ onProgress(maxval, i+1)
+
+ def check(self, **kwargs):
+ onProgress = kwargs.get('onProgress', None)
+ self._check_world(onProgress)
+ errors = []
+ if self.found:
+ errors += ["'%s' is not a valid atom" % x for x in self.invalid]
+ errors += ["'%s' is not installed" % x for x in self.not_installed]
+ else:
+ errors.append(self.world_file + " could not be opened for reading")
+ if errors:
+ return (False, errors)
+ return (True, None)
+
+ def fix(self, **kwargs):
+ onProgress = kwargs.get('onProgress', None)
+ world_set = self._sets["selected"]
+ world_set.lock()
+ try:
+ world_set.load() # maybe it's changed on disk
+ before = set(world_set)
+ self._check_world(onProgress)
+ after = set(self.okay)
+ errors = []
+ if before != after:
+ try:
+ world_set.replace(self.okay)
+ except portage.exception.PortageException:
+ errors.append("%s could not be opened for writing" % \
+ self.world_file)
+ if errors:
+ return (False, errors)
+ return (True, None)
+ finally:
+ world_set.unlock()
+
diff --git a/lib/portage/env/__init__.py b/lib/portage/env/__init__.py
new file mode 100644
index 000000000..17b66d17c
--- /dev/null
+++ b/lib/portage/env/__init__.py
@@ -0,0 +1,3 @@
+# Copyright: 2007 Gentoo Foundation
+# License: GPL2
+
diff --git a/lib/portage/env/config.py b/lib/portage/env/config.py
new file mode 100644
index 000000000..865d8353a
--- /dev/null
+++ b/lib/portage/env/config.py
@@ -0,0 +1,105 @@
+# config.py -- Portage Config
+# Copyright 2007-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ["ConfigLoaderKlass", "GenericFile", "PackageKeywordsFile",
+ "PackageUseFile", "PackageMaskFile", "PortageModulesFile"]
+
+from portage.cache.mappings import UserDict
+from portage.env.loaders import KeyListFileLoader, KeyValuePairFileLoader, ItemFileLoader
+
+class ConfigLoaderKlass(UserDict):
+ """
+ A base class stub for things to inherit from.
+ Users may want a non-file backend.
+ """
+
+ def __init__(self, loader):
+ """
+		@param loader: an object whose load() method returns two dicts:
+			the first a data dict, the second a dict of errors.
+ """
+ UserDict.__init__(self)
+ self._loader = loader
+
+ def load(self):
+ """
+ Load the data from the loader.
+
+ @throws LoaderError:
+ """
+
+ self.data, self.errors = self._loader.load()
+
+class GenericFile(UserDict):
+ """
+	Tries each known loader in turn until one returns data without errors.
+	This is slow, but useful when you have no idea what format a file is in
+	(ideally the file would declare its own type).
+ """
+
+ loaders = [KeyListFileLoader, KeyValuePairFileLoader, ItemFileLoader]
+
+ def __init__(self, filename):
+ UserDict.__init__(self)
+ self.filename = filename
+
+ def load(self):
+ for loader in self.loaders:
+ l = loader(self.filename, None)
+ data, errors = l.load()
+ if len(data) and not len(errors):
+ (self.data, self.errors) = (data, errors)
+ return
+
+
+class PackageKeywordsFile(ConfigLoaderKlass):
+ """
+ Inherits from ConfigLoaderKlass; implements a file-based backend.
+ """
+
+ default_loader = KeyListFileLoader
+
+ def __init__(self, filename):
+ super(PackageKeywordsFile, self).__init__(
+ self.default_loader(filename, validator=None))
+
+class PackageUseFile(ConfigLoaderKlass):
+ """
+	Inherits from ConfigLoaderKlass; implements a file-based backend. Doesn't handle recursion yet.
+ """
+
+ default_loader = KeyListFileLoader
+ def __init__(self, filename):
+ super(PackageUseFile, self).__init__(
+ self.default_loader(filename, validator=None))
+
+class PackageMaskFile(ConfigLoaderKlass):
+ """
+ A class that implements a file-based package.mask
+
+	Entries in package.mask are of the form:
+ atom1
+ atom2
+ or optionally
+ -atom3
+ to revert a previous mask; this only works when masking files are stacked
+ """
+
+ default_loader = ItemFileLoader
+
+ def __init__(self, filename):
+ super(PackageMaskFile, self).__init__(
+ self.default_loader(filename, validator=None))
+
+class PortageModulesFile(ConfigLoaderKlass):
+ """
+ File Class for /etc/portage/modules
+ """
+
+ default_loader = KeyValuePairFileLoader
+
+ def __init__(self, filename):
+ super(PortageModulesFile, self).__init__(
+ self.default_loader(filename, validator=None))
diff --git a/lib/portage/env/loaders.py b/lib/portage/env/loaders.py
new file mode 100644
index 000000000..f86988471
--- /dev/null
+++ b/lib/portage/env/loaders.py
@@ -0,0 +1,327 @@
+# config.py -- Portage Config
+# Copyright 2007-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import io
+import stat
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.util:writemsg',
+)
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.localization import _
+
+class LoaderError(Exception):
+
+ def __init__(self, resource, error_msg):
+ """
+ @param resource: Resource that failed to load (file/sql/etc)
+ @type resource: String
+ @param error_msg: Error from underlying Loader system
+ @type error_msg: String
+ """
+
+ self.resource = resource
+ self.error_msg = error_msg
+
+ def __str__(self):
+ return "Failed while loading resource: %s, error was: %s" % (
+ self.resource, self.error_msg)
+
+
+def RecursiveFileLoader(filename):
+ """
+	If filename is a file, return a generator that yields filename;
+	if filename is a directory, return a generator that yields the
+	files in that directory.
+
+ Ignore files beginning with . or ending in ~.
+ Prune CVS directories.
+
+ @param filename: name of a file/directory to traverse
+ @rtype: list
+ @return: List of files to process
+ """
+
+ try:
+ st = os.stat(filename)
+ except OSError:
+ return
+ if stat.S_ISDIR(st.st_mode):
+ for root, dirs, files in os.walk(filename):
+ for d in list(dirs):
+ if d[:1] == '.' or d == 'CVS':
+ dirs.remove(d)
+ for f in files:
+ try:
+ f = _unicode_decode(f,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ if f[:1] == '.' or f[-1:] == '~':
+ continue
+ yield os.path.join(root, f)
+ else:
+ yield filename
+
+
+class DataLoader(object):
+
+ def __init__(self, validator):
+ f = validator
+ if f is None:
+ # if they pass in no validator, just make a fake one
+ # that always returns true
+ def validate(key):
+ return True
+ f = validate
+ self._validate = f
+
+ def load(self):
+ """
+ Function to do the actual work of a Loader
+ """
+ raise NotImplementedError("Please override in a subclass")
+
+class EnvLoader(DataLoader):
+ """ Class to access data in the environment """
+ def __init__(self, validator):
+ DataLoader.__init__(self, validator)
+
+ def load(self):
+ return os.environ
+
+class TestTextLoader(DataLoader):
+ """ You give it some data, it 'loads' it for you, no filesystem access
+ """
+ def __init__(self, validator):
+ DataLoader.__init__(self, validator)
+ self.data = {}
+ self.errors = {}
+
+ def setData(self, text):
+ """Explicitly set the data field
+ Args:
+ text - a dict of data typical of Loaders
+ Returns:
+ None
+ """
+ if isinstance(text, dict):
+ self.data = text
+ else:
+ raise ValueError("setData requires a dict argument")
+
+ def setErrors(self, errors):
+ self.errors = errors
+
+ def load(self):
+ return (self.data, self.errors)
+
+
+class FileLoader(DataLoader):
+ """ Class to access data in files """
+
+ def __init__(self, filename, validator):
+ """
+ Args:
+ filename : Name of file or directory to open
+ validator : class with validate() method to validate data.
+ """
+ DataLoader.__init__(self, validator)
+ self.fname = filename
+
+ def load(self):
+ """
+		Return the {source: {key: value}} pairs from a file,
+		and the {source: [list of errors]} from the load.
+		If self.fname is a directory, every file beneath it is loaded.
+
+		@rtype: tuple
+		@return:
+			Returns (data, errors); both may be empty dicts or populated.
+ """
+ data = {}
+ errors = {}
+ # I tried to save a nasty lookup on lineparser by doing the lookup
+ # once, which may be expensive due to digging in child classes.
+ func = self.lineParser
+ for fn in RecursiveFileLoader(self.fname):
+ try:
+ with io.open(_unicode_encode(fn,
+ encoding=_encodings['fs'], errors='strict'), mode='r',
+ encoding=_encodings['content'], errors='replace') as f:
+ lines = f.readlines()
+ except EnvironmentError as e:
+ if e.errno == errno.EACCES:
+ writemsg(_("Permission denied: '%s'\n") % fn, noiselevel=-1)
+ del e
+ elif e.errno in (errno.ENOENT, errno.ESTALE):
+ del e
+ else:
+ raise
+ else:
+ for line_num, line in enumerate(lines):
+ func(line, line_num, data, errors)
+ return (data, errors)
+
+ def lineParser(self, line, line_num, data, errors):
+ """ This function parses 1 line at a time
+ Args:
+ line: a string representing 1 line of a file
+ line_num: an integer representing what line we are processing
+ data: a dict that contains the data we have extracted from the file
+ already
+ errors: a dict representing parse errors.
+ Returns:
+ Nothing (None). Writes to data and errors
+ """
+ raise NotImplementedError("Please over-ride this in a child class")
+
+class ItemFileLoader(FileLoader):
+ """
+ Class to load data from a file full of items one per line
+
+ >>> item1
+ >>> item2
+ >>> item3
+ >>> item1
+
+ becomes { 'item1':None, 'item2':None, 'item3':None }
+ Note that due to the data store being a dict, duplicates
+ are removed.
+ """
+
+ def __init__(self, filename, validator):
+ FileLoader.__init__(self, filename, validator)
+
+ def lineParser(self, line, line_num, data, errors):
+ line = line.strip()
+ if line.startswith('#'): # Skip commented lines
+ return
+ if not len(line): # skip empty lines
+ return
+ split = line.split()
+ if not len(split):
+ errors.setdefault(self.fname, []).append(
+ _("Malformed data at line: %s, data: %s")
+ % (line_num + 1, line))
+ return
+ key = split[0]
+ if not self._validate(key):
+ errors.setdefault(self.fname, []).append(
+ _("Validation failed at line: %s, data %s")
+ % (line_num + 1, key))
+ return
+ data[key] = None
+
+class KeyListFileLoader(FileLoader):
+ """
+ Class to load data from a file full of key [list] tuples
+
+ >>>>key foo1 foo2 foo3
+ becomes
+ {'key':['foo1','foo2','foo3']}
+ """
+
+ def __init__(self, filename, validator=None, valuevalidator=None):
+ FileLoader.__init__(self, filename, validator)
+
+ f = valuevalidator
+ if f is None:
+ # if they pass in no validator, just make a fake one
+ # that always returns true
+ def validate(key):
+ return True
+ f = validate
+ self._valueValidate = f
+
+ def lineParser(self, line, line_num, data, errors):
+ line = line.strip()
+ if line.startswith('#'): # Skip commented lines
+ return
+ if not len(line): # skip empty lines
+ return
+ split = line.split()
+ if len(split) < 1:
+ errors.setdefault(self.fname, []).append(
+ _("Malformed data at line: %s, data: %s")
+ % (line_num + 1, line))
+ return
+ key = split[0]
+ value = split[1:]
+ if not self._validate(key):
+ errors.setdefault(self.fname, []).append(
+ _("Key validation failed at line: %s, data %s")
+ % (line_num + 1, key))
+ return
+ if not self._valueValidate(value):
+ errors.setdefault(self.fname, []).append(
+ _("Value validation failed at line: %s, data %s")
+ % (line_num + 1, value))
+ return
+ if key in data:
+			data[key].extend(value)
+ else:
+ data[key] = value
+
+
+class KeyValuePairFileLoader(FileLoader):
+ """
+ Class to load data from a file full of key=value pairs
+
+ >>>>key=value
+ >>>>foo=bar
+ becomes:
+ {'key':'value',
+ 'foo':'bar'}
+ """
+
+ def __init__(self, filename, validator, valuevalidator=None):
+ FileLoader.__init__(self, filename, validator)
+
+ f = valuevalidator
+ if f is None:
+ # if they pass in no validator, just make a fake one
+ # that always returns true
+ def validate(key):
+ return True
+ f = validate
+ self._valueValidate = f
+
+
+ def lineParser(self, line, line_num, data, errors):
+ line = line.strip()
+ if line.startswith('#'): # skip commented lines
+ return
+ if not len(line): # skip empty lines
+ return
+ split = line.split('=', 1)
+ if len(split) < 2:
+ errors.setdefault(self.fname, []).append(
+ _("Malformed data at line: %s, data %s")
+ % (line_num + 1, line))
+ return
+ key = split[0].strip()
+ value = split[1].strip()
+ if not key:
+ errors.setdefault(self.fname, []).append(
+ _("Malformed key at line: %s, key %s")
+ % (line_num + 1, key))
+ return
+ if not self._validate(key):
+ errors.setdefault(self.fname, []).append(
+ _("Key validation failed at line: %s, data %s")
+ % (line_num + 1, key))
+ return
+ if not self._valueValidate(value):
+ errors.setdefault(self.fname, []).append(
+ _("Value validation failed at line: %s, data %s")
+ % (line_num + 1, value))
+ return
+ data[key] = value
diff --git a/lib/portage/env/validators.py b/lib/portage/env/validators.py
new file mode 100644
index 000000000..4d11d69fe
--- /dev/null
+++ b/lib/portage/env/validators.py
@@ -0,0 +1,20 @@
+# validators.py Portage File Loader Code
+# Copyright 2007 Gentoo Foundation
+
+from portage.dep import isvalidatom
+
+ValidAtomValidator = isvalidatom
+
+def PackagesFileValidator(atom):
+	""" This function strips a leading "-" or "*" from the atom,
+	then checks whether the remaining atom is valid, returning
+	True if it is and False otherwise.
+
+ Args:
+ atom: a string representing an atom such as sys-apps/portage-2.1
+ """
+ if atom.startswith("*") or atom.startswith("-"):
+ atom = atom[1:]
+ if not isvalidatom(atom):
+ return False
+ return True
diff --git a/lib/portage/exception.py b/lib/portage/exception.py
new file mode 100644
index 000000000..aed8beeb9
--- /dev/null
+++ b/lib/portage/exception.py
@@ -0,0 +1,211 @@
+# Copyright 1998-2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import signal
+import sys
+from portage import _encodings, _unicode_encode, _unicode_decode
+from portage.localization import _
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+class PortageException(Exception):
+ """General superclass for portage exceptions"""
+ if sys.hexversion >= 0x3000000:
+ def __init__(self, value):
+ self.value = value[:]
+
+ def __str__(self):
+ if isinstance(self.value, str):
+ return self.value
+ else:
+ return repr(self.value)
+ else:
+ def __init__(self, value):
+ self.value = value[:]
+ if isinstance(self.value, basestring):
+ self.value = _unicode_decode(self.value,
+ encoding=_encodings['content'], errors='replace')
+
+ def __unicode__(self):
+ if isinstance(self.value, unicode):
+ return self.value
+ else:
+ return _unicode_decode(repr(self.value),
+ encoding=_encodings['content'], errors='replace')
+
+ def __str__(self):
+ if isinstance(self.value, unicode):
+ return _unicode_encode(self.value,
+ encoding=_encodings['content'], errors='backslashreplace')
+ else:
+ return repr(self.value)
+
+class PortageKeyError(KeyError, PortageException):
+ __doc__ = KeyError.__doc__
+ def __init__(self, value):
+ KeyError.__init__(self, value)
+ PortageException.__init__(self, value)
+
+class CorruptionError(PortageException):
+ """Corruption indication"""
+
+class InvalidDependString(PortageException):
+ """An invalid depend string has been encountered"""
+ def __init__(self, value, errors=None):
+ PortageException.__init__(self, value)
+ self.errors = errors
+
+class InvalidVersionString(PortageException):
+ """An invalid version string has been encountered"""
+
+class SecurityViolation(PortageException):
+	"""A security violation was detected"""
+
+class IncorrectParameter(PortageException):
+ """A parameter of the wrong type was passed"""
+
+class MissingParameter(PortageException):
+ """A parameter is required for the action requested but was not passed"""
+
+class ParseError(PortageException):
+ """An error was generated while attempting to parse the request"""
+
+class InvalidData(PortageException):
+ """An incorrect formatting was passed instead of the expected one"""
+ def __init__(self, value, category=None):
+ PortageException.__init__(self, value)
+ self.category = category
+
+class InvalidDataType(PortageException):
+ """An incorrect type was passed instead of the expected one"""
+
+class InvalidLocation(PortageException):
+ """Data was not found when it was expected to exist or was specified incorrectly"""
+
+class FileNotFound(InvalidLocation):
+ """A file was not found when it was expected to exist"""
+
+class DirectoryNotFound(InvalidLocation):
+ """A directory was not found when it was expected to exist"""
+
+class IsADirectory(PortageException):
+ """A directory was found when it was expected to be a file"""
+ from errno import EISDIR as errno
+
+class OperationNotPermitted(PortageException):
+	"""An operation was not permitted by the operating system"""
+ from errno import EPERM as errno
+
+class OperationNotSupported(PortageException):
+ """Operation not supported"""
+ from errno import EOPNOTSUPP as errno
+
+class PermissionDenied(PortageException):
+ """Permission denied"""
+ from errno import EACCES as errno
+
+class TryAgain(PortageException):
+ """Try again"""
+ from errno import EAGAIN as errno
+
+class TimeoutException(PortageException):
+ """Operation timed out"""
+ # NOTE: ETIME is undefined on FreeBSD (bug #336875)
+ #from errno import ETIME as errno
+
+class AlarmSignal(TimeoutException):
+ def __init__(self, value, signum=None, frame=None):
+ TimeoutException.__init__(self, value)
+ self.signum = signum
+ self.frame = frame
+
+ @classmethod
+ def register(cls, time):
+ signal.signal(signal.SIGALRM, cls._signal_handler)
+ signal.alarm(time)
+
+ @classmethod
+ def unregister(cls):
+ signal.alarm(0)
+ signal.signal(signal.SIGALRM, signal.SIG_DFL)
+
+ @classmethod
+ def _signal_handler(cls, signum, frame):
+ signal.signal(signal.SIGALRM, signal.SIG_DFL)
+ raise AlarmSignal("alarm signal",
+ signum=signum, frame=frame)
+
+class ReadOnlyFileSystem(PortageException):
+ """Read-only file system"""
+ from errno import EROFS as errno
+
+class CommandNotFound(PortageException):
+ """A required binary was not available or executable"""
+
+class AmbiguousPackageName(ValueError, PortageException):
+ """Raised by portage.cpv_expand() when the package name is ambiguous due
+ to the existence of multiple matches in different categories. This inherits
+ from ValueError, for backward compatibility with calling code that already
+ handles ValueError."""
+ def __str__(self):
+ return ValueError.__str__(self)
+
+class PortagePackageException(PortageException):
+ """Malformed or missing package data"""
+
+class PackageNotFound(PortagePackageException):
+ """Missing Ebuild or Binary"""
+
+class PackageSetNotFound(PortagePackageException):
+ """Missing package set"""
+
+class InvalidPackageName(PortagePackageException):
+ """Malformed package name"""
+
+class InvalidAtom(PortagePackageException):
+ """Malformed atom spec"""
+ def __init__(self, value, category=None):
+ PortagePackageException.__init__(self, value)
+ self.category = category
+
+class UnsupportedAPIException(PortagePackageException):
+ """Unsupported API"""
+ def __init__(self, cpv, eapi):
+ self.cpv, self.eapi = cpv, eapi
+ def __str__(self):
+ eapi = self.eapi
+ if not isinstance(eapi, basestring):
+ eapi = str(eapi)
+ eapi = eapi.lstrip("-")
+ msg = _("Unable to do any operations on '%(cpv)s', since "
+ "its EAPI is higher than this portage version's. Please upgrade"
+ " to a portage version that supports EAPI '%(eapi)s'.") % \
+ {"cpv": self.cpv, "eapi": eapi}
+ return _unicode_decode(msg,
+ encoding=_encodings['content'], errors='replace')
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'], errors='backslashreplace')
+
+class SignatureException(PortageException):
+ """Signature was not present in the checked file"""
+
+class DigestException(SignatureException):
+ """A problem exists in the digest"""
+
+class MissingSignature(SignatureException):
+ """Signature was not present in the checked file"""
+
+class InvalidSignature(SignatureException):
+ """Signature was checked and was not a valid, current, nor trusted signature"""
+
+class UntrustedSignature(SignatureException):
+ """Signature was not certified to the desired security level"""
+
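
The AlarmSignal helper above turns SIGALRM into a catchable exception; a typical pattern looks like the sketch below (the slow operation is a made-up stand-in for a blocking call):

    import time
    from portage.exception import AlarmSignal

    def slow_operation():          # stand-in for a blocking call
        time.sleep(60)

    try:
        AlarmSignal.register(5)    # raise AlarmSignal after 5 seconds
        slow_operation()
    except AlarmSignal:
        print('timed out')
    finally:
        AlarmSignal.unregister()   # cancel the alarm, restore SIG_DFL
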
diff --git a/lib/portage/getbinpkg.py b/lib/portage/getbinpkg.py
new file mode 100644
index 000000000..14dc149b1
--- /dev/null
+++ b/lib/portage/getbinpkg.py
@@ -0,0 +1,934 @@
+# getbinpkg.py -- Portage binary-package helper functions
+# Copyright 2003-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+from portage.output import colorize
+from portage.cache.mappings import slot_dict_class
+from portage.localization import _
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.package.ebuild.fetch import _hide_url_passwd
+from _emerge.Package import _all_metadata_keys
+
+import sys
+import socket
+import time
+import tempfile
+import base64
+import warnings
+
+_all_errors = [NotImplementedError, ValueError, socket.error]
+
+try:
+ from html.parser import HTMLParser as html_parser_HTMLParser
+except ImportError:
+ from HTMLParser import HTMLParser as html_parser_HTMLParser
+
+try:
+ from urllib.parse import unquote as urllib_parse_unquote
+except ImportError:
+ from urllib2 import unquote as urllib_parse_unquote
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+try:
+ import ftplib
+except ImportError as e:
+ sys.stderr.write(colorize("BAD", "!!! CANNOT IMPORT FTPLIB: ") + str(e) + "\n")
+else:
+ _all_errors.extend(ftplib.all_errors)
+
+try:
+ try:
+ from http.client import HTTPConnection as http_client_HTTPConnection
+ from http.client import BadStatusLine as http_client_BadStatusLine
+ from http.client import ResponseNotReady as http_client_ResponseNotReady
+ from http.client import error as http_client_error
+ except ImportError:
+ from httplib import HTTPConnection as http_client_HTTPConnection
+ from httplib import BadStatusLine as http_client_BadStatusLine
+ from httplib import ResponseNotReady as http_client_ResponseNotReady
+ from httplib import error as http_client_error
+except ImportError as e:
+ sys.stderr.write(colorize("BAD", "!!! CANNOT IMPORT HTTP.CLIENT: ") + str(e) + "\n")
+else:
+ _all_errors.append(http_client_error)
+
+_all_errors = tuple(_all_errors)
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+def make_metadata_dict(data):
+
+ warnings.warn("portage.getbinpkg.make_metadata_dict() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ myid, _myglob = data
+
+ mydict = {}
+ for k_bytes in portage.xpak.getindex_mem(myid):
+ k = _unicode_decode(k_bytes,
+ encoding=_encodings['repo.content'], errors='replace')
+ if k not in _all_metadata_keys and k != "CATEGORY":
+ continue
+ v = _unicode_decode(portage.xpak.getitem(data, k_bytes),
+ encoding=_encodings['repo.content'], errors='replace')
+ mydict[k] = v
+
+ return mydict
+
+class ParseLinks(html_parser_HTMLParser):
+	"""Parser class that overrides HTMLParser to grab all anchors from an html
+	page and provide suffix and prefix limiters"""
+ def __init__(self):
+
+ warnings.warn("portage.getbinpkg.ParseLinks is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ self.PL_anchors = []
+ html_parser_HTMLParser.__init__(self)
+
+ def get_anchors(self):
+ return self.PL_anchors
+
+ def get_anchors_by_prefix(self, prefix):
+ newlist = []
+ for x in self.PL_anchors:
+ if x.startswith(prefix):
+ if x not in newlist:
+ newlist.append(x[:])
+ return newlist
+
+ def get_anchors_by_suffix(self, suffix):
+ newlist = []
+ for x in self.PL_anchors:
+ if x.endswith(suffix):
+ if x not in newlist:
+ newlist.append(x[:])
+ return newlist
+
+ def handle_endtag(self, tag):
+ pass
+
+ def handle_starttag(self, tag, attrs):
+ if tag == "a":
+ for x in attrs:
+ if x[0] == 'href':
+ if x[1] not in self.PL_anchors:
+ self.PL_anchors.append(urllib_parse_unquote(x[1]))
+
+
+def create_conn(baseurl, conn=None):
+	"""Takes a protocol://site:port/address URL and an optional
+	connection. If a connection is already active, it is reused.
+	baseurl is reduced to an address and the result is returned as a
+	tuple (conn, protocol, address, http_params, http_headers)."""
+
+ warnings.warn("portage.getbinpkg.create_conn() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ parts = baseurl.split("://", 1)
+ if len(parts) != 2:
+ raise ValueError(_("Provided URI does not "
+ "contain protocol identifier. '%s'") % baseurl)
+ protocol, url_parts = parts
+ del parts
+
+ url_parts = url_parts.split("/")
+ host = url_parts[0]
+ if len(url_parts) < 2:
+ address = "/"
+ else:
+ address = "/"+"/".join(url_parts[1:])
+ del url_parts
+
+ userpass_host = host.split("@", 1)
+ if len(userpass_host) == 1:
+ host = userpass_host[0]
+ userpass = ["anonymous"]
+ else:
+ host = userpass_host[1]
+ userpass = userpass_host[0].split(":")
+ del userpass_host
+
+ if len(userpass) > 2:
+ raise ValueError(_("Unable to interpret username/password provided."))
+ elif len(userpass) == 2:
+ username = userpass[0]
+ password = userpass[1]
+ elif len(userpass) == 1:
+ username = userpass[0]
+ password = None
+ del userpass
+
+ http_headers = {}
+ http_params = {}
+ if username and password:
+ try:
+ encodebytes = base64.encodebytes
+ except AttributeError:
+ # Python 2
+ encodebytes = base64.encodestring
+ http_headers = {
+ b"Authorization": "Basic %s" % \
+ encodebytes(_unicode_encode("%s:%s" % (username, password))).replace(
+ b"\012",
+ b""
+ ),
+ }
+
+ if not conn:
+ if protocol == "https":
+ # Use local import since https typically isn't needed, and
+ # this way we can usually avoid triggering the global scope
+ # http.client ImportError handler (like during stage1 -> stage2
+ # builds where USE=ssl is disabled for python).
+ try:
+ try:
+ from http.client import HTTPSConnection as http_client_HTTPSConnection
+ except ImportError:
+ from httplib import HTTPSConnection as http_client_HTTPSConnection
+ except ImportError:
+ raise NotImplementedError(
+ _("python must have ssl enabled for https support"))
+ conn = http_client_HTTPSConnection(host)
+ elif protocol == "http":
+ conn = http_client_HTTPConnection(host)
+ elif protocol == "ftp":
+ passive = 1
+ if(host[-1] == "*"):
+ passive = 0
+ host = host[:-1]
+ conn = ftplib.FTP(host)
+ if password:
+ conn.login(username, password)
+ else:
+ sys.stderr.write(colorize("WARN",
+ _(" * No password provided for username")) + " '%s'" % \
+ (username,) + "\n\n")
+ conn.login(username)
+ conn.set_pasv(passive)
+ conn.set_debuglevel(0)
+ elif protocol == "sftp":
+ try:
+ import paramiko
+ except ImportError:
+ raise NotImplementedError(
+ _("paramiko must be installed for sftp support"))
+ t = paramiko.Transport(host)
+ t.connect(username=username, password=password)
+ conn = paramiko.SFTPClient.from_transport(t)
+ else:
+ raise NotImplementedError(_("%s is not a supported protocol.") % protocol)
+
+ return (conn, protocol, address, http_params, http_headers)
+
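
For reference, a small sketch of the 5-tuple returned by create_conn() above; the host and credentials are made up, and the call emits a DeprecationWarning since this API is deprecated:

    from portage.getbinpkg import create_conn

    conn, protocol, address, params, headers = create_conn(
        'http://user:secret@mirror.example.org/packages/')
    print(protocol, address)             # http /packages/
    print(b'Authorization' in headers)   # True, a Basic auth header was built
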
+def make_ftp_request(conn, address, rest=None, dest=None):
+	"""Uses the |conn| object to request the data from |address|,
+	issuing a REST command if a resume offset is passed."""
+
+ warnings.warn("portage.getbinpkg.make_ftp_request() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ try:
+
+ if dest:
+ fstart_pos = dest.tell()
+
+ conn.voidcmd("TYPE I")
+ fsize = conn.size(address)
+
+ if (rest != None) and (rest < 0):
+ rest = fsize+int(rest)
+ if rest < 0:
+ rest = 0
+
+ if rest != None:
+ mysocket = conn.transfercmd("RETR %s" % str(address), rest)
+ else:
+ mysocket = conn.transfercmd("RETR %s" % str(address))
+
+ mydata = ""
+ while 1:
+ somedata = mysocket.recv(8192)
+ if somedata:
+ if dest:
+ dest.write(somedata)
+ else:
+ mydata = mydata + somedata
+ else:
+ break
+
+ if dest:
+ data_size = fstart_pos - dest.tell()
+ else:
+ data_size = len(mydata)
+
+ mysocket.close()
+ conn.voidresp()
+ conn.voidcmd("TYPE A")
+
+ return mydata, (fsize != data_size), ""
+
+ except ValueError as e:
+ return None, int(str(e)[:4]), str(e)
+
+
+def make_http_request(conn, address, _params={}, headers={}, dest=None):
+ """Uses the |conn| object to request
+ the data from address, performing Location forwarding and using the
+ optional params and headers."""
+
+ warnings.warn("portage.getbinpkg.make_http_request() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ rc = 0
+ response = None
+ while (rc == 0) or (rc == 301) or (rc == 302):
+ try:
+ if rc != 0:
+ conn = create_conn(address)[0]
+ conn.request("GET", address, body=None, headers=headers)
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ return None, None, "Server request failed: %s" % str(e)
+ response = conn.getresponse()
+ rc = response.status
+
+		# 301 and 302 mean the resource has moved; follow the Location header.
+ if ((rc == 301) or (rc == 302)):
+ ignored_data = response.read()
+ del ignored_data
+ for x in str(response.msg).split("\n"):
+ parts = x.split(": ", 1)
+ if parts[0] == "Location":
+ if (rc == 301):
+ sys.stderr.write(colorize("BAD",
+ _("Location has moved: ")) + str(parts[1]) + "\n")
+ if (rc == 302):
+ sys.stderr.write(colorize("BAD",
+ _("Location has temporarily moved: ")) + \
+ str(parts[1]) + "\n")
+ address = parts[1]
+ break
+
+ if (rc != 200) and (rc != 206):
+ return None, rc, "Server did not respond successfully (%s: %s)" % (str(response.status), str(response.reason))
+
+ if dest:
+ dest.write(response.read())
+ return "", 0, ""
+
+ return response.read(), 0, ""
+
+
+def match_in_array(array, prefix="", suffix="", match_both=1, allow_overlap=0):
+
+ warnings.warn("portage.getbinpkg.match_in_array() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ myarray = []
+
+ if not (prefix and suffix):
+ match_both = 0
+
+ for x in array:
+ add_p = 0
+ if prefix and (len(x) >= len(prefix)) and (x[:len(prefix)] == prefix):
+ add_p = 1
+
+ if match_both:
+ if prefix and not add_p: # Require both, but don't have first one.
+ continue
+ else:
+ if add_p: # Only need one, and we have it.
+ myarray.append(x[:])
+ continue
+
+		if not allow_overlap: # Don't allow prefix and suffix to overlap.
+ if len(x) >= (len(prefix)+len(suffix)):
+ pass
+ else:
+ continue # Too short to match.
+ else:
+ pass # Do whatever... We're overlapping.
+
+ if suffix and (len(x) >= len(suffix)) and (x[-len(suffix):] == suffix):
+ myarray.append(x) # It matches
+ else:
+ continue # Doesn't match.
+
+ return myarray
+
+
+def dir_get_list(baseurl, conn=None):
+ """Takes a base url to connect to and read from.
+ URI should be in the form <proto>://<site>[:port]<path>
+ Connection is used for persistent connection instances."""
+
+ warnings.warn("portage.getbinpkg.dir_get_list() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ if not conn:
+ keepconnection = 0
+ else:
+ keepconnection = 1
+
+ conn, protocol, address, params, headers = create_conn(baseurl, conn)
+
+ listing = None
+ if protocol in ["http","https"]:
+ if not address.endswith("/"):
+ # http servers can return a 400 error here
+ # if the address doesn't end with a slash.
+ address += "/"
+ page, rc, msg = make_http_request(conn, address, params, headers)
+
+ if page:
+ parser = ParseLinks()
+ parser.feed(_unicode_decode(page))
+ del page
+ listing = parser.get_anchors()
+ else:
+ import portage.exception
+ raise portage.exception.PortageException(
+ _("Unable to get listing: %s %s") % (rc,msg))
+ elif protocol in ["ftp"]:
+ if address[-1] == '/':
+ olddir = conn.pwd()
+ conn.cwd(address)
+ listing = conn.nlst()
+ conn.cwd(olddir)
+ del olddir
+ else:
+ listing = conn.nlst(address)
+ elif protocol == "sftp":
+ listing = conn.listdir(address)
+ else:
+ raise TypeError(_("Unknown protocol. '%s'") % protocol)
+
+ if not keepconnection:
+ conn.close()
+
+ return listing
+
+def file_get_metadata(baseurl, conn=None, chunk_size=3000):
+ """Takes a base url to connect to and read from.
+ URI should be in the form <proto>://<site>[:port]<path>
+ Connection is used for persistent connection instances."""
+
+ warnings.warn("portage.getbinpkg.file_get_metadata() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ if not conn:
+ keepconnection = 0
+ else:
+ keepconnection = 1
+
+ conn, protocol, address, params, headers = create_conn(baseurl, conn)
+
+ if protocol in ["http","https"]:
+ headers["Range"] = "bytes=-%s" % str(chunk_size)
+ data, _x, _x = make_http_request(conn, address, params, headers)
+ elif protocol in ["ftp"]:
+ data, _x, _x = make_ftp_request(conn, address, -chunk_size)
+ elif protocol == "sftp":
+ f = conn.open(address)
+ try:
+ f.seek(-chunk_size, 2)
+ data = f.read()
+ finally:
+ f.close()
+ else:
+ raise TypeError(_("Unknown protocol. '%s'") % protocol)
+
+ if data:
+ xpaksize = portage.xpak.decodeint(data[-8:-4])
+ if (xpaksize + 8) > chunk_size:
+ myid = file_get_metadata(baseurl, conn, xpaksize + 8)
+ if not keepconnection:
+ conn.close()
+ return myid
+ else:
+ xpak_data = data[len(data) - (xpaksize + 8):-8]
+ del data
+
+ myid = portage.xpak.xsplit_mem(xpak_data)
+ if not myid:
+ myid = None, None
+ del xpak_data
+ else:
+ myid = None, None
+
+ if not keepconnection:
+ conn.close()
+
+ return myid
+
+
+def file_get(baseurl=None, dest=None, conn=None, fcmd=None, filename=None,
+ fcmd_vars=None):
+ """Takes a base url to connect to and read from.
+ URI should be in the form <proto>://[user[:pass]@]<site>[:port]<path>"""
+
+ if not fcmd:
+
+ warnings.warn("Use of portage.getbinpkg.file_get() without the fcmd "
+ "parameter is deprecated", DeprecationWarning, stacklevel=2)
+
+ return file_get_lib(baseurl, dest, conn)
+
+ variables = {}
+
+ if fcmd_vars is not None:
+ variables.update(fcmd_vars)
+
+ if "DISTDIR" not in variables:
+ if dest is None:
+ raise portage.exception.MissingParameter(
+ _("%s is missing required '%s' key") %
+ ("fcmd_vars", "DISTDIR"))
+ variables["DISTDIR"] = dest
+
+ if "URI" not in variables:
+ if baseurl is None:
+ raise portage.exception.MissingParameter(
+ _("%s is missing required '%s' key") %
+ ("fcmd_vars", "URI"))
+ variables["URI"] = baseurl
+
+ if "FILE" not in variables:
+ if filename is None:
+ filename = os.path.basename(variables["URI"])
+ variables["FILE"] = filename
+
+ from portage.util import varexpand
+ from portage.process import spawn
+ myfetch = portage.util.shlex_split(fcmd)
+ myfetch = [varexpand(x, mydict=variables) for x in myfetch]
+ fd_pipes = {
+ 0: portage._get_stdin().fileno(),
+ 1: sys.__stdout__.fileno(),
+ 2: sys.__stdout__.fileno()
+ }
+ sys.__stdout__.flush()
+ sys.__stderr__.flush()
+ retval = spawn(myfetch, env=os.environ.copy(), fd_pipes=fd_pipes)
+ if retval != os.EX_OK:
+ sys.stderr.write(_("Fetcher exited with a failure condition.\n"))
+ return 0
+ return 1
+
+def file_get_lib(baseurl, dest, conn=None):
+ """Takes a base url to connect to and read from.
+ URI should be in the form <proto>://<site>[:port]<path>
+ Connection is used for persistent connection instances."""
+
+ warnings.warn("portage.getbinpkg.file_get_lib() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ if not conn:
+ keepconnection = 0
+ else:
+ keepconnection = 1
+
+ conn, protocol, address, params, headers = create_conn(baseurl, conn)
+
+ sys.stderr.write("Fetching '" + str(os.path.basename(address)) + "'\n")
+ if protocol in ["http", "https"]:
+ data, rc, _msg = make_http_request(conn, address, params, headers, dest=dest)
+ elif protocol in ["ftp"]:
+ data, rc, _msg = make_ftp_request(conn, address, dest=dest)
+ elif protocol == "sftp":
+ rc = 0
+ try:
+ f = conn.open(address)
+ except SystemExit:
+ raise
+ except Exception:
+ rc = 1
+ else:
+ try:
+ if dest:
+ bufsize = 8192
+ while True:
+ data = f.read(bufsize)
+ if not data:
+ break
+ dest.write(data)
+ finally:
+ f.close()
+ else:
+ raise TypeError(_("Unknown protocol. '%s'") % protocol)
+
+ if not keepconnection:
+ conn.close()
+
+ return rc
+
+
+def dir_get_metadata(baseurl, conn=None, chunk_size=3000, verbose=1, usingcache=1, makepickle=None):
+
+ warnings.warn("portage.getbinpkg.dir_get_metadata() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ if not conn:
+ keepconnection = 0
+ else:
+ keepconnection = 1
+
+ cache_path = "/var/cache/edb"
+ metadatafilename = os.path.join(cache_path, 'remote_metadata.pickle')
+
+ if makepickle is None:
+ makepickle = "/var/cache/edb/metadata.idx.most_recent"
+
+ try:
+ conn = create_conn(baseurl, conn)[0]
+ except _all_errors as e:
+ # ftplib.FTP(host) can raise errors like this:
+ # socket.error: (111, 'Connection refused')
+ sys.stderr.write("!!! %s\n" % (e,))
+ return {}
+
+ out = sys.stdout
+ try:
+ metadatafile = open(_unicode_encode(metadatafilename,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ mypickle = pickle.Unpickler(metadatafile)
+ try:
+ mypickle.find_global = None
+ except AttributeError:
+ # TODO: If py3k, override Unpickler.find_class().
+ pass
+ metadata = mypickle.load()
+ out.write(_("Loaded metadata pickle.\n"))
+ out.flush()
+ metadatafile.close()
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except Exception:
+ metadata = {}
+ if baseurl not in metadata:
+ metadata[baseurl] = {}
+ if "indexname" not in metadata[baseurl]:
+ metadata[baseurl]["indexname"] = ""
+ if "timestamp" not in metadata[baseurl]:
+ metadata[baseurl]["timestamp"] = 0
+ if "unmodified" not in metadata[baseurl]:
+ metadata[baseurl]["unmodified"] = 0
+ if "data" not in metadata[baseurl]:
+ metadata[baseurl]["data"] = {}
+
+ if not os.access(cache_path, os.W_OK):
+ sys.stderr.write(_("!!! Unable to write binary metadata to disk!\n"))
+ sys.stderr.write(_("!!! Permission denied: '%s'\n") % cache_path)
+ return metadata[baseurl]["data"]
+
+ import portage.exception
+ try:
+ filelist = dir_get_list(baseurl, conn)
+ except portage.exception.PortageException as e:
+ sys.stderr.write(_("!!! Error connecting to '%s'.\n") %
+ _hide_url_passwd(baseurl))
+ sys.stderr.write("!!! %s\n" % str(e))
+ del e
+ return metadata[baseurl]["data"]
+ tbz2list = match_in_array(filelist, suffix=".tbz2")
+ metalist = match_in_array(filelist, prefix="metadata.idx")
+ del filelist
+
+ # Determine if our metadata file is current.
+ metalist.sort()
+ metalist.reverse() # makes the order new-to-old.
+ for mfile in metalist:
+ if usingcache and \
+ ((metadata[baseurl]["indexname"] != mfile) or \
+ (metadata[baseurl]["timestamp"] < int(time.time() - (60 * 60 * 24)))):
+ # Try to download new cache until we succeed on one.
+ data = ""
+ for trynum in [1, 2, 3]:
+ mytempfile = tempfile.TemporaryFile()
+ try:
+ file_get(baseurl + "/" + mfile, mytempfile, conn)
+ if mytempfile.tell() > len(data):
+ mytempfile.seek(0)
+ data = mytempfile.read()
+ except ValueError as e:
+ sys.stderr.write("--- %s\n" % str(e))
+ if trynum < 3:
+ sys.stderr.write(_("Retrying...\n"))
+ sys.stderr.flush()
+ mytempfile.close()
+ continue
+ if match_in_array([mfile], suffix=".gz"):
+ out.write("gzip'd\n")
+ out.flush()
+ try:
+ import gzip
+ mytempfile.seek(0)
+ gzindex = gzip.GzipFile(mfile[:-3], 'rb', 9, mytempfile)
+ data = gzindex.read()
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ mytempfile.close()
+ sys.stderr.write(_("!!! Failed to use gzip: ") + str(e) + "\n")
+ sys.stderr.flush()
+ mytempfile.close()
+ try:
+ metadata[baseurl]["data"] = pickle.loads(data)
+ del data
+ metadata[baseurl]["indexname"] = mfile
+ metadata[baseurl]["timestamp"] = int(time.time())
+ metadata[baseurl]["modified"] = 0 # It's not, right after download.
+ out.write(_("Pickle loaded.\n"))
+ out.flush()
+ break
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ sys.stderr.write(_("!!! Failed to read data from index: ") + str(mfile) + "\n")
+ sys.stderr.write("!!! %s" % str(e))
+ sys.stderr.flush()
+ try:
+ metadatafile = open(_unicode_encode(metadatafilename,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ pickle.dump(metadata, metadatafile, protocol=2)
+ metadatafile.close()
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ sys.stderr.write(_("!!! Failed to write binary metadata to disk!\n"))
+ sys.stderr.write("!!! %s\n" % str(e))
+ sys.stderr.flush()
+ break
+ # We may have metadata... now we run through the tbz2 list and check.
+
+ class CacheStats(object):
+ from time import time
+ def __init__(self, out):
+ self.misses = 0
+ self.hits = 0
+ self.last_update = 0
+ self.out = out
+ self.min_display_latency = 0.2
+ def update(self):
+ cur_time = self.time()
+ if cur_time - self.last_update >= self.min_display_latency:
+ self.last_update = cur_time
+ self.display()
+ def display(self):
+ self.out.write("\r"+colorize("WARN",
+ _("cache miss: '") + str(self.misses) + "'") + \
+ " --- " + colorize("GOOD", _("cache hit: '") + str(self.hits) + "'"))
+ self.out.flush()
+
+ cache_stats = CacheStats(out)
+ have_tty = os.environ.get('TERM') != 'dumb' and out.isatty()
+ if have_tty:
+ cache_stats.display()
+ binpkg_filenames = set()
+ for x in tbz2list:
+ x = os.path.basename(x)
+ binpkg_filenames.add(x)
+ if x not in metadata[baseurl]["data"]:
+ cache_stats.misses += 1
+ if have_tty:
+ cache_stats.update()
+ metadata[baseurl]["modified"] = 1
+ myid = None
+ for _x in range(3):
+ try:
+ myid = file_get_metadata(
+ "/".join((baseurl.rstrip("/"), x.lstrip("/"))),
+ conn, chunk_size)
+ break
+ except http_client_BadStatusLine:
+ # Sometimes this error is thrown from conn.getresponse() in
+ # make_http_request(). The docstring for this error in
+ # httplib.py says "Presumably, the server closed the
+ # connection before sending a valid response".
+ conn = create_conn(baseurl)[0]
+ except http_client_ResponseNotReady:
+ # With some http servers this error is known to be thrown
+ # from conn.getresponse() in make_http_request() when the
+ # remote file does not have appropriate read permissions.
+ # Maybe it's possible to recover from this exception in
+ # cases though, so retry.
+ conn = create_conn(baseurl)[0]
+
+ if myid and myid[0]:
+ metadata[baseurl]["data"][x] = make_metadata_dict(myid)
+ elif verbose:
+ sys.stderr.write(colorize("BAD",
+ _("!!! Failed to retrieve metadata on: ")) + str(x) + "\n")
+ sys.stderr.flush()
+ else:
+ cache_stats.hits += 1
+ if have_tty:
+ cache_stats.update()
+ cache_stats.display()
+ # Cleanse stale cache for files that don't exist on the server anymore.
+ stale_cache = set(metadata[baseurl]["data"]).difference(binpkg_filenames)
+ if stale_cache:
+ for x in stale_cache:
+ del metadata[baseurl]["data"][x]
+ metadata[baseurl]["modified"] = 1
+ del stale_cache
+ del binpkg_filenames
+ out.write("\n")
+ out.flush()
+
+ try:
+ if "modified" in metadata[baseurl] and metadata[baseurl]["modified"]:
+ metadata[baseurl]["timestamp"] = int(time.time())
+ metadatafile = open(_unicode_encode(metadatafilename,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ pickle.dump(metadata, metadatafile, protocol=2)
+ metadatafile.close()
+ if makepickle:
+ metadatafile = open(_unicode_encode(makepickle,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ pickle.dump(metadata[baseurl]["data"], metadatafile, protocol=2)
+ metadatafile.close()
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ sys.stderr.write(_("!!! Failed to write binary metadata to disk!\n"))
+ sys.stderr.write("!!! "+str(e)+"\n")
+ sys.stderr.flush()
+
+ if not keepconnection:
+ conn.close()
+
+ return metadata[baseurl]["data"]
+
+def _cmp_cpv(d1, d2):
+ cpv1 = d1["CPV"]
+ cpv2 = d2["CPV"]
+ if cpv1 > cpv2:
+ return 1
+ elif cpv1 == cpv2:
+ return 0
+ else:
+ return -1
+
+class PackageIndex(object):
+
+ def __init__(self,
+ allowed_pkg_keys=None,
+ default_header_data=None,
+ default_pkg_data=None,
+ inherited_keys=None,
+ translated_keys=None):
+
+ self._pkg_slot_dict = None
+ if allowed_pkg_keys is not None:
+ self._pkg_slot_dict = slot_dict_class(allowed_pkg_keys)
+
+ self._default_header_data = default_header_data
+ self._default_pkg_data = default_pkg_data
+ self._inherited_keys = inherited_keys
+ self._write_translation_map = {}
+ self._read_translation_map = {}
+ if translated_keys:
+ self._write_translation_map.update(translated_keys)
+ self._read_translation_map.update(((y, x) for (x, y) in translated_keys))
+ self.header = {}
+ if self._default_header_data:
+ self.header.update(self._default_header_data)
+ self.packages = []
+ self.modified = True
+
+ def _readpkgindex(self, pkgfile, pkg_entry=True):
+
+ allowed_keys = None
+ if self._pkg_slot_dict is None or not pkg_entry:
+ d = {}
+ else:
+ d = self._pkg_slot_dict()
+ allowed_keys = d.allowed_keys
+
+ for line in pkgfile:
+ line = line.rstrip("\n")
+ if not line:
+ break
+ line = line.split(":", 1)
+ if not len(line) == 2:
+ continue
+ k, v = line
+ if v:
+ v = v[1:]
+ k = self._read_translation_map.get(k, k)
+ if allowed_keys is not None and \
+ k not in allowed_keys:
+ continue
+ d[k] = v
+ return d
+
+ def _writepkgindex(self, pkgfile, items):
+ for k, v in items:
+ pkgfile.write("%s: %s\n" % \
+ (self._write_translation_map.get(k, k), v))
+ pkgfile.write("\n")
+
+ def read(self, pkgfile):
+ self.readHeader(pkgfile)
+ self.readBody(pkgfile)
+
+ def readHeader(self, pkgfile):
+ self.header.update(self._readpkgindex(pkgfile, pkg_entry=False))
+
+ def readBody(self, pkgfile):
+ while True:
+ d = self._readpkgindex(pkgfile)
+ if not d:
+ break
+ mycpv = d.get("CPV")
+ if not mycpv:
+ continue
+ if self._default_pkg_data:
+ for k, v in self._default_pkg_data.items():
+ d.setdefault(k, v)
+ if self._inherited_keys:
+ for k in self._inherited_keys:
+ v = self.header.get(k)
+ if v is not None:
+ d.setdefault(k, v)
+ self.packages.append(d)
+
+ def write(self, pkgfile):
+ if self.modified:
+ self.header["TIMESTAMP"] = str(long(time.time()))
+ self.header["PACKAGES"] = str(len(self.packages))
+ keys = list(self.header)
+ keys.sort()
+ self._writepkgindex(pkgfile, [(k, self.header[k]) \
+ for k in keys if self.header[k]])
+ for metadata in sorted(self.packages,
+ key=portage.util.cmp_sort_key(_cmp_cpv)):
+ metadata = metadata.copy()
+ if self._inherited_keys:
+ for k in self._inherited_keys:
+ v = self.header.get(k)
+ if v is not None and v == metadata.get(k):
+ del metadata[k]
+ if self._default_pkg_data:
+ for k, v in self._default_pkg_data.items():
+ if metadata.get(k) == v:
+ metadata.pop(k, None)
+ keys = list(metadata)
+ keys.sort()
+ self._writepkgindex(pkgfile,
+ [(k, metadata[k]) for k in keys if metadata[k]])
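
A short sketch of reading and rewriting a binary package index with the PackageIndex class above (the Packages paths are examples only):

    import io
    from portage.getbinpkg import PackageIndex

    index = PackageIndex()
    with io.open('/usr/portage/packages/Packages',       # example input path
            encoding='utf_8', errors='replace') as f:
        index.read(f)                                     # header, then body
    print(index.header.get('TIMESTAMP'), len(index.packages), 'packages')

    with io.open('/tmp/Packages.new', 'w', encoding='utf_8') as f:
        index.write(f)            # refreshes TIMESTAMP/PACKAGES before writing
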
diff --git a/lib/portage/glsa.py b/lib/portage/glsa.py
new file mode 100644
index 000000000..ccf93439d
--- /dev/null
+++ b/lib/portage/glsa.py
@@ -0,0 +1,726 @@
+# Copyright 2003-2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import absolute_import, unicode_literals
+
+import io
+import sys
+try:
+ from urllib.request import urlopen as urllib_request_urlopen
+except ImportError:
+ from urllib import urlopen as urllib_request_urlopen
+import codecs
+import re
+import operator
+import xml.dom.minidom
+from io import StringIO
+from functools import reduce
+
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.versions import pkgsplit, vercmp
+from portage.util import grabfile
+from portage.const import PRIVATE_PATH
+from portage.localization import _
+from portage.dep import _slot_separator
+
+# Note: the space for rgt and rlt is important !!
+# FIXME: use slot deps instead, requires GLSA format versioning
+opMapping = {"le": "<=", "lt": "<", "eq": "=", "gt": ">", "ge": ">=",
+ "rge": ">=~", "rle": "<=~", "rgt": " >~", "rlt": " <~"}
+NEWLINE_ESCAPE = "!;\\n" # some random string to mark newlines that should be preserved
+SPACE_ESCAPE = "!;_" # some random string to mark spaces that should be preserved
+
+def get_applied_glsas(settings):
+ """
+ Return a list of applied or injected GLSA IDs
+
+ @type settings: portage.config
+ @param settings: portage config instance
+ @rtype: list
+ @return: list of glsa IDs
+ """
+ return grabfile(os.path.join(settings["EROOT"], PRIVATE_PATH, "glsa_injected"))
+
+
+# TODO: use the textwrap module instead
+def wrap(text, width, caption=""):
+ """
+ Wraps the given text at column I{width}, optionally indenting
+ it so that no text is under I{caption}. It's possible to encode
+ hard linebreaks in I{text} with L{NEWLINE_ESCAPE}.
+
+ @type text: String
+ @param text: the text to be wrapped
+ @type width: Integer
+ @param width: the column at which the text should be wrapped
+ @type caption: String
+ @param caption: this string is inserted at the beginning of the
+ return value and the paragraph is indented up to
+ C{len(caption)}.
+ @rtype: String
+ @return: the wrapped and indented paragraph
+ """
+ rValue = ""
+ line = caption
+ text = text.replace(2*NEWLINE_ESCAPE, NEWLINE_ESCAPE+" "+NEWLINE_ESCAPE)
+ words = text.split()
+ indentLevel = len(caption)+1
+
+ for w in words:
+ if line != "" and line[-1] == "\n":
+ rValue += line
+ line = " "*indentLevel
+ if len(line)+len(w.replace(NEWLINE_ESCAPE, ""))+1 > width:
+ rValue += line+"\n"
+ line = " "*indentLevel+w.replace(NEWLINE_ESCAPE, "\n")
+ elif w.find(NEWLINE_ESCAPE) >= 0:
+ if len(line.strip()) > 0:
+ rValue += line+" "+w.replace(NEWLINE_ESCAPE, "\n")
+ else:
+ rValue += line+w.replace(NEWLINE_ESCAPE, "\n")
+ line = " "*indentLevel
+ else:
+ if len(line.strip()) > 0:
+ line += " "+w
+ else:
+ line += w
+ if len(line) > 0:
+ rValue += line.replace(NEWLINE_ESCAPE, "\n")
+ rValue = rValue.replace(SPACE_ESCAPE, " ")
+ return rValue
+
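
For instance, hard line breaks encoded with NEWLINE_ESCAPE survive the re-wrapping done by wrap() above; the advisory text here is invented:

    from portage.glsa import wrap, NEWLINE_ESCAPE

    text = ('This advisory affects several packages.' + NEWLINE_ESCAPE +
        'Upgrade as soon as possible.')
    print(wrap(text, 60, caption='Synopsis: '))
    # The NEWLINE_ESCAPE marker is rendered as a real newline in the output.
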
+def get_glsa_list(myconfig):
+ """
+ Returns a list of all available GLSAs in the given repository
+ by comparing the filelist there with the pattern described in
+ the config.
+
+ @type myconfig: portage.config
+ @param myconfig: Portage settings instance
+
+ @rtype: List of Strings
+ @return: a list of GLSA IDs in this repository
+ """
+ rValue = []
+
+ if "GLSA_DIR" in myconfig:
+ repository = myconfig["GLSA_DIR"]
+ else:
+ repository = os.path.join(myconfig["PORTDIR"], "metadata", "glsa")
+
+ if not os.access(repository, os.R_OK):
+ return []
+ dirlist = os.listdir(repository)
+ prefix = "glsa-"
+ suffix = ".xml"
+
+ for f in dirlist:
+ try:
+ if f[:len(prefix)] == prefix and f[-1*len(suffix):] == suffix:
+ rValue.append(f[len(prefix):-1*len(suffix)])
+ except IndexError:
+ pass
+ return rValue
+
+def getListElements(listnode):
+ """
+ Get all <li> elements for a given <ol> or <ul> node.
+
+ @type listnode: xml.dom.Node
+ @param listnode: <ul> or <ol> list to get the elements for
+ @rtype: List of Strings
+ @return: a list that contains the value of the <li> elements
+ """
+ if not listnode.nodeName in ["ul", "ol"]:
+ raise GlsaFormatException("Invalid function call: listnode is not <ul> or <ol>")
+ rValue = [getText(li, format="strip") \
+ for li in listnode.childNodes \
+ if li.nodeType == xml.dom.Node.ELEMENT_NODE]
+ return rValue
+
+def getText(node, format, textfd = None):
+ """
+	This is the main parser function. It takes a node and traverses
+	recursively over the subnodes, getting the text of each (and the
+	I{link} attribute for <uri> and <mail>). Depending on the I{format}
+ parameter the text might be formatted by adding/removing newlines,
+ tabs and spaces. This function is only useful for the GLSA DTD,
+ it's not applicable for other DTDs.
+
+ @type node: xml.dom.Node
+ @param node: the root node to start with the parsing
+ @type format: String
+ @param format: this should be either I{strip}, I{keep} or I{xml}
+ I{keep} just gets the text and does no formatting.
+ I{strip} replaces newlines and tabs with spaces and
+ replaces multiple spaces with one space.
+ I{xml} does some more formatting, depending on the
+ type of the encountered nodes.
+ @type textfd: writable file-like object
+ @param textfd: the file-like object to write the output to
+ @rtype: String
+	@return: the (formatted) content of the node and its subnodes,
+		or None if textfd was given
+ """
+ if not textfd:
+ textfd = StringIO()
+ returnNone = False
+ else:
+ returnNone = True
+ if format in ["strip", "keep"]:
+ if node.nodeName in ["uri", "mail"]:
+ textfd.write(node.childNodes[0].data+": "+node.getAttribute("link"))
+ else:
+ for subnode in node.childNodes:
+ if subnode.nodeName == "#text":
+ textfd.write(subnode.data)
+ else:
+ getText(subnode, format, textfd)
+ else: # format = "xml"
+ for subnode in node.childNodes:
+ if subnode.nodeName == "p":
+ for p_subnode in subnode.childNodes:
+ if p_subnode.nodeName == "#text":
+ textfd.write(p_subnode.data.strip())
+ elif p_subnode.nodeName in ["uri", "mail"]:
+ textfd.write(p_subnode.childNodes[0].data)
+ textfd.write(" ( "+p_subnode.getAttribute("link")+" )")
+ textfd.write(NEWLINE_ESCAPE)
+ elif subnode.nodeName == "ul":
+ for li in getListElements(subnode):
+ textfd.write("-"+SPACE_ESCAPE+li+NEWLINE_ESCAPE+" ")
+ elif subnode.nodeName == "ol":
+ i = 0
+ for li in getListElements(subnode):
+ i = i+1
+ textfd.write(str(i)+"."+SPACE_ESCAPE+li+NEWLINE_ESCAPE+" ")
+ elif subnode.nodeName == "code":
+ textfd.write(getText(subnode, format="keep").lstrip().replace("\n", NEWLINE_ESCAPE))
+ textfd.write(NEWLINE_ESCAPE)
+ elif subnode.nodeName == "#text":
+ textfd.write(subnode.data)
+ else:
+ raise GlsaFormatException(_("Invalid Tag found: "), subnode.nodeName)
+ if returnNone:
+ return None
+ rValue = textfd.getvalue()
+ if format == "strip":
+ rValue = rValue.strip(" \n\t")
+ rValue = re.sub(r"[\s]{2,}", " ", rValue)
+ return rValue
+
+def getMultiTagsText(rootnode, tagname, format):
+ """
+ Returns a list with the text of all subnodes of type I{tagname}
+ under I{rootnode} (which itself is not parsed) using the given I{format}.
+
+ @type rootnode: xml.dom.Node
+ @param rootnode: the node to search for I{tagname}
+ @type tagname: String
+ @param tagname: the name of the tags to search for
+ @type format: String
+ @param format: see L{getText}
+ @rtype: List of Strings
+ @return: a list containing the text of all I{tagname} childnodes
+ """
+ rValue = [getText(e, format) \
+ for e in rootnode.getElementsByTagName(tagname)]
+ return rValue
+
+def makeAtom(pkgname, versionNode):
+ """
+	Creates a (syntactically) valid portage atom from the given package
+	name and the information in the I{versionNode}.
+
+ @type pkgname: String
+ @param pkgname: the name of the package for this atom
+ @type versionNode: xml.dom.Node
+ @param versionNode: a <vulnerable> or <unaffected> Node that
+ contains the version information for this atom
+ @rtype: String
+ @return: the portage atom
+ """
+ rValue = opMapping[versionNode.getAttribute("range")] \
+ + pkgname \
+ + "-" + getText(versionNode, format="strip")
+ try:
+ slot = versionNode.getAttribute("slot").strip()
+ except KeyError:
+ pass
+ else:
+ if slot and slot != "*":
+ rValue += _slot_separator + slot
+ return str(rValue)
+
+def makeVersion(versionNode):
+ """
+	Creates a version string (format <op><version>) from the
+	information in the I{versionNode}.
+
+ @type versionNode: xml.dom.Node
+ @param versionNode: a <vulnerable> or <unaffected> Node that
+ contains the version information for this atom
+ @rtype: String
+ @return: the version string
+ """
+ rValue = opMapping[versionNode.getAttribute("range")] \
+ + getText(versionNode, format="strip")
+ try:
+ slot = versionNode.getAttribute("slot").strip()
+ except KeyError:
+ pass
+ else:
+ if slot and slot != "*":
+ rValue += _slot_separator + slot
+ return rValue
+
+def match(atom, dbapi, match_type="default"):
+ """
+ wrapper that calls revisionMatch() or portage.dbapi.dbapi.match() depending on
+ the given atom.
+
+ @type atom: string
+ @param atom: a <~ or >~ atom or a normal portage atom that contains the atom to match against
+ @type dbapi: portage.dbapi.dbapi
+ @param dbapi: one of the portage databases to use as information source
+ @type match_type: string
+ @param match_type: if != "default" passed as first argument to dbapi.xmatch
+ to apply the wanted visibility filters
+
+ @rtype: list of strings
+ @return: a list with the matching versions
+ """
+ if atom[2] == "~":
+ return revisionMatch(atom, dbapi, match_type=match_type)
+ elif match_type == "default" or not hasattr(dbapi, "xmatch"):
+ return dbapi.match(atom)
+ else:
+ return dbapi.xmatch(match_type, atom)
+
+def revisionMatch(revisionAtom, dbapi, match_type="default"):
+ """
+	handler for the special >~, >=~, <=~ and <~ atoms that are supposed to behave
+	like > and < except that they are limited to the same version; the range
+	only applies to the revision part.
+
+ @type revisionAtom: string
+ @param revisionAtom: a <~ or >~ atom that contains the atom to match against
+ @type dbapi: portage.dbapi.dbapi
+ @param dbapi: one of the portage databases to use as information source
+ @type match_type: string
+ @param match_type: if != "default" passed as first argument to portdb.xmatch
+ to apply the wanted visibility filters
+
+ @rtype: list of strings
+ @return: a list with the matching versions
+ """
+ if match_type == "default" or not hasattr(dbapi, "xmatch"):
+ if ":" in revisionAtom:
+ mylist = dbapi.match(re.sub(r'-r[0-9]+(:[^ ]+)?$', r'\1', revisionAtom[2:]))
+ else:
+ mylist = dbapi.match(re.sub("-r[0-9]+$", "", revisionAtom[2:]))
+ else:
+ if ":" in revisionAtom:
+ mylist = dbapi.xmatch(match_type, re.sub(r'-r[0-9]+(:[^ ]+)?$', r'\1', revisionAtom[2:]))
+ else:
+ mylist = dbapi.xmatch(match_type, re.sub("-r[0-9]+$", "", revisionAtom[2:]))
+ rValue = []
+ for v in mylist:
+ r1 = pkgsplit(v)[-1][1:]
+ r2 = pkgsplit(revisionAtom[3:])[-1][1:]
+ if eval(r1+" "+revisionAtom[0:2]+" "+r2):
+ rValue.append(v)
+ return rValue
+
+
+def getMinUpgrade(vulnerableList, unaffectedList, portdbapi, vardbapi, minimize=True):
+ """
+	Checks if the system state matches an atom in I{vulnerableList}
+	and, for every vulnerable installed package, determines the lowest
+	version matching an atom in I{unaffectedList} that is greater than
+	the currently installed version. It returns None if the system is
+	not affected, otherwise a list of [vulnerable, update] pairs where
+	update is an empty string if no upgrade is possible.
+	Both I{vulnerableList} and I{unaffectedList} should have the
+	same base package.
+
+ @type vulnerableList: List of Strings
+ @param vulnerableList: atoms matching vulnerable package versions
+ @type unaffectedList: List of Strings
+ @param unaffectedList: atoms matching unaffected package versions
+ @type portdbapi: portage.dbapi.porttree.portdbapi
+ @param portdbapi: Ebuild repository
+ @type vardbapi: portage.dbapi.vartree.vardbapi
+ @param vardbapi: Installed package repository
+ @type minimize: Boolean
+ @param minimize: True for a least-change upgrade, False for emerge-like algorithm
+
+	@rtype: List | None
+	@return: None if the system is not affected, otherwise a list of
+		[vulnerable_atom, upgrade_version] pairs
+ """
+ rValue = ""
+ v_installed = reduce(operator.add, [match(v, vardbapi) for v in vulnerableList], [])
+ u_installed = reduce(operator.add, [match(u, vardbapi) for u in unaffectedList], [])
+
+ # remove all unaffected atoms from vulnerable list
+ v_installed = list(set(v_installed).difference(set(u_installed)))
+
+ if not v_installed:
+ return None
+
+ # this tuple holds all vulnerable atoms, and the related upgrade atom
+ vuln_update = []
+ avail_updates = set()
+ for u in unaffectedList:
+ # TODO: This had match_type="match-all" before. I don't think it should
+ # since we disregarded masked items later anyway (match(=rValue, "porttree"))
+ avail_updates.update(match(u, portdbapi))
+ # if an atom is already installed, we should not consider it for upgrades
+ avail_updates.difference_update(u_installed)
+
+ for vuln in v_installed:
+ update = ""
+ for c in avail_updates:
+ c_pv = portage.catpkgsplit(c)
+ if vercmp(c.version, vuln.version) > 0 \
+ and (update == "" \
+ or (minimize ^ (vercmp(c.version, update.version) > 0))) \
+ and portdbapi._pkg_str(c, None).slot == vardbapi._pkg_str(vuln, None).slot:
+ update = c_pv[0]+"/"+c_pv[1]+"-"+c_pv[2]
+ if c_pv[3] != "r0": # we don't like -r0 for display
+ update += "-"+c_pv[3]
+ update = portdbapi._pkg_str(update, None)
+ vuln_update.append([vuln, update])
+
+ return vuln_update
+
+def format_date(datestr):
+ """
+	Takes a date (announced or revised) from a GLSA and formats
+	it as readable text (e.g. "January 1, 2008").
+
+	@type datestr: String
+	@param datestr: the date string to reformat
+ @rtype: String
+ @return: a reformatted string, or the original string
+ if it cannot be reformatted.
+ """
+ splitdate = datestr.split("-", 2)
+ if len(splitdate) != 3:
+ return datestr
+
+ # This cannot raise an error as we use () instead of []
+ splitdate = (int(x) for x in splitdate)
+
+ from datetime import date
+ try:
+ d = date(*splitdate)
+ except ValueError:
+ return datestr
+
+ # TODO We could format to local date format '%x' here?
+ return _unicode_decode(d.strftime("%B %d, %Y"),
+ encoding=_encodings['content'], errors='replace')
+
+# simple Exception classes to catch specific errors
+class GlsaTypeException(Exception):
+ def __init__(self, doctype):
+ Exception.__init__(self, "wrong DOCTYPE: %s" % doctype)
+
+class GlsaFormatException(Exception):
+ pass
+
+class GlsaArgumentException(Exception):
+ pass
+
+# GLSA xml data wrapper class
+class Glsa:
+ """
+ This class is a wrapper for the XML data and provides methods to access
+ and display the contained data.
+ """
+ def __init__(self, myid, myconfig, vardbapi, portdbapi):
+ """
+		Simple constructor to set the ID, store the config and get the
+		XML data by calling C{self.read()}.
+
+ @type myid: String
+ @param myid: String describing the id for the GLSA object (standard
+ GLSAs have an ID of the form YYYYMM-nn) or an existing
+ filename containing a GLSA.
+ @type myconfig: portage.config
+ @param myconfig: the config that should be used for this object.
+ @type vardbapi: portage.dbapi.vartree.vardbapi
+ @param vardbapi: installed package repository
+ @type portdbapi: portage.dbapi.porttree.portdbapi
+ @param portdbapi: ebuild repository
+ """
+ myid = _unicode_decode(myid,
+ encoding=_encodings['content'], errors='strict')
+ if re.match(r'\d{6}-\d{2}', myid):
+ self.type = "id"
+ elif os.path.exists(myid):
+ self.type = "file"
+ else:
+ raise GlsaArgumentException(_("Given ID %s isn't a valid GLSA ID or filename.") % myid)
+ self.nr = myid
+ self.config = myconfig
+ self.vardbapi = vardbapi
+ self.portdbapi = portdbapi
+ self.read()
+
+ def read(self):
+ """
+ Here we build the filename from the config and the ID and pass
+ it to urllib to fetch it from the filesystem or a remote server.
+
+ @rtype: None
+ @return: None
+ """
+ if "GLSA_DIR" in self.config:
+ repository = "file://" + self.config["GLSA_DIR"]+"/"
+ else:
+ repository = "file://" + self.config["PORTDIR"] + "/metadata/glsa/"
+ if self.type == "file":
+ myurl = "file://"+self.nr
+ else:
+ myurl = repository + "glsa-%s.xml" % str(self.nr)
+
+ f = urllib_request_urlopen(myurl)
+ try:
+ self.parse(f)
+ finally:
+ f.close()
+
+ return None
+
+ def parse(self, myfile):
+ """
+ This method parses the XML file and sets up the internal data
+ structures by calling the different helper functions in this
+ module.
+
+		@type myfile: file-like object
+		@param myfile: file object to read the XML data from
+ @rtype: None
+ @return: None
+ """
+ self.DOM = xml.dom.minidom.parse(myfile)
+ if not self.DOM.doctype:
+ raise GlsaTypeException(None)
+ elif self.DOM.doctype.systemId == "http://www.gentoo.org/dtd/glsa.dtd":
+ self.dtdversion = 0
+ elif self.DOM.doctype.systemId == "http://www.gentoo.org/dtd/glsa-2.dtd":
+ self.dtdversion = 2
+ else:
+ raise GlsaTypeException(self.DOM.doctype.systemId)
+ myroot = self.DOM.getElementsByTagName("glsa")[0]
+ if self.type == "id" and myroot.getAttribute("id") != self.nr:
+ raise GlsaFormatException(_("filename and internal id don't match:") + myroot.getAttribute("id") + " != " + self.nr)
+
+ # the simple (single, required, top-level, #PCDATA) tags first
+ self.title = getText(myroot.getElementsByTagName("title")[0], format="strip")
+ self.synopsis = getText(myroot.getElementsByTagName("synopsis")[0], format="strip")
+ self.announced = format_date(getText(myroot.getElementsByTagName("announced")[0], format="strip"))
+
+ # Support both formats of revised:
+ # <revised>December 30, 2007: 02</revised>
+ # <revised count="2">2007-12-30</revised>
+ revisedEl = myroot.getElementsByTagName("revised")[0]
+ self.revised = getText(revisedEl, format="strip")
+ count = revisedEl.getAttribute("count")
+ if not count:
+ if self.revised.find(":") >= 0:
+ (self.revised, count) = self.revised.split(":")
+ else:
+ count = 1
+
+ self.revised = format_date(self.revised)
+
+ try:
+ self.count = int(count)
+ except ValueError:
+ # TODO should this raise a GlsaFormatException?
+ self.count = 1
+
+ # now the optional and 0-n toplevel, #PCDATA tags and references
+ try:
+ self.access = getText(myroot.getElementsByTagName("access")[0], format="strip")
+ except IndexError:
+ self.access = ""
+ self.bugs = getMultiTagsText(myroot, "bug", format="strip")
+ self.references = getMultiTagsText(myroot.getElementsByTagName("references")[0], "uri", format="keep")
+
+ # and now the formatted text elements
+ self.description = getText(myroot.getElementsByTagName("description")[0], format="xml")
+ self.workaround = getText(myroot.getElementsByTagName("workaround")[0], format="xml")
+ self.resolution = getText(myroot.getElementsByTagName("resolution")[0], format="xml")
+ self.impact_text = getText(myroot.getElementsByTagName("impact")[0], format="xml")
+ self.impact_type = myroot.getElementsByTagName("impact")[0].getAttribute("type")
+ try:
+ self.background = getText(myroot.getElementsByTagName("background")[0], format="xml")
+ except IndexError:
+ self.background = ""
+
+ # finally the interesting tags (product, affected, package)
+ self.glsatype = myroot.getElementsByTagName("product")[0].getAttribute("type")
+ self.product = getText(myroot.getElementsByTagName("product")[0], format="strip")
+ self.affected = myroot.getElementsByTagName("affected")[0]
+ self.packages = {}
+ for p in self.affected.getElementsByTagName("package"):
+ name = p.getAttribute("name")
+ try:
+ name = portage.dep.Atom(name)
+ except portage.exception.InvalidAtom:
+ raise GlsaFormatException(_("invalid package name: %s") % name)
+ if name != name.cp:
+ raise GlsaFormatException(_("invalid package name: %s") % name)
+ name = name.cp
+ if name not in self.packages:
+ self.packages[name] = []
+ tmp = {}
+ tmp["arch"] = p.getAttribute("arch")
+ tmp["auto"] = (p.getAttribute("auto") == "yes")
+ tmp["vul_vers"] = [makeVersion(v) for v in p.getElementsByTagName("vulnerable")]
+ tmp["unaff_vers"] = [makeVersion(v) for v in p.getElementsByTagName("unaffected")]
+ tmp["vul_atoms"] = [makeAtom(name, v) for v in p.getElementsByTagName("vulnerable")]
+ tmp["unaff_atoms"] = [makeAtom(name, v) for v in p.getElementsByTagName("unaffected")]
+ self.packages[name].append(tmp)
+ # TODO: services aren't really used yet
+ self.services = self.affected.getElementsByTagName("service")
+ return None
+
+ def dump(self, outstream=sys.stdout, encoding="utf-8"):
+ """
+		Dumps a plaintext representation of this GLSA to I{outstream}, or
+		to B{stdout} if it is omitted. You can specify an alternate
+		I{encoding} if needed (default is utf-8).
+
+		@type outstream: File
+		@param outstream: stream that should be used for writing
+			(defaults to sys.stdout)
+ """
+ outstream = getattr(outstream, "buffer", outstream)
+ outstream = codecs.getwriter(encoding)(outstream)
+ width = 76
+ outstream.write(("GLSA %s: \n%s" % (self.nr, self.title)).center(width)+"\n")
+ outstream.write((width*"=")+"\n")
+ outstream.write(wrap(self.synopsis, width, caption=_("Synopsis: "))+"\n")
+ outstream.write(_("Announced on: %s\n") % self.announced)
+ outstream.write(_("Last revised on: %s : %02d\n\n") % (self.revised, self.count))
+ if self.glsatype == "ebuild":
+ for k in self.packages:
+ pkg = self.packages[k]
+ for path in pkg:
+ vul_vers = ", ".join(path["vul_vers"])
+ unaff_vers = ", ".join(path["unaff_vers"])
+ outstream.write(_("Affected package: %s\n") % k)
+ outstream.write(_("Affected archs: "))
+ if path["arch"] == "*":
+ outstream.write(_("All\n"))
+ else:
+ outstream.write("%s\n" % path["arch"])
+ outstream.write(_("Vulnerable: %s\n") % vul_vers)
+ outstream.write(_("Unaffected: %s\n\n") % unaff_vers)
+ elif self.glsatype == "infrastructure":
+ pass
+ if len(self.bugs) > 0:
+ outstream.write(_("\nRelated bugs: "))
+ outstream.write(", ".join(self.bugs))
+ outstream.write("\n")
+ if self.background:
+ outstream.write("\n"+wrap(self.background, width, caption=_("Background: ")))
+ outstream.write("\n"+wrap(self.description, width, caption=_("Description: ")))
+ outstream.write("\n"+wrap(self.impact_text, width, caption=_("Impact: ")))
+ outstream.write("\n"+wrap(self.workaround, width, caption=_("Workaround: ")))
+ outstream.write("\n"+wrap(self.resolution, width, caption=_("Resolution: ")))
+ myreferences = " ".join(r.replace(" ", SPACE_ESCAPE)+NEWLINE_ESCAPE for r in self.references)
+ outstream.write("\n"+wrap(myreferences, width, caption=_("References: ")))
+ outstream.write("\n")
+
+ def isVulnerable(self):
+ """
+ Tests if the system is affected by this GLSA by checking if any
+ vulnerable package versions are installed. Also checks for affected
+ architectures.
+
+ @rtype: Boolean
+ @return: True if the system is affected, False if not
+ """
+ rValue = False
+ for k in self.packages:
+ pkg = self.packages[k]
+ for path in pkg:
+ if path["arch"] == "*" or self.config["ARCH"] in path["arch"].split():
+ for v in path["vul_atoms"]:
+ rValue = rValue \
+ or (len(match(v, self.vardbapi)) > 0 \
+ and None != getMinUpgrade(path["vul_atoms"], path["unaff_atoms"], \
+ self.portdbapi, self.vardbapi))
+ return rValue
+
+ def isInjected(self):
+ """
+ Looks if the GLSA ID is in the GLSA checkfile to check if this
+		Checks whether the GLSA ID is listed in the GLSA checkfile,
+		i.e. whether this GLSA should be considered applied.
+
+		@rtype: Boolean
+		@return: True if the GLSA is in the inject file, False if not
+ if not os.access(os.path.join(self.config["EROOT"],
+ PRIVATE_PATH, "glsa_injected"), os.R_OK):
+ return False
+ return (self.nr in get_applied_glsas(self.config))
+
+ def inject(self):
+ """
+ Puts the ID of this GLSA into the GLSA checkfile, so it won't
+ show up on future checks. Should be called after a GLSA is
+ applied or on explicit user request.
+
+ @rtype: None
+ @return: None
+ """
+ if not self.isInjected():
+ checkfile = io.open(
+ _unicode_encode(os.path.join(self.config["EROOT"],
+ PRIVATE_PATH, "glsa_injected"),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='a+', encoding=_encodings['content'], errors='strict')
+ checkfile.write(_unicode_decode(self.nr + "\n"))
+ checkfile.close()
+ return None
+
+ def getMergeList(self, least_change=True):
+ """
+ Returns the list of package-versions that have to be merged to
+ apply this GLSA properly. The versions are as low as possible
+ while avoiding downgrades (see L{getMinUpgrade}).
+
+ @type least_change: Boolean
+ @param least_change: True if the smallest possible upgrade should be selected,
+ False for an emerge-like algorithm
+ @rtype: List of Strings
+ @return: list of package-versions that have to be merged
+ """
+ return list(set(update for (vuln, update) in self.getAffectionTable(least_change) if update))
+
+ def getAffectionTable(self, least_change=True):
+ """
+ Builds the table of (vulnerable atom, minimal upgrade) tuples
+ for the atoms installed on the system that are affected
+ by this GLSA.
+ """
+ systemAffection = []
+ for pkg in self.packages.keys():
+ for path in self.packages[pkg]:
+ update = getMinUpgrade(path["vul_atoms"], path["unaff_atoms"],
+ self.portdbapi, self.vardbapi, minimize=least_change)
+ if update:
+ systemAffection.extend(update)
+ return systemAffection
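
The pieces above fit together as follows: getAffectionTable() yields
(vulnerable atom, minimal upgrade) tuples and getMergeList() reduces that to
the unique set of upgrade targets. A minimal usage sketch, assuming `glsa` is
an already-parsed instance of the GLSA class this hunk extends:

    # glsa is assumed to be a fully constructed/parsed GLSA object
    if glsa.isVulnerable() and not glsa.isInjected():
        for vuln, update in glsa.getAffectionTable(least_change=True):
            print("%s -> %s" % (vuln, update))
        merge_targets = glsa.getMergeList(least_change=True)
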
diff --git a/lib/portage/localization.py b/lib/portage/localization.py
new file mode 100644
index 000000000..b215b9cba
--- /dev/null
+++ b/lib/portage/localization.py
@@ -0,0 +1,46 @@
+# localization.py -- Code to manage/help portage localization.
+# Copyright 2004-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division
+
+import locale
+import math
+
+from portage import _encodings, _unicode_decode
+
+# We define this to make the transition easier for us.
+def _(mystr):
+ """
+ Always returns unicode, regardless of the input type. This is
+ helpful for avoiding UnicodeDecodeError from __str__() with
+ Python 2, by ensuring that string format operations invoke
+ __unicode__() instead of __str__().
+ """
+ return _unicode_decode(mystr)
+
+def localization_example():
+ # Dict references allow translators to rearrange word order.
+ print(_("You can use this string for translating."))
+ print(_("Strings can be formatted with %(mystr)s like this.") % {"mystr": "VALUES"})
+
+ a_value = "value.of.a"
+ b_value = 123
+ c_value = [1, 2, 3, 4]
+ print(_("A: %(a)s -- B: %(b)s -- C: %(c)s") %
+ {"a": a_value, "b": b_value, "c": c_value})
+
+def localized_size(num_bytes):
+ """
+ Return pretty localized size string for num_bytes size
+ (given in bytes). The output will be in kibibytes.
+ """
+
+ # always round up, so that small files don't end up as '0 KiB'
+ num_kib = math.ceil(num_bytes / 1024)
+ try:
+ formatted_num = locale.format_string('%d', num_kib, grouping=True)
+ except UnicodeDecodeError:
+ # failure to decode locale data
+ formatted_num = str(num_kib)
+ return (_unicode_decode(formatted_num, encoding=_encodings['stdio']) + ' KiB')
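
A quick sketch of how localized_size() behaves; the byte counts below are
illustrative only, and the digit grouping depends on the active locale:

    from portage.localization import localized_size

    # Sizes are always rounded up to whole KiB, so tiny files show as '1 KiB'.
    print(localized_size(1))             # '1 KiB'
    print(localized_size(4096))          # '4 KiB'
    print(localized_size(1024 * 1024))   # e.g. '1,024 KiB' under en_US
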
diff --git a/lib/portage/locks.py b/lib/portage/locks.py
new file mode 100644
index 000000000..f61e1819a
--- /dev/null
+++ b/lib/portage/locks.py
@@ -0,0 +1,607 @@
+# portage: Lock management code
+# Copyright 2004-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ["lockdir", "unlockdir", "lockfile", "unlockfile", \
+ "hardlock_name", "hardlink_is_mine", "hardlink_lockfile", \
+ "unhardlink_lockfile", "hardlock_cleanup"]
+
+import errno
+import fcntl
+import multiprocessing
+import sys
+import tempfile
+import time
+import warnings
+
+import portage
+from portage import os, _encodings, _unicode_decode
+from portage.exception import (DirectoryNotFound, FileNotFound,
+ InvalidData, TryAgain, OperationNotPermitted, PermissionDenied,
+ ReadOnlyFileSystem)
+from portage.util import writemsg
+from portage.localization import _
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+HARDLINK_FD = -2
+_HARDLINK_POLL_LATENCY = 3 # seconds
+
+# Used by emerge in order to disable the "waiting for lock" message
+# so that it doesn't interfere with the status display.
+_quiet = False
+
+
+_lock_fn = None
+
+
+def _get_lock_fn():
+ """
+ Returns fcntl.lockf if proven to work, and otherwise returns fcntl.flock.
+ On some platforms fcntl.lockf is known to be broken.
+ """
+ global _lock_fn
+ if _lock_fn is not None:
+ return _lock_fn
+
+ def _test_lock(fd, lock_path):
+ os.close(fd)
+ try:
+ with open(lock_path, 'a') as f:
+ fcntl.lockf(f.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
+ except EnvironmentError as e:
+ if e.errno == errno.EAGAIN:
+ # Parent process holds lock, as expected.
+ sys.exit(0)
+
+ # Something went wrong.
+ sys.exit(1)
+
+ fd, lock_path = tempfile.mkstemp()
+ try:
+ try:
+ fcntl.lockf(fd, fcntl.LOCK_EX)
+ except EnvironmentError:
+ pass
+ else:
+ proc = multiprocessing.Process(target=_test_lock,
+ args=(fd, lock_path))
+ proc.start()
+ proc.join()
+ if proc.exitcode == os.EX_OK:
+ # Use fcntl.lockf because the test passed.
+ _lock_fn = fcntl.lockf
+ return _lock_fn
+ finally:
+ os.close(fd)
+ os.unlink(lock_path)
+
+ # Fall back to fcntl.flock.
+ _lock_fn = fcntl.flock
+ return _lock_fn
+
+
+_open_fds = set()
+
+def _close_fds():
+ """
+ This is intended to be called after a fork, in order to close file
+ descriptors for locks held by the parent process. This can be called
+ safely after a fork without exec, unlike the _setup_pipes close_fds
+ behavior.
+ """
+ while _open_fds:
+ os.close(_open_fds.pop())
+
+def lockdir(mydir, flags=0):
+ return lockfile(mydir, wantnewlockfile=1, flags=flags)
+def unlockdir(mylock):
+ return unlockfile(mylock)
+
+def lockfile(mypath, wantnewlockfile=0, unlinkfile=0,
+ waiting_msg=None, flags=0):
+ """
+ If wantnewlockfile is True then this creates a lockfile in the parent
+ directory as the file: '.' + basename + '.portage_lockfile'.
+ """
+
+ if not mypath:
+ raise InvalidData(_("Empty path given"))
+
+ # Since Python 3.4, chown requires int type (no proxies).
+ portage_gid = int(portage.data.portage_gid)
+
+ # Support for file object or integer file descriptor parameters is
+ # deprecated due to ambiguity in whether or not it's safe to close
+ # the file descriptor, making it prone to "Bad file descriptor" errors
+ # or file descriptor leaks.
+ if isinstance(mypath, basestring) and mypath[-1] == '/':
+ mypath = mypath[:-1]
+
+ lockfilename_path = mypath
+ if hasattr(mypath, 'fileno'):
+ warnings.warn("portage.locks.lockfile() support for "
+ "file object parameters is deprecated. Use a file path instead.",
+ DeprecationWarning, stacklevel=2)
+ lockfilename_path = getattr(mypath, 'name', None)
+ mypath = mypath.fileno()
+ if isinstance(mypath, int):
+ warnings.warn("portage.locks.lockfile() support for integer file "
+ "descriptor parameters is deprecated. Use a file path instead.",
+ DeprecationWarning, stacklevel=2)
+ lockfilename = mypath
+ wantnewlockfile = 0
+ unlinkfile = 0
+ elif wantnewlockfile:
+ base, tail = os.path.split(mypath)
+ lockfilename = os.path.join(base, "." + tail + ".portage_lockfile")
+ lockfilename_path = lockfilename
+ unlinkfile = 1
+ else:
+ lockfilename = mypath
+
+ if isinstance(mypath, basestring):
+ if not os.path.exists(os.path.dirname(mypath)):
+ raise DirectoryNotFound(os.path.dirname(mypath))
+ preexisting = os.path.exists(lockfilename)
+ old_mask = os.umask(000)
+ try:
+ try:
+ myfd = os.open(lockfilename, os.O_CREAT|os.O_RDWR, 0o660)
+ except OSError as e:
+ func_call = "open('%s')" % lockfilename
+ if e.errno == OperationNotPermitted.errno:
+ raise OperationNotPermitted(func_call)
+ elif e.errno == PermissionDenied.errno:
+ raise PermissionDenied(func_call)
+ elif e.errno == ReadOnlyFileSystem.errno:
+ raise ReadOnlyFileSystem(func_call)
+ else:
+ raise
+
+ if not preexisting:
+ try:
+ if os.stat(lockfilename).st_gid != portage_gid:
+ os.chown(lockfilename, -1, portage_gid)
+ except OSError as e:
+ if e.errno in (errno.ENOENT, errno.ESTALE):
+ return lockfile(mypath,
+ wantnewlockfile=wantnewlockfile,
+ unlinkfile=unlinkfile, waiting_msg=waiting_msg,
+ flags=flags)
+ else:
+ writemsg("%s: chown('%s', -1, %d)\n" % \
+ (e, lockfilename, portage_gid), noiselevel=-1)
+ writemsg(_("Cannot chown a lockfile: '%s'\n") % \
+ lockfilename, noiselevel=-1)
+ writemsg(_("Group IDs of current user: %s\n") % \
+ " ".join(str(n) for n in os.getgroups()),
+ noiselevel=-1)
+ finally:
+ os.umask(old_mask)
+
+ elif isinstance(mypath, int):
+ myfd = mypath
+
+ else:
+ raise ValueError(_("Unknown type passed in '%s': '%s'") % \
+ (type(mypath), mypath))
+
+ # Try a non-blocking lock first; if it is already held, print a message
+ # saying we are waiting on the lockfile and fall back to a blocking attempt.
+ locking_method = portage._eintr_func_wrapper(_get_lock_fn())
+ try:
+ if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
+ raise IOError(errno.ENOSYS, "Function not implemented")
+ locking_method(myfd, fcntl.LOCK_EX|fcntl.LOCK_NB)
+ except IOError as e:
+ if not hasattr(e, "errno"):
+ raise
+ if e.errno in (errno.EACCES, errno.EAGAIN, errno.ENOLCK):
+ # resource temp unavailable; eg, someone beat us to the lock.
+ if flags & os.O_NONBLOCK:
+ os.close(myfd)
+ raise TryAgain(mypath)
+
+ global _quiet
+ if _quiet:
+ out = None
+ else:
+ out = portage.output.EOutput()
+ if waiting_msg is None:
+ if isinstance(mypath, int):
+ waiting_msg = _("waiting for lock on fd %i") % myfd
+ else:
+ waiting_msg = _("waiting for lock on %s") % lockfilename
+ if out is not None:
+ out.ebegin(waiting_msg)
+ # try for the exclusive lock now.
+ enolock_msg_shown = False
+ while True:
+ try:
+ locking_method(myfd, fcntl.LOCK_EX)
+ except EnvironmentError as e:
+ if e.errno == errno.ENOLCK:
+ # This is known to occur on Solaris NFS (see
+ # bug #462694). Assume that the error is due
+ # to temporary exhaustion of record locks,
+ # and loop until one becomes available.
+ if not enolock_msg_shown:
+ enolock_msg_shown = True
+ if isinstance(mypath, int):
+ context_desc = _("Error while waiting "
+ "to lock fd %i") % myfd
+ else:
+ context_desc = _("Error while waiting "
+ "to lock '%s'") % lockfilename
+ writemsg("\n!!! %s: %s\n" % (context_desc, e),
+ noiselevel=-1)
+
+ time.sleep(_HARDLINK_POLL_LATENCY)
+ continue
+
+ if out is not None:
+ out.eend(1, str(e))
+ raise
+ else:
+ break
+
+ if out is not None:
+ out.eend(os.EX_OK)
+ elif e.errno in (errno.ENOSYS,):
+ # We're not allowed to lock on this FS.
+ if not isinstance(lockfilename, int):
+ # If a file object was passed in, it's not safe
+ # to close the file descriptor because it may
+ # still be in use.
+ os.close(myfd)
+ lockfilename_path = _unicode_decode(lockfilename_path,
+ encoding=_encodings['fs'], errors='strict')
+ if not isinstance(lockfilename_path, basestring):
+ raise
+ link_success = hardlink_lockfile(lockfilename_path,
+ waiting_msg=waiting_msg, flags=flags)
+ if not link_success:
+ raise
+ lockfilename = lockfilename_path
+ locking_method = None
+ myfd = HARDLINK_FD
+ else:
+ raise
+
+
+ if isinstance(lockfilename, basestring) and \
+ myfd != HARDLINK_FD and _fstat_nlink(myfd) == 0:
+ # The file was deleted on us... Keep trying to make one...
+ os.close(myfd)
+ writemsg(_("lockfile recurse\n"), 1)
+ lockfilename, myfd, unlinkfile, locking_method = lockfile(
+ mypath, wantnewlockfile=wantnewlockfile, unlinkfile=unlinkfile,
+ waiting_msg=waiting_msg, flags=flags)
+
+ if myfd != HARDLINK_FD:
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(myfd, fcntl.F_SETFD,
+ fcntl.fcntl(myfd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ _open_fds.add(myfd)
+
+ writemsg(str((lockfilename, myfd, unlinkfile)) + "\n", 1)
+ return (lockfilename, myfd, unlinkfile, locking_method)
+
+def _fstat_nlink(fd):
+ """
+ @param fd: an open file descriptor
+ @type fd: Integer
+ @rtype: Integer
+ @return: the current number of hardlinks to the file
+ """
+ try:
+ return os.fstat(fd).st_nlink
+ except EnvironmentError as e:
+ if e.errno in (errno.ENOENT, errno.ESTALE):
+ # Some filesystems such as CIFS return
+ # ENOENT which means st_nlink == 0.
+ return 0
+ raise
+
+def unlockfile(mytuple):
+
+ #XXX: Compatibility hack.
+ if len(mytuple) == 3:
+ lockfilename, myfd, unlinkfile = mytuple
+ locking_method = fcntl.flock
+ elif len(mytuple) == 4:
+ lockfilename, myfd, unlinkfile, locking_method = mytuple
+ else:
+ raise InvalidData
+
+ if(myfd == HARDLINK_FD):
+ unhardlink_lockfile(lockfilename, unlinkfile=unlinkfile)
+ return True
+
+ # myfd may be None here due to myfd = mypath in lockfile()
+ if isinstance(lockfilename, basestring) and \
+ not os.path.exists(lockfilename):
+ writemsg(_("lockfile does not exist '%s'\n") % lockfilename, 1)
+ if myfd is not None:
+ os.close(myfd)
+ _open_fds.remove(myfd)
+ return False
+
+ try:
+ if myfd is None:
+ myfd = os.open(lockfilename, os.O_WRONLY, 0o660)
+ unlinkfile = 1
+ locking_method(myfd, fcntl.LOCK_UN)
+ except OSError:
+ if isinstance(lockfilename, basestring):
+ os.close(myfd)
+ _open_fds.remove(myfd)
+ raise IOError(_("Failed to unlock file '%s'\n") % lockfilename)
+
+ try:
+ # This sleep call was added to allow other processes that are
+ # waiting for a lock to be able to grab it before it is deleted.
+ # lockfile() already accounts for this situation, however, and
+ # the sleep here adds more time than is saved overall, so it is
+ # commented out until proved necessary.
+ #time.sleep(0.0001)
+ if unlinkfile:
+ locking_method(myfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+ # We won the lock, so there isn't competition for it.
+ # We can safely delete the file.
+ writemsg(_("Got the lockfile...\n"), 1)
+ if _fstat_nlink(myfd) == 1:
+ os.unlink(lockfilename)
+ writemsg(_("Unlinked lockfile...\n"), 1)
+ locking_method(myfd, fcntl.LOCK_UN)
+ else:
+ writemsg(_("lockfile does not exist '%s'\n") % lockfilename, 1)
+ os.close(myfd)
+ _open_fds.remove(myfd)
+ return False
+ except SystemExit:
+ raise
+ except Exception as e:
+ writemsg(_("Failed to get lock... someone took it.\n"), 1)
+ writemsg(str(e) + "\n", 1)
+
+ # why test lockfilename? because we may have been handed an
+ # fd originally, and the caller might not like having their
+ # open fd closed automatically on them.
+ if isinstance(lockfilename, basestring):
+ os.close(myfd)
+ _open_fds.remove(myfd)
+
+ return True
+
+
+def hardlock_name(path):
+ base, tail = os.path.split(path)
+ return os.path.join(base, ".%s.hardlock-%s-%s" %
+ (tail, os.uname()[1], os.getpid()))
+
+def hardlink_is_mine(link, lock):
+ try:
+ lock_st = os.stat(lock)
+ if lock_st.st_nlink == 2:
+ link_st = os.stat(link)
+ return lock_st.st_ino == link_st.st_ino and \
+ lock_st.st_dev == link_st.st_dev
+ except OSError:
+ pass
+ return False
+
+def hardlink_lockfile(lockfilename, max_wait=DeprecationWarning,
+ waiting_msg=None, flags=0):
+ """Does the NFS, hardlink shuffle to ensure locking on the disk.
+ We create a PRIVATE hardlink to the real lockfile, that is just a
+ placeholder on the disk.
+ If our file can 2 references, then we have the lock. :)
+ Otherwise we lather, rise, and repeat.
+ """
+
+ if max_wait is not DeprecationWarning:
+ warnings.warn("The 'max_wait' parameter of "
+ "portage.locks.hardlink_lockfile() is now unused. Use "
+ "flags=os.O_NONBLOCK instead.",
+ DeprecationWarning, stacklevel=2)
+
+ global _quiet
+ out = None
+ displayed_waiting_msg = False
+ preexisting = os.path.exists(lockfilename)
+ myhardlock = hardlock_name(lockfilename)
+
+ # Since Python 3.4, chown requires int type (no proxies).
+ portage_gid = int(portage.data.portage_gid)
+
+ # myhardlock must not exist prior to our link() call, and we can
+ # safely unlink it since its file name is unique to our PID
+ try:
+ os.unlink(myhardlock)
+ except OSError as e:
+ if e.errno in (errno.ENOENT, errno.ESTALE):
+ pass
+ else:
+ func_call = "unlink('%s')" % myhardlock
+ if e.errno == OperationNotPermitted.errno:
+ raise OperationNotPermitted(func_call)
+ elif e.errno == PermissionDenied.errno:
+ raise PermissionDenied(func_call)
+ else:
+ raise
+
+ while True:
+ # create lockfilename if it doesn't exist yet
+ try:
+ myfd = os.open(lockfilename, os.O_CREAT|os.O_RDWR, 0o660)
+ except OSError as e:
+ func_call = "open('%s')" % lockfilename
+ if e.errno == OperationNotPermitted.errno:
+ raise OperationNotPermitted(func_call)
+ elif e.errno == PermissionDenied.errno:
+ raise PermissionDenied(func_call)
+ elif e.errno == ReadOnlyFileSystem.errno:
+ raise ReadOnlyFileSystem(func_call)
+ else:
+ raise
+ else:
+ myfd_st = None
+ try:
+ myfd_st = os.fstat(myfd)
+ if not preexisting:
+ # Don't chown the file if it is preexisting, since we
+ # want to preserve existing permissions in that case.
+ if myfd_st.st_gid != portage_gid:
+ os.fchown(myfd, -1, portage_gid)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ writemsg("%s: fchown('%s', -1, %d)\n" % \
+ (e, lockfilename, portage_gid), noiselevel=-1)
+ writemsg(_("Cannot chown a lockfile: '%s'\n") % \
+ lockfilename, noiselevel=-1)
+ writemsg(_("Group IDs of current user: %s\n") % \
+ " ".join(str(n) for n in os.getgroups()),
+ noiselevel=-1)
+ else:
+ # another process has removed the file, so we'll have
+ # to create it again
+ continue
+ finally:
+ os.close(myfd)
+
+ # If fstat shows more than one hardlink, then it's extremely
+ # unlikely that the following link call will result in a lock,
+ # so optimize away the wasteful link call and sleep or raise
+ # TryAgain.
+ if myfd_st is not None and myfd_st.st_nlink < 2:
+ try:
+ os.link(lockfilename, myhardlock)
+ except OSError as e:
+ func_call = "link('%s', '%s')" % (lockfilename, myhardlock)
+ if e.errno == OperationNotPermitted.errno:
+ raise OperationNotPermitted(func_call)
+ elif e.errno == PermissionDenied.errno:
+ raise PermissionDenied(func_call)
+ elif e.errno in (errno.ESTALE, errno.ENOENT):
+ # another process has removed the file, so we'll have
+ # to create it again
+ continue
+ else:
+ raise
+ else:
+ if hardlink_is_mine(myhardlock, lockfilename):
+ if out is not None:
+ out.eend(os.EX_OK)
+ break
+
+ try:
+ os.unlink(myhardlock)
+ except OSError as e:
+ # This should not happen, since the file name of
+ # myhardlock is unique to our host and PID,
+ # and the above link() call succeeded.
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ raise FileNotFound(myhardlock)
+
+ if flags & os.O_NONBLOCK:
+ raise TryAgain(lockfilename)
+
+ if out is None and not _quiet:
+ out = portage.output.EOutput()
+ if out is not None and not displayed_waiting_msg:
+ displayed_waiting_msg = True
+ if waiting_msg is None:
+ waiting_msg = _("waiting for lock on %s\n") % lockfilename
+ out.ebegin(waiting_msg)
+
+ time.sleep(_HARDLINK_POLL_LATENCY)
+
+ return True
+
+def unhardlink_lockfile(lockfilename, unlinkfile=True):
+ myhardlock = hardlock_name(lockfilename)
+ if unlinkfile and hardlink_is_mine(myhardlock, lockfilename):
+ # Make sure not to touch lockfilename unless we really have a lock.
+ try:
+ os.unlink(lockfilename)
+ except OSError:
+ pass
+ try:
+ os.unlink(myhardlock)
+ except OSError:
+ pass
+
+def hardlock_cleanup(path, remove_all_locks=False):
+ myhost = os.uname()[1]
+ mydl = os.listdir(path)
+
+ results = []
+ mycount = 0
+
+ mylist = {}
+ for x in mydl:
+ if os.path.isfile(path + "/" + x):
+ parts = x.split(".hardlock-")
+ if len(parts) == 2:
+ filename = parts[0][1:]
+ hostpid = parts[1].split("-")
+ host = "-".join(hostpid[:-1])
+ pid = hostpid[-1]
+
+ if filename not in mylist:
+ mylist[filename] = {}
+ if host not in mylist[filename]:
+ mylist[filename][host] = []
+ mylist[filename][host].append(pid)
+
+ mycount += 1
+
+
+ results.append(_("Found %(count)s locks") % {"count": mycount})
+
+ for x in mylist:
+ if myhost in mylist[x] or remove_all_locks:
+ mylockname = hardlock_name(path + "/" + x)
+ if hardlink_is_mine(mylockname, path + "/" + x) or \
+ not os.path.exists(path + "/" + x) or \
+ remove_all_locks:
+ for y in mylist[x]:
+ for z in mylist[x][y]:
+ filename = path + "/." + x + ".hardlock-" + y + "-" + z
+ if filename == mylockname:
+ continue
+ try:
+ # We're sweeping through, unlinking everyone's locks.
+ os.unlink(filename)
+ results.append(_("Unlinked: ") + filename)
+ except OSError:
+ pass
+ try:
+ os.unlink(path + "/" + x)
+ results.append(_("Unlinked: ") + path + "/" + x)
+ os.unlink(mylockname)
+ results.append(_("Unlinked: ") + mylockname)
+ except OSError:
+ pass
+ else:
+ try:
+ os.unlink(mylockname)
+ results.append(_("Unlinked: ") + mylockname)
+ except OSError:
+ pass
+
+ return results
+
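
The lockfile()/unlockfile() pair above is normally wrapped around a critical
section. A minimal sketch, assuming '/var/tmp/example' is a path the caller is
allowed to write next to:

    import os
    from portage.locks import lockfile, unlockfile
    from portage.exception import TryAgain

    # Blocking acquisition; creates .example.portage_lockfile beside the path.
    mylock = lockfile("/var/tmp/example", wantnewlockfile=1)
    try:
        pass  # critical section
    finally:
        unlockfile(mylock)

    # Non-blocking acquisition raises TryAgain if the lock is already held.
    try:
        mylock = lockfile("/var/tmp/example", wantnewlockfile=1,
            flags=os.O_NONBLOCK)
    except TryAgain:
        pass
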
diff --git a/lib/portage/mail.py b/lib/portage/mail.py
new file mode 100644
index 000000000..11923eea6
--- /dev/null
+++ b/lib/portage/mail.py
@@ -0,0 +1,177 @@
+# Copyright 1998-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+# Since python ebuilds remove the 'email' module when USE=build
+# is enabled, use a local import so that
+# portage.proxy.lazyimport._preload_portage_submodules()
+# can load this module even though the 'email' module is missing.
+# The elog mail modules won't work, but at least an ImportError
+# won't cause portage to crash during stage builds. Since the
+ # 'smtplib' module imports the 'email' module, that's imported
+# locally as well.
+
+import socket
+import sys
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode, _unicode_encode
+from portage.localization import _
+import portage
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+ def _force_ascii_if_necessary(s):
+ # Force ascii encoding in order to avoid UnicodeEncodeError
+ # from smtplib.sendmail with python3 (bug #291331).
+ s = _unicode_encode(s,
+ encoding='ascii', errors='backslashreplace')
+ s = _unicode_decode(s,
+ encoding='ascii', errors='replace')
+ return s
+
+else:
+
+ def _force_ascii_if_necessary(s):
+ return s
+
+def TextMessage(_text):
+ from email.mime.text import MIMEText
+ mimetext = MIMEText(_text)
+ mimetext.set_charset("UTF-8")
+ return mimetext
+
+def create_message(sender, recipient, subject, body, attachments=None):
+
+ from email.header import Header
+ from email.mime.base import MIMEBase as BaseMessage
+ from email.mime.multipart import MIMEMultipart as MultipartMessage
+ from email.utils import formatdate
+
+ if sys.hexversion < 0x3000000:
+ sender = _unicode_encode(sender,
+ encoding=_encodings['content'], errors='strict')
+ recipient = _unicode_encode(recipient,
+ encoding=_encodings['content'], errors='strict')
+ subject = _unicode_encode(subject,
+ encoding=_encodings['content'], errors='backslashreplace')
+ body = _unicode_encode(body,
+ encoding=_encodings['content'], errors='backslashreplace')
+
+ if attachments == None:
+ mymessage = TextMessage(body)
+ else:
+ mymessage = MultipartMessage()
+ mymessage.attach(TextMessage(body))
+ for x in attachments:
+ if isinstance(x, BaseMessage):
+ mymessage.attach(x)
+ elif isinstance(x, basestring):
+ if sys.hexversion < 0x3000000:
+ x = _unicode_encode(x,
+ encoding=_encodings['content'],
+ errors='backslashreplace')
+ mymessage.attach(TextMessage(x))
+ else:
+ raise portage.exception.PortageException(_("Can't handle type of attachment: %s") % type(x))
+
+ mymessage.set_unixfrom(sender)
+ mymessage["To"] = recipient
+ mymessage["From"] = sender
+
+ # Use Header as a workaround so that long subject lines are wrapped
+ # correctly by <=python-2.6 (gentoo bug #263370, python issue #1974).
+ # Also, need to force ascii for python3, in order to avoid
+ # UnicodeEncodeError with non-ascii characters:
+ # File "/usr/lib/python3.1/email/header.py", line 189, in __init__
+ # self.append(s, charset, errors)
+ # File "/usr/lib/python3.1/email/header.py", line 262, in append
+ # input_bytes = s.encode(input_charset, errors)
+ #UnicodeEncodeError: 'ascii' codec can't encode characters in position 0-9: ordinal not in range(128)
+ mymessage["Subject"] = Header(_force_ascii_if_necessary(subject))
+ mymessage["Date"] = formatdate(localtime=True)
+
+ return mymessage
+
+def send_mail(mysettings, message):
+
+ import smtplib
+
+ mymailhost = "localhost"
+ mymailport = 25
+ mymailuser = ""
+ mymailpasswd = ""
+ myrecipient = "root@localhost"
+
+ # Syntax for PORTAGE_ELOG_MAILURI (if defined):
+ # address [[user:passwd@]mailserver[:port]]
+ # where address: recipient address
+ # user: username for smtp auth (defaults to none)
+ # passwd: password for smtp auth (defaults to none)
+ # mailserver: smtp server that should be used to deliver the mail (defaults to localhost)
+ # alternatively this can also be the absolute path to a sendmail binary if you don't want to use smtp
+ # port: port to use on the given smtp server (defaults to 25, values > 100000 indicate that starttls should be used on (port-100000))
+ if " " in mysettings.get("PORTAGE_ELOG_MAILURI", ""):
+ myrecipient, mymailuri = mysettings["PORTAGE_ELOG_MAILURI"].split()
+ if "@" in mymailuri:
+ myauthdata, myconndata = mymailuri.rsplit("@", 1)
+ try:
+ mymailuser, mymailpasswd = myauthdata.split(":")
+ except ValueError:
+ print(_("!!! invalid SMTP AUTH configuration, trying unauthenticated ..."))
+ else:
+ myconndata = mymailuri
+ if ":" in myconndata:
+ mymailhost, mymailport = myconndata.split(":")
+ else:
+ mymailhost = myconndata
+ else:
+ myrecipient = mysettings.get("PORTAGE_ELOG_MAILURI", "")
+
+ myfrom = message.get("From")
+
+ if sys.hexversion < 0x3000000:
+ myrecipient = _unicode_encode(myrecipient,
+ encoding=_encodings['content'], errors='strict')
+ mymailhost = _unicode_encode(mymailhost,
+ encoding=_encodings['content'], errors='strict')
+ mymailport = _unicode_encode(mymailport,
+ encoding=_encodings['content'], errors='strict')
+ myfrom = _unicode_encode(myfrom,
+ encoding=_encodings['content'], errors='strict')
+ mymailuser = _unicode_encode(mymailuser,
+ encoding=_encodings['content'], errors='strict')
+ mymailpasswd = _unicode_encode(mymailpasswd,
+ encoding=_encodings['content'], errors='strict')
+
+ # user wants to use a sendmail binary instead of smtp
+ if mymailhost[0] == os.sep and os.path.exists(mymailhost):
+ fd = os.popen(mymailhost+" -f "+myfrom+" "+myrecipient, "w")
+ fd.write(_force_ascii_if_necessary(message.as_string()))
+ if fd.close() != None:
+ sys.stderr.write(_("!!! %s returned with a non-zero exit code. This generally indicates an error.\n") % mymailhost)
+ else:
+ try:
+ if int(mymailport) > 100000:
+ myconn = smtplib.SMTP(mymailhost, int(mymailport) - 100000)
+ myconn.ehlo()
+ if not myconn.has_extn("STARTTLS"):
+ raise portage.exception.PortageException(_("!!! TLS support requested for logmail but not supported by server"))
+ myconn.starttls()
+ myconn.ehlo()
+ else:
+ myconn = smtplib.SMTP(mymailhost, mymailport)
+ if mymailuser != "" and mymailpasswd != "":
+ myconn.login(mymailuser, mymailpasswd)
+
+ message_str = _force_ascii_if_necessary(message.as_string())
+ myconn.sendmail(myfrom, myrecipient, message_str)
+ myconn.quit()
+ except smtplib.SMTPException as e:
+ raise portage.exception.PortageException(_("!!! An error occurred while trying to send logmail:\n")+str(e))
+ except socket.error as e:
+ raise portage.exception.PortageException(_("!!! A network error occurred while trying to send logmail:\n%s\nSure you configured PORTAGE_ELOG_MAILURI correctly?") % str(e))
+ return
+
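
A minimal sketch of how the two entry points above are combined; the settings
dict stands in for a real config object, and the addresses, server, and port
are placeholders:

    from portage.mail import create_message, send_mail

    # PORTAGE_ELOG_MAILURI: "address [[user:passwd@]mailserver[:port]]"
    settings = {"PORTAGE_ELOG_MAILURI":
        "root@localhost user:secret@mail.example.com:25"}

    msg = create_message(
        sender="portage@localhost",
        recipient=settings["PORTAGE_ELOG_MAILURI"].split()[0],
        subject="elog summary",
        body="Build completed.")
    send_mail(settings, msg)
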
diff --git a/lib/portage/manifest.py b/lib/portage/manifest.py
new file mode 100644
index 000000000..4bca61e86
--- /dev/null
+++ b/lib/portage/manifest.py
@@ -0,0 +1,729 @@
+# Copyright 1999-2016 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import errno
+import io
+import logging
+import re
+import stat
+import sys
+import warnings
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.checksum:get_valid_checksum_keys,perform_multiple_checksums,' + \
+ 'verify_all,_apply_hash_filter,_filter_unaccelarated_hashes',
+ 'portage.repository.config:_find_invalid_path_char',
+ 'portage.util:write_atomic,writemsg_level',
+)
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.exception import DigestException, FileNotFound, \
+ InvalidDataType, MissingParameter, PermissionDenied, \
+ PortageException, PortagePackageException
+from portage.const import (MANIFEST2_HASH_DEFAULTS, MANIFEST2_IDENTIFIERS)
+from portage.localization import _
+
+_manifest_re = re.compile(
+ r'^(' + '|'.join(MANIFEST2_IDENTIFIERS) + r') (\S+)( \d+( \S+ \S+)+)$',
+ re.UNICODE)
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ _unicode = str
+ basestring = str
+else:
+ _unicode = unicode
+
+class FileNotInManifestException(PortageException):
+ pass
+
+def manifest2AuxfileFilter(filename):
+ filename = filename.strip(os.sep)
+ mysplit = filename.split(os.path.sep)
+ if "CVS" in mysplit:
+ return False
+ for x in mysplit:
+ if x[:1] == '.':
+ return False
+ return not filename[:7] == 'digest-'
+
+def manifest2MiscfileFilter(filename):
+ return not (filename == "Manifest" or filename.endswith(".ebuild"))
+
+def guessManifestFileType(filename):
+ """ Perform a best effort guess of which type the given filename is, avoid using this if possible """
+ if filename.startswith("files" + os.sep + "digest-"):
+ return None
+ if filename.startswith("files" + os.sep):
+ return "AUX"
+ elif filename.endswith(".ebuild"):
+ return "EBUILD"
+ elif filename in ["ChangeLog", "metadata.xml"]:
+ return "MISC"
+ else:
+ return "DIST"
+
+def guessThinManifestFileType(filename):
+ type = guessManifestFileType(filename)
+ if type != "DIST":
+ return None
+ return "DIST"
+
+def parseManifest2(line):
+ if not isinstance(line, basestring):
+ line = ' '.join(line)
+ myentry = None
+ match = _manifest_re.match(line)
+ if match is not None:
+ tokens = match.group(3).split()
+ hashes = dict(zip(tokens[1::2], tokens[2::2]))
+ hashes["size"] = int(tokens[0])
+ myentry = Manifest2Entry(type=match.group(1),
+ name=match.group(2), hashes=hashes)
+ return myentry
+
+class ManifestEntry(object):
+ __slots__ = ("type", "name", "hashes")
+ def __init__(self, **kwargs):
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+
+class Manifest2Entry(ManifestEntry):
+ def __str__(self):
+ myline = " ".join([self.type, self.name, str(self.hashes["size"])])
+ myhashkeys = list(self.hashes)
+ myhashkeys.remove("size")
+ myhashkeys.sort()
+ for h in myhashkeys:
+ myline += " " + h + " " + str(self.hashes[h])
+ return myline
+
+ def __eq__(self, other):
+ if not isinstance(other, Manifest2Entry) or \
+ self.type != other.type or \
+ self.name != other.name or \
+ self.hashes != other.hashes:
+ return False
+ return True
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['repo.content'], errors='strict')
+
+class Manifest(object):
+ parsers = (parseManifest2,)
+ def __init__(self, pkgdir, distdir=None, fetchlist_dict=None,
+ manifest1_compat=DeprecationWarning, from_scratch=False, thin=False,
+ allow_missing=False, allow_create=True, hashes=None, required_hashes=None,
+ find_invalid_path_char=None, strict_misc_digests=True):
+ """ Create new Manifest instance for package in pkgdir.
+ Do not parse Manifest file if from_scratch == True (only for internal use)
+ The fetchlist_dict parameter is required only for generation of
+ a Manifest (not needed for parsing and checking sums).
+ If thin is specified, then the manifest carries only info for
+ distfiles."""
+
+ if manifest1_compat is not DeprecationWarning:
+ warnings.warn("The manifest1_compat parameter of the "
+ "portage.manifest.Manifest constructor is deprecated.",
+ DeprecationWarning, stacklevel=2)
+
+ if find_invalid_path_char is None:
+ find_invalid_path_char = _find_invalid_path_char
+ self._find_invalid_path_char = find_invalid_path_char
+ self.pkgdir = _unicode_decode(pkgdir).rstrip(os.sep) + os.sep
+ self.fhashdict = {}
+ self.hashes = set()
+ self.required_hashes = set()
+
+ if hashes is None:
+ hashes = MANIFEST2_HASH_DEFAULTS
+ if required_hashes is None:
+ required_hashes = hashes
+
+ self.hashes.update(hashes)
+ self.hashes.difference_update(hashname for hashname in \
+ list(self.hashes) if hashname not in get_valid_checksum_keys())
+ self.hashes.add("size")
+
+ self.required_hashes.update(required_hashes)
+ self.required_hashes.intersection_update(self.hashes)
+
+ for t in MANIFEST2_IDENTIFIERS:
+ self.fhashdict[t] = {}
+ if not from_scratch:
+ self._read()
+ if fetchlist_dict != None:
+ self.fetchlist_dict = fetchlist_dict
+ else:
+ self.fetchlist_dict = {}
+ self.distdir = distdir
+ self.thin = thin
+ if thin:
+ self.guessType = guessThinManifestFileType
+ else:
+ self.guessType = guessManifestFileType
+ self.allow_missing = allow_missing
+ self.allow_create = allow_create
+ self.strict_misc_digests = strict_misc_digests
+
+ def getFullname(self):
+ """ Returns the absolute path to the Manifest file for this instance """
+ return os.path.join(self.pkgdir, "Manifest")
+
+ def getDigests(self):
+ """ Compability function for old digest/manifest code, returns dict of filename:{hashfunction:hashvalue} """
+ rval = {}
+ for t in MANIFEST2_IDENTIFIERS:
+ rval.update(self.fhashdict[t])
+ return rval
+
+ def getTypeDigests(self, ftype):
+ """ Similar to getDigests(), but restricted to files of the given type. """
+ return self.fhashdict[ftype]
+
+ def _readManifest(self, file_path, myhashdict=None, **kwargs):
+ """Parse a manifest. If myhashdict is given then data will be added too it.
+ Otherwise, a new dict will be created and returned."""
+ try:
+ with io.open(_unicode_encode(file_path,
+ encoding=_encodings['fs'], errors='strict'), mode='r',
+ encoding=_encodings['repo.content'], errors='replace') as f:
+ if myhashdict is None:
+ myhashdict = {}
+ self._parseDigests(f, myhashdict=myhashdict, **kwargs)
+ return myhashdict
+ except (OSError, IOError) as e:
+ if e.errno == errno.ENOENT:
+ raise FileNotFound(file_path)
+ else:
+ raise
+
+ def _read(self):
+ """ Parse Manifest file for this instance """
+ try:
+ self._readManifest(self.getFullname(), myhashdict=self.fhashdict)
+ except FileNotFound:
+ pass
+
+ def _parseManifestLines(self, mylines):
+ """Parse manifest lines and return a list of manifest entries."""
+ for myline in mylines:
+ myentry = None
+ for parser in self.parsers:
+ myentry = parser(myline)
+ if myentry is not None:
+ yield myentry
+ break # go to the next line
+
+ def _parseDigests(self, mylines, myhashdict=None, mytype=None):
+ """Parse manifest entries and store the data in myhashdict. If mytype
+ is specified, it will override the type for all parsed entries."""
+ if myhashdict is None:
+ myhashdict = {}
+ for myentry in self._parseManifestLines(mylines):
+ if mytype is None:
+ myentry_type = myentry.type
+ else:
+ myentry_type = mytype
+ myhashdict.setdefault(myentry_type, {})
+ myhashdict[myentry_type].setdefault(myentry.name, {})
+ myhashdict[myentry_type][myentry.name].update(myentry.hashes)
+ return myhashdict
+
+ def _getDigestData(self, distlist):
+ """create a hash dict for a specific list of files"""
+ myhashdict = {}
+ for myname in distlist:
+ for mytype in self.fhashdict:
+ if myname in self.fhashdict[mytype]:
+ myhashdict.setdefault(mytype, {})
+ myhashdict[mytype].setdefault(myname, {})
+ myhashdict[mytype][myname].update(self.fhashdict[mytype][myname])
+ return myhashdict
+
+ def _createManifestEntries(self):
+ valid_hashes = set(get_valid_checksum_keys())
+ valid_hashes.add('size')
+ mytypes = list(self.fhashdict)
+ mytypes.sort()
+ for t in mytypes:
+ myfiles = list(self.fhashdict[t])
+ myfiles.sort()
+ for f in myfiles:
+ myentry = Manifest2Entry(
+ type=t, name=f, hashes=self.fhashdict[t][f].copy())
+ for h in list(myentry.hashes):
+ if h not in valid_hashes:
+ del myentry.hashes[h]
+ yield myentry
+
+ def checkIntegrity(self):
+ for t in self.fhashdict:
+ for f in self.fhashdict[t]:
+ diff = self.required_hashes.difference(
+ set(self.fhashdict[t][f]))
+ if diff:
+ raise MissingParameter(_("Missing %s checksum(s): %s %s") %
+ (' '.join(diff), t, f))
+
+ def write(self, sign=False, force=False):
+ """ Write Manifest instance to disk, optionally signing it. Returns
+ True if the Manifest is actually written, and False if the write
+ is skipped because the existing Manifest is identical."""
+ rval = False
+ if not self.allow_create:
+ return rval
+ self.checkIntegrity()
+ try:
+ myentries = list(self._createManifestEntries())
+ update_manifest = True
+ preserved_stats = {}
+ preserved_stats[self.pkgdir.rstrip(os.sep)] = os.stat(self.pkgdir)
+ if myentries and not force:
+ try:
+ f = io.open(_unicode_encode(self.getFullname(),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ oldentries = list(self._parseManifestLines(f))
+ preserved_stats[self.getFullname()] = os.fstat(f.fileno())
+ f.close()
+ if len(oldentries) == len(myentries):
+ update_manifest = False
+ for i in range(len(oldentries)):
+ if oldentries[i] != myentries[i]:
+ update_manifest = True
+ break
+ except (IOError, OSError) as e:
+ if e.errno == errno.ENOENT:
+ pass
+ else:
+ raise
+
+ if update_manifest:
+ if myentries or not (self.thin or self.allow_missing):
+ # If myentries is empty, don't write an empty manifest
+ # when thin or allow_missing is enabled. Except for
+ # thin manifests with no DIST entries, myentries is
+ # non-empty for all currently known use cases.
+ write_atomic(self.getFullname(), "".join("%s\n" %
+ _unicode(myentry) for myentry in myentries))
+ self._apply_max_mtime(preserved_stats, myentries)
+ rval = True
+ else:
+ # With thin manifest, there's no need to have
+ # a Manifest file if there are no DIST entries.
+ try:
+ os.unlink(self.getFullname())
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ rval = True
+
+ if sign:
+ self.sign()
+ except (IOError, OSError) as e:
+ if e.errno == errno.EACCES:
+ raise PermissionDenied(str(e))
+ raise
+ return rval
+
+ def _apply_max_mtime(self, preserved_stats, entries):
+ """
+ Set the Manifest mtime to the max mtime of all relevant files
+ and directories. Directory mtimes account for file renames and
+ removals. The existing Manifest mtime accounts for eclass
+ modifications that change DIST entries. This results in a
+ stable/predictable mtime, which is useful when converting thin
+ manifests to thick manifests for distribution via rsync. For
+ portability, the mtime is set with 1 second resolution.
+
+ @param preserved_stats: maps paths to preserved stat results
+ that should be used instead of os.stat() calls
+ @type preserved_stats: dict
+ @param entries: list of current Manifest2Entry instances
+ @type entries: list
+ """
+ # Use stat_result[stat.ST_MTIME] for 1 second resolution, since
+ # it always rounds down. Note that stat_result.st_mtime will round
+ # up from 0.999999999 to 1.0 when precision is lost during conversion
+ # from nanosecond resolution to float.
+ max_mtime = None
+ _update_max = (lambda st: max_mtime if max_mtime is not None
+ and max_mtime > st[stat.ST_MTIME] else st[stat.ST_MTIME])
+ _stat = (lambda path: preserved_stats[path] if path in preserved_stats
+ else os.stat(path))
+
+ for stat_result in preserved_stats.values():
+ max_mtime = _update_max(stat_result)
+
+ for entry in entries:
+ if entry.type == 'DIST':
+ continue
+ abs_path = (os.path.join(self.pkgdir, 'files', entry.name) if
+ entry.type == 'AUX' else os.path.join(self.pkgdir, entry.name))
+ max_mtime = _update_max(_stat(abs_path))
+
+ if not self.thin:
+ # Account for changes to all relevant nested directories.
+ # This is not necessary for thin manifests because
+ # self.pkgdir is already included via preserved_stats.
+ for parent_dir, dirs, files in os.walk(self.pkgdir.rstrip(os.sep)):
+ try:
+ parent_dir = _unicode_decode(parent_dir,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ # If an absolute path cannot be decoded, then it is
+ # always excluded from the manifest (repoman will
+ # report such problems).
+ pass
+ else:
+ max_mtime = _update_max(_stat(parent_dir))
+
+ if max_mtime is not None:
+ for path in preserved_stats:
+ try:
+ os.utime(path, (max_mtime, max_mtime))
+ except OSError as e:
+ # Even though we have write permission, utime fails
+ # with EPERM if path is owned by a different user.
+ # Only warn in this case, since it's not a problem
+ # unless this repo is being prepared for distribution
+ # via rsync.
+ writemsg_level('!!! utime(\'%s\', (%s, %s)): %s\n' %
+ (path, max_mtime, max_mtime, e),
+ level=logging.WARNING, noiselevel=-1)
+
+ def sign(self):
+ """ Sign the Manifest """
+ raise NotImplementedError()
+
+ def validateSignature(self):
+ """ Validate signature on Manifest """
+ raise NotImplementedError()
+
+ def addFile(self, ftype, fname, hashdict=None, ignoreMissing=False):
+ """ Add entry to Manifest optionally using hashdict to avoid recalculation of hashes """
+ if ftype == "AUX" and not fname.startswith("files/"):
+ fname = os.path.join("files", fname)
+ if not os.path.exists(self.pkgdir+fname) and not ignoreMissing:
+ raise FileNotFound(fname)
+ if not ftype in MANIFEST2_IDENTIFIERS:
+ raise InvalidDataType(ftype)
+ if ftype == "AUX" and fname.startswith("files"):
+ fname = fname[6:]
+ self.fhashdict[ftype][fname] = {}
+ if hashdict != None:
+ self.fhashdict[ftype][fname].update(hashdict)
+ if self.required_hashes.difference(set(self.fhashdict[ftype][fname])):
+ self.updateFileHashes(ftype, fname, checkExisting=False, ignoreMissing=ignoreMissing)
+
+ def removeFile(self, ftype, fname):
+ """ Remove given entry from Manifest """
+ del self.fhashdict[ftype][fname]
+
+ def hasFile(self, ftype, fname):
+ """ Return whether the Manifest contains an entry for the given type,filename pair """
+ return (fname in self.fhashdict[ftype])
+
+ def findFile(self, fname):
+ """ Return entrytype of the given file if present in Manifest or None if not present """
+ for t in MANIFEST2_IDENTIFIERS:
+ if fname in self.fhashdict[t]:
+ return t
+ return None
+
+ def create(self, checkExisting=False, assumeDistHashesSometimes=False,
+ assumeDistHashesAlways=False, requiredDistfiles=[]):
+ """ Recreate this Manifest from scratch. This will not use any
+ existing checksums unless assumeDistHashesSometimes or
+ assumeDistHashesAlways is true (assumeDistHashesSometimes will only
+ cause DIST checksums to be reused if the file doesn't exist in
+ DISTDIR). The requiredDistfiles parameter specifies a list of
+ distfiles to raise a FileNotFound exception for (if no file or existing
+ checksums are available), and defaults to all distfiles when not
+ specified."""
+ if not self.allow_create:
+ return
+ if checkExisting:
+ self.checkAllHashes()
+ if assumeDistHashesSometimes or assumeDistHashesAlways:
+ distfilehashes = self.fhashdict["DIST"]
+ else:
+ distfilehashes = {}
+ self.__init__(self.pkgdir, distdir=self.distdir,
+ fetchlist_dict=self.fetchlist_dict, from_scratch=True,
+ thin=self.thin, allow_missing=self.allow_missing,
+ allow_create=self.allow_create, hashes=self.hashes,
+ required_hashes=self.required_hashes,
+ find_invalid_path_char=self._find_invalid_path_char,
+ strict_misc_digests=self.strict_misc_digests)
+ pn = os.path.basename(self.pkgdir.rstrip(os.path.sep))
+ cat = self._pkgdir_category()
+
+ pkgdir = self.pkgdir
+ if self.thin:
+ cpvlist = self._update_thin_pkgdir(cat, pn, pkgdir)
+ else:
+ cpvlist = self._update_thick_pkgdir(cat, pn, pkgdir)
+
+ distlist = set()
+ for cpv in cpvlist:
+ distlist.update(self._getCpvDistfiles(cpv))
+
+ if requiredDistfiles is None:
+ # This allows us to force removal of stale digests for the
+ # ebuild --force digest option (no distfiles are required).
+ requiredDistfiles = set()
+ elif len(requiredDistfiles) == 0:
+ # repoman passes in an empty list, which implies that all distfiles
+ # are required.
+ requiredDistfiles = distlist.copy()
+ required_hash_types = set()
+ required_hash_types.add("size")
+ required_hash_types.update(self.required_hashes)
+ for f in distlist:
+ fname = os.path.join(self.distdir, f)
+ mystat = None
+ try:
+ mystat = os.stat(fname)
+ except OSError:
+ pass
+ if f in distfilehashes and \
+ not required_hash_types.difference(distfilehashes[f]) and \
+ ((assumeDistHashesSometimes and mystat is None) or \
+ (assumeDistHashesAlways and mystat is None) or \
+ (assumeDistHashesAlways and mystat is not None and \
+ set(distfilehashes[f]) == set(self.hashes) and \
+ distfilehashes[f]["size"] == mystat.st_size)):
+ self.fhashdict["DIST"][f] = distfilehashes[f]
+ else:
+ try:
+ self.fhashdict["DIST"][f] = perform_multiple_checksums(fname, self.hashes)
+ except FileNotFound:
+ if f in requiredDistfiles:
+ raise
+
+ def _is_cpv(self, cat, pn, filename):
+ if not filename.endswith(".ebuild"):
+ return None
+ pf = filename[:-7]
+ ps = portage.versions._pkgsplit(pf)
+ cpv = "%s/%s" % (cat, pf)
+ if not ps:
+ raise PortagePackageException(
+ _("Invalid package name: '%s'") % cpv)
+ if ps[0] != pn:
+ raise PortagePackageException(
+ _("Package name does not "
+ "match directory name: '%s'") % cpv)
+ return cpv
+
+ def _update_thin_pkgdir(self, cat, pn, pkgdir):
+ for pkgdir, pkgdir_dirs, pkgdir_files in os.walk(pkgdir):
+ break
+ cpvlist = []
+ for f in pkgdir_files:
+ try:
+ f = _unicode_decode(f,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ if f[:1] == '.':
+ continue
+ pf = self._is_cpv(cat, pn, f)
+ if pf is not None:
+ cpvlist.append(pf)
+ return cpvlist
+
+ def _update_thick_pkgdir(self, cat, pn, pkgdir):
+ cpvlist = []
+ for pkgdir, pkgdir_dirs, pkgdir_files in os.walk(pkgdir):
+ break
+ for f in pkgdir_files:
+ try:
+ f = _unicode_decode(f,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ if f[:1] == ".":
+ continue
+ pf = self._is_cpv(cat, pn, f)
+ if pf is not None:
+ mytype = "EBUILD"
+ cpvlist.append(pf)
+ elif self._find_invalid_path_char(f) == -1 and \
+ manifest2MiscfileFilter(f):
+ mytype = "MISC"
+ else:
+ continue
+ self.fhashdict[mytype][f] = perform_multiple_checksums(self.pkgdir+f, self.hashes)
+ recursive_files = []
+
+ pkgdir = self.pkgdir
+ cut_len = len(os.path.join(pkgdir, "files") + os.sep)
+ for parentdir, dirs, files in os.walk(os.path.join(pkgdir, "files")):
+ for f in files:
+ try:
+ f = _unicode_decode(f,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ full_path = os.path.join(parentdir, f)
+ recursive_files.append(full_path[cut_len:])
+ for f in recursive_files:
+ if self._find_invalid_path_char(f) != -1 or \
+ not manifest2AuxfileFilter(f):
+ continue
+ self.fhashdict["AUX"][f] = perform_multiple_checksums(
+ os.path.join(self.pkgdir, "files", f.lstrip(os.sep)), self.hashes)
+ return cpvlist
+
+ def _pkgdir_category(self):
+ return self.pkgdir.rstrip(os.sep).split(os.sep)[-2]
+
+ def _getAbsname(self, ftype, fname):
+ if ftype == "DIST":
+ absname = os.path.join(self.distdir, fname)
+ elif ftype == "AUX":
+ absname = os.path.join(self.pkgdir, "files", fname)
+ else:
+ absname = os.path.join(self.pkgdir, fname)
+ return absname
+
+ def checkAllHashes(self, ignoreMissingFiles=False):
+ for t in MANIFEST2_IDENTIFIERS:
+ self.checkTypeHashes(t, ignoreMissingFiles=ignoreMissingFiles)
+
+ def checkTypeHashes(self, idtype, ignoreMissingFiles=False, hash_filter=None):
+ for f in self.fhashdict[idtype]:
+ self.checkFileHashes(idtype, f, ignoreMissing=ignoreMissingFiles,
+ hash_filter=hash_filter)
+
+ def checkFileHashes(self, ftype, fname, ignoreMissing=False, hash_filter=None):
+ digests = _filter_unaccelarated_hashes(self.fhashdict[ftype][fname])
+ if hash_filter is not None:
+ digests = _apply_hash_filter(digests, hash_filter)
+ try:
+ ok, reason = verify_all(self._getAbsname(ftype, fname), digests)
+ if not ok:
+ raise DigestException(tuple([self._getAbsname(ftype, fname)]+list(reason)))
+ return ok, reason
+ except FileNotFound as e:
+ if not ignoreMissing:
+ raise
+ return False, _("File Not Found: '%s'") % str(e)
+
+ def checkCpvHashes(self, cpv, checkDistfiles=True, onlyDistfiles=False, checkMiscfiles=False):
+ """ check the hashes for all files associated to the given cpv, include all
+ AUX files and optionally all MISC files. """
+ if not onlyDistfiles:
+ self.checkTypeHashes("AUX", ignoreMissingFiles=False)
+ if checkMiscfiles:
+ self.checkTypeHashes("MISC", ignoreMissingFiles=False)
+ ebuildname = "%s.ebuild" % self._catsplit(cpv)[1]
+ self.checkFileHashes("EBUILD", ebuildname, ignoreMissing=False)
+ if checkDistfiles or onlyDistfiles:
+ for f in self._getCpvDistfiles(cpv):
+ self.checkFileHashes("DIST", f, ignoreMissing=False)
+
+ def _getCpvDistfiles(self, cpv):
+ """ Get a list of all DIST files associated to the given cpv """
+ return self.fetchlist_dict[cpv]
+
+ def getDistfilesSize(self, fetchlist):
+ total_bytes = 0
+ for f in fetchlist:
+ total_bytes += int(self.fhashdict["DIST"][f]["size"])
+ return total_bytes
+
+ def updateFileHashes(self, ftype, fname, checkExisting=True, ignoreMissing=True, reuseExisting=False):
+ """ Regenerate hashes for the given file """
+ if checkExisting:
+ self.checkFileHashes(ftype, fname, ignoreMissing=ignoreMissing)
+ if not ignoreMissing and fname not in self.fhashdict[ftype]:
+ raise FileNotInManifestException(fname)
+ if fname not in self.fhashdict[ftype]:
+ self.fhashdict[ftype][fname] = {}
+ myhashkeys = list(self.hashes)
+ if reuseExisting:
+ for k in [h for h in self.fhashdict[ftype][fname] if h in myhashkeys]:
+ myhashkeys.remove(k)
+ myhashes = perform_multiple_checksums(self._getAbsname(ftype, fname), myhashkeys)
+ self.fhashdict[ftype][fname].update(myhashes)
+
+ def updateTypeHashes(self, idtype, checkExisting=False, ignoreMissingFiles=True):
+ """ Regenerate all hashes for all files of the given type """
+ for fname in self.fhashdict[idtype]:
+ self.updateFileHashes(idtype, fname, checkExisting)
+
+ def updateAllHashes(self, checkExisting=False, ignoreMissingFiles=True):
+ """ Regenerate all hashes for all files in this Manifest. """
+ for idtype in MANIFEST2_IDENTIFIERS:
+ self.updateTypeHashes(idtype, checkExisting=checkExisting,
+ ignoreMissingFiles=ignoreMissingFiles)
+
+ def updateCpvHashes(self, cpv, ignoreMissingFiles=True):
+ """ Regenerate all hashes associated to the given cpv (includes all AUX and MISC
+ files)."""
+ self.updateTypeHashes("AUX", ignoreMissingFiles=ignoreMissingFiles)
+ self.updateTypeHashes("MISC", ignoreMissingFiles=ignoreMissingFiles)
+ ebuildname = "%s.ebuild" % self._catsplit(cpv)[1]
+ self.updateFileHashes("EBUILD", ebuildname, ignoreMissingFiles=ignoreMissingFiles)
+ for f in self._getCpvDistfiles(cpv):
+ self.updateFileHashes("DIST", f, ignoreMissingFiles=ignoreMissingFiles)
+
+ def updateHashesGuessType(self, fname, *args, **kwargs):
+ """ Regenerate hashes for the given file (guesses the type and then
+ calls updateFileHashes)."""
+ mytype = self.guessType(fname)
+ if mytype == "AUX":
+ fname = fname[len("files" + os.sep):]
+ elif mytype is None:
+ return
+ myrealtype = self.findFile(fname)
+ if myrealtype is not None:
+ mytype = myrealtype
+ return self.updateFileHashes(mytype, fname, *args, **kwargs)
+
+ def getFileData(self, ftype, fname, key):
+ """ Return the value of a specific (type,filename,key) triple, mainly useful
+ to get the size for distfiles."""
+ return self.fhashdict[ftype][fname][key]
+
+ def getVersions(self):
+ """ Returns a list of manifest versions present in the manifest file. """
+ rVal = []
+ mfname = self.getFullname()
+ if not os.path.exists(mfname):
+ return rVal
+ myfile = io.open(_unicode_encode(mfname,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace')
+ lines = myfile.readlines()
+ myfile.close()
+ for l in lines:
+ mysplit = l.split()
+ if len(mysplit) > 4 and mysplit[0] in MANIFEST2_IDENTIFIERS \
+ and ((len(mysplit) - 3) % 2) == 0 and not 2 in rVal:
+ rVal.append(2)
+ return rVal
+
+ def _catsplit(self, pkg_key):
+ """Split a category and package, returning a list of [cat, pkg].
+ This is compatible with portage.catsplit()"""
+ return pkg_key.split("/", 1)
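
A small sketch of the parsing and verification paths above; the hash values
are made up and the repository paths are placeholders:

    from portage.manifest import parseManifest2, Manifest

    line = "DIST foo-1.0.tar.gz 1024 BLAKE2B deadbeef SHA512 cafebabe"
    entry = parseManifest2(line)
    # entry.type == "DIST", entry.name == "foo-1.0.tar.gz"
    # entry.hashes == {"size": 1024, "BLAKE2B": "deadbeef", "SHA512": "cafebabe"}

    m = Manifest("/var/db/repos/gentoo/app-misc/foo",
        distdir="/var/cache/distfiles")
    if m.hasFile("DIST", "foo-1.0.tar.gz"):
        m.checkFileHashes("DIST", "foo-1.0.tar.gz")
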
diff --git a/lib/portage/metadata.py b/lib/portage/metadata.py
new file mode 100644
index 000000000..1abec5a89
--- /dev/null
+++ b/lib/portage/metadata.py
@@ -0,0 +1,208 @@
+
+from __future__ import print_function
+
+import sys
+import signal
+import logging
+import operator
+
+import portage
+from portage import os
+from portage import eapi_is_supported
+from portage.util import writemsg_level
+from portage.cache.cache_errors import CacheError
+from _emerge.ProgressHandler import ProgressHandler
+from portage.eclass_cache import hashed_path
+
+
+def action_metadata(settings, portdb, myopts, porttrees=None):
+ if porttrees is None:
+ porttrees = portdb.porttrees
+ portage.writemsg_stdout("\n>>> Updating Portage cache\n")
+ cachedir = os.path.normpath(settings.depcachedir)
+ if cachedir in ["/", "/bin", "/dev", "/etc", "/home",
+ "/lib", "/opt", "/proc", "/root", "/sbin",
+ "/sys", "/tmp", "/usr", "/var"]:
+ print("!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
+ "ROOT DIRECTORY ON YOUR SYSTEM.", file=sys.stderr)
+ print("!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir, file=sys.stderr)
+ sys.exit(73)
+ if not os.path.exists(cachedir):
+ os.makedirs(cachedir)
+
+ auxdbkeys = portdb._known_keys
+
+ class TreeData(object):
+ __slots__ = ('dest_db', 'eclass_db', 'path', 'src_db', 'valid_nodes')
+ def __init__(self, dest_db, eclass_db, path, src_db):
+ self.dest_db = dest_db
+ self.eclass_db = eclass_db
+ self.path = path
+ self.src_db = src_db
+ self.valid_nodes = set()
+
+ porttrees_data = []
+ for path in porttrees:
+ src_db = portdb._pregen_auxdb.get(path)
+ if src_db is None:
+ # portdbapi does not populate _pregen_auxdb
+ # when FEATURES=metadata-transfer is enabled
+ src_db = portdb._create_pregen_cache(path)
+
+ if src_db is not None:
+ porttrees_data.append(TreeData(portdb.auxdb[path],
+ portdb.repositories.get_repo_for_location(path).eclass_db, path, src_db))
+
+ porttrees = [tree_data.path for tree_data in porttrees_data]
+
+ quiet = settings.get('TERM') == 'dumb' or \
+ '--quiet' in myopts or \
+ not sys.stdout.isatty()
+
+ onProgress = None
+ if not quiet:
+ progressBar = portage.output.TermProgressBar()
+ progressHandler = ProgressHandler()
+ onProgress = progressHandler.onProgress
+ def display():
+ progressBar.set(progressHandler.curval, progressHandler.maxval)
+ progressHandler.display = display
+ def sigwinch_handler(signum, frame):
+ lines, progressBar.term_columns = \
+ portage.output.get_term_size()
+ signal.signal(signal.SIGWINCH, sigwinch_handler)
+
+ # Temporarily override portdb.porttrees so portdb.cp_all()
+ # will only return the relevant subset.
+ portdb_porttrees = portdb.porttrees
+ portdb.porttrees = porttrees
+ try:
+ cp_all = portdb.cp_all()
+ finally:
+ portdb.porttrees = portdb_porttrees
+
+ curval = 0
+ maxval = len(cp_all)
+ if onProgress is not None:
+ onProgress(maxval, curval)
+
+ # TODO: Display error messages, but do not interfere with the progress bar.
+ # Here's how:
+ # 1) erase the progress bar
+ # 2) show the error message
+ # 3) redraw the progress bar on a new line
+
+ for cp in cp_all:
+ for tree_data in porttrees_data:
+
+ src_chf = tree_data.src_db.validation_chf
+ dest_chf = tree_data.dest_db.validation_chf
+ dest_chf_key = '_%s_' % dest_chf
+ dest_chf_getter = operator.attrgetter(dest_chf)
+
+ for cpv in portdb.cp_list(cp, mytree=tree_data.path):
+ tree_data.valid_nodes.add(cpv)
+ try:
+ src = tree_data.src_db[cpv]
+ except (CacheError, KeyError):
+ continue
+
+ ebuild_location = portdb.findname(cpv, mytree=tree_data.path)
+ if ebuild_location is None:
+ continue
+ ebuild_hash = hashed_path(ebuild_location)
+
+ try:
+ if not tree_data.src_db.validate_entry(src,
+ ebuild_hash, tree_data.eclass_db):
+ continue
+ except CacheError:
+ continue
+
+ eapi = src.get('EAPI')
+ if not eapi:
+ eapi = '0'
+ eapi_supported = eapi_is_supported(eapi)
+ if not eapi_supported:
+ continue
+
+ dest = None
+ try:
+ dest = tree_data.dest_db[cpv]
+ except (KeyError, CacheError):
+ pass
+
+ for d in (src, dest):
+ if d is not None and d.get('EAPI') in ('', '0'):
+ del d['EAPI']
+
+ if src_chf != 'mtime':
+ # src may contain an irrelevant _mtime_ which corresponds
+ # to the time that the cache entry was written
+ src.pop('_mtime_', None)
+
+ if src_chf != dest_chf:
+ # populate src entry with dest_chf_key
+ # (the validity of the dest_chf that we generate from the
+ # ebuild here relies on the fact that we already used
+ # validate_entry to validate the ebuild with src_chf)
+ src[dest_chf_key] = dest_chf_getter(ebuild_hash)
+
+ if dest is not None:
+ if not (dest.get(dest_chf_key) == src[dest_chf_key] and \
+ tree_data.eclass_db.validate_and_rewrite_cache(
+ dest['_eclasses_'], tree_data.dest_db.validation_chf,
+ tree_data.dest_db.store_eclass_paths) is not None and \
+ set(dest['_eclasses_']) == set(src['_eclasses_'])):
+ dest = None
+ else:
+ # We don't want to skip the write unless we're really
+ # sure that the existing cache is identical, so don't
+ # trust _mtime_ and _eclasses_ alone.
+ for k in auxdbkeys:
+ if dest.get(k, '') != src.get(k, ''):
+ dest = None
+ break
+
+ if dest is not None:
+ # The existing data is valid and identical,
+ # so there's no need to overwrite it.
+ continue
+
+ try:
+ tree_data.dest_db[cpv] = src
+ except CacheError:
+ # ignore it; can't do anything about it.
+ pass
+
+ curval += 1
+ if onProgress is not None:
+ onProgress(maxval, curval)
+
+ if onProgress is not None:
+ onProgress(maxval, curval)
+
+ for tree_data in porttrees_data:
+ try:
+ dead_nodes = set(tree_data.dest_db)
+ except CacheError as e:
+ writemsg_level("Error listing cache entries for " + \
+ "'%s': %s, continuing...\n" % (tree_data.path, e),
+ level=logging.ERROR, noiselevel=-1)
+ del e
+ else:
+ dead_nodes.difference_update(tree_data.valid_nodes)
+ for cpv in dead_nodes:
+ try:
+ del tree_data.dest_db[cpv]
+ except (KeyError, CacheError):
+ pass
+
+ if not quiet:
+ # make sure the final progress is displayed
+ progressHandler.display()
+ print()
+ signal.signal(signal.SIGWINCH, signal.SIG_DFL)
+
+ portdb.flush_cache()
+ sys.stdout.flush()
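The loop above wires a TermProgressBar to a ProgressHandler and redraws on SIGWINCH. A minimal standalone sketch of that pattern, assuming the _emerge.ProgressHandler helper that this module already uses (the item list and work loop here are purely illustrative):

    import signal
    import portage.output
    from _emerge.ProgressHandler import ProgressHandler

    progressBar = portage.output.TermProgressBar()
    progressHandler = ProgressHandler()

    def display():
        progressBar.set(progressHandler.curval, progressHandler.maxval)
    progressHandler.display = display

    def sigwinch_handler(signum, frame):
        # re-query the terminal width so the bar is redrawn correctly
        lines, progressBar.term_columns = portage.output.get_term_size()
    signal.signal(signal.SIGWINCH, sigwinch_handler)

    items = list(range(200))
    for i, item in enumerate(items, start=1):
        # onProgress(maxval, curval) stores both values and calls display()
        # (rate-limited inside ProgressHandler)
        progressHandler.onProgress(len(items), i)

    signal.signal(signal.SIGWINCH, signal.SIG_DFL)
    print()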
diff --git a/lib/portage/module.py b/lib/portage/module.py
new file mode 100644
index 000000000..bd7c94d4e
--- /dev/null
+++ b/lib/portage/module.py
@@ -0,0 +1,240 @@
+# Copyright 2005-2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+
+from __future__ import print_function
+
+from portage import os
+from portage.exception import PortageException
+from portage.cache.mappings import ProtectedDict
+from portage.localization import _
+from portage.util import writemsg
+
+
+class InvalidModuleName(PortageException):
+ """An invalid or unknown module name."""
+
+
+class ModuleVersionError(PortageException):
+ '''An incompatible module version'''
+
+
+class Module(object):
+ """Class to define and hold our plug-in module
+
+ @type name: string
+ @param name: the module name
+ @type namepath: string
+ @param namepath: the Python import path to the module
+ """
+
+ def __init__(self, name, namepath):
+ """Some variables initialization"""
+ self.name = name
+ self._namepath = namepath
+ self.kids_names = []
+ self.kids = {}
+ self.initialized = self._initialize()
+
+ def _initialize(self):
+ """Initialize the plug-in module
+
+ @rtype: boolean
+ """
+ self.valid = False
+ try:
+ mod_name = ".".join([self._namepath, self.name])
+ self._module = __import__(mod_name, [], [], ["not empty"])
+ self.valid = True
+ except ImportError as e:
+ print("MODULE; failed import", mod_name, " error was:", e)
+ return False
+ self.module_spec = self._module.module_spec
+ for submodule in self.module_spec['provides']:
+ kid = self.module_spec['provides'][submodule]
+ kidname = kid['name']
+ try:
+ kid['module_name'] = '.'.join([mod_name, kid['sourcefile']])
+ except KeyError:
+ kid['module_name'] = '.'.join([mod_name, self.name])
+ msg = ("%s module's module_spec is old, missing attribute: "
+ "'sourcefile'. Backward compatibility may be "
+ "removed in the future.\nFile: %s\n")
+ writemsg(_(msg) % (self.name, self._module.__file__))
+ kid['is_imported'] = False
+ self.kids[kidname] = kid
+ self.kids_names.append(kidname)
+ return True
+
+ def get_class(self, name):
+ if not name or name not in self.kids_names:
+ raise InvalidModuleName("Module name '%s' is invalid or not "
+ "part of the module '%s'" % (name, self.name))
+ kid = self.kids[name]
+ if kid['is_imported']:
+ module = kid['instance']
+ else:
+ try:
+ module = __import__(kid['module_name'], [], [], ["not empty"])
+ kid['instance'] = module
+ kid['is_imported'] = True
+ except ImportError:
+ raise
+ mod_class = getattr(module, kid['class'])
+ return mod_class
+
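For reference, a hypothetical module_spec illustrating the keys this loader reads (names and values are invented; only 'name', 'version', 'provides' and the per-submodule 'name'/'sourcefile'/'class' keys are consumed directly above, while keys such as 'description', 'functions' and 'func_desc' are served back through the Modules accessors below):

    module_spec = {
        'name': 'example',
        'description': "An example plug-in module",
        'version': 1,
        'provides': {
            'example-submodule': {
                'name': 'example',
                'sourcefile': 'example',      # file inside the module package
                'class': 'ExampleCommand',    # attribute returned by get_class()
                'description': "Does example things",
                'functions': ['check', 'fix'],
                'func_desc': {'check': "scan only", 'fix': "scan and repair"},
            },
        },
    }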
+
+class Modules(object):
+ """Dynamic modules system for loading and retrieving any of the
+ installed emaint modules and/or their provided classes
+
+ @param path: Path to the "modules" directory
+ @param namepath: Python import path to the "modules" directory
+ """
+
+ def __init__(self, path, namepath, compat_versions=None):
+ self._module_path = path
+ self._namepath = namepath
+ self.compat_versions = compat_versions
+ self.parents = []
+ self._modules = self._get_all_modules()
+ self.modules = ProtectedDict(self._modules)
+ self.module_names = sorted(self._modules)
+
+ def _get_all_modules(self):
+ """scans the _module_path dir for loadable modules
+
+ @rtype: dictionary of module_plugins
+ """
+ module_dir = self._module_path
+ importables = []
+ names = os.listdir(module_dir)
+ for entry in names:
+ # skip any __init__ or __pycache__ files or directories
+ if entry.startswith('__'):
+ continue
+ try:
+ # stat the package's __init__.py to ensure this is a real
+ # module directory; skip the entry if the stat fails
+ os.lstat(os.path.join(module_dir, entry, '__init__.py'))
+ importables.append(entry)
+ except EnvironmentError:
+ pass
+ kids = {}
+ for entry in importables:
+ new_module = Module(entry, self._namepath)
+ self._check_compat(new_module)
+ for module_name in new_module.kids:
+ kid = new_module.kids[module_name]
+ kid['parent'] = new_module
+ kids[kid['name']] = kid
+ self.parents.append(entry)
+ return kids
+
+ def get_module_names(self):
+ """Convenience function to return the list of installed modules
+ available
+
+ @rtype: list
+ @return: the installed module names available
+ """
+ return self.module_names
+
+ def get_class(self, modname):
+ """Retrieves a module class desired
+
+ @type modname: string
+ @param modname: the module class name
+ """
+ if modname and modname in self.module_names:
+ mod = self._modules[modname]['parent'].get_class(modname)
+ else:
+ raise InvalidModuleName(
+ "Module name '%s' is invalid or not found" % modname)
+ return mod
+
+ def get_description(self, modname):
+ """Retrieves the module class decription
+
+ @type modname: string
+ @param modname: the module class name
+ @rtype: string
+ @return: the module class description
+ """
+ if modname and modname in self.module_names:
+ mod = self._modules[modname]['description']
+ else:
+ raise InvalidModuleName(
+ "Module name '%s' is invalid or not found" % modname)
+ return mod
+
+ def get_functions(self, modname):
+ """Retrieves the module class exported function names
+
+ @type modname: string
+ @param modname: the module class name
+ @rtype: list
+ @return: the module class's exported function names
+ """
+ if modname and modname in self.module_names:
+ mod = self._modules[modname]['functions']
+ else:
+ raise InvalidModuleName(
+ "Module name '%s' is invalid or not found" % modname)
+ return mod
+
+ def get_func_descriptions(self, modname):
+ """Retrieves the module class exported functions descriptions
+
+ @type modname: string
+ @param modname: the module class name
+ @rtype: dict
+ @return: the module class's exported function descriptions
+ """
+ if modname and modname in self.module_names:
+ desc = self._modules[modname]['func_desc']
+ else:
+ raise InvalidModuleName(
+ "Module name '%s' is invalid or not found" % modname)
+ return desc
+
+ def get_opt_descriptions(self, modname):
+ """Retrieves the module class exported options descriptions
+
+ @type modname: string
+ @param modname: the module class name
+ @rtype: dict
+ @return: the module class's exported option descriptions
+ """
+ if modname and modname in self.module_names:
+ desc = self._modules[modname].get('opt_desc')
+ else:
+ raise InvalidModuleName(
+ "Module name '%s' is invalid or not found" % modname)
+ return desc
+
+ def get_spec(self, modname, var):
+ """Retrieves the module class exported spec variable
+
+ @type modname: string
+ @param modname: the module class name
+ @type var: string
+ @param var: the base level variable to return
+ @rtype: dict
+ @return: the value of the requested module_spec variable
+ """
+ if modname and modname in self.module_names:
+ value = self._modules[modname].get(var, None)
+ else:
+ raise InvalidModuleName(
+ "Module name '%s' is invalid or not found" % modname)
+ return value
+
+ def _check_compat(self, module):
+ if self.compat_versions:
+ if not module.module_spec['version'] in self.compat_versions:
+ raise ModuleVersionError(
+ "Error loading '%s' plugin module: %s, version: %s\n"
+ "Module is not compatible with the current application version\n"
+ "Compatible module API versions are: %s"
+ % (self._namepath, module.module_spec['name'],
+ module.module_spec['version'], self.compat_versions))
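A hedged usage sketch of the loader, assuming a hypothetical package layout such as myapp/modules/example/{__init__.py,example.py} where each module's __init__.py exports a module_spec like the one shown earlier:

    from portage.module import Modules, InvalidModuleName

    module_controller = Modules(path='/usr/lib/myapp/modules',
        namepath='myapp.modules', compat_versions=[1])

    for name in module_controller.get_module_names():
        print(name, '-', module_controller.get_description(name))

    try:
        example_class = module_controller.get_class('example')
    except InvalidModuleName as e:
        print(e)
    else:
        task = example_class()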
diff --git a/lib/portage/news.py b/lib/portage/news.py
new file mode 100644
index 000000000..d4f1429b2
--- /dev/null
+++ b/lib/portage/news.py
@@ -0,0 +1,452 @@
+# portage: news management code
+# Copyright 2006-2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function, unicode_literals
+
+__all__ = ["NewsManager", "NewsItem", "DisplayRestriction",
+ "DisplayProfileRestriction", "DisplayKeywordRestriction",
+ "DisplayInstalledRestriction",
+ "count_unread_news", "display_news_notifications"]
+
+import fnmatch
+import io
+import logging
+import os as _os
+import re
+import portage
+from portage import OrderedDict
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.const import NEWS_LIB_PATH
+from portage.util import apply_secpass_permissions, ensure_dirs, \
+ grabfile, normalize_path, write_atomic, writemsg_level
+from portage.data import portage_gid
+from portage.dep import isvalidatom
+from portage.localization import _
+from portage.locks import lockfile, unlockfile
+from portage.output import colorize
+from portage.exception import (InvalidLocation, OperationNotPermitted,
+ PermissionDenied, ReadOnlyFileSystem)
+
+class NewsManager(object):
+ """
+ This object manages GLEP 42 style news items. It caches news items that
+ have previously shown up and notifies users of relevant, unread items
+ that apply to their installed packages.
+
+ Creating a news manager requires:
+ root - typically ${ROOT} see man make.conf and man emerge for details
+ news_path - path to news items; usually $REPODIR/metadata/news
+ unread_path - path to the news.repoid.unread file; this helps us track news items
+
+ """
+
+ def __init__(self, portdb, vardb, news_path, unread_path, language_id='en'):
+ self.news_path = news_path
+ self.unread_path = unread_path
+ self.language_id = language_id
+ self.config = vardb.settings
+ self.vdb = vardb
+ self.portdb = portdb
+
+ # GLEP 42 says:
+ # All news item related files should be root owned and in the
+ # portage group with the group write (and, for directories,
+ # execute) bits set. News files should be world readable.
+ self._uid = int(self.config["PORTAGE_INST_UID"])
+ self._gid = portage_gid
+ self._file_mode = 0o0064
+ self._dir_mode = 0o0074
+ self._mode_mask = 0o0000
+
+ portdir = portdb.repositories.mainRepoLocation()
+ profiles_base = None
+ if portdir is not None:
+ profiles_base = os.path.join(portdir, 'profiles') + os.path.sep
+ profile_path = None
+ if profiles_base is not None and portdb.settings.profile_path:
+ profile_path = normalize_path(
+ os.path.realpath(portdb.settings.profile_path))
+ if profile_path.startswith(profiles_base):
+ profile_path = profile_path[len(profiles_base):]
+ self._profile_path = profile_path
+
+ def _unread_filename(self, repoid):
+ return os.path.join(self.unread_path, 'news-%s.unread' % repoid)
+
+ def _skip_filename(self, repoid):
+ return os.path.join(self.unread_path, 'news-%s.skip' % repoid)
+
+ def _news_dir(self, repoid):
+ repo_path = self.portdb.getRepositoryPath(repoid)
+ if repo_path is None:
+ raise AssertionError(_("Invalid repoID: %s") % repoid)
+ return os.path.join(repo_path, self.news_path)
+
+ def updateItems(self, repoid):
+ """
+ Figure out which news items from NEWS_PATH are both unread and relevant to
+ the user (according to the GLEP 42 standards of relevancy). Then add these
+ items into the news.repoid.unread file.
+ """
+
+ # Ensure that the unread path exists and is writable.
+
+ try:
+ ensure_dirs(self.unread_path, uid=self._uid, gid=self._gid,
+ mode=self._dir_mode, mask=self._mode_mask)
+ except (OperationNotPermitted, PermissionDenied):
+ return
+
+ if not os.access(self.unread_path, os.W_OK):
+ return
+
+ news_dir = self._news_dir(repoid)
+ try:
+ news = _os.listdir(_unicode_encode(news_dir,
+ encoding=_encodings['fs'], errors='strict'))
+ except OSError:
+ return
+
+ skip_filename = self._skip_filename(repoid)
+ unread_filename = self._unread_filename(repoid)
+ unread_lock = lockfile(unread_filename, wantnewlockfile=1)
+ try:
+ try:
+ unread = set(grabfile(unread_filename))
+ unread_orig = unread.copy()
+ skip = set(grabfile(skip_filename))
+ skip_orig = skip.copy()
+ except PermissionDenied:
+ return
+
+ for itemid in news:
+ try:
+ itemid = _unicode_decode(itemid,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ itemid = _unicode_decode(itemid,
+ encoding=_encodings['fs'], errors='replace')
+ writemsg_level(
+ _("!!! Invalid encoding in news item name: '%s'\n") % \
+ itemid, level=logging.ERROR, noiselevel=-1)
+ continue
+
+ if itemid in skip:
+ continue
+ filename = os.path.join(news_dir, itemid,
+ itemid + "." + self.language_id + ".txt")
+ if not os.path.isfile(filename):
+ continue
+ item = NewsItem(filename, itemid)
+ if not item.isValid():
+ continue
+ if item.isRelevant(profile=self._profile_path,
+ config=self.config, vardb=self.vdb):
+ unread.add(item.name)
+ skip.add(item.name)
+
+ if unread != unread_orig:
+ write_atomic(unread_filename,
+ "".join("%s\n" % x for x in sorted(unread)))
+ apply_secpass_permissions(unread_filename,
+ uid=self._uid, gid=self._gid,
+ mode=self._file_mode, mask=self._mode_mask)
+
+ if skip != skip_orig:
+ write_atomic(skip_filename,
+ "".join("%s\n" % x for x in sorted(skip)))
+ apply_secpass_permissions(skip_filename,
+ uid=self._uid, gid=self._gid,
+ mode=self._file_mode, mask=self._mode_mask)
+
+ finally:
+ unlockfile(unread_lock)
+
+ def getUnreadItems(self, repoid, update=False):
+ """
+ Determine if there are unread relevant items in news.repoid.unread.
+ If there are unread items return their number.
+ If update is specified, updateNewsItems( repoid ) will be called to
+ check for new items.
+ """
+
+ if update:
+ self.updateItems(repoid)
+
+ unread_filename = self._unread_filename(repoid)
+ unread_lock = None
+ try:
+ unread_lock = lockfile(unread_filename, wantnewlockfile=1)
+ except (InvalidLocation, OperationNotPermitted, PermissionDenied,
+ ReadOnlyFileSystem):
+ pass
+ try:
+ try:
+ return len(grabfile(unread_filename))
+ except PermissionDenied:
+ return 0
+ finally:
+ if unread_lock:
+ unlockfile(unread_lock)
+
+_formatRE = re.compile(r"News-Item-Format:\s*([^\s]*)\s*$")
+_installedRE = re.compile("Display-If-Installed:(.*)\n")
+_profileRE = re.compile("Display-If-Profile:(.*)\n")
+_keywordRE = re.compile("Display-If-Keyword:(.*)\n")
+_valid_profile_RE = re.compile(r'^[^*]+(/\*)?$')
+
+class NewsItem(object):
+ """
+ This class encapsulates a GLEP 42 style news item.
+ Its purpose is to wrap parsing of these news items so that portage can determine
+ whether a particular item is 'relevant' or not. This requires parsing the item
+ and determining 'relevancy restrictions'; these include "Display if Installed" or
+ "display if arch: x86" and so forth.
+
+ Creation of a news item involves passing in the path to the particular news item.
+ """
+
+ def __init__(self, path, name):
+ """
+ For a given news item, we only want it if its path is a file.
+ """
+ self.path = path
+ self.name = name
+ self._parsed = False
+ self._valid = True
+
+ def isRelevant(self, vardb, config, profile):
+ """
+ This function takes a dict of keyword arguments; one should pass in any
+ objects needed to do lookups (like what keywords we are on, what profile,
+ and a vardb so we can look at installed packages).
+ Each restriction will pluck out the items that are required for it to match
+ or raise a ValueError exception if the required object is not present.
+
+ Restrictions of the form Display-X are OR'd with like-restrictions;
+ otherwise restrictions are AND'd. any_match is the ORing and
+ all_match is the ANDing.
+ """
+
+ if not self._parsed:
+ self.parse()
+
+ if not len(self.restrictions):
+ return True
+
+ kwargs = \
+ { 'vardb' : vardb,
+ 'config' : config,
+ 'profile' : profile }
+
+ all_match = True
+ for values in self.restrictions.values():
+ any_match = False
+ for restriction in values:
+ if restriction.checkRestriction(**kwargs):
+ any_match = True
+ if not any_match:
+ all_match = False
+
+ return all_match
+
+ def isValid(self):
+ if not self._parsed:
+ self.parse()
+ return self._valid
+
+ def parse(self):
+ f = io.open(_unicode_encode(self.path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='replace')
+ lines = f.readlines()
+ f.close()
+ self.restrictions = {}
+ invalids = []
+ news_format = None
+
+ # Look for News-Item-Format
+ for i, line in enumerate(lines):
+ format_match = _formatRE.match(line)
+ if format_match is not None:
+ news_format = format_match.group(1)
+ if fnmatch.fnmatch(news_format, '[12].*'):
+ break
+ invalids.append((i + 1, line.rstrip('\n')))
+
+ if news_format is None:
+ invalids.append((0, 'News-Item-Format unspecified'))
+ else:
+ # Parse the rest
+ for i, line in enumerate(lines):
+ # Optimization to ignore regex matches on lines that
+ # will never match
+ if not line.startswith('D'):
+ continue
+ restricts = { _installedRE : DisplayInstalledRestriction,
+ _profileRE : DisplayProfileRestriction,
+ _keywordRE : DisplayKeywordRestriction }
+ for regex, restriction in restricts.items():
+ match = regex.match(line)
+ if match:
+ restrict = restriction(match.groups()[0].strip(), news_format)
+ if not restrict.isValid():
+ invalids.append((i + 1, line.rstrip("\n")))
+ else:
+ self.restrictions.setdefault(
+ id(restriction), []).append(restrict)
+ continue
+
+ if invalids:
+ self._valid = False
+ msg = []
+ msg.append(_("Invalid news item: %s") % (self.path,))
+ for lineno, line in invalids:
+ msg.append(_(" line %d: %s") % (lineno, line))
+ writemsg_level("".join("!!! %s\n" % x for x in msg),
+ level=logging.ERROR, noiselevel=-1)
+
+ self._parsed = True
+
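A rough sketch of constructing and validating a NewsItem from a temporary file; the header values are illustrative, and the relevance check additionally needs real config/vardb/profile objects from a working portage environment:

    import os, tempfile
    from portage.news import NewsItem

    body = (
        "Title: Example item\n"
        "Author: Example Author <author@example.org>\n"
        "Posted: 2017-01-01\n"
        "Revision: 1\n"
        "News-Item-Format: 2.0\n"
        "Display-If-Keyword: amd64\n"
        "Display-If-Installed: dev-lang/python\n"
        "\n"
        "Body text of the news item.\n"
    )
    path = os.path.join(tempfile.mkdtemp(), '2017-01-01-example.en.txt')
    with open(path, 'w') as f:
        f.write(body)

    item = NewsItem(path, '2017-01-01-example')
    print(item.isValid())   # True: valid format and restriction headers
    # item.isRelevant(vardb=vardb, config=settings, profile=profile_path)
    # would be True only if ARCH == 'amd64' AND dev-lang/python is installed,
    # since different restriction types are AND'd (same types are OR'd).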
+class DisplayRestriction(object):
+ """
+ A base restriction object representing a restriction of display.
+ News items may have 'relevancy restrictions' that limit when they should
+ be displayed, so we need a way of figuring out whether a particular item
+ is relevant or not. If its restrictions are satisfied, then it is
+ displayed.
+ """
+
+ def isValid(self):
+ return True
+
+ def checkRestriction(self, **kwargs):
+ raise NotImplementedError('Derived class should override this method')
+
+class DisplayProfileRestriction(DisplayRestriction):
+ """
+ A profile restriction where a particular item shall only be displayed
+ if the user is running a specific profile.
+ """
+
+ def __init__(self, profile, news_format):
+ self.profile = profile
+ self.format = news_format
+
+ def isValid(self):
+ if fnmatch.fnmatch(self.format, '1.*') and '*' in self.profile:
+ return False
+ if fnmatch.fnmatch(self.format, '2.*') and not _valid_profile_RE.match(self.profile):
+ return False
+ return True
+
+ def checkRestriction(self, **kwargs):
+ if fnmatch.fnmatch(self.format, '2.*') and self.profile.endswith('/*'):
+ return (kwargs['profile'].startswith(self.profile[:-1]))
+ return (kwargs['profile'] == self.profile)
+
+class DisplayKeywordRestriction(DisplayRestriction):
+ """
+ A keyword restriction where a particular item shall only be displayed
+ if the user is running a specific keyword.
+ """
+
+ def __init__(self, keyword, news_format):
+ self.keyword = keyword
+ self.format = news_format
+
+ def checkRestriction(self, **kwargs):
+ if kwargs['config'].get('ARCH', '') == self.keyword:
+ return True
+ return False
+
+class DisplayInstalledRestriction(DisplayRestriction):
+ """
+ An Installation restriction where a particular item shall only be displayed
+ if the user has that item installed.
+ """
+
+ def __init__(self, atom, news_format):
+ self.atom = atom
+ self.format = news_format
+
+ def isValid(self):
+ if fnmatch.fnmatch(self.format, '1.*'):
+ return isvalidatom(self.atom, eapi='0')
+ if fnmatch.fnmatch(self.format, '2.*'):
+ return isvalidatom(self.atom, eapi='5')
+ return isvalidatom(self.atom)
+
+ def checkRestriction(self, **kwargs):
+ vdb = kwargs['vardb']
+ if vdb.match(self.atom):
+ return True
+ return False
+
+def count_unread_news(portdb, vardb, repos=None, update=True):
+ """
+ Returns a dictionary mapping repos to integer counts of unread news items.
+ By default, this will scan all repos and check for new items that have
+ appeared since the last scan.
+
+ @param portdb: a portage tree database
+ @type portdb: pordbapi
+ @param vardb: an installed package database
+ @type vardb: vardbapi
+ @param repos: names of repos to scan (None means to scan all available repos)
+ @type repos: list or None
+ @param update: check for new items (default is True)
+ @type update: boolean
+ @rtype: dict
+ @return: dictionary mapping repos to integer counts of unread news items
+ """
+
+ NEWS_PATH = os.path.join("metadata", "news")
+ UNREAD_PATH = os.path.join(vardb.settings['EROOT'], NEWS_LIB_PATH, "news")
+ news_counts = OrderedDict()
+ if repos is None:
+ repos = portdb.getRepositories()
+
+ permission_msgs = set()
+ for repo in repos:
+ try:
+ manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
+ count = manager.getUnreadItems(repo, update=update)
+ except PermissionDenied as e:
+ # NOTE: The NewsManager typically handles permission errors by
+ # returning silently, so PermissionDenied won't necessarily be
+ # raised even if we do trigger a permission error above.
+ msg = "Permission denied: '%s'\n" % (e,)
+ if msg in permission_msgs:
+ pass
+ else:
+ permission_msgs.add(msg)
+ writemsg_level(msg, level=logging.ERROR, noiselevel=-1)
+ news_counts[repo] = 0
+ else:
+ news_counts[repo] = count
+
+ return news_counts
+
+def display_news_notifications(news_counts):
+ """
+ Display a notification for unread news items, using a dictionary mapping
+ repos to integer counts, like that returned from count_unread_news().
+ """
+ newsReaderDisplay = False
+ for repo, count in news_counts.items():
+ if count > 0:
+ if not newsReaderDisplay:
+ newsReaderDisplay = True
+ print()
+ print(colorize("WARN", " * IMPORTANT:"), end=' ')
+ print("%s news items need reading for repository '%s'." % (count, repo))
+
+ if newsReaderDisplay:
+ print(colorize("WARN", " *"), end=' ')
+ print("Use " + colorize("GOOD", "eselect news read") + " to view new items.")
+ print()
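A hedged sketch of driving these two helpers from an initialized portage environment; portage.db and portage.root are the conventional globals, though callers may obtain the dbapi objects differently:

    import portage
    from portage.news import count_unread_news, display_news_notifications

    portdb = portage.db[portage.root]['porttree'].dbapi
    vardb = portage.db[portage.root]['vartree'].dbapi

    news_counts = count_unread_news(portdb, vardb)
    display_news_notifications(news_counts)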
diff --git a/lib/portage/output.py b/lib/portage/output.py
new file mode 100644
index 000000000..1070d0ef3
--- /dev/null
+++ b/lib/portage/output.py
@@ -0,0 +1,844 @@
+# Copyright 1998-2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division
+
+__docformat__ = "epytext"
+
+import errno
+import io
+import re
+import subprocess
+import sys
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.util:writemsg',
+)
+import portage.util.formatter as formatter
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage import _unicode_decode
+from portage.const import COLOR_MAP_FILE
+from portage.exception import CommandNotFound, FileNotFound, \
+ ParseError, PermissionDenied, PortageException
+from portage.localization import _
+
+havecolor = 1
+dotitles = 1
+
+_styles = {}
+"""Maps style class to tuple of attribute names."""
+
+codes = {}
+"""Maps attribute name to ansi code."""
+
+esc_seq = "\x1b["
+
+codes["normal"] = esc_seq + "0m"
+codes["reset"] = esc_seq + "39;49;00m"
+
+codes["bold"] = esc_seq + "01m"
+codes["faint"] = esc_seq + "02m"
+codes["standout"] = esc_seq + "03m"
+codes["underline"] = esc_seq + "04m"
+codes["blink"] = esc_seq + "05m"
+codes["overline"] = esc_seq + "06m"
+codes["reverse"] = esc_seq + "07m"
+codes["invisible"] = esc_seq + "08m"
+
+codes["no-attr"] = esc_seq + "22m"
+codes["no-standout"] = esc_seq + "23m"
+codes["no-underline"] = esc_seq + "24m"
+codes["no-blink"] = esc_seq + "25m"
+codes["no-overline"] = esc_seq + "26m"
+codes["no-reverse"] = esc_seq + "27m"
+
+codes["bg_black"] = esc_seq + "40m"
+codes["bg_darkred"] = esc_seq + "41m"
+codes["bg_darkgreen"] = esc_seq + "42m"
+codes["bg_brown"] = esc_seq + "43m"
+codes["bg_darkblue"] = esc_seq + "44m"
+codes["bg_purple"] = esc_seq + "45m"
+codes["bg_teal"] = esc_seq + "46m"
+codes["bg_lightgray"] = esc_seq + "47m"
+codes["bg_default"] = esc_seq + "49m"
+codes["bg_darkyellow"] = codes["bg_brown"]
+
+def color(fg, bg="default", attr=["normal"]):
+ mystr = codes[fg]
+ for x in [bg]+attr:
+ mystr += codes[x]
+ return mystr
+
+
+ansi_codes = []
+for x in range(30, 38):
+ ansi_codes.append("%im" % x)
+ ansi_codes.append("%i;01m" % x)
+
+rgb_ansi_colors = ['0x000000', '0x555555', '0xAA0000', '0xFF5555', '0x00AA00',
+ '0x55FF55', '0xAA5500', '0xFFFF55', '0x0000AA', '0x5555FF', '0xAA00AA',
+ '0xFF55FF', '0x00AAAA', '0x55FFFF', '0xAAAAAA', '0xFFFFFF']
+
+for x in range(len(rgb_ansi_colors)):
+ codes[rgb_ansi_colors[x]] = esc_seq + ansi_codes[x]
+
+del x
+
+codes["black"] = codes["0x000000"]
+codes["darkgray"] = codes["0x555555"]
+
+codes["red"] = codes["0xFF5555"]
+codes["darkred"] = codes["0xAA0000"]
+
+codes["green"] = codes["0x55FF55"]
+codes["darkgreen"] = codes["0x00AA00"]
+
+codes["yellow"] = codes["0xFFFF55"]
+codes["brown"] = codes["0xAA5500"]
+
+codes["blue"] = codes["0x5555FF"]
+codes["darkblue"] = codes["0x0000AA"]
+
+codes["fuchsia"] = codes["0xFF55FF"]
+codes["purple"] = codes["0xAA00AA"]
+
+codes["turquoise"] = codes["0x55FFFF"]
+codes["teal"] = codes["0x00AAAA"]
+
+codes["white"] = codes["0xFFFFFF"]
+codes["lightgray"] = codes["0xAAAAAA"]
+
+codes["darkteal"] = codes["turquoise"]
+# Some terminals have darkyellow instead of brown.
+codes["0xAAAA00"] = codes["brown"]
+codes["darkyellow"] = codes["0xAAAA00"]
+
+
+
+# Colors from /etc/init.d/functions.sh
+_styles["NORMAL"] = ( "normal", )
+_styles["GOOD"] = ( "green", )
+_styles["WARN"] = ( "yellow", )
+_styles["BAD"] = ( "red", )
+_styles["HILITE"] = ( "teal", )
+_styles["BRACKET"] = ( "blue", )
+
+# Portage functions
+_styles["INFORM"] = ( "darkgreen", )
+_styles["UNMERGE_WARN"] = ( "red", )
+_styles["SECURITY_WARN"] = ( "red", )
+_styles["MERGE_LIST_PROGRESS"] = ( "yellow", )
+_styles["PKG_BLOCKER"] = ( "red", )
+_styles["PKG_BLOCKER_SATISFIED"] = ( "darkblue", )
+_styles["PKG_MERGE"] = ( "darkgreen", )
+_styles["PKG_MERGE_SYSTEM"] = ( "darkgreen", )
+_styles["PKG_MERGE_WORLD"] = ( "green", )
+_styles["PKG_BINARY_MERGE"] = ( "purple", )
+_styles["PKG_BINARY_MERGE_SYSTEM"] = ( "purple", )
+_styles["PKG_BINARY_MERGE_WORLD"] = ( "fuchsia", )
+_styles["PKG_UNINSTALL"] = ( "red", )
+_styles["PKG_NOMERGE"] = ( "darkblue", )
+_styles["PKG_NOMERGE_SYSTEM"] = ( "darkblue", )
+_styles["PKG_NOMERGE_WORLD"] = ( "blue", )
+_styles["PROMPT_CHOICE_DEFAULT"] = ( "green", )
+_styles["PROMPT_CHOICE_OTHER"] = ( "red", )
+
+def _parse_color_map(config_root='/', onerror=None):
+ """
+ Parse /etc/portage/color.map and update the global color codes and styles.
+
+ @param onerror: an optional callback to handle any ParseError that would
+ otherwise be raised
+ @type onerror: callable
+ @rtype: None
+ @return: None; the module-level 'codes' and '_styles' maps are updated in place
+ """
+ global codes, _styles
+ myfile = os.path.join(config_root, COLOR_MAP_FILE)
+ ansi_code_pattern = re.compile("^[0-9;]*m$")
+ quotes = '\'"'
+ def strip_quotes(token):
+ if token[0] in quotes and token[0] == token[-1]:
+ token = token[1:-1]
+ return token
+
+ try:
+ with io.open(_unicode_encode(myfile,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='replace') as f:
+ lines = f.readlines()
+ for lineno, line in enumerate(lines):
+ commenter_pos = line.find("#")
+ line = line[:commenter_pos].strip()
+
+ if len(line) == 0:
+ continue
+
+ split_line = line.split("=")
+ if len(split_line) != 2:
+ e = ParseError(_("'%s', line %s: expected exactly one occurrence of '=' operator") % \
+ (myfile, lineno))
+ if onerror:
+ onerror(e)
+ else:
+ raise e
+ continue
+
+ k = strip_quotes(split_line[0].strip())
+ v = strip_quotes(split_line[1].strip())
+ if not k in _styles and not k in codes:
+ e = ParseError(_("'%s', line %s: Unknown variable: '%s'") % \
+ (myfile, lineno, k))
+ if onerror:
+ onerror(e)
+ else:
+ raise e
+ continue
+ if ansi_code_pattern.match(v):
+ if k in _styles:
+ _styles[k] = ( esc_seq + v, )
+ elif k in codes:
+ codes[k] = esc_seq + v
+ else:
+ code_list = []
+ for x in v.split():
+ if x in codes:
+ if k in _styles:
+ code_list.append(x)
+ elif k in codes:
+ code_list.append(codes[x])
+ else:
+ e = ParseError(_("'%s', line %s: Undefined: '%s'") % \
+ (myfile, lineno, x))
+ if onerror:
+ onerror(e)
+ else:
+ raise e
+ if k in _styles:
+ _styles[k] = tuple(code_list)
+ elif k in codes:
+ codes[k] = "".join(code_list)
+ except (IOError, OSError) as e:
+ if e.errno == errno.ENOENT:
+ raise FileNotFound(myfile)
+ elif e.errno == errno.EACCES:
+ raise PermissionDenied(myfile)
+ raise
+
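As an illustration of the syntax this parser accepts, a small hypothetical /etc/portage/color.map (style classes take space-separated attribute names; color names may instead be given a raw ANSI parameter string ending in 'm'):

    # comments and blank lines are ignored
    WARN = "yellow bold"
    HILITE = "teal"
    darkgreen = "32m"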
+def nc_len(mystr):
+ # esc_seq ends with '[', so the concatenated pattern matches a whole ANSI
+ # escape sequence: ESC, '[', one or more non-'m' characters, then 'm'.
+ tmp = re.sub(esc_seq + "^m]+m", "", mystr)
+ return len(tmp)
+
+_legal_terms_re = re.compile(r'^(xterm|xterm-color|Eterm|aterm|rxvt|screen|kterm|rxvt-unicode|gnome|interix|tmux|st-256color)')
+_disable_xtermTitle = None
+_max_xtermTitle_len = 253
+
+def xtermTitle(mystr, raw=False):
+ global _disable_xtermTitle
+ if _disable_xtermTitle is None:
+ _disable_xtermTitle = not (sys.__stderr__.isatty() and \
+ 'TERM' in os.environ and \
+ _legal_terms_re.match(os.environ['TERM']) is not None)
+
+ if dotitles and not _disable_xtermTitle:
+ # If the title string is too big then the terminal can
+ # misbehave. Therefore, truncate it if it's too big.
+ if len(mystr) > _max_xtermTitle_len:
+ mystr = mystr[:_max_xtermTitle_len]
+ if not raw:
+ mystr = '\x1b]0;%s\x07' % mystr
+
+ # avoid potential UnicodeEncodeError
+ mystr = _unicode_encode(mystr,
+ encoding=_encodings['stdio'], errors='backslashreplace')
+ f = sys.stderr
+ if sys.hexversion >= 0x3000000:
+ f = f.buffer
+ f.write(mystr)
+ f.flush()
+
+default_xterm_title = None
+
+def xtermTitleReset():
+ global default_xterm_title
+ if default_xterm_title is None:
+ prompt_command = os.environ.get('PROMPT_COMMAND')
+ if prompt_command == "":
+ default_xterm_title = ""
+ elif prompt_command is not None:
+ if dotitles and \
+ 'TERM' in os.environ and \
+ _legal_terms_re.match(os.environ['TERM']) is not None and \
+ sys.__stderr__.isatty():
+ from portage.process import find_binary, spawn
+ shell = os.environ.get("SHELL")
+ if not shell or not os.access(shell, os.EX_OK):
+ shell = find_binary("sh")
+ if shell:
+ spawn([shell, "-c", prompt_command], env=os.environ,
+ fd_pipes={
+ 0: portage._get_stdin().fileno(),
+ 1: sys.__stderr__.fileno(),
+ 2: sys.__stderr__.fileno()
+ })
+ else:
+ os.system(prompt_command)
+ return
+ else:
+ pwd = os.environ.get('PWD','')
+ home = os.environ.get('HOME', '')
+ if home != '' and pwd.startswith(home):
+ pwd = '~' + pwd[len(home):]
+ default_xterm_title = '\x1b]0;%s@%s:%s\x07' % (
+ os.environ.get('LOGNAME', ''),
+ os.environ.get('HOSTNAME', '').split('.', 1)[0], pwd)
+ xtermTitle(default_xterm_title, raw=True)
+
+def notitles():
+ "turn off title setting"
+ global dotitles
+ dotitles = 0
+
+def nocolor():
+ "turn off colorization"
+ global havecolor
+ havecolor = 0
+
+def resetColor():
+ return codes["reset"]
+
+def style_to_ansi_code(style):
+ """
+ @param style: A style name
+ @type style: String
+ @rtype: String
+ @return: A string containing one or more ansi escape codes that are
+ used to render the given style.
+ """
+ ret = ""
+ for attr_name in _styles[style]:
+ # allow stuff that has found its way through ansi_code_pattern
+ ret += codes.get(attr_name, attr_name)
+ return ret
+
+def colormap():
+ mycolors = []
+ for c in ("GOOD", "WARN", "BAD", "HILITE", "BRACKET", "NORMAL"):
+ mycolors.append("%s=$'%s'" % (c, style_to_ansi_code(c)))
+ return "\n".join(mycolors)
+
+def colorize(color_key, text):
+ global havecolor
+ if havecolor:
+ if color_key in codes:
+ return codes[color_key] + text + codes["reset"]
+ elif color_key in _styles:
+ return style_to_ansi_code(color_key) + text + codes["reset"]
+ else:
+ return text
+ else:
+ return text
+
+compat_functions_colors = [
+ "bold", "white", "teal", "turquoise", "darkteal",
+ "fuchsia", "purple", "blue", "darkblue", "green", "darkgreen", "yellow",
+ "brown", "darkyellow", "red", "darkred",
+]
+
+class create_color_func(object):
+ __slots__ = ("_color_key",)
+ def __init__(self, color_key):
+ self._color_key = color_key
+ def __call__(self, text):
+ return colorize(self._color_key, text)
+
+for c in compat_functions_colors:
+ globals()[c] = create_color_func(c)
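The loop above creates one module-level helper per color name. A quick sketch of the resulting API (escape codes are emitted only while havecolor is set):

    from portage.output import colorize, green, red, nocolor

    print(green("ok") + " " + red("failed"))
    print(colorize("WARN", "a warning styled via the WARN style class"))

    nocolor()   # disable escapes, e.g. when stdout is not a tty
    assert colorize("GOOD", "plain text") == "plain text"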
+
+class ConsoleStyleFile(object):
+ """
+ A file-like object that behaves something like
+ the colorize() function. Style identifiers
+ passed in via the new_styles() method will be used to
+ apply console codes to output.
+ """
+ def __init__(self, f):
+ self._file = f
+ self._styles = None
+ self.write_listener = None
+
+ def new_styles(self, styles):
+ self._styles = styles
+
+ def write(self, s):
+ # In python-2.6, DumbWriter.send_line_break() can write
+ # non-unicode '\n' which fails with TypeError if self._file
+ # is a text stream such as io.StringIO. Therefore, make sure
+ # input is converted to unicode when necessary.
+ s = _unicode_decode(s)
+ global havecolor
+ if havecolor and self._styles:
+ styled_s = []
+ for style in self._styles:
+ styled_s.append(style_to_ansi_code(style))
+ styled_s.append(s)
+ styled_s.append(codes["reset"])
+ self._write(self._file, "".join(styled_s))
+ else:
+ self._write(self._file, s)
+ if self.write_listener:
+ self._write(self.write_listener, s)
+
+ def _write(self, f, s):
+ # avoid potential UnicodeEncodeError
+ if f in (sys.stdout, sys.stderr):
+ s = _unicode_encode(s,
+ encoding=_encodings['stdio'], errors='backslashreplace')
+ if sys.hexversion >= 0x3000000:
+ f = f.buffer
+ f.write(s)
+
+ def writelines(self, lines):
+ for s in lines:
+ self.write(s)
+
+ def flush(self):
+ self._file.flush()
+
+ def close(self):
+ self._file.close()
+
+class StyleWriter(formatter.DumbWriter):
+ """
+ This is just a DumbWriter with a hook in the new_styles() method
+ that passes a styles tuple as a single argument to a callable
+ style_listener attribute.
+ """
+ def __init__(self, **kwargs):
+ formatter.DumbWriter.__init__(self, **kwargs)
+ self.style_listener = None
+
+ def new_styles(self, styles):
+ formatter.DumbWriter.new_styles(self, styles)
+ if self.style_listener:
+ self.style_listener(styles)
+
+def get_term_size(fd=None):
+ """
+ Get the number of lines and columns of the tty that is connected to
+ fd. Returns a tuple of (lines, columns) or (0, 0) if an error
+ occurs. The curses module is used if available, otherwise the output of
+ `stty size` is parsed. The lines and columns values are guaranteed to be
+ greater than or equal to zero, since a negative COLUMNS variable is
+ known to prevent some commands from working (see bug #394091).
+ """
+ if fd is None:
+ fd = sys.stdout
+ if not hasattr(fd, 'isatty') or not fd.isatty():
+ return (0, 0)
+ try:
+ import curses
+ try:
+ curses.setupterm(term=os.environ.get("TERM", "unknown"),
+ fd=fd.fileno())
+ return curses.tigetnum('lines'), curses.tigetnum('cols')
+ except curses.error:
+ pass
+ except ImportError:
+ pass
+
+ try:
+ proc = subprocess.Popen(["stty", "size"],
+ stdout=subprocess.PIPE, stderr=fd)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ # stty command not found
+ return (0, 0)
+
+ out = _unicode_decode(proc.communicate()[0])
+ if proc.wait() == os.EX_OK:
+ out = out.split()
+ if len(out) == 2:
+ try:
+ val = (int(out[0]), int(out[1]))
+ except ValueError:
+ pass
+ else:
+ if val[0] >= 0 and val[1] >= 0:
+ return val
+ return (0, 0)
+
+def set_term_size(lines, columns, fd):
+ """
+ Set the number of lines and columns for the tty that is connected to fd.
+ For portability, this simply calls `stty rows $lines columns $columns`.
+ """
+ from portage.process import spawn
+ cmd = ["stty", "rows", str(lines), "columns", str(columns)]
+ try:
+ spawn(cmd, env=os.environ, fd_pipes={0:fd})
+ except CommandNotFound:
+ writemsg(_("portage: stty: command not found\n"), noiselevel=-1)
+
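A small usage sketch for the two helpers above; the fallback values are illustrative, and set_term_size() is left commented out because it changes the tty it is pointed at:

    import sys
    from portage.output import get_term_size, set_term_size

    lines, columns = get_term_size(sys.stdout)
    if (lines, columns) == (0, 0):
        # not a tty, or the size could not be determined
        lines, columns = 24, 80
    # set_term_size(lines, columns, slave_fd)  # e.g. to size a child pty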
+class EOutput(object):
+ """
+ Performs fancy terminal formatting for status and informational messages.
+
+ The provided methods produce identical terminal output to the eponymous
+ functions in the shell script C{/sbin/functions.sh} and also accept
+ identical parameters.
+
+ This is not currently a drop-in replacement however, as the output-related
+ functions in C{/sbin/functions.sh} are oriented for use mainly by system
+ init scripts and ebuilds and their output can be customized via certain
+ C{RC_*} environment variables (see C{/etc/conf.d/rc}). B{EOutput} is not
+ customizable in this manner since it's intended for more general uses.
+ Likewise, no logging is provided.
+
+ @ivar quiet: Specifies if output should be silenced.
+ @type quiet: BooleanType
+ @ivar term_columns: Width of terminal in characters. Defaults to the value
+ specified by the shell's C{COLUMNS} variable, else to the queried tty
+ size, else to C{80}.
+ @type term_columns: IntType
+ """
+
+ def __init__(self, quiet=False):
+ self.__last_e_cmd = ""
+ self.__last_e_len = 0
+ self.quiet = quiet
+ lines, columns = get_term_size()
+ if columns <= 0:
+ columns = 80
+ self.term_columns = columns
+ sys.stdout.flush()
+ sys.stderr.flush()
+
+ def _write(self, f, s):
+ # avoid potential UnicodeEncodeError
+ writemsg(s, noiselevel=-1, fd=f)
+
+ def __eend(self, caller, errno, msg):
+ if errno == 0:
+ status_brackets = colorize("BRACKET", "[ ") + colorize("GOOD", "ok") + colorize("BRACKET", " ]")
+ else:
+ status_brackets = colorize("BRACKET", "[ ") + colorize("BAD", "!!") + colorize("BRACKET", " ]")
+ if msg:
+ if caller == "eend":
+ self.eerror(msg[0])
+ elif caller == "ewend":
+ self.ewarn(msg[0])
+ if self.__last_e_cmd != "ebegin":
+ self.__last_e_len = 0
+ if not self.quiet:
+ out = sys.stdout
+ self._write(out,
+ "%*s%s\n" % ((self.term_columns - self.__last_e_len - 7),
+ "", status_brackets))
+
+ def ebegin(self, msg):
+ """
+ Shows a message indicating the start of a process.
+
+ @param msg: A very brief (shorter than one line) description of the
+ starting process.
+ @type msg: StringType
+ """
+ msg += " ..."
+ if not self.quiet:
+ self.einfon(msg)
+ self.__last_e_len = len(msg) + 3
+ self.__last_e_cmd = "ebegin"
+
+ def eend(self, errno, *msg):
+ """
+ Indicates the completion of a process, optionally displaying a message
+ via L{eerror} if the process's exit status isn't C{0}.
+
+ @param errno: A standard UNIX C{errno} code returned by processes upon
+ exit.
+ @type errno: IntType
+ @param msg: I{(optional)} An error message, typically a standard UNIX
+ error string corresponding to C{errno}.
+ @type msg: StringType
+ """
+ if not self.quiet:
+ self.__eend("eend", errno, msg)
+ self.__last_e_cmd = "eend"
+
+ def eerror(self, msg):
+ """
+ Shows an error message.
+
+ @param msg: A very brief (shorter than one line) error message.
+ @type msg: StringType
+ """
+ out = sys.stderr
+ if not self.quiet:
+ if self.__last_e_cmd == "ebegin":
+ self._write(out, "\n")
+ self._write(out, colorize("BAD", " * ") + msg + "\n")
+ self.__last_e_cmd = "eerror"
+
+ def einfo(self, msg):
+ """
+ Shows an informative message terminated with a newline.
+
+ @param msg: A very brief (shorter than one line) informative message.
+ @type msg: StringType
+ """
+ out = sys.stdout
+ if not self.quiet:
+ if self.__last_e_cmd == "ebegin":
+ self._write(out, "\n")
+ self._write(out, colorize("GOOD", " * ") + msg + "\n")
+ self.__last_e_cmd = "einfo"
+
+ def einfon(self, msg):
+ """
+ Shows an informative message terminated without a newline.
+
+ @param msg: A very brief (shorter than one line) informative message.
+ @type msg: StringType
+ """
+ out = sys.stdout
+ if not self.quiet:
+ if self.__last_e_cmd == "ebegin":
+ self._write(out, "\n")
+ self._write(out, colorize("GOOD", " * ") + msg)
+ self.__last_e_cmd = "einfon"
+
+ def ewarn(self, msg):
+ """
+ Shows a warning message.
+
+ @param msg: A very brief (shorter than one line) warning message.
+ @type msg: StringType
+ """
+ out = sys.stderr
+ if not self.quiet:
+ if self.__last_e_cmd == "ebegin":
+ self._write(out, "\n")
+ self._write(out, colorize("WARN", " * ") + msg + "\n")
+ self.__last_e_cmd = "ewarn"
+
+ def ewend(self, errno, *msg):
+ """
+ Indicates the completion of a process, optionally displaying a message
+ via L{ewarn} if the process's exit status isn't C{0}.
+
+ @param errno: A standard UNIX C{errno} code returned by processes upon
+ exit.
+ @type errno: IntType
+ @param msg: I{(optional)} A warning message, typically a standard UNIX
+ error string corresponding to C{errno}.
+ @type msg: StringType
+ """
+ if not self.quiet:
+ self.__eend("ewend", errno, msg)
+ self.__last_e_cmd = "ewend"
+
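A hedged usage sketch of EOutput; do_update() is a hypothetical work function standing in for whatever the caller does between ebegin() and eend():

    from portage.output import EOutput

    out = EOutput()
    out.einfo("Checking configuration")
    out.ebegin("Updating cache")
    try:
        do_update()              # hypothetical work function
    except Exception as e:
        out.eend(1, str(e))      # prints the [ !! ] marker and an eerror line
    else:
        out.eend(0)              # prints the [ ok ] marker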
+class ProgressBar(object):
+ """The interface is copied from the ProgressBar class from the EasyDialogs
+ module (which is Mac only)."""
+ def __init__(self, title=None, maxval=0, label=None, max_desc_length=25):
+ self._title = title or ""
+ self._maxval = maxval
+ self._label = label or ""
+ self._curval = 0
+ self._desc = ""
+ self._desc_max_length = max_desc_length
+ self._set_desc()
+
+ @property
+ def curval(self):
+ """
+ The current value (of type integer or long integer) of the progress
+ bar. The normal access methods coerce curval between 0 and maxval. This
+ attribute should not be altered directly.
+ """
+ return self._curval
+
+ @property
+ def maxval(self):
+ """
+ The maximum value (of type integer or long integer) of the progress
+ bar; the progress bar (thermometer style) is full when curval equals
+ maxval. If maxval is 0, the bar will be indeterminate (barber-pole).
+ This attribute should not be altered directly.
+ """
+ return self._maxval
+
+ def title(self, newstr):
+ """Sets the text in the title bar of the progress dialog to newstr."""
+ self._title = newstr
+ self._set_desc()
+
+ def label(self, newstr):
+ """Sets the text in the progress box of the progress dialog to newstr."""
+ self._label = newstr
+ self._set_desc()
+
+ def _set_desc(self):
+ self._desc = "%s%s" % (
+ "%s: " % self._title if self._title else "",
+ "%s" % self._label if self._label else ""
+ )
+ if len(self._desc) > self._desc_max_length: # truncate if too long
+ self._desc = "%s..." % self._desc[:self._desc_max_length - 3]
+ if len(self._desc):
+ self._desc = self._desc.ljust(self._desc_max_length)
+
+
+ def set(self, value, maxval=None):
+ """
+ Sets the progress bar's curval to value, and also maxval to max if the
+ latter is provided. value is first coerced between 0 and maxval. The
+ thermometer bar is updated to reflect the changes, including a change
+ from indeterminate to determinate or vice versa.
+ """
+ if maxval is not None:
+ self._maxval = maxval
+ if value < 0:
+ value = 0
+ elif value > self._maxval:
+ value = self._maxval
+ self._curval = value
+
+ def inc(self, n=1):
+ """Increments the progress bar's curval by n, or by 1 if n is not
+ provided. (Note that n may be negative, in which case the effect is a
+ decrement.) The progress bar is updated to reflect the change. If the
+ bar is indeterminate, this causes one ``spin'' of the barber pole. The
+ resulting curval is coerced between 0 and maxval if incrementing causes
+ it to fall outside this range.
+ """
+ self.set(self._curval+n)
+
+class TermProgressBar(ProgressBar):
+ """A tty progress bar similar to wget's."""
+ def __init__(self, fd=sys.stdout, **kwargs):
+ ProgressBar.__init__(self, **kwargs)
+ lines, self.term_columns = get_term_size(fd)
+ self.file = fd
+ self._min_columns = 11
+ self._max_columns = 80
+ # for indeterminate mode, ranges from 0.0 to 1.0
+ self._position = 0.0
+
+ def set(self, value, maxval=None):
+ ProgressBar.set(self, value, maxval=maxval)
+ self._display_image(self._create_image())
+
+ def _display_image(self, image):
+ self.file.write('\r')
+ self.file.write(image)
+ self.file.flush()
+
+ def _create_image(self):
+ cols = self.term_columns
+ if cols > self._max_columns:
+ cols = self._max_columns
+ min_columns = self._min_columns
+ curval = self._curval
+ maxval = self._maxval
+ position = self._position
+ percentage_str_width = 5
+ square_brackets_width = 2
+ if cols < percentage_str_width:
+ return ""
+ bar_space = cols - percentage_str_width - square_brackets_width - 1
+ if self._desc:
+ bar_space -= self._desc_max_length
+ if maxval == 0:
+ max_bar_width = bar_space-3
+ _percent = "".ljust(percentage_str_width)
+ if cols < min_columns:
+ return ""
+ if position <= 0.5:
+ offset = 2 * position
+ else:
+ offset = 2 * (1 - position)
+ delta = 0.5 / max_bar_width
+ position += delta
+ if position >= 1.0:
+ position = 0.0
+ # make sure it touches the ends
+ if 1.0 - position < delta:
+ position = 1.0
+ if position < 0.5 and 0.5 - position < delta:
+ position = 0.5
+ self._position = position
+ bar_width = int(offset * max_bar_width)
+ image = "%s%s%s" % (self._desc, _percent,
+ "[" + (bar_width * " ") + \
+ "<=>" + ((max_bar_width - bar_width) * " ") + "]")
+ return image
+ else:
+ percentage = 100 * curval // maxval
+ max_bar_width = bar_space - 1
+ _percent = ("%d%% " % percentage).rjust(percentage_str_width)
+ image = "%s%s" % (self._desc, _percent)
+
+ if cols < min_columns:
+ return image
+ offset = curval / maxval
+ bar_width = int(offset * max_bar_width)
+ image = image + "[" + (bar_width * "=") + \
+ ">" + ((max_bar_width - bar_width) * " ") + "]"
+ return image
+
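A sketch of TermProgressBar in both modes; the title/label strings and sleep calls are only there to make the redraws visible:

    import time
    from portage.output import TermProgressBar

    bar = TermProgressBar(title="fetch", maxval=100, label="example.tar.gz")
    for i in range(101):
        bar.set(i)               # redraws "fetch: example.tar.gz   42% [==>  ]"
        time.sleep(0.01)
    print()                      # move off the progress line

    spinner = TermProgressBar(title="scan", maxval=0)   # indeterminate mode
    for _ in range(50):
        spinner.inc()            # one "spin" of the barber pole per call
        time.sleep(0.01)
    print()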
+_color_map_loaded = False
+
+def _init(config_root='/'):
+ """
+ Load color.map from the given config_root. This is called automatically
+ on first access of the codes or _styles attributes (unless it has already
+ been called for some other reason).
+ """
+
+ global _color_map_loaded, codes, _styles
+ if _color_map_loaded:
+ return
+
+ _color_map_loaded = True
+ codes = object.__getattribute__(codes, '_attr')
+ _styles = object.__getattribute__(_styles, '_attr')
+
+ for k, v in codes.items():
+ codes[k] = _unicode_decode(v)
+
+ for k, v in _styles.items():
+ _styles[k] = _unicode_decode(v)
+
+ try:
+ _parse_color_map(config_root=config_root,
+ onerror=lambda e: writemsg("%s\n" % str(e), noiselevel=-1))
+ except FileNotFound:
+ pass
+ except PermissionDenied as e:
+ writemsg(_("Permission denied: '%s'\n") % str(e), noiselevel=-1)
+ del e
+ except PortageException as e:
+ writemsg("%s\n" % str(e), noiselevel=-1)
+ del e
+
+class _LazyInitColorMap(portage.proxy.objectproxy.ObjectProxy):
+
+ __slots__ = ('_attr',)
+
+ def __init__(self, attr):
+ portage.proxy.objectproxy.ObjectProxy.__init__(self)
+ object.__setattr__(self, '_attr', attr)
+
+ def _get_target(self):
+ _init()
+ return object.__getattribute__(self, '_attr')
+
+codes = _LazyInitColorMap(codes)
+_styles = _LazyInitColorMap(_styles)
diff --git a/lib/portage/package/__init__.py b/lib/portage/package/__init__.py
new file mode 100644
index 000000000..21a391aee
--- /dev/null
+++ b/lib/portage/package/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/package/ebuild/__init__.py b/lib/portage/package/ebuild/__init__.py
new file mode 100644
index 000000000..21a391aee
--- /dev/null
+++ b/lib/portage/package/ebuild/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/package/ebuild/_config/KeywordsManager.py b/lib/portage/package/ebuild/_config/KeywordsManager.py
new file mode 100644
index 000000000..fd0a6318d
--- /dev/null
+++ b/lib/portage/package/ebuild/_config/KeywordsManager.py
@@ -0,0 +1,325 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'KeywordsManager',
+)
+
+from _emerge.Package import Package
+from portage import os
+from portage.dep import ExtendedAtomDict, _repo_separator, _slot_separator
+from portage.localization import _
+from portage.package.ebuild._config.helper import ordered_by_atom_specificity
+from portage.util import grabdict_package, stack_lists, writemsg
+from portage.versions import _pkg_str
+
+class KeywordsManager(object):
+ """Manager class to handle keywords processing and validation"""
+
+ def __init__(self, profiles, abs_user_config, user_config=True,
+ global_accept_keywords=""):
+ self._pkeywords_list = []
+ rawpkeywords = [grabdict_package(
+ os.path.join(x.location, "package.keywords"),
+ recursive=x.portage1_directories,
+ verify_eapi=True, eapi=x.eapi, eapi_default=None,
+ allow_build_id=x.allow_build_id)
+ for x in profiles]
+ for pkeyworddict in rawpkeywords:
+ if not pkeyworddict:
+ # Omit non-existent files from the stack.
+ continue
+ cpdict = {}
+ for k, v in pkeyworddict.items():
+ cpdict.setdefault(k.cp, {})[k] = v
+ self._pkeywords_list.append(cpdict)
+ self._pkeywords_list = tuple(self._pkeywords_list)
+
+ self._p_accept_keywords = []
+ raw_p_accept_keywords = [grabdict_package(
+ os.path.join(x.location, "package.accept_keywords"),
+ recursive=x.portage1_directories,
+ verify_eapi=True, eapi=x.eapi, eapi_default=None)
+ for x in profiles]
+ for d in raw_p_accept_keywords:
+ if not d:
+ # Omit non-existent files from the stack.
+ continue
+ cpdict = {}
+ for k, v in d.items():
+ cpdict.setdefault(k.cp, {})[k] = tuple(v)
+ self._p_accept_keywords.append(cpdict)
+ self._p_accept_keywords = tuple(self._p_accept_keywords)
+
+ self.pkeywordsdict = ExtendedAtomDict(dict)
+
+ if user_config:
+ pkgdict = grabdict_package(
+ os.path.join(abs_user_config, "package.keywords"),
+ recursive=1, allow_wildcard=True, allow_repo=True,
+ verify_eapi=False, allow_build_id=True)
+
+ for k, v in grabdict_package(
+ os.path.join(abs_user_config, "package.accept_keywords"),
+ recursive=1, allow_wildcard=True, allow_repo=True,
+ verify_eapi=False, allow_build_id=True).items():
+ pkgdict.setdefault(k, []).extend(v)
+
+ accept_keywords_defaults = global_accept_keywords.split()
+ accept_keywords_defaults = tuple('~' + keyword for keyword in \
+ accept_keywords_defaults if keyword[:1] not in "~-")
+ for k, v in pkgdict.items():
+ # default to ~arch if no specific keyword is given
+ if not v:
+ v = accept_keywords_defaults
+ else:
+ v = tuple(v)
+ self.pkeywordsdict.setdefault(k.cp, {})[k] = v
+
+
+ def getKeywords(self, cpv, slot, keywords, repo):
+ try:
+ cpv.slot
+ except AttributeError:
+ pkg = _pkg_str(cpv, slot=slot, repo=repo)
+ else:
+ pkg = cpv
+ cp = pkg.cp
+ keywords = [[x for x in keywords.split() if x != "-*"]]
+ for pkeywords_dict in self._pkeywords_list:
+ cpdict = pkeywords_dict.get(cp)
+ if cpdict:
+ pkg_keywords = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_keywords:
+ keywords.extend(pkg_keywords)
+ return stack_lists(keywords, incremental=True)
+
+ def isStable(self, pkg, global_accept_keywords, backuped_accept_keywords):
+ mygroups = self.getKeywords(pkg, None, pkg._metadata["KEYWORDS"], None)
+ pgroups = global_accept_keywords.split()
+
+ unmaskgroups = self.getPKeywords(pkg, None, None,
+ global_accept_keywords)
+ pgroups.extend(unmaskgroups)
+
+ egroups = backuped_accept_keywords.split()
+
+ if unmaskgroups or egroups:
+ pgroups = self._getEgroups(egroups, pgroups)
+ else:
+ pgroups = set(pgroups)
+
+ if self._getMissingKeywords(pkg, pgroups, mygroups):
+ return False
+
+ # If replacing all keywords with unstable variants would mask the
+ # package, then it's considered stable for the purposes of
+ # use.stable.mask/force interpretation. For unstable configurations,
+ # this guarantees that the effective use.force/mask settings for a
+ # particular ebuild do not change when that ebuild is stabilized.
+ unstable = []
+ for kw in mygroups:
+ if kw[:1] != "~":
+ kw = "~" + kw
+ unstable.append(kw)
+
+ return bool(self._getMissingKeywords(pkg, pgroups, set(unstable)))
+
+ def getMissingKeywords(self,
+ cpv,
+ slot,
+ keywords,
+ repo,
+ global_accept_keywords,
+ backuped_accept_keywords):
+ """
+ Take a package and return a list of any KEYWORDS that the user may
+ need to accept for the given package. If the KEYWORDS are empty
+ and the ** keyword has not been accepted, the returned list will
+ contain ** alone (in order to distinguish from the case of "none
+ missing").
+
+ @param cpv: The package name (for package.keywords support)
+ @type cpv: String
+ @param slot: The 'SLOT' key from the raw package metadata
+ @type slot: String
+ @param keywords: The 'KEYWORDS' key from the raw package metadata
+ @type keywords: String
+ @param global_accept_keywords: The current value of ACCEPT_KEYWORDS
+ @type global_accept_keywords: String
+ @param backuped_accept_keywords: ACCEPT_KEYWORDS from the backup env
+ @type backuped_accept_keywords: String
+ @rtype: List
+ @return: A list of KEYWORDS that have not been accepted.
+ """
+
+ mygroups = self.getKeywords(cpv, slot, keywords, repo)
+ # Repoman may modify this attribute as necessary.
+ pgroups = global_accept_keywords.split()
+
+ unmaskgroups = self.getPKeywords(cpv, slot, repo,
+ global_accept_keywords)
+ pgroups.extend(unmaskgroups)
+
+ # Hack: Need to check the env directly here as otherwise stacking
+ # doesn't work properly as negative values are lost in the config
+ # object (bug #139600)
+ egroups = backuped_accept_keywords.split()
+
+ if unmaskgroups or egroups:
+ pgroups = self._getEgroups(egroups, pgroups)
+ else:
+ pgroups = set(pgroups)
+
+ return self._getMissingKeywords(cpv, pgroups, mygroups)
+
+
+ def getRawMissingKeywords(self,
+ cpv,
+ slot,
+ keywords,
+ repo,
+ global_accept_keywords):
+ """
+ Take a package and return a list of any KEYWORDS that the user may
+ need to accept for the given package. If the KEYWORDS are empty,
+ the returned list will contain ** alone (in order to distinguish
+ from the case of "none missing"). This DOES NOT apply any user config
+ package.accept_keywords acceptance.
+
+ @param cpv: The package name (for package.keywords support)
+ @type cpv: String
+ @param slot: The 'SLOT' key from the raw package metadata
+ @type slot: String
+ @param keywords: The 'KEYWORDS' key from the raw package metadata
+ @type keywords: String
+ @param global_accept_keywords: The current value of ACCEPT_KEYWORDS
+ @type global_accept_keywords: String
+ @rtype: List
+ @return: lists of KEYWORDS that have not been accepted
+ and the keywords it looked for.
+ """
+
+ mygroups = self.getKeywords(cpv, slot, keywords, repo)
+ pgroups = global_accept_keywords.split()
+ pgroups = set(pgroups)
+ return self._getMissingKeywords(cpv, pgroups, mygroups)
+
+
+ @staticmethod
+ def _getEgroups(egroups, mygroups):
+ """gets any keywords defined in the environment
+
+ @param backuped_accept_keywords: ACCEPT_KEYWORDS from the backup env
+ @type backuped_accept_keywords: String
+ @rtype: List
+ @return: list of KEYWORDS that have been accepted
+ """
+ mygroups = list(mygroups)
+ mygroups.extend(egroups)
+ inc_pgroups = set()
+ for x in mygroups:
+ if x[:1] == "-":
+ if x == "-*":
+ inc_pgroups.clear()
+ else:
+ inc_pgroups.discard(x[1:])
+ else:
+ inc_pgroups.add(x)
+ return inc_pgroups
+
+
+ @staticmethod
+ def _getMissingKeywords(cpv, pgroups, mygroups):
+ """Determines the missing keywords
+
+ @param pgroups: The pkg keywords accepted
+ @type pgroups: list
+ @param mygroups: The ebuild keywords
+ @type mygroups: list
+ """
+ match = False
+ hasstable = False
+ hastesting = False
+ for gp in mygroups:
+ if gp == "*":
+ match = True
+ break
+ elif gp == "~*":
+ hastesting = True
+ for x in pgroups:
+ if x[:1] == "~":
+ match = True
+ break
+ if match:
+ break
+ elif gp in pgroups:
+ match = True
+ break
+ elif gp.startswith("~"):
+ hastesting = True
+ elif not gp.startswith("-"):
+ hasstable = True
+ if not match and \
+ ((hastesting and "~*" in pgroups) or \
+ (hasstable and "*" in pgroups) or "**" in pgroups):
+ match = True
+ if match:
+ missing = []
+ else:
+ if not mygroups:
+ # If KEYWORDS is empty then we still have to return something
+ # in order to distinguish from the case of "none missing".
+ mygroups = ["**"]
+ missing = mygroups
+ return missing
+
+
+ def getPKeywords(self, cpv, slot, repo, global_accept_keywords):
+ """Gets any package.keywords settings for cp for the given
+ cpv, slot and repo
+
+ @param cpv: The package name (for package.keywords support)
+ @type cpv: String
+ @param slot: The 'SLOT' key from the raw package metadata
+ @type slot: String
+ @param repo: The repository name
+ @type repo: String
+ @param global_accept_keywords: The current value of ACCEPT_KEYWORDS
+ @type global_accept_keywords: String
+ @rtype: List
+ @return: list of KEYWORDS that have been accepted
+ """
+
+ pgroups = global_accept_keywords.split()
+ try:
+ cpv.slot
+ except AttributeError:
+ cpv = _pkg_str(cpv, slot=slot, repo=repo)
+ cp = cpv.cp
+
+ unmaskgroups = []
+ if self._p_accept_keywords:
+ accept_keywords_defaults = tuple('~' + keyword for keyword in \
+ pgroups if keyword[:1] not in "~-")
+ for d in self._p_accept_keywords:
+ cpdict = d.get(cp)
+ if cpdict:
+ pkg_accept_keywords = \
+ ordered_by_atom_specificity(cpdict, cpv)
+ if pkg_accept_keywords:
+ for x in pkg_accept_keywords:
+ if not x:
+ x = accept_keywords_defaults
+ unmaskgroups.extend(x)
+
+ pkgdict = self.pkeywordsdict.get(cp)
+ if pkgdict:
+ pkg_accept_keywords = \
+ ordered_by_atom_specificity(pkgdict, cpv)
+ if pkg_accept_keywords:
+ for x in pkg_accept_keywords:
+ unmaskgroups.extend(x)
+ return unmaskgroups
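The keyword handling above amounts to incremental stacking of ACCEPT_KEYWORDS tokens followed by a visibility check against the package's KEYWORDS. A minimal standalone sketch of those two steps, roughly mirroring _getEgroups() and _getMissingKeywords(); the keyword values are illustrative and the real methods work on _pkg_str objects and profile data:

    # Incremental stacking: "-*" clears everything, "-kw" drops one token.
    def stack_accept_keywords(base, extra):
        accepted = set()
        for token in list(base) + list(extra):
            if token == "-*":
                accepted.clear()
            elif token.startswith("-"):
                accepted.discard(token[1:])
            else:
                accepted.add(token)
        return accepted

    # Returns [] when some package keyword is accepted, otherwise the
    # package's KEYWORDS (or ["**"] when KEYWORDS is empty).
    def missing_keywords(pkg_keywords, accepted):
        match = False
        has_stable = has_testing = False
        for kw in pkg_keywords:
            if kw == "*":
                match = True
            elif kw == "~*":
                has_testing = True
                match = match or any(a.startswith("~") for a in accepted)
            elif kw in accepted:
                match = True
            elif kw.startswith("~"):
                has_testing = True
            elif not kw.startswith("-"):
                has_stable = True
            if match:
                break
        if not match and ((has_testing and "~*" in accepted)
                or (has_stable and "*" in accepted) or "**" in accepted):
            match = True
        if match:
            return []
        return list(pkg_keywords) or ["**"]

    accepted = stack_accept_keywords(["amd64"], ["~amd64"])
    print(missing_keywords(["~amd64", "~x86"], accepted))  # []
    print(missing_keywords(["x86"], accepted))             # ['x86']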
diff --git a/lib/portage/package/ebuild/_config/LicenseManager.py b/lib/portage/package/ebuild/_config/LicenseManager.py
new file mode 100644
index 000000000..1d4e08207
--- /dev/null
+++ b/lib/portage/package/ebuild/_config/LicenseManager.py
@@ -0,0 +1,237 @@
+# Copyright 2010-2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'LicenseManager',
+)
+
+from portage import os
+from portage.dep import ExtendedAtomDict, use_reduce
+from portage.exception import InvalidDependString
+from portage.localization import _
+from portage.util import grabdict, grabdict_package, writemsg
+from portage.versions import cpv_getkey, _pkg_str
+
+from portage.package.ebuild._config.helper import ordered_by_atom_specificity
+
+
+class LicenseManager(object):
+
+ def __init__(self, license_group_locations, abs_user_config, user_config=True):
+
+ self._accept_license_str = None
+ self._accept_license = None
+ self._license_groups = {}
+ self._plicensedict = ExtendedAtomDict(dict)
+ self._undef_lic_groups = set()
+
+ if user_config:
+ license_group_locations = list(license_group_locations) + [abs_user_config]
+
+ self._read_license_groups(license_group_locations)
+
+ if user_config:
+ self._read_user_config(abs_user_config)
+
+ def _read_user_config(self, abs_user_config):
+ licdict = grabdict_package(os.path.join(
+ abs_user_config, "package.license"), recursive=1, allow_wildcard=True, allow_repo=True, verify_eapi=False)
+ for k, v in licdict.items():
+ self._plicensedict.setdefault(k.cp, {})[k] = \
+ self.expandLicenseTokens(v)
+
+ def _read_license_groups(self, locations):
+ for loc in locations:
+ for k, v in grabdict(
+ os.path.join(loc, "license_groups")).items():
+ self._license_groups.setdefault(k, []).extend(v)
+
+ for k, v in self._license_groups.items():
+ self._license_groups[k] = frozenset(v)
+
+ def extract_global_changes(self, old=""):
+ ret = old
+ atom_license_map = self._plicensedict.get("*/*")
+ if atom_license_map is not None:
+ v = atom_license_map.pop("*/*", None)
+ if v is not None:
+ ret = " ".join(v)
+ if old:
+ ret = old + " " + ret
+ if not atom_license_map:
+ #No tokens left in atom_license_map, remove it.
+ del self._plicensedict["*/*"]
+ return ret
+
+ def expandLicenseTokens(self, tokens):
+		"""Take each token from ACCEPT_LICENSE or package.license and expand
+		it if it's a group token (indicated by @), or return it unchanged if
+		it's not a group. If a group is negated then negate all group
+		elements."""
+ expanded_tokens = []
+ for x in tokens:
+ expanded_tokens.extend(self._expandLicenseToken(x, None))
+ return expanded_tokens
+
+ def _expandLicenseToken(self, token, traversed_groups):
+ negate = False
+ rValue = []
+ if token.startswith("-"):
+ negate = True
+ license_name = token[1:]
+ else:
+ license_name = token
+ if not license_name.startswith("@"):
+ rValue.append(token)
+ return rValue
+ group_name = license_name[1:]
+ if traversed_groups is None:
+ traversed_groups = set()
+ license_group = self._license_groups.get(group_name)
+ if group_name in traversed_groups:
+ writemsg(_("Circular license group reference"
+ " detected in '%s'\n") % group_name, noiselevel=-1)
+ rValue.append("@"+group_name)
+ elif license_group:
+ traversed_groups.add(group_name)
+ for l in license_group:
+ if l.startswith("-"):
+ writemsg(_("Skipping invalid element %s"
+ " in license group '%s'\n") % (l, group_name),
+ noiselevel=-1)
+ else:
+ rValue.extend(self._expandLicenseToken(l, traversed_groups))
+ else:
+ if self._license_groups and \
+ group_name not in self._undef_lic_groups:
+ self._undef_lic_groups.add(group_name)
+ writemsg(_("Undefined license group '%s'\n") % group_name,
+ noiselevel=-1)
+ rValue.append("@"+group_name)
+ if negate:
+ rValue = ["-" + token for token in rValue]
+ return rValue
+
+ def _getPkgAcceptLicense(self, cpv, slot, repo):
+ """
+ Get an ACCEPT_LICENSE list, accounting for package.license.
+ """
+ accept_license = self._accept_license
+ cp = cpv_getkey(cpv)
+ cpdict = self._plicensedict.get(cp)
+ if cpdict:
+ if not hasattr(cpv, "slot"):
+ cpv = _pkg_str(cpv, slot=slot, repo=repo)
+ plicence_list = ordered_by_atom_specificity(cpdict, cpv)
+ if plicence_list:
+ accept_license = list(self._accept_license)
+ for x in plicence_list:
+ accept_license.extend(x)
+ return accept_license
+
+ def get_prunned_accept_license(self, cpv, use, lic, slot, repo):
+ """
+ Generate a pruned version of ACCEPT_LICENSE, by intersection with
+ LICENSE. This is required since otherwise ACCEPT_LICENSE might be
+ too big (bigger than ARG_MAX), causing execve() calls to fail with
+ E2BIG errors as in bug #262647.
+ """
+ try:
+ licenses = set(use_reduce(lic, uselist=use, flat=True))
+ except InvalidDependString:
+ licenses = set()
+ licenses.discard('||')
+
+ accept_license = self._getPkgAcceptLicense(cpv, slot, repo)
+
+ if accept_license:
+ acceptable_licenses = set()
+ for x in accept_license:
+ if x == '*':
+ acceptable_licenses.update(licenses)
+ elif x == '-*':
+ acceptable_licenses.clear()
+ elif x[:1] == '-':
+ acceptable_licenses.discard(x[1:])
+ elif x in licenses:
+ acceptable_licenses.add(x)
+
+ licenses = acceptable_licenses
+ return ' '.join(sorted(licenses))
+
+ def getMissingLicenses(self, cpv, use, lic, slot, repo):
+ """
+ Take a LICENSE string and return a list of any licenses that the user
+ may need to accept for the given package. The returned list will not
+ contain any licenses that have already been accepted. This method
+ can throw an InvalidDependString exception.
+
+ @param cpv: The package name (for package.license support)
+ @type cpv: String
+ @param use: "USE" from the cpv's metadata
+ @type use: String
+ @param lic: "LICENSE" from the cpv's metadata
+ @type lic: String
+ @param slot: "SLOT" from the cpv's metadata
+		@type slot: String
+		@param repo: The package's repository
+		@type repo: String
+		@rtype: List
+ @return: A list of licenses that have not been accepted.
+ """
+
+ licenses = set(use_reduce(lic, matchall=1, flat=True))
+ licenses.discard('||')
+
+ acceptable_licenses = set()
+ for x in self._getPkgAcceptLicense(cpv, slot, repo):
+ if x == '*':
+ acceptable_licenses.update(licenses)
+ elif x == '-*':
+ acceptable_licenses.clear()
+ elif x[:1] == '-':
+ acceptable_licenses.discard(x[1:])
+ else:
+ acceptable_licenses.add(x)
+
+ license_str = lic
+ if "?" in license_str:
+ use = use.split()
+ else:
+ use = []
+
+ license_struct = use_reduce(license_str, uselist=use, opconvert=True)
+ return self._getMaskedLicenses(license_struct, acceptable_licenses)
+
+ def _getMaskedLicenses(self, license_struct, acceptable_licenses):
+ if not license_struct:
+ return []
+ if license_struct[0] == "||":
+ ret = []
+ for element in license_struct[1:]:
+ if isinstance(element, list):
+ if element:
+ tmp = self._getMaskedLicenses(element, acceptable_licenses)
+ if not tmp:
+ return []
+ ret.extend(tmp)
+ else:
+ if element in acceptable_licenses:
+ return []
+ ret.append(element)
+ # Return all masked licenses, since we don't know which combination
+ # (if any) the user will decide to unmask.
+ return ret
+
+ ret = []
+ for element in license_struct:
+ if isinstance(element, list):
+ if element:
+ ret.extend(self._getMaskedLicenses(element,
+ acceptable_licenses))
+ else:
+ if element not in acceptable_licenses:
+ ret.append(element)
+ return ret
+
+ def set_accept_license_str(self, accept_license_str):
+ if accept_license_str != self._accept_license_str:
+ self._accept_license_str = accept_license_str
+ self._accept_license = tuple(self.expandLicenseTokens(accept_license_str.split()))
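expandLicenseTokens() and get_prunned_accept_license() together implement the ACCEPT_LICENSE semantics: group tokens are expanded, then the result is applied incrementally against the package's LICENSE. A rough standalone sketch under made-up group and license names:

    # Sketch of @group expansion plus the incremental "*", "-*" and
    # "-token" handling used to prune ACCEPT_LICENSE against LICENSE.
    license_groups = {"FREE": ["GPL-2", "MIT", "BSD"]}  # illustrative

    def expand(tokens):
        out = []
        for tok in tokens:
            neg = tok.startswith("-")
            name = tok[1:] if neg else tok
            if name.startswith("@"):
                members = license_groups.get(name[1:], [])
                out.extend(("-" + m) if neg else m for m in members)
            else:
                out.append(tok)
        return out

    def prune_accept_license(package_licenses, accept_license):
        acceptable = set()
        for tok in expand(accept_license):
            if tok == "*":
                acceptable.update(package_licenses)
            elif tok == "-*":
                acceptable.clear()
            elif tok.startswith("-"):
                acceptable.discard(tok[1:])
            elif tok in package_licenses:
                acceptable.add(tok)
        return " ".join(sorted(acceptable))

    print(prune_accept_license({"GPL-2", "Apache-2.0"}, ["-*", "@FREE"]))  # GPL-2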
diff --git a/lib/portage/package/ebuild/_config/LocationsManager.py b/lib/portage/package/ebuild/_config/LocationsManager.py
new file mode 100644
index 000000000..f7d7209ff
--- /dev/null
+++ b/lib/portage/package/ebuild/_config/LocationsManager.py
@@ -0,0 +1,349 @@
+# Copyright 2010-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = (
+ 'LocationsManager',
+)
+
+import collections
+import io
+import warnings
+
+import portage
+from portage import os, eapi_is_supported, _encodings, _unicode_encode
+from portage.const import CUSTOM_PROFILE_PATH, GLOBAL_CONFIG_PATH, \
+ PROFILE_PATH, USER_CONFIG_PATH
+from portage.eapi import eapi_allows_directories_on_profile_level_and_repository_level
+from portage.exception import DirectoryNotFound, InvalidLocation, ParseError
+from portage.localization import _
+from portage.util import ensure_dirs, grabfile, \
+ normalize_path, read_corresponding_eapi_file, shlex_split, writemsg
+from portage.util._path import exists_raise_eaccess, isdir_raise_eaccess
+from portage.repository.config import parse_layout_conf, \
+ _portage1_profiles_allow_directories
+
+
+_PORTAGE1_DIRECTORIES = frozenset([
+ 'package.mask', 'package.provided',
+ 'package.use', 'package.use.mask', 'package.use.force',
+ 'use.mask', 'use.force'])
+
+_profile_node = collections.namedtuple('_profile_node',
+ ('location', 'portage1_directories', 'user_config',
+ 'profile_formats', 'eapi', 'allow_build_id'))
+
+_allow_parent_colon = frozenset(
+ ["portage-2"])
+
+class LocationsManager(object):
+
+ def __init__(self, config_root=None, eprefix=None, config_profile_path=None, local_config=True, \
+ target_root=None, sysroot=None):
+ self.user_profile_dir = None
+ self._local_repo_conf_path = None
+ self.eprefix = eprefix
+ self.config_root = config_root
+ self.target_root = target_root
+ self.sysroot = sysroot
+ self._user_config = local_config
+
+ if self.eprefix is None:
+ self.eprefix = portage.const.EPREFIX
+ elif self.eprefix:
+ self.eprefix = normalize_path(self.eprefix)
+ if self.eprefix == os.sep:
+ self.eprefix = ""
+
+ if self.config_root is None:
+ self.config_root = portage.const.EPREFIX + os.sep
+
+ self.config_root = normalize_path(os.path.abspath(
+ self.config_root)).rstrip(os.path.sep) + os.path.sep
+
+ self._check_var_directory("PORTAGE_CONFIGROOT", self.config_root)
+ self.abs_user_config = os.path.join(self.config_root, USER_CONFIG_PATH)
+ self.config_profile_path = config_profile_path
+
+ if self.sysroot is None:
+ self.sysroot = "/"
+ else:
+ self.sysroot = normalize_path(os.path.abspath(self.sysroot or os.sep)).rstrip(os.sep) + os.sep
+
+ self.esysroot = self.sysroot.rstrip(os.sep) + self.eprefix + os.sep
+
+ # TODO: Set this via the constructor using
+ # PORTAGE_OVERRIDE_EPREFIX.
+ self.broot = portage.const.EPREFIX
+
+ def load_profiles(self, repositories, known_repository_paths):
+ known_repository_paths = set(os.path.realpath(x)
+ for x in known_repository_paths)
+
+ known_repos = []
+ for x in known_repository_paths:
+ try:
+ repo = repositories.get_repo_for_location(x)
+ except KeyError:
+ layout_data = parse_layout_conf(x)[0]
+ else:
+ layout_data = {
+ "profile-formats": repo.profile_formats,
+ "profile_eapi_when_unspecified": repo.eapi
+ }
+ # force a trailing '/' for ease of doing startswith checks
+ known_repos.append((x + '/', layout_data))
+ known_repos = tuple(known_repos)
+
+ if self.config_profile_path is None:
+ deprecated_profile_path = os.path.join(
+ self.config_root, 'etc', 'make.profile')
+ self.config_profile_path = \
+ os.path.join(self.config_root, PROFILE_PATH)
+ if isdir_raise_eaccess(self.config_profile_path):
+ self.profile_path = self.config_profile_path
+ if isdir_raise_eaccess(deprecated_profile_path) and not \
+ os.path.samefile(self.profile_path,
+ deprecated_profile_path):
+ # Don't warn if they refer to the same path, since
+ # that can be used for backward compatibility with
+ # old software.
+ writemsg("!!! %s\n" %
+ _("Found 2 make.profile dirs: "
+ "using '%s', ignoring '%s'") %
+ (self.profile_path, deprecated_profile_path),
+ noiselevel=-1)
+ else:
+ self.config_profile_path = deprecated_profile_path
+ if isdir_raise_eaccess(self.config_profile_path):
+ self.profile_path = self.config_profile_path
+ else:
+ self.profile_path = None
+ else:
+ # NOTE: repoman may pass in an empty string
+ # here, in order to create an empty profile
+ # for checking dependencies of packages with
+ # empty KEYWORDS.
+ self.profile_path = self.config_profile_path
+
+
+ # The symlink might not exist or might not be a symlink.
+ self.profiles = []
+ self.profiles_complex = []
+ if self.profile_path:
+ try:
+ self._addProfile(os.path.realpath(self.profile_path),
+ repositories, known_repos)
+ except ParseError as e:
+ if not portage._sync_mode:
+ writemsg(_("!!! Unable to parse profile: '%s'\n") % self.profile_path, noiselevel=-1)
+ writemsg("!!! ParseError: %s\n" % str(e), noiselevel=-1)
+ self.profiles = []
+ self.profiles_complex = []
+
+ if self._user_config and self.profiles:
+ custom_prof = os.path.join(
+ self.config_root, CUSTOM_PROFILE_PATH)
+ if os.path.exists(custom_prof):
+ # For read_corresponding_eapi_file, specify default=None
+				# in order to allow things like wildcard atoms when there
+				# is no explicit EAPI setting.
+ self.user_profile_dir = custom_prof
+ self.profiles.append(custom_prof)
+ self.profiles_complex.append(
+ _profile_node(custom_prof, True, True,
+ ('profile-bashrcs', 'profile-set'),
+ read_corresponding_eapi_file(
+ custom_prof + os.sep, default=None),
+ True))
+ del custom_prof
+
+ self.profiles = tuple(self.profiles)
+ self.profiles_complex = tuple(self.profiles_complex)
+
+ def _check_var_directory(self, varname, var):
+ if not isdir_raise_eaccess(var):
+ writemsg(_("!!! Error: %s='%s' is not a directory. "
+ "Please correct this.\n") % (varname, var),
+ noiselevel=-1)
+ raise DirectoryNotFound(var)
+
+ def _addProfile(self, currentPath, repositories, known_repos):
+ current_abs_path = os.path.abspath(currentPath)
+ allow_directories = True
+ allow_parent_colon = True
+ repo_loc = None
+ compat_mode = False
+ current_formats = ()
+ eapi = None
+
+ intersecting_repos = [x for x in known_repos
+ if current_abs_path.startswith(x[0])]
+ if intersecting_repos:
+ # Handle nested repositories. The longest path
+ # will be the correct one.
+ repo_loc, layout_data = max(intersecting_repos,
+ key=lambda x:len(x[0]))
+ eapi = layout_data.get("profile_eapi_when_unspecified")
+
+ eapi_file = os.path.join(currentPath, "eapi")
+ eapi = eapi or "0"
+ f = None
+ try:
+ f = io.open(_unicode_encode(eapi_file,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='replace')
+ eapi = f.readline().strip()
+ except IOError:
+ pass
+ else:
+ if not eapi_is_supported(eapi):
+ raise ParseError(_(
+ "Profile contains unsupported "
+ "EAPI '%s': '%s'") % \
+ (eapi, os.path.realpath(eapi_file),))
+ finally:
+ if f is not None:
+ f.close()
+
+ if intersecting_repos:
+ allow_directories = eapi_allows_directories_on_profile_level_and_repository_level(eapi) or \
+ any(x in _portage1_profiles_allow_directories for x in layout_data['profile-formats'])
+ compat_mode = not eapi_allows_directories_on_profile_level_and_repository_level(eapi) and \
+ layout_data['profile-formats'] == ('portage-1-compat',)
+ allow_parent_colon = any(x in _allow_parent_colon
+ for x in layout_data['profile-formats'])
+ current_formats = tuple(layout_data['profile-formats'])
+
+
+ if compat_mode:
+ offenders = _PORTAGE1_DIRECTORIES.intersection(os.listdir(currentPath))
+ offenders = sorted(x for x in offenders
+ if os.path.isdir(os.path.join(currentPath, x)))
+ if offenders:
+ warnings.warn(_(
+ "\nThe selected profile is implicitly using the 'portage-1' format:\n"
+ "\tprofile = %(profile_path)s\n"
+ "But this repository is not using that format:\n"
+ "\trepo = %(repo_name)s\n"
+ "This will break in the future. Please convert these dirs to files:\n"
+ "\t%(files)s\n"
+ "Or, add this line to the repository's layout.conf:\n"
+ "\tprofile-formats = portage-1")
+ % dict(profile_path=currentPath, repo_name=repo_loc,
+ files='\n\t'.join(offenders)))
+
+ parentsFile = os.path.join(currentPath, "parent")
+ if exists_raise_eaccess(parentsFile):
+ parents = grabfile(parentsFile)
+ if not parents:
+ raise ParseError(
+ _("Empty parent file: '%s'") % parentsFile)
+ for parentPath in parents:
+ abs_parent = parentPath[:1] == os.sep
+ if not abs_parent and allow_parent_colon:
+ parentPath = self._expand_parent_colon(parentsFile,
+ parentPath, repo_loc, repositories)
+
+ # NOTE: This os.path.join() call is intended to ignore
+ # currentPath if parentPath is already absolute.
+ parentPath = normalize_path(os.path.join(
+ currentPath, parentPath))
+
+ if abs_parent or repo_loc is None or \
+ not parentPath.startswith(repo_loc):
+ # It seems that this parent may point outside
+ # of the current repo, so realpath it.
+ parentPath = os.path.realpath(parentPath)
+
+ if exists_raise_eaccess(parentPath):
+ self._addProfile(parentPath, repositories, known_repos)
+ else:
+ raise ParseError(
+ _("Parent '%s' not found: '%s'") % \
+ (parentPath, parentsFile))
+
+ self.profiles.append(currentPath)
+ self.profiles_complex.append(
+ _profile_node(currentPath, allow_directories, False,
+ current_formats, eapi, 'build-id' in current_formats))
+
+ def _expand_parent_colon(self, parentsFile, parentPath,
+ repo_loc, repositories):
+ colon = parentPath.find(":")
+ if colon == -1:
+ return parentPath
+
+ if colon == 0:
+ if repo_loc is None:
+ raise ParseError(
+ _("Parent '%s' not found: '%s'") % \
+ (parentPath, parentsFile))
+ else:
+ parentPath = normalize_path(os.path.join(
+ repo_loc, 'profiles', parentPath[colon+1:]))
+ else:
+ p_repo_name = parentPath[:colon]
+ try:
+ p_repo_loc = repositories.get_location_for_name(p_repo_name)
+ except KeyError:
+ raise ParseError(
+ _("Parent '%s' not found: '%s'") % \
+ (parentPath, parentsFile))
+ else:
+ parentPath = normalize_path(os.path.join(
+ p_repo_loc, 'profiles', parentPath[colon+1:]))
+
+ return parentPath
+
+ def set_root_override(self, root_overwrite=None):
+ # Allow ROOT setting to come from make.conf if it's not overridden
+ # by the constructor argument (from the calling environment).
+ if self.target_root is None and root_overwrite is not None:
+ self.target_root = root_overwrite
+ if not self.target_root.strip():
+ self.target_root = None
+ if self.target_root is None:
+ self.target_root = "/"
+
+ self.target_root = normalize_path(os.path.abspath(
+ self.target_root)).rstrip(os.path.sep) + os.path.sep
+
+ if self.sysroot != "/" and self.sysroot != self.target_root:
+ writemsg(_("!!! Error: SYSROOT (currently %s) must "
+ "equal / or ROOT (currently %s).\n") %
+ (self.sysroot, self.target_root),
+ noiselevel=-1)
+ raise InvalidLocation(self.sysroot)
+
+ ensure_dirs(self.target_root)
+ self._check_var_directory("ROOT", self.target_root)
+
+ self.eroot = self.target_root.rstrip(os.sep) + self.eprefix + os.sep
+
+ self.global_config_path = GLOBAL_CONFIG_PATH
+ if portage.const.EPREFIX:
+ self.global_config_path = os.path.join(portage.const.EPREFIX,
+ GLOBAL_CONFIG_PATH.lstrip(os.sep))
+
+ def set_port_dirs(self, portdir, portdir_overlay):
+ self.portdir = portdir
+ self.portdir_overlay = portdir_overlay
+ if self.portdir_overlay is None:
+ self.portdir_overlay = ""
+
+ self.overlay_profiles = []
+ for ov in shlex_split(self.portdir_overlay):
+ ov = normalize_path(ov)
+ profiles_dir = os.path.join(ov, "profiles")
+ if isdir_raise_eaccess(profiles_dir):
+ self.overlay_profiles.append(profiles_dir)
+
+ self.profile_locations = [os.path.join(portdir, "profiles")] + self.overlay_profiles
+ self.profile_and_user_locations = self.profile_locations[:]
+ if self._user_config:
+ self.profile_and_user_locations.append(self.abs_user_config)
+
+ self.profile_locations = tuple(self.profile_locations)
+ self.profile_and_user_locations = tuple(self.profile_and_user_locations)
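Profiles whose repository advertises the portage-2 profile format may name their parents as repo-name:path instead of a plain relative directory. A minimal sketch of that expansion, with illustrative repository locations; a real LocationsManager resolves the name through repositories.get_location_for_name():

    import os

    repo_locations = {"gentoo": "/var/db/repos/gentoo"}  # illustrative

    def expand_parent_colon(parent, current_repo_loc):
        colon = parent.find(":")
        if colon == -1:
            return parent  # ordinary relative or absolute parent path
        if colon == 0:
            # ":some/profile" is relative to the current repository.
            base = current_repo_loc
        else:
            base = repo_locations[parent[:colon]]
        return os.path.normpath(os.path.join(base, "profiles", parent[colon + 1:]))

    print(expand_parent_colon("gentoo:default/linux/amd64/17.1",
            "/var/db/repos/gentoo"))
    # /var/db/repos/gentoo/profiles/default/linux/amd64/17.1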
diff --git a/lib/portage/package/ebuild/_config/MaskManager.py b/lib/portage/package/ebuild/_config/MaskManager.py
new file mode 100644
index 000000000..40cc6e0c4
--- /dev/null
+++ b/lib/portage/package/ebuild/_config/MaskManager.py
@@ -0,0 +1,261 @@
+# Copyright 2010-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'MaskManager',
+)
+
+import warnings
+
+from portage import os
+from portage.dep import ExtendedAtomDict, match_from_list
+from portage.localization import _
+from portage.util import append_repo, grabfile_package, stack_lists, writemsg
+from portage.versions import _pkg_str
+
+class MaskManager(object):
+
+ def __init__(self, repositories, profiles, abs_user_config,
+ user_config=True, strict_umatched_removal=False):
+ self._punmaskdict = ExtendedAtomDict(list)
+ self._pmaskdict = ExtendedAtomDict(list)
+ # Preserves atoms that are eliminated by negative
+ # incrementals in user_pkgmasklines.
+ self._pmaskdict_raw = ExtendedAtomDict(list)
+
+ #Read profile/package.mask from every repo.
+		#Repositories inherit masks from their master repositories and
+		#are able to remove masks from them with -atoms.
+		#Such a removal affects only the current repo, but not the master.
+ #Add ::repo specs to every atom to make sure atoms only affect
+ #packages from the current repo.
+
+ # Cache the repository-wide package.mask files as a particular
+		# repo may often be referenced by others as the master.
+ pmask_cache = {}
+
+ def grab_pmask(loc, repo_config):
+ if loc not in pmask_cache:
+ path = os.path.join(loc, 'profiles', 'package.mask')
+ pmask_cache[loc] = grabfile_package(path,
+ recursive=repo_config.portage1_profiles,
+ remember_source_file=True, verify_eapi=True,
+ eapi_default=repo_config.eapi,
+ allow_build_id=("build-id"
+ in repo_config.profile_formats))
+ if repo_config.portage1_profiles_compat and os.path.isdir(path):
+ warnings.warn(_("Repository '%(repo_name)s' is implicitly using "
+ "'portage-1' profile format in its profiles/package.mask, but "
+ "the repository profiles are not marked as that format. This will break "
+ "in the future. Please either convert the following paths "
+ "to files, or add\nprofile-formats = portage-1\nto the "
+ "repository's layout.conf.\n")
+ % dict(repo_name=repo_config.name))
+
+ return pmask_cache[loc]
+
+ repo_pkgmasklines = []
+ for repo in repositories.repos_with_profiles():
+ lines = []
+ repo_lines = grab_pmask(repo.location, repo)
+ removals = frozenset(line[0][1:] for line in repo_lines
+ if line[0][:1] == "-")
+ matched_removals = set()
+ for master in repo.masters:
+ master_lines = grab_pmask(master.location, master)
+ for line in master_lines:
+ if line[0] in removals:
+ matched_removals.add(line[0])
+ # Since we don't stack masters recursively, there aren't any
+ # atoms earlier in the stack to be matched by negative atoms in
+ # master_lines. Also, repo_lines may contain negative atoms
+ # that are intended to negate atoms from a different master
+ # than the one with which we are currently stacking. Therefore,
+ # we disable warn_for_unmatched_removal here (see bug #386569).
+ lines.append(stack_lists([master_lines, repo_lines], incremental=1,
+ remember_source_file=True, warn_for_unmatched_removal=False))
+
+ # It's safe to warn for unmatched removal if masters have not
+ # been overridden by the user, which is guaranteed when
+ # user_config is false (when called by repoman).
+ if repo.masters:
+ unmatched_removals = removals.difference(matched_removals)
+ if unmatched_removals and not user_config:
+ source_file = os.path.join(repo.location,
+ "profiles", "package.mask")
+ unmatched_removals = list(unmatched_removals)
+ if len(unmatched_removals) > 3:
+ writemsg(
+ _("--- Unmatched removal atoms in %s: %s and %s more\n") %
+ (source_file,
+ ", ".join("-" + x for x in unmatched_removals[:3]),
+ len(unmatched_removals) - 3), noiselevel=-1)
+ else:
+ writemsg(
+ _("--- Unmatched removal atom(s) in %s: %s\n") %
+ (source_file,
+ ", ".join("-" + x for x in unmatched_removals)),
+ noiselevel=-1)
+
+ else:
+ lines.append(stack_lists([repo_lines], incremental=1,
+ remember_source_file=True, warn_for_unmatched_removal=not user_config,
+ strict_warn_for_unmatched_removal=strict_umatched_removal))
+ repo_pkgmasklines.extend(append_repo(stack_lists(lines), repo.name, remember_source_file=True))
+
+ repo_pkgunmasklines = []
+ for repo in repositories.repos_with_profiles():
+ if not repo.portage1_profiles:
+ continue
+ repo_lines = grabfile_package(os.path.join(repo.location, "profiles", "package.unmask"), \
+ recursive=1, remember_source_file=True,
+ verify_eapi=True, eapi_default=repo.eapi,
+ allow_build_id=("build-id" in repo.profile_formats))
+ lines = stack_lists([repo_lines], incremental=1, \
+ remember_source_file=True, warn_for_unmatched_removal=True,
+ strict_warn_for_unmatched_removal=strict_umatched_removal)
+ repo_pkgunmasklines.extend(append_repo(lines, repo.name, remember_source_file=True))
+
+ #Read package.mask from the user's profile. Stack them in the end
+ #to allow profiles to override masks from their parent profiles.
+ profile_pkgmasklines = []
+ profile_pkgunmasklines = []
+ for x in profiles:
+ profile_pkgmasklines.append(grabfile_package(
+ os.path.join(x.location, "package.mask"),
+ recursive=x.portage1_directories,
+ remember_source_file=True, verify_eapi=True,
+ eapi=x.eapi, eapi_default=None,
+ allow_build_id=x.allow_build_id))
+ if x.portage1_directories:
+ profile_pkgunmasklines.append(grabfile_package(
+ os.path.join(x.location, "package.unmask"),
+ recursive=x.portage1_directories,
+ remember_source_file=True, verify_eapi=True,
+ eapi=x.eapi, eapi_default=None,
+ allow_build_id=x.allow_build_id))
+ profile_pkgmasklines = stack_lists(profile_pkgmasklines, incremental=1, \
+ remember_source_file=True, warn_for_unmatched_removal=True,
+ strict_warn_for_unmatched_removal=strict_umatched_removal)
+ profile_pkgunmasklines = stack_lists(profile_pkgunmasklines, incremental=1, \
+ remember_source_file=True, warn_for_unmatched_removal=True,
+ strict_warn_for_unmatched_removal=strict_umatched_removal)
+
+ #Read /etc/portage/package.mask. Don't stack it to allow the user to
+ #remove mask atoms from everywhere with -atoms.
+ user_pkgmasklines = []
+ user_pkgunmasklines = []
+ if user_config:
+ user_pkgmasklines = grabfile_package(
+ os.path.join(abs_user_config, "package.mask"), recursive=1, \
+ allow_wildcard=True, allow_repo=True,
+ remember_source_file=True, verify_eapi=False,
+ allow_build_id=True)
+ user_pkgunmasklines = grabfile_package(
+ os.path.join(abs_user_config, "package.unmask"), recursive=1, \
+ allow_wildcard=True, allow_repo=True,
+ remember_source_file=True, verify_eapi=False,
+ allow_build_id=True)
+
+ #Stack everything together. At this point, only user_pkgmasklines may contain -atoms.
+ #Don't warn for unmatched -atoms here, since we don't do it for any other user config file.
+ raw_pkgmasklines = stack_lists([repo_pkgmasklines, profile_pkgmasklines], \
+ incremental=1, remember_source_file=True, warn_for_unmatched_removal=False, ignore_repo=True)
+ pkgmasklines = stack_lists([repo_pkgmasklines, profile_pkgmasklines, user_pkgmasklines], \
+ incremental=1, remember_source_file=True, warn_for_unmatched_removal=False, ignore_repo=True)
+ pkgunmasklines = stack_lists([repo_pkgunmasklines, profile_pkgunmasklines, user_pkgunmasklines], \
+ incremental=1, remember_source_file=True, warn_for_unmatched_removal=False, ignore_repo=True)
+
+ for x, source_file in raw_pkgmasklines:
+ self._pmaskdict_raw.setdefault(x.cp, []).append(x)
+
+ for x, source_file in pkgmasklines:
+ self._pmaskdict.setdefault(x.cp, []).append(x)
+
+ for x, source_file in pkgunmasklines:
+ self._punmaskdict.setdefault(x.cp, []).append(x)
+
+ for d in (self._pmaskdict_raw, self._pmaskdict, self._punmaskdict):
+ for k, v in d.items():
+ d[k] = tuple(v)
+
+ def _getMaskAtom(self, cpv, slot, repo, unmask_atoms=None):
+ """
+ Take a package and return a matching package.mask atom, or None if no
+ such atom exists or it has been cancelled by package.unmask.
+
+ @param cpv: The package name
+ @type cpv: String
+ @param slot: The package's slot
+ @type slot: String
+ @param repo: The package's repository [optional]
+ @type repo: String
+ @param unmask_atoms: if desired pass in self._punmaskdict.get(cp)
+ @type unmask_atoms: list
+ @rtype: String
+ @return: A matching atom string or None if one is not found.
+ """
+
+ try:
+ cpv.slot
+ except AttributeError:
+ pkg = _pkg_str(cpv, slot=slot, repo=repo)
+ else:
+ pkg = cpv
+
+ mask_atoms = self._pmaskdict.get(pkg.cp)
+ if mask_atoms:
+ pkg_list = [pkg]
+ for x in mask_atoms:
+ if not match_from_list(x, pkg_list):
+ continue
+ if unmask_atoms:
+ for y in unmask_atoms:
+ if match_from_list(y, pkg_list):
+ return None
+ return x
+ return None
+
+
+ def getMaskAtom(self, cpv, slot, repo):
+ """
+ Take a package and return a matching package.mask atom, or None if no
+ such atom exists or it has been cancelled by package.unmask.
+
+ @param cpv: The package name
+ @type cpv: String
+ @param slot: The package's slot
+ @type slot: String
+ @param repo: The package's repository [optional]
+ @type repo: String
+ @rtype: String
+ @return: A matching atom string or None if one is not found.
+ """
+
+ try:
+ cpv.slot
+ except AttributeError:
+ pkg = _pkg_str(cpv, slot=slot, repo=repo)
+ else:
+ pkg = cpv
+
+ return self._getMaskAtom(pkg, slot, repo,
+ self._punmaskdict.get(pkg.cp))
+
+
+ def getRawMaskAtom(self, cpv, slot, repo):
+ """
+ Take a package and return a matching package.mask atom, or None if no
+		such atom exists. Unlike getMaskAtom, the result is not subject to
+		cancellation by package.unmask.
+
+ @param cpv: The package name
+ @type cpv: String
+ @param slot: The package's slot
+ @type slot: String
+ @param repo: The package's repository [optional]
+ @type repo: String
+ @rtype: String
+ @return: A matching atom string or None if one is not found.
+ """
+
+ return self._getMaskAtom(cpv, slot, repo)
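getMaskAtom() consults package.unmask while getRawMaskAtom() does not. The shape of that lookup can be sketched without portage's Atom and match_from_list machinery; the patterns below are plain fnmatch globs, not real atom syntax:

    import fnmatch

    # Simplified stand-in for _getMaskAtom(): the first matching mask wins
    # unless a matching unmask entry cancels it.
    def get_mask_atom(cpv, mask_atoms, unmask_atoms=()):
        for mask in mask_atoms:
            if not fnmatch.fnmatch(cpv, mask):
                continue
            if any(fnmatch.fnmatch(cpv, um) for um in unmask_atoms):
                return None  # cancelled by package.unmask
            return mask
        return None

    print(get_mask_atom("sys-apps/foo-1.2", ["sys-apps/foo*"]))                  # sys-apps/foo*
    print(get_mask_atom("sys-apps/foo-1.2", ["sys-apps/foo*"], ["sys-apps/*"]))  # None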
diff --git a/lib/portage/package/ebuild/_config/UseManager.py b/lib/portage/package/ebuild/_config/UseManager.py
new file mode 100644
index 000000000..7302876ab
--- /dev/null
+++ b/lib/portage/package/ebuild/_config/UseManager.py
@@ -0,0 +1,579 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'UseManager',
+)
+
+from _emerge.Package import Package
+from portage import os
+from portage.dep import Atom, dep_getrepo, dep_getslot, ExtendedAtomDict, remove_slot, _get_useflag_re, _repo_separator
+from portage.eapi import eapi_has_use_aliases, eapi_supports_stable_use_forcing_and_masking
+from portage.exception import InvalidAtom
+from portage.localization import _
+from portage.util import grabfile, grabdict, grabdict_package, read_corresponding_eapi_file, stack_lists, writemsg
+from portage.versions import _pkg_str
+
+from portage.package.ebuild._config.helper import ordered_by_atom_specificity
+
+class UseManager(object):
+
+ def __init__(self, repositories, profiles, abs_user_config, is_stable,
+ user_config=True):
+ # file variable
+ #--------------------------------
+ # repositories
+ #--------------------------------
+ # use.mask _repo_usemask_dict
+ # use.stable.mask _repo_usestablemask_dict
+ # use.force _repo_useforce_dict
+ # use.stable.force _repo_usestableforce_dict
+ # use.aliases _repo_usealiases_dict
+ # package.use.mask _repo_pusemask_dict
+ # package.use.stable.mask _repo_pusestablemask_dict
+ # package.use.force _repo_puseforce_dict
+ # package.use.stable.force _repo_pusestableforce_dict
+ # package.use.aliases _repo_pusealiases_dict
+ #--------------------------------
+ # profiles
+ #--------------------------------
+ # use.mask _usemask_list
+ # use.stable.mask _usestablemask_list
+ # use.force _useforce_list
+ # use.stable.force _usestableforce_list
+ # package.use.mask _pusemask_list
+ # package.use.stable.mask _pusestablemask_list
+ # package.use _pkgprofileuse
+ # package.use.force _puseforce_list
+ # package.use.stable.force _pusestableforce_list
+ #--------------------------------
+ # user config
+ #--------------------------------
+ # package.use _pusedict
+
+ # Dynamic variables tracked by the config class
+ #--------------------------------
+ # profiles
+ #--------------------------------
+ # usemask
+ # useforce
+ #--------------------------------
+ # user config
+ #--------------------------------
+ # puse
+
+ self._user_config = user_config
+ self._is_stable = is_stable
+ self._repo_usemask_dict = self._parse_repository_files_to_dict_of_tuples("use.mask", repositories)
+ self._repo_usestablemask_dict = \
+ self._parse_repository_files_to_dict_of_tuples("use.stable.mask",
+ repositories, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
+ self._repo_useforce_dict = self._parse_repository_files_to_dict_of_tuples("use.force", repositories)
+ self._repo_usestableforce_dict = \
+ self._parse_repository_files_to_dict_of_tuples("use.stable.force",
+ repositories, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
+ self._repo_pusemask_dict = self._parse_repository_files_to_dict_of_dicts("package.use.mask", repositories)
+ self._repo_pusestablemask_dict = \
+ self._parse_repository_files_to_dict_of_dicts("package.use.stable.mask",
+ repositories, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
+ self._repo_puseforce_dict = self._parse_repository_files_to_dict_of_dicts("package.use.force", repositories)
+ self._repo_pusestableforce_dict = \
+ self._parse_repository_files_to_dict_of_dicts("package.use.stable.force",
+ repositories, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
+ self._repo_puse_dict = self._parse_repository_files_to_dict_of_dicts("package.use", repositories)
+
+ self._usemask_list = self._parse_profile_files_to_tuple_of_tuples("use.mask", profiles)
+ self._usestablemask_list = \
+ self._parse_profile_files_to_tuple_of_tuples("use.stable.mask",
+ profiles, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
+ self._useforce_list = self._parse_profile_files_to_tuple_of_tuples("use.force", profiles)
+ self._usestableforce_list = \
+ self._parse_profile_files_to_tuple_of_tuples("use.stable.force",
+ profiles, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
+ self._pusemask_list = self._parse_profile_files_to_tuple_of_dicts("package.use.mask", profiles)
+ self._pusestablemask_list = \
+ self._parse_profile_files_to_tuple_of_dicts("package.use.stable.mask",
+ profiles, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
+ self._pkgprofileuse = self._parse_profile_files_to_tuple_of_dicts("package.use", profiles, juststrings=True)
+ self._puseforce_list = self._parse_profile_files_to_tuple_of_dicts("package.use.force", profiles)
+ self._pusestableforce_list = \
+ self._parse_profile_files_to_tuple_of_dicts("package.use.stable.force",
+ profiles, eapi_filter=eapi_supports_stable_use_forcing_and_masking)
+
+ self._pusedict = self._parse_user_files_to_extatomdict("package.use", abs_user_config, user_config)
+
+ self._repo_usealiases_dict = self._parse_repository_usealiases(repositories)
+ self._repo_pusealiases_dict = self._parse_repository_packageusealiases(repositories)
+
+ self.repositories = repositories
+
+ def _parse_file_to_tuple(self, file_name, recursive=True,
+ eapi_filter=None, eapi=None, eapi_default="0"):
+ """
+ @param file_name: input file name
+ @type file_name: str
+ @param recursive: triggers recursion if the input file is a
+ directory
+ @type recursive: bool
+ @param eapi_filter: a function that accepts a single eapi
+			argument, and returns true if the current file type
+ is supported by the given EAPI
+ @type eapi_filter: callable
+ @param eapi: the EAPI of the current profile node, which allows
+ a call to read_corresponding_eapi_file to be skipped
+ @type eapi: str
+ @param eapi_default: the default EAPI which applies if the
+ current profile node does not define a local EAPI
+ @type eapi_default: str
+ @rtype: tuple
+ @return: collection of USE flags
+ """
+ ret = []
+ lines = grabfile(file_name, recursive=recursive)
+ if eapi is None:
+ eapi = read_corresponding_eapi_file(
+ file_name, default=eapi_default)
+ if eapi_filter is not None and not eapi_filter(eapi):
+ if lines:
+ writemsg(_("--- EAPI '%s' does not support '%s': '%s'\n") %
+ (eapi, os.path.basename(file_name), file_name),
+ noiselevel=-1)
+ return ()
+ useflag_re = _get_useflag_re(eapi)
+ for prefixed_useflag in lines:
+ if prefixed_useflag[:1] == "-":
+ useflag = prefixed_useflag[1:]
+ else:
+ useflag = prefixed_useflag
+ if useflag_re.match(useflag) is None:
+ writemsg(_("--- Invalid USE flag in '%s': '%s'\n") %
+ (file_name, prefixed_useflag), noiselevel=-1)
+ else:
+ ret.append(prefixed_useflag)
+ return tuple(ret)
+
+ def _parse_file_to_dict(self, file_name, juststrings=False, recursive=True,
+ eapi_filter=None, user_config=False, eapi=None, eapi_default="0",
+ allow_build_id=False):
+ """
+ @param file_name: input file name
+ @type file_name: str
+ @param juststrings: store dict values as space-delimited strings
+ instead of tuples
+ @type juststrings: bool
+ @param recursive: triggers recursion if the input file is a
+ directory
+ @type recursive: bool
+ @param eapi_filter: a function that accepts a single eapi
+ argument, and returns true if the the current file type
+			argument, and returns true if the current file type
+ @type eapi_filter: callable
+ @param user_config: current file is part of the local
+ configuration (not repository content)
+ @type user_config: bool
+ @param eapi: the EAPI of the current profile node, which allows
+ a call to read_corresponding_eapi_file to be skipped
+ @type eapi: str
+ @param eapi_default: the default EAPI which applies if the
+ current profile node does not define a local EAPI
+ @type eapi_default: str
+ @param allow_build_id: allow atoms to specify a particular
+ build-id
+ @type allow_build_id: bool
+		@rtype: dict
+		@return: mapping of package names to per-atom USE flag settings
+ """
+ ret = {}
+ location_dict = {}
+ if eapi is None:
+ eapi = read_corresponding_eapi_file(file_name,
+ default=eapi_default)
+ extended_syntax = eapi is None and user_config
+ if extended_syntax:
+ ret = ExtendedAtomDict(dict)
+ else:
+ ret = {}
+ file_dict = grabdict_package(file_name, recursive=recursive,
+ allow_wildcard=extended_syntax, allow_repo=extended_syntax,
+ verify_eapi=(not extended_syntax), eapi=eapi,
+ eapi_default=eapi_default, allow_build_id=allow_build_id,
+ allow_use=False)
+ if eapi is not None and eapi_filter is not None and not eapi_filter(eapi):
+ if file_dict:
+ writemsg(_("--- EAPI '%s' does not support '%s': '%s'\n") %
+ (eapi, os.path.basename(file_name), file_name),
+ noiselevel=-1)
+ return ret
+ useflag_re = _get_useflag_re(eapi)
+ for k, v in file_dict.items():
+ useflags = []
+ use_expand_prefix = ''
+ for prefixed_useflag in v:
+ if extended_syntax and prefixed_useflag == "\n":
+ use_expand_prefix = ""
+ continue
+ if extended_syntax and prefixed_useflag[-1] == ":":
+ use_expand_prefix = prefixed_useflag[:-1].lower() + "_"
+ continue
+
+ if prefixed_useflag[:1] == "-":
+ useflag = use_expand_prefix + prefixed_useflag[1:]
+ prefixed_useflag = "-" + useflag
+ else:
+ useflag = use_expand_prefix + prefixed_useflag
+ prefixed_useflag = useflag
+ if useflag_re.match(useflag) is None:
+ writemsg(_("--- Invalid USE flag for '%s' in '%s': '%s'\n") %
+ (k, file_name, prefixed_useflag), noiselevel=-1)
+ else:
+ useflags.append(prefixed_useflag)
+ location_dict.setdefault(k, []).extend(useflags)
+ for k, v in location_dict.items():
+ if juststrings:
+ v = " ".join(v)
+ else:
+ v = tuple(v)
+ ret.setdefault(k.cp, {})[k] = v
+ return ret
+
+ def _parse_user_files_to_extatomdict(self, file_name, location, user_config):
+ ret = ExtendedAtomDict(dict)
+ if user_config:
+ pusedict = grabdict_package(
+ os.path.join(location, file_name),
+ recursive=1, newlines=1, allow_wildcard=True,
+ allow_repo=True, verify_eapi=False,
+ allow_build_id=True, allow_use=False)
+ for k, v in pusedict.items():
+ l = []
+ use_expand_prefix = ''
+ for flag in v:
+ if flag == "\n":
+ use_expand_prefix = ""
+ continue
+ if flag[-1] == ":":
+ use_expand_prefix = flag[:-1].lower() + "_"
+ continue
+ if flag[0] == "-":
+ nv = "-" + use_expand_prefix + flag[1:]
+ else:
+ nv = use_expand_prefix + flag
+ l.append(nv)
+ ret.setdefault(k.cp, {})[k] = tuple(l)
+
+ return ret
+
+ def _parse_repository_files_to_dict_of_tuples(self, file_name, repositories, eapi_filter=None):
+ ret = {}
+ for repo in repositories.repos_with_profiles():
+ ret[repo.name] = self._parse_file_to_tuple(
+ os.path.join(repo.location, "profiles", file_name),
+ eapi_filter=eapi_filter, eapi_default=repo.eapi)
+ return ret
+
+ def _parse_repository_files_to_dict_of_dicts(self, file_name, repositories, eapi_filter=None):
+ ret = {}
+ for repo in repositories.repos_with_profiles():
+ ret[repo.name] = self._parse_file_to_dict(
+ os.path.join(repo.location, "profiles", file_name),
+ eapi_filter=eapi_filter, eapi_default=repo.eapi,
+ allow_build_id=("build-id" in repo.profile_formats))
+ return ret
+
+ def _parse_profile_files_to_tuple_of_tuples(self, file_name, locations,
+ eapi_filter=None):
+ return tuple(self._parse_file_to_tuple(
+ os.path.join(profile.location, file_name),
+ recursive=profile.portage1_directories,
+ eapi_filter=eapi_filter, eapi=profile.eapi,
+ eapi_default=None) for profile in locations)
+
+ def _parse_profile_files_to_tuple_of_dicts(self, file_name, locations,
+ juststrings=False, eapi_filter=None):
+ return tuple(self._parse_file_to_dict(
+ os.path.join(profile.location, file_name), juststrings,
+ recursive=profile.portage1_directories, eapi_filter=eapi_filter,
+ user_config=profile.user_config, eapi=profile.eapi,
+ eapi_default=None, allow_build_id=profile.allow_build_id)
+ for profile in locations)
+
+ def _parse_repository_usealiases(self, repositories):
+ ret = {}
+ for repo in repositories.repos_with_profiles():
+ file_name = os.path.join(repo.location, "profiles", "use.aliases")
+ eapi = read_corresponding_eapi_file(
+ file_name, default=repo.eapi)
+ useflag_re = _get_useflag_re(eapi)
+ raw_file_dict = grabdict(file_name, recursive=True)
+ file_dict = {}
+ for real_flag, aliases in raw_file_dict.items():
+ if useflag_re.match(real_flag) is None:
+ writemsg(_("--- Invalid real USE flag in '%s': '%s'\n") % (file_name, real_flag), noiselevel=-1)
+ else:
+ for alias in aliases:
+ if useflag_re.match(alias) is None:
+ writemsg(_("--- Invalid USE flag alias for '%s' real USE flag in '%s': '%s'\n") %
+ (real_flag, file_name, alias), noiselevel=-1)
+ else:
+ if any(alias in v for k, v in file_dict.items() if k != real_flag):
+ writemsg(_("--- Duplicated USE flag alias in '%s': '%s'\n") %
+ (file_name, alias), noiselevel=-1)
+ else:
+ file_dict.setdefault(real_flag, []).append(alias)
+ ret[repo.name] = file_dict
+ return ret
+
+ def _parse_repository_packageusealiases(self, repositories):
+ ret = {}
+ for repo in repositories.repos_with_profiles():
+ file_name = os.path.join(repo.location, "profiles", "package.use.aliases")
+ eapi = read_corresponding_eapi_file(
+ file_name, default=repo.eapi)
+ useflag_re = _get_useflag_re(eapi)
+ lines = grabfile(file_name, recursive=True)
+ file_dict = {}
+ for line in lines:
+ elements = line.split()
+ atom = elements[0]
+ try:
+ atom = Atom(atom, eapi=eapi)
+ except InvalidAtom:
+ writemsg(_("--- Invalid atom in '%s': '%s'\n") % (file_name, atom))
+ continue
+ if len(elements) == 1:
+ writemsg(_("--- Missing real USE flag for '%s' in '%s'\n") % (atom, file_name), noiselevel=-1)
+ continue
+ real_flag = elements[1]
+ if useflag_re.match(real_flag) is None:
+ writemsg(_("--- Invalid real USE flag for '%s' in '%s': '%s'\n") % (atom, file_name, real_flag), noiselevel=-1)
+ else:
+ for alias in elements[2:]:
+ if useflag_re.match(alias) is None:
+ writemsg(_("--- Invalid USE flag alias for '%s' real USE flag for '%s' in '%s': '%s'\n") %
+ (real_flag, atom, file_name, alias), noiselevel=-1)
+ else:
+ # Duplicated USE flag aliases in entries for different atoms
+ # matching the same package version are detected in getUseAliases().
+ if any(alias in v for k, v in file_dict.get(atom.cp, {}).get(atom, {}).items() if k != real_flag):
+ writemsg(_("--- Duplicated USE flag alias for '%s' in '%s': '%s'\n") %
+ (atom, file_name, alias), noiselevel=-1)
+ else:
+ file_dict.setdefault(atom.cp, {}).setdefault(atom, {}).setdefault(real_flag, []).append(alias)
+ ret[repo.name] = file_dict
+ return ret
+
+ def _isStable(self, pkg):
+ if self._user_config:
+ try:
+ return pkg.stable
+ except AttributeError:
+ # KEYWORDS is unavailable (prior to "depend" phase)
+ return False
+
+ try:
+ pkg._metadata
+ except AttributeError:
+ # KEYWORDS is unavailable (prior to "depend" phase)
+ return False
+
+ # Since repoman uses different config instances for
+ # different profiles, we have to be careful to do the
+ # stable check against the correct profile here.
+ return self._is_stable(pkg)
+
+ def getUseMask(self, pkg=None, stable=None):
+ if pkg is None:
+ return frozenset(stack_lists(
+ self._usemask_list, incremental=True))
+
+ slot = None
+ cp = getattr(pkg, "cp", None)
+ if cp is None:
+ slot = dep_getslot(pkg)
+ repo = dep_getrepo(pkg)
+ pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
+ cp = pkg.cp
+
+ if stable is None:
+ stable = self._isStable(pkg)
+
+ usemask = []
+
+ if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
+ repos = []
+ try:
+ repos.extend(repo.name for repo in
+ self.repositories[pkg.repo].masters)
+ except KeyError:
+ pass
+ repos.append(pkg.repo)
+ for repo in repos:
+ usemask.append(self._repo_usemask_dict.get(repo, {}))
+ if stable:
+ usemask.append(self._repo_usestablemask_dict.get(repo, {}))
+ cpdict = self._repo_pusemask_dict.get(repo, {}).get(cp)
+ if cpdict:
+ pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_usemask:
+ usemask.extend(pkg_usemask)
+ if stable:
+ cpdict = self._repo_pusestablemask_dict.get(repo, {}).get(cp)
+ if cpdict:
+ pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_usemask:
+ usemask.extend(pkg_usemask)
+
+ for i, pusemask_dict in enumerate(self._pusemask_list):
+ if self._usemask_list[i]:
+ usemask.append(self._usemask_list[i])
+ if stable and self._usestablemask_list[i]:
+ usemask.append(self._usestablemask_list[i])
+ cpdict = pusemask_dict.get(cp)
+ if cpdict:
+ pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_usemask:
+ usemask.extend(pkg_usemask)
+ if stable:
+ cpdict = self._pusestablemask_list[i].get(cp)
+ if cpdict:
+ pkg_usemask = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_usemask:
+ usemask.extend(pkg_usemask)
+
+ return frozenset(stack_lists(usemask, incremental=True))
+
+ def getUseForce(self, pkg=None, stable=None):
+ if pkg is None:
+ return frozenset(stack_lists(
+ self._useforce_list, incremental=True))
+
+ cp = getattr(pkg, "cp", None)
+ if cp is None:
+ slot = dep_getslot(pkg)
+ repo = dep_getrepo(pkg)
+ pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
+ cp = pkg.cp
+
+ if stable is None:
+ stable = self._isStable(pkg)
+
+ useforce = []
+
+ if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
+ repos = []
+ try:
+ repos.extend(repo.name for repo in
+ self.repositories[pkg.repo].masters)
+ except KeyError:
+ pass
+ repos.append(pkg.repo)
+ for repo in repos:
+ useforce.append(self._repo_useforce_dict.get(repo, {}))
+ if stable:
+ useforce.append(self._repo_usestableforce_dict.get(repo, {}))
+ cpdict = self._repo_puseforce_dict.get(repo, {}).get(cp)
+ if cpdict:
+ pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_useforce:
+ useforce.extend(pkg_useforce)
+ if stable:
+ cpdict = self._repo_pusestableforce_dict.get(repo, {}).get(cp)
+ if cpdict:
+ pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_useforce:
+ useforce.extend(pkg_useforce)
+
+ for i, puseforce_dict in enumerate(self._puseforce_list):
+ if self._useforce_list[i]:
+ useforce.append(self._useforce_list[i])
+ if stable and self._usestableforce_list[i]:
+ useforce.append(self._usestableforce_list[i])
+ cpdict = puseforce_dict.get(cp)
+ if cpdict:
+ pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_useforce:
+ useforce.extend(pkg_useforce)
+ if stable:
+ cpdict = self._pusestableforce_list[i].get(cp)
+ if cpdict:
+ pkg_useforce = ordered_by_atom_specificity(cpdict, pkg)
+ if pkg_useforce:
+ useforce.extend(pkg_useforce)
+
+ return frozenset(stack_lists(useforce, incremental=True))
+
+ def getUseAliases(self, pkg):
+ if hasattr(pkg, "eapi") and not eapi_has_use_aliases(pkg.eapi):
+ return {}
+
+ cp = getattr(pkg, "cp", None)
+ if cp is None:
+ slot = dep_getslot(pkg)
+ repo = dep_getrepo(pkg)
+ pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
+ cp = pkg.cp
+
+ usealiases = {}
+
+ if hasattr(pkg, "repo") and pkg.repo != Package.UNKNOWN_REPO:
+ repos = []
+ try:
+ repos.extend(repo.name for repo in
+ self.repositories[pkg.repo].masters)
+ except KeyError:
+ pass
+ repos.append(pkg.repo)
+ for repo in repos:
+ usealiases_dict = self._repo_usealiases_dict.get(repo, {})
+ for real_flag, aliases in usealiases_dict.items():
+ for alias in aliases:
+ if any(alias in v for k, v in usealiases.items() if k != real_flag):
+ writemsg(_("--- Duplicated USE flag alias for '%s%s%s': '%s'\n") %
+ (pkg.cpv, _repo_separator, pkg.repo, alias), noiselevel=-1)
+ else:
+ usealiases.setdefault(real_flag, []).append(alias)
+ cp_usealiases_dict = self._repo_pusealiases_dict.get(repo, {}).get(cp)
+ if cp_usealiases_dict:
+ usealiases_dict_list = ordered_by_atom_specificity(cp_usealiases_dict, pkg)
+ for usealiases_dict in usealiases_dict_list:
+ for real_flag, aliases in usealiases_dict.items():
+ for alias in aliases:
+ if any(alias in v for k, v in usealiases.items() if k != real_flag):
+ writemsg(_("--- Duplicated USE flag alias for '%s%s%s': '%s'\n") %
+ (pkg.cpv, _repo_separator, pkg.repo, alias), noiselevel=-1)
+ else:
+ usealiases.setdefault(real_flag, []).append(alias)
+
+ return usealiases
+
+ def getPUSE(self, pkg):
+ cp = getattr(pkg, "cp", None)
+ if cp is None:
+ slot = dep_getslot(pkg)
+ repo = dep_getrepo(pkg)
+ pkg = _pkg_str(remove_slot(pkg), slot=slot, repo=repo)
+ cp = pkg.cp
+ ret = ""
+ cpdict = self._pusedict.get(cp)
+ if cpdict:
+ puse_matches = ordered_by_atom_specificity(cpdict, pkg)
+ if puse_matches:
+ puse_list = []
+ for x in puse_matches:
+ puse_list.extend(x)
+ ret = " ".join(puse_list)
+ return ret
+
+ def extract_global_USE_changes(self, old=""):
+ ret = old
+ cpdict = self._pusedict.get("*/*")
+ if cpdict is not None:
+ v = cpdict.pop("*/*", None)
+ if v is not None:
+ ret = " ".join(v)
+ if old:
+ ret = old + " " + ret
+ if not cpdict:
+				#No tokens left in cpdict, remove it.
+ del self._pusedict["*/*"]
+ return ret
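Both _parse_file_to_dict() and _parse_user_files_to_extatomdict() fold USE_EXPAND prefixes (e.g. PYTHON_TARGETS:) into plain flag names while parsing package.use-style entries. A small standalone sketch of that token handling, with illustrative flags:

    # "PREFIX:" sets a prefix, a bare "\n" token (kept by newlines=1
    # parsing) resets it, and "-flag" keeps its sign.
    def fold_use_expand(tokens):
        flags, prefix = [], ""
        for tok in tokens:
            if tok == "\n":
                prefix = ""
            elif tok.endswith(":"):
                prefix = tok[:-1].lower() + "_"
            elif tok.startswith("-"):
                flags.append("-" + prefix + tok[1:])
            else:
                flags.append(prefix + tok)
        return flags

    print(fold_use_expand(["PYTHON_TARGETS:", "python3_11", "-python3_10"]))
    # ['python_targets_python3_11', '-python_targets_python3_10']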
diff --git a/lib/portage/package/ebuild/_config/VirtualsManager.py b/lib/portage/package/ebuild/_config/VirtualsManager.py
new file mode 100644
index 000000000..c4d1e3635
--- /dev/null
+++ b/lib/portage/package/ebuild/_config/VirtualsManager.py
@@ -0,0 +1,233 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'VirtualsManager',
+)
+
+from copy import deepcopy
+
+from portage import os
+from portage.dep import Atom
+from portage.exception import InvalidAtom
+from portage.localization import _
+from portage.util import grabdict, stack_dictlist, writemsg
+from portage.versions import cpv_getkey
+
+class VirtualsManager(object):
+
+ def __init__(self, *args, **kwargs):
+ if kwargs.get("_copy"):
+ return
+
+ assert len(args) == 1, "VirtualsManager.__init__ takes one positional argument"
+ assert not kwargs, "unknown keyword argument(s) '%s' passed to VirtualsManager.__init__" % \
+ ", ".join(kwargs)
+
+ profiles = args[0]
+ self._virtuals = None
+ self._dirVirtuals = None
+ self._virts_p = None
+
+ # Virtuals obtained from the vartree
+ self._treeVirtuals = None
+ # Virtuals added by the depgraph via self.add_depgraph_virtuals().
+ self._depgraphVirtuals = {}
+
+ #Initialise _dirVirtuals.
+ self._read_dirVirtuals(profiles)
+
+ #We could initialise _treeVirtuals here, but some consumers want to
+ #pass their own vartree.
+
+ def _read_dirVirtuals(self, profiles):
+ """
+ Read the 'virtuals' file in all profiles.
+ """
+ virtuals_list = []
+ for x in profiles:
+ virtuals_file = os.path.join(x, "virtuals")
+ virtuals_dict = grabdict(virtuals_file)
+ atoms_dict = {}
+ for k, v in virtuals_dict.items():
+ try:
+ virt_atom = Atom(k)
+ except InvalidAtom:
+ virt_atom = None
+ else:
+ if virt_atom.blocker or \
+ str(virt_atom) != str(virt_atom.cp):
+ virt_atom = None
+ if virt_atom is None:
+ writemsg(_("--- Invalid virtuals atom in %s: %s\n") % \
+ (virtuals_file, k), noiselevel=-1)
+ continue
+ providers = []
+ for atom in v:
+ atom_orig = atom
+ if atom[:1] == '-':
+ # allow incrementals
+ atom = atom[1:]
+ try:
+ atom = Atom(atom)
+ except InvalidAtom:
+ atom = None
+ else:
+ if atom.blocker:
+ atom = None
+ if atom is None:
+ writemsg(_("--- Invalid atom in %s: %s\n") % \
+ (virtuals_file, atom_orig), noiselevel=-1)
+ else:
+ if atom_orig == str(atom):
+ # normal atom, so return as Atom instance
+ providers.append(atom)
+ else:
+ # atom has special prefix, so return as string
+ providers.append(atom_orig)
+ if providers:
+ atoms_dict[virt_atom] = providers
+ if atoms_dict:
+ virtuals_list.append(atoms_dict)
+
+ self._dirVirtuals = stack_dictlist(virtuals_list, incremental=True)
+
+ for virt in self._dirVirtuals:
+ # Preference for virtuals decreases from left to right.
+ self._dirVirtuals[virt].reverse()
+
+ def __deepcopy__(self, memo=None):
+ if memo is None:
+ memo = {}
+ result = VirtualsManager(_copy=True)
+ memo[id(self)] = result
+
+ # immutable attributes (internal policy ensures lack of mutation)
+		# _treeVirtuals is initialised by _populate_treeVirtuals().
+ # Before that it's 'None'.
+ result._treeVirtuals = self._treeVirtuals
+ memo[id(self._treeVirtuals)] = self._treeVirtuals
+		# _dirVirtuals is initialised by __init__.
+ result._dirVirtuals = self._dirVirtuals
+ memo[id(self._dirVirtuals)] = self._dirVirtuals
+
+ # mutable attributes (change when add_depgraph_virtuals() is called)
+ result._virtuals = deepcopy(self._virtuals, memo)
+ result._depgraphVirtuals = deepcopy(self._depgraphVirtuals, memo)
+ result._virts_p = deepcopy(self._virts_p, memo)
+
+ return result
+
+ def _compile_virtuals(self):
+ """Stack installed and profile virtuals. Preference for virtuals
+ decreases from left to right.
+ Order of preference:
+ 1. installed and in profile
+ 2. installed only
+ 3. profile only
+ """
+
+ assert self._treeVirtuals is not None, "_populate_treeVirtuals() must be called before " + \
+ "any query about virtuals"
+
+ # Virtuals by profile+tree preferences.
+ ptVirtuals = {}
+
+ for virt, installed_list in self._treeVirtuals.items():
+ profile_list = self._dirVirtuals.get(virt, None)
+ if not profile_list:
+ continue
+ for cp in installed_list:
+ if cp in profile_list:
+ ptVirtuals.setdefault(virt, [])
+ ptVirtuals[virt].append(cp)
+
+ virtuals = stack_dictlist([ptVirtuals, self._treeVirtuals,
+ self._dirVirtuals, self._depgraphVirtuals])
+ self._virtuals = virtuals
+ self._virts_p = None
+
+ def getvirtuals(self):
+ """
+ Computes self._virtuals if necessary and returns it.
+ self._virtuals is only computed on the first call.
+ """
+ if self._virtuals is None:
+ self._compile_virtuals()
+
+ return self._virtuals
+
+ def _populate_treeVirtuals(self, vartree):
+ """
+ Initialize _treeVirtuals from the given vartree.
+ It must not have been initialized already, otherwise
+ our assumptions about immutability don't hold.
+ """
+ assert self._treeVirtuals is None, "treeVirtuals must not be reinitialized"
+
+ self._treeVirtuals = {}
+
+ for provide, cpv_list in vartree.get_all_provides().items():
+ try:
+ provide = Atom(provide)
+ except InvalidAtom:
+ continue
+ self._treeVirtuals[provide.cp] = \
+ [Atom(cpv_getkey(cpv)) for cpv in cpv_list]
+
+ def populate_treeVirtuals_if_needed(self, vartree):
+ """
+ Initialize _treeVirtuals if it hasn't been done already.
+		This is a hack for consumers that already have a populated vartree.
+ """
+ if self._treeVirtuals is not None:
+ return
+
+ self._populate_treeVirtuals(vartree)
+
+ def add_depgraph_virtuals(self, mycpv, virts):
+ """This updates the preferences for old-style virtuals,
+ affecting the behavior of dep_expand() and dep_check()
+ calls. It can change dbapi.match() behavior since that
+ calls dep_expand(). However, dbapi instances have
+ internal match caches that are not invalidated when
+ preferences are updated here. This can potentially
+ lead to some inconsistency (relevant to bug #1343)."""
+
+ #Ensure that self._virtuals is populated.
+ if self._virtuals is None:
+ self.getvirtuals()
+
+ modified = False
+ cp = Atom(cpv_getkey(mycpv))
+ for virt in virts:
+ try:
+ virt = Atom(virt).cp
+ except InvalidAtom:
+ continue
+ providers = self._virtuals.get(virt)
+ if providers and cp in providers:
+ continue
+ providers = self._depgraphVirtuals.get(virt)
+ if providers is None:
+ providers = []
+ self._depgraphVirtuals[virt] = providers
+ if cp not in providers:
+ providers.append(cp)
+ modified = True
+
+ if modified:
+ self._compile_virtuals()
+
+ def get_virts_p(self):
+ if self._virts_p is not None:
+ return self._virts_p
+
+ virts = self.getvirtuals()
+ virts_p = {}
+ for x in virts:
+ vkeysplit = x.split("/")
+ if vkeysplit[1] not in virts_p:
+ virts_p[vkeysplit[1]] = virts[x]
+ self._virts_p = virts_p
+ return virts_p
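
A minimal standalone sketch of the mapping get_virts_p() builds, with made-up virtual entries instead of a real VirtualsManager: only the package-name half of each 'category/name' key is kept, and the first provider list seen for a given name wins.

    # Illustrative only: mimics the get_virts_p() split, not the real class.
    virts = {
        "virtual/editor": ["app-editors/nano", "app-editors/vim"],
        "virtual/libc": ["sys-libs/glibc"],
    }

    virts_p = {}
    for key, providers in virts.items():
        # Keep only the package-name half of "category/name".
        name = key.split("/")[1]
        virts_p.setdefault(name, providers)

    print(virts_p)  # {'editor': [...], 'libc': ['sys-libs/glibc']}
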
diff --git a/lib/portage/package/ebuild/_config/__init__.py b/lib/portage/package/ebuild/_config/__init__.py
new file mode 100644
index 000000000..21a391aee
--- /dev/null
+++ b/lib/portage/package/ebuild/_config/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/package/ebuild/_config/env_var_validation.py b/lib/portage/package/ebuild/_config/env_var_validation.py
new file mode 100644
index 000000000..d3db545cb
--- /dev/null
+++ b/lib/portage/package/ebuild/_config/env_var_validation.py
@@ -0,0 +1,23 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.process import find_binary
+from portage.util import shlex_split
+
+def validate_cmd_var(v):
+ """
+ Validate an environment variable value to see if it
+ contains an executable command as the first token.
+ Returns (valid, token_list) where 'valid' is boolean and 'token_list'
+ is the (possibly empty) list of tokens split by shlex.
+ """
+ invalid = False
+ v_split = shlex_split(v)
+ if not v_split:
+ invalid = True
+ elif os.path.isabs(v_split[0]):
+ invalid = not os.access(v_split[0], os.EX_OK)
+ elif find_binary(v_split[0]) is None:
+ invalid = True
+ return (not invalid, v_split)
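
The same first-token check can be approximated with only the standard library; this is a sketch under that assumption (shutil.which standing in for portage.process.find_binary), not the function above.

    import os
    import shlex
    import shutil

    def validate_cmd_var_sketch(value):
        # Split like shlex_split() and test whether the first token is executable.
        tokens = shlex.split(value)
        if not tokens:
            return (False, tokens)
        first = tokens[0]
        if os.path.isabs(first):
            return (os.access(first, os.X_OK), tokens)
        return (shutil.which(first) is not None, tokens)

    print(validate_cmd_var_sketch("bzip2 -9"))  # (True, ['bzip2', '-9']) on most systems
    print(validate_cmd_var_sketch(""))          # (False, [])
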
diff --git a/lib/portage/package/ebuild/_config/features_set.py b/lib/portage/package/ebuild/_config/features_set.py
new file mode 100644
index 000000000..62236fd89
--- /dev/null
+++ b/lib/portage/package/ebuild/_config/features_set.py
@@ -0,0 +1,128 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'features_set',
+)
+
+import logging
+
+from portage.const import SUPPORTED_FEATURES
+from portage.localization import _
+from portage.output import colorize
+from portage.util import writemsg_level
+
+class features_set(object):
+ """
+ Provides relevant set operations needed for access and modification of
+ config.features. The FEATURES variable is automatically synchronized
+ upon modification.
+
+ Modifications result in a permanent override that will cause the change
+ to propagate to the incremental stacking mechanism in config.regenerate().
+ This eliminates the need to call config.backup_changes() when FEATURES
+ is modified, since any overrides are guaranteed to persist despite calls
+ to config.reset().
+ """
+
+ def __init__(self, settings):
+ self._settings = settings
+ self._features = set()
+
+ def __contains__(self, k):
+ return k in self._features
+
+ def __iter__(self):
+ return iter(self._features)
+
+ def _sync_env_var(self):
+ self._settings['FEATURES'] = ' '.join(sorted(self._features))
+
+ def add(self, k):
+ self._settings.modifying()
+ self._settings._features_overrides.append(k)
+ if k not in self._features:
+ self._features.add(k)
+ self._sync_env_var()
+
+ def update(self, values):
+ self._settings.modifying()
+ values = list(values)
+ self._settings._features_overrides.extend(values)
+ need_sync = False
+ for k in values:
+ if k in self._features:
+ continue
+ self._features.add(k)
+ need_sync = True
+ if need_sync:
+ self._sync_env_var()
+
+ def difference_update(self, values):
+ self._settings.modifying()
+ values = list(values)
+ self._settings._features_overrides.extend('-' + k for k in values)
+ remove_us = self._features.intersection(values)
+ if remove_us:
+ self._features.difference_update(values)
+ self._sync_env_var()
+
+ def remove(self, k):
+ """
+ This never raises KeyError, since it records a permanent override
+ that will prevent the given flag from ever being added again by
+ incremental stacking in config.regenerate().
+ """
+ self.discard(k)
+
+ def discard(self, k):
+ self._settings.modifying()
+ self._settings._features_overrides.append('-' + k)
+ if k in self._features:
+ self._features.remove(k)
+ self._sync_env_var()
+
+ def _validate(self):
+ """
+ Implements unknown-features-warn and unknown-features-filter.
+ """
+ if 'unknown-features-warn' in self._features:
+ unknown_features = \
+ self._features.difference(SUPPORTED_FEATURES)
+ if unknown_features:
+ unknown_features = unknown_features.difference(
+ self._settings._unknown_features)
+ if unknown_features:
+ self._settings._unknown_features.update(unknown_features)
+ writemsg_level(colorize("BAD",
+ _("FEATURES variable contains unknown value(s): %s") % \
+ ", ".join(sorted(unknown_features))) \
+ + "\n", level=logging.WARNING, noiselevel=-1)
+
+ if 'unknown-features-filter' in self._features:
+ unknown_features = \
+ self._features.difference(SUPPORTED_FEATURES)
+ if unknown_features:
+ self.difference_update(unknown_features)
+ self._prune_overrides()
+
+ def _prune_overrides(self):
+ """
+ If there are lots of invalid package.env FEATURES settings
+ then unknown-features-filter can make _features_overrides
+ grow larger and larger, so prune it. This performs incremental
+ stacking with preservation of negative values since they need
+ to persist for future config.regenerate() calls.
+ """
+ overrides_set = set(self._settings._features_overrides)
+ positive = set()
+ negative = set()
+ for x in self._settings._features_overrides:
+ if x[:1] == '-':
+ positive.discard(x[1:])
+ negative.add(x[1:])
+ else:
+ positive.add(x)
+ negative.discard(x)
+ self._settings._features_overrides[:] = \
+ list(positive) + list('-' + x for x in negative)
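
The pruning in _prune_overrides() reduces to a last-write-wins pass where '-foo' cancels an earlier 'foo' and vice versa; a self-contained sketch with made-up feature names:

    def prune_overrides_sketch(overrides):
        # Later entries win; '-foo' cancels an earlier 'foo' and vice versa.
        positive, negative = set(), set()
        for token in overrides:
            if token.startswith("-"):
                positive.discard(token[1:])
                negative.add(token[1:])
            else:
                positive.add(token)
                negative.discard(token)
        return sorted(positive) + sorted("-" + t for t in negative)

    print(prune_overrides_sketch(["ccache", "-ccache", "test", "-unknown-feature"]))
    # ['test', '-ccache', '-unknown-feature']
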
diff --git a/lib/portage/package/ebuild/_config/helper.py b/lib/portage/package/ebuild/_config/helper.py
new file mode 100644
index 000000000..ee0c090a0
--- /dev/null
+++ b/lib/portage/package/ebuild/_config/helper.py
@@ -0,0 +1,64 @@
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'ordered_by_atom_specificity', 'prune_incremental',
+)
+
+from _emerge.Package import Package
+from portage.dep import best_match_to_list, _repo_separator
+
+def ordered_by_atom_specificity(cpdict, pkg, repo=None):
+ """
+ Return a list of matched values from the given cpdict,
+ in ascending order by atom specificity. The rationale
+ for this order is that package.* config files are
+ typically written in ChangeLog like fashion, so it's
+ most friendly if the order that the atoms are written
+ does not matter. Therefore, settings from more specific
+ atoms override those of less specific atoms. Without
+ this behavior, settings from relatively unspecific atoms
+ would (somewhat confusingly) override the settings of
+ more specific atoms, requiring people to make adjustments
+ to the order that atoms are listed in the config file in
+ order to achieve desired results (and thus corrupting
+ the ChangeLog like ordering of the file).
+ """
+ if not hasattr(pkg, 'repo') and repo and repo != Package.UNKNOWN_REPO:
+ pkg = pkg + _repo_separator + repo
+
+ results = []
+ keys = list(cpdict)
+
+ while keys:
+ bestmatch = best_match_to_list(pkg, keys)
+ if bestmatch:
+ keys.remove(bestmatch)
+ results.append(cpdict[bestmatch])
+ else:
+ break
+
+ if results:
+ # reverse, so the most specific atoms come last
+ results.reverse()
+
+ return results
+
+def prune_incremental(split):
+ """
+ Prune off any parts of an incremental variable that are
+ made irrelevant by the latest occurring * or -*. This
+ could be more aggressive but that might be confusing
+ and the point is just to reduce noise a bit.
+ """
+ for i, x in enumerate(reversed(split)):
+ if x == '*':
+ split = split[-i-1:]
+ break
+ elif x == '-*':
+ if i == 0:
+ split = []
+ else:
+ split = split[-i:]
+ break
+ return split
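
To make prune_incremental() concrete, a local re-statement of the same rule with a few sample token lists (everything before the most recent '*' or '-*' is discarded):

    def prune_incremental_sketch(tokens):
        # Same pruning rule as helper.prune_incremental(): everything before the
        # most recent '*' or '-*' is irrelevant for incremental stacking.
        for i, x in enumerate(reversed(tokens)):
            if x == "*":
                return tokens[-i - 1:]
            if x == "-*":
                return [] if i == 0 else tokens[-i:]
        return tokens

    print(prune_incremental_sketch(["a", "b", "-*", "c", "d"]))  # ['c', 'd']
    print(prune_incremental_sketch(["a", "*", "b"]))             # ['*', 'b']
    print(prune_incremental_sketch(["a", "-*"]))                 # []
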
diff --git a/lib/portage/package/ebuild/_config/special_env_vars.py b/lib/portage/package/ebuild/_config/special_env_vars.py
new file mode 100644
index 000000000..a308518af
--- /dev/null
+++ b/lib/portage/package/ebuild/_config/special_env_vars.py
@@ -0,0 +1,211 @@
+# Copyright 2010-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = (
+ 'case_insensitive_vars', 'default_globals', 'env_blacklist', \
+ 'environ_filter', 'environ_whitelist', 'environ_whitelist_re',
+)
+
+import re
+
+# Blacklisted variables are internal variables that are never allowed
+# to enter the config instance from the external environment or
+# configuration files.
+env_blacklist = frozenset((
+ "A", "AA", "BDEPEND", "BROOT", "CATEGORY", "DEPEND", "DESCRIPTION",
+ "DOCS", "EAPI",
+ "EBUILD_FORCE_TEST", "EBUILD_PHASE",
+ "EBUILD_PHASE_FUNC", "EBUILD_SKIP_MANIFEST",
+ "ED", "EMERGE_FROM", "EPREFIX", "EROOT",
+ "GREP_OPTIONS", "HDEPEND", "HOMEPAGE",
+ "INHERITED", "IUSE", "IUSE_EFFECTIVE",
+ "KEYWORDS", "LICENSE", "MERGE_TYPE",
+ "PDEPEND", "PF", "PKGUSE", "PORTAGE_BACKGROUND",
+ "PORTAGE_BACKGROUND_UNMERGE", "PORTAGE_BUILDDIR_LOCKED",
+ "PORTAGE_BUILT_USE", "PORTAGE_CONFIGROOT",
+ "PORTAGE_INTERNAL_CALLER", "PORTAGE_IUSE",
+ "PORTAGE_NONFATAL", "PORTAGE_PIPE_FD", "PORTAGE_REPO_NAME",
+ "PORTAGE_USE", "PROPERTIES", "RDEPEND", "REPOSITORY",
+ "REQUIRED_USE", "RESTRICT", "ROOT", "SLOT", "SRC_URI", "_"
+))
+
+environ_whitelist = []
+
+# Whitelisted variables are always allowed to enter the ebuild
+# environment. Generally, this only includes special portage
+# variables. Ebuilds can unset variables that are not whitelisted
+# and rely on them remaining unset for future phases, without them
+# leaking back in from various locations (bug #189417). It's very
+# important to set our special BASH_ENV variable in the ebuild
+# environment in order to prevent sandbox from sourcing /etc/profile
+ in its bashrc (causing major leakage).
+environ_whitelist += [
+ "ACCEPT_LICENSE", "BASH_ENV", "BROOT", "BUILD_PREFIX", "COLUMNS", "D",
+ "DISTDIR", "DOC_SYMLINKS_DIR", "EAPI", "EBUILD",
+ "EBUILD_FORCE_TEST",
+ "EBUILD_PHASE", "EBUILD_PHASE_FUNC", "ECLASSDIR", "ECLASS_DEPTH", "ED",
+ "EMERGE_FROM", "EPREFIX", "EROOT", "ESYSROOT",
+ "FEATURES", "FILESDIR", "HOME", "MERGE_TYPE", "NOCOLOR", "PATH",
+ "PKGDIR",
+ "PKGUSE", "PKG_LOGDIR", "PKG_TMPDIR",
+ "PORTAGE_ACTUAL_DISTDIR", "PORTAGE_ARCHLIST", "PORTAGE_BASHRC_FILES",
+ "PORTAGE_BASHRC", "PM_EBUILD_HOOK_DIR",
+ "PORTAGE_BINPKG_FILE", "PORTAGE_BINPKG_TAR_OPTS",
+ "PORTAGE_BINPKG_TMPFILE",
+ "PORTAGE_BIN_PATH",
+ "PORTAGE_BUILDDIR", "PORTAGE_BUILD_GROUP", "PORTAGE_BUILD_USER",
+ "PORTAGE_BUNZIP2_COMMAND", "PORTAGE_BZIP2_COMMAND",
+ "PORTAGE_COLORMAP", "PORTAGE_COMPRESS", "PORTAGE_COMPRESSION_COMMAND",
+ "PORTAGE_COMPRESS_EXCLUDE_SUFFIXES",
+ "PORTAGE_CONFIGROOT", "PORTAGE_DEBUG", "PORTAGE_DEPCACHEDIR",
+ "PORTAGE_DOHTML_UNWARNED_SKIPPED_EXTENSIONS",
+ "PORTAGE_DOHTML_UNWARNED_SKIPPED_FILES",
+ "PORTAGE_DOHTML_WARN_ON_SKIPPED_FILES",
+ "PORTAGE_EBUILD_EXIT_FILE", "PORTAGE_FEATURES",
+ "PORTAGE_GID", "PORTAGE_GRPNAME",
+ "PORTAGE_INTERNAL_CALLER",
+ "PORTAGE_INST_GID", "PORTAGE_INST_UID",
+ "PORTAGE_IPC_DAEMON", "PORTAGE_IUSE", "PORTAGE_ECLASS_LOCATIONS",
+ "PORTAGE_LOG_FILE", "PORTAGE_OVERRIDE_EPREFIX", "PORTAGE_PIPE_FD",
+ "PORTAGE_PYM_PATH", "PORTAGE_PYTHON",
+ "PORTAGE_PYTHONPATH", "PORTAGE_QUIET",
+ "PORTAGE_REPO_NAME", "PORTAGE_REPOSITORIES", "PORTAGE_RESTRICT",
+ "PORTAGE_SIGPIPE_STATUS", "PORTAGE_SOCKS5_PROXY",
+ "PORTAGE_TMPDIR", "PORTAGE_UPDATE_ENV", "PORTAGE_USERNAME",
+ "PORTAGE_VERBOSE", "PORTAGE_WORKDIR_MODE", "PORTAGE_XATTR_EXCLUDE",
+ "PORTDIR", "PORTDIR_OVERLAY", "PREROOTPATH", "PYTHONDONTWRITEBYTECODE",
+ "REPLACING_VERSIONS", "REPLACED_BY_VERSION",
+ "ROOT", "ROOTPATH", "SYSROOT", "T", "TMP", "TMPDIR",
+ "USE_EXPAND", "USE_ORDER", "WORKDIR",
+ "XARGS", "__PORTAGE_TEST_HARDLINK_LOCKS",
+]
+
+# user config variables
+environ_whitelist += [
+ "DOC_SYMLINKS_DIR", "INSTALL_MASK", "PKG_INSTALL_MASK"
+]
+
+environ_whitelist += [
+ "A", "AA", "CATEGORY", "P", "PF", "PN", "PR", "PV", "PVR"
+]
+
+# misc variables inherited from the calling environment
+environ_whitelist += [
+ "COLORTERM", "DISPLAY", "EDITOR", "LESS",
+ "LESSOPEN", "LOGNAME", "LS_COLORS", "PAGER",
+ "TERM", "TERMCAP", "USER",
+ 'ftp_proxy', 'http_proxy', 'no_proxy',
+]
+
+# tempdir settings
+environ_whitelist += [
+ "TMPDIR", "TEMP", "TMP",
+]
+
+# localization settings
+environ_whitelist += [
+ "LANG", "LC_COLLATE", "LC_CTYPE", "LC_MESSAGES",
+ "LC_MONETARY", "LC_NUMERIC", "LC_TIME", "LC_PAPER",
+ "LC_ALL",
+]
+
+# other variables inherited from the calling environment
+environ_whitelist += [
+ "CVS_RSH", "ECHANGELOG_USER",
+ "GPG_AGENT_INFO",
+ "SSH_AGENT_PID", "SSH_AUTH_SOCK",
+ "STY", "WINDOW", "XAUTHORITY",
+]
+
+environ_whitelist = frozenset(environ_whitelist)
+
+environ_whitelist_re = re.compile(r'^(CCACHE_|DISTCC_).*')
+
+# Filter selected variables in the config.environ() method so that
+# they don't needlessly propagate down into the ebuild environment.
+environ_filter = []
+
+# Exclude anything that could be extremely long here (like SRC_URI)
+# since that could cause execve() calls to fail with E2BIG errors. For
+# example, see bug #262647.
+environ_filter += [
+ 'DEPEND', 'RDEPEND', 'PDEPEND', 'SRC_URI',
+]
+
+# misc variables inherited from the calling environment
+environ_filter += [
+ "INFOPATH", "MANPATH", "USER",
+]
+
+# variables that break bash
+environ_filter += [
+ "HISTFILE", "POSIXLY_CORRECT",
+]
+
+# portage config variables and variables set directly by portage
+environ_filter += [
+ "ACCEPT_CHOSTS", "ACCEPT_KEYWORDS", "ACCEPT_PROPERTIES",
+ "ACCEPT_RESTRICT", "AUTOCLEAN",
+ "BINPKG_COMPRESS", "BINPKG_COMPRESS_FLAGS",
+ "CLEAN_DELAY", "COLLISION_IGNORE",
+ "CONFIG_PROTECT", "CONFIG_PROTECT_MASK",
+ "DCO_SIGNED_OFF_BY",
+ "EGENCACHE_DEFAULT_OPTS", "EMERGE_DEFAULT_OPTS",
+ "EMERGE_LOG_DIR",
+ "EMERGE_WARNING_DELAY",
+ "FETCHCOMMAND", "FETCHCOMMAND_FTP",
+ "FETCHCOMMAND_HTTP", "FETCHCOMMAND_HTTPS",
+ "FETCHCOMMAND_RSYNC", "FETCHCOMMAND_SFTP",
+ "GENTOO_MIRRORS", "NOCONFMEM", "O",
+ "PORTAGE_BACKGROUND", "PORTAGE_BACKGROUND_UNMERGE",
+ "PORTAGE_BINHOST", "PORTAGE_BINPKG_FORMAT",
+ "PORTAGE_BUILDDIR_LOCKED",
+ "PORTAGE_CHECKSUM_FILTER",
+ "PORTAGE_ELOG_CLASSES",
+ "PORTAGE_ELOG_MAILFROM", "PORTAGE_ELOG_MAILSUBJECT",
+ "PORTAGE_ELOG_MAILURI", "PORTAGE_ELOG_SYSTEM",
+ "PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS", "PORTAGE_FETCH_RESUME_MIN_SIZE",
+ "PORTAGE_GPG_DIR",
+ "PORTAGE_GPG_KEY", "PORTAGE_GPG_SIGNING_COMMAND",
+ "PORTAGE_IONICE_COMMAND",
+ "PORTAGE_PACKAGE_EMPTY_ABORT",
+ "PORTAGE_REPO_DUPLICATE_WARN",
+ "PORTAGE_RO_DISTDIRS",
+ "PORTAGE_RSYNC_EXTRA_OPTS", "PORTAGE_RSYNC_OPTS",
+ "PORTAGE_RSYNC_RETRIES", "PORTAGE_SSH_OPTS", "PORTAGE_SYNC_STALE",
+ "PORTAGE_USE",
+ "PORT_LOGDIR", "PORT_LOGDIR_CLEAN",
+ "QUICKPKG_DEFAULT_OPTS", "REPOMAN_DEFAULT_OPTS",
+ "RESUMECOMMAND", "RESUMECOMMAND_FTP",
+ "RESUMECOMMAND_HTTP", "RESUMECOMMAND_HTTPS",
+ "RESUMECOMMAND_RSYNC", "RESUMECOMMAND_SFTP",
+ "UNINSTALL_IGNORE", "USE_EXPAND_HIDDEN", "USE_ORDER",
+ "__PORTAGE_HELPER"
+]
+
+# No longer supported variables
+environ_filter += [
+ "SYNC"
+]
+
+environ_filter = frozenset(environ_filter)
+
+# Variables that are not allowed to have per-repo or per-package
+# settings.
+global_only_vars = frozenset([
+ "CONFIG_PROTECT",
+])
+
+default_globals = {
+ 'ACCEPT_LICENSE': '* -@EULA',
+ 'ACCEPT_PROPERTIES': '*',
+ 'PORTAGE_BZIP2_COMMAND': 'bzip2',
+}
+
+validate_commands = ('PORTAGE_BZIP2_COMMAND', 'PORTAGE_BUNZIP2_COMMAND',)
+
+# To enhance usability, make some vars case insensitive
+# by forcing them to lower case.
+case_insensitive_vars = ('AUTOCLEAN', 'NOCOLOR',)
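
Broadly, whitelisted names (or names matching the CCACHE_/DISTCC_ pattern) are allowed into the ebuild environment while filtered names are culled from config.environ(); the snippet below is a rough sketch of the whitelist side only, with an abridged stand-in set, not the actual config.environ() logic.

    import re

    environ_whitelist = frozenset({"PATH", "DISTDIR", "FEATURES"})  # abridged stand-in
    environ_whitelist_re = re.compile(r"^(CCACHE_|DISTCC_)")

    def exported_to_ebuild(name):
        # Sketch only: the real decision lives in config.environ(), which also
        # consults environ_filter and other state.
        return name in environ_whitelist or bool(environ_whitelist_re.match(name))

    for var in ("PATH", "CCACHE_DIR", "SRC_URI", "RANDOM_VAR"):
        print(var, exported_to_ebuild(var))
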
diff --git a/lib/portage/package/ebuild/_config/unpack_dependencies.py b/lib/portage/package/ebuild/_config/unpack_dependencies.py
new file mode 100644
index 000000000..137518949
--- /dev/null
+++ b/lib/portage/package/ebuild/_config/unpack_dependencies.py
@@ -0,0 +1,38 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os, _supported_eapis
+from portage.dep import use_reduce
+from portage.eapi import eapi_has_automatic_unpack_dependencies
+from portage.exception import InvalidDependString
+from portage.localization import _
+from portage.util import grabfile, writemsg
+
+def load_unpack_dependencies_configuration(repositories):
+ repo_dict = {}
+ for repo in repositories.repos_with_profiles():
+ for eapi in _supported_eapis:
+ if eapi_has_automatic_unpack_dependencies(eapi):
+ file_name = os.path.join(repo.location, "profiles", "unpack_dependencies", eapi)
+ lines = grabfile(file_name, recursive=True)
+ for line in lines:
+ elements = line.split()
+ suffix = elements[0].lower()
+ if len(elements) == 1:
+ writemsg(_("--- Missing unpack dependencies for '%s' suffix in '%s'\n") % (suffix, file_name))
+ depend = " ".join(elements[1:])
+ try:
+ use_reduce(depend, eapi=eapi)
+ except InvalidDependString as e:
+ writemsg(_("--- Invalid unpack dependencies for '%s' suffix in '%s': '%s'\n" % (suffix, file_name, e)))
+ else:
+ repo_dict.setdefault(repo.name, {}).setdefault(eapi, {})[suffix] = depend
+
+ ret = {}
+ for repo in repositories.repos_with_profiles():
+ for repo_name in [x.name for x in repo.masters] + [repo.name]:
+ for eapi in repo_dict.get(repo_name, {}):
+ for suffix, depend in repo_dict.get(repo_name, {}).get(eapi, {}).items():
+ ret.setdefault(repo.name, {}).setdefault(eapi, {})[suffix] = depend
+
+ return ret
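
Each line in profiles/unpack_dependencies/<EAPI> is expected to carry an archive suffix followed by its dependency string; the suffix and atom below are made-up, but the split mirrors the parsing above.

    # Hypothetical line from profiles/unpack_dependencies/<EAPI>:
    line = "7z app-arch/p7zip"

    elements = line.split()
    suffix = elements[0].lower()
    depend = " ".join(elements[1:])
    print(suffix, "->", depend)  # 7z -> app-arch/p7zip
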
diff --git a/lib/portage/package/ebuild/_ipc/ExitCommand.py b/lib/portage/package/ebuild/_ipc/ExitCommand.py
new file mode 100644
index 000000000..f14050b91
--- /dev/null
+++ b/lib/portage/package/ebuild/_ipc/ExitCommand.py
@@ -0,0 +1,27 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.package.ebuild._ipc.IpcCommand import IpcCommand
+
+class ExitCommand(IpcCommand):
+
+ __slots__ = ('exitcode', 'reply_hook',)
+
+ def __init__(self):
+ IpcCommand.__init__(self)
+ self.reply_hook = None
+ self.exitcode = None
+
+ def __call__(self, argv):
+
+ if self.exitcode is not None:
+ # Ignore all but the first call, since if die is called
+ # then we certainly want to honor that exitcode, even if
+ # the ebuild process manages to send a second exit
+ # command.
+ self.reply_hook = None
+ else:
+ self.exitcode = int(argv[1])
+
+ # (stdout, stderr, returncode)
+ return ('', '', 0)
diff --git a/lib/portage/package/ebuild/_ipc/IpcCommand.py b/lib/portage/package/ebuild/_ipc/IpcCommand.py
new file mode 100644
index 000000000..efb27f0a2
--- /dev/null
+++ b/lib/portage/package/ebuild/_ipc/IpcCommand.py
@@ -0,0 +1,9 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+class IpcCommand(object):
+
+ __slots__ = ()
+
+ def __call__(self, argv):
+ raise NotImplementedError(self)
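
Every IPC command follows the same small contract: it is called with the argv received from the daemon and returns a (stdout, stderr, returncode) tuple, as ExitCommand above and QueryCommand below do. A toy subclass, assuming a Portage checkout is importable, purely for illustration:

    from portage.package.ebuild._ipc.IpcCommand import IpcCommand

    class EchoCommand(IpcCommand):
        """Hypothetical command that echoes its arguments back to the caller."""

        __slots__ = ()

        def __call__(self, argv):
            # argv[0] is the command name; the remainder are its arguments.
            return (" ".join(argv[1:]) + "\n", "", 0)

    print(EchoCommand()(["echo", "hello", "world"]))  # ('hello world\n', '', 0)
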
diff --git a/lib/portage/package/ebuild/_ipc/QueryCommand.py b/lib/portage/package/ebuild/_ipc/QueryCommand.py
new file mode 100644
index 000000000..fa6d1ea16
--- /dev/null
+++ b/lib/portage/package/ebuild/_ipc/QueryCommand.py
@@ -0,0 +1,140 @@
+# Copyright 2010-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import io
+
+import portage
+from portage import os
+from portage.dep import Atom, _repo_name_re
+from portage.eapi import eapi_has_repo_deps
+from portage.elog import messages as elog_messages
+from portage.exception import InvalidAtom
+from portage.package.ebuild._ipc.IpcCommand import IpcCommand
+from portage.util import normalize_path
+from portage.versions import best
+
+class QueryCommand(IpcCommand):
+
+ __slots__ = ('phase', 'settings',)
+
+ _db = None
+
+ @classmethod
+ def get_db(cls):
+ if cls._db is not None:
+ return cls._db
+ return portage.db
+
+ def __init__(self, settings, phase):
+ IpcCommand.__init__(self)
+ self.settings = settings
+ self.phase = phase
+
+ def __call__(self, argv):
+ """
+ @return: tuple of (stdout, stderr, returncode)
+ """
+
+ # Python 3:
+ # cmd, root, *args = argv
+ cmd = argv[0]
+ root = argv[1]
+ args = argv[2:]
+
+ warnings = []
+ warnings_str = ''
+
+ db = self.get_db()
+ eapi = self.settings.get('EAPI')
+
+ root = normalize_path(root or os.sep).rstrip(os.sep) + os.sep
+ if root not in db:
+ return ('', '%s: Invalid ROOT: %s\n' % (cmd, root), 3)
+
+ portdb = db[root]["porttree"].dbapi
+ vardb = db[root]["vartree"].dbapi
+
+ if cmd in ('best_version', 'has_version'):
+ allow_repo = eapi_has_repo_deps(eapi)
+ try:
+ atom = Atom(args[0], allow_repo=allow_repo)
+ except InvalidAtom:
+ return ('', '%s: Invalid atom: %s\n' % (cmd, args[0]), 2)
+
+ try:
+ atom = Atom(args[0], allow_repo=allow_repo, eapi=eapi)
+ except InvalidAtom as e:
+ warnings.append("QA Notice: %s: %s" % (cmd, e))
+
+ use = self.settings.get('PORTAGE_BUILT_USE')
+ if use is None:
+ use = self.settings['PORTAGE_USE']
+
+ use = frozenset(use.split())
+ atom = atom.evaluate_conditionals(use)
+
+ if warnings:
+ warnings_str = self._elog('eqawarn', warnings)
+
+ if cmd == 'has_version':
+ if vardb.match(atom):
+ returncode = 0
+ else:
+ returncode = 1
+ return ('', warnings_str, returncode)
+ elif cmd == 'best_version':
+ m = best(vardb.match(atom))
+ return ('%s\n' % m, warnings_str, 0)
+ elif cmd in ('master_repositories', 'repository_path', 'available_eclasses', 'eclass_path', 'license_path'):
+ repo = _repo_name_re.match(args[0])
+ if repo is None:
+ return ('', '%s: Invalid repository: %s\n' % (cmd, args[0]), 2)
+ try:
+ repo = portdb.repositories[args[0]]
+ except KeyError:
+ return ('', warnings_str, 1)
+
+ if cmd == 'master_repositories':
+ return ('%s\n' % ' '.join(x.name for x in repo.masters), warnings_str, 0)
+ elif cmd == 'repository_path':
+ return ('%s\n' % repo.location, warnings_str, 0)
+ elif cmd == 'available_eclasses':
+ return ('%s\n' % ' '.join(sorted(repo.eclass_db.eclasses)), warnings_str, 0)
+ elif cmd == 'eclass_path':
+ try:
+ eclass = repo.eclass_db.eclasses[args[1]]
+ except KeyError:
+ return ('', warnings_str, 1)
+ return ('%s\n' % eclass.location, warnings_str, 0)
+ elif cmd == 'license_path':
+ paths = reversed([os.path.join(x.location, 'licenses', args[1]) for x in list(repo.masters) + [repo]])
+ for path in paths:
+ if os.path.exists(path):
+ return ('%s\n' % path, warnings_str, 0)
+ return ('', warnings_str, 1)
+ else:
+ return ('', 'Invalid command: %s\n' % cmd, 3)
+
+ def _elog(self, elog_funcname, lines):
+ """
+ This returns a string, to be returned via ipc and displayed at the
+ appropriate place in the build output. We wouldn't want to open the
+ log here since it is already opened by AbstractEbuildProcess and we
+ don't want to corrupt it, especially if it is being written with
+ compression.
+ """
+ out = io.StringIO()
+ phase = self.phase
+ elog_func = getattr(elog_messages, elog_funcname)
+ global_havecolor = portage.output.havecolor
+ try:
+ portage.output.havecolor = \
+ self.settings.get('NOCOLOR', 'false').lower() in ('no', 'false')
+ for line in lines:
+ elog_func(line, phase=phase, key=self.settings.mycpv, out=out)
+ finally:
+ portage.output.havecolor = global_havecolor
+ msg = out.getvalue()
+ return msg
diff --git a/lib/portage/package/ebuild/_ipc/__init__.py b/lib/portage/package/ebuild/_ipc/__init__.py
new file mode 100644
index 000000000..21a391aee
--- /dev/null
+++ b/lib/portage/package/ebuild/_ipc/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/package/ebuild/_metadata_invalid.py b/lib/portage/package/ebuild/_metadata_invalid.py
new file mode 100644
index 000000000..bcf1f7fcd
--- /dev/null
+++ b/lib/portage/package/ebuild/_metadata_invalid.py
@@ -0,0 +1,41 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import textwrap
+
+import portage
+from portage.dep import _repo_separator
+from portage.elog import elog_process
+from portage.elog.messages import eerror
+
+def eapi_invalid(self, cpv, repo_name, settings,
+ eapi_var, eapi_parsed, eapi_lineno):
+
+ msg = []
+ msg.extend(textwrap.wrap(("EAPI assignment in ebuild '%s%s%s' does not"
+ " conform with PMS section 7.3.1 (see bug #402167):") %
+ (cpv, _repo_separator, repo_name), 70))
+
+ if not eapi_parsed:
+ # None means the assignment was not found, while an
+ # empty string indicates an (invalid) empty assignment.
+ msg.append(
+ "\tvalid EAPI assignment must"
+ " occur on or before line: %s" %
+ eapi_lineno)
+ else:
+ msg.append(("\tbash returned EAPI '%s' which does not match "
+ "assignment on line: %s") %
+ (eapi_var, eapi_lineno))
+
+ if portage.data.secpass >= 2:
+ # TODO: improve elog permission error handling (bug #416231)
+ for line in msg:
+ eerror(line, phase="other", key=cpv)
+ elog_process(cpv, settings,
+ phasefilter=("other",))
+
+ else:
+ out = portage.output.EOutput()
+ for line in msg:
+ out.eerror(line)
diff --git a/lib/portage/package/ebuild/_parallel_manifest/ManifestProcess.py b/lib/portage/package/ebuild/_parallel_manifest/ManifestProcess.py
new file mode 100644
index 000000000..44e257664
--- /dev/null
+++ b/lib/portage/package/ebuild/_parallel_manifest/ManifestProcess.py
@@ -0,0 +1,43 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.exception import (FileNotFound,
+ PermissionDenied, PortagePackageException)
+from portage.localization import _
+from portage.util._async.ForkProcess import ForkProcess
+
+class ManifestProcess(ForkProcess):
+
+ __slots__ = ("cp", "distdir", "fetchlist_dict", "repo_config")
+
+ MODIFIED = 16
+
+ def _run(self):
+ mf = self.repo_config.load_manifest(
+ os.path.join(self.repo_config.location, self.cp),
+ self.distdir, fetchlist_dict=self.fetchlist_dict)
+
+ try:
+ mf.create(assumeDistHashesAlways=True)
+ except FileNotFound as e:
+ portage.writemsg(_("!!! File %s doesn't exist, can't update "
+ "Manifest\n") % e, noiselevel=-1)
+ return 1
+
+ except PortagePackageException as e:
+ portage.writemsg(("!!! %s\n") % (e,), noiselevel=-1)
+ return 1
+
+ try:
+ modified = mf.write(sign=False)
+ except PermissionDenied as e:
+ portage.writemsg("!!! %s: %s\n" % (_("Permission Denied"), e,),
+ noiselevel=-1)
+ return 1
+ else:
+ if modified:
+ return self.MODIFIED
+ else:
+ return os.EX_OK
diff --git a/lib/portage/package/ebuild/_parallel_manifest/ManifestScheduler.py b/lib/portage/package/ebuild/_parallel_manifest/ManifestScheduler.py
new file mode 100644
index 000000000..fabea9bc1
--- /dev/null
+++ b/lib/portage/package/ebuild/_parallel_manifest/ManifestScheduler.py
@@ -0,0 +1,88 @@
+# Copyright 2012-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.dbapi.porttree import _async_manifest_fetchlist
+from portage.dep import _repo_separator
+from portage.localization import _
+from portage.util._async.AsyncScheduler import AsyncScheduler
+from .ManifestTask import ManifestTask
+
+class ManifestScheduler(AsyncScheduler):
+
+ def __init__(self, portdb, cp_iter=None,
+ gpg_cmd=None, gpg_vars=None, force_sign_key=None, **kwargs):
+
+ AsyncScheduler.__init__(self, **kwargs)
+
+ self._portdb = portdb
+
+ if cp_iter is None:
+ cp_iter = self._iter_every_cp()
+ self._cp_iter = cp_iter
+ self._gpg_cmd = gpg_cmd
+ self._gpg_vars = gpg_vars
+ self._force_sign_key = force_sign_key
+ self._task_iter = self._iter_tasks()
+
+ def _next_task(self):
+ return next(self._task_iter)
+
+ def _iter_every_cp(self):
+ # List categories individually, in order to start yielding quicker,
+ # and in order to reduce latency in case of a signal interrupt.
+ cp_all = self._portdb.cp_all
+ for category in sorted(self._portdb.categories):
+ for cp in cp_all(categories=(category,)):
+ yield cp
+
+ def _iter_tasks(self):
+ portdb = self._portdb
+ distdir = portdb.settings["DISTDIR"]
+ disabled_repos = set()
+
+ for cp in self._cp_iter:
+ if self._terminated.is_set():
+ break
+ # We iterate over portdb.porttrees, since it's common to
+ # tweak this attribute in order to adjust repo selection.
+ for mytree in portdb.porttrees:
+ if self._terminated.is_set():
+ break
+ repo_config = portdb.repositories.get_repo_for_location(mytree)
+ if not repo_config.create_manifest:
+ if repo_config.name not in disabled_repos:
+ disabled_repos.add(repo_config.name)
+ portage.writemsg(
+ _(">>> Skipping creating Manifest for %s%s%s; "
+ "repository is configured to not use them\n") %
+ (cp, _repo_separator, repo_config.name),
+ noiselevel=-1)
+ continue
+ cpv_list = portdb.cp_list(cp, mytree=[repo_config.location])
+ if not cpv_list:
+ continue
+
+ # Use _async_manifest_fetchlist(max_jobs=1), since we
+ # spawn concurrent ManifestTask instances.
+ yield ManifestTask(cp=cp, distdir=distdir,
+ fetchlist_dict=_async_manifest_fetchlist(
+ portdb, repo_config, cp, cpv_list=cpv_list,
+ max_jobs=1, loop=self._event_loop),
+ repo_config=repo_config,
+ gpg_cmd=self._gpg_cmd, gpg_vars=self._gpg_vars,
+ force_sign_key=self._force_sign_key)
+
+ def _task_exit(self, task):
+
+ if task.returncode != os.EX_OK:
+ if not self._terminated_tasks:
+ portage.writemsg(
+ "Error processing %s%s%s, continuing...\n" %
+ (task.cp, _repo_separator, task.repo_config.name),
+ noiselevel=-1)
+
+ AsyncScheduler._task_exit(self, task)
+
+
diff --git a/lib/portage/package/ebuild/_parallel_manifest/ManifestTask.py b/lib/portage/package/ebuild/_parallel_manifest/ManifestTask.py
new file mode 100644
index 000000000..6bf5e82ef
--- /dev/null
+++ b/lib/portage/package/ebuild/_parallel_manifest/ManifestTask.py
@@ -0,0 +1,208 @@
+# Copyright 2012-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import re
+import subprocess
+
+from portage import os
+from portage import _unicode_encode, _encodings
+from portage.const import MANIFEST2_IDENTIFIERS
+from portage.dep import _repo_separator
+from portage.exception import InvalidDependString
+from portage.localization import _
+from portage.util import (atomic_ofstream, grablines,
+ shlex_split, varexpand, writemsg)
+from portage.util._async.AsyncTaskFuture import AsyncTaskFuture
+from portage.util._async.PipeLogger import PipeLogger
+from portage.util._async.PopenProcess import PopenProcess
+from _emerge.CompositeTask import CompositeTask
+from _emerge.PipeReader import PipeReader
+from .ManifestProcess import ManifestProcess
+
+class ManifestTask(CompositeTask):
+
+ __slots__ = ("cp", "distdir", "fetchlist_dict", "gpg_cmd",
+ "gpg_vars", "repo_config", "force_sign_key", "_manifest_path")
+
+ _PGP_HEADER = b"BEGIN PGP SIGNED MESSAGE"
+ _manifest_line_re = re.compile(r'^(%s) ' % "|".join(MANIFEST2_IDENTIFIERS))
+ _gpg_key_id_re = re.compile(r'^[0-9A-F]*$')
+ _gpg_key_id_lengths = (8, 16, 24, 32, 40)
+
+ def _start(self):
+ self._manifest_path = os.path.join(self.repo_config.location,
+ self.cp, "Manifest")
+
+ self._start_task(
+ AsyncTaskFuture(future=self.fetchlist_dict),
+ self._start_with_fetchlist)
+
+ def _start_with_fetchlist(self, fetchlist_task):
+ if self._default_exit(fetchlist_task) != os.EX_OK:
+ if not self.fetchlist_dict.cancelled():
+ try:
+ self.fetchlist_dict.result()
+ except InvalidDependString as e:
+ writemsg(
+ _("!!! %s%s%s: SRC_URI: %s\n") %
+ (self.cp, _repo_separator, self.repo_config.name, e),
+ noiselevel=-1)
+ self._async_wait()
+ return
+ self.fetchlist_dict = self.fetchlist_dict.result()
+ manifest_proc = ManifestProcess(cp=self.cp, distdir=self.distdir,
+ fetchlist_dict=self.fetchlist_dict, repo_config=self.repo_config,
+ scheduler=self.scheduler)
+ self._start_task(manifest_proc, self._manifest_proc_exit)
+
+ def _manifest_proc_exit(self, manifest_proc):
+ self._assert_current(manifest_proc)
+ if manifest_proc.returncode not in (os.EX_OK, manifest_proc.MODIFIED):
+ self.returncode = manifest_proc.returncode
+ self._current_task = None
+ self.wait()
+ return
+
+ modified = manifest_proc.returncode == manifest_proc.MODIFIED
+ sign = self.gpg_cmd is not None
+
+ if not modified and sign:
+ sign = self._need_signature()
+ if not sign and self.force_sign_key is not None \
+ and os.path.exists(self._manifest_path):
+ self._check_sig_key()
+ return
+
+ if not sign or not os.path.exists(self._manifest_path):
+ self.returncode = os.EX_OK
+ self._current_task = None
+ self.wait()
+ return
+
+ self._start_gpg_proc()
+
+ def _check_sig_key(self):
+ null_fd = os.open('/dev/null', os.O_RDONLY)
+ popen_proc = PopenProcess(proc=subprocess.Popen(
+ ["gpg", "--verify", self._manifest_path],
+ stdin=null_fd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
+ pipe_reader=PipeReader())
+ os.close(null_fd)
+ popen_proc.pipe_reader.input_files = {
+ "producer" : popen_proc.proc.stdout}
+ self._start_task(popen_proc, self._check_sig_key_exit)
+
+ @staticmethod
+ def _parse_gpg_key(output):
+ """
+ Returns the first token which appears to represent a gpg key
+ id, or None if there is no such token.
+ """
+ regex = ManifestTask._gpg_key_id_re
+ lengths = ManifestTask._gpg_key_id_lengths
+ for token in output.split():
+ m = regex.match(token)
+ if m is not None and len(m.group(0)) in lengths:
+ return m.group(0)
+ return None
+
+ @staticmethod
+ def _normalize_gpg_key(key_str):
+ """
+ Strips leading "0x" and trailing "!", and converts to uppercase
+ (intended to be the same format as that in gpg --verify output).
+ """
+ key_str = key_str.upper()
+ if key_str.startswith("0X"):
+ key_str = key_str[2:]
+ key_str = key_str.rstrip("!")
+ return key_str
+
+ def _check_sig_key_exit(self, proc):
+ self._assert_current(proc)
+
+ parsed_key = self._parse_gpg_key(
+ proc.pipe_reader.getvalue().decode('utf_8', 'replace'))
+ if parsed_key is not None and \
+ self._normalize_gpg_key(parsed_key) == \
+ self._normalize_gpg_key(self.force_sign_key):
+ self.returncode = os.EX_OK
+ self._current_task = None
+ self.wait()
+ return
+
+ if self._was_cancelled():
+ self.wait()
+ return
+
+ self._strip_sig(self._manifest_path)
+ self._start_gpg_proc()
+
+ @staticmethod
+ def _strip_sig(manifest_path):
+ """
+ Strip an existing signature from a Manifest file.
+ """
+ line_re = ManifestTask._manifest_line_re
+ lines = grablines(manifest_path)
+ f = None
+ try:
+ f = atomic_ofstream(manifest_path)
+ for line in lines:
+ if line_re.match(line) is not None:
+ f.write(line)
+ f.close()
+ f = None
+ finally:
+ if f is not None:
+ f.abort()
+
+ def _start_gpg_proc(self):
+ gpg_vars = self.gpg_vars
+ if gpg_vars is None:
+ gpg_vars = {}
+ else:
+ gpg_vars = gpg_vars.copy()
+ gpg_vars["FILE"] = self._manifest_path
+ gpg_cmd = varexpand(self.gpg_cmd, mydict=gpg_vars)
+ gpg_cmd = shlex_split(gpg_cmd)
+ gpg_proc = PopenProcess(proc=subprocess.Popen(gpg_cmd,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
+ # PipeLogger echoes output and efficiently monitors for process
+ # exit by listening for the stdout EOF event.
+ gpg_proc.pipe_reader = PipeLogger(background=self.background,
+ input_fd=gpg_proc.proc.stdout, scheduler=self.scheduler)
+ self._start_task(gpg_proc, self._gpg_proc_exit)
+
+ def _gpg_proc_exit(self, gpg_proc):
+ if self._default_exit(gpg_proc) != os.EX_OK:
+ self.wait()
+ return
+
+ rename_args = (self._manifest_path + ".asc", self._manifest_path)
+ try:
+ os.rename(*rename_args)
+ except OSError as e:
+ writemsg("!!! rename('%s', '%s'): %s\n" % rename_args + (e,),
+ noiselevel=-1)
+ try:
+ os.unlink(self._manifest_path + ".asc")
+ except OSError:
+ pass
+ self.returncode = 1
+ else:
+ self.returncode = os.EX_OK
+
+ self._current_task = None
+ self.wait()
+
+ def _need_signature(self):
+ try:
+ with open(_unicode_encode(self._manifest_path,
+ encoding=_encodings['fs'], errors='strict'), 'rb') as f:
+ return self._PGP_HEADER not in f.readline()
+ except IOError as e:
+ if e.errno in (errno.ENOENT, errno.ESTALE):
+ return False
+ raise
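
The two static gpg helpers are easy to exercise in isolation; the key id and output line below are fabricated, but they show how a candidate token is matched and then normalized:

    import re

    _gpg_key_id_re = re.compile(r"^[0-9A-F]*$")
    _gpg_key_id_lengths = (8, 16, 24, 32, 40)

    def parse_gpg_key(output):
        # First token that looks like a hex key id of a plausible length.
        for token in output.split():
            m = _gpg_key_id_re.match(token)
            if m is not None and len(m.group(0)) in _gpg_key_id_lengths:
                return m.group(0)
        return None

    def normalize_gpg_key(key_str):
        # Upper-case, drop a leading "0x" and any trailing "!".
        key_str = key_str.upper()
        if key_str.startswith("0X"):
            key_str = key_str[2:]
        return key_str.rstrip("!")

    output = "gpg: Signature made ... using RSA key ID 0123ABCD"
    print(parse_gpg_key(output))             # 0123ABCD
    print(normalize_gpg_key("0x0123abcd!"))  # 0123ABCD
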
diff --git a/lib/portage/package/ebuild/_parallel_manifest/__init__.py b/lib/portage/package/ebuild/_parallel_manifest/__init__.py
new file mode 100644
index 000000000..418ad862b
--- /dev/null
+++ b/lib/portage/package/ebuild/_parallel_manifest/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/package/ebuild/_spawn_nofetch.py b/lib/portage/package/ebuild/_spawn_nofetch.py
new file mode 100644
index 000000000..bbfd5b72b
--- /dev/null
+++ b/lib/portage/package/ebuild/_spawn_nofetch.py
@@ -0,0 +1,125 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import tempfile
+
+import portage
+from portage import os
+from portage import shutil
+from portage.const import EBUILD_PHASES
+from portage.elog import elog_process
+from portage.package.ebuild.config import config
+from portage.package.ebuild.doebuild import doebuild_environment
+from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.EventLoop import EventLoop
+from portage.util._eventloop.global_event_loop import global_event_loop
+from _emerge.CompositeTask import CompositeTask
+from _emerge.EbuildPhase import EbuildPhase
+
+
+class SpawnNofetchWithoutBuilddir(CompositeTask):
+ """
+ This spawns pkg_nofetch if appropriate, while avoiding the
+ need to lock a global build directory. The settings parameter
+ is useful only if setcpv has already been called in order
+ to cache metadata. It will be cloned internally, in order to
+ prevent any changes from interfering with the calling code.
+ If settings is None then a suitable config instance will be
+ acquired from the given portdbapi instance. Do not use the
+ settings parameter unless setcpv has been called on the given
+ instance, since otherwise it's possible to trigger issues like
+ bug #408817 due to fragile assumptions involving the config
+ state inside doebuild_environment().
+
+ A private PORTAGE_BUILDDIR will be created and cleaned up, in
+ order to avoid any interference with any other processes.
+ If PORTAGE_TMPDIR is writable, that will be used, otherwise
+ the default directory for the tempfile module will be used.
+
+ We only call the pkg_nofetch phase if either RESTRICT=fetch
+ is set or the package has explicitly overridden the default
+ pkg_nofetch implementation. This allows specialized messages
+ to be displayed for problematic packages even though they do
+ not set RESTRICT=fetch (bug #336499).
+
+ This class does nothing if the PORTAGE_PARALLEL_FETCHONLY
+ variable is set in the config instance.
+ """
+ __slots__ = ('ebuild_path', 'fd_pipes', 'portdb', 'settings',
+ '_private_tmpdir')
+
+ def _start(self):
+ settings = self.settings
+ if settings is None:
+ settings = self.portdb.settings
+
+ if 'PORTAGE_PARALLEL_FETCHONLY' in settings:
+ # parallel-fetch mode
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ # Prevent temporary config changes from interfering
+ # with config instances that are reused.
+ settings = self.settings = config(clone=settings)
+
+ # We must create our private PORTAGE_TMPDIR before calling
+ # doebuild_environment(), since lots of variables such
+ # as PORTAGE_BUILDDIR refer to paths inside PORTAGE_TMPDIR.
+ portage_tmpdir = settings.get('PORTAGE_TMPDIR')
+ if not portage_tmpdir or not os.access(portage_tmpdir, os.W_OK):
+ portage_tmpdir = None
+ private_tmpdir = self._private_tmpdir = tempfile.mkdtemp(
+ dir=portage_tmpdir)
+ settings['PORTAGE_TMPDIR'] = private_tmpdir
+ settings.backup_changes('PORTAGE_TMPDIR')
+ # private temp dir was just created, so it's not locked yet
+ settings.pop('PORTAGE_BUILDDIR_LOCKED', None)
+
+ doebuild_environment(self.ebuild_path, 'nofetch',
+ settings=settings, db=self.portdb)
+ restrict = settings['PORTAGE_RESTRICT'].split()
+ defined_phases = settings['DEFINED_PHASES'].split()
+ if not defined_phases:
+ # When DEFINED_PHASES is undefined, assume all
+ # phases are defined.
+ defined_phases = EBUILD_PHASES
+
+ if 'fetch' not in restrict and \
+ 'nofetch' not in defined_phases:
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return
+
+ prepare_build_dirs(settings=settings)
+
+ ebuild_phase = EbuildPhase(background=self.background,
+ phase='nofetch',
+ scheduler=self.scheduler,
+ fd_pipes=self.fd_pipes, settings=settings)
+
+ self._start_task(ebuild_phase, self._nofetch_exit)
+
+ def _nofetch_exit(self, ebuild_phase):
+ self._final_exit(ebuild_phase)
+ elog_process(self.settings.mycpv, self.settings)
+ shutil.rmtree(self._private_tmpdir)
+ self._async_wait()
+
+
+def spawn_nofetch(portdb, ebuild_path, settings=None, fd_pipes=None):
+ """
+ Create a SpawnNofetchWithoutBuilddir instance, and execute it synchronously.
+ This function must not be called from asynchronous code, since it will
+ trigger event loop recursion which is incompatible with asyncio.
+ """
+ nofetch = SpawnNofetchWithoutBuilddir(background=False,
+ portdb=portdb,
+ ebuild_path=ebuild_path,
+ scheduler=SchedulerInterface(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
+ fd_pipes=fd_pipes, settings=settings)
+
+ nofetch.start()
+ return nofetch.wait()
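
Assuming an installed, configured Portage (so portage.portdb is usable) and an ebuild that exists locally, the synchronous helper can be driven as in this sketch; the package atom is a placeholder:

    import portage
    from portage.package.ebuild._spawn_nofetch import spawn_nofetch

    # Hypothetical package; pkg_nofetch only runs when RESTRICT=fetch is set
    # or the ebuild defines its own pkg_nofetch.
    ebuild_path = portage.portdb.findname("sys-apps/portage-2.3.40")
    if ebuild_path:
        rval = spawn_nofetch(portage.portdb, ebuild_path)
        print("pkg_nofetch exited with", rval)
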
diff --git a/lib/portage/package/ebuild/config.py b/lib/portage/package/ebuild/config.py
new file mode 100644
index 000000000..320d9f6c0
--- /dev/null
+++ b/lib/portage/package/ebuild/config.py
@@ -0,0 +1,2875 @@
+# Copyright 2010-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = [
+ 'autouse', 'best_from_dict', 'check_config_instance', 'config',
+]
+
+import copy
+from itertools import chain
+import grp
+import logging
+import platform
+import pwd
+import re
+import sys
+import traceback
+import warnings
+
+from _emerge.Package import Package
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.data:portage_gid',
+ 'portage.dep.soname.SonameAtom:SonameAtom',
+ 'portage.dbapi.vartree:vartree',
+ 'portage.package.ebuild.doebuild:_phase_func_map',
+ 'portage.util.compression_probe:_compressors',
+ 'portage.util.locale:check_locale,split_LC_ALL',
+)
+from portage import bsd_chflags, \
+ load_mod, os, selinux, _unicode_decode
+from portage.const import CACHE_PATH, \
+ DEPCACHE_PATH, INCREMENTALS, MAKE_CONF_FILE, \
+ MODULES_FILE_PATH, PORTAGE_BASE_PATH, \
+ PRIVATE_PATH, PROFILE_PATH, USER_CONFIG_PATH, \
+ USER_VIRTUALS_FILE
+from portage.dbapi import dbapi
+from portage.dbapi.porttree import portdbapi
+from portage.dep import Atom, isvalidatom, match_from_list, use_reduce, _repo_separator, _slot_separator
+from portage.eapi import (eapi_exports_AA, eapi_exports_merge_type,
+ eapi_supports_prefix, eapi_exports_replace_vars, _get_eapi_attrs)
+from portage.env.loaders import KeyValuePairFileLoader
+from portage.exception import InvalidDependString, IsADirectory, \
+ PortageException
+from portage.localization import _
+from portage.output import colorize
+from portage.process import fakeroot_capable, sandbox_capable
+from portage.repository.config import load_repository_config
+from portage.util import ensure_dirs, getconfig, grabdict, \
+ grabdict_package, grabfile, grabfile_package, LazyItemsDict, \
+ normalize_path, shlex_split, stack_dictlist, stack_dicts, stack_lists, \
+ writemsg, writemsg_level, _eapi_cache
+from portage.util.path import first_existing
+from portage.util._path import exists_raise_eaccess, isdir_raise_eaccess
+from portage.versions import catpkgsplit, catsplit, cpv_getkey, _pkg_str
+
+from portage.package.ebuild._config import special_env_vars
+from portage.package.ebuild._config.env_var_validation import validate_cmd_var
+from portage.package.ebuild._config.features_set import features_set
+from portage.package.ebuild._config.KeywordsManager import KeywordsManager
+from portage.package.ebuild._config.LicenseManager import LicenseManager
+from portage.package.ebuild._config.UseManager import UseManager
+from portage.package.ebuild._config.LocationsManager import LocationsManager
+from portage.package.ebuild._config.MaskManager import MaskManager
+from portage.package.ebuild._config.VirtualsManager import VirtualsManager
+from portage.package.ebuild._config.helper import ordered_by_atom_specificity, prune_incremental
+from portage.package.ebuild._config.unpack_dependencies import load_unpack_dependencies_configuration
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+_feature_flags_cache = {}
+
+def _get_feature_flags(eapi_attrs):
+ cache_key = (eapi_attrs.feature_flag_test, eapi_attrs.feature_flag_targetroot)
+ flags = _feature_flags_cache.get(cache_key)
+ if flags is not None:
+ return flags
+
+ flags = []
+ if eapi_attrs.feature_flag_test:
+ flags.append("test")
+ if eapi_attrs.feature_flag_targetroot:
+ flags.append("targetroot")
+
+ flags = frozenset(flags)
+ _feature_flags_cache[cache_key] = flags
+ return flags
+
+def autouse(myvartree, use_cache=1, mysettings=None):
+ warnings.warn("portage.autouse() is deprecated",
+ DeprecationWarning, stacklevel=2)
+ return ""
+
+def check_config_instance(test):
+ if not isinstance(test, config):
+ raise TypeError("Invalid type for config object: %s (should be %s)" % (test.__class__, config))
+
+def best_from_dict(key, top_dict, key_order, EmptyOnError=1, FullCopy=1, AllowEmpty=1):
+ for x in key_order:
+ if x in top_dict and key in top_dict[x]:
+ if FullCopy:
+ return copy.deepcopy(top_dict[x][key])
+ else:
+ return top_dict[x][key]
+ if EmptyOnError:
+ return ""
+ else:
+ raise KeyError("Key not found in list; '%s'" % key)
+
+def _lazy_iuse_regex(iuse_implicit):
+ """
+ The PORTAGE_IUSE value is lazily evaluated since re.escape() is slow
+ and the value is only used when an ebuild phase needs to be executed
+ (it's used only to generate QA notices).
+ """
+ # Escape anything except ".*" which is supposed to pass through from
+ # _get_implicit_iuse().
+ regex = sorted(re.escape(x) for x in iuse_implicit)
+ regex = "^(%s)$" % "|".join(regex)
+ regex = regex.replace("\\.\\*", ".*")
+ return regex
+
+class _iuse_implicit_match_cache(object):
+
+ def __init__(self, settings):
+ self._iuse_implicit_re = re.compile("^(%s)$" % \
+ "|".join(settings._get_implicit_iuse()))
+ self._cache = {}
+
+ def __call__(self, flag):
+ """
+ Returns True if the flag is matched, False otherwise.
+ """
+ try:
+ return self._cache[flag]
+ except KeyError:
+ m = self._iuse_implicit_re.match(flag) is not None
+ self._cache[flag] = m
+ return m
+
+class config(object):
+ """
+ This class encompasses the main portage configuration. Data is pulled from
+ ROOT/PORTDIR/profiles/, from ROOT/etc/make.profile incrementally through all
+ parent profiles as well as from ROOT/PORTAGE_CONFIGROOT/* for user specified
+ overrides.
+
+ Generally if you need data like USE flags, FEATURES, environment variables,
+ virtuals ...etc you look in here.
+ """
+
+ _constant_keys = frozenset(['PORTAGE_BIN_PATH', 'PORTAGE_GID',
+ 'PORTAGE_PYM_PATH', 'PORTAGE_PYTHONPATH'])
+
+ _setcpv_aux_keys = ('BDEPEND', 'DEFINED_PHASES', 'DEPEND', 'EAPI', 'HDEPEND',
+ 'INHERITED', 'IUSE', 'REQUIRED_USE', 'KEYWORDS', 'LICENSE', 'PDEPEND',
+ 'PROPERTIES', 'RDEPEND', 'SLOT',
+ 'repository', 'RESTRICT', 'LICENSE',)
+
+ _module_aliases = {
+ "cache.metadata_overlay.database" : "portage.cache.flat_hash.mtime_md5_database",
+ "portage.cache.metadata_overlay.database" : "portage.cache.flat_hash.mtime_md5_database",
+ }
+
+ _case_insensitive_vars = special_env_vars.case_insensitive_vars
+ _default_globals = special_env_vars.default_globals
+ _env_blacklist = special_env_vars.env_blacklist
+ _environ_filter = special_env_vars.environ_filter
+ _environ_whitelist = special_env_vars.environ_whitelist
+ _environ_whitelist_re = special_env_vars.environ_whitelist_re
+ _global_only_vars = special_env_vars.global_only_vars
+
+ def __init__(self, clone=None, mycpv=None, config_profile_path=None,
+ config_incrementals=None, config_root=None, target_root=None,
+ sysroot=None, eprefix=None, local_config=True, env=None,
+ _unmatched_removal=False, repositories=None):
+ """
+ @param clone: If provided, init will use deepcopy to copy by value the instance.
+ @type clone: Instance of config class.
+ @param mycpv: CPV to load up (see setcpv), this is the same as calling init with mycpv=None
+ and then calling instance.setcpv(mycpv).
+ @type mycpv: String
+ @param config_profile_path: Configurable path to the profile (usually PROFILE_PATH from portage.const)
+ @type config_profile_path: String
+ @param config_incrementals: List of incremental variables
+ (defaults to portage.const.INCREMENTALS)
+ @type config_incrementals: List
+ @param config_root: path to read local config from (defaults to "/", see PORTAGE_CONFIGROOT)
+ @type config_root: String
+ @param target_root: the target root, which typically corresponds to the
+ value of the $ROOT env variable (default is /)
+ @type target_root: String
+ @param sysroot: the sysroot to build against, which typically corresponds
+ to the value of the $SYSROOT env variable (default is /)
+ @type sysroot: String
+ @param eprefix: set the EPREFIX variable (default is portage.const.EPREFIX)
+ @type eprefix: String
+ @param local_config: Enables loading of local config (/etc/portage); used most by repoman to
+ ignore local config (keywording and unmasking)
+ @type local_config: Boolean
+ @param env: The calling environment which is used to override settings.
+ Defaults to os.environ if unspecified.
+ @type env: dict
+ @param _unmatched_removal: Enabled by repoman when the
+ --unmatched-removal option is given.
+ @type _unmatched_removal: Boolean
+ @param repositories: Configuration of repositories.
+ Defaults to portage.repository.config.load_repository_config().
+ @type repositories: Instance of portage.repository.config.RepoConfigLoader class.
+ """
+
+ # This is important when config is reloaded after emerge --sync.
+ _eapi_cache.clear()
+
+ # When initializing the global portage.settings instance, avoid
+ # raising exceptions whenever possible since exceptions thrown
+ # from 'import portage' or 'import portage.exceptions' statements
+ # can practically render the api unusable for api consumers.
+ tolerant = hasattr(portage, '_initializing_globals')
+ self._tolerant = tolerant
+ self._unmatched_removal = _unmatched_removal
+
+ self.locked = 0
+ self.mycpv = None
+ self._setcpv_args_hash = None
+ self.puse = ""
+ self._penv = []
+ self.modifiedkeys = []
+ self.uvlist = []
+ self._accept_chost_re = None
+ self._accept_properties = None
+ self._accept_restrict = None
+ self._features_overrides = []
+ self._make_defaults = None
+ self._parent_stable = None
+ self._soname_provided = None
+
+ # _unknown_features records unknown features that
+ # have triggered warning messages, and ensures that
+ # the same warning isn't shown twice.
+ self._unknown_features = set()
+
+ self.local_config = local_config
+
+ if clone:
+ # For immutable attributes, use shallow copy for
+ # speed and memory conservation.
+ self._tolerant = clone._tolerant
+ self._unmatched_removal = clone._unmatched_removal
+ self.categories = clone.categories
+ self.depcachedir = clone.depcachedir
+ self.incrementals = clone.incrementals
+ self.module_priority = clone.module_priority
+ self.profile_path = clone.profile_path
+ self.profiles = clone.profiles
+ self.packages = clone.packages
+ self.repositories = clone.repositories
+ self.unpack_dependencies = clone.unpack_dependencies
+ self._iuse_effective = clone._iuse_effective
+ self._iuse_implicit_match = clone._iuse_implicit_match
+ self._non_user_variables = clone._non_user_variables
+ self._env_d_blacklist = clone._env_d_blacklist
+ self._pbashrc = clone._pbashrc
+ self._repo_make_defaults = clone._repo_make_defaults
+ self.usemask = clone.usemask
+ self.useforce = clone.useforce
+ self.puse = clone.puse
+ self.user_profile_dir = clone.user_profile_dir
+ self.local_config = clone.local_config
+ self.make_defaults_use = clone.make_defaults_use
+ self.mycpv = clone.mycpv
+ self._setcpv_args_hash = clone._setcpv_args_hash
+ self._soname_provided = clone._soname_provided
+ self._profile_bashrc = clone._profile_bashrc
+
+ # immutable attributes (internal policy ensures lack of mutation)
+ self._locations_manager = clone._locations_manager
+ self._use_manager = clone._use_manager
+ # force instantiation of lazy immutable objects when cloning, so
+ # that they're not instantiated more than once
+ self._keywords_manager_obj = clone._keywords_manager
+ self._mask_manager_obj = clone._mask_manager
+
+ # shared mutable attributes
+ self._unknown_features = clone._unknown_features
+
+ self.modules = copy.deepcopy(clone.modules)
+ self._penv = copy.deepcopy(clone._penv)
+
+ self.configdict = copy.deepcopy(clone.configdict)
+ self.configlist = [
+ self.configdict['env.d'],
+ self.configdict['repo'],
+ self.configdict['pkginternal'],
+ self.configdict['globals'],
+ self.configdict['defaults'],
+ self.configdict['conf'],
+ self.configdict['pkg'],
+ self.configdict['env'],
+ ]
+ self.lookuplist = self.configlist[:]
+ self.lookuplist.reverse()
+ self._use_expand_dict = copy.deepcopy(clone._use_expand_dict)
+ self.backupenv = self.configdict["backupenv"]
+ self.prevmaskdict = copy.deepcopy(clone.prevmaskdict)
+ self.pprovideddict = copy.deepcopy(clone.pprovideddict)
+ self.features = features_set(self)
+ self.features._features = copy.deepcopy(clone.features._features)
+ self._features_overrides = copy.deepcopy(clone._features_overrides)
+
+ #Strictly speaking _license_manager is not immutable. Users need to ensure that
+ #extract_global_changes() is called right after __init__ (if at all).
+ #It also has the mutable member _undef_lic_groups. It is used to track
+ #undefined license groups, to not display an error message for the same
+ #group again and again. Because of this, it's useful to share it between
+ #all LicenseManager instances.
+ self._license_manager = clone._license_manager
+
+ # force instantiation of lazy objects when cloning, so
+ # that they're not instantiated more than once
+ self._virtuals_manager_obj = copy.deepcopy(clone._virtuals_manager)
+
+ self._accept_properties = copy.deepcopy(clone._accept_properties)
+ self._ppropertiesdict = copy.deepcopy(clone._ppropertiesdict)
+ self._accept_restrict = copy.deepcopy(clone._accept_restrict)
+ self._paccept_restrict = copy.deepcopy(clone._paccept_restrict)
+ self._penvdict = copy.deepcopy(clone._penvdict)
+ self._pbashrcdict = copy.deepcopy(clone._pbashrcdict)
+ self._expand_map = copy.deepcopy(clone._expand_map)
+
+ else:
+ # lazily instantiated objects
+ self._keywords_manager_obj = None
+ self._mask_manager_obj = None
+ self._virtuals_manager_obj = None
+
+ locations_manager = LocationsManager(config_root=config_root,
+ config_profile_path=config_profile_path, eprefix=eprefix,
+ local_config=local_config, target_root=target_root,
+ sysroot=sysroot)
+ self._locations_manager = locations_manager
+
+ eprefix = locations_manager.eprefix
+ config_root = locations_manager.config_root
+ sysroot = locations_manager.sysroot
+ esysroot = locations_manager.esysroot
+ broot = locations_manager.broot
+ abs_user_config = locations_manager.abs_user_config
+ make_conf_paths = [
+ os.path.join(config_root, 'etc', 'make.conf'),
+ os.path.join(config_root, MAKE_CONF_FILE)
+ ]
+ try:
+ if os.path.samefile(*make_conf_paths):
+ make_conf_paths.pop()
+ except OSError:
+ pass
+
+ make_conf_count = 0
+ make_conf = {}
+ for x in make_conf_paths:
+ mygcfg = getconfig(x,
+ tolerant=tolerant, allow_sourcing=True,
+ expand=make_conf, recursive=True)
+ if mygcfg is not None:
+ make_conf.update(mygcfg)
+ make_conf_count += 1
+
+ if make_conf_count == 2:
+ writemsg("!!! %s\n" %
+ _("Found 2 make.conf files, using both '%s' and '%s'") %
+ tuple(make_conf_paths), noiselevel=-1)
+
+ # Allow ROOT setting to come from make.conf if it's not overridden
+ # by the constructor argument (from the calling environment).
+ locations_manager.set_root_override(make_conf.get("ROOT"))
+ target_root = locations_manager.target_root
+ eroot = locations_manager.eroot
+ self.global_config_path = locations_manager.global_config_path
+
+ # The expand_map is used for variable substitution
+ # in getconfig() calls, and the getconfig() calls
+ # update expand_map with the value of each variable
+ # assignment that occurs. Variable substitution occurs
+ # in the following order, which corresponds to the
+ # order of appearance in self.lookuplist:
+ #
+ # * env.d
+ # * make.globals
+ # * make.defaults
+ # * make.conf
+ #
+ # Notably absent is "env", since we want to avoid any
+ # interaction with the calling environment that might
+ # lead to unexpected results.
+
+ env_d = getconfig(os.path.join(eroot, "etc", "profile.env"),
+ tolerant=tolerant, expand=False) or {}
+ expand_map = env_d.copy()
+ self._expand_map = expand_map
+
+ # Allow make.globals to set default paths relative to ${EPREFIX}.
+ expand_map["EPREFIX"] = eprefix
+ expand_map["PORTAGE_CONFIGROOT"] = config_root
+
+ if portage._not_installed:
+ make_globals_path = os.path.join(PORTAGE_BASE_PATH, "cnf", "make.globals")
+ else:
+ make_globals_path = os.path.join(self.global_config_path, "make.globals")
+ old_make_globals = os.path.join(config_root, "etc", "make.globals")
+ if os.path.isfile(old_make_globals) and \
+ not os.path.samefile(make_globals_path, old_make_globals):
+ # Don't warn if they refer to the same path, since
+ # that can be used for backward compatibility with
+ # old software.
+ writemsg("!!! %s\n" %
+ _("Found obsolete make.globals file: "
+ "'%s', (using '%s' instead)") %
+ (old_make_globals, make_globals_path),
+ noiselevel=-1)
+
+ make_globals = getconfig(make_globals_path,
+ tolerant=tolerant, expand=expand_map)
+ if make_globals is None:
+ make_globals = {}
+
+ for k, v in self._default_globals.items():
+ make_globals.setdefault(k, v)
+
+ if config_incrementals is None:
+ self.incrementals = INCREMENTALS
+ else:
+ self.incrementals = config_incrementals
+ if not isinstance(self.incrementals, frozenset):
+ self.incrementals = frozenset(self.incrementals)
+
+ self.module_priority = ("user", "default")
+ self.modules = {}
+ modules_file = os.path.join(config_root, MODULES_FILE_PATH)
+ modules_loader = KeyValuePairFileLoader(modules_file, None, None)
+ modules_dict, modules_errors = modules_loader.load()
+ self.modules["user"] = modules_dict
+ if self.modules["user"] is None:
+ self.modules["user"] = {}
+ user_auxdbmodule = \
+ self.modules["user"].get("portdbapi.auxdbmodule")
+ if user_auxdbmodule is not None and \
+ user_auxdbmodule in self._module_aliases:
+ warnings.warn("'%s' is deprecated: %s" %
+ (user_auxdbmodule, modules_file))
+
+ self.modules["default"] = {
+ "portdbapi.auxdbmodule": "portage.cache.flat_hash.mtime_md5_database",
+ }
+
+ self.configlist=[]
+
+ # back up our incremental variables:
+ self.configdict={}
+ self._use_expand_dict = {}
+ # configlist will contain: [ env.d, repo, pkginternal, globals, defaults, conf, pkg, env ]
+ self.configlist.append({})
+ self.configdict["env.d"] = self.configlist[-1]
+
+ self.configlist.append({})
+ self.configdict["repo"] = self.configlist[-1]
+
+ self.configlist.append({})
+ self.configdict["pkginternal"] = self.configlist[-1]
+
+ # env_d will be None if profile.env doesn't exist.
+ if env_d:
+ self.configdict["env.d"].update(env_d)
+
+ # backupenv is used for calculating incremental variables.
+ if env is None:
+ env = os.environ
+
+ # Avoid potential UnicodeDecodeError exceptions later.
+ env_unicode = dict((_unicode_decode(k), _unicode_decode(v))
+ for k, v in env.items())
+
+ self.backupenv = env_unicode
+
+ if env_d:
+ # Remove duplicate values so they don't override updated
+ # profile.env values later (profile.env is reloaded in each
+ # call to self.regenerate).
+ for k, v in env_d.items():
+ try:
+ if self.backupenv[k] == v:
+ del self.backupenv[k]
+ except KeyError:
+ pass
+ del k, v
+
+ self.configdict["env"] = LazyItemsDict(self.backupenv)
+
+ self.configlist.append(make_globals)
+ self.configdict["globals"]=self.configlist[-1]
+
+ self.make_defaults_use = []
+
+ #Loading Repositories
+ self["PORTAGE_CONFIGROOT"] = config_root
+ self["ROOT"] = target_root
+ self["SYSROOT"] = sysroot
+ self["EPREFIX"] = eprefix
+ self["EROOT"] = eroot
+ self["ESYSROOT"] = esysroot
+ self["BROOT"] = broot
+ known_repos = []
+ portdir = ""
+ portdir_overlay = ""
+ portdir_sync = None
+ for confs in [make_globals, make_conf, self.configdict["env"]]:
+ v = confs.get("PORTDIR")
+ if v is not None:
+ portdir = v
+ known_repos.append(v)
+ v = confs.get("PORTDIR_OVERLAY")
+ if v is not None:
+ portdir_overlay = v
+ known_repos.extend(shlex_split(v))
+ v = confs.get("SYNC")
+ if v is not None:
+ portdir_sync = v
+ if 'PORTAGE_RSYNC_EXTRA_OPTS' in confs:
+ self['PORTAGE_RSYNC_EXTRA_OPTS'] = confs['PORTAGE_RSYNC_EXTRA_OPTS']
+
+ self["PORTDIR"] = portdir
+ self["PORTDIR_OVERLAY"] = portdir_overlay
+ if portdir_sync:
+ self["SYNC"] = portdir_sync
+ self.lookuplist = [self.configdict["env"]]
+ if repositories is None:
+ self.repositories = load_repository_config(self)
+ else:
+ self.repositories = repositories
+
+ known_repos.extend(repo.location for repo in self.repositories)
+ known_repos = frozenset(known_repos)
+
+ self['PORTAGE_REPOSITORIES'] = self.repositories.config_string()
+ self.backup_changes('PORTAGE_REPOSITORIES')
+
+ #filling PORTDIR and PORTDIR_OVERLAY variables for compatibility
+ main_repo = self.repositories.mainRepo()
+ if main_repo is not None:
+ self["PORTDIR"] = main_repo.location
+ self.backup_changes("PORTDIR")
+ expand_map["PORTDIR"] = self["PORTDIR"]
+
+ # repoman controls PORTDIR_OVERLAY via the environment, so no
+ # special cases are needed here.
+ portdir_overlay = list(self.repositories.repoLocationList())
+ if portdir_overlay and portdir_overlay[0] == self["PORTDIR"]:
+ portdir_overlay = portdir_overlay[1:]
+
+ new_ov = []
+ if portdir_overlay:
+ for ov in portdir_overlay:
+ ov = normalize_path(ov)
+ if isdir_raise_eaccess(ov) or portage._sync_mode:
+ new_ov.append(portage._shell_quote(ov))
+ else:
+ writemsg(_("!!! Invalid PORTDIR_OVERLAY"
+ " (not a dir): '%s'\n") % ov, noiselevel=-1)
+
+ self["PORTDIR_OVERLAY"] = " ".join(new_ov)
+ self.backup_changes("PORTDIR_OVERLAY")
+ expand_map["PORTDIR_OVERLAY"] = self["PORTDIR_OVERLAY"]
+
+ locations_manager.set_port_dirs(self["PORTDIR"], self["PORTDIR_OVERLAY"])
+ locations_manager.load_profiles(self.repositories, known_repos)
+
+ profiles_complex = locations_manager.profiles_complex
+ self.profiles = locations_manager.profiles
+ self.profile_path = locations_manager.profile_path
+ self.user_profile_dir = locations_manager.user_profile_dir
+
+ try:
+ packages_list = [grabfile_package(
+ os.path.join(x.location, "packages"),
+ verify_eapi=True, eapi=x.eapi, eapi_default=None,
+ allow_build_id=x.allow_build_id)
+ for x in profiles_complex]
+ except IOError as e:
+ if e.errno == IsADirectory.errno:
+ raise IsADirectory(os.path.join(self.profile_path,
+ "packages"))
+
+ self.packages = tuple(stack_lists(packages_list, incremental=1))
+
+ # prevmaskdict
+ self.prevmaskdict={}
+ for x in self.packages:
+ # Negative atoms are filtered by the above stack_lists() call.
+ if not isinstance(x, Atom):
+ x = Atom(x.lstrip('*'))
+ self.prevmaskdict.setdefault(x.cp, []).append(x)
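+ # Illustration (hypothetical entry): a profile "packages" line such as
+ # "*>=sys-apps/baselayout-2" (the leading "*" marks a system package) ends
+ # up here, after the "*" is stripped, as
+ # prevmaskdict["sys-apps/baselayout"] == [Atom(">=sys-apps/baselayout-2")].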
+
+ self.unpack_dependencies = load_unpack_dependencies_configuration(self.repositories)
+
+ mygcfg = {}
+ if profiles_complex:
+ mygcfg_dlists = [getconfig(os.path.join(x.location, "make.defaults"),
+ tolerant=tolerant, expand=expand_map, recursive=x.portage1_directories)
+ for x in profiles_complex]
+ self._make_defaults = mygcfg_dlists
+ mygcfg = stack_dicts(mygcfg_dlists,
+ incrementals=self.incrementals)
+ if mygcfg is None:
+ mygcfg = {}
+ self.configlist.append(mygcfg)
+ self.configdict["defaults"]=self.configlist[-1]
+
+ mygcfg = {}
+ for x in make_conf_paths:
+ mygcfg.update(getconfig(x,
+ tolerant=tolerant, allow_sourcing=True,
+ expand=expand_map, recursive=True) or {})
+
+ # Don't allow the user to override certain variables in make.conf
+ profile_only_variables = self.configdict["defaults"].get(
+ "PROFILE_ONLY_VARIABLES", "").split()
+ profile_only_variables = stack_lists([profile_only_variables])
+ non_user_variables = set()
+ non_user_variables.update(profile_only_variables)
+ non_user_variables.update(self._env_blacklist)
+ non_user_variables.update(self._global_only_vars)
+ non_user_variables = frozenset(non_user_variables)
+ self._non_user_variables = non_user_variables
+
+ self._env_d_blacklist = frozenset(chain(
+ profile_only_variables,
+ self._env_blacklist,
+ ))
+ env_d = self.configdict["env.d"]
+ for k in self._env_d_blacklist:
+ env_d.pop(k, None)
+
+ for k in profile_only_variables:
+ mygcfg.pop(k, None)
+
+ self.configlist.append(mygcfg)
+ self.configdict["conf"]=self.configlist[-1]
+
+ self.configlist.append(LazyItemsDict())
+ self.configdict["pkg"]=self.configlist[-1]
+
+ self.configdict["backupenv"] = self.backupenv
+
+ # Don't allow the user to override certain variables in the env
+ for k in profile_only_variables:
+ self.backupenv.pop(k, None)
+
+ self.configlist.append(self.configdict["env"])
+
+ # make lookuplist for loading package.*
+ self.lookuplist=self.configlist[:]
+ self.lookuplist.reverse()
+
+ # Blacklist vars that could interfere with portage internals.
+ for blacklisted in self._env_blacklist:
+ for cfg in self.lookuplist:
+ cfg.pop(blacklisted, None)
+ self.backupenv.pop(blacklisted, None)
+ del blacklisted, cfg
+
+ self["PORTAGE_CONFIGROOT"] = config_root
+ self.backup_changes("PORTAGE_CONFIGROOT")
+ self["ROOT"] = target_root
+ self.backup_changes("ROOT")
+ self["SYSROOT"] = sysroot
+ self.backup_changes("SYSROOT")
+ self["EPREFIX"] = eprefix
+ self.backup_changes("EPREFIX")
+ self["EROOT"] = eroot
+ self.backup_changes("EROOT")
+ self["ESYSROOT"] = esysroot
+ self.backup_changes("ESYSROOT")
+ self["BROOT"] = broot
+ self.backup_changes("BROOT")
+
+ # The prefix of the running portage instance is used in the
+ # ebuild environment to implement the --host-root option for
+ # best_version and has_version.
+ self["PORTAGE_OVERRIDE_EPREFIX"] = portage.const.EPREFIX
+ self.backup_changes("PORTAGE_OVERRIDE_EPREFIX")
+
+ self._ppropertiesdict = portage.dep.ExtendedAtomDict(dict)
+ self._paccept_restrict = portage.dep.ExtendedAtomDict(dict)
+ self._penvdict = portage.dep.ExtendedAtomDict(dict)
+ self._pbashrcdict = {}
+ self._pbashrc = ()
+
+ self._repo_make_defaults = {}
+ for repo in self.repositories.repos_with_profiles():
+ d = getconfig(os.path.join(repo.location, "profiles", "make.defaults"),
+ tolerant=tolerant, expand=self.configdict["globals"].copy(), recursive=repo.portage1_profiles) or {}
+ if d:
+ for k in chain(self._env_blacklist,
+ profile_only_variables, self._global_only_vars):
+ d.pop(k, None)
+ self._repo_make_defaults[repo.name] = d
+
+ #Read all USE related files from profiles and optionally from user config.
+ self._use_manager = UseManager(self.repositories, profiles_complex,
+ abs_user_config, self._isStable, user_config=local_config)
+ #Initialize all USE related variables we track ourselves.
+ self.usemask = self._use_manager.getUseMask()
+ self.useforce = self._use_manager.getUseForce()
+ self.configdict["conf"]["USE"] = \
+ self._use_manager.extract_global_USE_changes( \
+ self.configdict["conf"].get("USE", ""))
+
+ #Read license_groups from profiles, and optionally license_groups and package.license from user config
+ self._license_manager = LicenseManager(locations_manager.profile_locations, \
+ abs_user_config, user_config=local_config)
+ #Extract '*/*' entries from package.license
+ self.configdict["conf"]["ACCEPT_LICENSE"] = \
+ self._license_manager.extract_global_changes( \
+ self.configdict["conf"].get("ACCEPT_LICENSE", ""))
+
+ # profile.bashrc
+ self._profile_bashrc = tuple(os.path.isfile(os.path.join(profile.location, 'profile.bashrc'))
+ for profile in profiles_complex)
+
+ if local_config:
+ #package.properties
+ propdict = grabdict_package(os.path.join(
+ abs_user_config, "package.properties"), recursive=1, allow_wildcard=True, \
+ allow_repo=True, verify_eapi=False,
+ allow_build_id=True)
+ v = propdict.pop("*/*", None)
+ if v is not None:
+ if "ACCEPT_PROPERTIES" in self.configdict["conf"]:
+ self.configdict["conf"]["ACCEPT_PROPERTIES"] += " " + " ".join(v)
+ else:
+ self.configdict["conf"]["ACCEPT_PROPERTIES"] = " ".join(v)
+ for k, v in propdict.items():
+ self._ppropertiesdict.setdefault(k.cp, {})[k] = v
+
+ # package.accept_restrict
+ d = grabdict_package(os.path.join(
+ abs_user_config, "package.accept_restrict"),
+ recursive=True, allow_wildcard=True,
+ allow_repo=True, verify_eapi=False,
+ allow_build_id=True)
+ v = d.pop("*/*", None)
+ if v is not None:
+ if "ACCEPT_RESTRICT" in self.configdict["conf"]:
+ self.configdict["conf"]["ACCEPT_RESTRICT"] += " " + " ".join(v)
+ else:
+ self.configdict["conf"]["ACCEPT_RESTRICT"] = " ".join(v)
+ for k, v in d.items():
+ self._paccept_restrict.setdefault(k.cp, {})[k] = v
+
+ #package.env
+ penvdict = grabdict_package(os.path.join(
+ abs_user_config, "package.env"), recursive=1, allow_wildcard=True, \
+ allow_repo=True, verify_eapi=False,
+ allow_build_id=True)
+ v = penvdict.pop("*/*", None)
+ if v is not None:
+ global_wildcard_conf = {}
+ self._grab_pkg_env(v, global_wildcard_conf)
+ incrementals = self.incrementals
+ conf_configdict = self.configdict["conf"]
+ for k, v in global_wildcard_conf.items():
+ if k in incrementals:
+ if k in conf_configdict:
+ conf_configdict[k] = \
+ conf_configdict[k] + " " + v
+ else:
+ conf_configdict[k] = v
+ else:
+ conf_configdict[k] = v
+ expand_map[k] = v
+
+ for k, v in penvdict.items():
+ self._penvdict.setdefault(k.cp, {})[k] = v
+
+ # package.bashrc
+ for profile in profiles_complex:
+ if not 'profile-bashrcs' in profile.profile_formats:
+ continue
+ self._pbashrcdict[profile] = \
+ portage.dep.ExtendedAtomDict(dict)
+ bashrc = grabdict_package(os.path.join(profile.location,
+ "package.bashrc"), recursive=1, allow_wildcard=True,
+ allow_repo=True, verify_eapi=True,
+ eapi=profile.eapi, eapi_default=None,
+ allow_build_id=profile.allow_build_id)
+ if not bashrc:
+ continue
+
+ for k, v in bashrc.items():
+ envfiles = [os.path.join(profile.location,
+ "bashrc",
+ envname) for envname in v]
+ self._pbashrcdict[profile].setdefault(k.cp, {})\
+ .setdefault(k, []).extend(envfiles)
+
+ #getting categories from an external file now
+ self.categories = [grabfile(os.path.join(x, "categories")) \
+ for x in locations_manager.profile_and_user_locations]
+ category_re = dbapi._category_re
+ # categories used to be a tuple, but now we use a frozenset
+ # for hashed category validation in portdbapi.cp_list()
+ self.categories = frozenset(
+ x for x in stack_lists(self.categories, incremental=1)
+ if category_re.match(x) is not None)
+
+ archlist = [grabfile(os.path.join(x, "arch.list")) \
+ for x in locations_manager.profile_and_user_locations]
+ archlist = sorted(stack_lists(archlist, incremental=1))
+ self.configdict["conf"]["PORTAGE_ARCHLIST"] = " ".join(archlist)
+
+ pkgprovidedlines = []
+ for x in profiles_complex:
+ provpath = os.path.join(x.location, "package.provided")
+ if os.path.exists(provpath):
+ if _get_eapi_attrs(x.eapi).allows_package_provided:
+ pkgprovidedlines.append(grabfile(provpath,
+ recursive=x.portage1_directories))
+ else:
+ # TODO: bail out?
+ writemsg((_("!!! package.provided not allowed in EAPI %s: ")
+ %x.eapi)+x.location+"\n",
+ noiselevel=-1)
+
+ pkgprovidedlines = stack_lists(pkgprovidedlines, incremental=1)
+ has_invalid_data = False
+ for x in range(len(pkgprovidedlines)-1, -1, -1):
+ myline = pkgprovidedlines[x]
+ if not isvalidatom("=" + myline):
+ writemsg(_("Invalid package name in package.provided: %s\n") % \
+ myline, noiselevel=-1)
+ has_invalid_data = True
+ del pkgprovidedlines[x]
+ continue
+ cpvr = catpkgsplit(pkgprovidedlines[x])
+ if not cpvr or cpvr[0] == "null":
+ writemsg(_("Invalid package name in package.provided: ")+pkgprovidedlines[x]+"\n",
+ noiselevel=-1)
+ has_invalid_data = True
+ del pkgprovidedlines[x]
+ continue
+ if has_invalid_data:
+ writemsg(_("See portage(5) for correct package.provided usage.\n"),
+ noiselevel=-1)
+ self.pprovideddict = {}
+ for x in pkgprovidedlines:
+ x_split = catpkgsplit(x)
+ if x_split is None:
+ continue
+ mycatpkg = cpv_getkey(x)
+ if mycatpkg in self.pprovideddict:
+ self.pprovideddict[mycatpkg].append(x)
+ else:
+ self.pprovideddict[mycatpkg]=[x]
+
+ # reasonable defaults; this is important as without USE_ORDER,
+ # USE will always be "" (nothing set)!
+ if "USE_ORDER" not in self:
+ self["USE_ORDER"] = "env:pkg:conf:defaults:pkginternal:repo:env.d"
+ self.backup_changes("USE_ORDER")
+
+ if "CBUILD" not in self and "CHOST" in self:
+ self["CBUILD"] = self["CHOST"]
+ self.backup_changes("CBUILD")
+
+ if "USERLAND" not in self:
+ # Set default USERLAND so that our test cases can assume that
+ # it's always set. This allows isolated-functions.sh to avoid
+ # calling uname -s when sourced.
+ system = platform.system()
+ if system is not None and \
+ (system.endswith("BSD") or system == "DragonFly"):
+ self["USERLAND"] = "BSD"
+ else:
+ self["USERLAND"] = "GNU"
+ self.backup_changes("USERLAND")
+
+ default_inst_ids = {
+ "PORTAGE_INST_GID": "0",
+ "PORTAGE_INST_UID": "0",
+ }
+
+ eroot_or_parent = first_existing(eroot)
+ unprivileged = False
+ try:
+ eroot_st = os.stat(eroot_or_parent)
+ except OSError:
+ pass
+ else:
+
+ if portage.data._unprivileged_mode(
+ eroot_or_parent, eroot_st):
+ unprivileged = True
+
+ default_inst_ids["PORTAGE_INST_GID"] = str(eroot_st.st_gid)
+ default_inst_ids["PORTAGE_INST_UID"] = str(eroot_st.st_uid)
+
+ if "PORTAGE_USERNAME" not in self:
+ try:
+ pwd_struct = pwd.getpwuid(eroot_st.st_uid)
+ except KeyError:
+ pass
+ else:
+ self["PORTAGE_USERNAME"] = pwd_struct.pw_name
+ self.backup_changes("PORTAGE_USERNAME")
+
+ if "PORTAGE_GRPNAME" not in self:
+ try:
+ grp_struct = grp.getgrgid(eroot_st.st_gid)
+ except KeyError:
+ pass
+ else:
+ self["PORTAGE_GRPNAME"] = grp_struct.gr_name
+ self.backup_changes("PORTAGE_GRPNAME")
+
+ for var, default_val in default_inst_ids.items():
+ try:
+ self[var] = str(int(self.get(var, default_val)))
+ except ValueError:
+ writemsg(_("!!! %s='%s' is not a valid integer. "
+ "Falling back to %s.\n") % (var, self[var], default_val),
+ noiselevel=-1)
+ self[var] = default_val
+ self.backup_changes(var)
+
+ self.depcachedir = self.get("PORTAGE_DEPCACHEDIR")
+ if self.depcachedir is None:
+ self.depcachedir = os.path.join(os.sep,
+ portage.const.EPREFIX, DEPCACHE_PATH.lstrip(os.sep))
+ if unprivileged and target_root != os.sep:
+ # In unprivileged mode, automatically make
+ # depcachedir relative to target_root if the
+ # default depcachedir is not writable.
+ if not os.access(first_existing(self.depcachedir),
+ os.W_OK):
+ self.depcachedir = os.path.join(eroot,
+ DEPCACHE_PATH.lstrip(os.sep))
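+ # Illustration (hypothetical paths): with EPREFIX unset the default is
+ # "/var/cache/edb/dep"; in unprivileged mode with eroot "/home/user/root"
+ # and the default location not writable, depcachedir falls back to
+ # "/home/user/root/var/cache/edb/dep".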
+
+ self["PORTAGE_DEPCACHEDIR"] = self.depcachedir
+ self.backup_changes("PORTAGE_DEPCACHEDIR")
+
+ if portage._internal_caller:
+ self["PORTAGE_INTERNAL_CALLER"] = "1"
+ self.backup_changes("PORTAGE_INTERNAL_CALLER")
+
+ # initialize self.features
+ self.regenerate()
+
+ if unprivileged:
+ self.features.add('unprivileged')
+
+ if bsd_chflags:
+ self.features.add('chflags')
+
+ self._init_iuse()
+
+ self._validate_commands()
+
+ for k in self._case_insensitive_vars:
+ if k in self:
+ self[k] = self[k].lower()
+ self.backup_changes(k)
+
+ # The first constructed config object initializes these modules,
+ # and subsequent calls to the _init() functions have no effect.
+ portage.output._init(config_root=self['PORTAGE_CONFIGROOT'])
+ portage.data._init(self)
+
+ if mycpv:
+ self.setcpv(mycpv)
+
+ def _init_iuse(self):
+ self._iuse_effective = self._calc_iuse_effective()
+ self._iuse_implicit_match = _iuse_implicit_match_cache(self)
+
+ @property
+ def mygcfg(self):
+ warnings.warn("portage.config.mygcfg is deprecated", stacklevel=3)
+ return {}
+
+ def _validate_commands(self):
+ for k in special_env_vars.validate_commands:
+ v = self.get(k)
+ if v is not None:
+ valid, v_split = validate_cmd_var(v)
+
+ if not valid:
+ if v_split:
+ writemsg_level(_("%s setting is invalid: '%s'\n") % \
+ (k, v), level=logging.ERROR, noiselevel=-1)
+
+ # before deleting the invalid setting, backup
+ # the default value if available
+ v = self.configdict['globals'].get(k)
+ if v is not None:
+ default_valid, v_split = validate_cmd_var(v)
+ if not default_valid:
+ if v_split:
+ writemsg_level(
+ _("%s setting from make.globals" + \
+ " is invalid: '%s'\n") % \
+ (k, v), level=logging.ERROR, noiselevel=-1)
+ # make.globals seems corrupt, so try for
+ # a hardcoded default instead
+ v = self._default_globals.get(k)
+
+ # delete all settings for this key,
+ # including the invalid one
+ del self[k]
+ self.backupenv.pop(k, None)
+ if v:
+ # restore validated default
+ self.configdict['globals'][k] = v
+
+ def _init_dirs(self):
+ """
+ Create a few directories that are critical to portage operation
+ """
+ if not os.access(self["EROOT"], os.W_OK):
+ return
+
+ # gid, mode, mask, preserve_perms
+ dir_mode_map = {
+ "tmp" : ( -1, 0o1777, 0, True),
+ "var/tmp" : ( -1, 0o1777, 0, True),
+ PRIVATE_PATH : (portage_gid, 0o2750, 0o2, False),
+ CACHE_PATH : (portage_gid, 0o755, 0o2, False)
+ }
+
+ for mypath, (gid, mode, modemask, preserve_perms) \
+ in dir_mode_map.items():
+ mydir = os.path.join(self["EROOT"], mypath)
+ if preserve_perms and os.path.isdir(mydir):
+ # Only adjust permissions on some directories if
+ # they don't exist yet. This gives freedom to the
+ # user to adjust permissions to suit their taste.
+ continue
+ try:
+ ensure_dirs(mydir, gid=gid, mode=mode, mask=modemask)
+ except PortageException as e:
+ writemsg(_("!!! Directory initialization failed: '%s'\n") % mydir,
+ noiselevel=-1)
+ writemsg("!!! %s\n" % str(e),
+ noiselevel=-1)
+
+ @property
+ def _keywords_manager(self):
+ if self._keywords_manager_obj is None:
+ self._keywords_manager_obj = KeywordsManager(
+ self._locations_manager.profiles_complex,
+ self._locations_manager.abs_user_config,
+ self.local_config,
+ global_accept_keywords=self.configdict["defaults"].get("ACCEPT_KEYWORDS", ""))
+ return self._keywords_manager_obj
+
+ @property
+ def _mask_manager(self):
+ if self._mask_manager_obj is None:
+ self._mask_manager_obj = MaskManager(self.repositories,
+ self._locations_manager.profiles_complex,
+ self._locations_manager.abs_user_config,
+ user_config=self.local_config,
+ strict_umatched_removal=self._unmatched_removal)
+ return self._mask_manager_obj
+
+ @property
+ def _virtuals_manager(self):
+ if self._virtuals_manager_obj is None:
+ self._virtuals_manager_obj = VirtualsManager(self.profiles)
+ return self._virtuals_manager_obj
+
+ @property
+ def pkeywordsdict(self):
+ result = self._keywords_manager.pkeywordsdict.copy()
+ for k, v in result.items():
+ result[k] = v.copy()
+ return result
+
+ @property
+ def pmaskdict(self):
+ return self._mask_manager._pmaskdict.copy()
+
+ @property
+ def punmaskdict(self):
+ return self._mask_manager._punmaskdict.copy()
+
+ @property
+ def soname_provided(self):
+ if self._soname_provided is None:
+ d = stack_dictlist((grabdict(
+ os.path.join(x, "soname.provided"), recursive=True)
+ for x in self.profiles), incremental=True)
+ self._soname_provided = frozenset(SonameAtom(cat, soname)
+ for cat, sonames in d.items() for soname in sonames)
+ return self._soname_provided
+
+ def expandLicenseTokens(self, tokens):
+ """ Take a token from ACCEPT_LICENSE or package.license and expand it
+ if it's a group token (indicated by @) or just return it if it's not a
+ group. If a group is negated then negate all group elements."""
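+ # Illustration (group name is hypothetical): a token "@MY-GROUP" expands
+ # to the licenses that license_groups defines for MY-GROUP, while
+ # "-@MY-GROUP" expands to the same members in negated ("-license") form.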
+ return self._license_manager.expandLicenseTokens(tokens)
+
+ def validate(self):
+ """Validate miscellaneous settings and display warnings if necessary.
+ (This code was previously in the global scope of portage.py)"""
+
+ groups = self.get("ACCEPT_KEYWORDS", "").split()
+ archlist = self.archlist()
+ if not archlist:
+ writemsg(_("--- 'profiles/arch.list' is empty or "
+ "not available. Empty portage tree?\n"), noiselevel=1)
+ else:
+ for group in groups:
+ if group not in archlist and \
+ not (group.startswith("-") and group[1:] in archlist) and \
+ group not in ("*", "~*", "**"):
+ writemsg(_("!!! INVALID ACCEPT_KEYWORDS: %s\n") % str(group),
+ noiselevel=-1)
+
+ profile_broken = False
+
+ # getmaskingstatus requires ARCH for ACCEPT_KEYWORDS support
+ arch = self.get('ARCH')
+ if not self.profile_path or not arch:
+ profile_broken = True
+ else:
+ # If any one of these files exists, then
+ # the profile is considered valid.
+ for x in ("make.defaults", "parent",
+ "packages", "use.force", "use.mask"):
+ if exists_raise_eaccess(os.path.join(self.profile_path, x)):
+ break
+ else:
+ profile_broken = True
+
+ if profile_broken and not portage._sync_mode:
+ abs_profile_path = None
+ for x in (PROFILE_PATH, 'etc/make.profile'):
+ x = os.path.join(self["PORTAGE_CONFIGROOT"], x)
+ try:
+ os.lstat(x)
+ except OSError:
+ pass
+ else:
+ abs_profile_path = x
+ break
+
+ if abs_profile_path is None:
+ abs_profile_path = os.path.join(self["PORTAGE_CONFIGROOT"],
+ PROFILE_PATH)
+
+ writemsg(_("\n\n!!! %s is not a symlink and will probably prevent most merges.\n") % abs_profile_path,
+ noiselevel=-1)
+ writemsg(_("!!! It should point into a profile within %s/profiles/\n") % self["PORTDIR"])
+ writemsg(_("!!! (You can safely ignore this message when syncing. It's harmless.)\n\n\n"))
+
+ abs_user_virtuals = os.path.join(self["PORTAGE_CONFIGROOT"],
+ USER_VIRTUALS_FILE)
+ if os.path.exists(abs_user_virtuals):
+ writemsg("\n!!! /etc/portage/virtuals is deprecated in favor of\n")
+ writemsg("!!! /etc/portage/profile/virtuals. Please move it to\n")
+ writemsg("!!! this new location.\n\n")
+
+ if not sandbox_capable and \
+ ("sandbox" in self.features or "usersandbox" in self.features):
+ if self.profile_path is not None and \
+ os.path.realpath(self.profile_path) == \
+ os.path.realpath(os.path.join(
+ self["PORTAGE_CONFIGROOT"], PROFILE_PATH)):
+ # Don't show this warning when running repoman and the
+ # sandbox feature came from a profile that doesn't belong
+ # to the user.
+ writemsg(colorize("BAD", _("!!! Problem with sandbox"
+ " binary. Disabling...\n\n")), noiselevel=-1)
+
+ if "fakeroot" in self.features and \
+ not fakeroot_capable:
+ writemsg(_("!!! FEATURES=fakeroot is enabled, but the "
+ "fakeroot binary is not installed.\n"), noiselevel=-1)
+
+ if os.getuid() == 0 and not hasattr(os, "setgroups"):
+ warning_shown = False
+
+ if "userpriv" in self.features:
+ writemsg(_("!!! FEATURES=userpriv is enabled, but "
+ "os.setgroups is not available.\n"), noiselevel=-1)
+ warning_shown = True
+
+ if "userfetch" in self.features:
+ writemsg(_("!!! FEATURES=userfetch is enabled, but "
+ "os.setgroups is not available.\n"), noiselevel=-1)
+ warning_shown = True
+
+ if warning_shown and platform.python_implementation() == 'PyPy':
+ writemsg(_("!!! See https://bugs.pypy.org/issue833 for details.\n"),
+ noiselevel=-1)
+
+ binpkg_compression = self.get("BINPKG_COMPRESS")
+ if binpkg_compression:
+ try:
+ compression = _compressors[binpkg_compression]
+ except KeyError as e:
+ writemsg("!!! BINPKG_COMPRESS contains invalid or "
+ "unsupported compression method: %s" % e.args[0],
+ noiselevel=-1)
+ else:
+ try:
+ compression_binary = shlex_split(
+ portage.util.varexpand(compression["compress"],
+ mydict=self))[0]
+ except IndexError as e:
+ writemsg("!!! BINPKG_COMPRESS contains invalid or "
+ "unsupported compression method: %s" % e.args[0],
+ noiselevel=-1)
+ else:
+ if portage.process.find_binary(
+ compression_binary) is None:
+ missing_package = compression["package"]
+ writemsg("!!! BINPKG_COMPRESS unsupported %s. "
+ "Missing package: %s" %
+ (binpkg_compression, missing_package),
+ noiselevel=-1)
+
+ def load_best_module(self,property_string):
+ best_mod = best_from_dict(property_string,self.modules,self.module_priority)
+ mod = None
+ try:
+ mod = load_mod(best_mod)
+ except ImportError:
+ if best_mod in self._module_aliases:
+ mod = load_mod(self._module_aliases[best_mod])
+ elif not best_mod.startswith("cache."):
+ raise
+ else:
+ best_mod = "portage." + best_mod
+ try:
+ mod = load_mod(best_mod)
+ except ImportError:
+ raise
+ return mod
+
+ def lock(self):
+ self.locked = 1
+
+ def unlock(self):
+ self.locked = 0
+
+ def modifying(self):
+ if self.locked:
+ raise Exception(_("Configuration is locked."))
+
+ def backup_changes(self,key=None):
+ self.modifying()
+ if key and key in self.configdict["env"]:
+ self.backupenv[key] = copy.deepcopy(self.configdict["env"][key])
+ else:
+ raise KeyError(_("No such key defined in environment: %s") % key)
+
+ def reset(self, keeping_pkg=0, use_cache=None):
+ """
+ Restore environment from self.backupenv, call self.regenerate()
+ @param keeping_pkg: Should we keep the setcpv() data or delete it.
+ @type keeping_pkg: Boolean
+ @rtype: None
+ """
+
+ if use_cache is not None:
+ warnings.warn("The use_cache parameter for config.reset() is deprecated and without effect.",
+ DeprecationWarning, stacklevel=2)
+
+ self.modifying()
+ self.configdict["env"].clear()
+ self.configdict["env"].update(self.backupenv)
+
+ self.modifiedkeys = []
+ if not keeping_pkg:
+ self.mycpv = None
+ self._setcpv_args_hash = None
+ self.puse = ""
+ del self._penv[:]
+ self.configdict["pkg"].clear()
+ self.configdict["pkginternal"].clear()
+ self.configdict["repo"].clear()
+ self.configdict["defaults"]["USE"] = \
+ " ".join(self.make_defaults_use)
+ self.usemask = self._use_manager.getUseMask()
+ self.useforce = self._use_manager.getUseForce()
+ self.regenerate()
+
+ class _lazy_vars(object):
+
+ __slots__ = ('built_use', 'settings', 'values')
+
+ def __init__(self, built_use, settings):
+ self.built_use = built_use
+ self.settings = settings
+ self.values = None
+
+ def __getitem__(self, k):
+ if self.values is None:
+ self.values = self._init_values()
+ return self.values[k]
+
+ def _init_values(self):
+ values = {}
+ settings = self.settings
+ use = self.built_use
+ if use is None:
+ use = frozenset(settings['PORTAGE_USE'].split())
+
+ values['ACCEPT_LICENSE'] = settings._license_manager.get_prunned_accept_license( \
+ settings.mycpv, use, settings.get('LICENSE', ''), settings.get('SLOT'), settings.get('PORTAGE_REPO_NAME'))
+ values['PORTAGE_RESTRICT'] = self._restrict(use, settings)
+ return values
+
+ def _restrict(self, use, settings):
+ try:
+ restrict = set(use_reduce(settings.get('RESTRICT', ''), uselist=use, flat=True))
+ except InvalidDependString:
+ restrict = set()
+ return ' '.join(sorted(restrict))
+
+ class _lazy_use_expand(object):
+ """
+ Lazily evaluate USE_EXPAND variables since they are only needed when
+ an ebuild shell is spawned. Variable values are made consistent with
+ the previously calculated USE settings.
+ """
+
+ def __init__(self, settings, unfiltered_use,
+ use, usemask, iuse_effective,
+ use_expand_split, use_expand_dict):
+ self._settings = settings
+ self._unfiltered_use = unfiltered_use
+ self._use = use
+ self._usemask = usemask
+ self._iuse_effective = iuse_effective
+ self._use_expand_split = use_expand_split
+ self._use_expand_dict = use_expand_dict
+
+ def __getitem__(self, key):
+ prefix = key.lower() + '_'
+ prefix_len = len(prefix)
+ expand_flags = set( x[prefix_len:] for x in self._use \
+ if x[:prefix_len] == prefix )
+ var_split = self._use_expand_dict.get(key, '').split()
+ # Preserve the order of var_split because it can matter for things
+ # like LINGUAS.
+ var_split = [ x for x in var_split if x in expand_flags ]
+ var_split.extend(expand_flags.difference(var_split))
+ has_wildcard = '*' in expand_flags
+ if has_wildcard:
+ var_split = [ x for x in var_split if x != "*" ]
+ has_iuse = set()
+ for x in self._iuse_effective:
+ if x[:prefix_len] == prefix:
+ has_iuse.add(x[prefix_len:])
+ if has_wildcard:
+ # * means to enable everything in IUSE that's not masked
+ if has_iuse:
+ usemask = self._usemask
+ for suffix in has_iuse:
+ x = prefix + suffix
+ if x not in usemask:
+ if suffix not in expand_flags:
+ var_split.append(suffix)
+ else:
+ # If there is a wildcard and no matching flags in IUSE then
+ # LINGUAS should be unset so that all .mo files are
+ # installed.
+ var_split = []
+ # Make the flags unique and filter them according to IUSE.
+ # Also, continue to preserve order for things like LINGUAS
+ # and filter any duplicates that variable may contain.
+ filtered_var_split = []
+ remaining = has_iuse.intersection(var_split)
+ for x in var_split:
+ if x in remaining:
+ remaining.remove(x)
+ filtered_var_split.append(x)
+ var_split = filtered_var_split
+
+ return ' '.join(var_split)
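+ # Illustration (hypothetical flags): with key "LINGUAS", prefix becomes
+ # "linguas_", so enabled USE flags such as "linguas_de linguas_fr" yield
+ # LINGUAS="de fr", preserving any ordering given by the existing LINGUAS
+ # value in use_expand_dict.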
+
+ def _setcpv_recursion_gate(f):
+ """
+ Raise AssertionError for recursive setcpv calls.
+ """
+ def wrapper(self, *args, **kwargs):
+ if hasattr(self, '_setcpv_active'):
+ raise AssertionError('setcpv recursion detected')
+ self._setcpv_active = True
+ try:
+ return f(self, *args, **kwargs)
+ finally:
+ del self._setcpv_active
+ return wrapper
+
+ @_setcpv_recursion_gate
+ def setcpv(self, mycpv, use_cache=None, mydb=None):
+ """
+ Load a particular CPV into the config, this lets us see the
+ Default USE flags for a particular ebuild as well as the USE
+ flags from package.use.
+
+ @param mycpv: A cpv to load
+ @type mycpv: string
+ @param mydb: a dbapi instance that supports aux_get with the IUSE key.
+ @type mydb: dbapi or derivative.
+ @rtype: None
+ """
+
+ if use_cache is not None:
+ warnings.warn("The use_cache parameter for config.setcpv() is deprecated and without effect.",
+ DeprecationWarning, stacklevel=2)
+
+ self.modifying()
+
+ pkg = None
+ built_use = None
+ explicit_iuse = None
+ if not isinstance(mycpv, basestring):
+ pkg = mycpv
+ mycpv = pkg.cpv
+ mydb = pkg._metadata
+ explicit_iuse = pkg.iuse.all
+ args_hash = (mycpv, id(pkg))
+ if pkg.built:
+ built_use = pkg.use.enabled
+ else:
+ args_hash = (mycpv, id(mydb))
+
+ if args_hash == self._setcpv_args_hash:
+ return
+ self._setcpv_args_hash = args_hash
+
+ has_changed = False
+ self.mycpv = mycpv
+ cat, pf = catsplit(mycpv)
+ cp = cpv_getkey(mycpv)
+ cpv_slot = self.mycpv
+ pkginternaluse = ""
+ iuse = ""
+ pkg_configdict = self.configdict["pkg"]
+ previous_iuse = pkg_configdict.get("IUSE")
+ previous_iuse_effective = pkg_configdict.get("IUSE_EFFECTIVE")
+ previous_features = pkg_configdict.get("FEATURES")
+ previous_penv = self._penv
+
+ aux_keys = self._setcpv_aux_keys
+
+ # Discard any existing metadata and package.env settings from
+ # the previous package instance.
+ pkg_configdict.clear()
+
+ pkg_configdict["CATEGORY"] = cat
+ pkg_configdict["PF"] = pf
+ repository = None
+ eapi = None
+ if mydb:
+ if not hasattr(mydb, "aux_get"):
+ for k in aux_keys:
+ if k in mydb:
+ # Make these lazy, since __getitem__ triggers
+ # evaluation of USE conditionals which can't
+ # occur until PORTAGE_USE is calculated below.
+ pkg_configdict.addLazySingleton(k,
+ mydb.__getitem__, k)
+ else:
+ # When calling dbapi.aux_get(), grab USE for built/installed
+ # packages since we want to save it in PORTAGE_BUILT_USE for
+ # evaluating conditional USE deps in atoms passed via IPC to
+ # helpers like has_version and best_version.
+ aux_keys = set(aux_keys)
+ if hasattr(mydb, '_aux_cache_keys'):
+ aux_keys = aux_keys.intersection(mydb._aux_cache_keys)
+ aux_keys.add('USE')
+ aux_keys = list(aux_keys)
+ for k, v in zip(aux_keys, mydb.aux_get(self.mycpv, aux_keys)):
+ pkg_configdict[k] = v
+ built_use = frozenset(pkg_configdict.pop('USE').split())
+ if not built_use:
+ # Empty USE means this dbapi instance does not contain
+ # built packages.
+ built_use = None
+ eapi = pkg_configdict['EAPI']
+
+ repository = pkg_configdict.pop("repository", None)
+ if repository is not None:
+ pkg_configdict["PORTAGE_REPO_NAME"] = repository
+ iuse = pkg_configdict["IUSE"]
+ if pkg is None:
+ self.mycpv = _pkg_str(self.mycpv, metadata=pkg_configdict,
+ settings=self)
+ cpv_slot = self.mycpv
+ else:
+ cpv_slot = pkg
+ pkginternaluse = []
+ for x in iuse.split():
+ if x.startswith("+"):
+ pkginternaluse.append(x[1:])
+ elif x.startswith("-"):
+ pkginternaluse.append(x)
+ pkginternaluse = " ".join(pkginternaluse)
+
+ eapi_attrs = _get_eapi_attrs(eapi)
+
+ if pkginternaluse != self.configdict["pkginternal"].get("USE", ""):
+ self.configdict["pkginternal"]["USE"] = pkginternaluse
+ has_changed = True
+
+ repo_env = []
+ if repository and repository != Package.UNKNOWN_REPO:
+ repos = []
+ try:
+ repos.extend(repo.name for repo in
+ self.repositories[repository].masters)
+ except KeyError:
+ pass
+ repos.append(repository)
+ for repo in repos:
+ d = self._repo_make_defaults.get(repo)
+ if d is None:
+ d = {}
+ else:
+ # make a copy, since we might modify it with
+ # package.use settings
+ d = d.copy()
+ cpdict = self._use_manager._repo_puse_dict.get(repo, {}).get(cp)
+ if cpdict:
+ repo_puse = ordered_by_atom_specificity(cpdict, cpv_slot)
+ if repo_puse:
+ for x in repo_puse:
+ d["USE"] = d.get("USE", "") + " " + " ".join(x)
+ if d:
+ repo_env.append(d)
+
+ if repo_env or self.configdict["repo"]:
+ self.configdict["repo"].clear()
+ self.configdict["repo"].update(stack_dicts(repo_env,
+ incrementals=self.incrementals))
+ has_changed = True
+
+ defaults = []
+ for i, pkgprofileuse_dict in enumerate(self._use_manager._pkgprofileuse):
+ if self.make_defaults_use[i]:
+ defaults.append(self.make_defaults_use[i])
+ cpdict = pkgprofileuse_dict.get(cp)
+ if cpdict:
+ pkg_defaults = ordered_by_atom_specificity(cpdict, cpv_slot)
+ if pkg_defaults:
+ defaults.extend(pkg_defaults)
+ defaults = " ".join(defaults)
+ if defaults != self.configdict["defaults"].get("USE",""):
+ self.configdict["defaults"]["USE"] = defaults
+ has_changed = True
+
+ useforce = self._use_manager.getUseForce(cpv_slot)
+ if useforce != self.useforce:
+ self.useforce = useforce
+ has_changed = True
+
+ usemask = self._use_manager.getUseMask(cpv_slot)
+ if usemask != self.usemask:
+ self.usemask = usemask
+ has_changed = True
+
+ oldpuse = self.puse
+ self.puse = self._use_manager.getPUSE(cpv_slot)
+ if oldpuse != self.puse:
+ has_changed = True
+ self.configdict["pkg"]["PKGUSE"] = self.puse[:] # For saving to PUSE file
+ self.configdict["pkg"]["USE"] = self.puse[:] # this gets appended to USE
+
+ if previous_features:
+ # The package from the previous setcpv call had package.env
+ # settings which modified FEATURES. Therefore, trigger a
+ # regenerate() call in order to ensure that self.features
+ # is accurate.
+ has_changed = True
+
+ self._penv = []
+ cpdict = self._penvdict.get(cp)
+ if cpdict:
+ penv_matches = ordered_by_atom_specificity(cpdict, cpv_slot)
+ if penv_matches:
+ for x in penv_matches:
+ self._penv.extend(x)
+
+ bashrc_files = []
+
+ for profile, profile_bashrc in zip(self._locations_manager.profiles_complex, self._profile_bashrc):
+ if profile_bashrc:
+ bashrc_files.append(os.path.join(profile.location, 'profile.bashrc'))
+ if profile in self._pbashrcdict:
+ cpdict = self._pbashrcdict[profile].get(cp)
+ if cpdict:
+ bashrc_matches = \
+ ordered_by_atom_specificity(cpdict, cpv_slot)
+ for x in bashrc_matches:
+ bashrc_files.extend(x)
+
+ self._pbashrc = tuple(bashrc_files)
+
+ protected_pkg_keys = set(pkg_configdict)
+ protected_pkg_keys.discard('USE')
+
+ # If there are _any_ package.env settings for this package
+ # then it automatically triggers config.reset(), in order
+ # to account for possible incremental interaction between
+ # package.use, package.env, and overrides from the calling
+ # environment (configdict['env']).
+ if self._penv:
+ has_changed = True
+ # USE is special because package.use settings override
+ # it. Discard any package.use settings here and they'll
+ # be added back later.
+ pkg_configdict.pop('USE', None)
+ self._grab_pkg_env(self._penv, pkg_configdict,
+ protected_keys=protected_pkg_keys)
+
+ # Now add package.use settings, which override USE from
+ # package.env
+ if self.puse:
+ if 'USE' in pkg_configdict:
+ pkg_configdict['USE'] = \
+ pkg_configdict['USE'] + " " + self.puse
+ else:
+ pkg_configdict['USE'] = self.puse
+
+ elif previous_penv:
+ has_changed = True
+
+ if not (previous_iuse == iuse and
+ (previous_iuse_effective is not None) == eapi_attrs.iuse_effective):
+ has_changed = True
+
+ if has_changed:
+ self.reset(keeping_pkg=1)
+
+ env_configdict = self.configdict['env']
+
+ # Ensure that "pkg" values are always preferred over "env" values.
+ # This must occur _after_ the above reset() call, since reset()
+ # copies values from self.backupenv.
+ for k in protected_pkg_keys:
+ env_configdict.pop(k, None)
+
+ lazy_vars = self._lazy_vars(built_use, self)
+ env_configdict.addLazySingleton('ACCEPT_LICENSE',
+ lazy_vars.__getitem__, 'ACCEPT_LICENSE')
+ env_configdict.addLazySingleton('PORTAGE_RESTRICT',
+ lazy_vars.__getitem__, 'PORTAGE_RESTRICT')
+
+ if built_use is not None:
+ pkg_configdict['PORTAGE_BUILT_USE'] = ' '.join(built_use)
+
+ # If reset() has not been called, it's safe to return
+ # early if IUSE has not changed.
+ if not has_changed:
+ return
+
+ # Filter out USE flags that aren't part of IUSE. This has to
+ # be done for every setcpv() call since practically every
+ # package has different IUSE.
+ use = set(self["USE"].split())
+ unfiltered_use = frozenset(use)
+ if explicit_iuse is None:
+ explicit_iuse = frozenset(x.lstrip("+-") for x in iuse.split())
+
+ if eapi_attrs.iuse_effective:
+ iuse_implicit_match = self._iuse_effective_match
+ portage_iuse = set(self._iuse_effective)
+ portage_iuse.update(explicit_iuse)
+ if built_use is not None:
+ # When the binary package was built, the profile may have
+ # had different IUSE_IMPLICIT settings, so any member of
+ # the built USE setting is considered to be a member of
+ # IUSE_EFFECTIVE (see bug 640318).
+ portage_iuse.update(built_use)
+ self.configdict["pkg"]["IUSE_EFFECTIVE"] = \
+ " ".join(sorted(portage_iuse))
+ else:
+ iuse_implicit_match = self._iuse_implicit_match
+ portage_iuse = self._get_implicit_iuse()
+ portage_iuse.update(explicit_iuse)
+
+ # PORTAGE_IUSE is not always needed so it's lazily evaluated.
+ self.configdict["env"].addLazySingleton(
+ "PORTAGE_IUSE", _lazy_iuse_regex, portage_iuse)
+
+ if pkg is None:
+ raw_restrict = pkg_configdict.get("RESTRICT")
+ else:
+ raw_restrict = pkg._raw_metadata["RESTRICT"]
+
+ restrict_test = False
+ if raw_restrict:
+ try:
+ if built_use is not None:
+ restrict = use_reduce(raw_restrict,
+ uselist=built_use, flat=True)
+ else:
+ # Use matchnone=True to ignore USE conditional parts
+ # of RESTRICT, since we want to know whether to mask
+ # the "test" flag _before_ we know the USE values
+ # that would be needed to evaluate the USE
+ # conditionals (see bug #273272).
+ restrict = use_reduce(raw_restrict,
+ matchnone=True, flat=True)
+ except PortageException:
+ pass
+ else:
+ restrict_test = "test" in restrict
+
+ ebuild_force_test = not restrict_test and \
+ self.get("EBUILD_FORCE_TEST") == "1"
+
+ if "test" in explicit_iuse or iuse_implicit_match("test"):
+ if "test" not in self.features:
+ use.discard("test")
+ elif restrict_test or \
+ ("test" in self.usemask and not ebuild_force_test):
+ # "test" is in IUSE and USE=test is masked, so execution
+ # of src_test() probably is not reliable. Therefore,
+ # temporarily disable FEATURES=test just for this package.
+ self["FEATURES"] = " ".join(x for x in self.features \
+ if x != "test")
+ use.discard("test")
+ else:
+ use.add("test")
+ if ebuild_force_test and "test" in self.usemask:
+ self.usemask = \
+ frozenset(x for x in self.usemask if x != "test")
+
+ if eapi_attrs.feature_flag_targetroot and \
+ ("targetroot" in explicit_iuse or iuse_implicit_match("targetroot")):
+ if self["ROOT"] != "/":
+ use.add("targetroot")
+ else:
+ use.discard("targetroot")
+
+ # Allow _* flags from USE_EXPAND wildcards to pass through here.
+ use.difference_update([x for x in use \
+ if (x not in explicit_iuse and \
+ not iuse_implicit_match(x)) and x[-2:] != '_*'])
+
+ # Use the calculated USE flags to regenerate the USE_EXPAND flags so
+ # that they are consistent. For optimal performance, use slice
+ # comparison instead of startswith().
+ use_expand_split = set(x.lower() for \
+ x in self.get('USE_EXPAND', '').split())
+ lazy_use_expand = self._lazy_use_expand(
+ self, unfiltered_use, use, self.usemask,
+ portage_iuse, use_expand_split, self._use_expand_dict)
+
+ use_expand_iuses = dict((k, set()) for k in use_expand_split)
+ for x in portage_iuse:
+ x_split = x.split('_')
+ if len(x_split) == 1:
+ continue
+ for i in range(len(x_split) - 1):
+ k = '_'.join(x_split[:i+1])
+ if k in use_expand_split:
+ use_expand_iuses[k].add(x)
+ break
+
+ for k, use_expand_iuse in use_expand_iuses.items():
+ if k + '_*' in use:
+ use.update( x for x in use_expand_iuse if x not in usemask )
+ k = k.upper()
+ self.configdict['env'].addLazySingleton(k,
+ lazy_use_expand.__getitem__, k)
+
+ for k in self.get("USE_EXPAND_UNPREFIXED", "").split():
+ var_split = self.get(k, '').split()
+ var_split = [ x for x in var_split if x in use ]
+ if var_split:
+ self.configlist[-1][k] = ' '.join(var_split)
+ elif k in self:
+ self.configlist[-1][k] = ''
+
+ # Filtered for the ebuild environment. Store this in a separate
+ # attribute since we still want to be able to see global USE
+ # settings for things like emerge --info.
+
+ self.configdict["env"]["PORTAGE_USE"] = \
+ " ".join(sorted(x for x in use if x[-2:] != '_*'))
+
+ # Clear the eapi cache here rather than in the constructor, since
+ # setcpv triggers lazy instantiation of things like _use_manager.
+ _eapi_cache.clear()
+
+ def _grab_pkg_env(self, penv, container, protected_keys=None):
+ if protected_keys is None:
+ protected_keys = ()
+ abs_user_config = os.path.join(
+ self['PORTAGE_CONFIGROOT'], USER_CONFIG_PATH)
+ non_user_variables = self._non_user_variables
+ # Make a copy since we don't want per-package settings
+ # to pollute the global expand_map.
+ expand_map = self._expand_map.copy()
+ incrementals = self.incrementals
+ for envname in penv:
+ penvfile = os.path.join(abs_user_config, "env", envname)
+ penvconfig = getconfig(penvfile, tolerant=self._tolerant,
+ allow_sourcing=True, expand=expand_map)
+ if penvconfig is None:
+ writemsg("!!! %s references non-existent file: %s\n" % \
+ (os.path.join(abs_user_config, 'package.env'), penvfile),
+ noiselevel=-1)
+ else:
+ for k, v in penvconfig.items():
+ if k in protected_keys or \
+ k in non_user_variables:
+ writemsg("!!! Illegal variable " + \
+ "'%s' assigned in '%s'\n" % \
+ (k, penvfile), noiselevel=-1)
+ elif k in incrementals:
+ if k in container:
+ container[k] = container[k] + " " + v
+ else:
+ container[k] = v
+ else:
+ container[k] = v
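+ # Illustration (hypothetical file): if package.env assigns the name "debug"
+ # to a package and ${PORTAGE_CONFIGROOT}etc/portage/env/debug contains
+ # CFLAGS="-O0 -g", then container["CFLAGS"] is set accordingly (incremental
+ # variables such as FEATURES are appended instead of replaced).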
+
+ def _iuse_effective_match(self, flag):
+ return flag in self._iuse_effective
+
+ def _calc_iuse_effective(self):
+ """
+ Beginning with EAPI 5, IUSE_EFFECTIVE is defined by PMS.
+ """
+ iuse_effective = []
+ iuse_effective.extend(self.get("IUSE_IMPLICIT", "").split())
+
+ # USE_EXPAND_IMPLICIT should contain things like ARCH, ELIBC,
+ # KERNEL, and USERLAND.
+ use_expand_implicit = frozenset(
+ self.get("USE_EXPAND_IMPLICIT", "").split())
+
+ # USE_EXPAND_UNPREFIXED should contain at least ARCH, and
+ # USE_EXPAND_VALUES_ARCH should contain all valid ARCH flags.
+ for v in self.get("USE_EXPAND_UNPREFIXED", "").split():
+ if v not in use_expand_implicit:
+ continue
+ iuse_effective.extend(
+ self.get("USE_EXPAND_VALUES_" + v, "").split())
+
+ use_expand = frozenset(self.get("USE_EXPAND", "").split())
+ for v in use_expand_implicit:
+ if v not in use_expand:
+ continue
+ lower_v = v.lower()
+ for x in self.get("USE_EXPAND_VALUES_" + v, "").split():
+ iuse_effective.append(lower_v + "_" + x)
+
+ return frozenset(iuse_effective)
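+ # Illustration (hypothetical profile values): with
+ # USE_EXPAND_IMPLICIT="ARCH ELIBC", USE_EXPAND_UNPREFIXED="ARCH",
+ # USE_EXPAND="ELIBC", USE_EXPAND_VALUES_ARCH="amd64 x86" and
+ # USE_EXPAND_VALUES_ELIBC="glibc musl", the result contains
+ # "amd64", "x86", "elibc_glibc" and "elibc_musl".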
+
+ def _get_implicit_iuse(self):
+ """
+ Prior to EAPI 5, these flags are considered to
+ be implicit members of IUSE:
+ * Flags derived from ARCH
+ * Flags derived from USE_EXPAND_HIDDEN variables
+ * Masked flags, such as those from {,package}use.mask
+ * Forced flags, such as those from {,package}use.force
+ * build and bootstrap flags used by bootstrap.sh
+ """
+ iuse_implicit = set()
+ # Flags derived from ARCH.
+ arch = self.configdict["defaults"].get("ARCH")
+ if arch:
+ iuse_implicit.add(arch)
+ iuse_implicit.update(self.get("PORTAGE_ARCHLIST", "").split())
+
+ # Flags derived from USE_EXPAND_HIDDEN variables
+ # such as ELIBC, KERNEL, and USERLAND.
+ use_expand_hidden = self.get("USE_EXPAND_HIDDEN", "").split()
+ for x in use_expand_hidden:
+ iuse_implicit.add(x.lower() + "_.*")
+
+ # Flags that have been masked or forced.
+ iuse_implicit.update(self.usemask)
+ iuse_implicit.update(self.useforce)
+
+ # build and bootstrap flags used by bootstrap.sh
+ iuse_implicit.add("build")
+ iuse_implicit.add("bootstrap")
+
+ # Controlled by FEATURES=test. Make this implicit, so handling
+ # of FEATURES=test is consistent regardless of explicit IUSE.
+ # Users may use use.mask/package.use.mask to control
+ # FEATURES=test for all ebuilds, regardless of explicit IUSE.
+ iuse_implicit.add("test")
+
+ return iuse_implicit
+
+ def _getUseMask(self, pkg, stable=None):
+ return self._use_manager.getUseMask(pkg, stable=stable)
+
+ def _getUseForce(self, pkg, stable=None):
+ return self._use_manager.getUseForce(pkg, stable=stable)
+
+ def _getMaskAtom(self, cpv, metadata):
+ """
+ Take a package and return a matching package.mask atom, or None if no
+ such atom exists or it has been cancelled by package.unmask.
+
+ @param cpv: The package name
+ @type cpv: String
+ @param metadata: A dictionary of raw package metadata
+ @type metadata: dict
+ @rtype: String
+ @return: A matching atom string or None if one is not found.
+ """
+ return self._mask_manager.getMaskAtom(cpv, metadata["SLOT"], metadata.get('repository'))
+
+ def _getRawMaskAtom(self, cpv, metadata):
+ """
+ Take a package and return a matching package.mask atom, or None if no
+ such atom exists. Unlike _getMaskAtom(), package.unmask is not applied.
+
+ @param cpv: The package name
+ @type cpv: String
+ @param metadata: A dictionary of raw package metadata
+ @type metadata: dict
+ @rtype: String
+ @return: A matching atom string or None if one is not found.
+ """
+ return self._mask_manager.getRawMaskAtom(cpv, metadata["SLOT"], metadata.get('repository'))
+
+
+ def _getProfileMaskAtom(self, cpv, metadata):
+ """
+ Take a package and return a matching profile atom, or None if no
+ such atom exists. Note that a profile atom may or may not have a "*"
+ prefix.
+
+ @param cpv: The package name
+ @type cpv: String
+ @param metadata: A dictionary of raw package metadata
+ @type metadata: dict
+ @rtype: String
+ @return: A matching profile atom string or None if one is not found.
+ """
+
+ warnings.warn("The config._getProfileMaskAtom() method is deprecated.",
+ DeprecationWarning, stacklevel=2)
+
+ cp = cpv_getkey(cpv)
+ profile_atoms = self.prevmaskdict.get(cp)
+ if profile_atoms:
+ pkg = "".join((cpv, _slot_separator, metadata["SLOT"]))
+ repo = metadata.get("repository")
+ if repo and repo != Package.UNKNOWN_REPO:
+ pkg = "".join((pkg, _repo_separator, repo))
+ pkg_list = [pkg]
+ for x in profile_atoms:
+ if match_from_list(x, pkg_list):
+ continue
+ return x
+ return None
+
+ def _isStable(self, pkg):
+ return self._keywords_manager.isStable(pkg,
+ self.get("ACCEPT_KEYWORDS", ""),
+ self.configdict["backupenv"].get("ACCEPT_KEYWORDS", ""))
+
+ def _getKeywords(self, cpv, metadata):
+ return self._keywords_manager.getKeywords(cpv, metadata["SLOT"], \
+ metadata.get("KEYWORDS", ""), metadata.get("repository"))
+
+ def _getMissingKeywords(self, cpv, metadata):
+ """
+ Take a package and return a list of any KEYWORDS that the user may
+ need to accept for the given package. If the KEYWORDS are empty
+ and the ** keyword has not been accepted, the returned list will
+ contain ** alone (in order to distinguish from the case of "none
+ missing").
+
+ @param cpv: The package name (for package.keywords support)
+ @type cpv: String
+ @param metadata: A dictionary of raw package metadata
+ @type metadata: dict
+ @rtype: List
+ @return: A list of KEYWORDS that have not been accepted.
+ """
+
+ # Hack: Need to check the env directly here, since otherwise stacking
+ # doesn't work properly because negative values are lost in the config
+ # object (bug #139600)
+ backuped_accept_keywords = self.configdict["backupenv"].get("ACCEPT_KEYWORDS", "")
+ global_accept_keywords = self.get("ACCEPT_KEYWORDS", "")
+
+ return self._keywords_manager.getMissingKeywords(cpv, metadata["SLOT"], \
+ metadata.get("KEYWORDS", ""), metadata.get('repository'), \
+ global_accept_keywords, backuped_accept_keywords)
+
+ def _getRawMissingKeywords(self, cpv, metadata):
+ """
+ Take a package and return a list of any KEYWORDS that the user may
+ need to accept for the given package. If the KEYWORDS are empty,
+ the returned list will contain ** alone (in order to distinguish
+ from the case of "none missing"). This DOES NOT apply any user config
+ package.accept_keywords acceptance.
+
+ @param cpv: The package name (for package.keywords support)
+ @type cpv: String
+ @param metadata: A dictionary of raw package metadata
+ @type metadata: dict
+ @rtype: List
+ @return: lists of KEYWORDS that have not been accepted
+ and the keywords it looked for.
+ """
+ return self._keywords_manager.getRawMissingKeywords(cpv, metadata["SLOT"], \
+ metadata.get("KEYWORDS", ""), metadata.get('repository'), \
+ self.get("ACCEPT_KEYWORDS", ""))
+
+ def _getPKeywords(self, cpv, metadata):
+ global_accept_keywords = self.get("ACCEPT_KEYWORDS", "")
+
+ return self._keywords_manager.getPKeywords(cpv, metadata["SLOT"], \
+ metadata.get('repository'), global_accept_keywords)
+
+ def _getMissingLicenses(self, cpv, metadata):
+ """
+ Take a LICENSE string and return a list of any licenses that the user
+ may need to accept for the given package. The returned list will not
+ contain any licenses that have already been accepted. This method
+ can throw an InvalidDependString exception.
+
+ @param cpv: The package name (for package.license support)
+ @type cpv: String
+ @param metadata: A dictionary of raw package metadata
+ @type metadata: dict
+ @rtype: List
+ @return: A list of licenses that have not been accepted.
+ """
+ return self._license_manager.getMissingLicenses( \
+ cpv, metadata["USE"], metadata["LICENSE"], metadata["SLOT"], metadata.get('repository'))
+
+ def _getMissingProperties(self, cpv, metadata):
+ """
+ Take a PROPERTIES string and return a list of any properties the user
+ may need to accept for the given package. The returned list will not
+ contain any properties that have already been accepted. This method
+ can throw an InvalidDependString exception.
+
+ @param cpv: The package name (for package.properties support)
+ @type cpv: String
+ @param metadata: A dictionary of raw package metadata
+ @type metadata: dict
+ @rtype: List
+ @return: A list of properties that have not been accepted.
+ """
+ accept_properties = self._accept_properties
+ try:
+ cpv.slot
+ except AttributeError:
+ cpv = _pkg_str(cpv, metadata=metadata, settings=self)
+ cp = cpv_getkey(cpv)
+ cpdict = self._ppropertiesdict.get(cp)
+ if cpdict:
+ pproperties_list = ordered_by_atom_specificity(cpdict, cpv)
+ if pproperties_list:
+ accept_properties = list(self._accept_properties)
+ for x in pproperties_list:
+ accept_properties.extend(x)
+
+ properties_str = metadata.get("PROPERTIES", "")
+ properties = set(use_reduce(properties_str, matchall=1, flat=True))
+
+ acceptable_properties = set()
+ for x in accept_properties:
+ if x == '*':
+ acceptable_properties.update(properties)
+ elif x == '-*':
+ acceptable_properties.clear()
+ elif x[:1] == '-':
+ acceptable_properties.discard(x[1:])
+ else:
+ acceptable_properties.add(x)
+
+ if "?" in properties_str:
+ use = metadata["USE"].split()
+ else:
+ use = []
+
+ return [x for x in use_reduce(properties_str, uselist=use, flat=True)
+ if x not in acceptable_properties]
+
+ def _getMissingRestrict(self, cpv, metadata):
+ """
+ Take a RESTRICT string and return a list of any tokens the user
+ may need to accept for the given package. The returned list will not
+ contain any tokens that have already been accepted. This method
+ can throw an InvalidDependString exception.
+
+ @param cpv: The package name (for package.accept_restrict support)
+ @type cpv: String
+ @param metadata: A dictionary of raw package metadata
+ @type metadata: dict
+ @rtype: List
+ @return: A list of tokens that have not been accepted.
+ """
+ accept_restrict = self._accept_restrict
+ try:
+ cpv.slot
+ except AttributeError:
+ cpv = _pkg_str(cpv, metadata=metadata, settings=self)
+ cp = cpv_getkey(cpv)
+ cpdict = self._paccept_restrict.get(cp)
+ if cpdict:
+ paccept_restrict_list = ordered_by_atom_specificity(cpdict, cpv)
+ if paccept_restrict_list:
+ accept_restrict = list(self._accept_restrict)
+ for x in paccept_restrict_list:
+ accept_restrict.extend(x)
+
+ restrict_str = metadata.get("RESTRICT", "")
+ all_restricts = set(use_reduce(restrict_str, matchall=1, flat=True))
+
+ acceptable_restricts = set()
+ for x in accept_restrict:
+ if x == '*':
+ acceptable_restricts.update(all_restricts)
+ elif x == '-*':
+ acceptable_restricts.clear()
+ elif x[:1] == '-':
+ acceptable_restricts.discard(x[1:])
+ else:
+ acceptable_restricts.add(x)
+
+ if "?" in restrict_str:
+ use = metadata["USE"].split()
+ else:
+ use = []
+
+ return [x for x in use_reduce(restrict_str, uselist=use, flat=True)
+ if x not in acceptable_restricts]
+
+ def _accept_chost(self, cpv, metadata):
+ """
+ @return True if pkg CHOST is accepted, False otherwise.
+ """
+ if self._accept_chost_re is None:
+ accept_chost = self.get("ACCEPT_CHOSTS", "").split()
+ if not accept_chost:
+ chost = self.get("CHOST")
+ if chost:
+ accept_chost.append(chost)
+ if not accept_chost:
+ self._accept_chost_re = re.compile(".*")
+ elif len(accept_chost) == 1:
+ try:
+ self._accept_chost_re = re.compile(r'^%s$' % accept_chost[0])
+ except re.error as e:
+ writemsg(_("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n") % \
+ (accept_chost[0], e), noiselevel=-1)
+ self._accept_chost_re = re.compile("^$")
+ else:
+ try:
+ self._accept_chost_re = re.compile(
+ r'^(%s)$' % "|".join(accept_chost))
+ except re.error as e:
+ writemsg(_("!!! Invalid ACCEPT_CHOSTS value: '%s': %s\n") % \
+ (" ".join(accept_chost), e), noiselevel=-1)
+ self._accept_chost_re = re.compile("^$")
+
+ pkg_chost = metadata.get('CHOST', '')
+ return not pkg_chost or \
+ self._accept_chost_re.match(pkg_chost) is not None
+
+ def setinst(self, mycpv, mydbapi):
+ """This used to update the preferences for old-style virtuals.
+ It is a no-op now."""
+ pass
+
+ def reload(self):
+ """Reload things like /etc/profile.env that can change during runtime."""
+ env_d_filename = os.path.join(self["EROOT"], "etc", "profile.env")
+ self.configdict["env.d"].clear()
+ env_d = getconfig(env_d_filename,
+ tolerant=self._tolerant, expand=False)
+ if env_d:
+ # env_d will be None if profile.env doesn't exist.
+ for k in self._env_d_blacklist:
+ env_d.pop(k, None)
+ self.configdict["env.d"].update(env_d)
+
+ def regenerate(self, useonly=0, use_cache=None):
+ """
+ Regenerate settings
+ This involves regenerating valid USE flags, re-expanding USE_EXPAND flags,
+ re-stacking USE flags (-flag and -*), as well as any other INCREMENTAL
+ variables. This also updates the env.d configdict; useful in case an ebuild
+ changes the environment.
+
+ If FEATURES has already been stacked, it is not stacked twice.
+
+ @param useonly: Only regenerate USE flags (not any other incrementals)
+ @type useonly: Boolean
+ @rtype: None
+ """
+
+ if use_cache is not None:
+ warnings.warn("The use_cache parameter for config.regenerate() is deprecated and without effect.",
+ DeprecationWarning, stacklevel=2)
+
+ self.modifying()
+
+ if useonly:
+ myincrementals=["USE"]
+ else:
+ myincrementals = self.incrementals
+ myincrementals = set(myincrementals)
+
+ # Process USE last because it depends on USE_EXPAND which is also
+ # an incremental!
+ myincrementals.discard("USE")
+
+ mydbs = self.configlist[:-1]
+ mydbs.append(self.backupenv)
+
+ # ACCEPT_LICENSE is a lazily evaluated incremental, so that * can be
+ # used to match all licenses without ever having to explicitly expand
+ # it to all licenses.
+ if self.local_config:
+ mysplit = []
+ for curdb in mydbs:
+ mysplit.extend(curdb.get('ACCEPT_LICENSE', '').split())
+ mysplit = prune_incremental(mysplit)
+ accept_license_str = ' '.join(mysplit)
+ self.configlist[-1]['ACCEPT_LICENSE'] = accept_license_str
+ self._license_manager.set_accept_license_str(accept_license_str)
+ else:
+ # repoman will accept any license
+ self._license_manager.set_accept_license_str("*")
+
+ # ACCEPT_PROPERTIES works like ACCEPT_LICENSE, without groups
+ if self.local_config:
+ mysplit = []
+ for curdb in mydbs:
+ mysplit.extend(curdb.get('ACCEPT_PROPERTIES', '').split())
+ mysplit = prune_incremental(mysplit)
+ self.configlist[-1]['ACCEPT_PROPERTIES'] = ' '.join(mysplit)
+ if tuple(mysplit) != self._accept_properties:
+ self._accept_properties = tuple(mysplit)
+ else:
+ # repoman will accept any property
+ self._accept_properties = ('*',)
+
+ if self.local_config:
+ mysplit = []
+ for curdb in mydbs:
+ mysplit.extend(curdb.get('ACCEPT_RESTRICT', '').split())
+ mysplit = prune_incremental(mysplit)
+ self.configlist[-1]['ACCEPT_RESTRICT'] = ' '.join(mysplit)
+ if tuple(mysplit) != self._accept_restrict:
+ self._accept_restrict = tuple(mysplit)
+ else:
+ # repoman will accept any RESTRICT token
+ self._accept_restrict = ('*',)
+
+ increment_lists = {}
+ for k in myincrementals:
+ incremental_list = []
+ increment_lists[k] = incremental_list
+ for curdb in mydbs:
+ v = curdb.get(k)
+ if v is not None:
+ incremental_list.append(v.split())
+
+ if 'FEATURES' in increment_lists:
+ increment_lists['FEATURES'].append(self._features_overrides)
+
+ myflags = set()
+ for mykey, incremental_list in increment_lists.items():
+
+ myflags.clear()
+ for mysplit in incremental_list:
+
+ for x in mysplit:
+ if x=="-*":
+ # "-*" is a special "minus" var that means "unset all settings",
+ # so USE="-* gnome" will have *just* gnome enabled.
+ myflags.clear()
+ continue
+
+ if x[0]=="+":
+ # Not legal. People assume too much. Complain.
+ writemsg(colorize("BAD",
+ _("%s values should not start with a '+': %s") % (mykey,x)) \
+ + "\n", noiselevel=-1)
+ x=x[1:]
+ if not x:
+ continue
+
+ if (x[0]=="-"):
+ myflags.discard(x[1:])
+ continue
+
+ # We got here, so add it now.
+ myflags.add(x)
+
+ #store setting in last element of configlist, the original environment:
+ if myflags or mykey in self:
+ self.configlist[-1][mykey] = " ".join(sorted(myflags))
+
+ # Do the USE calculation last because it depends on USE_EXPAND.
+ use_expand = self.get("USE_EXPAND", "").split()
+ use_expand_dict = self._use_expand_dict
+ use_expand_dict.clear()
+ for k in use_expand:
+ v = self.get(k)
+ if v is not None:
+ use_expand_dict[k] = v
+
+ use_expand_unprefixed = self.get("USE_EXPAND_UNPREFIXED", "").split()
+
+ # In order to best accommodate the long-standing practice of
+ # setting default USE_EXPAND variables in the profile's
+ # make.defaults, we translate these variables into their
+ # equivalent USE flags so that useful incremental behavior
+ # is enabled (for sub-profiles).
+ configdict_defaults = self.configdict['defaults']
+ if self._make_defaults is not None:
+ for i, cfg in enumerate(self._make_defaults):
+ if not cfg:
+ self.make_defaults_use.append("")
+ continue
+ use = cfg.get("USE", "")
+ expand_use = []
+
+ for k in use_expand_unprefixed:
+ v = cfg.get(k)
+ if v is not None:
+ expand_use.extend(v.split())
+
+ for k in use_expand_dict:
+ v = cfg.get(k)
+ if v is None:
+ continue
+ prefix = k.lower() + '_'
+ for x in v.split():
+ if x[:1] == '-':
+ expand_use.append('-' + prefix + x[1:])
+ else:
+ expand_use.append(prefix + x)
+
+ if expand_use:
+ expand_use.append(use)
+ use = ' '.join(expand_use)
+ self.make_defaults_use.append(use)
+ self.make_defaults_use = tuple(self.make_defaults_use)
+ # Preserve both positive and negative flags here, since
+ # negative flags may later interact with other flags pulled
+ # in via USE_ORDER.
+ configdict_defaults['USE'] = ' '.join(
+ filter(None, self.make_defaults_use))
+ # Set to None so this code only runs once.
+ self._make_defaults = None
+
+ if not self.uvlist:
+ for x in self["USE_ORDER"].split(":"):
+ if x in self.configdict:
+ self.uvlist.append(self.configdict[x])
+ self.uvlist.reverse()
+
+ # For optimal performance, use slice
+ # comparison instead of startswith().
+ iuse = self.configdict["pkg"].get("IUSE")
+ if iuse is not None:
+ iuse = [x.lstrip("+-") for x in iuse.split()]
+ myflags = set()
+ for curdb in self.uvlist:
+
+ for k in use_expand_unprefixed:
+ v = curdb.get(k)
+ if v is None:
+ continue
+ for x in v.split():
+ if x[:1] == "-":
+ myflags.discard(x[1:])
+ else:
+ myflags.add(x)
+
+ cur_use_expand = [x for x in use_expand if x in curdb]
+ mysplit = curdb.get("USE", "").split()
+ if not mysplit and not cur_use_expand:
+ continue
+ for x in mysplit:
+ if x == "-*":
+ myflags.clear()
+ continue
+
+ if x[0] == "+":
+ writemsg(colorize("BAD", _("USE flags should not start "
+ "with a '+': %s\n") % x), noiselevel=-1)
+ x = x[1:]
+ if not x:
+ continue
+
+ if x[0] == "-":
+ if x[-2:] == '_*':
+ prefix = x[1:-1]
+ prefix_len = len(prefix)
+ myflags.difference_update(
+ [y for y in myflags if \
+ y[:prefix_len] == prefix])
+ myflags.discard(x[1:])
+ continue
+
+ if iuse is not None and x[-2:] == '_*':
+ # Expand wildcards here, so that cases like
+ # USE="linguas_* -linguas_en_US" work correctly.
+ prefix = x[:-1]
+ prefix_len = len(prefix)
+ has_iuse = False
+ for y in iuse:
+ if y[:prefix_len] == prefix:
+ has_iuse = True
+ myflags.add(y)
+ if not has_iuse:
+ # There are no matching IUSE, so allow the
+ # wildcard to pass through. This allows
+ # linguas_* to trigger unset LINGUAS in
+ # cases when no linguas_ flags are in IUSE.
+ myflags.add(x)
+ else:
+ myflags.add(x)
+
+ if curdb is configdict_defaults:
+ # USE_EXPAND flags from make.defaults are handled
+ # earlier, in order to provide useful incremental
+ # behavior (for sub-profiles).
+ continue
+
+ for var in cur_use_expand:
+ var_lower = var.lower()
+ is_not_incremental = var not in myincrementals
+ if is_not_incremental:
+ prefix = var_lower + "_"
+ prefix_len = len(prefix)
+ for x in list(myflags):
+ if x[:prefix_len] == prefix:
+ myflags.remove(x)
+ for x in curdb[var].split():
+ if x[0] == "+":
+ if is_not_incremental:
+ writemsg(colorize("BAD", _("Invalid '+' "
+ "operator in non-incremental variable "
+ "'%s': '%s'\n") % (var, x)), noiselevel=-1)
+ continue
+ else:
+ writemsg(colorize("BAD", _("Invalid '+' "
+ "operator in incremental variable "
+ "'%s': '%s'\n") % (var, x)), noiselevel=-1)
+ x = x[1:]
+ if x[0] == "-":
+ if is_not_incremental:
+ writemsg(colorize("BAD", _("Invalid '-' "
+ "operator in non-incremental variable "
+ "'%s': '%s'\n") % (var, x)), noiselevel=-1)
+ continue
+ myflags.discard(var_lower + "_" + x[1:])
+ continue
+ myflags.add(var_lower + "_" + x)
+
+ if hasattr(self, "features"):
+ self.features._features.clear()
+ else:
+ self.features = features_set(self)
+ self.features._features.update(self.get('FEATURES', '').split())
+ self.features._sync_env_var()
+ self.features._validate()
+
+ myflags.update(self.useforce)
+ arch = self.configdict["defaults"].get("ARCH")
+ if arch:
+ myflags.add(arch)
+
+ myflags.difference_update(self.usemask)
+ self.configlist[-1]["USE"]= " ".join(sorted(myflags))
+
+ if self.mycpv is None:
+ # Generate global USE_EXPAND variables settings that are
+ # consistent with USE, for display by emerge --info. For
+ # package instances, these are instead generated via
+ # setcpv().
+ for k in use_expand:
+ prefix = k.lower() + '_'
+ prefix_len = len(prefix)
+ expand_flags = set( x[prefix_len:] for x in myflags \
+ if x[:prefix_len] == prefix )
+ var_split = use_expand_dict.get(k, '').split()
+ var_split = [ x for x in var_split if x in expand_flags ]
+ var_split.extend(sorted(expand_flags.difference(var_split)))
+ if var_split:
+ self.configlist[-1][k] = ' '.join(var_split)
+ elif k in self:
+ self.configlist[-1][k] = ''
+
+ for k in use_expand_unprefixed:
+ var_split = self.get(k, '').split()
+ var_split = [ x for x in var_split if x in myflags ]
+ if var_split:
+ self.configlist[-1][k] = ' '.join(var_split)
+ elif k in self:
+ self.configlist[-1][k] = ''
+
+ @property
+ def virts_p(self):
+ warnings.warn("portage config.virts_p attribute " + \
+ "is deprecated, use config.get_virts_p()",
+ DeprecationWarning, stacklevel=2)
+ return self.get_virts_p()
+
+ @property
+ def virtuals(self):
+ warnings.warn("portage config.virtuals attribute " + \
+ "is deprecated, use config.getvirtuals()",
+ DeprecationWarning, stacklevel=2)
+ return self.getvirtuals()
+
+ def get_virts_p(self):
+ # Ensure that we don't trigger the _treeVirtuals
+ # assertion in VirtualsManager._compile_virtuals().
+ self.getvirtuals()
+ return self._virtuals_manager.get_virts_p()
+
+ def getvirtuals(self):
+ if self._virtuals_manager._treeVirtuals is None:
+ #Hack around the fact that VirtualsManager needs a vartree
+ #and vartree needs a config instance.
+ #This code should be part of VirtualsManager.getvirtuals().
+ if self.local_config:
+ temp_vartree = vartree(settings=self)
+ self._virtuals_manager._populate_treeVirtuals(temp_vartree)
+ else:
+ self._virtuals_manager._treeVirtuals = {}
+
+ return self._virtuals_manager.getvirtuals()
+
+ def _populate_treeVirtuals_if_needed(self, vartree):
+ """Reduce the provides into a list by CP."""
+ if self._virtuals_manager._treeVirtuals is None:
+ if self.local_config:
+ self._virtuals_manager._populate_treeVirtuals(vartree)
+ else:
+ self._virtuals_manager._treeVirtuals = {}
+
+ def __delitem__(self,mykey):
+ self.pop(mykey)
+
+ def __getitem__(self, key):
+ try:
+ return self._getitem(key)
+ except KeyError:
+ if portage._internal_caller:
+ stack = traceback.format_stack()[:-1] + traceback.format_exception(*sys.exc_info())[1:]
+ try:
+ # Ensure that output is written to terminal.
+ with open("/dev/tty", "w") as f:
+ f.write("=" * 96 + "\n")
+ f.write("=" * 8 + " Traceback for invalid call to portage.package.ebuild.config.config.__getitem__ " + "=" * 8 + "\n")
+ f.writelines(stack)
+ f.write("=" * 96 + "\n")
+ except Exception:
+ pass
+ raise
+ else:
+ warnings.warn(_("Passing nonexistent key %r to %s is deprecated. Use %s instead.") %
+ (key, "portage.package.ebuild.config.config.__getitem__",
+ "portage.package.ebuild.config.config.get"), DeprecationWarning, stacklevel=2)
+ return ""
+
+ def _getitem(self, mykey):
+
+ if mykey in self._constant_keys:
+ # These two point to temporary values when
+ # portage plans to update itself.
+ if mykey == "PORTAGE_BIN_PATH":
+ return portage._bin_path
+ elif mykey == "PORTAGE_PYM_PATH":
+ return portage._pym_path
+
+ elif mykey == "PORTAGE_PYTHONPATH":
+ value = [x for x in \
+ self.backupenv.get("PYTHONPATH", "").split(":") if x]
+ need_pym_path = True
+ if value:
+ try:
+ need_pym_path = not os.path.samefile(value[0],
+ portage._pym_path)
+ except OSError:
+ pass
+ if need_pym_path:
+ value.insert(0, portage._pym_path)
+ return ":".join(value)
+
+ elif mykey == "PORTAGE_GID":
+ return "%s" % portage_gid
+
+ for d in self.lookuplist:
+ try:
+ return d[mykey]
+ except KeyError:
+ pass
+
+ raise KeyError(mykey)
+
+ def get(self, k, x=None):
+ try:
+ return self._getitem(k)
+ except KeyError:
+ return x
+
+ def pop(self, key, *args):
+ self.modifying()
+ if len(args) > 1:
+ raise TypeError(
+ "pop expected at most 2 arguments, got " + \
+ repr(1 + len(args)))
+ v = self
+ for d in reversed(self.lookuplist):
+ v = d.pop(key, v)
+ if v is self:
+ if args:
+ return args[0]
+ raise KeyError(key)
+ return v
+
+ def __contains__(self, mykey):
+ """Called to implement membership test operators (in and not in)."""
+ try:
+ self._getitem(mykey)
+ except KeyError:
+ return False
+ else:
+ return True
+
+ def setdefault(self, k, x=None):
+ v = self.get(k)
+ if v is not None:
+ return v
+ else:
+ self[k] = x
+ return x
+
+ def keys(self):
+ return list(self)
+
+ def __iter__(self):
+ keys = set()
+ keys.update(self._constant_keys)
+ for d in self.lookuplist:
+ keys.update(d)
+ return iter(keys)
+
+ def iterkeys(self):
+ return iter(self)
+
+ def iteritems(self):
+ for k in self:
+ yield (k, self._getitem(k))
+
+ def items(self):
+ return list(self.iteritems())
+
+ def __setitem__(self,mykey,myvalue):
+ "set a value; will be thrown away at reset() time"
+ if not isinstance(myvalue, basestring):
+ raise ValueError("Invalid type being used as a value: '%s': '%s'" % (str(mykey),str(myvalue)))
+
+ # Avoid potential UnicodeDecodeError exceptions later.
+ mykey = _unicode_decode(mykey)
+ myvalue = _unicode_decode(myvalue)
+
+ self.modifying()
+ self.modifiedkeys.append(mykey)
+ self.configdict["env"][mykey]=myvalue
+
+ def environ(self):
+ "return our locally-maintained environment"
+ mydict={}
+ environ_filter = self._environ_filter
+
+ eapi = self.get('EAPI')
+ eapi_attrs = _get_eapi_attrs(eapi)
+ phase = self.get('EBUILD_PHASE')
+ emerge_from = self.get('EMERGE_FROM')
+ filter_calling_env = False
+ if self.mycpv is not None and \
+ not (emerge_from == 'ebuild' and phase == 'setup') and \
+ phase not in ('clean', 'cleanrm', 'depend', 'fetch'):
+ temp_dir = self.get('T')
+ if temp_dir is not None and \
+ os.path.exists(os.path.join(temp_dir, 'environment')):
+ filter_calling_env = True
+
+ environ_whitelist = self._environ_whitelist
+ for x, myvalue in self.iteritems():
+ if x in environ_filter:
+ continue
+ if not isinstance(myvalue, basestring):
+ writemsg(_("!!! Non-string value in config: %s=%s\n") % \
+ (x, myvalue), noiselevel=-1)
+ continue
+ if filter_calling_env and \
+ x not in environ_whitelist and \
+ not self._environ_whitelist_re.match(x):
+ # Do not allow anything to leak into the ebuild
+ # environment unless it is explicitly whitelisted.
+ # This ensures that variables unset by the ebuild
+ # remain unset (bug #189417).
+ continue
+ mydict[x] = myvalue
+ if "HOME" not in mydict and "BUILD_PREFIX" in mydict:
+ writemsg("*** HOME not set. Setting to "+mydict["BUILD_PREFIX"]+"\n")
+ mydict["HOME"]=mydict["BUILD_PREFIX"][:]
+
+ if filter_calling_env:
+ if phase:
+ whitelist = []
+ if "rpm" == phase:
+ whitelist.append("RPMDIR")
+ for k in whitelist:
+ v = self.get(k)
+ if v is not None:
+ mydict[k] = v
+
+ # At some point we may want to stop exporting FEATURES to the ebuild
+ # environment, in order to prevent ebuilds from abusing it. In
+ # preparation for that, export it as PORTAGE_FEATURES so that bashrc
+ # users will be able to migrate any FEATURES conditional code to
+ # use this alternative variable.
+ mydict["PORTAGE_FEATURES"] = self["FEATURES"]
+
+ # Filtered by IUSE and implicit IUSE.
+ mydict["USE"] = self.get("PORTAGE_USE", "")
+
+ # Don't export AA to the ebuild environment in EAPIs that forbid it
+ if not eapi_exports_AA(eapi):
+ mydict.pop("AA", None)
+
+ if not eapi_exports_merge_type(eapi):
+ mydict.pop("MERGE_TYPE", None)
+
+ src_phase = _phase_func_map.get(phase, '').startswith('src_')
+
+ if not (src_phase and eapi_attrs.sysroot):
+ mydict.pop("ESYSROOT", None)
+
+ if not (src_phase and eapi_attrs.broot):
+ mydict.pop("BROOT", None)
+
+ # Prefix variables are supported beginning with EAPI 3, or when
+ # force-prefix is in FEATURES, since older EAPIs would otherwise be
+ # useless with prefix configurations. This brings compatibility with
+ # the prefix branch of portage, which also supports EPREFIX for all
+ # EAPIs (for obvious reasons).
+ if phase == 'depend' or \
+ ('force-prefix' not in self.features and
+ eapi is not None and not eapi_supports_prefix(eapi)):
+ mydict.pop("ED", None)
+ mydict.pop("EPREFIX", None)
+ mydict.pop("EROOT", None)
+ mydict.pop("ESYSROOT", None)
+
+ if phase not in ("pretend", "setup", "preinst", "postinst") or \
+ not eapi_exports_replace_vars(eapi):
+ mydict.pop("REPLACING_VERSIONS", None)
+
+ if phase not in ("prerm", "postrm") or \
+ not eapi_exports_replace_vars(eapi):
+ mydict.pop("REPLACED_BY_VERSION", None)
+
+ if phase is not None and eapi_attrs.exports_EBUILD_PHASE_FUNC:
+ phase_func = _phase_func_map.get(phase)
+ if phase_func is not None:
+ mydict["EBUILD_PHASE_FUNC"] = phase_func
+
+ if eapi_attrs.posixish_locale:
+ split_LC_ALL(mydict)
+ mydict["LC_COLLATE"] = "C"
+ # check_locale() returns None when check can not be executed.
+ if check_locale(silent=True, env=mydict) is False:
+ # try another locale
+ for l in ("C.UTF-8", "en_US.UTF-8", "en_GB.UTF-8", "C"):
+ mydict["LC_CTYPE"] = l
+ if check_locale(silent=True, env=mydict):
+ # TODO: output the following only once
+# writemsg(_("!!! LC_CTYPE unsupported, using %s instead\n")
+# % mydict["LC_CTYPE"])
+ break
+ else:
+ raise AssertionError("C locale did not pass the test!")
+
+ if not eapi_attrs.exports_PORTDIR:
+ mydict.pop("PORTDIR", None)
+ if not eapi_attrs.exports_ECLASSDIR:
+ mydict.pop("ECLASSDIR", None)
+
+ if not eapi_attrs.path_variables_end_with_trailing_slash:
+ for v in ("D", "ED", "ROOT", "EROOT", "ESYSROOT", "BROOT"):
+ if v in mydict:
+ mydict[v] = mydict[v].rstrip(os.path.sep)
+
+ # Since SYSROOT=/ interacts badly with autotools.eclass (bug 654600),
+ # and no EAPI expects SYSROOT to have a trailing slash, always strip
+ # the trailing slash from SYSROOT.
+ if 'SYSROOT' in mydict:
+ mydict['SYSROOT'] = mydict['SYSROOT'].rstrip(os.sep)
+
+ try:
+ builddir = mydict["PORTAGE_BUILDDIR"]
+ distdir = mydict["DISTDIR"]
+ except KeyError:
+ pass
+ else:
+ mydict["PORTAGE_ACTUAL_DISTDIR"] = distdir
+ mydict["DISTDIR"] = os.path.join(builddir, "distdir")
+
+ return mydict
+
+ def thirdpartymirrors(self):
+ if getattr(self, "_thirdpartymirrors", None) is None:
+ thirdparty_lists = []
+ for repo_name in reversed(self.repositories.prepos_order):
+ thirdparty_lists.append(grabdict(os.path.join(
+ self.repositories[repo_name].location,
+ "profiles", "thirdpartymirrors")))
+ self._thirdpartymirrors = stack_dictlist(thirdparty_lists, incremental=True)
+ return self._thirdpartymirrors
+
+ def archlist(self):
+ _archlist = []
+ for myarch in self["PORTAGE_ARCHLIST"].split():
+ _archlist.append(myarch)
+ _archlist.append("~" + myarch)
+ return _archlist
+
+ def selinux_enabled(self):
+ if getattr(self, "_selinux_enabled", None) is None:
+ self._selinux_enabled = 0
+ if "selinux" in self["USE"].split():
+ if selinux:
+ if selinux.is_selinux_enabled() == 1:
+ self._selinux_enabled = 1
+ else:
+ self._selinux_enabled = 0
+ else:
+ writemsg(_("!!! SELinux module not found. Please verify that it was installed.\n"),
+ noiselevel=-1)
+ self._selinux_enabled = 0
+
+ return self._selinux_enabled
+
+ if sys.hexversion >= 0x3000000:
+ keys = __iter__
+ items = iteritems
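
As a side note, the incremental stacking that regenerate() implements above (a plain token enables a flag, a leading '-' removes it, and '-*' clears everything accumulated so far) can be summarized in a short standalone sketch; this is illustrative only and not part of the patch:

    def stack_incremental(tokens):
        flags = set()
        for token in tokens:
            if token == "-*":
                flags.clear()              # "-*" unsets everything seen so far
            elif token.startswith("-"):
                flags.discard(token[1:])   # "-foo" disables foo if present
            else:
                flags.add(token)           # a plain token enables the flag
        return flags

    # As the comment above notes, USE="-* gnome" leaves *just* gnome enabled:
    assert stack_incremental(["X", "alsa", "-*", "gnome"]) == {"gnome"}
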
diff --git a/lib/portage/package/ebuild/deprecated_profile_check.py b/lib/portage/package/ebuild/deprecated_profile_check.py
new file mode 100644
index 000000000..fdb19b4ac
--- /dev/null
+++ b/lib/portage/package/ebuild/deprecated_profile_check.py
@@ -0,0 +1,83 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['deprecated_profile_check']
+
+import io
+
+import portage
+from portage import os, _encodings, _unicode_encode
+from portage.const import DEPRECATED_PROFILE_FILE
+from portage.localization import _
+from portage.output import colorize
+from portage.util import writemsg
+
+def deprecated_profile_check(settings=None):
+ config_root = None
+ eprefix = None
+ deprecated_profile_file = None
+ if settings is not None:
+ config_root = settings["PORTAGE_CONFIGROOT"]
+ eprefix = settings["EPREFIX"]
+ for x in reversed(settings.profiles):
+ deprecated_profile_file = os.path.join(x, "deprecated")
+ if os.access(deprecated_profile_file, os.R_OK):
+ break
+ else:
+ deprecated_profile_file = None
+
+ if deprecated_profile_file is None:
+ deprecated_profile_file = os.path.join(config_root or "/",
+ DEPRECATED_PROFILE_FILE)
+ if not os.access(deprecated_profile_file, os.R_OK):
+ deprecated_profile_file = os.path.join(config_root or "/",
+ 'etc', 'make.profile', 'deprecated')
+ if not os.access(deprecated_profile_file, os.R_OK):
+ return
+
+ with io.open(_unicode_encode(deprecated_profile_file,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='replace') as f:
+ dcontent = f.readlines()
+ writemsg(colorize("BAD", _("\n!!! Your current profile is "
+ "deprecated and not supported anymore.")) + "\n", noiselevel=-1)
+ writemsg(colorize("BAD", _("!!! Use eselect profile to update your "
+ "profile.")) + "\n", noiselevel=-1)
+ if not dcontent:
+ writemsg(colorize("BAD", _("!!! Please refer to the "
+ "Gentoo Upgrading Guide.")) + "\n", noiselevel=-1)
+ return True
+ newprofile = dcontent[0].rstrip("\n")
+ writemsg(colorize("BAD", _("!!! Please upgrade to the "
+ "following profile if possible:")) + "\n\n", noiselevel=-1)
+ writemsg(8*" " + colorize("GOOD", newprofile) + "\n\n", noiselevel=-1)
+ if len(dcontent) > 1:
+ writemsg(_("To upgrade do the following steps:\n"), noiselevel=-1)
+ for myline in dcontent[1:]:
+ writemsg(myline, noiselevel=-1)
+ writemsg("\n\n", noiselevel=-1)
+ else:
+ writemsg(_("You may use the following command to upgrade:\n\n"), noiselevel=-1)
+ writemsg(8*" " + colorize("INFORM", 'eselect profile set ' +
+ newprofile) + "\n\n", noiselevel=-1)
+
+ if settings is not None:
+ main_repo_loc = settings.repositories.mainRepoLocation()
+ new_profile_path = os.path.join(main_repo_loc,
+ "profiles", newprofile.rstrip("\n"))
+
+ if os.path.isdir(new_profile_path):
+ new_config = portage.config(config_root=config_root,
+ config_profile_path=new_profile_path,
+ eprefix=eprefix)
+
+ if not new_config.profiles:
+ writemsg("\n %s %s\n" % (colorize("WARN", "*"),
+ _("You must update portage before you "
+ "can migrate to the above profile.")), noiselevel=-1)
+ writemsg(" %s %s\n\n" % (colorize("WARN", "*"),
+ _("In order to update portage, "
+ "run 'emerge --oneshot portage'.")),
+ noiselevel=-1)
+
+ return True
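
For context, deprecated_profile_check() reads the profile's "deprecated" file with the replacement profile on the first line; any remaining lines are treated as free-form upgrade instructions and echoed verbatim. A hypothetical example of such a file (the profile name and instructions are made up for illustration):

    default/linux/amd64/17.1
    Switch to the 17.1 profile with eselect, then rebuild the affected
    packages as described in the corresponding news item.
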
diff --git a/lib/portage/package/ebuild/digestcheck.py b/lib/portage/package/ebuild/digestcheck.py
new file mode 100644
index 000000000..502950f31
--- /dev/null
+++ b/lib/portage/package/ebuild/digestcheck.py
@@ -0,0 +1,155 @@
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['digestcheck']
+
+import warnings
+
+from portage import os, _encodings, _unicode_decode
+from portage.checksum import _hash_filter
+from portage.exception import DigestException, FileNotFound
+from portage.localization import _
+from portage.output import EOutput
+from portage.util import writemsg
+
+def digestcheck(myfiles, mysettings, strict=False, justmanifest=None, mf=None):
+ """
+ Verifies checksums. Assumes all files have been downloaded.
+ @rtype: int
+ @return: 1 on success and 0 on failure
+ """
+
+ if justmanifest is not None:
+ warnings.warn("The justmanifest parameter of the " + \
+ "portage.package.ebuild.digestcheck.digestcheck()" + \
+ " function is now unused.",
+ DeprecationWarning, stacklevel=2)
+ justmanifest = None
+
+ if mysettings.get("EBUILD_SKIP_MANIFEST") == "1":
+ return 1
+ pkgdir = mysettings["O"]
+ hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if hash_filter.transparent:
+ hash_filter = None
+ if mf is None:
+ mf = mysettings.repositories.get_repo_for_location(
+ os.path.dirname(os.path.dirname(pkgdir)))
+ mf = mf.load_manifest(pkgdir, mysettings["DISTDIR"])
+ eout = EOutput()
+ eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
+ try:
+ if not mf.thin and strict and "PORTAGE_PARALLEL_FETCHONLY" not in mysettings:
+ if mf.fhashdict.get("EBUILD"):
+ eout.ebegin(_("checking ebuild checksums ;-)"))
+ mf.checkTypeHashes("EBUILD", hash_filter=hash_filter)
+ eout.eend(0)
+ if mf.fhashdict.get("AUX"):
+ eout.ebegin(_("checking auxfile checksums ;-)"))
+ mf.checkTypeHashes("AUX", hash_filter=hash_filter)
+ eout.eend(0)
+ if mf.strict_misc_digests and mf.fhashdict.get("MISC"):
+ eout.ebegin(_("checking miscfile checksums ;-)"))
+ mf.checkTypeHashes("MISC", ignoreMissingFiles=True,
+ hash_filter=hash_filter)
+ eout.eend(0)
+ for f in myfiles:
+ eout.ebegin(_("checking %s ;-)") % f)
+ ftype = mf.findFile(f)
+ if ftype is None:
+ if mf.allow_missing:
+ continue
+ eout.eend(1)
+ writemsg(_("\n!!! Missing digest for '%s'\n") % (f,),
+ noiselevel=-1)
+ return 0
+ mf.checkFileHashes(ftype, f, hash_filter=hash_filter)
+ eout.eend(0)
+ except FileNotFound as e:
+ eout.eend(1)
+ writemsg(_("\n!!! A file listed in the Manifest could not be found: %s\n") % str(e),
+ noiselevel=-1)
+ return 0
+ except DigestException as e:
+ eout.eend(1)
+ writemsg(_("\n!!! Digest verification failed:\n"), noiselevel=-1)
+ writemsg("!!! %s\n" % e.value[0], noiselevel=-1)
+ writemsg(_("!!! Reason: %s\n") % e.value[1], noiselevel=-1)
+ writemsg(_("!!! Got: %s\n") % e.value[2], noiselevel=-1)
+ writemsg(_("!!! Expected: %s\n") % e.value[3], noiselevel=-1)
+ return 0
+ if mf.thin or mf.allow_missing:
+ # In this case we ignore any missing digests that
+ # would otherwise be detected below.
+ return 1
+ # Make sure that all of the ebuilds are actually listed in the Manifest.
+ for f in os.listdir(pkgdir):
+ pf = None
+ if f[-7:] == '.ebuild':
+ pf = f[:-7]
+ if pf is not None and not mf.hasFile("EBUILD", f):
+ writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") % \
+ os.path.join(pkgdir, f), noiselevel=-1)
+ if strict:
+ return 0
+ # epatch will just grab all the patches out of a directory, so we have to
+ # make sure there aren't any foreign files that it might grab.
+ filesdir = os.path.join(pkgdir, "files")
+
+ for parent, dirs, files in os.walk(filesdir):
+ try:
+ parent = _unicode_decode(parent,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ parent = _unicode_decode(parent,
+ encoding=_encodings['fs'], errors='replace')
+ writemsg(_("!!! Path contains invalid "
+ "character(s) for encoding '%s': '%s'") \
+ % (_encodings['fs'], parent), noiselevel=-1)
+ if strict:
+ return 0
+ continue
+ for d in dirs:
+ d_bytes = d
+ try:
+ d = _unicode_decode(d,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ d = _unicode_decode(d,
+ encoding=_encodings['fs'], errors='replace')
+ writemsg(_("!!! Path contains invalid "
+ "character(s) for encoding '%s': '%s'") \
+ % (_encodings['fs'], os.path.join(parent, d)),
+ noiselevel=-1)
+ if strict:
+ return 0
+ dirs.remove(d_bytes)
+ continue
+ if d.startswith(".") or d == "CVS":
+ dirs.remove(d_bytes)
+ for f in files:
+ try:
+ f = _unicode_decode(f,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ f = _unicode_decode(f,
+ encoding=_encodings['fs'], errors='replace')
+ if f.startswith("."):
+ continue
+ f = os.path.join(parent, f)[len(filesdir) + 1:]
+ writemsg(_("!!! File name contains invalid "
+ "character(s) for encoding '%s': '%s'") \
+ % (_encodings['fs'], f), noiselevel=-1)
+ if strict:
+ return 0
+ continue
+ if f.startswith("."):
+ continue
+ f = os.path.join(parent, f)[len(filesdir) + 1:]
+ file_type = mf.findFile(f)
+ if file_type != "AUX" and not f.startswith("digest-"):
+ writemsg(_("!!! A file is not listed in the Manifest: '%s'\n") % \
+ os.path.join(filesdir, f), noiselevel=-1)
+ if strict:
+ return 0
+ return 1
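
A minimal usage sketch for digestcheck(): mysettings is assumed to be a portage.config whose "O" and "DISTDIR" keys are already set (for example via doebuild_environment()), and myfiles lists the distfiles to verify. The names are illustrative and not part of the patch:

    from portage.package.ebuild.digestcheck import digestcheck

    # digestcheck() returns 1 on success and 0 on failure; with strict=True
    # a missing or mismatched Manifest entry is treated as a hard error.
    if not digestcheck(myfiles, mysettings, strict=True):
        raise SystemExit("Manifest verification failed")
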
diff --git a/lib/portage/package/ebuild/digestgen.py b/lib/portage/package/ebuild/digestgen.py
new file mode 100644
index 000000000..6f3f877cd
--- /dev/null
+++ b/lib/portage/package/ebuild/digestgen.py
@@ -0,0 +1,208 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['digestgen']
+
+import errno
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.package.ebuild._spawn_nofetch:spawn_nofetch',
+)
+
+from portage import os
+from portage.const import MANIFEST2_HASH_DEFAULTS
+from portage.dbapi.porttree import FetchlistDict
+from portage.dep import use_reduce
+from portage.exception import InvalidDependString, FileNotFound, \
+ PermissionDenied, PortagePackageException
+from portage.localization import _
+from portage.output import colorize
+from portage.package.ebuild.fetch import fetch
+from portage.util import writemsg, writemsg_stdout
+from portage.versions import catsplit
+
+def digestgen(myarchives=None, mysettings=None, myportdb=None):
+ """
+ Generates a digest file if missing. Fetches files if necessary.
+ NOTE: myarchives and mysettings used to be positional arguments,
+ so their order must be preserved for backward compatibility.
+ @param mysettings: the ebuild config (mysettings["O"] must correspond
+ to the ebuild's parent directory)
+ @type mysettings: config
+ @param myportdb: a portdbapi instance
+ @type myportdb: portdbapi
+ @rtype: int
+ @return: 1 on success and 0 on failure
+ """
+ if mysettings is None or myportdb is None:
+ raise TypeError("portage.digestgen(): 'mysettings' and 'myportdb' parameters are required.")
+
+ try:
+ portage._doebuild_manifest_exempt_depend += 1
+ distfiles_map = {}
+ fetchlist_dict = FetchlistDict(mysettings["O"], mysettings, myportdb)
+ for cpv in fetchlist_dict:
+ try:
+ for myfile in fetchlist_dict[cpv]:
+ distfiles_map.setdefault(myfile, []).append(cpv)
+ except InvalidDependString as e:
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ del e
+ return 0
+ mytree = os.path.dirname(os.path.dirname(mysettings["O"]))
+ try:
+ mf = mysettings.repositories.get_repo_for_location(mytree)
+ except KeyError:
+ # backward compatibility
+ mytree = os.path.realpath(mytree)
+ mf = mysettings.repositories.get_repo_for_location(mytree)
+
+ repo_required_hashes = mf.manifest_required_hashes
+ if repo_required_hashes is None:
+ repo_required_hashes = MANIFEST2_HASH_DEFAULTS
+ mf = mf.load_manifest(mysettings["O"], mysettings["DISTDIR"],
+ fetchlist_dict=fetchlist_dict)
+
+ if not mf.allow_create:
+ writemsg_stdout(_(">>> Skipping creating Manifest for %s; "
+ "repository is configured to not use them\n") % mysettings["O"])
+ return 1
+
+ # Don't require all hashes since that can trigger excessive
+ # fetches when sufficient digests already exist. To ease transition
+ # while Manifest 1 is being removed, only require hashes that will
+ # exist before and after the transition.
+ required_hash_types = set()
+ required_hash_types.add("size")
+ required_hash_types.update(repo_required_hashes)
+ dist_hashes = mf.fhashdict.get("DIST", {})
+
+ # To avoid accidental regeneration of digests with the incorrect
+ # files (such as partially downloaded files), trigger the fetch
+ # code if the file exists and its size doesn't match the current
+ # manifest entry. If there really is a legitimate reason for the
+ # digest to change, `ebuild --force digest` can be used to avoid
+ # triggering this code (or else the old digests can be manually
+ # removed from the Manifest).
+ missing_files = []
+ for myfile in distfiles_map:
+ myhashes = dist_hashes.get(myfile)
+ if not myhashes:
+ try:
+ st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
+ except OSError:
+ st = None
+ if st is None or st.st_size == 0:
+ missing_files.append(myfile)
+ continue
+ size = myhashes.get("size")
+
+ try:
+ st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ if size == 0:
+ missing_files.append(myfile)
+ continue
+ if required_hash_types.difference(myhashes):
+ missing_files.append(myfile)
+ continue
+ else:
+ if st.st_size == 0 or size is not None and size != st.st_size:
+ missing_files.append(myfile)
+ continue
+
+ for myfile in missing_files:
+ uris = set()
+ all_restrict = set()
+ for cpv in distfiles_map[myfile]:
+ uris.update(myportdb.getFetchMap(
+ cpv, mytree=mytree)[myfile])
+ restrict = myportdb.aux_get(cpv, ['RESTRICT'], mytree=mytree)[0]
+ # Here we ignore conditional parts of RESTRICT since
+ # they don't apply unconditionally. Assume such
+ # conditionals only apply on the client side where
+ # digestgen() does not need to be called.
+ all_restrict.update(use_reduce(restrict,
+ flat=True, matchnone=True))
+
+ # fetch() uses CATEGORY and PF to display a message
+ # when fetch restriction is triggered.
+ cat, pf = catsplit(cpv)
+ mysettings["CATEGORY"] = cat
+ mysettings["PF"] = pf
+
+ # fetch() uses PORTAGE_RESTRICT to control fetch
+ # restriction, which is only applied to files that
+ # are not fetchable via a mirror:// URI.
+ mysettings["PORTAGE_RESTRICT"] = " ".join(all_restrict)
+
+ try:
+ st = os.stat(os.path.join(mysettings["DISTDIR"], myfile))
+ except OSError:
+ st = None
+
+ if not fetch({myfile : uris}, mysettings):
+ myebuild = os.path.join(mysettings["O"],
+ catsplit(cpv)[1] + ".ebuild")
+ spawn_nofetch(myportdb, myebuild)
+ writemsg(_("!!! Fetch failed for %s, can't update Manifest\n")
+ % myfile, noiselevel=-1)
+ if myfile in dist_hashes and \
+ st is not None and st.st_size > 0:
+ # stat result is obtained before calling fetch(),
+ # since fetch may rename the existing file if the
+ # digest does not match.
+ cmd = colorize("INFORM", "ebuild --force %s manifest" %
+ os.path.basename(myebuild))
+ writemsg((_(
+ "!!! If you would like to forcefully replace the existing Manifest entry\n"
+ "!!! for %s, use the following command:\n") % myfile) +
+ "!!! %s\n" % cmd,
+ noiselevel=-1)
+ return 0
+
+ writemsg_stdout(_(">>> Creating Manifest for %s\n") % mysettings["O"])
+ try:
+ mf.create(assumeDistHashesSometimes=True,
+ assumeDistHashesAlways=(
+ "assume-digests" in mysettings.features))
+ except FileNotFound as e:
+ writemsg(_("!!! File %s doesn't exist, can't update Manifest\n")
+ % e, noiselevel=-1)
+ return 0
+ except PortagePackageException as e:
+ writemsg(("!!! %s\n") % (e,), noiselevel=-1)
+ return 0
+ try:
+ mf.write(sign=False)
+ except PermissionDenied as e:
+ writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
+ return 0
+ if "assume-digests" not in mysettings.features:
+ distlist = list(mf.fhashdict.get("DIST", {}))
+ distlist.sort()
+ auto_assumed = []
+ for filename in distlist:
+ if not os.path.exists(
+ os.path.join(mysettings["DISTDIR"], filename)):
+ auto_assumed.append(filename)
+ if auto_assumed:
+ cp = os.path.sep.join(mysettings["O"].split(os.path.sep)[-2:])
+ pkgs = myportdb.cp_list(cp, mytree=mytree)
+ pkgs.sort()
+ writemsg_stdout(" digest.assumed" + colorize("WARN",
+ str(len(auto_assumed)).rjust(18)) + "\n")
+ for pkg_key in pkgs:
+ fetchlist = myportdb.getFetchMap(pkg_key, mytree=mytree)
+ pv = pkg_key.split("/")[1]
+ for filename in auto_assumed:
+ if filename in fetchlist:
+ writemsg_stdout(
+ " %s::%s\n" % (pv, filename))
+ return 1
+ finally:
+ portage._doebuild_manifest_exempt_depend -= 1
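
Similarly, a hedged sketch of calling digestgen() from API consumers: both keyword arguments are required, and mysettings["O"] must point at the ebuild's parent directory. The variable names are placeholders:

    from portage.package.ebuild.digestgen import digestgen

    # mysettings: a portage.config prepared for the ebuild's directory ("O")
    # myportdb:   a portdbapi instance for the same repository
    if not digestgen(mysettings=mysettings, myportdb=myportdb):
        raise SystemExit("Manifest generation failed")  # 0 means a fetch or write failure
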
diff --git a/lib/portage/package/ebuild/doebuild.py b/lib/portage/package/ebuild/doebuild.py
new file mode 100644
index 000000000..941a597e2
--- /dev/null
+++ b/lib/portage/package/ebuild/doebuild.py
@@ -0,0 +1,2539 @@
+# Copyright 2010-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = ['doebuild', 'doebuild_environment', 'spawn', 'spawnebuild']
+
+import grp
+import gzip
+import errno
+import fnmatch
+import io
+from itertools import chain
+import logging
+import os as _os
+import platform
+import pwd
+import re
+import signal
+import stat
+import sys
+import tempfile
+from textwrap import wrap
+import time
+import warnings
+import zlib
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.package.ebuild.config:check_config_instance',
+ 'portage.package.ebuild.digestcheck:digestcheck',
+ 'portage.package.ebuild.digestgen:digestgen',
+ 'portage.package.ebuild.fetch:fetch',
+ 'portage.package.ebuild.prepare_build_dirs:_prepare_fake_distdir',
+ 'portage.package.ebuild._ipc.QueryCommand:QueryCommand',
+ 'portage.dep._slot_operator:evaluate_slot_operator_equal_deps',
+ 'portage.package.ebuild._spawn_nofetch:spawn_nofetch',
+ 'portage.util.elf.header:ELFHeader',
+ 'portage.dep.soname.multilib_category:compute_multilib_category',
+ 'portage.util._desktop_entry:validate_desktop_entry',
+ 'portage.util._dyn_libs.NeededEntry:NeededEntry',
+ 'portage.util._dyn_libs.soname_deps:SonameDepsProcessor',
+ 'portage.util._async.SchedulerInterface:SchedulerInterface',
+ 'portage.util._eventloop.EventLoop:EventLoop',
+ 'portage.util._eventloop.global_event_loop:global_event_loop',
+ 'portage.util.ExtractKernelVersion:ExtractKernelVersion'
+)
+
+from portage import bsd_chflags, \
+ eapi_is_supported, merge, os, selinux, shutil, \
+ unmerge, _encodings, _os_merge, \
+ _shell_quote, _unicode_decode, _unicode_encode
+from portage.const import EBUILD_SH_ENV_FILE, EBUILD_SH_ENV_DIR, \
+ EBUILD_SH_BINARY, INVALID_ENV_FILE, MISC_SH_BINARY, PORTAGE_PYM_PACKAGES
+from portage.data import portage_gid, portage_uid, secpass, \
+ uid, userpriv_groups
+from portage.dbapi.porttree import _parse_uri_map
+from portage.dep import Atom, check_required_use, \
+ human_readable_required_use, paren_enclose, use_reduce
+from portage.eapi import (eapi_exports_KV, eapi_exports_merge_type,
+ eapi_exports_replace_vars, eapi_exports_REPOSITORY,
+ eapi_has_required_use, eapi_has_src_prepare_and_src_configure,
+ eapi_has_pkg_pretend, _get_eapi_attrs)
+from portage.elog import elog_process, _preload_elog_modules
+from portage.elog.messages import eerror, eqawarn
+from portage.exception import (DigestException, FileNotFound,
+ IncorrectParameter, InvalidData, InvalidDependString,
+ PermissionDenied, UnsupportedAPIException)
+from portage.localization import _
+from portage.output import colormap
+from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
+from portage.process import find_binary
+from portage.util import ( apply_recursive_permissions,
+ apply_secpass_permissions,
+ noiselimit,
+ shlex_split,
+ varexpand,
+ writemsg,
+ writemsg_stdout,
+ write_atomic
+ )
+from portage.util.cpuinfo import get_cpu_count
+from portage.util.lafilefixer import rewrite_lafile
+from portage.util.compression_probe import _compressors
+from portage.util.socks5 import get_socks5_proxy
+from portage.versions import _pkgsplit
+from _emerge.BinpkgEnvExtractor import BinpkgEnvExtractor
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.EbuildSpawnProcess import EbuildSpawnProcess
+from _emerge.Package import Package
+from _emerge.RootConfig import RootConfig
+
+if sys.hexversion >= 0x3000000:
+ _unicode = str
+else:
+ _unicode = unicode
+
+_unsandboxed_phases = frozenset([
+ "clean", "cleanrm", "config",
+ "help", "info", "postinst",
+ "preinst", "pretend", "postrm",
+ "prerm", "setup"
+])
+
+# phases in which IPC with host is allowed
+_ipc_phases = frozenset([
+ "setup", "pretend", "config", "info",
+ "preinst", "postinst", "prerm", "postrm",
+])
+
+# phases in which networking access is allowed
+_networked_phases = frozenset([
+ # for VCS fetching
+ "unpack",
+ # + for network-bound IPC
+] + list(_ipc_phases))
+
+_phase_func_map = {
+ "config": "pkg_config",
+ "setup": "pkg_setup",
+ "nofetch": "pkg_nofetch",
+ "unpack": "src_unpack",
+ "prepare": "src_prepare",
+ "configure": "src_configure",
+ "compile": "src_compile",
+ "test": "src_test",
+ "install": "src_install",
+ "preinst": "pkg_preinst",
+ "postinst": "pkg_postinst",
+ "prerm": "pkg_prerm",
+ "postrm": "pkg_postrm",
+ "info": "pkg_info",
+ "pretend": "pkg_pretend",
+}
+
+_vdb_use_conditional_keys = Package._dep_keys + \
+ ('LICENSE', 'PROPERTIES', 'RESTRICT',)
+
+def _doebuild_spawn(phase, settings, actionmap=None, **kwargs):
+ """
+ All proper ebuild phases which execute ebuild.sh are spawned
+ via this function. No exceptions.
+ """
+
+ if phase in _unsandboxed_phases:
+ kwargs['free'] = True
+
+ kwargs['ipc'] = 'ipc-sandbox' not in settings.features or \
+ phase in _ipc_phases
+ kwargs['networked'] = 'network-sandbox' not in settings.features or \
+ phase in _networked_phases or \
+ 'network-sandbox' in settings['PORTAGE_RESTRICT'].split()
+
+ if phase == 'depend':
+ kwargs['droppriv'] = 'userpriv' in settings.features
+ # It's not necessary to close_fds for this phase, since
+ # it should not spawn any daemons, and close_fds is
+ # best avoided since it can interact badly with some
+ # garbage collectors (see _setup_pipes docstring).
+ kwargs['close_fds'] = False
+
+ if actionmap is not None and phase in actionmap:
+ kwargs.update(actionmap[phase]["args"])
+ cmd = actionmap[phase]["cmd"] % phase
+ else:
+ if phase == 'cleanrm':
+ ebuild_sh_arg = 'clean'
+ else:
+ ebuild_sh_arg = phase
+
+ cmd = "%s %s" % (_shell_quote(
+ os.path.join(settings["PORTAGE_BIN_PATH"],
+ os.path.basename(EBUILD_SH_BINARY))),
+ ebuild_sh_arg)
+
+ settings['EBUILD_PHASE'] = phase
+ try:
+ return spawn(cmd, settings, **kwargs)
+ finally:
+ settings.pop('EBUILD_PHASE', None)
+
+def _spawn_phase(phase, settings, actionmap=None, returnpid=False,
+ logfile=None, **kwargs):
+
+ if returnpid:
+ return _doebuild_spawn(phase, settings, actionmap=actionmap,
+ returnpid=returnpid, logfile=logfile, **kwargs)
+
+ # The logfile argument is unused here, since EbuildPhase uses
+ # the PORTAGE_LOG_FILE variable if set.
+ ebuild_phase = EbuildPhase(actionmap=actionmap, background=False,
+ phase=phase, scheduler=SchedulerInterface(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
+ settings=settings, **kwargs)
+
+ ebuild_phase.start()
+ ebuild_phase.wait()
+ return ebuild_phase.returncode
+
+def _doebuild_path(settings, eapi=None):
+ """
+ Generate the PATH variable.
+ """
+
+ # Note: PORTAGE_BIN_PATH may differ from the global constant
+ # when portage is reinstalling itself.
+ portage_bin_path = [settings["PORTAGE_BIN_PATH"]]
+ if portage_bin_path[0] != portage.const.PORTAGE_BIN_PATH:
+ # Add a fallback path for restarting failed builds (bug 547086)
+ portage_bin_path.append(portage.const.PORTAGE_BIN_PATH)
+ eprefix = portage.const.EPREFIX
+ prerootpath = [x for x in settings.get("PREROOTPATH", "").split(":") if x]
+ rootpath = [x for x in settings.get("ROOTPATH", "").split(":") if x]
+ overrides = [x for x in settings.get(
+ "__PORTAGE_TEST_PATH_OVERRIDE", "").split(":") if x]
+
+ prefixes = []
+ if eprefix:
+ prefixes.append(eprefix)
+ prefixes.append("/")
+
+ path = overrides
+
+ if "xattr" in settings.features:
+ for x in portage_bin_path:
+ path.append(os.path.join(x, "ebuild-helpers", "xattr"))
+
+ if uid != 0 and \
+ "unprivileged" in settings.features and \
+ "fakeroot" not in settings.features:
+ for x in portage_bin_path:
+ path.append(os.path.join(x,
+ "ebuild-helpers", "unprivileged"))
+
+ if settings.get("USERLAND", "GNU") != "GNU":
+ for x in portage_bin_path:
+ path.append(os.path.join(x, "ebuild-helpers", "bsd"))
+
+ for x in portage_bin_path:
+ path.append(os.path.join(x, "ebuild-helpers"))
+ path.extend(prerootpath)
+
+ for prefix in prefixes:
+ for x in ("usr/local/sbin", "usr/local/bin", "usr/sbin", "usr/bin", "sbin", "bin"):
+ path.append(os.path.join(prefix, x))
+
+ path.extend(rootpath)
+ settings["PATH"] = ":".join(path)
+
+def doebuild_environment(myebuild, mydo, myroot=None, settings=None,
+ debug=False, use_cache=None, db=None):
+ """
+ Create and store environment variables in the config instance
+ that's passed in as the "settings" parameter. This will raise
+ UnsupportedAPIException if the given ebuild has an unsupported
+ EAPI. All EAPI dependent code comes last, so that essential
+ variables like PORTAGE_BUILDDIR are still initialized even in
+ cases when UnsupportedAPIException needs to be raised, which
+ can be useful when uninstalling a package that has corrupt
+ EAPI metadata.
+ The myroot and use_cache parameters are unused.
+ """
+
+ if settings is None:
+ raise TypeError("settings argument is required")
+
+ if db is None:
+ raise TypeError("db argument is required")
+
+ mysettings = settings
+ mydbapi = db
+ ebuild_path = os.path.abspath(myebuild)
+ pkg_dir = os.path.dirname(ebuild_path)
+ mytree = os.path.dirname(os.path.dirname(pkg_dir))
+ mypv = os.path.basename(ebuild_path)[:-7]
+ mysplit = _pkgsplit(mypv, eapi=mysettings.configdict["pkg"].get("EAPI"))
+ if mysplit is None:
+ raise IncorrectParameter(
+ _("Invalid ebuild path: '%s'") % myebuild)
+
+ if mysettings.mycpv is not None and \
+ mysettings.configdict["pkg"].get("PF") == mypv and \
+ "CATEGORY" in mysettings.configdict["pkg"]:
+ # Assume that PF is enough to assume that we've got
+ # the correct CATEGORY, though this is not really
+ # a solid assumption since it's possible (though
+ # unlikely) that two packages in different
+ # categories have the same PF. Callers should call
+ # setcpv or create a clean clone of a locked config
+ # instance in order to ensure that this assumption
+ # does not fail like in bug #408817.
+ cat = mysettings.configdict["pkg"]["CATEGORY"]
+ mycpv = mysettings.mycpv
+ elif os.path.basename(pkg_dir) in (mysplit[0], mypv):
+ # portdbapi or vardbapi
+ cat = os.path.basename(os.path.dirname(pkg_dir))
+ mycpv = cat + "/" + mypv
+ else:
+ raise AssertionError("unable to determine CATEGORY")
+
+ # Make a backup of PORTAGE_TMPDIR prior to calling config.reset()
+ # so that the caller can override it.
+ tmpdir = mysettings["PORTAGE_TMPDIR"]
+
+ if mydo == 'depend':
+ if mycpv != mysettings.mycpv:
+ # Don't pass in mydbapi here since the resulting aux_get
+ # call would lead to infinite 'depend' phase recursion.
+ mysettings.setcpv(mycpv)
+ else:
+ # If EAPI isn't in configdict["pkg"], it means that setcpv()
+ # hasn't been called with the mydb argument, so we have to
+ # call it here (portage code always calls setcpv properly,
+ # but api consumers might not).
+ if mycpv != mysettings.mycpv or \
+ "EAPI" not in mysettings.configdict["pkg"]:
+ # Reload env.d variables and reset any previous settings.
+ mysettings.reload()
+ mysettings.reset()
+ mysettings.setcpv(mycpv, mydb=mydbapi)
+
+ # config.reset() might have reverted a change made by the caller,
+ # so restore it to its original value. Sandbox needs canonical
+ # paths, so realpath it.
+ mysettings["PORTAGE_TMPDIR"] = os.path.realpath(tmpdir)
+
+ mysettings.pop("EBUILD_PHASE", None) # remove from backupenv
+ mysettings["EBUILD_PHASE"] = mydo
+
+ # Set requested Python interpreter for Portage helpers.
+ mysettings['PORTAGE_PYTHON'] = portage._python_interpreter
+
+ # This is used by assert_sigpipe_ok() that's used by the ebuild
+ # unpack() helper. SIGPIPE is typically 13, but it's better not
+ # to assume that.
+ mysettings['PORTAGE_SIGPIPE_STATUS'] = str(128 + signal.SIGPIPE)
+
+ # We are disabling user-specific bashrc files.
+ mysettings["BASH_ENV"] = INVALID_ENV_FILE
+
+ if debug: # Otherwise it overrides emerge's settings.
+ # We have no other way to set debug... debug can't be passed in
+ # due to how it's coded... Don't overwrite this so we can use it.
+ mysettings["PORTAGE_DEBUG"] = "1"
+
+ mysettings["EBUILD"] = ebuild_path
+ mysettings["O"] = pkg_dir
+ mysettings.configdict["pkg"]["CATEGORY"] = cat
+ mysettings["PF"] = mypv
+
+ if hasattr(mydbapi, 'repositories'):
+ repo = mydbapi.repositories.get_repo_for_location(mytree)
+ mysettings['PORTDIR'] = repo.eclass_db.porttrees[0]
+ mysettings['PORTAGE_ECLASS_LOCATIONS'] = repo.eclass_db.eclass_locations_string
+ mysettings.configdict["pkg"]["PORTAGE_REPO_NAME"] = repo.name
+
+ mysettings["PORTDIR"] = os.path.realpath(mysettings["PORTDIR"])
+ mysettings.pop("PORTDIR_OVERLAY", None)
+ mysettings["DISTDIR"] = os.path.realpath(mysettings["DISTDIR"])
+ mysettings["RPMDIR"] = os.path.realpath(mysettings["RPMDIR"])
+
+ mysettings["ECLASSDIR"] = mysettings["PORTDIR"]+"/eclass"
+ mysettings["SANDBOX_LOG"] = mycpv.replace("/", "_-_")
+
+ mysettings["PORTAGE_BASHRC_FILES"] = "\n".join(mysettings._pbashrc)
+
+ mysettings["P"] = mysplit[0]+"-"+mysplit[1]
+ mysettings["PN"] = mysplit[0]
+ mysettings["PV"] = mysplit[1]
+ mysettings["PR"] = mysplit[2]
+
+ if noiselimit < 0:
+ mysettings["PORTAGE_QUIET"] = "1"
+
+ if mysplit[2] == "r0":
+ mysettings["PVR"]=mysplit[1]
+ else:
+ mysettings["PVR"]=mysplit[1]+"-"+mysplit[2]
+
+ # All temporary directories should be subdirectories of
+ # $PORTAGE_TMPDIR/portage, since it's common for /tmp and /var/tmp
+ # to be mounted with the "noexec" option (see bug #346899).
+ mysettings["BUILD_PREFIX"] = mysettings["PORTAGE_TMPDIR"]+"/portage"
+ mysettings["PKG_TMPDIR"] = mysettings["BUILD_PREFIX"]+"/._unmerge_"
+
+ # Package {pre,post}inst and {pre,post}rm may overlap, so they must have separate
+ # locations in order to prevent interference.
+ if mydo in ("unmerge", "prerm", "postrm", "cleanrm"):
+ mysettings["PORTAGE_BUILDDIR"] = os.path.join(
+ mysettings["PKG_TMPDIR"],
+ mysettings["CATEGORY"], mysettings["PF"])
+ else:
+ mysettings["PORTAGE_BUILDDIR"] = os.path.join(
+ mysettings["BUILD_PREFIX"],
+ mysettings["CATEGORY"], mysettings["PF"])
+
+ mysettings["HOME"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "homedir")
+ mysettings["WORKDIR"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "work")
+ mysettings["D"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "image") + os.sep
+ mysettings["T"] = os.path.join(mysettings["PORTAGE_BUILDDIR"], "temp")
+ mysettings["FILESDIR"] = os.path.join(settings["PORTAGE_BUILDDIR"], "files")
+
+ # Prefix forward compatibility
+ eprefix_lstrip = mysettings["EPREFIX"].lstrip(os.sep)
+ mysettings["ED"] = os.path.join(
+ mysettings["D"], eprefix_lstrip).rstrip(os.sep) + os.sep
+
+ mysettings["PORTAGE_BASHRC"] = os.path.join(
+ mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_FILE)
+ mysettings["PM_EBUILD_HOOK_DIR"] = os.path.join(
+ mysettings["PORTAGE_CONFIGROOT"], EBUILD_SH_ENV_DIR)
+
+ # Allow color.map to control colors associated with einfo, ewarn, etc...
+ mysettings["PORTAGE_COLORMAP"] = colormap()
+
+ if "COLUMNS" not in mysettings:
+ # Set COLUMNS, in order to prevent unnecessary stty calls
+ # inside the set_colors function of isolated-functions.sh.
+ # We cache the result in os.environ, in order to avoid
+ # multiple stty calls in cases when get_term_size() falls
+ # back to stty due to a missing or broken curses module.
+ columns = os.environ.get("COLUMNS")
+ if columns is None:
+ rows, columns = portage.output.get_term_size()
+ if columns < 1:
+ # Force a sane value for COLUMNS, so that tools
+ # like ls don't complain (see bug #394091).
+ columns = 80
+ columns = str(columns)
+ os.environ["COLUMNS"] = columns
+ mysettings["COLUMNS"] = columns
+
+ # EAPI is always known here, even for the "depend" phase, because
+ # EbuildMetadataPhase gets it from _parse_eapi_ebuild_head().
+ eapi = mysettings.configdict['pkg']['EAPI']
+ _doebuild_path(mysettings, eapi=eapi)
+
+ # All EAPI dependent code comes last, so that essential variables like
+ # PATH and PORTAGE_BUILDDIR are still initialized even in cases when
+ # UnsupportedAPIException needs to be raised, which can be useful
+ # when uninstalling a package that has corrupt EAPI metadata.
+ if not eapi_is_supported(eapi):
+ raise UnsupportedAPIException(mycpv, eapi)
+
+ if eapi_exports_REPOSITORY(eapi) and "PORTAGE_REPO_NAME" in mysettings.configdict["pkg"]:
+ mysettings.configdict["pkg"]["REPOSITORY"] = mysettings.configdict["pkg"]["PORTAGE_REPO_NAME"]
+
+ if mydo != "depend":
+ if hasattr(mydbapi, "getFetchMap") and \
+ ("A" not in mysettings.configdict["pkg"] or \
+ "AA" not in mysettings.configdict["pkg"]):
+ src_uri = mysettings.configdict["pkg"].get("SRC_URI")
+ if src_uri is None:
+ src_uri, = mydbapi.aux_get(mysettings.mycpv,
+ ["SRC_URI"], mytree=mytree)
+ metadata = {
+ "EAPI" : eapi,
+ "SRC_URI" : src_uri,
+ }
+ use = frozenset(mysettings["PORTAGE_USE"].split())
+ try:
+ uri_map = _parse_uri_map(mysettings.mycpv, metadata, use=use)
+ except InvalidDependString:
+ mysettings.configdict["pkg"]["A"] = ""
+ else:
+ mysettings.configdict["pkg"]["A"] = " ".join(uri_map)
+
+ try:
+ uri_map = _parse_uri_map(mysettings.mycpv, metadata)
+ except InvalidDependString:
+ mysettings.configdict["pkg"]["AA"] = ""
+ else:
+ mysettings.configdict["pkg"]["AA"] = " ".join(uri_map)
+
+ ccache = "ccache" in mysettings.features
+ distcc = "distcc" in mysettings.features
+ icecream = "icecream" in mysettings.features
+
+ if ccache or distcc or icecream:
+ libdir = None
+ default_abi = mysettings.get("DEFAULT_ABI")
+ if default_abi:
+ libdir = mysettings.get("LIBDIR_" + default_abi)
+ if not libdir:
+ libdir = "lib"
+
+ # The installation locations used to vary between versions...
+ # Safer to look them up rather than assuming
+ possible_libexecdirs = (libdir, "lib", "libexec")
+ masquerades = []
+ if distcc:
+ masquerades.append(("distcc", "distcc"))
+ if icecream:
+ masquerades.append(("icecream", "icecc"))
+ if ccache:
+ masquerades.append(("ccache", "ccache"))
+
+ for feature, m in masquerades:
+ for l in possible_libexecdirs:
+ p = os.path.join(os.sep, eprefix_lstrip,
+ "usr", l, m, "bin")
+ if os.path.isdir(p):
+ mysettings["PATH"] = p + ":" + mysettings["PATH"]
+ break
+ else:
+ writemsg(("Warning: %s requested but no masquerade dir "
+ + "can be found in /usr/lib*/%s/bin\n") % (m, m))
+ mysettings.features.remove(feature)
+
+ if 'MAKEOPTS' not in mysettings:
+ nproc = get_cpu_count()
+ if nproc:
+ mysettings['MAKEOPTS'] = '-j%d' % (nproc)
+
+ if not eapi_exports_KV(eapi):
+ # Discard KV for EAPIs that don't support it. Cached KV is restored
+ # from the backupenv whenever config.reset() is called.
+ mysettings.pop('KV', None)
+ elif 'KV' not in mysettings and \
+ mydo in ('compile', 'config', 'configure', 'info',
+ 'install', 'nofetch', 'postinst', 'postrm', 'preinst',
+ 'prepare', 'prerm', 'setup', 'test', 'unpack'):
+ mykv, err1 = ExtractKernelVersion(
+ os.path.join(mysettings['EROOT'], "usr/src/linux"))
+ if mykv:
+ # Regular source tree
+ mysettings["KV"] = mykv
+ else:
+ mysettings["KV"] = ""
+ mysettings.backup_changes("KV")
+
+ binpkg_compression = mysettings.get("BINPKG_COMPRESS", "bzip2")
+ try:
+ compression = _compressors[binpkg_compression]
+ except KeyError as e:
+ if binpkg_compression:
+ writemsg("Warning: Invalid or unsupported compression method: %s" % e.args[0])
+ else:
+ # Empty BINPKG_COMPRESS disables compression.
+ mysettings['PORTAGE_COMPRESSION_COMMAND'] = 'cat'
+ else:
+ try:
+ compression_binary = shlex_split(varexpand(compression["compress"], mydict=settings))[0]
+ except IndexError as e:
+ writemsg("Warning: Invalid or unsupported compression method: %s" % e.args[0])
+ else:
+ if find_binary(compression_binary) is None:
+ missing_package = compression["package"]
+ writemsg("Warning: File compression unsupported %s. Missing package: %s" % (binpkg_compression, missing_package))
+ else:
+ cmd = [varexpand(x, mydict=settings) for x in shlex_split(compression["compress"])]
+ # Filter empty elements
+ cmd = [x for x in cmd if x != ""]
+ mysettings['PORTAGE_COMPRESSION_COMMAND'] = ' '.join(cmd)
+
+_doebuild_manifest_cache = None
+_doebuild_broken_ebuilds = set()
+_doebuild_broken_manifests = set()
+_doebuild_commands_without_builddir = (
+ 'clean', 'cleanrm', 'depend', 'digest',
+ 'fetch', 'fetchall', 'help', 'manifest'
+)
+
+def doebuild(myebuild, mydo, _unused=DeprecationWarning, settings=None, debug=0, listonly=0,
+ fetchonly=0, cleanup=0, dbkey=DeprecationWarning, use_cache=1, fetchall=0, tree=None,
+ mydbapi=None, vartree=None, prev_mtimes=None,
+ fd_pipes=None, returnpid=False):
+ """
+ Wrapper function that invokes specific ebuild phases through the spawning
+ of ebuild.sh
+
+ @param myebuild: name of the ebuild to invoke the phase on (CPV)
+ @type myebuild: String
+ @param mydo: Phase to run
+ @type mydo: String
+	@param _unused: Deprecated (use settings["EROOT"] instead)
+ @type _unused: String
+ @param settings: Portage Configuration
+ @type settings: instance of portage.config
+ @param debug: Turns on various debug information (eg, debug for spawn)
+ @type debug: Boolean
+ @param listonly: Used to wrap fetch(); passed such that fetch only lists files required.
+ @type listonly: Boolean
+ @param fetchonly: Used to wrap fetch(); passed such that files are only fetched (no other actions)
+ @type fetchonly: Boolean
+	@param cleanup: Passed to prepare_build_dirs; when enabled, any existing
+		build directory contents are cleaned up first.
+ @type cleanup: Boolean
+ @param dbkey: A file path where metadata generated by the 'depend' phase
+ will be written.
+ @type dbkey: String
+ @param use_cache: Enables the cache
+ @type use_cache: Boolean
+ @param fetchall: Used to wrap fetch(), fetches all URIs (even ones invalid due to USE conditionals)
+ @type fetchall: Boolean
+ @param tree: Which tree to use ('vartree','porttree','bintree', etc..), defaults to 'porttree'
+ @type tree: String
+ @param mydbapi: a dbapi instance to pass to various functions; this should be a portdbapi instance.
+ @type mydbapi: portdbapi instance
+	@param vartree: An instance of vartree; used for aux_get calls, defaults to db[myroot]['vartree']
+ @type vartree: vartree instance
+ @param prev_mtimes: A dict of { filename:mtime } keys used by merge() to do config_protection
+ @type prev_mtimes: dictionary
+	@param fd_pipes: A dict mapping file descriptors for the spawned process,
+		for example { 0: stdin, 1: stdout }.
+	@type fd_pipes: Dictionary
+	@param returnpid: Return a list of process IDs for a successful spawn, or
+		an integer value if the spawn is unsuccessful. NOTE: This requires that
+		the caller clean up all returned PIDs.
+ @type returnpid: Boolean
+	@rtype: Integer
+ @return:
+ 1. 0 for success
+ 2. 1 for error
+
+ Most errors have an accompanying error message.
+
+	listonly and fetchonly are only really necessary for operations involving 'fetch'.
+	prev_mtimes is only necessary for merge operations.
+	Other variables may not be strictly required; many have defaults that are set inside of doebuild.
+
+ """
+
+ if settings is None:
+ raise TypeError("settings parameter is required")
+ mysettings = settings
+ myroot = settings['EROOT']
+
+ if _unused is not DeprecationWarning:
+ warnings.warn("The third parameter of the "
+ "portage.doebuild() is deprecated. Instead "
+ "settings['EROOT'] is used.",
+ DeprecationWarning, stacklevel=2)
+
+ if dbkey is not DeprecationWarning:
+ warnings.warn("portage.doebuild() called "
+ "with deprecated dbkey argument.",
+ DeprecationWarning, stacklevel=2)
+
+ if not tree:
+ writemsg("Warning: tree not specified to doebuild\n")
+ tree = "porttree"
+
+	# Per-phase dependency chains, so that the ebuild binary can use them
+	# to collapse targets down.
+ actionmap_deps={
+ "pretend" : [],
+ "setup": ["pretend"],
+ "unpack": ["setup"],
+ "prepare": ["unpack"],
+ "configure": ["prepare"],
+ "compile":["configure"],
+ "test": ["compile"],
+ "install":["test"],
+ "rpm": ["install"],
+ "package":["install"],
+ "merge" :["install"],
+ }
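+	# e.g. requesting "compile" also pulls in pretend, setup, unpack, prepare
+	# and configure via the phase_stack expansion below (unless noauto applies).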
+
+ if mydbapi is None:
+ mydbapi = portage.db[myroot][tree].dbapi
+
+ if vartree is None and mydo in ("merge", "qmerge", "unmerge"):
+ vartree = portage.db[myroot]["vartree"]
+
+ features = mysettings.features
+
+ clean_phases = ("clean", "cleanrm")
+ validcommands = ["help","clean","prerm","postrm","cleanrm","preinst","postinst",
+ "config", "info", "setup", "depend", "pretend",
+ "fetch", "fetchall", "digest",
+ "unpack", "prepare", "configure", "compile", "test",
+ "install", "rpm", "qmerge", "merge",
+ "package", "unmerge", "manifest", "nofetch"]
+
+ if mydo not in validcommands:
+ validcommands.sort()
+ writemsg("!!! doebuild: '%s' is not one of the following valid commands:" % mydo,
+ noiselevel=-1)
+ for vcount in range(len(validcommands)):
+ if vcount%6 == 0:
+ writemsg("\n!!! ", noiselevel=-1)
+ writemsg(validcommands[vcount].ljust(11), noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+ return 1
+
+ if returnpid and mydo != 'depend':
+ # This case is not supported, since it bypasses the EbuildPhase class
+ # which implements important functionality (including post phase hooks
+ # and IPC for things like best/has_version and die).
+ warnings.warn("portage.doebuild() called "
+ "with returnpid parameter enabled. This usage will "
+ "not be supported in the future.",
+ DeprecationWarning, stacklevel=2)
+
+ if mydo == "fetchall":
+ fetchall = 1
+ mydo = "fetch"
+
+ if mydo not in clean_phases and not os.path.exists(myebuild):
+ writemsg("!!! doebuild: %s not found for %s\n" % (myebuild, mydo),
+ noiselevel=-1)
+ return 1
+
+ global _doebuild_manifest_cache
+ pkgdir = os.path.dirname(myebuild)
+ manifest_path = os.path.join(pkgdir, "Manifest")
+ if tree == "porttree":
+ repo_config = mysettings.repositories.get_repo_for_location(
+ os.path.dirname(os.path.dirname(pkgdir)))
+ else:
+ repo_config = None
+
+ mf = None
+ if "strict" in features and \
+ "digest" not in features and \
+ tree == "porttree" and \
+ not repo_config.thin_manifest and \
+ mydo not in ("digest", "manifest", "help") and \
+ not portage._doebuild_manifest_exempt_depend and \
+ not (repo_config.allow_missing_manifest and not os.path.exists(manifest_path)):
+ # Always verify the ebuild checksums before executing it.
+ global _doebuild_broken_ebuilds
+
+ if myebuild in _doebuild_broken_ebuilds:
+ return 1
+
+ # Avoid checking the same Manifest several times in a row during a
+ # regen with an empty cache.
+ if _doebuild_manifest_cache is None or \
+ _doebuild_manifest_cache.getFullname() != manifest_path:
+ _doebuild_manifest_cache = None
+ if not os.path.exists(manifest_path):
+ out = portage.output.EOutput()
+ out.eerror(_("Manifest not found for '%s'") % (myebuild,))
+ _doebuild_broken_ebuilds.add(myebuild)
+ return 1
+ mf = repo_config.load_manifest(pkgdir, mysettings["DISTDIR"])
+
+ else:
+ mf = _doebuild_manifest_cache
+
+ try:
+ mf.checkFileHashes("EBUILD", os.path.basename(myebuild))
+ except KeyError:
+ if not (mf.allow_missing and
+ os.path.basename(myebuild) not in mf.fhashdict["EBUILD"]):
+ out = portage.output.EOutput()
+ out.eerror(_("Missing digest for '%s'") % (myebuild,))
+ _doebuild_broken_ebuilds.add(myebuild)
+ return 1
+ except FileNotFound:
+ out = portage.output.EOutput()
+ out.eerror(_("A file listed in the Manifest "
+ "could not be found: '%s'") % (myebuild,))
+ _doebuild_broken_ebuilds.add(myebuild)
+ return 1
+ except DigestException as e:
+ out = portage.output.EOutput()
+ out.eerror(_("Digest verification failed:"))
+ out.eerror("%s" % e.value[0])
+ out.eerror(_("Reason: %s") % e.value[1])
+ out.eerror(_("Got: %s") % e.value[2])
+ out.eerror(_("Expected: %s") % e.value[3])
+ _doebuild_broken_ebuilds.add(myebuild)
+ return 1
+
+ if mf.getFullname() in _doebuild_broken_manifests:
+ return 1
+
+ if mf is not _doebuild_manifest_cache and not mf.allow_missing:
+
+ # Make sure that all of the ebuilds are
+ # actually listed in the Manifest.
+ for f in os.listdir(pkgdir):
+ pf = None
+ if f[-7:] == '.ebuild':
+ pf = f[:-7]
+ if pf is not None and not mf.hasFile("EBUILD", f):
+ f = os.path.join(pkgdir, f)
+ if f not in _doebuild_broken_ebuilds:
+ out = portage.output.EOutput()
+ out.eerror(_("A file is not listed in the "
+ "Manifest: '%s'") % (f,))
+ _doebuild_broken_manifests.add(manifest_path)
+ return 1
+
+ # We cache it only after all above checks succeed.
+ _doebuild_manifest_cache = mf
+
+ logfile=None
+ builddir_lock = None
+ tmpdir = None
+ tmpdir_orig = None
+
+ try:
+ if mydo in ("digest", "manifest", "help"):
+ # Temporarily exempt the depend phase from manifest checks, in case
+ # aux_get calls trigger cache generation.
+ portage._doebuild_manifest_exempt_depend += 1
+
+ # If we don't need much space and we don't need a constant location,
+ # we can temporarily override PORTAGE_TMPDIR with a random temp dir
+ # so that there's no need for locking and it can be used even if the
+ # user isn't in the portage group.
+ if not returnpid and mydo in ("info",):
+ tmpdir = tempfile.mkdtemp()
+ tmpdir_orig = mysettings["PORTAGE_TMPDIR"]
+ mysettings["PORTAGE_TMPDIR"] = tmpdir
+
+ doebuild_environment(myebuild, mydo, myroot, mysettings, debug,
+ use_cache, mydbapi)
+
+ if mydo in clean_phases:
+ builddir_lock = None
+ if not returnpid and \
+ 'PORTAGE_BUILDDIR_LOCKED' not in mysettings:
+ builddir_lock = EbuildBuildDir(
+ scheduler=(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
+ settings=mysettings)
+ builddir_lock.scheduler.run_until_complete(
+ builddir_lock.async_lock())
+ try:
+ return _spawn_phase(mydo, mysettings,
+ fd_pipes=fd_pipes, returnpid=returnpid)
+ finally:
+ if builddir_lock is not None:
+ builddir_lock.scheduler.run_until_complete(
+ builddir_lock.async_unlock())
+
+ # get possible slot information from the deps file
+ if mydo == "depend":
+ writemsg("!!! DEBUG: dbkey: %s\n" % str(dbkey), 2)
+ if returnpid:
+ return _spawn_phase(mydo, mysettings,
+ fd_pipes=fd_pipes, returnpid=returnpid)
+ elif dbkey and dbkey is not DeprecationWarning:
+ mysettings["dbkey"] = dbkey
+ else:
+ mysettings["dbkey"] = \
+ os.path.join(mysettings.depcachedir, "aux_db_key_temp")
+
+ return _spawn_phase(mydo, mysettings,
+ fd_pipes=fd_pipes, returnpid=returnpid)
+
+ elif mydo == "nofetch":
+
+ if returnpid:
+ writemsg("!!! doebuild: %s\n" %
+ _("returnpid is not supported for phase '%s'\n" % mydo),
+ noiselevel=-1)
+
+ return spawn_nofetch(mydbapi, myebuild, settings=mysettings,
+ fd_pipes=fd_pipes)
+
+ if tree == "porttree":
+
+ if not returnpid:
+ # Validate dependency metadata here to ensure that ebuilds with
+ # invalid data are never installed via the ebuild command. Skip
+ # this when returnpid is True (assume the caller handled it).
+ rval = _validate_deps(mysettings, myroot, mydo, mydbapi)
+ if rval != os.EX_OK:
+ return rval
+
+ else:
+ # FEATURES=noauto only makes sense for porttree, and we don't want
+ # it to trigger redundant sourcing of the ebuild for API consumers
+ # that are using binary packages
+ if "noauto" in mysettings.features:
+ mysettings.features.discard("noauto")
+
+ # If we are not using a private temp dir, then check access
+ # to the global temp dir.
+ if tmpdir is None and \
+ mydo not in _doebuild_commands_without_builddir:
+ rval = _check_temp_dir(mysettings)
+ if rval != os.EX_OK:
+ return rval
+
+ if mydo == "unmerge":
+ if returnpid:
+ writemsg("!!! doebuild: %s\n" %
+ _("returnpid is not supported for phase '%s'\n" % mydo),
+ noiselevel=-1)
+ return unmerge(mysettings["CATEGORY"],
+ mysettings["PF"], myroot, mysettings, vartree=vartree)
+
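+		# Expand the requested phase into the full set of phases to run; with
+		# returnpid or FEATURES=noauto, only the requested phase itself runs.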
+ phases_to_run = set()
+ if returnpid or \
+ "noauto" in mysettings.features or \
+ mydo not in actionmap_deps:
+ phases_to_run.add(mydo)
+ else:
+ phase_stack = [mydo]
+ while phase_stack:
+ x = phase_stack.pop()
+ if x in phases_to_run:
+ continue
+ phases_to_run.add(x)
+ phase_stack.extend(actionmap_deps.get(x, []))
+ del phase_stack
+
+ alist = set(mysettings.configdict["pkg"].get("A", "").split())
+
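+		# When the unpack phase is scheduled, reuse an existing WORKDIR only if
+		# it carries the .unpacked marker and is newer than every fetched
+		# distfile; otherwise the clean phase is run so WORKDIR is recreated.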
+ unpacked = False
+ if tree != "porttree" or \
+ mydo in _doebuild_commands_without_builddir:
+ pass
+ elif "unpack" not in phases_to_run:
+ unpacked = os.path.exists(os.path.join(
+ mysettings["PORTAGE_BUILDDIR"], ".unpacked"))
+ else:
+ try:
+ workdir_st = os.stat(mysettings["WORKDIR"])
+ except OSError:
+ pass
+ else:
+ newstuff = False
+ if not os.path.exists(os.path.join(
+ mysettings["PORTAGE_BUILDDIR"], ".unpacked")):
+ writemsg_stdout(_(
+ ">>> Not marked as unpacked; recreating WORKDIR...\n"))
+ newstuff = True
+ else:
+ for x in alist:
+ writemsg_stdout(">>> Checking %s's mtime...\n" % x)
+ try:
+ x_st = os.stat(os.path.join(
+ mysettings["DISTDIR"], x))
+ except OSError:
+ # file not fetched yet
+ x_st = None
+
+ if x_st is None or x_st.st_mtime > workdir_st.st_mtime:
+ writemsg_stdout(_(">>> Timestamp of "
+ "%s has changed; recreating WORKDIR...\n") % x)
+ newstuff = True
+ break
+
+ if newstuff:
+ if builddir_lock is None and \
+ 'PORTAGE_BUILDDIR_LOCKED' not in mysettings:
+ builddir_lock = EbuildBuildDir(
+ scheduler=(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
+ settings=mysettings)
+ builddir_lock.scheduler.run_until_complete(
+ builddir_lock.async_lock())
+ try:
+ _spawn_phase("clean", mysettings)
+ finally:
+ if builddir_lock is not None:
+ builddir_lock.scheduler.run_until_complete(
+ builddir_lock.async_unlock())
+ builddir_lock = None
+ else:
+ writemsg_stdout(_(">>> WORKDIR is up-to-date, keeping...\n"))
+ unpacked = True
+
+ # Build directory creation isn't required for any of these.
+ # In the fetch phase, the directory is needed only for RESTRICT=fetch
+ # in order to satisfy the sane $PWD requirement (from bug #239560)
+ # when pkg_nofetch is spawned.
+ have_build_dirs = False
+ if mydo not in ('digest', 'fetch', 'help', 'manifest'):
+ if not returnpid and \
+ 'PORTAGE_BUILDDIR_LOCKED' not in mysettings:
+ builddir_lock = EbuildBuildDir(
+ scheduler=(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
+ settings=mysettings)
+ builddir_lock.scheduler.run_until_complete(
+ builddir_lock.async_lock())
+ mystatus = prepare_build_dirs(myroot, mysettings, cleanup)
+ if mystatus:
+ return mystatus
+ have_build_dirs = True
+
+ # emerge handles logging externally
+ if not returnpid:
+ # PORTAGE_LOG_FILE is set by the
+ # above prepare_build_dirs() call.
+ logfile = mysettings.get("PORTAGE_LOG_FILE")
+
+ if have_build_dirs:
+ rval = _prepare_env_file(mysettings)
+ if rval != os.EX_OK:
+ return rval
+
+ if eapi_exports_merge_type(mysettings["EAPI"]) and \
+ "MERGE_TYPE" not in mysettings.configdict["pkg"]:
+ if tree == "porttree":
+ mysettings.configdict["pkg"]["MERGE_TYPE"] = "source"
+ elif tree == "bintree":
+ mysettings.configdict["pkg"]["MERGE_TYPE"] = "binary"
+
+ if tree == "porttree":
+ mysettings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
+ elif tree == "bintree":
+ mysettings.configdict["pkg"]["EMERGE_FROM"] = "binary"
+
+ # NOTE: It's not possible to set REPLACED_BY_VERSION for prerm
+ # and postrm here, since we don't necessarily know what
+ # versions are being installed. This could be a problem
+ # for API consumers if they don't use dblink.treewalk()
+ # to execute prerm and postrm.
+ if eapi_exports_replace_vars(mysettings["EAPI"]) and \
+ (mydo in ("postinst", "preinst", "pretend", "setup") or \
+ ("noauto" not in features and not returnpid and \
+ (mydo in actionmap_deps or mydo in ("merge", "package", "qmerge")))):
+ if not vartree:
+ writemsg("Warning: vartree not given to doebuild. " + \
+ "Cannot set REPLACING_VERSIONS in pkg_{pretend,setup}\n")
+ else:
+ vardb = vartree.dbapi
+ cpv = mysettings.mycpv
+ cpv_slot = "%s%s%s" % \
+ (cpv.cp, portage.dep._slot_separator, cpv.slot)
+ mysettings["REPLACING_VERSIONS"] = " ".join(
+ set(portage.versions.cpv_getversion(match) \
+ for match in vardb.match(cpv_slot) + \
+ vardb.match('='+cpv)))
+
+ # if any of these are being called, handle them -- running them out of
+ # the sandbox -- and stop now.
+ if mydo in ("config", "help", "info", "postinst",
+ "preinst", "pretend", "postrm", "prerm"):
+ if mydo in ("preinst", "postinst"):
+ env_file = os.path.join(os.path.dirname(mysettings["EBUILD"]),
+ "environment.bz2")
+ if os.path.isfile(env_file):
+ mysettings["PORTAGE_UPDATE_ENV"] = env_file
+ try:
+ return _spawn_phase(mydo, mysettings,
+ fd_pipes=fd_pipes, logfile=logfile, returnpid=returnpid)
+ finally:
+ mysettings.pop("PORTAGE_UPDATE_ENV", None)
+
+ mycpv = "/".join((mysettings["CATEGORY"], mysettings["PF"]))
+
+ # Only try and fetch the files if we are going to need them ...
+ # otherwise, if user has FEATURES=noauto and they run `ebuild clean
+ # unpack compile install`, we will try and fetch 4 times :/
+ need_distfiles = tree == "porttree" and not unpacked and \
+ (mydo in ("fetch", "unpack") or \
+ mydo not in ("digest", "manifest") and "noauto" not in features)
+ if need_distfiles:
+
+ src_uri = mysettings.configdict["pkg"].get("SRC_URI")
+ if src_uri is None:
+ src_uri, = mydbapi.aux_get(mysettings.mycpv,
+ ["SRC_URI"], mytree=os.path.dirname(os.path.dirname(
+ os.path.dirname(myebuild))))
+ metadata = {
+ "EAPI" : mysettings["EAPI"],
+ "SRC_URI" : src_uri,
+ }
+ use = frozenset(mysettings["PORTAGE_USE"].split())
+ try:
+ alist = _parse_uri_map(mysettings.mycpv, metadata, use=use)
+ aalist = _parse_uri_map(mysettings.mycpv, metadata)
+ except InvalidDependString as e:
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ writemsg(_("!!! Invalid SRC_URI for '%s'.\n") % mycpv,
+ noiselevel=-1)
+ del e
+ return 1
+
+ if "mirror" in features or fetchall:
+ fetchme = aalist
+ else:
+ fetchme = alist
+
+ dist_digests = None
+ if mf is not None:
+ dist_digests = mf.getTypeDigests("DIST")
+ if not fetch(fetchme, mysettings, listonly=listonly,
+ fetchonly=fetchonly, allow_missing_digests=False,
+ digests=dist_digests):
+ # Since listonly mode is called by emerge --pretend in an
+ # asynchronous context, spawn_nofetch would trigger event loop
+ # recursion here, therefore delegate execution of pkg_nofetch
+ # to the caller (bug 657360).
+ if not listonly:
+ spawn_nofetch(mydbapi, myebuild, settings=mysettings,
+ fd_pipes=fd_pipes)
+ return 1
+
+ if need_distfiles:
+ # Files are already checked inside fetch(),
+ # so do not check them again.
+ checkme = []
+ elif unpacked:
+ # The unpack phase is marked as complete, so it
+ # would be wasteful to check distfiles again.
+ checkme = []
+ else:
+ checkme = alist
+
+ if mydo == "fetch" and listonly:
+ return 0
+
+ try:
+ if mydo == "manifest":
+ mf = None
+ _doebuild_manifest_cache = None
+ return not digestgen(mysettings=mysettings, myportdb=mydbapi)
+ elif mydo == "digest":
+ mf = None
+ _doebuild_manifest_cache = None
+ return not digestgen(mysettings=mysettings, myportdb=mydbapi)
+ elif "digest" in mysettings.features:
+ mf = None
+ _doebuild_manifest_cache = None
+ digestgen(mysettings=mysettings, myportdb=mydbapi)
+ except PermissionDenied as e:
+ writemsg(_("!!! Permission Denied: %s\n") % (e,), noiselevel=-1)
+ if mydo in ("digest", "manifest"):
+ return 1
+
+ if mydo == "fetch":
+ # Return after digestgen for FEATURES=digest support.
+ # Return before digestcheck, since fetch() already
+ # checked any relevant digests.
+ return 0
+
+ # See above comment about fetching only when needed
+ if tree == 'porttree' and \
+ not digestcheck(checkme, mysettings, "strict" in features, mf=mf):
+ return 1
+
+ # remove PORTAGE_ACTUAL_DISTDIR once cvs/svn is supported via SRC_URI
+ if tree == 'porttree' and \
+ ((mydo != "setup" and "noauto" not in features) \
+ or mydo in ("install", "unpack")):
+ _prepare_fake_distdir(mysettings, alist)
+
+ #initial dep checks complete; time to process main commands
+ actionmap = _spawn_actionmap(mysettings)
+
+ # merge the deps in so we have again a 'full' actionmap
+ # be glad when this can die.
+ for x in actionmap:
+ if len(actionmap_deps.get(x, [])):
+ actionmap[x]["dep"] = ' '.join(actionmap_deps[x])
+
+ regular_actionmap_phase = mydo in actionmap
+
+ if regular_actionmap_phase:
+ bintree = None
+ if mydo == "package":
+ # Make sure the package directory exists before executing
+ # this phase. This can raise PermissionDenied if
+ # the current user doesn't have write access to $PKGDIR.
+ if hasattr(portage, 'db'):
+ bintree = portage.db[mysettings['EROOT']]['bintree']
+ mysettings["PORTAGE_BINPKG_TMPFILE"] = \
+ bintree.getname(mysettings.mycpv) + \
+ ".%s" % (os.getpid(),)
+ bintree._ensure_dir(os.path.dirname(
+ mysettings["PORTAGE_BINPKG_TMPFILE"]))
+ else:
+ parent_dir = os.path.join(mysettings["PKGDIR"],
+ mysettings["CATEGORY"])
+ portage.util.ensure_dirs(parent_dir)
+ if not os.access(parent_dir, os.W_OK):
+ raise PermissionDenied(
+ "access('%s', os.W_OK)" % parent_dir)
+ retval = spawnebuild(mydo,
+ actionmap, mysettings, debug, logfile=logfile,
+ fd_pipes=fd_pipes, returnpid=returnpid)
+
+ if returnpid and isinstance(retval, list):
+ return retval
+
+ if retval == os.EX_OK:
+ if mydo == "package" and bintree is not None:
+ pkg = bintree.inject(mysettings.mycpv,
+ filename=mysettings["PORTAGE_BINPKG_TMPFILE"])
+ if pkg is not None:
+ infoloc = os.path.join(
+ mysettings["PORTAGE_BUILDDIR"], "build-info")
+ build_info = {
+ "BINPKGMD5": "%s\n" % pkg._metadata["MD5"],
+ }
+ if pkg.build_id is not None:
+ build_info["BUILD_ID"] = "%s\n" % pkg.build_id
+ for k, v in build_info.items():
+ with io.open(_unicode_encode(
+ os.path.join(infoloc, k),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='strict') as f:
+ f.write(v)
+ else:
+ if "PORTAGE_BINPKG_TMPFILE" in mysettings:
+ try:
+ os.unlink(mysettings["PORTAGE_BINPKG_TMPFILE"])
+ except OSError:
+ pass
+
+ elif returnpid:
+ writemsg("!!! doebuild: %s\n" %
+ _("returnpid is not supported for phase '%s'\n" % mydo),
+ noiselevel=-1)
+
+ if regular_actionmap_phase:
+ # handled above
+ pass
+ elif mydo == "qmerge":
+ # check to ensure install was run. this *only* pops up when users
+ # forget it and are using ebuild
+ if not os.path.exists(
+ os.path.join(mysettings["PORTAGE_BUILDDIR"], ".installed")):
+ writemsg(_("!!! mydo=qmerge, but the install phase has not been run\n"),
+ noiselevel=-1)
+ return 1
+ # qmerge is a special phase that implies noclean.
+ if "noclean" not in mysettings.features:
+ mysettings.features.add("noclean")
+ _handle_self_update(mysettings, vartree.dbapi)
+ #qmerge is specifically not supposed to do a runtime dep check
+ retval = merge(
+ mysettings["CATEGORY"], mysettings["PF"], mysettings["D"],
+ os.path.join(mysettings["PORTAGE_BUILDDIR"], "build-info"),
+ myroot, mysettings, myebuild=mysettings["EBUILD"], mytree=tree,
+ mydbapi=mydbapi, vartree=vartree, prev_mtimes=prev_mtimes,
+ fd_pipes=fd_pipes)
+ elif mydo=="merge":
+ retval = spawnebuild("install", actionmap, mysettings, debug,
+ alwaysdep=1, logfile=logfile, fd_pipes=fd_pipes,
+ returnpid=returnpid)
+ if retval != os.EX_OK:
+ # The merge phase handles this already. Callers don't know how
+ # far this function got, so we have to call elog_process() here
+ # so that it's only called once.
+ elog_process(mysettings.mycpv, mysettings)
+ if retval == os.EX_OK:
+ _handle_self_update(mysettings, vartree.dbapi)
+ retval = merge(mysettings["CATEGORY"], mysettings["PF"],
+ mysettings["D"], os.path.join(mysettings["PORTAGE_BUILDDIR"],
+ "build-info"), myroot, mysettings,
+ myebuild=mysettings["EBUILD"], mytree=tree, mydbapi=mydbapi,
+ vartree=vartree, prev_mtimes=prev_mtimes,
+ fd_pipes=fd_pipes)
+
+ else:
+ writemsg_stdout(_("!!! Unknown mydo: %s\n") % mydo, noiselevel=-1)
+ return 1
+
+ return retval
+
+ finally:
+
+ if builddir_lock is not None:
+ builddir_lock.scheduler.run_until_complete(
+ builddir_lock.async_unlock())
+ if tmpdir:
+ mysettings["PORTAGE_TMPDIR"] = tmpdir_orig
+ shutil.rmtree(tmpdir)
+
+ mysettings.pop("REPLACING_VERSIONS", None)
+
+ if logfile and not returnpid:
+ try:
+ if os.stat(logfile).st_size == 0:
+ os.unlink(logfile)
+ except OSError:
+ pass
+
+ if mydo in ("digest", "manifest", "help"):
+ # If necessary, depend phase has been triggered by aux_get calls
+ # and the exemption is no longer needed.
+ portage._doebuild_manifest_exempt_depend -= 1
+
+def _check_temp_dir(settings):
+ if "PORTAGE_TMPDIR" not in settings or \
+ not os.path.isdir(settings["PORTAGE_TMPDIR"]):
+ writemsg(_("The directory specified in your "
+ "PORTAGE_TMPDIR variable, '%s',\n"
+ "does not exist. Please create this directory or "
+ "correct your PORTAGE_TMPDIR setting.\n") % \
+ settings.get("PORTAGE_TMPDIR", ""), noiselevel=-1)
+ return 1
+
+	# Some people use a separate PORTAGE_TMPDIR mount; we prefer to check that,
+	# as the checks below would otherwise be pointless for those people.
+ tmpdir = os.path.realpath(settings["PORTAGE_TMPDIR"])
+ if os.path.exists(os.path.join(tmpdir, "portage")):
+ checkdir = os.path.realpath(os.path.join(tmpdir, "portage"))
+ if ("sandbox" in settings.features or
+			"usersandbox" in settings.features) and \
+ not checkdir.startswith(tmpdir + os.sep):
+ msg = _("The 'portage' subdirectory of the directory "
+ "referenced by the PORTAGE_TMPDIR variable appears to be "
+ "a symlink. In order to avoid sandbox violations (see bug "
+ "#378379), you must adjust PORTAGE_TMPDIR instead of using "
+ "the symlink located at '%s'. A suitable PORTAGE_TMPDIR "
+ "setting would be '%s'.") % \
+ (os.path.join(tmpdir, "portage"), checkdir)
+ lines = []
+ lines.append("")
+ lines.append("")
+ lines.extend(wrap(msg, 72))
+ lines.append("")
+ for line in lines:
+ if line:
+ line = "!!! %s" % (line,)
+ writemsg("%s\n" % (line,), noiselevel=-1)
+ return 1
+ else:
+ checkdir = tmpdir
+
+ if not os.access(checkdir, os.W_OK):
+ writemsg(_("%s is not writable.\n"
+ "Likely cause is that you've mounted it as readonly.\n") % checkdir,
+ noiselevel=-1)
+ return 1
+
+ with tempfile.NamedTemporaryFile(prefix="exectest-", dir=checkdir) as fd:
+ os.chmod(fd.name, 0o755)
+ if not os.access(fd.name, os.X_OK):
+ writemsg(_("Can not execute files in %s\n"
+ "Likely cause is that you've mounted it with one of the\n"
+ "following mount options: 'noexec', 'user', 'users'\n\n"
+ "Please make sure that portage can execute files in this directory.\n") % checkdir,
+ noiselevel=-1)
+ return 1
+
+ return os.EX_OK
+
+def _prepare_env_file(settings):
+ """
+ Extract environment.bz2 if it exists, but only if the destination
+ environment file doesn't already exist. There are lots of possible
+ states when doebuild() calls this function, and we want to avoid
+ clobbering an existing environment file.
+ """
+
+ env_extractor = BinpkgEnvExtractor(background=False,
+ scheduler=(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
+ settings=settings)
+
+ if env_extractor.dest_env_exists():
+ # There are lots of possible states when doebuild()
+ # calls this function, and we want to avoid
+ # clobbering an existing environment file.
+ return os.EX_OK
+
+ if not env_extractor.saved_env_exists():
+ # If the environment.bz2 doesn't exist, then ebuild.sh will
+ # source the ebuild as a fallback.
+ return os.EX_OK
+
+ env_extractor.start()
+ env_extractor.wait()
+ return env_extractor.returncode
+
+def _spawn_actionmap(settings):
+ features = settings.features
+ restrict = settings["PORTAGE_RESTRICT"].split()
+ nosandbox = (("userpriv" in features) and \
+ ("usersandbox" not in features) and \
+ "userpriv" not in restrict and \
+ "nouserpriv" not in restrict)
+
+ if not portage.process.sandbox_capable:
+ nosandbox = True
+
+ sesandbox = settings.selinux_enabled() and \
+ "sesandbox" in features
+
+ droppriv = "userpriv" in features and \
+ "userpriv" not in restrict and \
+ secpass >= 2
+
+ fakeroot = "fakeroot" in features
+
+ portage_bin_path = settings["PORTAGE_BIN_PATH"]
+ ebuild_sh_binary = os.path.join(portage_bin_path,
+ os.path.basename(EBUILD_SH_BINARY))
+ misc_sh_binary = os.path.join(portage_bin_path,
+ os.path.basename(MISC_SH_BINARY))
+ ebuild_sh = _shell_quote(ebuild_sh_binary) + " %s"
+ misc_sh = _shell_quote(misc_sh_binary) + " __dyn_%s"
+
+	# args are passed to the spawn function
+ actionmap = {
+"pretend": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1, "sesandbox":0, "fakeroot":0}},
+"setup": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":1, "sesandbox":0, "fakeroot":0}},
+"unpack": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0, "sesandbox":sesandbox, "fakeroot":0}},
+"prepare": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":0, "sesandbox":sesandbox, "fakeroot":0}},
+"configure":{"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
+"compile": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
+"test": {"cmd":ebuild_sh, "args":{"droppriv":droppriv, "free":nosandbox, "sesandbox":sesandbox, "fakeroot":0}},
+"install": {"cmd":ebuild_sh, "args":{"droppriv":0, "free":0, "sesandbox":sesandbox, "fakeroot":fakeroot}},
+"rpm": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0, "fakeroot":fakeroot}},
+"package": {"cmd":misc_sh, "args":{"droppriv":0, "free":0, "sesandbox":0, "fakeroot":fakeroot}},
+ }
+
+ return actionmap
+
+def _validate_deps(mysettings, myroot, mydo, mydbapi):
+
+ invalid_dep_exempt_phases = \
+ set(["clean", "cleanrm", "help", "prerm", "postrm"])
+ all_keys = set(Package.metadata_keys)
+ all_keys.add("SRC_URI")
+ all_keys = tuple(all_keys)
+ metadata = mysettings.configdict['pkg']
+ if all(k in metadata for k in ("PORTAGE_REPO_NAME", "SRC_URI")):
+ metadata = dict(((k, metadata[k]) for k in all_keys if k in metadata),
+ repository=metadata["PORTAGE_REPO_NAME"])
+ else:
+ metadata = dict(zip(all_keys,
+ mydbapi.aux_get(mysettings.mycpv, all_keys,
+ myrepo=mysettings.get("PORTAGE_REPO_NAME"))))
+
+ class FakeTree(object):
+ def __init__(self, mydb):
+ self.dbapi = mydb
+
+ root_config = RootConfig(mysettings, {"porttree":FakeTree(mydbapi)}, None)
+
+ pkg = Package(built=False, cpv=mysettings.mycpv,
+ metadata=metadata, root_config=root_config,
+ type_name="ebuild")
+
+ msgs = []
+ if pkg.invalid:
+ for k, v in pkg.invalid.items():
+ for msg in v:
+ msgs.append(" %s\n" % (msg,))
+
+ if msgs:
+ portage.util.writemsg_level(_("Error(s) in metadata for '%s':\n") % \
+ (mysettings.mycpv,), level=logging.ERROR, noiselevel=-1)
+ for x in msgs:
+ portage.util.writemsg_level(x,
+ level=logging.ERROR, noiselevel=-1)
+ if mydo not in invalid_dep_exempt_phases:
+ return 1
+
+ if not pkg.built and \
+ mydo not in ("digest", "help", "manifest") and \
+ pkg._metadata["REQUIRED_USE"] and \
+ eapi_has_required_use(pkg.eapi):
+ result = check_required_use(pkg._metadata["REQUIRED_USE"],
+ pkg.use.enabled, pkg.iuse.is_valid_flag, eapi=pkg.eapi)
+ if not result:
+ reduced_noise = result.tounicode()
+ writemsg("\n %s\n" % _("The following REQUIRED_USE flag" + \
+ " constraints are unsatisfied:"), noiselevel=-1)
+ writemsg(" %s\n" % reduced_noise,
+ noiselevel=-1)
+ normalized_required_use = \
+ " ".join(pkg._metadata["REQUIRED_USE"].split())
+ if reduced_noise != normalized_required_use:
+ writemsg("\n %s\n" % _("The above constraints " + \
+ "are a subset of the following complete expression:"),
+ noiselevel=-1)
+ writemsg(" %s\n" % \
+ human_readable_required_use(normalized_required_use),
+ noiselevel=-1)
+ writemsg("\n", noiselevel=-1)
+ return 1
+
+ return os.EX_OK
+
+# XXX This would be to replace getstatusoutput completely.
+# XXX Issue: cannot block execution. Deadlock condition.
+def spawn(mystring, mysettings, debug=False, free=False, droppriv=False,
+ sesandbox=False, fakeroot=False, networked=True, ipc=True, **keywords):
+ """
+ Spawn a subprocess with extra portage-specific options.
+	Options include:
+
+	Sandbox: Sandbox means the spawned process will be limited in its ability to
+	read and write files (normally this means it is restricted to ${D}/)
+	SELinux Sandbox: Enables sandboxing on SELinux
+	Reduced Privileges: Drops privileges such that the process runs as portage:portage
+	instead of as root.
+
+ Notes: os.system cannot be used because it messes with signal handling. Instead we
+ use the portage.process spawn* family of functions.
+
+ This function waits for the process to terminate.
+
+ @param mystring: Command to run
+ @type mystring: String
+	@param mysettings: An instance of portage.config
+	@type mysettings: config instance
+ @param debug: Ignored
+ @type debug: Boolean
+	@param free: Run this process without the sandbox (free of restrictions)
+ @type free: Boolean
+ @param droppriv: Drop to portage:portage when running this command
+ @type droppriv: Boolean
+ @param sesandbox: Enable SELinux Sandboxing (toggles a context switch)
+ @type sesandbox: Boolean
+ @param fakeroot: Run this command with faked root privileges
+ @type fakeroot: Boolean
+ @param networked: Run this command with networking access enabled
+ @type networked: Boolean
+ @param ipc: Run this command with host IPC access enabled
+ @type ipc: Boolean
+ @param keywords: Extra options encoded as a dict, to be passed to spawn
+ @type keywords: Dictionary
+ @rtype: Integer
+ @return:
+ 1. The return code of the spawned process.
+ """
+
+ check_config_instance(mysettings)
+
+ fd_pipes = keywords.get("fd_pipes")
+ if fd_pipes is None:
+ fd_pipes = {
+ 0:portage._get_stdin().fileno(),
+ 1:sys.__stdout__.fileno(),
+ 2:sys.__stderr__.fileno(),
+ }
+	# In some cases previously written output has not been flushed to stdout,
+	# so flush it before allowing a child process to use these descriptors,
+	# so that output always shows in the correct order.
+ stdout_filenos = (sys.__stdout__.fileno(), sys.__stderr__.fileno())
+ for fd in fd_pipes.values():
+ if fd in stdout_filenos:
+ sys.__stdout__.flush()
+ sys.__stderr__.flush()
+ break
+
+ features = mysettings.features
+
+	# Use Linux namespaces if available (requires running as root on Linux)
+ if uid == 0 and platform.system() == 'Linux':
+ keywords['unshare_net'] = not networked
+ keywords['unshare_ipc'] = not ipc
+
+ if not networked and mysettings.get("EBUILD_PHASE") != "nofetch" and \
+ ("network-sandbox-proxy" in features or "distcc" in features):
+ # Provide a SOCKS5-over-UNIX-socket proxy to escape sandbox
+ # Don't do this for pkg_nofetch, since the spawn_nofetch
+ # function creates a private PORTAGE_TMPDIR.
+ try:
+ proxy = get_socks5_proxy(mysettings)
+ except NotImplementedError:
+ pass
+ else:
+ mysettings['PORTAGE_SOCKS5_PROXY'] = proxy
+ mysettings['DISTCC_SOCKS_PROXY'] = proxy
+
+ # TODO: Enable fakeroot to be used together with droppriv. The
+ # fake ownership/permissions will have to be converted to real
+ # permissions in the merge phase.
+ fakeroot = fakeroot and uid != 0 and portage.process.fakeroot_capable
+ portage_build_uid = os.getuid()
+ portage_build_gid = os.getgid()
+ logname = None
+ if uid == 0 and portage_uid and portage_gid and hasattr(os, "setgroups"):
+ if droppriv:
+ logname = portage.data._portage_username
+ keywords.update({
+ "uid": portage_uid,
+ "gid": portage_gid,
+ "groups": userpriv_groups,
+ "umask": 0o22
+ })
+
+ # Adjust pty ownership so that subprocesses
+ # can directly access /dev/fd/{1,2}.
+ stdout_fd = fd_pipes.get(1)
+ if stdout_fd is not None:
+ try:
+ subprocess_tty = _os.ttyname(stdout_fd)
+ except OSError:
+ pass
+ else:
+ try:
+ parent_tty = _os.ttyname(sys.__stdout__.fileno())
+ except OSError:
+ parent_tty = None
+
+ if subprocess_tty != parent_tty:
+ _os.chown(subprocess_tty,
+ int(portage_uid), int(portage_gid))
+
+ if "userpriv" in features and "userpriv" not in mysettings["PORTAGE_RESTRICT"].split() and secpass >= 2:
+ # Since Python 3.4, getpwuid and getgrgid
+ # require int type (no proxies).
+ portage_build_uid = int(portage_uid)
+ portage_build_gid = int(portage_gid)
+
+ if "PORTAGE_BUILD_USER" not in mysettings:
+ user = None
+ try:
+ user = pwd.getpwuid(portage_build_uid).pw_name
+ except KeyError:
+ if portage_build_uid == 0:
+ user = "root"
+ elif portage_build_uid == portage_uid:
+ user = portage.data._portage_username
+ if user is not None:
+ mysettings["PORTAGE_BUILD_USER"] = user
+
+ if "PORTAGE_BUILD_GROUP" not in mysettings:
+ group = None
+ try:
+ group = grp.getgrgid(portage_build_gid).gr_name
+ except KeyError:
+ if portage_build_gid == 0:
+ group = "root"
+ elif portage_build_gid == portage_gid:
+ group = portage.data._portage_grpname
+ if group is not None:
+ mysettings["PORTAGE_BUILD_GROUP"] = group
+
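+	# Derive "free" (run without any sandbox) from the userpriv/sandbox/
+	# usersandbox features when the caller did not request it explicitly, and
+	# fall back to free when no sandbox implementation is available.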
+ if not free:
+ free=((droppriv and "usersandbox" not in features) or \
+ (not droppriv and "sandbox" not in features and \
+ "usersandbox" not in features and not fakeroot))
+
+ if not free and not (fakeroot or portage.process.sandbox_capable):
+ free = True
+
+ if mysettings.mycpv is not None:
+ keywords["opt_name"] = "[%s]" % mysettings.mycpv
+ else:
+ keywords["opt_name"] = "[%s/%s]" % \
+ (mysettings.get("CATEGORY",""), mysettings.get("PF",""))
+
+ if free or "SANDBOX_ACTIVE" in os.environ:
+ keywords["opt_name"] += " bash"
+ spawn_func = portage.process.spawn_bash
+ elif fakeroot:
+ keywords["opt_name"] += " fakeroot"
+ keywords["fakeroot_state"] = os.path.join(mysettings["T"], "fakeroot.state")
+ spawn_func = portage.process.spawn_fakeroot
+ else:
+ keywords["opt_name"] += " sandbox"
+ spawn_func = portage.process.spawn_sandbox
+
+ if sesandbox:
+ spawn_func = selinux.spawn_wrapper(spawn_func,
+ mysettings["PORTAGE_SANDBOX_T"])
+
+ logname_backup = None
+ if logname is not None:
+ logname_backup = mysettings.configdict["env"].get("LOGNAME")
+ mysettings.configdict["env"]["LOGNAME"] = logname
+
+ try:
+ if keywords.get("returnpid"):
+ return spawn_func(mystring, env=mysettings.environ(),
+ **keywords)
+
+ proc = EbuildSpawnProcess(
+ background=False, args=mystring,
+ scheduler=SchedulerInterface(portage._internal_caller and
+ global_event_loop() or EventLoop(main=False)),
+ spawn_func=spawn_func,
+ settings=mysettings, **keywords)
+
+ proc.start()
+ proc.wait()
+
+ return proc.returncode
+
+ finally:
+ if logname is None:
+ pass
+ elif logname_backup is None:
+ mysettings.configdict["env"].pop("LOGNAME", None)
+ else:
+ mysettings.configdict["env"]["LOGNAME"] = logname_backup
+
+# parse actionmap to spawn ebuild with the appropriate args
+def spawnebuild(mydo, actionmap, mysettings, debug, alwaysdep=0,
+ logfile=None, fd_pipes=None, returnpid=False):
+
+ if returnpid:
+ warnings.warn("portage.spawnebuild() called "
+ "with returnpid parameter enabled. This usage will "
+ "not be supported in the future.",
+ DeprecationWarning, stacklevel=2)
+
+ if not returnpid and \
+ (alwaysdep or "noauto" not in mysettings.features):
+ # process dependency first
+ if "dep" in actionmap[mydo]:
+ retval = spawnebuild(actionmap[mydo]["dep"], actionmap,
+ mysettings, debug, alwaysdep=alwaysdep, logfile=logfile,
+ fd_pipes=fd_pipes, returnpid=returnpid)
+ if retval:
+ return retval
+
+ eapi = mysettings["EAPI"]
+
+ if mydo in ("configure", "prepare") and not eapi_has_src_prepare_and_src_configure(eapi):
+ return os.EX_OK
+
+ if mydo == "pretend" and not eapi_has_pkg_pretend(eapi):
+ return os.EX_OK
+
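+	# Phase completion markers live in PORTAGE_BUILDDIR and are named by
+	# stripping a trailing 'e' from the phase name and appending 'ed',
+	# e.g. .unpacked, .compiled, .installed.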
+ if not (mydo == "install" and "noauto" in mysettings.features):
+ check_file = os.path.join(
+ mysettings["PORTAGE_BUILDDIR"], ".%sed" % mydo.rstrip('e'))
+ if os.path.exists(check_file):
+ writemsg_stdout(_(">>> It appears that "
+ "'%(action)s' has already executed for '%(pkg)s'; skipping.\n") %
+ {"action":mydo, "pkg":mysettings["PF"]})
+ writemsg_stdout(_(">>> Remove '%(file)s' to force %(action)s.\n") %
+ {"file":check_file, "action":mydo})
+ return os.EX_OK
+
+ return _spawn_phase(mydo, mysettings,
+ actionmap=actionmap, logfile=logfile,
+ fd_pipes=fd_pipes, returnpid=returnpid)
+
+_post_phase_cmds = {
+
+ "install" : [
+ "install_qa_check",
+ "install_symlink_html_docs",
+ "install_hooks"],
+
+ "preinst" : (
+ (
+ # Since SELinux does not allow LD_PRELOAD across domain transitions,
+ # disable the LD_PRELOAD sandbox for preinst_selinux_labels.
+ {
+ "ld_preload_sandbox": False,
+ "selinux_only": True,
+ },
+ [
+ "preinst_selinux_labels",
+ ],
+ ),
+ (
+ {},
+ [
+ "preinst_sfperms",
+ "preinst_suid_scan",
+ "preinst_qa_check",
+ ],
+ ),
+ ),
+ "postinst" : [
+ "postinst_qa_check"],
+}
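+# Each value is either a flat list of post-phase commands, or a tuple of
+# (spawn keyword overrides, command list) pairs for groups of commands that
+# need extra options such as disabling the LD_PRELOAD sandbox.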
+
+def _post_phase_userpriv_perms(mysettings):
+ if "userpriv" in mysettings.features and secpass >= 2:
+		# Privileged phases may have left files that need to be made
+		# writable to a less privileged user.
+ apply_recursive_permissions(mysettings["T"],
+ uid=portage_uid, gid=portage_gid, dirmode=0o700, dirmask=0,
+ filemode=0o600, filemask=0)
+
+
+def _check_build_log(mysettings, out=None):
+ """
+ Search the content of $PORTAGE_LOG_FILE if it exists
+ and generate the following QA Notices when appropriate:
+
+ * Automake "maintainer mode"
+ * command not found
+ * Unrecognized configure options
+ """
+ logfile = mysettings.get("PORTAGE_LOG_FILE")
+ if logfile is None:
+ return
+ try:
+ f = open(_unicode_encode(logfile, encoding=_encodings['fs'],
+ errors='strict'), mode='rb')
+ except EnvironmentError:
+ return
+
+ f_real = None
+ if logfile.endswith('.gz'):
+ f_real = f
+ f = gzip.GzipFile(filename='', mode='rb', fileobj=f)
+
+ am_maintainer_mode = []
+ bash_command_not_found = []
+ bash_command_not_found_re = re.compile(
+ r'(.*): line (\d*): (.*): command not found$')
+ command_not_found_exclude_re = re.compile(r'/configure: line ')
+ helper_missing_file = []
+ helper_missing_file_re = re.compile(
+ r'^!!! (do|new).*: .* does not exist$')
+
+ configure_opts_warn = []
+ configure_opts_warn_re = re.compile(
+ r'^configure: WARNING: [Uu]nrecognized options: (.*)')
+
+ qa_configure_opts = ""
+ try:
+ with io.open(_unicode_encode(os.path.join(
+ mysettings["PORTAGE_BUILDDIR"],
+ "build-info", "QA_CONFIGURE_OPTIONS"),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as qa_configure_opts_f:
+ qa_configure_opts = qa_configure_opts_f.read()
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+
+ qa_configure_opts = qa_configure_opts.split()
+ if qa_configure_opts:
+ if len(qa_configure_opts) > 1:
+ qa_configure_opts = "|".join("(%s)" % x for x in qa_configure_opts)
+ qa_configure_opts = "^(%s)$" % qa_configure_opts
+ else:
+ qa_configure_opts = "^%s$" % qa_configure_opts[0]
+ qa_configure_opts = re.compile(qa_configure_opts)
+
+ qa_am_maintainer_mode = []
+ try:
+ with io.open(_unicode_encode(os.path.join(
+ mysettings["PORTAGE_BUILDDIR"],
+ "build-info", "QA_AM_MAINTAINER_MODE"),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as qa_am_maintainer_mode_f:
+ qa_am_maintainer_mode = [x for x in
+ qa_am_maintainer_mode_f.read().splitlines() if x]
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+
+ if qa_am_maintainer_mode:
+ if len(qa_am_maintainer_mode) > 1:
+ qa_am_maintainer_mode = \
+ "|".join("(%s)" % x for x in qa_am_maintainer_mode)
+ qa_am_maintainer_mode = "^(%s)$" % qa_am_maintainer_mode
+ else:
+ qa_am_maintainer_mode = "^%s$" % qa_am_maintainer_mode[0]
+ qa_am_maintainer_mode = re.compile(qa_am_maintainer_mode)
+
+ # Exclude output from dev-libs/yaz-3.0.47 which looks like this:
+ #
+ #Configuration:
+ # Automake: ${SHELL} /var/tmp/portage/dev-libs/yaz-3.0.47/work/yaz-3.0.47/config/missing --run automake-1.10
+ am_maintainer_mode_re = re.compile(r'/missing --run ')
+ am_maintainer_mode_exclude_re = \
+ re.compile(r'(/missing --run (autoheader|autotest|help2man|makeinfo)|^\s*Automake:\s)')
+
+ make_jobserver_re = \
+ re.compile(r'g?make\[\d+\]: warning: jobserver unavailable:')
+ make_jobserver = []
+
+ def _eerror(lines):
+ for line in lines:
+ eerror(line, phase="install", key=mysettings.mycpv, out=out)
+
+ try:
+ for line in f:
+ line = _unicode_decode(line)
+ if am_maintainer_mode_re.search(line) is not None and \
+ am_maintainer_mode_exclude_re.search(line) is None and \
+ (not qa_am_maintainer_mode or
+ qa_am_maintainer_mode.search(line) is None):
+ am_maintainer_mode.append(line.rstrip("\n"))
+
+ if bash_command_not_found_re.match(line) is not None and \
+ command_not_found_exclude_re.search(line) is None:
+ bash_command_not_found.append(line.rstrip("\n"))
+
+ if helper_missing_file_re.match(line) is not None:
+ helper_missing_file.append(line.rstrip("\n"))
+
+ m = configure_opts_warn_re.match(line)
+ if m is not None:
+ for x in m.group(1).split(", "):
+ if not qa_configure_opts or qa_configure_opts.match(x) is None:
+ configure_opts_warn.append(x)
+
+ if make_jobserver_re.match(line) is not None:
+ make_jobserver.append(line.rstrip("\n"))
+
+ except zlib.error as e:
+ _eerror(["portage encountered a zlib error: '%s'" % (e,),
+ "while reading the log file: '%s'" % logfile])
+ finally:
+ f.close()
+
+ def _eqawarn(lines):
+ for line in lines:
+ eqawarn(line, phase="install", key=mysettings.mycpv, out=out)
+ wrap_width = 70
+
+ if am_maintainer_mode:
+ msg = [_("QA Notice: Automake \"maintainer mode\" detected:")]
+ msg.append("")
+ msg.extend("\t" + line for line in am_maintainer_mode)
+ msg.append("")
+ msg.extend(wrap(_(
+ "If you patch Makefile.am, "
+ "configure.in, or configure.ac then you "
+ "should use autotools.eclass and "
+ "eautomake or eautoreconf. Exceptions "
+ "are limited to system packages "
+ "for which it is impossible to run "
+ "autotools during stage building. "
+ "See https://wiki.gentoo.org/wiki/Project:Quality_Assurance/Autotools_failures"
+ " for more information."),
+ wrap_width))
+ _eqawarn(msg)
+
+ if bash_command_not_found:
+ msg = [_("QA Notice: command not found:")]
+ msg.append("")
+ msg.extend("\t" + line for line in bash_command_not_found)
+ _eqawarn(msg)
+
+ if helper_missing_file:
+ msg = [_("QA Notice: file does not exist:")]
+ msg.append("")
+ msg.extend("\t" + line[4:] for line in helper_missing_file)
+ _eqawarn(msg)
+
+ if configure_opts_warn:
+ msg = [_("QA Notice: Unrecognized configure options:")]
+ msg.append("")
+ msg.extend("\t%s" % x for x in configure_opts_warn)
+ _eqawarn(msg)
+
+ if make_jobserver:
+ msg = [_("QA Notice: make jobserver unavailable:")]
+ msg.append("")
+ msg.extend("\t" + line for line in make_jobserver)
+ _eqawarn(msg)
+
+ f.close()
+ if f_real is not None:
+ f_real.close()
+
+def _post_src_install_write_metadata(settings):
+ """
+ It's possible that the ebuild has changed the
+ CHOST variable, so revert it to the initial
+ setting. Also, revert IUSE in case it's corrupted
+ due to local environment settings like in bug #386829.
+ """
+
+ eapi_attrs = _get_eapi_attrs(settings.configdict['pkg']['EAPI'])
+
+ build_info_dir = os.path.join(settings['PORTAGE_BUILDDIR'], 'build-info')
+
+ metadata_keys = ['IUSE']
+ if eapi_attrs.iuse_effective:
+ metadata_keys.append('IUSE_EFFECTIVE')
+
+ for k in metadata_keys:
+ v = settings.configdict['pkg'].get(k)
+ if v is not None:
+ write_atomic(os.path.join(build_info_dir, k), v + '\n')
+
+ for k in ('CHOST',):
+ v = settings.get(k)
+ if v is not None:
+ write_atomic(os.path.join(build_info_dir, k), v + '\n')
+
+ with io.open(_unicode_encode(os.path.join(build_info_dir,
+ 'BUILD_TIME'), encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='strict') as f:
+ f.write("%.0f\n" % (time.time(),))
+
+ use = frozenset(settings['PORTAGE_USE'].split())
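+	# Store USE-evaluated copies of the conditional metadata keys in build-info;
+	# an empty result removes any stale file left over from a previous build.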
+ for k in _vdb_use_conditional_keys:
+ v = settings.configdict['pkg'].get(k)
+ filename = os.path.join(build_info_dir, k)
+ if v is None:
+ try:
+ os.unlink(filename)
+ except OSError:
+ pass
+ continue
+
+ if k.endswith('DEPEND'):
+ if eapi_attrs.slot_operator:
+ continue
+ token_class = Atom
+ else:
+ token_class = None
+
+ v = use_reduce(v, uselist=use, token_class=token_class)
+ v = paren_enclose(v)
+ if not v:
+ try:
+ os.unlink(filename)
+ except OSError:
+ pass
+ continue
+ with io.open(_unicode_encode(os.path.join(build_info_dir,
+ k), encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='strict') as f:
+ f.write('%s\n' % v)
+
+ if eapi_attrs.slot_operator:
+ deps = evaluate_slot_operator_equal_deps(settings, use, QueryCommand.get_db())
+ for k, v in deps.items():
+ filename = os.path.join(build_info_dir, k)
+ if not v:
+ try:
+ os.unlink(filename)
+ except OSError:
+ pass
+ continue
+ with io.open(_unicode_encode(os.path.join(build_info_dir,
+ k), encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='strict') as f:
+ f.write('%s\n' % v)
+
+def _preinst_bsdflags(mysettings):
+ if bsd_chflags:
+ # Save all the file flags for restoration later.
+ os.system("mtree -c -p %s -k flags > %s" % \
+ (_shell_quote(mysettings["D"]),
+ _shell_quote(os.path.join(mysettings["T"], "bsdflags.mtree"))))
+
+ # Remove all the file flags to avoid EPERM errors.
+ os.system("chflags -R noschg,nouchg,nosappnd,nouappnd %s" % \
+ (_shell_quote(mysettings["D"]),))
+ os.system("chflags -R nosunlnk,nouunlnk %s 2>/dev/null" % \
+ (_shell_quote(mysettings["D"]),))
+
+
+def _postinst_bsdflags(mysettings):
+ if bsd_chflags:
+ # Restore all of the flags saved above.
+ os.system("mtree -e -p %s -U -k flags < %s > /dev/null" % \
+ (_shell_quote(mysettings["ROOT"]),
+ _shell_quote(os.path.join(mysettings["T"], "bsdflags.mtree"))))
+
+def _post_src_install_uid_fix(mysettings, out):
+ """
+ Files in $D with user and group bits that match the "portage"
+ user or group are automatically mapped to PORTAGE_INST_UID and
+ PORTAGE_INST_GID if necessary. The chown system call may clear
+ S_ISUID and S_ISGID bits, so those bits are restored if
+ necessary.
+ """
+
+ os = _os_merge
+
+ inst_uid = int(mysettings["PORTAGE_INST_UID"])
+ inst_gid = int(mysettings["PORTAGE_INST_GID"])
+
+ _preinst_bsdflags(mysettings)
+
+ destdir = mysettings["D"]
+ ed_len = len(mysettings["ED"])
+ unicode_errors = []
+ desktop_file_validate = \
+ portage.process.find_binary("desktop-file-validate") is not None
+ xdg_dirs = mysettings.get('XDG_DATA_DIRS', '/usr/share').split(':')
+ xdg_dirs = tuple(os.path.join(i, "applications") + os.sep
+ for i in xdg_dirs if i)
+
+ qa_desktop_file = ""
+ try:
+ with io.open(_unicode_encode(os.path.join(
+ mysettings["PORTAGE_BUILDDIR"],
+ "build-info", "QA_DESKTOP_FILE"),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ qa_desktop_file = f.read()
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+
+ qa_desktop_file = qa_desktop_file.split()
+ if qa_desktop_file:
+ if len(qa_desktop_file) > 1:
+ qa_desktop_file = "|".join("(%s)" % x for x in qa_desktop_file)
+ qa_desktop_file = "^(%s)$" % qa_desktop_file
+ else:
+ qa_desktop_file = "^%s$" % qa_desktop_file[0]
+ qa_desktop_file = re.compile(qa_desktop_file)
+
+ while True:
+
+ unicode_error = False
+ size = 0
+ counted_inodes = set()
+ fixlafiles_announced = False
+ fixlafiles = "fixlafiles" in mysettings.features
+ desktopfile_errors = []
+
+ for parent, dirs, files in os.walk(destdir):
+ try:
+ parent = _unicode_decode(parent,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeDecodeError:
+ new_parent = _unicode_decode(parent,
+ encoding=_encodings['merge'], errors='replace')
+ new_parent = _unicode_encode(new_parent,
+ encoding='ascii', errors='backslashreplace')
+ new_parent = _unicode_decode(new_parent,
+ encoding=_encodings['merge'], errors='replace')
+ os.rename(parent, new_parent)
+ unicode_error = True
+ unicode_errors.append(new_parent[ed_len:])
+ break
+
+ for fname in chain(dirs, files):
+ try:
+ fname = _unicode_decode(fname,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeDecodeError:
+ fpath = _os.path.join(
+ parent.encode(_encodings['merge']), fname)
+ new_fname = _unicode_decode(fname,
+ encoding=_encodings['merge'], errors='replace')
+ new_fname = _unicode_encode(new_fname,
+ encoding='ascii', errors='backslashreplace')
+ new_fname = _unicode_decode(new_fname,
+ encoding=_encodings['merge'], errors='replace')
+ new_fpath = os.path.join(parent, new_fname)
+ os.rename(fpath, new_fpath)
+ unicode_error = True
+ unicode_errors.append(new_fpath[ed_len:])
+ fname = new_fname
+ fpath = new_fpath
+ else:
+ fpath = os.path.join(parent, fname)
+
+ fpath_relative = fpath[ed_len - 1:]
+ if desktop_file_validate and fname.endswith(".desktop") and \
+ os.path.isfile(fpath) and \
+ fpath_relative.startswith(xdg_dirs) and \
+ not (qa_desktop_file and qa_desktop_file.match(fpath_relative.strip(os.sep)) is not None):
+
+ desktop_validate = validate_desktop_entry(fpath)
+ if desktop_validate:
+ desktopfile_errors.extend(desktop_validate)
+
+ if fixlafiles and \
+ fname.endswith(".la") and os.path.isfile(fpath):
+ f = open(_unicode_encode(fpath,
+ encoding=_encodings['merge'], errors='strict'),
+ mode='rb')
+ has_lafile_header = b'.la - a libtool library file' \
+ in f.readline()
+ f.seek(0)
+ contents = f.read()
+ f.close()
+ try:
+ needs_update, new_contents = rewrite_lafile(contents)
+ except portage.exception.InvalidData as e:
+ needs_update = False
+ if not fixlafiles_announced:
+ fixlafiles_announced = True
+ writemsg("Fixing .la files\n", fd=out)
+
+ # Suppress warnings if the file does not have the
+ # expected header (bug #340725). Even if the header is
+ # missing, we still call rewrite_lafile() since some
+ # valid libtool archives may not have the header.
+ msg = " %s is not a valid libtool archive, skipping\n" % fpath[len(destdir):]
+ qa_msg = "QA Notice: invalid .la file found: %s, %s" % (fpath[len(destdir):], e)
+ if has_lafile_header:
+ writemsg(msg, fd=out)
+ eqawarn(qa_msg, key=mysettings.mycpv, out=out)
+
+ if needs_update:
+ if not fixlafiles_announced:
+ fixlafiles_announced = True
+ writemsg("Fixing .la files\n", fd=out)
+ writemsg(" %s\n" % fpath[len(destdir):], fd=out)
+ # write_atomic succeeds even in some cases in which
+ # a normal write might fail due to file permission
+ # settings on some operating systems such as HP-UX
+ write_atomic(_unicode_encode(fpath,
+ encoding=_encodings['merge'], errors='strict'),
+ new_contents, mode='wb')
+
+ mystat = os.lstat(fpath)
+ if stat.S_ISREG(mystat.st_mode) and \
+ mystat.st_ino not in counted_inodes:
+ counted_inodes.add(mystat.st_ino)
+ size += mystat.st_size
+ if mystat.st_uid != portage_uid and \
+ mystat.st_gid != portage_gid:
+ continue
+ myuid = -1
+ mygid = -1
+ if mystat.st_uid == portage_uid:
+ myuid = inst_uid
+ if mystat.st_gid == portage_gid:
+ mygid = inst_gid
+ apply_secpass_permissions(
+ _unicode_encode(fpath, encoding=_encodings['merge']),
+ uid=myuid, gid=mygid,
+ mode=mystat.st_mode, stat_cached=mystat,
+ follow_links=False)
+
+ if unicode_error:
+ break
+
+ if not unicode_error:
+ break
+
+ if desktopfile_errors:
+ for l in _merge_desktopfile_error(desktopfile_errors):
+ l = l.replace(mysettings["ED"], '/')
+ eqawarn(l, phase='install', key=mysettings.mycpv, out=out)
+
+ if unicode_errors:
+ for l in _merge_unicode_error(unicode_errors):
+ eqawarn(l, phase='install', key=mysettings.mycpv, out=out)
+
+ build_info_dir = os.path.join(mysettings['PORTAGE_BUILDDIR'],
+ 'build-info')
+
+ f = io.open(_unicode_encode(os.path.join(build_info_dir,
+ 'SIZE'), encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='strict')
+ f.write('%d\n' % size)
+ f.close()
+
+ _reapply_bsdflags_to_image(mysettings)
+
+def _reapply_bsdflags_to_image(mysettings):
+ """
+ Reapply flags saved and removed by _preinst_bsdflags.
+ """
+ if bsd_chflags:
+ os.system("mtree -e -p %s -U -k flags < %s > /dev/null" % \
+ (_shell_quote(mysettings["D"]),
+ _shell_quote(os.path.join(mysettings["T"], "bsdflags.mtree"))))
+
+def _post_src_install_soname_symlinks(mysettings, out):
+ """
+ Check that libraries in $D have corresponding soname symlinks.
+ If symlinks are missing then create them and trigger a QA Notice.
+ This requires $PORTAGE_BUILDDIR/build-info/NEEDED.ELF.2 for
+ operation.
+ """
+
+ image_dir = mysettings["D"]
+ needed_filename = os.path.join(mysettings["PORTAGE_BUILDDIR"],
+ "build-info", "NEEDED.ELF.2")
+
+ f = None
+ try:
+ f = io.open(_unicode_encode(needed_filename,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ lines = f.readlines()
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ return
+ finally:
+ if f is not None:
+ f.close()
+
+ metadata = {}
+ for k in ("QA_PREBUILT", "QA_NO_SYMLINK"):
+ try:
+ with io.open(_unicode_encode(os.path.join(
+ mysettings["PORTAGE_BUILDDIR"],
+ "build-info", k),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ v = f.read()
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ else:
+ metadata[k] = v
+
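+ # QA_PREBUILT lists fnmatch patterns (relative to the image root) for
+ # prebuilt objects that are exempt from the multilib-category check below,
+ # while QA_NO_SYMLINK lists regular expressions for objects that should
+ # not receive soname symlinks.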
+ qa_prebuilt = metadata.get("QA_PREBUILT", "").strip()
+ if qa_prebuilt:
+ qa_prebuilt = re.compile("|".join(
+ fnmatch.translate(x.lstrip(os.sep))
+ for x in portage.util.shlex_split(qa_prebuilt)))
+
+ qa_no_symlink = metadata.get("QA_NO_SYMLINK", "").split()
+ if qa_no_symlink:
+ if len(qa_no_symlink) > 1:
+ qa_no_symlink = "|".join("(%s)" % x for x in qa_no_symlink)
+ qa_no_symlink = "^(%s)$" % qa_no_symlink
+ else:
+ qa_no_symlink = "^%s$" % qa_no_symlink[0]
+ qa_no_symlink = re.compile(qa_no_symlink)
+
+ libpaths = set(portage.util.getlibpaths(
+ mysettings["ROOT"], env=mysettings))
+ libpath_inodes = set()
+ for libpath in libpaths:
+ libdir = os.path.join(mysettings["ROOT"], libpath.lstrip(os.sep))
+ try:
+ s = os.stat(libdir)
+ except OSError:
+ continue
+ else:
+ libpath_inodes.add((s.st_dev, s.st_ino))
+
+ is_libdir_cache = {}
+
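+ # is_libdir() reports whether a directory (relative to the image root)
+ # is one of the configured library paths, comparing by device and inode
+ # so that symlinked libdirs are also recognized.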
+ def is_libdir(obj_parent):
+ try:
+ return is_libdir_cache[obj_parent]
+ except KeyError:
+ pass
+
+ rval = False
+ if obj_parent in libpaths:
+ rval = True
+ else:
+ parent_path = os.path.join(mysettings["ROOT"],
+ obj_parent.lstrip(os.sep))
+ try:
+ s = os.stat(parent_path)
+ except OSError:
+ pass
+ else:
+ if (s.st_dev, s.st_ino) in libpath_inodes:
+ rval = True
+
+ is_libdir_cache[obj_parent] = rval
+ return rval
+
+ build_info_dir = os.path.join(
+ mysettings['PORTAGE_BUILDDIR'], 'build-info')
+ try:
+ with io.open(_unicode_encode(os.path.join(build_info_dir,
+ "PROVIDES_EXCLUDE"), encoding=_encodings['fs'],
+ errors='strict'), mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ provides_exclude = f.read()
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ provides_exclude = ""
+
+ try:
+ with io.open(_unicode_encode(os.path.join(build_info_dir,
+ "REQUIRES_EXCLUDE"), encoding=_encodings['fs'],
+ errors='strict'), mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ requires_exclude = f.read()
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ requires_exclude = ""
+
+ missing_symlinks = []
+ unrecognized_elf_files = []
+ soname_deps = SonameDepsProcessor(
+ provides_exclude, requires_exclude)
+
+ # Parse NEEDED.ELF.2 like LinkageMapELF.rebuild() does, and
+ # rewrite it to include multilib categories.
+ needed_file = portage.util.atomic_ofstream(needed_filename,
+ encoding=_encodings["repo.content"], errors="strict")
+
+ for l in lines:
+ l = l.rstrip("\n")
+ if not l:
+ continue
+ try:
+ entry = NeededEntry.parse(needed_filename, l)
+ except InvalidData as e:
+ portage.util.writemsg_level("\n%s\n\n" % (e,),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+
+ filename = os.path.join(image_dir,
+ entry.filename.lstrip(os.sep))
+ with open(_unicode_encode(filename, encoding=_encodings['fs'],
+ errors='strict'), 'rb') as f:
+ elf_header = ELFHeader.read(f)
+
+ # Compute the multilib category and write it back to the file.
+ entry.multilib_category = compute_multilib_category(elf_header)
+ needed_file.write(_unicode(entry))
+
+ if entry.multilib_category is None:
+ if not qa_prebuilt or qa_prebuilt.match(
+ entry.filename[len(mysettings["EPREFIX"]):].lstrip(
+ os.sep)) is None:
+ unrecognized_elf_files.append(entry)
+ else:
+ soname_deps.add(entry)
+
+ obj = entry.filename
+ soname = entry.soname
+
+ if not soname:
+ continue
+ if not is_libdir(os.path.dirname(obj)):
+ continue
+ if qa_no_symlink and qa_no_symlink.match(obj.strip(os.sep)) is not None:
+ continue
+
+ obj_file_path = os.path.join(image_dir, obj.lstrip(os.sep))
+ sym_file_path = os.path.join(os.path.dirname(obj_file_path), soname)
+ try:
+ os.lstat(sym_file_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ else:
+ continue
+
+ missing_symlinks.append((obj, soname))
+
+ needed_file.close()
+
+ if soname_deps.requires is not None:
+ with io.open(_unicode_encode(os.path.join(build_info_dir,
+ 'REQUIRES'), encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='strict') as f:
+ f.write(soname_deps.requires)
+
+ if soname_deps.provides is not None:
+ with io.open(_unicode_encode(os.path.join(build_info_dir,
+ 'PROVIDES'), encoding=_encodings['fs'], errors='strict'),
+ mode='w', encoding=_encodings['repo.content'],
+ errors='strict') as f:
+ f.write(soname_deps.provides)
+
+ if unrecognized_elf_files:
+ qa_msg = ["QA Notice: Unrecognized ELF file(s):"]
+ qa_msg.append("")
+ qa_msg.extend("\t%s" % _unicode(entry).rstrip()
+ for entry in unrecognized_elf_files)
+ qa_msg.append("")
+ for line in qa_msg:
+ eqawarn(line, key=mysettings.mycpv, out=out)
+
+ if not missing_symlinks:
+ return
+
+ qa_msg = ["QA Notice: Missing soname symlink(s):"]
+ qa_msg.append("")
+ qa_msg.extend("\t%s -> %s" % (os.path.join(
+ os.path.dirname(obj).lstrip(os.sep), soname),
+ os.path.basename(obj))
+ for obj, soname in missing_symlinks)
+ qa_msg.append("")
+ for line in qa_msg:
+ eqawarn(line, key=mysettings.mycpv, out=out)
+
+def _merge_desktopfile_error(errors):
+ lines = []
+
+ msg = _("QA Notice: This package installs one or more .desktop files "
+ "that do not pass validation.")
+ lines.extend(wrap(msg, 72))
+
+ lines.append("")
+ errors.sort()
+ lines.extend("\t" + x for x in errors)
+ lines.append("")
+
+ return lines
+
+def _merge_unicode_error(errors):
+ lines = []
+
+ msg = _("QA Notice: This package installs one or more file names "
+ "containing characters that are not encoded with the UTF-8 encoding.")
+ lines.extend(wrap(msg, 72))
+
+ lines.append("")
+ errors.sort()
+ lines.extend("\t" + x for x in errors)
+ lines.append("")
+
+ return lines
+
+def _prepare_self_update(settings):
+ """
+ Call this when portage is updating itself, in order to create
+ temporary copies of PORTAGE_BIN_PATH and PORTAGE_PYM_PATH, since
+ the new versions may be incompatible. An atexit hook will
+ automatically clean up the temporary copies.
+ """
+
+ # sanity check: ensure that this routine only runs once
+ if portage._bin_path != portage.const.PORTAGE_BIN_PATH:
+ return
+
+ # Load lazily referenced portage submodules into memory,
+ # so imports won't fail during portage upgrade/downgrade.
+ _preload_elog_modules(settings)
+ portage.proxy.lazyimport._preload_portage_submodules()
+
+ # Make the temp directory inside $PORTAGE_TMPDIR/portage, since
+ # it's common for /tmp and /var/tmp to be mounted with the
+ # "noexec" option (see bug #346899).
+ build_prefix = os.path.join(settings["PORTAGE_TMPDIR"], "portage")
+ portage.util.ensure_dirs(build_prefix)
+ base_path_tmp = tempfile.mkdtemp(
+ "", "._portage_reinstall_.", build_prefix)
+ portage.process.atexit_register(shutil.rmtree, base_path_tmp)
+
+ orig_bin_path = portage._bin_path
+ portage._bin_path = os.path.join(base_path_tmp, "bin")
+ shutil.copytree(orig_bin_path, portage._bin_path, symlinks=True)
+
+ orig_pym_path = portage._pym_path
+ portage._pym_path = os.path.join(base_path_tmp, "lib")
+ os.mkdir(portage._pym_path)
+ for pmod in PORTAGE_PYM_PACKAGES:
+ shutil.copytree(os.path.join(orig_pym_path, pmod),
+ os.path.join(portage._pym_path, pmod),
+ symlinks=True)
+
+ for dir_path in (base_path_tmp, portage._bin_path, portage._pym_path):
+ os.chmod(dir_path, 0o755)
+
+def _handle_self_update(settings, vardb):
+ cpv = settings.mycpv
+ if settings["ROOT"] == "/" and \
+ portage.dep.match_from_list(
+ portage.const.PORTAGE_PACKAGE_ATOM, [cpv]):
+ _prepare_self_update(settings)
+ return True
+ return False
diff --git a/lib/portage/package/ebuild/fetch.py b/lib/portage/package/ebuild/fetch.py
new file mode 100644
index 000000000..0431e11ea
--- /dev/null
+++ b/lib/portage/package/ebuild/fetch.py
@@ -0,0 +1,1174 @@
+# Copyright 2010-2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+__all__ = ['fetch']
+
+import errno
+import io
+import logging
+import random
+import re
+import stat
+import sys
+import tempfile
+
+try:
+ from urllib.parse import urlparse
+except ImportError:
+ from urlparse import urlparse
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.package.ebuild.config:check_config_instance,config',
+ 'portage.package.ebuild.doebuild:doebuild_environment,' + \
+ '_doebuild_spawn',
+ 'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
+)
+
+from portage import OrderedDict, os, selinux, shutil, _encodings, \
+ _shell_quote, _unicode_encode
+from portage.checksum import (get_valid_checksum_keys, perform_md5, verify_all,
+ _filter_unaccelarated_hashes, _hash_filter, _apply_hash_filter)
+from portage.const import BASH_BINARY, CUSTOM_MIRRORS_FILE, \
+ GLOBAL_CONFIG_PATH
+from portage.data import portage_gid, portage_uid, secpass, userpriv_groups
+from portage.exception import FileNotFound, OperationNotPermitted, \
+ PortageException, TryAgain
+from portage.localization import _
+from portage.locks import lockfile, unlockfile
+from portage.output import colorize, EOutput
+from portage.util import apply_recursive_permissions, \
+ apply_secpass_permissions, ensure_dirs, grabdict, shlex_split, \
+ varexpand, writemsg, writemsg_level, writemsg_stdout
+from portage.process import spawn
+
+_userpriv_spawn_kwargs = (
+ ("uid", portage_uid),
+ ("gid", portage_gid),
+ ("groups", userpriv_groups),
+ ("umask", 0o02),
+)
+
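+# Mask the password component of URLs of the form //user:password@host so
+# that credentials are not echoed to the terminal or written to logs.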
+def _hide_url_passwd(url):
+ return re.sub(r'//(.+):.+@(.+)', r'//\1:*password*@\2', url)
+
+def _spawn_fetch(settings, args, **kwargs):
+ """
+ Spawn a process with appropriate settings for fetching, including
+ userfetch and selinux support.
+ """
+
+ global _userpriv_spawn_kwargs
+
+ # Redirect all output to stdout since some fetchers like
+ # wget pollute stderr (if portage detects a problem then it
+ # can send its own message to stderr).
+ if "fd_pipes" not in kwargs:
+
+ kwargs["fd_pipes"] = {
+ 0 : portage._get_stdin().fileno(),
+ 1 : sys.__stdout__.fileno(),
+ 2 : sys.__stdout__.fileno(),
+ }
+
+ logname = None
+ if "userfetch" in settings.features and \
+ os.getuid() == 0 and portage_gid and portage_uid and \
+ hasattr(os, "setgroups"):
+ kwargs.update(_userpriv_spawn_kwargs)
+ logname = portage.data._portage_username
+
+ spawn_func = spawn
+
+ if settings.selinux_enabled():
+ spawn_func = selinux.spawn_wrapper(spawn_func,
+ settings["PORTAGE_FETCH_T"])
+
+ # bash is an allowed entrypoint, while most binaries are not
+ if args[0] != BASH_BINARY:
+ args = [BASH_BINARY, "-c", "exec \"$@\"", args[0]] + args
+
+ # Ensure that EBUILD_PHASE is set to fetch, so that config.environ()
+ # does not filter the calling environment (which may contain needed
+ # proxy variables, as in bug #315421).
+ phase_backup = settings.get('EBUILD_PHASE')
+ settings['EBUILD_PHASE'] = 'fetch'
+ env = settings.environ()
+ if logname is not None:
+ env["LOGNAME"] = logname
+ try:
+ rval = spawn_func(args, env=env, **kwargs)
+ finally:
+ if phase_backup is None:
+ settings.pop('EBUILD_PHASE', None)
+ else:
+ settings['EBUILD_PHASE'] = phase_backup
+
+ return rval
+
+_userpriv_test_write_file_cache = {}
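+# Maps file paths to the result of the last write test so that repeated
+# checks for the same path do not spawn another shell.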
+_userpriv_test_write_cmd_script = ">> %(file_path)s 2>/dev/null ; rval=$? ; " + \
+ "rm -f %(file_path)s ; exit $rval"
+
+def _userpriv_test_write_file(settings, file_path):
+ """
+ Drop privileges and try to open a file for writing. The file may or
+ may not exist, and the parent directory is assumed to exist. The file
+ is removed before returning.
+
+ @param settings: A config instance which is passed to _spawn_fetch()
+ @param file_path: A file path to open and write.
+ @return: True if write succeeds, False otherwise.
+ """
+
+ global _userpriv_test_write_file_cache, _userpriv_test_write_cmd_script
+ rval = _userpriv_test_write_file_cache.get(file_path)
+ if rval is not None:
+ return rval
+
+ args = [BASH_BINARY, "-c", _userpriv_test_write_cmd_script % \
+ {"file_path" : _shell_quote(file_path)}]
+
+ returncode = _spawn_fetch(settings, args)
+
+ rval = returncode == os.EX_OK
+ _userpriv_test_write_file_cache[file_path] = rval
+ return rval
+
+def _checksum_failure_temp_file(distdir, basename):
+ """
+ First try to find a duplicate temp file with the same checksum and return
+ that filename if available. Otherwise, use mkstemp to create a new unique
+ filename._checksum_failure_.$RANDOM, rename the given file, and return the
+ new filename. In any case, filename will be renamed or removed before this
+ function returns a temp filename.
+ """
+
+ filename = os.path.join(distdir, basename)
+ size = os.stat(filename).st_size
+ checksum = None
+ tempfile_re = re.compile(re.escape(basename) + r'\._checksum_failure_\..*')
+ for temp_filename in os.listdir(distdir):
+ if not tempfile_re.match(temp_filename):
+ continue
+ temp_filename = os.path.join(distdir, temp_filename)
+ try:
+ if size != os.stat(temp_filename).st_size:
+ continue
+ except OSError:
+ continue
+ try:
+ temp_checksum = perform_md5(temp_filename)
+ except FileNotFound:
+ # Apparently the temp file disappeared. Let it go.
+ continue
+ if checksum is None:
+ checksum = perform_md5(filename)
+ if checksum == temp_checksum:
+ os.unlink(filename)
+ return temp_filename
+
+ fd, temp_filename = \
+ tempfile.mkstemp("", basename + "._checksum_failure_.", distdir)
+ os.close(fd)
+ os.rename(filename, temp_filename)
+ return temp_filename
+
+def _check_digests(filename, digests, show_errors=1):
+ """
+ Check digests and display a message if an error occurs.
+ @return True if all digests match, False otherwise.
+ """
+ verified_ok, reason = verify_all(filename, digests)
+ if not verified_ok:
+ if show_errors:
+ writemsg(_("!!! Previously fetched"
+ " file: '%s'\n") % filename, noiselevel=-1)
+ writemsg(_("!!! Reason: %s\n") % reason[0],
+ noiselevel=-1)
+ writemsg(_("!!! Got: %s\n"
+ "!!! Expected: %s\n") % \
+ (reason[1], reason[2]), noiselevel=-1)
+ return False
+ return True
+
+def _check_distfile(filename, digests, eout, show_errors=1, hash_filter=None):
+ """
+ @return a tuple of (match, stat_obj) where match is True if filename
+ matches all given digests (if any) and stat_obj is a stat result, or
+ None if the file does not exist.
+ """
+ if digests is None:
+ digests = {}
+ size = digests.get("size")
+ if size is not None and len(digests) == 1:
+ digests = None
+
+ try:
+ st = os.stat(filename)
+ except OSError:
+ return (False, None)
+ if size is not None and size != st.st_size:
+ return (False, st)
+ if not digests:
+ if size is not None:
+ eout.ebegin(_("%s size ;-)") % os.path.basename(filename))
+ eout.eend(0)
+ elif st.st_size == 0:
+ # Zero-byte distfiles are always invalid.
+ return (False, st)
+ else:
+ digests = _filter_unaccelarated_hashes(digests)
+ if hash_filter is not None:
+ digests = _apply_hash_filter(digests, hash_filter)
+ if _check_digests(filename, digests, show_errors=show_errors):
+ eout.ebegin("%s %s ;-)" % (os.path.basename(filename),
+ " ".join(sorted(digests))))
+ eout.eend(0)
+ else:
+ return (False, st)
+ return (True, st)
+
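+# PORTAGE_FETCH_RESUME_MIN_SIZE values look like "350K"; the regular
+# expression below splits the number from its suffix, and _size_suffix_map
+# gives the corresponding power-of-two exponent (e.g. 'K' -> 2**10).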
+_fetch_resume_size_re = re.compile(r'(^[\d]+)([KMGTPEZY]?$)')
+
+_size_suffix_map = {
+ '' : 0,
+ 'K' : 10,
+ 'M' : 20,
+ 'G' : 30,
+ 'T' : 40,
+ 'P' : 50,
+ 'E' : 60,
+ 'Z' : 70,
+ 'Y' : 80,
+}
+
+def fetch(myuris, mysettings, listonly=0, fetchonly=0,
+ locks_in_subdir=".locks", use_locks=1, try_mirrors=1, digests=None,
+ allow_missing_digests=True):
+ "fetch files. Will use digest file if available."
+
+ if not myuris:
+ return 1
+
+ features = mysettings.features
+ restrict = mysettings.get("PORTAGE_RESTRICT","").split()
+
+ userfetch = secpass >= 2 and "userfetch" in features
+ userpriv = secpass >= 2 and "userpriv" in features
+
+ # 'nomirror' is bad/negative logic. You restrict mirroring, not no-mirroring.
+ restrict_mirror = "mirror" in restrict or "nomirror" in restrict
+ if restrict_mirror:
+ if ("mirror" in features) and ("lmirror" not in features):
+ # lmirror should allow you to bypass mirror restrictions.
+ # XXX: This is not a good thing, and is temporary at best.
+ print(_(">>> \"mirror\" mode desired and \"mirror\" restriction found; skipping fetch."))
+ return 1
+
+ # Generally, downloading the same file repeatedly from
+ # every single available mirror is a waste of bandwidth
+ # and time, so there needs to be a cap.
+ checksum_failure_max_tries = 5
+ v = checksum_failure_max_tries
+ try:
+ v = int(mysettings.get("PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS",
+ checksum_failure_max_tries))
+ except (ValueError, OverflowError):
+ writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
+ " contains non-integer value: '%s'\n") % \
+ mysettings["PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"], noiselevel=-1)
+ writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
+ "default value: %s\n") % checksum_failure_max_tries,
+ noiselevel=-1)
+ v = checksum_failure_max_tries
+ if v < 1:
+ writemsg(_("!!! Variable PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS"
+ " contains value less than 1: '%s'\n") % v, noiselevel=-1)
+ writemsg(_("!!! Using PORTAGE_FETCH_CHECKSUM_TRY_MIRRORS "
+ "default value: %s\n") % checksum_failure_max_tries,
+ noiselevel=-1)
+ v = checksum_failure_max_tries
+ checksum_failure_max_tries = v
+ del v
+
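+ # Partial downloads smaller than PORTAGE_FETCH_RESUME_MIN_SIZE are
+ # renamed or deleted and fetched again from scratch rather than resumed
+ # (see the size checks further below).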
+ fetch_resume_size_default = "350K"
+ fetch_resume_size = mysettings.get("PORTAGE_FETCH_RESUME_MIN_SIZE")
+ if fetch_resume_size is not None:
+ fetch_resume_size = "".join(fetch_resume_size.split())
+ if not fetch_resume_size:
+ # If it's undefined or empty, silently use the default.
+ fetch_resume_size = fetch_resume_size_default
+ match = _fetch_resume_size_re.match(fetch_resume_size)
+ if match is None or \
+ (match.group(2).upper() not in _size_suffix_map):
+ writemsg(_("!!! Variable PORTAGE_FETCH_RESUME_MIN_SIZE"
+ " contains an unrecognized format: '%s'\n") % \
+ mysettings["PORTAGE_FETCH_RESUME_MIN_SIZE"], noiselevel=-1)
+ writemsg(_("!!! Using PORTAGE_FETCH_RESUME_MIN_SIZE "
+ "default value: %s\n") % fetch_resume_size_default,
+ noiselevel=-1)
+ fetch_resume_size = None
+ if fetch_resume_size is None:
+ fetch_resume_size = fetch_resume_size_default
+ match = _fetch_resume_size_re.match(fetch_resume_size)
+ fetch_resume_size = int(match.group(1)) * \
+ 2 ** _size_suffix_map[match.group(2).upper()]
+
+ # Behave like the package has RESTRICT="primaryuri" after a
+ # couple of checksum failures, to increase the probability
+ # of success before checksum_failure_max_tries is reached.
+ checksum_failure_primaryuri = 2
+ thirdpartymirrors = mysettings.thirdpartymirrors()
+
+ # In the background parallel-fetch process, it's safe to skip checksum
+ # verification of pre-existing files in $DISTDIR that have the correct
+ # file size. The parent process will verify their checksums prior to
+ # the unpack phase.
+
+ parallel_fetchonly = "PORTAGE_PARALLEL_FETCHONLY" in mysettings
+ if parallel_fetchonly:
+ fetchonly = 1
+
+ check_config_instance(mysettings)
+
+ custommirrors = grabdict(os.path.join(mysettings["PORTAGE_CONFIGROOT"],
+ CUSTOM_MIRRORS_FILE), recursive=1)
+
+ mymirrors=[]
+
+ if listonly or ("distlocks" not in features):
+ use_locks = 0
+
+ fetch_to_ro = 0
+ if "skiprocheck" in features:
+ fetch_to_ro = 1
+
+ if not os.access(mysettings["DISTDIR"],os.W_OK) and fetch_to_ro:
+ if use_locks:
+ writemsg(colorize("BAD",
+ _("!!! For fetching to a read-only filesystem, "
+ "locking should be turned off.\n")), noiselevel=-1)
+ writemsg(_("!!! This can be done by adding -distlocks to "
+ "FEATURES in /etc/portage/make.conf\n"), noiselevel=-1)
+# use_locks = 0
+
+ # local mirrors are always added
+ if "local" in custommirrors:
+ mymirrors += custommirrors["local"]
+
+ if restrict_mirror:
+ # We don't add any mirrors.
+ pass
+ else:
+ if try_mirrors:
+ mymirrors += [x.rstrip("/") for x in mysettings["GENTOO_MIRRORS"].split() if x]
+
+ hash_filter = _hash_filter(mysettings.get("PORTAGE_CHECKSUM_FILTER", ""))
+ if hash_filter.transparent:
+ hash_filter = None
+ skip_manifest = mysettings.get("EBUILD_SKIP_MANIFEST") == "1"
+ if skip_manifest:
+ allow_missing_digests = True
+ pkgdir = mysettings.get("O")
+ if digests is None and not (pkgdir is None or skip_manifest):
+ mydigests = mysettings.repositories.get_repo_for_location(
+ os.path.dirname(os.path.dirname(pkgdir))).load_manifest(
+ pkgdir, mysettings["DISTDIR"]).getTypeDigests("DIST")
+ elif digests is None or skip_manifest:
+ # no digests because fetch was not called for a specific package
+ mydigests = {}
+ else:
+ mydigests = digests
+
+ ro_distdirs = [x for x in \
+ shlex_split(mysettings.get("PORTAGE_RO_DISTDIRS", "")) \
+ if os.path.isdir(x)]
+
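+ # Mirror entries that are local filesystem paths (beginning with '/') are
+ # split out into fsmirrors; files found there are copied into DISTDIR
+ # instead of being downloaded.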
+ fsmirrors = []
+ for x in range(len(mymirrors)-1,-1,-1):
+ if mymirrors[x] and mymirrors[x][0]=='/':
+ fsmirrors += [mymirrors[x]]
+ del mymirrors[x]
+
+ restrict_fetch = "fetch" in restrict
+ force_mirror = "force-mirror" in features and not restrict_mirror
+ custom_local_mirrors = custommirrors.get("local", [])
+ if restrict_fetch:
+ # With fetch restriction, a normal uri may only be fetched from
+ # custom local mirrors (if available). A mirror:// uri may also
+ # be fetched from specific mirrors (effectively overriding fetch
+ # restriction, but only for specific mirrors).
+ locations = custom_local_mirrors
+ else:
+ locations = mymirrors
+
+ file_uri_tuples = []
+ # Check for 'items' attribute since OrderedDict is not a dict.
+ if hasattr(myuris, 'items'):
+ for myfile, uri_set in myuris.items():
+ for myuri in uri_set:
+ file_uri_tuples.append((myfile, myuri))
+ if not uri_set:
+ file_uri_tuples.append((myfile, None))
+ else:
+ for myuri in myuris:
+ if urlparse(myuri).scheme:
+ file_uri_tuples.append((os.path.basename(myuri), myuri))
+ else:
+ file_uri_tuples.append((os.path.basename(myuri), None))
+
+ filedict = OrderedDict()
+ primaryuri_dict = {}
+ thirdpartymirror_uris = {}
+ for myfile, myuri in file_uri_tuples:
+ if myfile not in filedict:
+ filedict[myfile]=[]
+ for y in range(0,len(locations)):
+ filedict[myfile].append(locations[y]+"/distfiles/"+myfile)
+ if myuri is None:
+ continue
+ if myuri[:9]=="mirror://":
+ eidx = myuri.find("/", 9)
+ if eidx != -1:
+ mirrorname = myuri[9:eidx]
+ path = myuri[eidx+1:]
+
+ # Try user-defined mirrors first
+ if mirrorname in custommirrors:
+ for cmirr in custommirrors[mirrorname]:
+ filedict[myfile].append(
+ cmirr.rstrip("/") + "/" + path)
+
+ # now try the official mirrors
+ if mirrorname in thirdpartymirrors:
+ uris = [locmirr.rstrip("/") + "/" + path \
+ for locmirr in thirdpartymirrors[mirrorname]]
+ random.shuffle(uris)
+ filedict[myfile].extend(uris)
+ thirdpartymirror_uris.setdefault(myfile, []).extend(uris)
+
+ if mirrorname not in custommirrors and \
+ mirrorname not in thirdpartymirrors:
+ writemsg(_("!!! No known mirror by the name: %s\n") % (mirrorname))
+ else:
+ writemsg(_("Invalid mirror definition in SRC_URI:\n"), noiselevel=-1)
+ writemsg(" %s\n" % (myuri), noiselevel=-1)
+ else:
+ if restrict_fetch or force_mirror:
+ # Only fetching from specific mirrors is allowed.
+ continue
+ primaryuris = primaryuri_dict.get(myfile)
+ if primaryuris is None:
+ primaryuris = []
+ primaryuri_dict[myfile] = primaryuris
+ primaryuris.append(myuri)
+
+ # Order primaryuri_dict values to match that in SRC_URI.
+ for uris in primaryuri_dict.values():
+ uris.reverse()
+
+ # Prefer thirdpartymirrors over normal mirrors in cases when
+ # the file does not yet exist on the normal mirrors.
+ for myfile, uris in thirdpartymirror_uris.items():
+ primaryuri_dict.setdefault(myfile, []).extend(uris)
+
+ # Now merge primaryuri values into filedict (includes mirrors
+ # explicitly referenced in SRC_URI).
+ if "primaryuri" in restrict:
+ for myfile, uris in filedict.items():
+ filedict[myfile] = primaryuri_dict.get(myfile, []) + uris
+ else:
+ for myfile in filedict:
+ filedict[myfile] += primaryuri_dict.get(myfile, [])
+
+ can_fetch=True
+
+ if listonly:
+ can_fetch = False
+
+ if can_fetch and not fetch_to_ro:
+ global _userpriv_test_write_file_cache
+ dirmode = 0o070
+ filemode = 0o60
+ modemask = 0o2
+ dir_gid = portage_gid
+ if "FAKED_MODE" in mysettings:
+ # When inside fakeroot, directories with portage's gid appear
+ # to have root's gid. Therefore, use root's gid instead of
+ # portage's gid to avoid spurious permissions adjustments
+ # when inside fakeroot.
+ dir_gid = 0
+ distdir_dirs = [""]
+ try:
+
+ for x in distdir_dirs:
+ mydir = os.path.join(mysettings["DISTDIR"], x)
+ write_test_file = os.path.join(
+ mydir, ".__portage_test_write__")
+
+ try:
+ st = os.stat(mydir)
+ except OSError:
+ st = None
+
+ if st is not None and stat.S_ISDIR(st.st_mode):
+ if not (userfetch or userpriv):
+ continue
+ if _userpriv_test_write_file(mysettings, write_test_file):
+ continue
+
+ _userpriv_test_write_file_cache.pop(write_test_file, None)
+ if ensure_dirs(mydir, gid=dir_gid, mode=dirmode, mask=modemask):
+ if st is None:
+ # The directory has just been created
+ # and therefore it must be empty.
+ continue
+ writemsg(_("Adjusting permissions recursively: '%s'\n") % mydir,
+ noiselevel=-1)
+ def onerror(e):
+ raise # bail out on the first error that occurs during recursion
+ if not apply_recursive_permissions(mydir,
+ gid=dir_gid, dirmode=dirmode, dirmask=modemask,
+ filemode=filemode, filemask=modemask, onerror=onerror):
+ raise OperationNotPermitted(
+ _("Failed to apply recursive permissions for the portage group."))
+ except PortageException as e:
+ if not os.path.isdir(mysettings["DISTDIR"]):
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ writemsg(_("!!! Directory Not Found: DISTDIR='%s'\n") % mysettings["DISTDIR"], noiselevel=-1)
+ writemsg(_("!!! Fetching will fail!\n"), noiselevel=-1)
+
+ if can_fetch and \
+ not fetch_to_ro and \
+ not os.access(mysettings["DISTDIR"], os.W_OK):
+ writemsg(_("!!! No write access to '%s'\n") % mysettings["DISTDIR"],
+ noiselevel=-1)
+ can_fetch = False
+
+ distdir_writable = can_fetch and not fetch_to_ro
+ failed_files = set()
+ restrict_fetch_msg = False
+ valid_hashes = set(get_valid_checksum_keys())
+ valid_hashes.discard("size")
+
+ for myfile in filedict:
+ """
+ fetched status
+ 0 nonexistent
+ 1 partially downloaded
+ 2 completely downloaded
+ """
+ fetched = 0
+
+ orig_digests = mydigests.get(myfile, {})
+
+ if not (allow_missing_digests or listonly):
+ verifiable_hash_types = set(orig_digests).intersection(valid_hashes)
+ if not verifiable_hash_types:
+ expected = " ".join(sorted(valid_hashes))
+ got = set(orig_digests)
+ got.discard("size")
+ got = " ".join(sorted(got))
+ reason = (_("Insufficient data for checksum verification"),
+ got, expected)
+ writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
+ noiselevel=-1)
+ writemsg(_("!!! Reason: %s\n") % reason[0],
+ noiselevel=-1)
+ writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \
+ (reason[1], reason[2]), noiselevel=-1)
+
+ if fetchonly:
+ failed_files.add(myfile)
+ continue
+ else:
+ return 0
+
+ size = orig_digests.get("size")
+ if size == 0:
+ # Zero-byte distfiles are always invalid, so discard their digests.
+ del mydigests[myfile]
+ orig_digests.clear()
+ size = None
+ pruned_digests = orig_digests
+ if parallel_fetchonly:
+ pruned_digests = {}
+ if size is not None:
+ pruned_digests["size"] = size
+
+ myfile_path = os.path.join(mysettings["DISTDIR"], myfile)
+ has_space = True
+ has_space_superuser = True
+ file_lock = None
+ if listonly:
+ writemsg_stdout("\n", noiselevel=-1)
+ else:
+ # check if there is enough space in DISTDIR to completely store myfile
+ # overestimate the filesize so we aren't bitten by FS overhead
+ vfs_stat = None
+ if size is not None and hasattr(os, "statvfs"):
+ try:
+ vfs_stat = os.statvfs(mysettings["DISTDIR"])
+ except OSError as e:
+ writemsg_level("!!! statvfs('%s'): %s\n" %
+ (mysettings["DISTDIR"], e),
+ noiselevel=-1, level=logging.ERROR)
+ del e
+
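+ # f_bavail counts blocks available to unprivileged users, while f_bfree
+ # also includes blocks reserved for the superuser, so has_space_superuser
+ # tracks whether root could still store the file.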
+ if vfs_stat is not None:
+ try:
+ mysize = os.stat(myfile_path).st_size
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+ mysize = 0
+ if (size - mysize + vfs_stat.f_bsize) >= \
+ (vfs_stat.f_bsize * vfs_stat.f_bavail):
+
+ if (size - mysize + vfs_stat.f_bsize) >= \
+ (vfs_stat.f_bsize * vfs_stat.f_bfree):
+ has_space_superuser = False
+
+ if not has_space_superuser:
+ has_space = False
+ elif secpass < 2:
+ has_space = False
+ elif userfetch:
+ has_space = False
+
+ if distdir_writable and use_locks:
+
+ lock_kwargs = {}
+ if fetchonly:
+ lock_kwargs["flags"] = os.O_NONBLOCK
+
+ try:
+ file_lock = lockfile(myfile_path,
+ wantnewlockfile=1, **lock_kwargs)
+ except TryAgain:
+ writemsg(_(">>> File '%s' is already locked by "
+ "another fetcher. Continuing...\n") % myfile,
+ noiselevel=-1)
+ continue
+ try:
+ if not listonly:
+
+ eout = EOutput()
+ eout.quiet = mysettings.get("PORTAGE_QUIET") == "1"
+ match, mystat = _check_distfile(
+ myfile_path, pruned_digests, eout, hash_filter=hash_filter)
+ if match:
+ # Skip permission adjustment for symlinks, since we don't
+ # want to modify anything outside of the primary DISTDIR,
+ # and symlinks typically point to PORTAGE_RO_DISTDIRS.
+ if distdir_writable and not os.path.islink(myfile_path):
+ try:
+ apply_secpass_permissions(myfile_path,
+ gid=portage_gid, mode=0o664, mask=0o2,
+ stat_cached=mystat)
+ except PortageException as e:
+ if not os.access(myfile_path, os.R_OK):
+ writemsg(_("!!! Failed to adjust permissions:"
+ " %s\n") % str(e), noiselevel=-1)
+ del e
+ continue
+
+ if distdir_writable and mystat is None:
+ # Remove broken symlinks if necessary.
+ try:
+ os.unlink(myfile_path)
+ except OSError:
+ pass
+
+ if mystat is not None:
+ if stat.S_ISDIR(mystat.st_mode):
+ writemsg_level(
+ _("!!! Unable to fetch file since "
+ "a directory is in the way: \n"
+ "!!! %s\n") % myfile_path,
+ level=logging.ERROR, noiselevel=-1)
+ return 0
+
+ if mystat.st_size == 0:
+ if distdir_writable:
+ try:
+ os.unlink(myfile_path)
+ except OSError:
+ pass
+ elif distdir_writable and size is not None:
+ if mystat.st_size < fetch_resume_size and \
+ mystat.st_size < size:
+ # If the file already exists and the size does not
+ # match the existing digests, it may be that the
+ # user is attempting to update the digest. In this
+ # case, the digestgen() function will advise the
+ # user to use `ebuild --force foo.ebuild manifest`
+ # in order to force the old digests to be replaced.
+ # Since the user may want to keep this file, rename
+ # it instead of deleting it.
+ writemsg(_(">>> Renaming distfile with size "
+ "%d (smaller than " "PORTAGE_FETCH_RESU"
+ "ME_MIN_SIZE)\n") % mystat.st_size)
+ temp_filename = \
+ _checksum_failure_temp_file(
+ mysettings["DISTDIR"], myfile)
+ writemsg_stdout(_("Refetching... "
+ "File renamed to '%s'\n\n") % \
+ temp_filename, noiselevel=-1)
+ elif mystat.st_size >= size:
+ temp_filename = \
+ _checksum_failure_temp_file(
+ mysettings["DISTDIR"], myfile)
+ writemsg_stdout(_("Refetching... "
+ "File renamed to '%s'\n\n") % \
+ temp_filename, noiselevel=-1)
+
+ if distdir_writable and ro_distdirs:
+ readonly_file = None
+ for x in ro_distdirs:
+ filename = os.path.join(x, myfile)
+ match, mystat = _check_distfile(
+ filename, pruned_digests, eout, hash_filter=hash_filter)
+ if match:
+ readonly_file = filename
+ break
+ if readonly_file is not None:
+ try:
+ os.unlink(myfile_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+ os.symlink(readonly_file, myfile_path)
+ continue
+
+ # this message is shown only after we know that
+ # the file is not already fetched
+ if not has_space:
+ writemsg(_("!!! Insufficient space to store %s in %s\n") % \
+ (myfile, mysettings["DISTDIR"]), noiselevel=-1)
+
+ if has_space_superuser:
+ writemsg(_("!!! Insufficient privileges to use "
+ "remaining space.\n"), noiselevel=-1)
+ if userfetch:
+ writemsg(_("!!! You may set FEATURES=\"-userfetch\""
+ " in /etc/portage/make.conf in order to fetch with\n"
+ "!!! superuser privileges.\n"), noiselevel=-1)
+
+ if fsmirrors and not os.path.exists(myfile_path) and has_space:
+ for mydir in fsmirrors:
+ mirror_file = os.path.join(mydir, myfile)
+ try:
+ shutil.copyfile(mirror_file, myfile_path)
+ writemsg(_("Local mirror has file: %s\n") % myfile)
+ break
+ except (IOError, OSError) as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+
+ try:
+ mystat = os.stat(myfile_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+ else:
+ # Skip permission adjustment for symlinks, since we don't
+ # want to modify anything outside of the primary DISTDIR,
+ # and symlinks typically point to PORTAGE_RO_DISTDIRS.
+ if not os.path.islink(myfile_path):
+ try:
+ apply_secpass_permissions(myfile_path,
+ gid=portage_gid, mode=0o664, mask=0o2,
+ stat_cached=mystat)
+ except PortageException as e:
+ if not os.access(myfile_path, os.R_OK):
+ writemsg(_("!!! Failed to adjust permissions:"
+ " %s\n") % (e,), noiselevel=-1)
+
+ # If the file is empty then it's obviously invalid. Remove
+ # the empty file and try to download if possible.
+ if mystat.st_size == 0:
+ if distdir_writable:
+ try:
+ os.unlink(myfile_path)
+ except EnvironmentError:
+ pass
+ elif myfile not in mydigests:
+ # We don't have a digest, but the file exists. We must
+ # assume that it is fully downloaded.
+ continue
+ else:
+ if (mydigests[myfile].get("size") is not None
+ and mystat.st_size < mydigests[myfile]["size"]
+ and not restrict_fetch):
+ fetched = 1 # Try to resume this download.
+ elif parallel_fetchonly and \
+ mystat.st_size == mydigests[myfile]["size"]:
+ eout = EOutput()
+ eout.quiet = \
+ mysettings.get("PORTAGE_QUIET") == "1"
+ eout.ebegin(
+ "%s size ;-)" % (myfile, ))
+ eout.eend(0)
+ continue
+ else:
+ digests = _filter_unaccelarated_hashes(mydigests[myfile])
+ if hash_filter is not None:
+ digests = _apply_hash_filter(digests, hash_filter)
+ verified_ok, reason = verify_all(myfile_path, digests)
+ if not verified_ok:
+ writemsg(_("!!! Previously fetched"
+ " file: '%s'\n") % myfile, noiselevel=-1)
+ writemsg(_("!!! Reason: %s\n") % reason[0],
+ noiselevel=-1)
+ writemsg(_("!!! Got: %s\n"
+ "!!! Expected: %s\n") % \
+ (reason[1], reason[2]), noiselevel=-1)
+ if reason[0] == _("Insufficient data for checksum verification"):
+ return 0
+ if distdir_writable:
+ temp_filename = \
+ _checksum_failure_temp_file(
+ mysettings["DISTDIR"], myfile)
+ writemsg_stdout(_("Refetching... "
+ "File renamed to '%s'\n\n") % \
+ temp_filename, noiselevel=-1)
+ else:
+ eout = EOutput()
+ eout.quiet = \
+ mysettings.get("PORTAGE_QUIET", None) == "1"
+ if digests:
+ digests = list(digests)
+ digests.sort()
+ eout.ebegin(
+ "%s %s ;-)" % (myfile, " ".join(digests)))
+ eout.eend(0)
+ continue # fetch any remaining files
+
+ # Create a reversed list since that is optimal for list.pop().
+ uri_list = filedict[myfile][:]
+ uri_list.reverse()
+ checksum_failure_count = 0
+ tried_locations = set()
+ while uri_list:
+ loc = uri_list.pop()
+ # Eliminate duplicates here in case we've switched to
+ # "primaryuri" mode on the fly due to a checksum failure.
+ if loc in tried_locations:
+ continue
+ tried_locations.add(loc)
+ if listonly:
+ writemsg_stdout(loc+" ", noiselevel=-1)
+ continue
+ # allow different fetchcommands per protocol
+ protocol = loc[0:loc.find("://")]
+
+ global_config_path = GLOBAL_CONFIG_PATH
+ if portage.const.EPREFIX:
+ global_config_path = os.path.join(portage.const.EPREFIX,
+ GLOBAL_CONFIG_PATH.lstrip(os.sep))
+
+ missing_file_param = False
+ fetchcommand_var = "FETCHCOMMAND_" + protocol.upper()
+ fetchcommand = mysettings.get(fetchcommand_var)
+ if fetchcommand is None:
+ fetchcommand_var = "FETCHCOMMAND"
+ fetchcommand = mysettings.get(fetchcommand_var)
+ if fetchcommand is None:
+ writemsg_level(
+ _("!!! %s is unset. It should "
+ "have been defined in\n!!! %s/make.globals.\n") \
+ % (fetchcommand_var, global_config_path),
+ level=logging.ERROR, noiselevel=-1)
+ return 0
+ if "${FILE}" not in fetchcommand:
+ writemsg_level(
+ _("!!! %s does not contain the required ${FILE}"
+ " parameter.\n") % fetchcommand_var,
+ level=logging.ERROR, noiselevel=-1)
+ missing_file_param = True
+
+ resumecommand_var = "RESUMECOMMAND_" + protocol.upper()
+ resumecommand = mysettings.get(resumecommand_var)
+ if resumecommand is None:
+ resumecommand_var = "RESUMECOMMAND"
+ resumecommand = mysettings.get(resumecommand_var)
+ if resumecommand is None:
+ writemsg_level(
+ _("!!! %s is unset. It should "
+ "have been defined in\n!!! %s/make.globals.\n") \
+ % (resumecommand_var, global_config_path),
+ level=logging.ERROR, noiselevel=-1)
+ return 0
+ if "${FILE}" not in resumecommand:
+ writemsg_level(
+ _("!!! %s does not contain the required ${FILE}"
+ " parameter.\n") % resumecommand_var,
+ level=logging.ERROR, noiselevel=-1)
+ missing_file_param = True
+
+ if missing_file_param:
+ writemsg_level(
+ _("!!! Refer to the make.conf(5) man page for "
+ "information about how to\n!!! correctly specify "
+ "FETCHCOMMAND and RESUMECOMMAND.\n"),
+ level=logging.ERROR, noiselevel=-1)
+ if myfile != os.path.basename(loc):
+ return 0
+
+ if not can_fetch:
+ if fetched != 2:
+ try:
+ mysize = os.stat(myfile_path).st_size
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+ mysize = 0
+
+ if mysize == 0:
+ writemsg(_("!!! File %s isn't fetched but unable to get it.\n") % myfile,
+ noiselevel=-1)
+ elif size is None or size > mysize:
+ writemsg(_("!!! File %s isn't fully fetched, but unable to complete it\n") % myfile,
+ noiselevel=-1)
+ else:
+ writemsg(_("!!! File %s is incorrect size, "
+ "but unable to retry.\n") % myfile, noiselevel=-1)
+ return 0
+ else:
+ continue
+
+ if fetched != 2 and has_space:
+ #we either need to resume or start the download
+ if fetched == 1:
+ try:
+ mystat = os.stat(myfile_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+ fetched = 0
+ else:
+ if mystat.st_size < fetch_resume_size:
+ writemsg(_(">>> Deleting distfile with size "
+ "%d (smaller than " "PORTAGE_FETCH_RESU"
+ "ME_MIN_SIZE)\n") % mystat.st_size)
+ try:
+ os.unlink(myfile_path)
+ except OSError as e:
+ if e.errno not in \
+ (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+ fetched = 0
+ if fetched == 1:
+ #resume mode:
+ writemsg(_(">>> Resuming download...\n"))
+ locfetch=resumecommand
+ command_var = resumecommand_var
+ else:
+ #normal mode:
+ locfetch=fetchcommand
+ command_var = fetchcommand_var
+ writemsg_stdout(_(">>> Downloading '%s'\n") % \
+ _hide_url_passwd(loc))
+ variables = {
+ "URI": loc,
+ "FILE": myfile
+ }
+
+ for k in ("DISTDIR", "PORTAGE_SSH_OPTS"):
+ v = mysettings.get(k)
+ if v is not None:
+ variables[k] = v
+
+ myfetch = shlex_split(locfetch)
+ myfetch = [varexpand(x, mydict=variables) for x in myfetch]
+ myret = -1
+ try:
+
+ myret = _spawn_fetch(mysettings, myfetch)
+
+ finally:
+ try:
+ apply_secpass_permissions(myfile_path,
+ gid=portage_gid, mode=0o664, mask=0o2)
+ except FileNotFound:
+ pass
+ except PortageException as e:
+ if not os.access(myfile_path, os.R_OK):
+ writemsg(_("!!! Failed to adjust permissions:"
+ " %s\n") % str(e), noiselevel=-1)
+ del e
+
+ # If the file is empty then it's obviously invalid. Don't
+ # trust the return value from the fetcher. Remove the
+ # empty file and try to download again.
+ try:
+ if os.stat(myfile_path).st_size == 0:
+ os.unlink(myfile_path)
+ fetched = 0
+ continue
+ except EnvironmentError:
+ pass
+
+ if mydigests is not None and myfile in mydigests:
+ try:
+ mystat = os.stat(myfile_path)
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ del e
+ fetched = 0
+ else:
+
+ if stat.S_ISDIR(mystat.st_mode):
+ # This can happen if FETCHCOMMAND erroneously
+ # contains wget's -P option where it should
+ # instead have -O.
+ writemsg_level(
+ _("!!! The command specified in the "
+ "%s variable appears to have\n!!! "
+ "created a directory instead of a "
+ "normal file.\n") % command_var,
+ level=logging.ERROR, noiselevel=-1)
+ writemsg_level(
+ _("!!! Refer to the make.conf(5) "
+ "man page for information about how "
+ "to\n!!! correctly specify "
+ "FETCHCOMMAND and RESUMECOMMAND.\n"),
+ level=logging.ERROR, noiselevel=-1)
+ return 0
+
+ # No exception means the file exists. Let digestcheck() report
+ # appropriately for size or checksum errors.
+
+ # If the fetcher reported success and the file is
+ # too small, it's probably because the digest is
+ # bad (upstream changed the distfile). In this
+ # case we don't want to attempt to resume. Show a
+ # digest verification failure so that the user gets
+ # a clue about what just happened.
+ if myret != os.EX_OK and \
+ mystat.st_size < mydigests[myfile]["size"]:
+ # Fetch failed... Try the next one... Kill 404 files though.
+ if (mystat[stat.ST_SIZE]<100000) and (len(myfile)>4) and not ((myfile[-5:]==".html") or (myfile[-4:]==".htm")):
+ html404=re.compile("<title>.*(not found|404).*</title>",re.I|re.M)
+ with io.open(
+ _unicode_encode(myfile_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='replace'
+ ) as f:
+ if html404.search(f.read()):
+ try:
+ os.unlink(mysettings["DISTDIR"]+"/"+myfile)
+ writemsg(_(">>> Deleting invalid distfile. (Improper 404 redirect from server.)\n"))
+ fetched = 0
+ continue
+ except (IOError, OSError):
+ pass
+ fetched = 1
+ continue
+ if True:
+ # File is the correct size--check the checksums for the fetched
+ # file NOW, for those users who don't have a stable/continuous
+ # net connection. This way we have a chance to try to download
+ # from another mirror...
+ digests = _filter_unaccelarated_hashes(mydigests[myfile])
+ if hash_filter is not None:
+ digests = _apply_hash_filter(digests, hash_filter)
+ verified_ok, reason = verify_all(myfile_path, digests)
+ if not verified_ok:
+ writemsg(_("!!! Fetched file: %s VERIFY FAILED!\n") % myfile,
+ noiselevel=-1)
+ writemsg(_("!!! Reason: %s\n") % reason[0],
+ noiselevel=-1)
+ writemsg(_("!!! Got: %s\n!!! Expected: %s\n") % \
+ (reason[1], reason[2]), noiselevel=-1)
+ if reason[0] == _("Insufficient data for checksum verification"):
+ return 0
+ temp_filename = \
+ _checksum_failure_temp_file(
+ mysettings["DISTDIR"], myfile)
+ writemsg_stdout(_("Refetching... "
+ "File renamed to '%s'\n\n") % \
+ temp_filename, noiselevel=-1)
+ fetched=0
+ checksum_failure_count += 1
+ if checksum_failure_count == \
+ checksum_failure_primaryuri:
+ # Switch to "primaryuri" mode in order
+ # to increase the probability
+ # of success.
+ primaryuris = \
+ primaryuri_dict.get(myfile)
+ if primaryuris:
+ uri_list.extend(
+ reversed(primaryuris))
+ if checksum_failure_count >= \
+ checksum_failure_max_tries:
+ break
+ else:
+ eout = EOutput()
+ eout.quiet = mysettings.get("PORTAGE_QUIET", None) == "1"
+ if digests:
+ eout.ebegin("%s %s ;-)" % \
+ (myfile, " ".join(sorted(digests))))
+ eout.eend(0)
+ fetched=2
+ break
+ else:
+ if not myret:
+ fetched=2
+ break
+ elif mydigests!=None:
+ writemsg(_("No digest file available and download failed.\n\n"),
+ noiselevel=-1)
+ finally:
+ if use_locks and file_lock:
+ unlockfile(file_lock)
+ file_lock = None
+
+ if listonly:
+ writemsg_stdout("\n", noiselevel=-1)
+ if fetched != 2:
+ if restrict_fetch and not restrict_fetch_msg:
+ restrict_fetch_msg = True
+ msg = _("\n!!! %s/%s"
+ " has fetch restriction turned on.\n"
+ "!!! This probably means that this "
+ "ebuild's files must be downloaded\n"
+ "!!! manually. See the comments in"
+ " the ebuild for more information.\n\n") % \
+ (mysettings["CATEGORY"], mysettings["PF"])
+ writemsg_level(msg,
+ level=logging.ERROR, noiselevel=-1)
+ elif restrict_fetch:
+ pass
+ elif listonly:
+ pass
+ elif not filedict[myfile]:
+ writemsg(_("Warning: No mirrors available for file"
+ " '%s'\n") % (myfile), noiselevel=-1)
+ else:
+ writemsg(_("!!! Couldn't download '%s'. Aborting.\n") % myfile,
+ noiselevel=-1)
+
+ if listonly:
+ failed_files.add(myfile)
+ continue
+ elif fetchonly:
+ failed_files.add(myfile)
+ continue
+ return 0
+ if failed_files:
+ return 0
+ return 1
diff --git a/lib/portage/package/ebuild/getmaskingreason.py b/lib/portage/package/ebuild/getmaskingreason.py
new file mode 100644
index 000000000..1e4ed21ce
--- /dev/null
+++ b/lib/portage/package/ebuild/getmaskingreason.py
@@ -0,0 +1,126 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['getmaskingreason']
+
+import portage
+from portage import os
+from portage.const import USER_CONFIG_PATH
+from portage.dep import Atom, match_from_list
+from portage.exception import InvalidAtom
+from portage.localization import _
+from portage.repository.config import _gen_valid_repo
+from portage.util import grablines, normalize_path
+from portage.versions import catpkgsplit, _pkg_str
+
+def getmaskingreason(mycpv, metadata=None, settings=None,
+ portdb=None, return_location=False, myrepo=None):
+ """
+ If specified, the myrepo argument is assumed to be valid. This
+ should be a safe assumption since portdbapi methods always
+ return valid repo names and valid "repository" metadata from
+ aux_get.
+ """
+ if settings is None:
+ settings = portage.settings
+ if portdb is None:
+ portdb = portage.portdb
+ mysplit = catpkgsplit(mycpv)
+ if not mysplit:
+ raise ValueError(_("invalid CPV: %s") % mycpv)
+
+ if metadata is None:
+ db_keys = list(portdb._aux_cache_keys)
+ try:
+ metadata = dict(zip(db_keys,
+ portdb.aux_get(mycpv, db_keys, myrepo=myrepo)))
+ except KeyError:
+ if not portdb.cpv_exists(mycpv):
+ raise
+ else:
+ if myrepo is None:
+ myrepo = _gen_valid_repo(metadata["repository"])
+
+ elif myrepo is None:
+ myrepo = metadata.get("repository")
+ if myrepo is not None:
+ myrepo = _gen_valid_repo(metadata["repository"])
+
+ if metadata is not None and \
+ not portage.eapi_is_supported(metadata["EAPI"]):
+ # Return early since otherwise we might produce invalid
+ # results given that the EAPI is not supported. Also,
+ # metadata is mostly useless in this case since it doesn't
+ # contain essential things like SLOT.
+ if return_location:
+ return (None, None)
+ else:
+ return None
+
+ # Sometimes we can't access SLOT or repository due to corruption.
+ pkg = mycpv
+ try:
+ pkg.slot
+ except AttributeError:
+ pkg = _pkg_str(mycpv, metadata=metadata, repo=myrepo)
+
+ cpv_slot_list = [pkg]
+
+ mycp = pkg.cp
+
+ locations = []
+ if pkg.repo in settings.repositories:
+ for repo in settings.repositories[pkg.repo].masters + (settings.repositories[pkg.repo],):
+ locations.append(os.path.join(repo.location, "profiles"))
+ locations.extend(settings.profiles)
+ locations.append(os.path.join(settings["PORTAGE_CONFIGROOT"],
+ USER_CONFIG_PATH))
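+ # Reverse so that the user configuration directory is scanned before
+ # profile and repository package.mask files.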
+ locations.reverse()
+ pmasklists = []
+ for profile in locations:
+ pmask_filename = os.path.join(profile, "package.mask")
+ node = None
+ for l, recursive_filename in grablines(pmask_filename,
+ recursive=1, remember_source_file=True):
+ if node is None or node[0] != recursive_filename:
+ node = (recursive_filename, [])
+ pmasklists.append(node)
+ node[1].append(l)
+
+ pmaskdict = settings._mask_manager._pmaskdict
+ if mycp in pmaskdict:
+ for x in pmaskdict[mycp]:
+ if match_from_list(x, cpv_slot_list):
+ x = x.without_repo
+ for pmask in pmasklists:
+ comment = ""
+ comment_valid = -1
+ pmask_filename = pmask[0]
+ for i in range(len(pmask[1])):
+ l = pmask[1][i].strip()
+ try:
+ l_atom = Atom(l, allow_repo=True,
+ allow_wildcard=True).without_repo
+ except InvalidAtom:
+ l_atom = None
+ if l == "":
+ comment = ""
+ comment_valid = -1
+ elif l[0] == "#":
+ comment += (l+"\n")
+ comment_valid = i + 1
+ elif l_atom == x:
+ if comment_valid != i:
+ comment = ""
+ if return_location:
+ return (comment, pmask_filename)
+ else:
+ return comment
+ elif comment_valid != -1:
+ # Apparently this comment applies to multiple masks, so
+ # it remains valid until a blank line is encountered.
+ comment_valid += 1
+ if return_location:
+ return (None, None)
+ else:
+ return None
diff --git a/lib/portage/package/ebuild/getmaskingstatus.py b/lib/portage/package/ebuild/getmaskingstatus.py
new file mode 100644
index 000000000..4b9e588f7
--- /dev/null
+++ b/lib/portage/package/ebuild/getmaskingstatus.py
@@ -0,0 +1,192 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = ['getmaskingstatus']
+
+import sys
+
+import portage
+from portage import eapi_is_supported, _eapi_is_deprecated
+from portage.exception import InvalidDependString
+from portage.localization import _
+from portage.package.ebuild.config import config
+from portage.versions import catpkgsplit, _pkg_str
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+class _UnmaskHint(object):
+
+ __slots__ = ('key', 'value')
+
+ def __init__(self, key, value):
+ self.key = key
+ self.value = value
+
+class _MaskReason(object):
+
+ __slots__ = ('category', 'message', 'unmask_hint')
+
+ def __init__(self, category, message, unmask_hint=None):
+ self.category = category
+ self.message = message
+ self.unmask_hint = unmask_hint
+
+def getmaskingstatus(mycpv, settings=None, portdb=None, myrepo=None):
+ if settings is None:
+ settings = config(clone=portage.settings)
+ if portdb is None:
+ portdb = portage.portdb
+
+ return [mreason.message for \
+ mreason in _getmaskingstatus(mycpv, settings, portdb,myrepo)]
+
+def _getmaskingstatus(mycpv, settings, portdb, myrepo=None):
+
+ metadata = None
+ installed = False
+ if not isinstance(mycpv, basestring):
+ # emerge passed in a Package instance
+ pkg = mycpv
+ mycpv = pkg.cpv
+ metadata = pkg._metadata
+ installed = pkg.installed
+
+ if metadata is None:
+ db_keys = list(portdb._aux_cache_keys)
+ try:
+ metadata = dict(zip(db_keys, portdb.aux_get(mycpv, db_keys, myrepo=myrepo)))
+ except KeyError:
+ if not portdb.cpv_exists(mycpv):
+ raise
+ return [_MaskReason("corruption", "corruption")]
+ if "?" in metadata["LICENSE"]:
+ settings.setcpv(mycpv, mydb=metadata)
+ metadata["USE"] = settings["PORTAGE_USE"]
+ else:
+ metadata["USE"] = ""
+
+ try:
+ mycpv.slot
+ except AttributeError:
+ try:
+ mycpv = _pkg_str(mycpv, metadata=metadata, settings=settings)
+ except portage.exception.InvalidData:
+ raise ValueError(_("invalid CPV: %s") % mycpv)
+
+ rValue = []
+
+ # package.mask checking
+ if settings._getMaskAtom(mycpv, metadata):
+ rValue.append(_MaskReason("package.mask", "package.mask", _UnmaskHint("p_mask", None)))
+
+ # keywords checking
+ eapi = metadata["EAPI"]
+ mygroups = settings._getKeywords(mycpv, metadata)
+ licenses = metadata["LICENSE"]
+ properties = metadata["PROPERTIES"]
+ restrict = metadata["RESTRICT"]
+ if not eapi_is_supported(eapi):
+ return [_MaskReason("EAPI", "EAPI %s" % eapi)]
+ elif _eapi_is_deprecated(eapi) and not installed:
+ return [_MaskReason("EAPI", "EAPI %s" % eapi)]
+ egroups = settings.configdict["backupenv"].get(
+ "ACCEPT_KEYWORDS", "").split()
+ global_accept_keywords = settings.get("ACCEPT_KEYWORDS", "")
+ pgroups = global_accept_keywords.split()
+ myarch = settings["ARCH"]
+ if pgroups and myarch not in pgroups:
+ """For operating systems other than Linux, ARCH is not necessarily a
+ valid keyword."""
+ myarch = pgroups[0].lstrip("~")
+
+ # NOTE: This logic is copied from KeywordsManager.getMissingKeywords().
+ unmaskgroups = settings._keywords_manager.getPKeywords(mycpv,
+ metadata["SLOT"], metadata["repository"], global_accept_keywords)
+ pgroups.extend(unmaskgroups)
+ if unmaskgroups or egroups:
+ pgroups = settings._keywords_manager._getEgroups(egroups, pgroups)
+ else:
+ pgroups = set(pgroups)
+
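+ # kmask starts out as "missing" and is cleared as soon as an accepted
+ # keyword group matches the package's KEYWORDS; the special tokens '*',
+ # '~*', '-<arch>' and '~<arch>' appearing in KEYWORDS are handled in the
+ # second loop below.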
+ kmask = "missing"
+ kmask_hint = None
+
+ if '**' in pgroups:
+ kmask = None
+ else:
+ for keyword in pgroups:
+ if keyword in mygroups:
+ kmask = None
+ break
+
+ if kmask:
+ for gp in mygroups:
+ if gp=="*":
+ kmask=None
+ break
+ elif gp == "~*":
+ for x in pgroups:
+ if x[:1] == "~":
+ kmask = None
+ break
+ if kmask is None:
+ break
+ elif gp=="-"+myarch and myarch in pgroups:
+ kmask="-"+myarch
+ break
+ elif gp=="~"+myarch and myarch in pgroups:
+ kmask="~"+myarch
+ kmask_hint = _UnmaskHint("unstable keyword", kmask)
+ break
+
+ if kmask == "missing":
+ kmask_hint = _UnmaskHint("unstable keyword", "**")
+
+ try:
+ missing_licenses = settings._getMissingLicenses(mycpv, metadata)
+ if missing_licenses:
+ allowed_tokens = set(["||", "(", ")"])
+ allowed_tokens.update(missing_licenses)
+ license_split = licenses.split()
+ license_split = [x for x in license_split \
+ if x in allowed_tokens]
+ msg = license_split[:]
+ msg.append("license(s)")
+ rValue.append(_MaskReason("LICENSE", " ".join(msg), _UnmaskHint("license", set(missing_licenses))))
+ except portage.exception.InvalidDependString as e:
+ rValue.append(_MaskReason("invalid", "LICENSE: "+str(e)))
+
+ try:
+ missing_properties = settings._getMissingProperties(mycpv, metadata)
+ if missing_properties:
+ allowed_tokens = set(["||", "(", ")"])
+ allowed_tokens.update(missing_properties)
+ properties_split = properties.split()
+ properties_split = [x for x in properties_split \
+ if x in allowed_tokens]
+ msg = properties_split[:]
+ msg.append("properties")
+ rValue.append(_MaskReason("PROPERTIES", " ".join(msg)))
+ except portage.exception.InvalidDependString as e:
+ rValue.append(_MaskReason("invalid", "PROPERTIES: "+str(e)))
+
+ try:
+ missing_restricts = settings._getMissingRestrict(mycpv, metadata)
+ if missing_restricts:
+ msg = list(missing_restricts)
+ msg.append("in RESTRICT")
+ rValue.append(_MaskReason("RESTRICT", " ".join(msg)))
+ except InvalidDependString as e:
+ rValue.append(_MaskReason("invalid", "RESTRICT: %s" % (e,)))
+
+ # Only show KEYWORDS masks for installed packages
+ # if they're not masked for any other reason.
+ if kmask and (not installed or not rValue):
+ rValue.append(_MaskReason("KEYWORDS",
+ kmask + " keyword", unmask_hint=kmask_hint))
+
+ return rValue
diff --git a/lib/portage/package/ebuild/prepare_build_dirs.py b/lib/portage/package/ebuild/prepare_build_dirs.py
new file mode 100644
index 000000000..e53ccd0fb
--- /dev/null
+++ b/lib/portage/package/ebuild/prepare_build_dirs.py
@@ -0,0 +1,443 @@
+# Copyright 2010-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = ['prepare_build_dirs']
+
+import errno
+import gzip
+import stat
+import time
+
+import portage
+from portage import os, shutil, _encodings, _unicode_encode, _unicode_decode
+from portage.data import portage_gid, portage_uid, secpass
+from portage.exception import DirectoryNotFound, FileNotFound, \
+ OperationNotPermitted, PermissionDenied, PortageException
+from portage.localization import _
+from portage.output import colorize
+from portage.util import apply_recursive_permissions, \
+ apply_secpass_permissions, ensure_dirs, normalize_path, writemsg
+from portage.util.install_mask import _raise_exc
+from portage.const import EPREFIX
+
+def prepare_build_dirs(myroot=None, settings=None, cleanup=False):
+ """
+ The myroot parameter is ignored.
+ """
+ myroot = None
+
+ if settings is None:
+ raise TypeError("settings argument is required")
+
+ mysettings = settings
+ clean_dirs = [mysettings["HOME"]]
+
+ # We enable cleanup when we want to make sure old cruft (such as the old
+ # environment) doesn't interfere with the current phase.
+ if cleanup and 'keeptemp' not in mysettings.features:
+ clean_dirs.append(mysettings["T"])
+
+ for clean_dir in clean_dirs:
+ try:
+ shutil.rmtree(clean_dir)
+ except OSError as oe:
+ if errno.ENOENT == oe.errno:
+ pass
+ elif errno.EPERM == oe.errno:
+ writemsg("%s\n" % oe, noiselevel=-1)
+ writemsg(_("Operation Not Permitted: rmtree('%s')\n") % \
+ clean_dir, noiselevel=-1)
+ return 1
+ else:
+ # Wrap with PermissionDenied if appropriate, so that callers
+ # display a short error message without a traceback.
+ _raise_exc(oe)
+
+ def makedirs(dir_path):
+ try:
+ os.makedirs(dir_path)
+ except OSError as oe:
+ if errno.EEXIST == oe.errno:
+ pass
+ elif errno.EPERM == oe.errno:
+ writemsg("%s\n" % oe, noiselevel=-1)
+ writemsg(_("Operation Not Permitted: makedirs('%s')\n") % \
+ dir_path, noiselevel=-1)
+ return False
+ else:
+ raise
+ return True
+
+ mysettings["PKG_LOGDIR"] = os.path.join(mysettings["T"], "logging")
+
+ mydirs = [os.path.dirname(mysettings["PORTAGE_BUILDDIR"])]
+ mydirs.append(os.path.dirname(mydirs[-1]))
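+ # mydirs holds the two directories directly above PORTAGE_BUILDDIR; both
+ # are created below with portage ownership and mode 0700.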
+
+ try:
+ for mydir in mydirs:
+ ensure_dirs(mydir)
+ try:
+ apply_secpass_permissions(mydir,
+ gid=portage_gid, uid=portage_uid, mode=0o700, mask=0)
+ except PortageException:
+ if not os.path.isdir(mydir):
+ raise
+ for dir_key in ("PORTAGE_BUILDDIR", "HOME", "PKG_LOGDIR", "T"):
+ ensure_dirs(mysettings[dir_key], mode=0o755)
+ apply_secpass_permissions(mysettings[dir_key],
+ uid=portage_uid, gid=portage_gid)
+ except PermissionDenied as e:
+ writemsg(_("Permission Denied: %s\n") % str(e), noiselevel=-1)
+ return 1
+ except OperationNotPermitted as e:
+ writemsg(_("Operation Not Permitted: %s\n") % str(e), noiselevel=-1)
+ return 1
+ except FileNotFound as e:
+ writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)
+ return 1
+
+ # Reset state for things like noauto and keepwork in FEATURES.
+ for x in ('.die_hooks',):
+ try:
+ os.unlink(os.path.join(mysettings['PORTAGE_BUILDDIR'], x))
+ except OSError:
+ pass
+
+ _prepare_workdir(mysettings)
+ if mysettings.get("EBUILD_PHASE") not in ("info", "fetch", "pretend"):
+ # Avoid spurious permissions adjustments when fetching with
+ # a temporary PORTAGE_TMPDIR setting (for fetchonly).
+ _prepare_features_dirs(mysettings)
+
+def _adjust_perms_msg(settings, msg):
+
+ def write(msg):
+ writemsg(msg, noiselevel=-1)
+
+ background = settings.get("PORTAGE_BACKGROUND") == "1"
+ log_path = settings.get("PORTAGE_LOG_FILE")
+ log_file = None
+ log_file_real = None
+
+ if background and log_path is not None:
+ try:
+ log_file = open(_unicode_encode(log_path,
+ encoding=_encodings['fs'], errors='strict'), mode='ab')
+ log_file_real = log_file
+ except IOError:
+ def write(msg):
+ pass
+ else:
+ if log_path.endswith('.gz'):
+ log_file = gzip.GzipFile(filename='',
+ mode='ab', fileobj=log_file)
+ def write(msg):
+ log_file.write(_unicode_encode(msg))
+ log_file.flush()
+
+ try:
+ write(msg)
+ finally:
+ if log_file is not None:
+ log_file.close()
+ if log_file_real is not log_file:
+ log_file_real.close()
+
+def _prepare_features_dirs(mysettings):
+
+ # Use default ABI libdir in accordance with bug #355283.
+ libdir = None
+ default_abi = mysettings.get("DEFAULT_ABI")
+ if default_abi:
+ libdir = mysettings.get("LIBDIR_" + default_abi)
+ if not libdir:
+ libdir = "lib"
+
+ features_dirs = {
+ "ccache":{
+ "basedir_var":"CCACHE_DIR",
+ "default_dir":os.path.join(mysettings["PORTAGE_TMPDIR"], "ccache"),
+ "always_recurse":False},
+ "distcc":{
+ "basedir_var":"DISTCC_DIR",
+ "default_dir":os.path.join(mysettings["BUILD_PREFIX"], ".distcc"),
+ "subdirs":("lock", "state"),
+ "always_recurse":True}
+ }
+ dirmode = 0o2070
+ filemode = 0o60
+ modemask = 0o2
+ restrict = mysettings.get("PORTAGE_RESTRICT","").split()
+ droppriv = secpass >= 2 and \
+ "userpriv" in mysettings.features and \
+ "userpriv" not in restrict
+ for myfeature, kwargs in features_dirs.items():
+ if myfeature in mysettings.features:
+ failure = False
+ basedir = mysettings.get(kwargs["basedir_var"])
+ if basedir is None or not basedir.strip():
+ basedir = kwargs["default_dir"]
+ mysettings[kwargs["basedir_var"]] = basedir
+ try:
+ mydirs = [mysettings[kwargs["basedir_var"]]]
+ if "subdirs" in kwargs:
+ for subdir in kwargs["subdirs"]:
+ mydirs.append(os.path.join(basedir, subdir))
+ for mydir in mydirs:
+ modified = ensure_dirs(mydir)
+ # Generally, we only want to apply permissions for
+ # initial creation. Otherwise, we don't know exactly what
+					# permissions the user wants, so we should leave them as-is.
+ droppriv_fix = False
+ if droppriv:
+ st = os.stat(mydir)
+ if st.st_gid != portage_gid or \
+ not dirmode == (stat.S_IMODE(st.st_mode) & dirmode):
+ droppriv_fix = True
+ if not droppriv_fix:
+ # Check permissions of files in the directory.
+ for filename in os.listdir(mydir):
+ try:
+ subdir_st = os.lstat(
+ os.path.join(mydir, filename))
+ except OSError:
+ continue
+ if subdir_st.st_gid != portage_gid or \
+ ((stat.S_ISDIR(subdir_st.st_mode) and \
+ not dirmode == (stat.S_IMODE(subdir_st.st_mode) & dirmode))):
+ droppriv_fix = True
+ break
+
+ if droppriv_fix:
+ _adjust_perms_msg(mysettings,
+ colorize("WARN", " * ") + \
+ _("Adjusting permissions "
+ "for FEATURES=userpriv: '%s'\n") % mydir)
+ elif modified:
+ _adjust_perms_msg(mysettings,
+ colorize("WARN", " * ") + \
+ _("Adjusting permissions "
+ "for FEATURES=%s: '%s'\n") % (myfeature, mydir))
+
+ if modified or kwargs["always_recurse"] or droppriv_fix:
+ def onerror(e):
+ raise # The feature is disabled if a single error
+ # occurs during permissions adjustment.
+ if not apply_recursive_permissions(mydir,
+ gid=portage_gid, dirmode=dirmode, dirmask=modemask,
+ filemode=filemode, filemask=modemask, onerror=onerror):
+ raise OperationNotPermitted(
+ _("Failed to apply recursive permissions for the portage group."))
+
+ except DirectoryNotFound as e:
+ failure = True
+ writemsg(_("\n!!! Directory does not exist: '%s'\n") % \
+ (e,), noiselevel=-1)
+ writemsg(_("!!! Disabled FEATURES='%s'\n") % myfeature,
+ noiselevel=-1)
+
+ except PortageException as e:
+ failure = True
+ writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
+ writemsg(_("!!! Failed resetting perms on %s='%s'\n") % \
+ (kwargs["basedir_var"], basedir), noiselevel=-1)
+ writemsg(_("!!! Disabled FEATURES='%s'\n") % myfeature,
+ noiselevel=-1)
+
+ if failure:
+ mysettings.features.remove(myfeature)
+ time.sleep(5)
+
+def _prepare_workdir(mysettings):
+ workdir_mode = 0o700
+ try:
+ mode = mysettings["PORTAGE_WORKDIR_MODE"]
+ if mode.isdigit():
+ parsed_mode = int(mode, 8)
+ elif mode == "":
+ raise KeyError()
+ else:
+ raise ValueError()
+ if parsed_mode & 0o7777 != parsed_mode:
+ raise ValueError("Invalid file mode: %s" % mode)
+ else:
+ workdir_mode = parsed_mode
+ except KeyError as e:
+ writemsg(_("!!! PORTAGE_WORKDIR_MODE is unset, using %s.\n") % oct(workdir_mode))
+ except ValueError as e:
+ if len(str(e)) > 0:
+ writemsg("%s\n" % e)
+ writemsg(_("!!! Unable to parse PORTAGE_WORKDIR_MODE='%s', using %s.\n") % \
+ (mysettings["PORTAGE_WORKDIR_MODE"], oct(workdir_mode)))
+ mysettings["PORTAGE_WORKDIR_MODE"] = oct(workdir_mode).replace('o', '')
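+	# For example, a hypothetical PORTAGE_WORKDIR_MODE="2770" is parsed as
+	# octal 0o2770 above, while a non-numeric value triggers the ValueError
+	# fallback to the 0o700 default.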
+ try:
+ apply_secpass_permissions(mysettings["WORKDIR"],
+ uid=portage_uid, gid=portage_gid, mode=workdir_mode)
+ except FileNotFound:
+ pass # ebuild.sh will create it
+
+ if mysettings.get("PORT_LOGDIR", "") == "":
+ while "PORT_LOGDIR" in mysettings:
+ del mysettings["PORT_LOGDIR"]
+ if "PORT_LOGDIR" in mysettings:
+ try:
+ modified = ensure_dirs(mysettings["PORT_LOGDIR"])
+ if modified:
+ # Only initialize group/mode if the directory doesn't
+ # exist, so that we don't override permissions if they
+ # were previously set by the administrator.
+ # NOTE: These permissions should be compatible with our
+ # default logrotate config as discussed in bug 374287.
+ apply_secpass_permissions(mysettings["PORT_LOGDIR"],
+ uid=portage_uid, gid=portage_gid, mode=0o2770)
+ except PortageException as e:
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ writemsg(_("!!! Permission issues with PORT_LOGDIR='%s'\n") % \
+ mysettings["PORT_LOGDIR"], noiselevel=-1)
+ writemsg(_("!!! Disabling logging.\n"), noiselevel=-1)
+ while "PORT_LOGDIR" in mysettings:
+ del mysettings["PORT_LOGDIR"]
+
+ compress_log_ext = ''
+ if 'compress-build-logs' in mysettings.features:
+ compress_log_ext = '.gz'
+
+ logdir_subdir_ok = False
+ if "PORT_LOGDIR" in mysettings and \
+ os.access(mysettings["PORT_LOGDIR"], os.W_OK):
+ logdir = normalize_path(mysettings["PORT_LOGDIR"])
+ logid_path = os.path.join(mysettings["PORTAGE_BUILDDIR"], ".logid")
+ if not os.path.exists(logid_path):
+ open(_unicode_encode(logid_path), 'w').close()
+ logid_time = _unicode_decode(time.strftime("%Y%m%d-%H%M%S",
+ time.gmtime(os.stat(logid_path).st_mtime)),
+ encoding=_encodings['content'], errors='replace')
+
+ if "split-log" in mysettings.features:
+ log_subdir = os.path.join(logdir, "build", mysettings["CATEGORY"])
+ mysettings["PORTAGE_LOG_FILE"] = os.path.join(
+ log_subdir, "%s:%s.log%s" %
+ (mysettings["PF"], logid_time, compress_log_ext))
+ else:
+ log_subdir = logdir
+ mysettings["PORTAGE_LOG_FILE"] = os.path.join(
+ logdir, "%s:%s:%s.log%s" % \
+ (mysettings["CATEGORY"], mysettings["PF"], logid_time,
+ compress_log_ext))
+
+ if log_subdir is logdir:
+ logdir_subdir_ok = True
+ else:
+ try:
+ _ensure_log_subdirs(logdir, log_subdir)
+ except PortageException as e:
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
+
+ if os.access(log_subdir, os.W_OK):
+ logdir_subdir_ok = True
+ else:
+ writemsg("!!! %s: %s\n" %
+ (_("Permission Denied"), log_subdir), noiselevel=-1)
+
+ tmpdir_log_path = os.path.join(
+ mysettings["T"], "build.log%s" % compress_log_ext)
+ if not logdir_subdir_ok:
+ # NOTE: When sesandbox is enabled, the local SELinux security policies
+ # may not allow output to be piped out of the sesandbox domain. The
+ # current policy will allow it to work when a pty is available, but
+ # not through a normal pipe. See bug #162404.
+ mysettings["PORTAGE_LOG_FILE"] = tmpdir_log_path
+ else:
+ # Create a symlink from tmpdir_log_path to PORTAGE_LOG_FILE, as
+ # requested in bug #412865.
+ make_new_symlink = False
+ try:
+ target = os.readlink(tmpdir_log_path)
+ except OSError:
+ make_new_symlink = True
+ else:
+ if target != mysettings["PORTAGE_LOG_FILE"]:
+ make_new_symlink = True
+ if make_new_symlink:
+ try:
+ os.unlink(tmpdir_log_path)
+ except OSError:
+ pass
+ os.symlink(mysettings["PORTAGE_LOG_FILE"], tmpdir_log_path)
+
+def _ensure_log_subdirs(logdir, subdir):
+ """
+ This assumes that logdir exists, and creates subdirectories down
+ to subdir as necessary. The gid of logdir is copied to all
+	subdirectories, along with 0o2070 mode bits if present. Both logdir
+ and subdir are assumed to be normalized absolute paths.
+ """
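+	# For example (hypothetical paths), with logdir '/var/log/portage' and
+	# subdir '/var/log/portage/build/sys-apps', this creates 'build' and then
+	# 'build/sys-apps' under logdir, propagating logdir's gid and group mode
+	# bits to each newly created directory.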
+ st = os.stat(logdir)
+ uid = -1
+ gid = st.st_gid
+ grp_mode = 0o2070 & st.st_mode
+
+ # If logdir is writable by the portage group but its uid
+ # is not portage_uid, then set the uid to portage_uid if
+ # we have privileges to do so, for compatibility with our
+ # default logrotate config (see bug 378451). With the
+ # "su portage portage" directive and logrotate-3.8.0,
+ # logrotate's chown call during the compression phase will
+ # only succeed if the log file's uid is portage_uid.
+ if grp_mode and gid == portage_gid and \
+ portage.data.secpass >= 2:
+ uid = portage_uid
+ if st.st_uid != portage_uid:
+ ensure_dirs(logdir, uid=uid)
+
+ logdir_split_len = len(logdir.split(os.sep))
+ subdir_split = subdir.split(os.sep)[logdir_split_len:]
+ subdir_split.reverse()
+ current = logdir
+ while subdir_split:
+ current = os.path.join(current, subdir_split.pop())
+ ensure_dirs(current, uid=uid, gid=gid, mode=grp_mode, mask=0)
+
+def _prepare_fake_filesdir(settings):
+ real_filesdir = settings["O"]+"/files"
+ symlink_path = settings["FILESDIR"]
+
+ try:
+ link_target = os.readlink(symlink_path)
+ except OSError:
+ os.symlink(real_filesdir, symlink_path)
+ else:
+ if link_target != real_filesdir:
+ os.unlink(symlink_path)
+ os.symlink(real_filesdir, symlink_path)
+
+def _prepare_fake_distdir(settings, alist):
+ orig_distdir = settings["DISTDIR"]
+ edpath = os.path.join(settings["PORTAGE_BUILDDIR"], "distdir")
+ portage.util.ensure_dirs(edpath, gid=portage_gid, mode=0o755)
+
+ # Remove any unexpected files or directories.
+ for x in os.listdir(edpath):
+ symlink_path = os.path.join(edpath, x)
+ st = os.lstat(symlink_path)
+ if x in alist and stat.S_ISLNK(st.st_mode):
+ continue
+ if stat.S_ISDIR(st.st_mode):
+ shutil.rmtree(symlink_path)
+ else:
+ os.unlink(symlink_path)
+
+ # Check for existing symlinks and recreate if necessary.
+ for x in alist:
+ symlink_path = os.path.join(edpath, x)
+ target = os.path.join(orig_distdir, x)
+ try:
+ link_target = os.readlink(symlink_path)
+ except OSError:
+ os.symlink(target, symlink_path)
+ else:
+ if link_target != target:
+ os.unlink(symlink_path)
+ os.symlink(target, symlink_path)
diff --git a/lib/portage/package/ebuild/profile_iuse.py b/lib/portage/package/ebuild/profile_iuse.py
new file mode 100644
index 000000000..d3f201e54
--- /dev/null
+++ b/lib/portage/package/ebuild/profile_iuse.py
@@ -0,0 +1,32 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'iter_iuse_vars',
+)
+
+
+def iter_iuse_vars(env):
+ """
+ Iterate over (key, value) pairs of profile variables that contribute
+ to implicit IUSE for EAPI 5 and later.
+
+ @param env: Ebuild environment
+ @type env: Mapping
+ @rtype: iterator
+ @return: iterator over (key, value) pairs of profile variables
+ """
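+	# Illustrative usage (hypothetical profile values):
+	#   env = {'USE_EXPAND': 'PYTHON_TARGETS',
+	#       'USE_EXPAND_IMPLICIT': 'PYTHON_TARGETS',
+	#       'USE_EXPAND_VALUES_PYTHON_TARGETS': 'python3_6'}
+	#   dict(iter_iuse_vars(env))
+	# returns all three keys, since PYTHON_TARGETS is listed as implicit.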
+
+ for k in ('IUSE_IMPLICIT', 'USE_EXPAND_IMPLICIT', 'USE_EXPAND_UNPREFIXED', 'USE_EXPAND'):
+ v = env.get(k)
+ if v is not None:
+ yield (k, v)
+
+ use_expand_implicit = frozenset(env.get('USE_EXPAND_IMPLICIT', '').split())
+
+ for v in env.get('USE_EXPAND_UNPREFIXED', '').split() + env.get('USE_EXPAND', '').split():
+ if v in use_expand_implicit:
+ k = 'USE_EXPAND_VALUES_' + v
+ v = env.get(k)
+ if v is not None:
+ yield (k, v)
diff --git a/lib/portage/process.py b/lib/portage/process.py
new file mode 100644
index 000000000..fd326731a
--- /dev/null
+++ b/lib/portage/process.py
@@ -0,0 +1,689 @@
+# portage.py -- core Portage functionality
+# Copyright 1998-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+
+import atexit
+import errno
+import fcntl
+import platform
+import signal
+import socket
+import struct
+import sys
+import traceback
+import os as _os
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.util:dump_traceback,writemsg',
+)
+
+from portage.const import BASH_BINARY, SANDBOX_BINARY, FAKEROOT_BINARY
+from portage.exception import CommandNotFound
+from portage.util._ctypes import find_library, LoadLibrary, ctypes
+
+try:
+ import resource
+ max_fd_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
+except ImportError:
+ max_fd_limit = 256
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+# Support PEP 446 for Python >=3.4
+try:
+ _set_inheritable = _os.set_inheritable
+except AttributeError:
+ _set_inheritable = None
+
+try:
+ _FD_CLOEXEC = fcntl.FD_CLOEXEC
+except AttributeError:
+ _FD_CLOEXEC = None
+
+# Prefer /proc/self/fd if available (/dev/fd
+# doesn't work on solaris, see bug #474536).
+for _fd_dir in ("/proc/self/fd", "/dev/fd"):
+ if os.path.isdir(_fd_dir):
+ break
+ else:
+ _fd_dir = None
+
+# /dev/fd does not work on FreeBSD, see bug #478446
+if platform.system() in ('FreeBSD',) and _fd_dir == '/dev/fd':
+ _fd_dir = None
+
+if _fd_dir is not None:
+ def get_open_fds():
+ return (int(fd) for fd in os.listdir(_fd_dir) if fd.isdigit())
+
+ if platform.python_implementation() == 'PyPy':
+ # EAGAIN observed with PyPy 1.8.
+ _get_open_fds = get_open_fds
+ def get_open_fds():
+ try:
+ return _get_open_fds()
+ except OSError as e:
+ if e.errno != errno.EAGAIN:
+ raise
+ return range(max_fd_limit)
+
+elif os.path.isdir("/proc/%s/fd" % os.getpid()):
+ # In order for this function to work in forked subprocesses,
+ # os.getpid() must be called from inside the function.
+ def get_open_fds():
+ return (int(fd) for fd in os.listdir("/proc/%s/fd" % os.getpid())
+ if fd.isdigit())
+
+else:
+ def get_open_fds():
+ return range(max_fd_limit)
+
+sandbox_capable = (os.path.isfile(SANDBOX_BINARY) and
+ os.access(SANDBOX_BINARY, os.X_OK))
+
+fakeroot_capable = (os.path.isfile(FAKEROOT_BINARY) and
+ os.access(FAKEROOT_BINARY, os.X_OK))
+
+
+def sanitize_fds():
+ """
+ Set the inheritable flag to False for all open file descriptors,
+ except for those corresponding to stdin, stdout, and stderr. This
+ ensures that any unintentionally inherited file descriptors will
+ not be inherited by child processes.
+ """
+ if _set_inheritable is not None:
+
+ whitelist = frozenset([
+ sys.__stdin__.fileno(),
+ sys.__stdout__.fileno(),
+ sys.__stderr__.fileno(),
+ ])
+
+ for fd in get_open_fds():
+ if fd not in whitelist:
+ try:
+ _set_inheritable(fd, False)
+ except OSError:
+ pass
+
+
+def spawn_bash(mycommand, debug=False, opt_name=None, **keywords):
+ """
+	Spawns a bash shell running a specific command
+
+ @param mycommand: The command for bash to run
+ @type mycommand: String
+ @param debug: Turn bash debugging on (set -x)
+ @type debug: Boolean
+	@param opt_name: Name of the spawned process (defaults to binary name)
+ @type opt_name: String
+ @param keywords: Extra Dictionary arguments to pass to spawn
+ @type keywords: Dictionary
+ """
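+	# Illustrative usage (hypothetical command string): the call
+	#   spawn_bash("echo hello", debug=True)
+	# runs `bash -x -c 'echo hello'` and returns the exit status from spawn().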
+
+ args = [BASH_BINARY]
+ if not opt_name:
+ opt_name = os.path.basename(mycommand.split()[0])
+ if debug:
+ # Print commands and their arguments as they are executed.
+ args.append("-x")
+ args.append("-c")
+ args.append(mycommand)
+ return spawn(args, opt_name=opt_name, **keywords)
+
+def spawn_sandbox(mycommand, opt_name=None, **keywords):
+ if not sandbox_capable:
+ return spawn_bash(mycommand, opt_name=opt_name, **keywords)
+ args = [SANDBOX_BINARY]
+ if not opt_name:
+ opt_name = os.path.basename(mycommand.split()[0])
+ args.append(mycommand)
+ return spawn(args, opt_name=opt_name, **keywords)
+
+def spawn_fakeroot(mycommand, fakeroot_state=None, opt_name=None, **keywords):
+ args = [FAKEROOT_BINARY]
+ if not opt_name:
+ opt_name = os.path.basename(mycommand.split()[0])
+ if fakeroot_state:
+ open(fakeroot_state, "a").close()
+ args.append("-s")
+ args.append(fakeroot_state)
+ args.append("-i")
+ args.append(fakeroot_state)
+ args.append("--")
+ args.append(BASH_BINARY)
+ args.append("-c")
+ args.append(mycommand)
+ return spawn(args, opt_name=opt_name, **keywords)
+
+_exithandlers = []
+def atexit_register(func, *args, **kargs):
+ """Wrapper around atexit.register that is needed in order to track
+ what is registered. For example, when portage restarts itself via
+ os.execv, the atexit module does not work so we have to do it
+ manually by calling the run_exitfuncs() function in this module."""
+ _exithandlers.append((func, args, kargs))
+
+def run_exitfuncs():
+ """This should behave identically to the routine performed by
+ the atexit module at exit time. It's only necessary to call this
+ function when atexit will not work (because of os.execv, for
+ example)."""
+
+ # This function is a copy of the private atexit._run_exitfuncs()
+ # from the python 2.4.2 sources. The only difference from the
+ # original function is in the output to stderr.
+ exc_info = None
+ while _exithandlers:
+ func, targs, kargs = _exithandlers.pop()
+ try:
+ func(*targs, **kargs)
+ except SystemExit:
+ exc_info = sys.exc_info()
+ except: # No idea what they called, so we need this broad except here.
+ dump_traceback("Error in portage.process.run_exitfuncs", noiselevel=0)
+ exc_info = sys.exc_info()
+
+ if exc_info is not None:
+ if sys.hexversion >= 0x3000000:
+ raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
+ else:
+ exec("raise exc_info[0], exc_info[1], exc_info[2]")
+
+atexit.register(run_exitfuncs)
+
+# It used to be necessary for API consumers to remove pids from spawned_pids,
+# since otherwise it would accumulate pids endlessly. Now, spawned_pids is
+# just an empty dummy list, so for backward compatibility, ignore ValueError
+# for removal on non-existent items.
+class _dummy_list(list):
+ def remove(self, item):
+ # TODO: Trigger a DeprecationWarning here, after stable portage
+ # has dummy spawned_pids.
+ try:
+ list.remove(self, item)
+ except ValueError:
+ pass
+
+spawned_pids = _dummy_list()
+
+def cleanup():
+ pass
+
+def spawn(mycommand, env={}, opt_name=None, fd_pipes=None, returnpid=False,
+ uid=None, gid=None, groups=None, umask=None, logfile=None,
+ path_lookup=True, pre_exec=None,
+ close_fds=(sys.version_info < (3, 4)), unshare_net=False,
+ unshare_ipc=False, cgroup=None):
+ """
+ Spawns a given command.
+
+ @param mycommand: the command to execute
+ @type mycommand: String or List (Popen style list)
+ @param env: A dict of Key=Value pairs for env variables
+ @type env: Dictionary
+	@param opt_name: an optional name for the spawned process (defaults to the binary name)
+	@type opt_name: String
+	@param fd_pipes: A dict mapping file descriptors for pipes, e.g. {0: stdin, 1: stdout}
+ (default is {0:stdin, 1:stdout, 2:stderr})
+ @type fd_pipes: Dictionary
+ @param returnpid: Return the Process IDs for a successful spawn.
+	NOTE: This requires the caller to clean up all the PIDs; otherwise spawn will clean them up.
+ @type returnpid: Boolean
+	@param uid: User ID to spawn as; useful for dropping privileges
+	@type uid: Integer
+	@param gid: Group ID to spawn as; useful for dropping privileges
+	@type gid: Integer
+	@param groups: Group IDs to spawn in; useful for having the process run in multiple group contexts.
+ @type groups: List
+ @param umask: An integer representing the umask for the process (see man chmod for umask details)
+ @type umask: Integer
+ @param logfile: name of a file to use for logging purposes
+ @type logfile: String
+ @param path_lookup: If the binary is not fully specified then look for it in PATH
+ @type path_lookup: Boolean
+ @param pre_exec: A function to be called with no arguments just prior to the exec call.
+ @type pre_exec: callable
+ @param close_fds: If True, then close all file descriptors except those
+ referenced by fd_pipes (default is True for python3.3 and earlier, and False for
+ python3.4 and later due to non-inheritable file descriptor behavior from PEP 446).
+ @type close_fds: Boolean
+ @param unshare_net: If True, networking will be unshared from the spawned process
+ @type unshare_net: Boolean
+ @param unshare_ipc: If True, IPC will be unshared from the spawned process
+ @type unshare_ipc: Boolean
+ @param cgroup: CGroup path to bind the process to
+ @type cgroup: String
+
+	logfile requires stdout and stderr to be assigned to this process
+	(i.e. not pointed somewhere else).
+
+ """
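+	# Illustrative usage (hypothetical paths and values):
+	#   retval = spawn(["/bin/echo", "hello"], env={"LANG": "C"})
+	# blocks until the child exits and returns its exit code, while
+	#   pids = spawn(["/bin/sleep", "60"], returnpid=True)
+	# returns a list containing the child pid and leaves waitpid() to the caller.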
+
+ # mycommand is either a str or a list
+ if isinstance(mycommand, basestring):
+ mycommand = mycommand.split()
+
+ if sys.hexversion < 0x3000000:
+ # Avoid a potential UnicodeEncodeError from os.execve().
+ env_bytes = {}
+ for k, v in env.items():
+ env_bytes[_unicode_encode(k, encoding=_encodings['content'])] = \
+ _unicode_encode(v, encoding=_encodings['content'])
+ env = env_bytes
+ del env_bytes
+
+ # If an absolute path to an executable file isn't given
+ # search for it unless we've been told not to.
+ binary = mycommand[0]
+ if binary not in (BASH_BINARY, SANDBOX_BINARY, FAKEROOT_BINARY) and \
+ (not os.path.isabs(binary) or not os.path.isfile(binary)
+ or not os.access(binary, os.X_OK)):
+ binary = path_lookup and find_binary(binary) or None
+ if not binary:
+ raise CommandNotFound(mycommand[0])
+
+ # If we haven't been told what file descriptors to use
+ # default to propagating our stdin, stdout and stderr.
+ if fd_pipes is None:
+ fd_pipes = {
+ 0:portage._get_stdin().fileno(),
+ 1:sys.__stdout__.fileno(),
+ 2:sys.__stderr__.fileno(),
+ }
+
+ # mypids will hold the pids of all processes created.
+ mypids = []
+
+ if logfile:
+ # Using a log file requires that stdout and stderr
+ # are assigned to the process we're running.
+ if 1 not in fd_pipes or 2 not in fd_pipes:
+ raise ValueError(fd_pipes)
+
+ # Create a pipe
+ (pr, pw) = os.pipe()
+
+ # Create a tee process, giving it our stdout and stderr
+ # as well as the read end of the pipe.
+ mypids.extend(spawn(('tee', '-i', '-a', logfile),
+ returnpid=True, fd_pipes={0:pr,
+ 1:fd_pipes[1], 2:fd_pipes[2]}))
+
+ # We don't need the read end of the pipe, so close it.
+ os.close(pr)
+
+ # Assign the write end of the pipe to our stdout and stderr.
+ fd_pipes[1] = pw
+ fd_pipes[2] = pw
+
+ # This caches the libc library lookup in the current
+ # process, so that it's only done once rather than
+ # for each child process.
+ if unshare_net or unshare_ipc:
+ find_library("c")
+
+ # Force instantiation of portage.data.userpriv_groups before the
+ # fork, so that the result is cached in the main process.
+ bool(groups)
+
+ parent_pid = os.getpid()
+ pid = None
+ try:
+ pid = os.fork()
+
+ if pid == 0:
+ try:
+ _exec(binary, mycommand, opt_name, fd_pipes,
+ env, gid, groups, uid, umask, pre_exec, close_fds,
+ unshare_net, unshare_ipc, cgroup)
+ except SystemExit:
+ raise
+ except Exception as e:
+ # We need to catch _any_ exception so that it doesn't
+ # propagate out of this function and cause exiting
+ # with anything other than os._exit()
+ writemsg("%s:\n %s\n" % (e, " ".join(mycommand)),
+ noiselevel=-1)
+ traceback.print_exc()
+ sys.stderr.flush()
+
+ finally:
+ if pid == 0 or (pid is None and os.getpid() != parent_pid):
+ # Call os._exit() from a finally block in order
+ # to suppress any finally blocks from earlier
+ # in the call stack (see bug #345289). This
+ # finally block has to be setup before the fork
+ # in order to avoid a race condition.
+ os._exit(1)
+
+ if not isinstance(pid, int):
+ raise AssertionError("fork returned non-integer: %s" % (repr(pid),))
+
+ # Add the pid to our local and the global pid lists.
+ mypids.append(pid)
+
+ # If we started a tee process the write side of the pipe is no
+ # longer needed, so close it.
+ if logfile:
+ os.close(pw)
+
+ # If the caller wants to handle cleaning up the processes, we tell
+ # it about all processes that were created.
+ if returnpid:
+ return mypids
+
+ # Otherwise we clean them up.
+ while mypids:
+
+ # Pull the last reader in the pipe chain. If all processes
+ # in the pipe are well behaved, it will die when the process
+ # it is reading from dies.
+ pid = mypids.pop(0)
+
+ # and wait for it.
+ retval = os.waitpid(pid, 0)[1]
+
+ if retval:
+ # If it failed, kill off anything else that
+ # isn't dead yet.
+ for pid in mypids:
+ # With waitpid and WNOHANG, only check the
+ # first element of the tuple since the second
+ # element may vary (bug #337465).
+ if os.waitpid(pid, os.WNOHANG)[0] == 0:
+ os.kill(pid, signal.SIGTERM)
+ os.waitpid(pid, 0)
+
+ # If it got a signal, return the signal that was sent.
+ if (retval & 0xff):
+ return ((retval & 0xff) << 8)
+
+ # Otherwise, return its exit code.
+ return (retval >> 8)
+
+ # Everything succeeded
+ return 0
+
+def _exec(binary, mycommand, opt_name, fd_pipes, env, gid, groups, uid, umask,
+ pre_exec, close_fds, unshare_net, unshare_ipc, cgroup):
+
+ """
+ Execute a given binary with options
+
+ @param binary: Name of program to execute
+ @type binary: String
+ @param mycommand: Options for program
+ @type mycommand: String
+ @param opt_name: Name of process (defaults to binary)
+ @type opt_name: String
+ @param fd_pipes: Mapping pipes to destination; { 0:0, 1:1, 2:2 }
+ @type fd_pipes: Dictionary
+ @param env: Key,Value mapping for Environmental Variables
+ @type env: Dictionary
+ @param gid: Group ID to run the process under
+ @type gid: Integer
+	@param groups: Groups the process should be in.
+	@type groups: List
+ @param uid: User ID to run the process under
+ @type uid: Integer
+ @param umask: an int representing a unix umask (see man chmod for umask details)
+ @type umask: Integer
+ @param pre_exec: A function to be called with no arguments just prior to the exec call.
+ @type pre_exec: callable
+ @param unshare_net: If True, networking will be unshared from the spawned process
+ @type unshare_net: Boolean
+ @param unshare_ipc: If True, IPC will be unshared from the spawned process
+ @type unshare_ipc: Boolean
+ @param cgroup: CGroup path to bind the process to
+ @type cgroup: String
+ @rtype: None
+ @return: Never returns (calls os.execve)
+ """
+
+ # If the process we're creating hasn't been given a name
+ # assign it the name of the executable.
+ if not opt_name:
+ if binary is portage._python_interpreter:
+			# NOTE: PyPy 1.7 will die due to "library path not found" if argv[0]
+ # does not contain the full path of the binary.
+ opt_name = binary
+ else:
+ opt_name = os.path.basename(binary)
+
+ # Set up the command's argument list.
+ myargs = [opt_name]
+ myargs.extend(mycommand[1:])
+
+ # Avoid a potential UnicodeEncodeError from os.execve().
+ myargs = [_unicode_encode(x, encoding=_encodings['fs'],
+ errors='strict') for x in myargs]
+
+ # Use default signal handlers in order to avoid problems
+ # killing subprocesses as reported in bug #353239.
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ # Unregister SIGCHLD handler and wakeup_fd for the parent
+ # process's event loop (bug 655656).
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+ try:
+ wakeup_fd = signal.set_wakeup_fd(-1)
+ if wakeup_fd > 0:
+ os.close(wakeup_fd)
+ except (ValueError, OSError):
+ pass
+
+ # Quiet killing of subprocesses by SIGPIPE (see bug #309001).
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+ # Avoid issues triggered by inheritance of SIGQUIT handler from
+ # the parent process (see bug #289486).
+ signal.signal(signal.SIGQUIT, signal.SIG_DFL)
+
+ _setup_pipes(fd_pipes, close_fds=close_fds, inheritable=True)
+
+ # Add to cgroup
+ # it's better to do it from the child since we can guarantee
+ # it is done before we start forking children
+ if cgroup:
+ with open(os.path.join(cgroup, 'cgroup.procs'), 'a') as f:
+ f.write('%d\n' % os.getpid())
+
+ # Unshare (while still uid==0)
+ if unshare_net or unshare_ipc:
+ filename = find_library("c")
+ if filename is not None:
+ libc = LoadLibrary(filename)
+ if libc is not None:
+ CLONE_NEWIPC = 0x08000000
+ CLONE_NEWNET = 0x40000000
+
+ flags = 0
+ if unshare_net:
+ flags |= CLONE_NEWNET
+ if unshare_ipc:
+ flags |= CLONE_NEWIPC
+
+ try:
+ if libc.unshare(flags) != 0:
+ writemsg("Unable to unshare: %s\n" % (
+ errno.errorcode.get(ctypes.get_errno(), '?')),
+ noiselevel=-1)
+ else:
+ if unshare_net:
+ # 'up' the loopback
+ IFF_UP = 0x1
+ ifreq = struct.pack('16sh', b'lo', IFF_UP)
+ SIOCSIFFLAGS = 0x8914
+
+ sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
+ try:
+ fcntl.ioctl(sock, SIOCSIFFLAGS, ifreq)
+ except IOError as e:
+ writemsg("Unable to enable loopback interface: %s\n" % (
+ errno.errorcode.get(e.errno, '?')),
+ noiselevel=-1)
+ sock.close()
+ except AttributeError:
+ # unshare() not supported by libc
+ pass
+
+ # Set requested process permissions.
+ if gid:
+ # Cast proxies to int, in case it matters.
+ os.setgid(int(gid))
+ if groups:
+ os.setgroups(groups)
+ if uid:
+ # Cast proxies to int, in case it matters.
+ os.setuid(int(uid))
+ if umask:
+ os.umask(umask)
+ if pre_exec:
+ pre_exec()
+
+ # And switch to the new process.
+ os.execve(binary, myargs, env)
+
+def _setup_pipes(fd_pipes, close_fds=True, inheritable=None):
+ """Setup pipes for a forked process.
+
+ Even when close_fds is False, file descriptors referenced as
+ values in fd_pipes are automatically closed if they do not also
+ occur as keys in fd_pipes. It is assumed that the caller will
+ explicitly add them to the fd_pipes keys if they are intended
+ to remain open. This allows for convenient elimination of
+ unnecessary duplicate file descriptors.
+
+ WARNING: When not followed by exec, the close_fds behavior
+ can trigger interference from destructors that close file
+ descriptors. This interference happens when the garbage
+ collector intermittently executes such destructors after their
+ corresponding file descriptors have been re-used, leading
+ to intermittent "[Errno 9] Bad file descriptor" exceptions in
+ forked processes. This problem has been observed with PyPy 1.8,
+ and also with CPython under some circumstances (as triggered
+ by xmpppy in bug #374335). In order to close a safe subset of
+ file descriptors, see portage.locks._close_fds().
+
+ NOTE: When not followed by exec, even when close_fds is False,
+ it's still possible for dup2() calls to cause interference in a
+ way that's similar to the way that close_fds interferes (since
+ dup2() has to close the target fd if it happens to be open).
+ It's possible to avoid such interference by using allocated
+ file descriptors as the keys in fd_pipes. For example:
+
+ pr, pw = os.pipe()
+ fd_pipes[pw] = pw
+
+ By using the allocated pw file descriptor as the key in fd_pipes,
+ it's not necessary for dup2() to close a file descriptor (it
+ actually does nothing in this case), which avoids possible
+ interference.
+ """
+
+ reverse_map = {}
+ # To protect from cases where direct assignment could
+ # clobber needed fds ({1:2, 2:1}) we create a reverse map
+ # in order to know when it's necessary to create temporary
+ # backup copies with os.dup().
+ for newfd, oldfd in fd_pipes.items():
+ newfds = reverse_map.get(oldfd)
+ if newfds is None:
+ newfds = []
+ reverse_map[oldfd] = newfds
+ newfds.append(newfd)
+
+ # Assign newfds via dup2(), making temporary backups when
+ # necessary, and closing oldfd if the caller has not
+ # explicitly requested for it to remain open by adding
+ # it to the keys of fd_pipes.
+ while reverse_map:
+
+ oldfd, newfds = reverse_map.popitem()
+ old_fdflags = None
+
+ for newfd in newfds:
+ if newfd in reverse_map:
+ # Make a temporary backup before re-assignment, assuming
+ # that backup_fd won't collide with a key in reverse_map
+ # (since all of the keys correspond to open file
+ # descriptors, and os.dup() only allocates a previously
+				# unused file descriptor).
+ backup_fd = os.dup(newfd)
+ reverse_map[backup_fd] = reverse_map.pop(newfd)
+
+ if oldfd != newfd:
+ os.dup2(oldfd, newfd)
+ if _set_inheritable is not None:
+ # Don't do this unless _set_inheritable is available,
+ # since it's used below to ensure correct state, and
+ # otherwise /dev/null stdin fails to inherit (at least
+ # with Python versions from 3.1 to 3.3).
+ if old_fdflags is None:
+ old_fdflags = fcntl.fcntl(oldfd, fcntl.F_GETFD)
+ fcntl.fcntl(newfd, fcntl.F_SETFD, old_fdflags)
+
+ if _set_inheritable is not None:
+
+ inheritable_state = None
+ if not (old_fdflags is None or _FD_CLOEXEC is None):
+ inheritable_state = not bool(old_fdflags & _FD_CLOEXEC)
+
+ if inheritable is not None:
+ if inheritable_state is not inheritable:
+ _set_inheritable(newfd, inheritable)
+
+ elif newfd in (0, 1, 2):
+ if inheritable_state is not True:
+ _set_inheritable(newfd, True)
+
+ if oldfd not in fd_pipes:
+ # If oldfd is not a key in fd_pipes, then it's safe
+ # to close now, since we've already made all of the
+ # requested duplicates. This also closes every
+ # backup_fd that may have been created on previous
+ # iterations of this loop.
+ os.close(oldfd)
+
+ if close_fds:
+ # Then close _all_ fds that haven't been explicitly
+ # requested to be kept open.
+ for fd in get_open_fds():
+ if fd not in fd_pipes:
+ try:
+ os.close(fd)
+ except OSError:
+ pass
+
+def find_binary(binary):
+ """
+ Given a binary name, find the binary in PATH
+
+ @param binary: Name of the binary to find
+	@type binary: String
+ @rtype: None or string
+ @return: full path to binary or None if the binary could not be located.
+ """
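+	# Illustrative usage: find_binary("sh") typically returns "/bin/sh" when
+	# /bin is in PATH, and None when the name cannot be found.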
+ paths = os.environ.get("PATH", "")
+ if sys.hexversion >= 0x3000000 and isinstance(binary, bytes):
+ # return bytes when input is bytes
+ paths = paths.encode(sys.getfilesystemencoding(), 'surrogateescape')
+ paths = paths.split(b':')
+ else:
+ paths = paths.split(':')
+
+ for path in paths:
+ filename = _os.path.join(path, binary)
+ if _os.access(filename, os.X_OK) and _os.path.isfile(filename):
+ return filename
+ return None
diff --git a/lib/portage/progress.py b/lib/portage/progress.py
new file mode 100644
index 000000000..e43c2afbd
--- /dev/null
+++ b/lib/portage/progress.py
@@ -0,0 +1,61 @@
+# Copyright 2005-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import time
+import signal
+
+import portage
+
+
+class ProgressHandler(object):
+ def __init__(self):
+ self.reset()
+
+ def reset(self):
+ self.curval = 0
+ self.maxval = 0
+ self.last_update = 0
+ self.min_display_latency = 0.2
+
+ def onProgress(self, maxval, curval):
+ self.maxval = maxval
+ self.curval = curval
+ cur_time = time.time()
+ if cur_time - self.last_update >= self.min_display_latency:
+ self.last_update = cur_time
+ self.display()
+
+ def display(self):
+ raise NotImplementedError(self)
+
+
+class ProgressBar(ProgressHandler):
+ """Class to set up and return a Progress Bar"""
+
+ def __init__(self, isatty, **kwargs):
+ self.isatty = isatty
+ self.kwargs = kwargs
+ ProgressHandler.__init__(self)
+ self.progressBar = None
+
+ def start(self):
+ if self.isatty:
+ self.progressBar = portage.output.TermProgressBar(**self.kwargs)
+ signal.signal(signal.SIGWINCH, self.sigwinch_handler)
+ else:
+ self.onProgress = None
+ return self.onProgress
+
+ def set_label(self, _label):
+ self.kwargs['label'] = _label
+
+ def display(self):
+ self.progressBar.set(self.curval, self.maxval)
+
+ def sigwinch_handler(self, signum, frame):
+ lines, self.progressBar.term_columns = \
+ portage.output.get_term_size()
+
+ def stop(self):
+ signal.signal(signal.SIGWINCH, signal.SIG_DFL)
+
diff --git a/lib/portage/proxy/__init__.py b/lib/portage/proxy/__init__.py
new file mode 100644
index 000000000..f98c56457
--- /dev/null
+++ b/lib/portage/proxy/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/proxy/lazyimport.py b/lib/portage/proxy/lazyimport.py
new file mode 100644
index 000000000..d4258706d
--- /dev/null
+++ b/lib/portage/proxy/lazyimport.py
@@ -0,0 +1,222 @@
+# Copyright 2009-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['lazyimport']
+
+import sys
+import types
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+from portage.proxy.objectproxy import ObjectProxy
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+_module_proxies = {}
+_module_proxies_lock = threading.RLock()
+
+def _preload_portage_submodules():
+ """
+ Load lazily referenced portage submodules into memory,
+ so imports won't fail during portage upgrade/downgrade.
+ Note that this recursively loads only the modules that
+ are lazily referenced by currently imported modules,
+ so some portage submodules may still remain unimported
+ after this function is called.
+ """
+ imported = set()
+ while True:
+ remaining = False
+ for name in list(_module_proxies):
+ if name.startswith('portage.') or name.startswith('_emerge.'):
+ if name in imported:
+ continue
+ imported.add(name)
+ remaining = True
+ __import__(name)
+ _unregister_module_proxy(name)
+ if not remaining:
+ break
+
+def _register_module_proxy(name, proxy):
+ _module_proxies_lock.acquire()
+ try:
+ proxy_list = _module_proxies.get(name)
+ if proxy_list is None:
+ proxy_list = []
+ _module_proxies[name] = proxy_list
+ proxy_list.append(proxy)
+ finally:
+ _module_proxies_lock.release()
+
+def _unregister_module_proxy(name):
+ """
+	Destroy all proxies that reference the given module name. Also, check
+ for other proxies referenced by modules that have been imported and
+ destroy those proxies too. This way, destruction of a single proxy
+ can trigger destruction of all the rest. If a target module appears
+ to be partially imported (indicated when an AttributeError is caught),
+ this function will leave in place proxies that reference it.
+ """
+ _module_proxies_lock.acquire()
+ try:
+ if name in _module_proxies:
+ modules = sys.modules
+ for name, proxy_list in list(_module_proxies.items()):
+ if name not in modules:
+ continue
+ # First delete this name from the dict so that
+ # if this same thread reenters below, it won't
+ # enter this path again.
+ del _module_proxies[name]
+ try:
+ while proxy_list:
+ proxy = proxy_list.pop()
+ object.__getattribute__(proxy, '_get_target')()
+ except AttributeError:
+ # Apparently the target module is only partially
+ # imported, so proxies that reference it cannot
+ # be destroyed yet.
+ proxy_list.append(proxy)
+ _module_proxies[name] = proxy_list
+ finally:
+ _module_proxies_lock.release()
+
+class _LazyImport(ObjectProxy):
+
+ __slots__ = ('_scope', '_alias', '_name', '_target')
+
+ def __init__(self, scope, alias, name):
+ ObjectProxy.__init__(self)
+ object.__setattr__(self, '_scope', scope)
+ object.__setattr__(self, '_alias', alias)
+ object.__setattr__(self, '_name', name)
+ _register_module_proxy(name, self)
+
+ def _get_target(self):
+ try:
+ return object.__getattribute__(self, '_target')
+ except AttributeError:
+ pass
+ name = object.__getattribute__(self, '_name')
+ __import__(name)
+ target = sys.modules[name]
+ object.__setattr__(self, '_target', target)
+ object.__getattribute__(self, '_scope')[
+ object.__getattribute__(self, '_alias')] = target
+ _unregister_module_proxy(name)
+ return target
+
+class _LazyImportFrom(_LazyImport):
+
+ __slots__ = ('_attr_name',)
+
+ def __init__(self, scope, name, attr_name, alias):
+ object.__setattr__(self, '_attr_name', attr_name)
+ _LazyImport.__init__(self, scope, alias, name)
+
+ def _get_target(self):
+ try:
+ return object.__getattribute__(self, '_target')
+ except AttributeError:
+ pass
+ name = object.__getattribute__(self, '_name')
+ attr_name = object.__getattribute__(self, '_attr_name')
+ __import__(name)
+ try:
+ target = getattr(sys.modules[name], attr_name)
+ except AttributeError:
+ # Try to import it as a submodule
+ try:
+ __import__("%s.%s" % (name, attr_name))
+ except ImportError:
+ pass
+ # If it's a submodule, this will succeed. Otherwise, it may
+ # be that the module is only partially imported, so raise
+ # AttributeError for _unregister_module_proxy() to handle.
+ target = getattr(sys.modules[name], attr_name)
+
+ object.__setattr__(self, '_target', target)
+ object.__getattribute__(self, '_scope')[
+ object.__getattribute__(self, '_alias')] = target
+ _unregister_module_proxy(name)
+ return target
+
+def lazyimport(scope, *args):
+ """
+	Create a proxy in the given scope in order to perform a lazy import.
+
+ Syntax Result
+ foo import foo
+ foo:bar,baz from foo import bar, baz
+ foo:bar@baz from foo import bar as baz
+
+ @param scope: the scope in which to place the import, typically globals()
+	@type scope: dict
+ @param args: module names to import
+ @type args: strings
+ """
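+	# Illustrative usage (the alias name is arbitrary):
+	#   lazyimport(globals(), 'portage.util:writemsg@_writemsg')
+	# binds _writemsg either to portage.util.writemsg directly (if the module
+	# is already imported) or to a proxy that resolves it on first use.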
+
+ modules = sys.modules
+
+ for s in args:
+ parts = s.split(':', 1)
+ if len(parts) == 1:
+ name = s
+
+ if not name or not isinstance(name, basestring):
+ raise ValueError(name)
+
+ components = name.split('.')
+ parent_scope = scope
+ for i in range(len(components)):
+ alias = components[i]
+ if i < len(components) - 1:
+ parent_name = ".".join(components[:i+1])
+ __import__(parent_name)
+ mod = modules.get(parent_name)
+ if not isinstance(mod, types.ModuleType):
+ # raise an exception
+ __import__(name)
+ parent_scope[alias] = mod
+ parent_scope = mod.__dict__
+ continue
+
+ already_imported = modules.get(name)
+ if already_imported is not None:
+ parent_scope[alias] = already_imported
+ else:
+ parent_scope[alias] = \
+ _LazyImport(parent_scope, alias, name)
+
+ else:
+ name, fromlist = parts
+ already_imported = modules.get(name)
+ fromlist = fromlist.split(',')
+ for s in fromlist:
+ if not s:
+ # This happens if there's an extra comma in fromlist.
+ raise ValueError('Empty module attribute name')
+ alias = s.split('@', 1)
+ if len(alias) == 1:
+ alias = alias[0]
+ attr_name = alias
+ else:
+ attr_name, alias = alias
+ if already_imported is not None:
+ try:
+ scope[alias] = getattr(already_imported, attr_name)
+ except AttributeError:
+ # Apparently the target module is only partially
+ # imported, so create a proxy.
+ already_imported = None
+ scope[alias] = \
+ _LazyImportFrom(scope, name, attr_name, alias)
+ else:
+ scope[alias] = \
+ _LazyImportFrom(scope, name, attr_name, alias)
diff --git a/lib/portage/proxy/objectproxy.py b/lib/portage/proxy/objectproxy.py
new file mode 100644
index 000000000..a755774ae
--- /dev/null
+++ b/lib/portage/proxy/objectproxy.py
@@ -0,0 +1,98 @@
+# Copyright 2008-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+__all__ = ['ObjectProxy']
+
+class ObjectProxy(object):
+
+ """
+ Object that acts as a proxy to another object, forwarding
+ attribute accesses and method calls. This can be useful
+ for implementing lazy initialization.
+ """
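+	# Subclasses provide _get_target(); for example, _LazyImport in
+	# portage.proxy.lazyimport resolves and caches the target module the
+	# first time an attribute is accessed through the proxy.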
+
+ __slots__ = ()
+
+ def _get_target(self):
+ raise NotImplementedError(self)
+
+ def __getattribute__(self, attr):
+ result = object.__getattribute__(self, '_get_target')()
+ return getattr(result, attr)
+
+ def __setattr__(self, attr, value):
+ result = object.__getattribute__(self, '_get_target')()
+ setattr(result, attr, value)
+
+ def __call__(self, *args, **kwargs):
+ result = object.__getattribute__(self, '_get_target')()
+ return result(*args, **kwargs)
+
+ def __enter__(self):
+ return object.__getattribute__(self, '_get_target')().__enter__()
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ return object.__getattribute__(self, '_get_target')().__exit__(
+ exc_type, exc_value, traceback)
+
+ def __setitem__(self, key, value):
+ object.__getattribute__(self, '_get_target')()[key] = value
+
+ def __getitem__(self, key):
+ return object.__getattribute__(self, '_get_target')()[key]
+
+ def __delitem__(self, key):
+ del object.__getattribute__(self, '_get_target')()[key]
+
+ def __contains__(self, key):
+ return key in object.__getattribute__(self, '_get_target')()
+
+ def __iter__(self):
+ return iter(object.__getattribute__(self, '_get_target')())
+
+ def __len__(self):
+ return len(object.__getattribute__(self, '_get_target')())
+
+ def __repr__(self):
+ return repr(object.__getattribute__(self, '_get_target')())
+
+ def __str__(self):
+ return str(object.__getattribute__(self, '_get_target')())
+
+ def __add__(self, other):
+ return self.__str__() + other
+
+ def __hash__(self):
+ return hash(object.__getattribute__(self, '_get_target')())
+
+ def __ge__(self, other):
+ return object.__getattribute__(self, '_get_target')() >= other
+
+ def __gt__(self, other):
+ return object.__getattribute__(self, '_get_target')() > other
+
+ def __le__(self, other):
+ return object.__getattribute__(self, '_get_target')() <= other
+
+ def __lt__(self, other):
+ return object.__getattribute__(self, '_get_target')() < other
+
+ def __eq__(self, other):
+ return object.__getattribute__(self, '_get_target')() == other
+
+ def __ne__(self, other):
+ return object.__getattribute__(self, '_get_target')() != other
+
+ def __bool__(self):
+ return bool(object.__getattribute__(self, '_get_target')())
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
+
+ def __unicode__(self):
+ return unicode(object.__getattribute__(self, '_get_target')())
+
+ def __int__(self):
+ return int(object.__getattribute__(self, '_get_target')())
diff --git a/lib/portage/repository/__init__.py b/lib/portage/repository/__init__.py
new file mode 100644
index 000000000..21a391aee
--- /dev/null
+++ b/lib/portage/repository/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/repository/config.py b/lib/portage/repository/config.py
new file mode 100644
index 000000000..f790f9392
--- /dev/null
+++ b/lib/portage/repository/config.py
@@ -0,0 +1,1177 @@
+# Copyright 2010-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import io
+import logging
+import warnings
+import sys
+import re
+
+import portage
+from portage import eclass_cache, os
+from portage.checksum import get_valid_checksum_keys
+from portage.const import (PORTAGE_BASE_PATH, REPO_NAME_LOC, USER_CONFIG_PATH)
+from portage.eapi import eapi_allows_directories_on_profile_level_and_repository_level
+from portage.env.loaders import KeyValuePairFileLoader
+from portage.util import (normalize_path, read_corresponding_eapi_file, shlex_split,
+ stack_lists, writemsg, writemsg_level, _recursive_file_list)
+from portage.util.configparser import (SafeConfigParser, ConfigParserError,
+ read_configs)
+from portage.util._path import isdir_raise_eaccess
+from portage.util.path import first_existing
+from portage.localization import _
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage import _encodings
+from portage import manifest
+import portage.sync
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+# Characters prohibited by repoman's file.name check.
+_invalid_path_char_re = re.compile(r'[^a-zA-Z0-9._\-+/]')
+
+_valid_profile_formats = frozenset(
+ ['pms', 'portage-1', 'portage-2', 'profile-bashrcs', 'profile-set',
+ 'profile-default-eapi', 'build-id'])
+
+_portage1_profiles_allow_directories = frozenset(
+ ["portage-1-compat", "portage-1", 'portage-2'])
+
+_repo_name_sub_re = re.compile(r'[^\w-]')
+
+def _gen_valid_repo(name):
+ """
+ Substitute hyphen in place of characters that don't conform to PMS 3.1.5,
+ and strip hyphen from left side if necessary. This returns None if the
+ given name contains no valid characters.
+ """
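+	# For example, a hypothetical input of ' my repo! ' yields 'my-repo'.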
+ name = _repo_name_sub_re.sub(' ', name.strip())
+ name = '-'.join(name.split())
+ name = name.lstrip('-')
+ if not name:
+ name = None
+ return name
+
+def _find_invalid_path_char(path, pos=0, endpos=None):
+ """
+	Returns the position of the first invalid character found in path,
+ or -1 if no invalid characters are found.
+ """
+ if endpos is None:
+ endpos = len(path)
+
+ m = _invalid_path_char_re.search(path, pos=pos, endpos=endpos)
+ if m is not None:
+ return m.start()
+
+ return -1
+
+class RepoConfig(object):
+ """Stores config of one repository"""
+
+ __slots__ = ('aliases', 'allow_missing_manifest', 'allow_provide_virtual',
+ 'auto_sync', 'cache_formats', 'clone_depth',
+ 'create_manifest', 'disable_manifest',
+ 'eapi', 'eclass_db', 'eclass_locations', 'eclass_overrides',
+ 'find_invalid_path_char', 'force', 'format', 'local_config', 'location',
+ 'main_repo', 'manifest_hashes', 'masters', 'missing_repo_name',
+ 'name', 'portage1_profiles', 'portage1_profiles_compat', 'priority',
+ 'profile_formats', 'sign_commit', 'sign_manifest', 'strict_misc_digests',
+ 'sync_depth', 'sync_hooks_only_on_change',
+ 'sync_type', 'sync_umask', 'sync_uri', 'sync_user', 'thin_manifest',
+ 'update_changelog', '_eapis_banned', '_eapis_deprecated',
+ '_masters_orig', 'module_specific_options', 'manifest_required_hashes',
+ 'sync_allow_hardlinks',
+ 'sync_openpgp_key_path',
+ 'sync_openpgp_key_refresh_retry_count',
+ 'sync_openpgp_key_refresh_retry_delay_max',
+ 'sync_openpgp_key_refresh_retry_delay_exp_base',
+ 'sync_openpgp_key_refresh_retry_delay_mult',
+ 'sync_openpgp_key_refresh_retry_overall_timeout',
+ )
+
+ def __init__(self, name, repo_opts, local_config=True):
+		"""Build a RepoConfig with options in repo_opts.
+		Try to read repo_name in the repository location; if it is
+		not found, use the variable name as the repository name."""
+
+ force = repo_opts.get('force')
+ if force is not None:
+ force = tuple(force.split())
+ self.force = force
+ if force is None:
+ force = ()
+
+ self.local_config = local_config
+
+ if local_config or 'aliases' in force:
+ aliases = repo_opts.get('aliases')
+ if aliases is not None:
+ aliases = tuple(aliases.split())
+ else:
+ aliases = None
+
+ self.aliases = aliases
+
+ if local_config or 'eclass-overrides' in force:
+ eclass_overrides = repo_opts.get('eclass-overrides')
+ if eclass_overrides is not None:
+ eclass_overrides = tuple(eclass_overrides.split())
+ else:
+ eclass_overrides = None
+
+ self.eclass_overrides = eclass_overrides
+ # Eclass databases and locations are computed later.
+ self.eclass_db = None
+ self.eclass_locations = None
+
+ if local_config or 'masters' in force:
+ # Masters from repos.conf override layout.conf.
+ masters = repo_opts.get('masters')
+ if masters is not None:
+ masters = tuple(masters.split())
+ else:
+ masters = None
+
+ self.masters = masters
+
+		# The main-repo key only makes sense for the 'DEFAULT' section.
+ self.main_repo = repo_opts.get('main-repo')
+
+ priority = repo_opts.get('priority')
+ if priority is not None:
+ try:
+ priority = int(priority)
+ except ValueError:
+ priority = None
+ self.priority = priority
+
+ sync_type = repo_opts.get('sync-type')
+ if sync_type is not None:
+ sync_type = sync_type.strip()
+ self.sync_type = sync_type or None
+
+ sync_umask = repo_opts.get('sync-umask')
+ if sync_umask is not None:
+ sync_umask = sync_umask.strip()
+ self.sync_umask = sync_umask or None
+
+ sync_uri = repo_opts.get('sync-uri')
+ if sync_uri is not None:
+ sync_uri = sync_uri.strip()
+ self.sync_uri = sync_uri or None
+
+ sync_user = repo_opts.get('sync-user')
+ if sync_user is not None:
+ sync_user = sync_user.strip()
+ self.sync_user = sync_user or None
+
+ auto_sync = repo_opts.get('auto-sync', 'yes')
+ if auto_sync is not None:
+ auto_sync = auto_sync.strip().lower()
+ self.auto_sync = auto_sync
+
+ self.clone_depth = repo_opts.get('clone-depth')
+ self.sync_depth = repo_opts.get('sync-depth')
+
+ self.sync_hooks_only_on_change = repo_opts.get(
+ 'sync-hooks-only-on-change', 'false').lower() == 'true'
+
+ self.strict_misc_digests = repo_opts.get(
+ 'strict-misc-digests', 'true').lower() == 'true'
+
+ self.sync_allow_hardlinks = repo_opts.get(
+ 'sync-allow-hardlinks', 'true').lower() in ('true', 'yes')
+
+ self.sync_openpgp_key_path = repo_opts.get(
+ 'sync-openpgp-key-path', None)
+
+ for k in ('sync_openpgp_key_refresh_retry_count',
+ 'sync_openpgp_key_refresh_retry_delay_max',
+ 'sync_openpgp_key_refresh_retry_delay_exp_base',
+ 'sync_openpgp_key_refresh_retry_delay_mult',
+ 'sync_openpgp_key_refresh_retry_overall_timeout'):
+ setattr(self, k, repo_opts.get(k.replace('_', '-'), None))
+
+ self.module_specific_options = {}
+
+ # Not implemented.
+ format = repo_opts.get('format')
+ if format is not None:
+ format = format.strip()
+ self.format = format
+
+ location = repo_opts.get('location')
+ if location is not None and location.strip():
+ if os.path.isdir(location) or portage._sync_mode:
+ location = os.path.realpath(location)
+ else:
+ location = None
+ self.location = location
+
+ missing = True
+ self.name = name
+ if self.location is not None:
+ self.name, missing = self._read_valid_repo_name(self.location)
+ if missing:
+ # The name from repos.conf has to be used here for
+ # things like emerge-webrsync to work when the repo
+ # is empty (bug #484950).
+ if name is not None:
+ self.name = name
+ if portage._sync_mode:
+ missing = False
+
+ elif name == "DEFAULT":
+ missing = False
+
+ self.eapi = None
+ self.missing_repo_name = missing
+ # sign_commit is disabled by default, since it requires Git >=1.7.9,
+ # and key_id configured by `git config user.signingkey key_id`
+ self.sign_commit = False
+ self.sign_manifest = True
+ self.thin_manifest = False
+ self.allow_missing_manifest = False
+ self.allow_provide_virtual = False
+ self.create_manifest = True
+ self.disable_manifest = False
+ self.manifest_hashes = None
+ self.manifest_required_hashes = None
+ self.update_changelog = False
+ self.cache_formats = None
+ self.portage1_profiles = True
+ self.portage1_profiles_compat = False
+ self.find_invalid_path_char = _find_invalid_path_char
+ self._masters_orig = None
+
+ # Parse layout.conf.
+ if self.location:
+ layout_data = parse_layout_conf(self.location, self.name)[0]
+ self._masters_orig = layout_data['masters']
+
+ # layout.conf masters may be overridden here if we have a masters
+ # setting from the user's repos.conf
+ if self.masters is None:
+ self.masters = layout_data['masters']
+
+ if (local_config or 'aliases' in force) and layout_data['aliases']:
+ aliases = self.aliases
+ if aliases is None:
+ aliases = ()
+ # repos.conf aliases come after layout.conf aliases, giving
+ # them the ability to do incremental overrides
+ self.aliases = layout_data['aliases'] + tuple(aliases)
+
+ if layout_data['repo-name']:
+ # allow layout.conf to override repository name
+ # useful when having two copies of the same repo enabled
+ # to avoid modifying profiles/repo_name in one of them
+ self.name = layout_data['repo-name']
+ self.missing_repo_name = False
+
+ for value in ('allow-missing-manifest',
+ 'cache-formats',
+ 'create-manifest', 'disable-manifest', 'manifest-hashes',
+ 'manifest-required-hashes', 'profile-formats',
+ 'sign-commit', 'sign-manifest', 'thin-manifest', 'update-changelog'):
+ setattr(self, value.lower().replace("-", "_"), layout_data[value])
+
+ # If profile-formats specifies a default EAPI, then set
+ # self.eapi to that, otherwise set it to "0" as specified
+ # by PMS.
+ self.eapi = layout_data.get(
+ 'profile_eapi_when_unspecified', '0')
+
+ eapi = read_corresponding_eapi_file(
+ os.path.join(self.location, REPO_NAME_LOC),
+ default=self.eapi)
+
+ self.portage1_profiles = eapi_allows_directories_on_profile_level_and_repository_level(eapi) or \
+ any(x in _portage1_profiles_allow_directories for x in layout_data['profile-formats'])
+ self.portage1_profiles_compat = not eapi_allows_directories_on_profile_level_and_repository_level(eapi) and \
+ layout_data['profile-formats'] == ('portage-1-compat',)
+
+ self._eapis_banned = frozenset(layout_data['eapis-banned'])
+ self._eapis_deprecated = frozenset(layout_data['eapis-deprecated'])
+
+ def set_module_specific_opt(self, opt, val):
+ self.module_specific_options[opt] = val
+
+ def eapi_is_banned(self, eapi):
+ return eapi in self._eapis_banned
+
+ def eapi_is_deprecated(self, eapi):
+ return eapi in self._eapis_deprecated
+
+ def iter_pregenerated_caches(self, auxdbkeys, readonly=True, force=False):
+ """
+ Reads layout.conf cache-formats from left to right and yields cache
+		instances for each supported type that's found. If no cache-formats
+		are specified in layout.conf, the 'md5-dict' format is assumed when
+		force is True; otherwise no cache instances are yielded.
+ """
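+		# Illustrative note (hypothetical layout.conf, not part of this change):
+		# with "cache-formats = md5-dict pms", this generator yields a
+		# flat_hash.md5_database for metadata/md5-cache first, followed by a
+		# metadata.database for metadata/cache.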
+ formats = self.cache_formats
+ if not formats:
+ if not force:
+ return
+ # The default egencache format was 'pms' prior to portage-2.1.11.32
+ # (portage versions prior to portage-2.1.11.14 will NOT
+ # recognize md5-dict format unless it is explicitly listed in
+ # layout.conf).
+ formats = ('md5-dict',)
+
+ for fmt in formats:
+ name = None
+ if fmt == 'pms':
+ from portage.cache.metadata import database
+ name = 'metadata/cache'
+ elif fmt == 'md5-dict':
+ from portage.cache.flat_hash import md5_database as database
+ name = 'metadata/md5-cache'
+
+ if name is not None:
+ yield database(self.location, name,
+ auxdbkeys, readonly=readonly)
+
+ def get_pregenerated_cache(self, auxdbkeys, readonly=True, force=False):
+ """
+ Returns the first cache instance yielded from
+ iter_pregenerated_caches(), or None if no cache is available or none
+ of the available formats are supported.
+ """
+ return next(self.iter_pregenerated_caches(
+ auxdbkeys, readonly=readonly, force=force), None)
+
+ def load_manifest(self, *args, **kwds):
+ kwds['thin'] = self.thin_manifest
+ kwds['allow_missing'] = self.allow_missing_manifest
+ kwds['allow_create'] = self.create_manifest
+ kwds['hashes'] = self.manifest_hashes
+ kwds['required_hashes'] = self.manifest_required_hashes
+ kwds['strict_misc_digests'] = self.strict_misc_digests
+ if self.disable_manifest:
+ kwds['from_scratch'] = True
+ kwds['find_invalid_path_char'] = self.find_invalid_path_char
+ return manifest.Manifest(*args, **kwds)
+
+ def update(self, new_repo):
+ """Update repository with options in another RepoConfig"""
+
+ keys = set(self.__slots__)
+ keys.discard("missing_repo_name")
+ for k in keys:
+ v = getattr(new_repo, k, None)
+ if v is not None:
+ setattr(self, k, v)
+
+ if new_repo.name is not None:
+ self.missing_repo_name = new_repo.missing_repo_name
+
+ @property
+ def writable(self):
+ """
+ Check if self.location is writable, or permissions are sufficient
+ to create it if it does not exist yet.
+ @rtype: bool
+ @return: True if self.location is writable or can be created,
+ False otherwise
+ """
+ return os.access(first_existing(self.location), os.W_OK)
+
+ @staticmethod
+ def _read_valid_repo_name(repo_path):
+ name, missing = RepoConfig._read_repo_name(repo_path)
+ # We must ensure that the name conforms to PMS 3.1.5
+ # in order to avoid InvalidAtom exceptions when we
+ # use it to generate atoms.
+ name = _gen_valid_repo(name)
+ if not name:
+ # name only contains invalid characters
+ name = "x-" + os.path.basename(repo_path)
+ name = _gen_valid_repo(name)
+ # If basename only contains whitespace then the
+ # end result is name = 'x-'.
+ return name, missing
+
+ @staticmethod
+ def _read_repo_name(repo_path):
+ """
+ Read repo_name from repo_path.
+ Returns repo_name, missing.
+ """
+ repo_name_path = os.path.join(repo_path, REPO_NAME_LOC)
+ f = None
+ try:
+ f = io.open(
+ _unicode_encode(repo_name_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ return f.readline().strip(), False
+ except EnvironmentError:
+ return "x-" + os.path.basename(repo_path), True
+ finally:
+ if f is not None:
+ f.close()
+
+ def info_string(self):
+ """
+		Returns a formatted string containing information about the repository.
+ Used by emerge --info.
+ """
+ indent = " " * 4
+ repo_msg = []
+ repo_msg.append(self.name)
+ if self.format:
+ repo_msg.append(indent + "format: " + self.format)
+ if self.location:
+ repo_msg.append(indent + "location: " + self.location)
+ if not self.strict_misc_digests:
+ repo_msg.append(indent + "strict-misc-digests: false")
+ if self.sync_type:
+ repo_msg.append(indent + "sync-type: " + self.sync_type)
+ if self.sync_umask:
+ repo_msg.append(indent + "sync-umask: " + self.sync_umask)
+ if self.sync_uri:
+ repo_msg.append(indent + "sync-uri: " + self.sync_uri)
+ if self.sync_user:
+ repo_msg.append(indent + "sync-user: " + self.sync_user)
+ if self.masters:
+ repo_msg.append(indent + "masters: " + " ".join(master.name for master in self.masters))
+ if self.priority is not None:
+ repo_msg.append(indent + "priority: " + str(self.priority))
+ if self.aliases:
+ repo_msg.append(indent + "aliases: " + " ".join(self.aliases))
+ if self.eclass_overrides:
+ repo_msg.append(indent + "eclass-overrides: " + \
+ " ".join(self.eclass_overrides))
+ for o, v in self.module_specific_options.items():
+ if v is not None:
+ repo_msg.append(indent + o + ": " + v)
+ repo_msg.append("")
+ return "\n".join(repo_msg)
+
+ def __repr__(self):
+ return "<portage.repository.config.RepoConfig(name=%r, location=%r)>" % (self.name, _unicode_decode(self.location))
+
+ def __str__(self):
+ d = {}
+ for k in self.__slots__:
+ d[k] = getattr(self, k, None)
+ return "%s" % (d,)
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__())
+
+class RepoConfigLoader(object):
+	"""Loads and stores the config of several repositories, loaded from PORTDIR_OVERLAY or repos.conf"""
+
+ @staticmethod
+ def _add_repositories(portdir, portdir_overlay, prepos,
+ ignored_map, local_config, default_portdir):
+ """Add overlays in PORTDIR_OVERLAY as repositories"""
+ overlays = []
+ portdir_orig = None
+ if portdir:
+ portdir = normalize_path(portdir)
+ portdir_orig = portdir
+ overlays.append(portdir)
+ try:
+ port_ov = [normalize_path(i) for i in shlex_split(portdir_overlay)]
+ except ValueError as e:
+ #File "/usr/lib/python3.2/shlex.py", line 168, in read_token
+ # raise ValueError("No closing quotation")
+ writemsg(_("!!! Invalid PORTDIR_OVERLAY:"
+ " %s: %s\n") % (e, portdir_overlay), noiselevel=-1)
+ port_ov = []
+ overlays.extend(port_ov)
+ default_repo_opts = {}
+ if prepos['DEFAULT'].aliases is not None:
+ default_repo_opts['aliases'] = \
+ ' '.join(prepos['DEFAULT'].aliases)
+ if prepos['DEFAULT'].eclass_overrides is not None:
+ default_repo_opts['eclass-overrides'] = \
+ ' '.join(prepos['DEFAULT'].eclass_overrides)
+ if prepos['DEFAULT'].masters is not None:
+ default_repo_opts['masters'] = \
+ ' '.join(prepos['DEFAULT'].masters)
+
+ if overlays:
+ # We need a copy of the original repos.conf data, since we're
+ # going to modify the prepos dict and some of the RepoConfig
+ # objects that we put in prepos may have to be discarded if
+ # they get overridden by a repository with the same name but
+ # a different location. This is common with repoman, for example,
+ # when temporarily overriding an rsync repo with another copy
+ # of the same repo from CVS.
+ repos_conf = prepos.copy()
+ #overlay priority is negative because we want them to be looked before any other repo
+ base_priority = 0
+ for ov in overlays:
+ # Ignore missing directory for 'gentoo' so that
+ # first sync with emerge-webrsync is possible.
+ if isdir_raise_eaccess(ov) or \
+ (base_priority == 0 and ov is portdir):
+ repo_opts = default_repo_opts.copy()
+ repo_opts['location'] = ov
+ name = prepos['DEFAULT'].main_repo if ov is portdir else None
+ repo = RepoConfig(name, repo_opts, local_config=local_config)
+ # repos_conf_opts contains options from repos.conf
+ repos_conf_opts = repos_conf.get(repo.name)
+ if repos_conf_opts is not None:
+ # Selectively copy only the attributes which
+ # repos.conf is allowed to override.
+ for k in ('aliases', 'auto_sync',
+ 'clone_depth', 'eclass_overrides',
+ 'force', 'masters', 'priority', 'strict_misc_digests',
+ 'sync_depth', 'sync_hooks_only_on_change',
+ 'sync_allow_hardlinks',
+ 'sync_openpgp_key_path',
+ 'sync_openpgp_key_refresh_retry_count',
+ 'sync_openpgp_key_refresh_retry_delay_max',
+ 'sync_openpgp_key_refresh_retry_delay_exp_base',
+ 'sync_openpgp_key_refresh_retry_delay_mult',
+ 'sync_openpgp_key_refresh_retry_overall_timeout',
+ 'sync_type', 'sync_umask', 'sync_uri', 'sync_user',
+ 'module_specific_options'):
+ v = getattr(repos_conf_opts, k, None)
+ if v is not None:
+ setattr(repo, k, v)
+
+ if repo.name in prepos:
+ # Silently ignore when PORTDIR overrides the location
+ # setting from the default repos.conf (bug #478544).
+ old_location = prepos[repo.name].location
+ if old_location is not None and \
+ old_location != repo.location and \
+ not (base_priority == 0 and
+ old_location == default_portdir):
+ ignored_map.setdefault(repo.name, []).append(old_location)
+ if old_location == portdir:
+ portdir = repo.location
+
+ if repo.priority is None:
+ if base_priority == 0 and ov == portdir_orig:
+ # If it's the original PORTDIR setting and it's not
+ # in PORTDIR_OVERLAY, then it will be assigned a
+ # special priority setting later.
+ pass
+ else:
+ repo.priority = base_priority
+ base_priority += 1
+
+ prepos[repo.name] = repo
+ else:
+
+ if not portage._sync_mode:
+ writemsg(_("!!! Invalid PORTDIR_OVERLAY (not a dir): '%s'\n") % ov, noiselevel=-1)
+
+ return portdir
+
+ @staticmethod
+ def _parse(paths, prepos, local_config, default_opts):
+ """Parse files in paths to load config"""
+ parser = SafeConfigParser(defaults=default_opts)
+
+ recursive_paths = []
+ for p in paths:
+ if isinstance(p, basestring):
+ recursive_paths.extend(_recursive_file_list(p))
+ else:
+ recursive_paths.append(p)
+
+ read_configs(parser, recursive_paths)
+
+ prepos['DEFAULT'] = RepoConfig("DEFAULT",
+ parser.defaults(), local_config=local_config)
+
+ for sname in parser.sections():
+ optdict = {}
+ for oname in parser.options(sname):
+ optdict[oname] = parser.get(sname, oname)
+
+ repo = RepoConfig(sname, optdict, local_config=local_config)
+ for o in portage.sync.module_specific_options(repo):
+ if parser.has_option(sname, o):
+ repo.set_module_specific_opt(o, parser.get(sname, o))
+
+ # Perform repos.conf sync variable validation
+ portage.sync.validate_config(repo, logging)
+
+ # For backward compatibility with locations set via PORTDIR and
+ # PORTDIR_OVERLAY, delay validation of the location and repo.name
+ # until after PORTDIR and PORTDIR_OVERLAY have been processed.
+ prepos[sname] = repo
+
+ def __init__(self, paths, settings):
+ """Load config from files in paths"""
+
+ prepos = {}
+ location_map = {}
+ treemap = {}
+ ignored_map = {}
+ default_opts = {
+ "EPREFIX" : settings["EPREFIX"],
+ "EROOT" : settings["EROOT"],
+ "PORTAGE_CONFIGROOT" : settings["PORTAGE_CONFIGROOT"],
+ "ROOT" : settings["ROOT"],
+ }
+
+ if "PORTAGE_REPOSITORIES" in settings:
+ portdir = ""
+ portdir_overlay = ""
+ # deprecated portdir_sync
+ portdir_sync = ""
+ else:
+ portdir = settings.get("PORTDIR", "")
+ portdir_overlay = settings.get("PORTDIR_OVERLAY", "")
+ # deprecated portdir_sync
+ portdir_sync = settings.get("SYNC", "")
+
+ default_opts['sync-rsync-extra-opts'] = \
+ settings.get("PORTAGE_RSYNC_EXTRA_OPTS", "")
+
+ try:
+ self._parse(paths, prepos, settings.local_config, default_opts)
+ except ConfigParserError as e:
+ writemsg(
+ _("!!! Error while reading repo config file: %s\n") % e,
+ noiselevel=-1)
+ # The configparser state is unreliable (prone to quirky
+ # exceptions) after it has thrown an error, so use empty
+ # config and try to fall back to PORTDIR{,_OVERLAY}.
+ prepos.clear()
+ prepos['DEFAULT'] = RepoConfig('DEFAULT',
+ {}, local_config=settings.local_config)
+ location_map.clear()
+ treemap.clear()
+
+ default_portdir = os.path.join(os.sep,
+ settings['EPREFIX'].lstrip(os.sep), 'usr', 'portage')
+
+ # If PORTDIR_OVERLAY contains a repo with the same repo_name as
+ # PORTDIR, then PORTDIR is overridden.
+ portdir = self._add_repositories(portdir, portdir_overlay, prepos,
+ ignored_map, settings.local_config,
+ default_portdir)
+ if portdir and portdir.strip():
+ portdir = os.path.realpath(portdir)
+
+ ignored_repos = tuple((repo_name, tuple(paths)) \
+ for repo_name, paths in ignored_map.items())
+
+ self.missing_repo_names = frozenset(repo.location
+ for repo in prepos.values()
+ if repo.location is not None and repo.missing_repo_name)
+
+ # Do this before expanding aliases, so that location_map and
+ # treemap consistently map unaliased names whenever available.
+ for repo_name, repo in list(prepos.items()):
+ if repo.location is None:
+ if repo_name != 'DEFAULT':
+ # Skip this warning for repoman (bug #474578).
+ if settings.local_config and paths:
+ writemsg_level("!!! %s\n" % _("Section '%s' in repos.conf is missing location attribute") %
+ repo.name, level=logging.ERROR, noiselevel=-1)
+ del prepos[repo_name]
+ continue
+ else:
+ if not portage._sync_mode:
+ if not isdir_raise_eaccess(repo.location):
+ writemsg_level("!!! %s\n" % _("Section '%s' in repos.conf has location attribute set "
+ "to nonexistent directory: '%s'") %
+ (repo_name, repo.location), level=logging.ERROR, noiselevel=-1)
+
+ # Ignore missing directory for 'gentoo' so that
+ # first sync with emerge-webrsync is possible.
+ if repo.name != 'gentoo':
+ del prepos[repo_name]
+ continue
+
+ # After removing support for PORTDIR_OVERLAY, the following check can be:
+ # if repo.missing_repo_name:
+ if repo.missing_repo_name and repo.name != repo_name:
+ writemsg_level("!!! %s\n" % _("Section '%s' in repos.conf refers to repository "
+ "without repository name set in '%s'") %
+ (repo_name, os.path.join(repo.location, REPO_NAME_LOC)), level=logging.ERROR, noiselevel=-1)
+ del prepos[repo_name]
+ continue
+
+ if repo.name != repo_name:
+ writemsg_level("!!! %s\n" % _("Section '%s' in repos.conf has name different "
+ "from repository name '%s' set inside repository") %
+ (repo_name, repo.name), level=logging.ERROR, noiselevel=-1)
+ del prepos[repo_name]
+ continue
+
+ location_map[repo.location] = repo_name
+ treemap[repo_name] = repo.location
+
+ # Add alias mappings, but never replace unaliased mappings.
+ for repo_name, repo in list(prepos.items()):
+ names = set()
+ names.add(repo_name)
+ if repo.aliases:
+ aliases = stack_lists([repo.aliases], incremental=True)
+ names.update(aliases)
+
+ for name in names:
+ if name in prepos and prepos[name].location is not None:
+ if name == repo_name:
+ # unaliased names already handled earlier
+ continue
+ writemsg_level(_("!!! Repository name or alias '%s', " + \
+ "defined for repository '%s', overrides " + \
+ "existing alias or repository.\n") % (name, repo_name), level=logging.WARNING, noiselevel=-1)
+ # Never replace an unaliased mapping with
+ # an aliased mapping.
+ continue
+ prepos[name] = repo
+ if repo.location is not None:
+ if repo.location not in location_map:
+ # Never replace an unaliased mapping with
+ # an aliased mapping.
+ location_map[repo.location] = name
+ treemap[name] = repo.location
+
+ main_repo = prepos['DEFAULT'].main_repo
+ if main_repo is None or main_repo not in prepos:
+ #setting main_repo if it was not set in repos.conf
+ main_repo = location_map.get(portdir)
+ if main_repo is not None:
+ prepos['DEFAULT'].main_repo = main_repo
+ else:
+ prepos['DEFAULT'].main_repo = None
+ if portdir and not portage._sync_mode:
+ writemsg(_("!!! main-repo not set in DEFAULT and PORTDIR is empty.\n"), noiselevel=-1)
+
+ if main_repo is not None and prepos[main_repo].priority is None:
+ # This happens if main-repo has been set in repos.conf.
+ prepos[main_repo].priority = -1000
+
+ # DEPRECATED Backward compatible SYNC support for old mirrorselect.
+ # Feb. 2, 2015. Version 2.2.16
+ if portdir_sync and main_repo is not None:
+ writemsg(_("!!! SYNC setting found in make.conf.\n "
+				"This setting is deprecated and no longer used. "
+				"Please ensure that your 'sync-type' and 'sync-uri' are set correctly"
+				" in /etc/portage/repos.conf/gentoo.conf\n"),
+ noiselevel=-1)
+
+
+ # Include repo.name in sort key, for predictable sorting
+ # even when priorities are equal.
+ prepos_order = sorted(prepos.items(),
+ key=lambda r:(r[1].priority or 0, r[1].name))
+
+ # filter duplicates from aliases, by only including
+ # items where repo.name == key
+ prepos_order = [repo.name for (key, repo) in prepos_order
+ if repo.name == key and key != 'DEFAULT' and
+ repo.location is not None]
+
+ self.prepos = prepos
+ self.prepos_order = prepos_order
+ self.ignored_repos = ignored_repos
+ self.location_map = location_map
+ self.treemap = treemap
+ self._prepos_changed = True
+ self._repo_location_list = []
+
+ #The 'masters' key currently contains repo names. Replace them with the matching RepoConfig.
+ for repo_name, repo in prepos.items():
+ if repo_name == "DEFAULT":
+ continue
+ if repo.masters is None:
+ if self.mainRepo() and repo_name != self.mainRepo().name:
+ repo.masters = self.mainRepo(),
+ else:
+ repo.masters = ()
+ else:
+ if repo.masters and isinstance(repo.masters[0], RepoConfig):
+ # This one has already been processed
+ # because it has an alias.
+ continue
+ master_repos = []
+ for master_name in repo.masters:
+ if master_name not in prepos:
+ layout_filename = os.path.join(repo.location,
+ "metadata", "layout.conf")
+ writemsg_level(_("Unavailable repository '%s' " \
+ "referenced by masters entry in '%s'\n") % \
+ (master_name, layout_filename),
+ level=logging.ERROR, noiselevel=-1)
+ else:
+ master_repos.append(prepos[master_name])
+ repo.masters = tuple(master_repos)
+
+ #The 'eclass_overrides' key currently contains repo names. Replace them with the matching repo paths.
+ for repo_name, repo in prepos.items():
+ if repo_name == "DEFAULT":
+ continue
+
+ eclass_locations = []
+ eclass_locations.extend(master_repo.location for master_repo in repo.masters)
+ # Only append the current repo to eclass_locations if it's not
+ # there already. This allows masters to have more control over
+ # eclass override order, which may be useful for scenarios in
+ # which there is a plan to migrate eclasses to a master repo.
+ if repo.location not in eclass_locations:
+ eclass_locations.append(repo.location)
+
+ if repo.eclass_overrides:
+ for other_repo_name in repo.eclass_overrides:
+ if other_repo_name in self.treemap:
+ eclass_locations.append(self.get_location_for_name(other_repo_name))
+ else:
+ writemsg_level(_("Unavailable repository '%s' " \
+ "referenced by eclass-overrides entry for " \
+ "'%s'\n") % (other_repo_name, repo_name), \
+ level=logging.ERROR, noiselevel=-1)
+ repo.eclass_locations = tuple(eclass_locations)
+
+ eclass_dbs = {}
+ for repo_name, repo in prepos.items():
+ if repo_name == "DEFAULT":
+ continue
+
+ eclass_db = None
+ for eclass_location in repo.eclass_locations:
+ tree_db = eclass_dbs.get(eclass_location)
+ if tree_db is None:
+ tree_db = eclass_cache.cache(eclass_location)
+ eclass_dbs[eclass_location] = tree_db
+ if eclass_db is None:
+ eclass_db = tree_db.copy()
+ else:
+ eclass_db.append(tree_db)
+ repo.eclass_db = eclass_db
+
+ for repo_name, repo in prepos.items():
+ if repo_name == "DEFAULT":
+ continue
+
+ if repo._masters_orig is None and self.mainRepo() and \
+ repo.name != self.mainRepo().name and not portage._sync_mode:
+ # TODO: Delete masters code in lib/portage/tests/resolver/ResolverPlayground.py when deleting this warning.
+ writemsg_level("!!! %s\n" % _("Repository '%s' is missing masters attribute in '%s'") %
+ (repo.name, os.path.join(repo.location, "metadata", "layout.conf")) +
+ "!!! %s\n" % _("Set 'masters = %s' in this file for future compatibility") %
+ self.mainRepo().name, level=logging.WARNING, noiselevel=-1)
+
+ self._prepos_changed = True
+ self._repo_location_list = []
+
+ self._check_locations()
+
+ def repoLocationList(self):
+		"""Get a list of repository locations. Replaces PORTDIR_OVERLAY."""
+ if self._prepos_changed:
+ _repo_location_list = []
+ for repo in self.prepos_order:
+ if self.prepos[repo].location is not None:
+ _repo_location_list.append(self.prepos[repo].location)
+ self._repo_location_list = tuple(_repo_location_list)
+
+ self._prepos_changed = False
+ return self._repo_location_list
+
+ def mainRepoLocation(self):
+		"""Returns the location of the main repo"""
+ main_repo = self.prepos['DEFAULT'].main_repo
+ if main_repo is not None and main_repo in self.prepos:
+ return self.prepos[main_repo].location
+ else:
+ return ''
+
+ def mainRepo(self):
+ """Returns the main repo"""
+ main_repo = self.prepos['DEFAULT'].main_repo
+ if main_repo is None:
+ return None
+ return self.prepos[main_repo]
+
+ def _check_locations(self):
+		"""Check whether each repository's location is valid and show a warning message if not"""
+ for (name, r) in self.prepos.items():
+ if name != 'DEFAULT':
+ if r.location is None:
+ writemsg(_("!!! Location not set for repository %s\n") % name, noiselevel=-1)
+ else:
+ if not isdir_raise_eaccess(r.location) and not portage._sync_mode:
+ self.prepos_order.remove(name)
+ writemsg(_("!!! Invalid Repository Location"
+ " (not a dir): '%s'\n") % r.location, noiselevel=-1)
+
+ def repos_with_profiles(self):
+ for repo_name in self.prepos_order:
+ repo = self.prepos[repo_name]
+ if repo.format != "unavailable":
+ yield repo
+
+ def get_name_for_location(self, location):
+ return self.location_map[location]
+
+ def get_location_for_name(self, repo_name):
+ if repo_name is None:
+ # This simplifies code in places where
+ # we want to be able to pass in Atom.repo
+ # even if it is None.
+ return None
+ return self.treemap[repo_name]
+
+ def get_repo_for_location(self, location):
+ return self.prepos[self.get_name_for_location(location)]
+
+ def __setitem__(self, repo_name, repo):
+ # self.prepos[repo_name] = repo
+ raise NotImplementedError
+
+ def __getitem__(self, repo_name):
+ return self.prepos[repo_name]
+
+ def __delitem__(self, repo_name):
+ if repo_name == self.prepos['DEFAULT'].main_repo:
+ self.prepos['DEFAULT'].main_repo = None
+ location = self.prepos[repo_name].location
+ del self.prepos[repo_name]
+ if repo_name in self.prepos_order:
+ self.prepos_order.remove(repo_name)
+ for k, v in self.location_map.copy().items():
+ if v == repo_name:
+ del self.location_map[k]
+ if repo_name in self.treemap:
+ del self.treemap[repo_name]
+ self._repo_location_list = tuple(x for x in self._repo_location_list if x != location)
+
+ def __iter__(self):
+ for repo_name in self.prepos_order:
+ yield self.prepos[repo_name]
+
+ def __contains__(self, repo_name):
+ return repo_name in self.prepos
+
+ def config_string(self):
+ bool_keys = ("strict_misc_digests", "sync_allow_hardlinks")
+ str_or_int_keys = ("auto_sync", "clone_depth", "format", "location",
+ "main_repo", "priority", "sync_depth", "sync_openpgp_key_path",
+ "sync_openpgp_key_refresh_retry_count",
+ "sync_openpgp_key_refresh_retry_delay_max",
+ "sync_openpgp_key_refresh_retry_delay_exp_base",
+ "sync_openpgp_key_refresh_retry_delay_mult",
+ "sync_openpgp_key_refresh_retry_overall_timeout",
+ "sync_type", "sync_umask", "sync_uri", 'sync_user')
+ str_tuple_keys = ("aliases", "eclass_overrides", "force")
+ repo_config_tuple_keys = ("masters",)
+ keys = bool_keys + str_or_int_keys + str_tuple_keys + repo_config_tuple_keys
+ config_string = ""
+ for repo_name, repo in sorted(self.prepos.items(), key=lambda x: (x[0] != "DEFAULT", x[0])):
+ config_string += "\n[%s]\n" % repo_name
+ for key in sorted(keys):
+ if key == "main_repo" and repo_name != "DEFAULT":
+ continue
+ if getattr(repo, key) is not None:
+ if key in bool_keys:
+ config_string += "%s = %s\n" % (key.replace("_", "-"),
+ 'true' if getattr(repo, key) else 'false')
+ elif key in str_or_int_keys:
+ config_string += "%s = %s\n" % (key.replace("_", "-"), getattr(repo, key))
+ elif key in str_tuple_keys:
+ config_string += "%s = %s\n" % (key.replace("_", "-"), " ".join(getattr(repo, key)))
+ elif key in repo_config_tuple_keys:
+ config_string += "%s = %s\n" % (key.replace("_", "-"), " ".join(x.name for x in getattr(repo, key)))
+ for o, v in repo.module_specific_options.items():
+ config_string += "%s = %s\n" % (o, v)
+ return config_string.lstrip("\n")
+
+def load_repository_config(settings, extra_files=None):
+ repoconfigpaths = []
+ if "PORTAGE_REPOSITORIES" in settings:
+ repoconfigpaths.append(io.StringIO(settings["PORTAGE_REPOSITORIES"]))
+ else:
+ if portage._not_installed:
+ repoconfigpaths.append(os.path.join(PORTAGE_BASE_PATH, "cnf", "repos.conf"))
+ else:
+ repoconfigpaths.append(os.path.join(settings.global_config_path, "repos.conf"))
+ repoconfigpaths.append(os.path.join(settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH, "repos.conf"))
+ if extra_files:
+ repoconfigpaths.extend(extra_files)
+ return RepoConfigLoader(repoconfigpaths, settings)
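+
+# For illustration (hypothetical value): PORTAGE_REPOSITORIES may hold
+# repos.conf-style text, e.g.
+#   [gentoo]
+#   location = /usr/portage
+# in which case the configuration is read from that string instead of the
+# repos.conf files listed above.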
+
+def _get_repo_name(repo_location, cached=None):
+ if cached is not None:
+ return cached
+ name, missing = RepoConfig._read_repo_name(repo_location)
+ if missing:
+ return None
+ return name
+
+def parse_layout_conf(repo_location, repo_name=None):
+ eapi = read_corresponding_eapi_file(os.path.join(repo_location, REPO_NAME_LOC))
+
+ layout_filename = os.path.join(repo_location, "metadata", "layout.conf")
+ layout_file = KeyValuePairFileLoader(layout_filename, None, None)
+ layout_data, layout_errors = layout_file.load()
+
+ data = {}
+
+	# None indicates absence of a masters setting, which later code uses
+ # to trigger a backward compatibility fallback that sets an implicit
+ # master. In order to avoid this fallback behavior, layout.conf can
+ # explicitly set masters to an empty value, which will result in an
+ # empty tuple here instead of None.
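+	# For example (hypothetical layout.conf contents, shown for illustration):
+	#   (no masters line)  -> data['masters'] is None  (implicit master fallback)
+	#   masters =          -> data['masters'] == ()    (explicitly no masters)
+	#   masters = gentoo   -> data['masters'] == ('gentoo',)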
+ masters = layout_data.get('masters')
+ if masters is not None:
+ masters = tuple(masters.split())
+ data['masters'] = masters
+ data['aliases'] = tuple(layout_data.get('aliases', '').split())
+
+ data['eapis-banned'] = tuple(layout_data.get('eapis-banned', '').split())
+ data['eapis-deprecated'] = tuple(layout_data.get('eapis-deprecated', '').split())
+
+ data['sign-commit'] = layout_data.get('sign-commits', 'false').lower() \
+ == 'true'
+
+ data['sign-manifest'] = layout_data.get('sign-manifests', 'true').lower() \
+ == 'true'
+
+ data['thin-manifest'] = layout_data.get('thin-manifests', 'false').lower() \
+ == 'true'
+
+ data['repo-name'] = _gen_valid_repo(layout_data.get('repo-name', ''))
+
+ manifest_policy = layout_data.get('use-manifests', 'strict').lower()
+ data['allow-missing-manifest'] = manifest_policy != 'strict'
+ data['create-manifest'] = manifest_policy != 'false'
+ data['disable-manifest'] = manifest_policy == 'false'
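+	# Summary of the mapping above, for illustration:
+	#   use-manifests = strict -> manifests required and created (the default)
+	#   use-manifests = true   -> missing manifests tolerated, still created
+	#   use-manifests = false  -> manifest support disabled entirely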
+
+	# For compatibility with PMS, fall back to the 'pms' format, but only
+	# after checking whether the corresponding cache directory exists.
+ cache_formats = layout_data.get('cache-formats', '').lower().split()
+ if not cache_formats:
+ # Auto-detect cache formats, and prefer md5-cache if available.
+ # This behavior was deployed in portage-2.1.11.14, so that the
+ # default egencache format could eventually be changed to md5-dict
+ # in portage-2.1.11.32. WARNING: Versions prior to portage-2.1.11.14
+ # will NOT recognize md5-dict format unless it is explicitly
+ # listed in layout.conf.
+ cache_formats = []
+ if os.path.isdir(os.path.join(repo_location, 'metadata', 'md5-cache')):
+ cache_formats.append('md5-dict')
+ if os.path.isdir(os.path.join(repo_location, 'metadata', 'cache')):
+ cache_formats.append('pms')
+ data['cache-formats'] = tuple(cache_formats)
+
+ manifest_hashes = layout_data.get('manifest-hashes')
+ manifest_required_hashes = layout_data.get('manifest-required-hashes')
+
+ if manifest_required_hashes is not None and manifest_hashes is None:
+ repo_name = _get_repo_name(repo_location, cached=repo_name)
+ warnings.warn((_("Repository named '%(repo_name)s' specifies "
+ "'manifest-required-hashes' setting without corresponding "
+ "'manifest-hashes'. Portage will default it to match "
+ "the required set but please add the missing entry "
+ "to: %(layout_filename)s") %
+ {"repo_name": repo_name or 'unspecified',
+ "layout_filename":layout_filename}),
+ SyntaxWarning)
+ manifest_hashes = manifest_required_hashes
+
+ if manifest_hashes is not None:
+ # require all the hashes unless specified otherwise
+ if manifest_required_hashes is None:
+ manifest_required_hashes = manifest_hashes
+
+ manifest_required_hashes = frozenset(manifest_required_hashes.upper().split())
+ manifest_hashes = frozenset(manifest_hashes.upper().split())
+ missing_required_hashes = manifest_required_hashes.difference(
+ manifest_hashes)
+ if missing_required_hashes:
+ repo_name = _get_repo_name(repo_location, cached=repo_name)
+ warnings.warn((_("Repository named '%(repo_name)s' has a "
+ "'manifest-hashes' setting that does not contain "
+ "the '%(hash)s' hashes which are listed in "
+ "'manifest-required-hashes'. Please fix that file "
+ "if you want to generate valid manifests for this "
+ "repository: %(layout_filename)s") %
+ {"repo_name": repo_name or 'unspecified',
+ "hash": ' '.join(missing_required_hashes),
+ "layout_filename":layout_filename}),
+ SyntaxWarning)
+ unsupported_hashes = manifest_hashes.difference(
+ get_valid_checksum_keys())
+ if unsupported_hashes:
+ repo_name = _get_repo_name(repo_location, cached=repo_name)
+ warnings.warn((_("Repository named '%(repo_name)s' has a "
+ "'manifest-hashes' setting that contains one "
+ "or more hash types '%(hashes)s' which are not supported by "
+ "this portage version. You will have to upgrade "
+ "portage if you want to generate valid manifests for "
+ "this repository: %(layout_filename)s") %
+ {"repo_name": repo_name or 'unspecified',
+ "hashes":" ".join(sorted(unsupported_hashes)),
+ "layout_filename":layout_filename}),
+ DeprecationWarning)
+
+ data['manifest-hashes'] = manifest_hashes
+ data['manifest-required-hashes'] = manifest_required_hashes
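+	# Example for illustration (hypothetical layout.conf values):
+	#   manifest-hashes = BLAKE2B SHA512
+	#   manifest-required-hashes = BLAKE2B
+	# yields frozenset({'BLAKE2B', 'SHA512'}) and frozenset({'BLAKE2B'}).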
+
+ data['update-changelog'] = layout_data.get('update-changelog', 'false').lower() \
+ == 'true'
+
+ raw_formats = layout_data.get('profile-formats')
+ if raw_formats is None:
+ if eapi_allows_directories_on_profile_level_and_repository_level(eapi):
+ raw_formats = ('portage-1',)
+ else:
+ raw_formats = ('portage-1-compat',)
+ else:
+ raw_formats = set(raw_formats.split())
+ unknown = raw_formats.difference(_valid_profile_formats)
+ if unknown:
+ repo_name = _get_repo_name(repo_location, cached=repo_name)
+ warnings.warn((_("Repository named '%(repo_name)s' has unsupported "
+ "profiles in use ('profile-formats = %(unknown_fmts)s' setting in "
+				"'%(layout_filename)s'; please upgrade portage.") %
+ dict(repo_name=repo_name or 'unspecified',
+ layout_filename=layout_filename,
+ unknown_fmts=" ".join(unknown))),
+ DeprecationWarning)
+ raw_formats = tuple(raw_formats.intersection(_valid_profile_formats))
+ data['profile-formats'] = raw_formats
+
+ try:
+ eapi = layout_data['profile_eapi_when_unspecified']
+ except KeyError:
+ pass
+ else:
+ if 'profile-default-eapi' not in raw_formats:
+ warnings.warn((_("Repository named '%(repo_name)s' has "
+ "profile_eapi_when_unspecified setting in "
+ "'%(layout_filename)s', but 'profile-default-eapi' is "
+ "not listed in the profile-formats field. Please "
+ "report this issue to the repository maintainer.") %
+ dict(repo_name=repo_name or 'unspecified',
+ layout_filename=layout_filename)),
+ SyntaxWarning)
+ elif not portage.eapi_is_supported(eapi):
+ warnings.warn((_("Repository named '%(repo_name)s' has "
+ "unsupported EAPI '%(eapi)s' setting in "
+ "'%(layout_filename)s'; please upgrade portage.") %
+ dict(repo_name=repo_name or 'unspecified',
+ eapi=eapi, layout_filename=layout_filename)),
+ SyntaxWarning)
+ else:
+ data['profile_eapi_when_unspecified'] = eapi
+
+ return data, layout_errors
diff --git a/lib/portage/sync/__init__.py b/lib/portage/sync/__init__.py
new file mode 100644
index 000000000..805b1f270
--- /dev/null
+++ b/lib/portage/sync/__init__.py
@@ -0,0 +1,52 @@
+# Copyright 2014-2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+
+from portage import OrderedDict
+from portage.module import Modules
+from portage.sync.controller import SyncManager
+from portage.sync.config_checks import check_type
+
+_SUBMODULE_PATH_MAP = OrderedDict([
+ ('glsa', ('metadata/glsa',)),
+ ('news', ('metadata/news',)),
+ ('profiles', ('metadata/layout.conf', 'profiles')),
+])
+
+path = os.path.join(os.path.dirname(__file__), "modules")
+# initial development debug info
+#print("module path:", path)
+
+module_controller = Modules(path=path, namepath="portage.sync.modules")
+
+# initial development debug info
+#print(module_controller.module_names)
+module_names = module_controller.module_names[:]
+
+
+def module_specific_options(repo):
+	'''Get the authorized module-specific options defined for
+	the given repo's repos.conf settings'''
+ global module_controller
+
+ if repo.sync_type:
+ try:
+ return frozenset(
+ module_controller.modules[repo.sync_type]['module_specific_options'])
+ except KeyError:
+ pass
+ return frozenset()
+
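+# For illustration (hypothetical repo): with sync-type = git this returns the
+# option names declared by the git module, such as 'sync-git-clone-extra-opts',
+# so that only those keys are read from the repo's repos.conf section
+# (see RepoConfigLoader._parse).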
+
+def validate_config(repo, logger):
+ '''Validate the repos.conf settings for the repo'''
+ global module_names, module_controller
+ if not check_type(repo, logger, module_names):
+ return False
+
+ #print(repo)
+ if repo.sync_type:
+ validated = module_controller.modules[repo.sync_type]['validate_config']
+ return validated(repo, logger).repo_checks()
+ return True
diff --git a/lib/portage/sync/config_checks.py b/lib/portage/sync/config_checks.py
new file mode 100644
index 000000000..db316aa88
--- /dev/null
+++ b/lib/portage/sync/config_checks.py
@@ -0,0 +1,72 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+'''
+Base class for performing repos.conf sync variables checks.
+This class contains common checks code and functions.
+
+For additional checks or other customizations,
+subclass it, adding and/or overriding methods as needed.
+'''
+
+import logging
+
+from portage.localization import _
+from portage.util import writemsg_level
+
+
+def check_type(repo, logger, module_names):
+ if repo.sync_uri is not None and repo.sync_type is None:
+ writemsg_level("!!! %s\n" %
+ _("Repository '%s' has sync-uri attribute, but is missing sync-type attribute")
+ % repo.name, level=logger.ERROR, noiselevel=-1)
+ return False
+ if repo.sync_type not in module_names + [None]:
+ writemsg_level("!!! %s\n" %
+ _("Repository '%s' has sync-type attribute set to unsupported value: '%s'")
+ % (repo.name, repo.sync_type),
+ level=logger.ERROR, noiselevel=-1)
+ writemsg_level("!!! %s\n" %
+ _("Installed sync-types are: '%s'")
+ % (str(module_names)),
+ level=logger.ERROR, noiselevel=-1)
+ return False
+ return True
+
+
+class CheckSyncConfig(object):
+ '''Base repos.conf settings checks class'''
+
+ def __init__(self, repo=None, logger=None):
+ '''Class init function
+
+ @param logger: optional logging instance,
+ defaults to logging module
+ '''
+ self.logger = logger or logging
+ self.repo = repo
+ self.checks = ['check_uri', 'check_auto_sync']
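+		# Sync modules extend this list in their own CheckSyncConfig
+		# subclasses; in this change the git module appends 'check_depth'
+		# and the cvs module appends 'check_cvs_repo'.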
+
+
+ def repo_checks(self):
+ '''Perform all checks available'''
+ for check in self.checks:
+ getattr(self, check)()
+
+
+ def check_uri(self):
+ '''Check the sync_uri setting'''
+ if self.repo.sync_uri is None:
+ writemsg_level("!!! %s\n" % _("Repository '%s' has sync-type attribute, but is missing sync-uri attribute")
+ % self.repo.name, level=self.logger.ERROR, noiselevel=-1)
+
+
+ def check_auto_sync(self):
+ '''Check the auto_sync setting'''
+ if self.repo.auto_sync is None:
+ writemsg_level("!!! %s\n" % _("Repository '%s' is missing auto_sync attribute")
+ % self.repo.name, level=self.logger.ERROR, noiselevel=-1)
+ elif self.repo.auto_sync.lower() not in ["yes", "true", "no", "false"]:
+ writemsg_level("!!! %s\n" % _("Repository '%s' auto_sync attribute must be one of: %s")
+ % (self.repo.name, '{yes, true, no, false}'),
+ level=self.logger.ERROR, noiselevel=-1)
diff --git a/lib/portage/sync/controller.py b/lib/portage/sync/controller.py
new file mode 100644
index 000000000..3bccf6f74
--- /dev/null
+++ b/lib/portage/sync/controller.py
@@ -0,0 +1,397 @@
+# Copyright 2014-2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+
+import sys
+import logging
+import grp
+import pwd
+import warnings
+
+import portage
+from portage import os
+from portage.progress import ProgressBar
+#from portage.emaint.defaults import DEFAULT_OPTIONS
+from portage.util import writemsg, writemsg_level
+from portage.output import create_color_func
+good = create_color_func("GOOD")
+bad = create_color_func("BAD")
+warn = create_color_func("WARN")
+from portage.package.ebuild.doebuild import _check_temp_dir
+from portage.metadata import action_metadata
+from portage.util._async.AsyncFunction import AsyncFunction
+from portage import OrderedDict
+from portage import _unicode_decode
+from portage import util
+from _emerge.CompositeTask import CompositeTask
+
+
+class TaskHandler(object):
+ """Handles the running of the tasks it is given
+ """
+
+ def __init__(self, show_progress_bar=True, verbose=True, callback=None):
+ self.show_progress_bar = show_progress_bar
+ self.verbose = verbose
+ self.callback = callback
+ self.isatty = os.environ.get('TERM') != 'dumb' and sys.stdout.isatty()
+ self.progress_bar = ProgressBar(self.isatty, title="Portage-Sync", max_desc_length=27)
+
+
+ def run_tasks(self, tasks, func, status=None, verbose=True, options=None):
+ """Runs the module tasks"""
+ # Ensure we have a task and function
+ assert(tasks)
+ assert(func)
+ for task in tasks:
+ inst = task()
+ show_progress = self.show_progress_bar and self.isatty
+ # check if the function is capable of progressbar
+ # and possibly override it off
+ if show_progress and hasattr(inst, 'can_progressbar'):
+ show_progress = inst.can_progressbar(func)
+ if show_progress:
+ self.progress_bar.reset()
+ self.progress_bar.set_label(func + " " + inst.name())
+ onProgress = self.progress_bar.start()
+ else:
+ onProgress = None
+ kwargs = {
+ 'onProgress': onProgress,
+ # pass in a copy of the options so a module can not pollute or change
+ # them for other tasks if there is more to do.
+ 'options': options.copy()
+ }
+ result = getattr(inst, func)(**kwargs)
+ if show_progress:
+ # make sure the final progress is displayed
+ self.progress_bar.display()
+ print()
+ self.progress_bar.stop()
+ if self.callback:
+ self.callback(result)
+
+
+def print_results(results):
+ if results:
+ print()
+ print("\n".join(results))
+ print("\n")
+
+
+class SyncManager(object):
+ '''Main sync control module'''
+
+ def __init__(self, settings, logger):
+ self.settings = settings
+ self.logger = logger
+ # Similar to emerge, sync needs a default umask so that created
+ # files have sane permissions.
+ os.umask(0o22)
+
+ self.module_controller = portage.sync.module_controller
+ self.module_names = self.module_controller.module_names
+ self.hooks = {}
+ for _dir in ["repo.postsync.d", "postsync.d"]:
+ postsync_dir = os.path.join(self.settings["PORTAGE_CONFIGROOT"],
+ portage.USER_CONFIG_PATH, _dir)
+ hooks = OrderedDict()
+ for filepath in util._recursive_file_list(postsync_dir):
+ name = filepath.split(postsync_dir)[1].lstrip(os.sep)
+ if os.access(filepath, os.X_OK):
+ hooks[filepath] = name
+ else:
+ writemsg_level(" %s %s hook: '%s' is not executable\n"
+ % (warn("*"), _dir, _unicode_decode(name),),
+ level=logging.WARN, noiselevel=2)
+ self.hooks[_dir] = hooks
+
+ def __getattr__(self, name):
+ if name == 'async':
+ warnings.warn("portage.sync.controller.SyncManager.async "
+ "has been renamed to sync_async",
+ DeprecationWarning, stacklevel=2)
+ return self.sync_async
+ else:
+ raise AttributeError(name)
+
+ def get_module_descriptions(self, mod):
+ desc = self.module_controller.get_func_descriptions(mod)
+ if desc:
+ return desc
+ return []
+
+ def sync_async(self, emerge_config=None, repo=None, master_hooks=True):
+ self.emerge_config = emerge_config
+ self.settings, self.trees, self.mtimedb = emerge_config
+ self.xterm_titles = "notitles" not in self.settings.features
+ self.portdb = self.trees[self.settings['EROOT']]['porttree'].dbapi
+ return SyncRepo(sync_task=AsyncFunction(target=self.sync,
+ kwargs=dict(emerge_config=emerge_config, repo=repo,
+ master_hooks=master_hooks)),
+ sync_callback=self._sync_callback)
+
+ def sync(self, emerge_config=None, repo=None, master_hooks=True):
+ self.callback = None
+ self.repo = repo
+ self.exitcode = 1
+ self.updatecache_flg = False
+ hooks_enabled = master_hooks or not repo.sync_hooks_only_on_change
+ if repo.sync_type in self.module_names:
+ tasks = [self.module_controller.get_class(repo.sync_type)]
+ else:
+			msg = "\n%s: Sync module '%s' is not an installed/known type\n" \
+ % (bad("ERROR"), repo.sync_type)
+ return self.exitcode, msg, self.updatecache_flg, hooks_enabled
+
+ rval = self.pre_sync(repo)
+ if rval != os.EX_OK:
+ return rval, None, self.updatecache_flg, hooks_enabled
+
+ # need to pass the kwargs dict to the modules
+ # so they are available if needed.
+ task_opts = {
+ 'emerge_config': emerge_config,
+ 'logger': self.logger,
+ 'portdb': self.portdb,
+ 'repo': repo,
+ 'settings': self.settings,
+ 'spawn_kwargs': self.spawn_kwargs,
+ 'usersync_uid': self.usersync_uid,
+ 'xterm_titles': self.xterm_titles,
+ }
+ func = 'sync'
+ status = None
+ taskmaster = TaskHandler(callback=self.do_callback)
+ taskmaster.run_tasks(tasks, func, status, options=task_opts)
+
+ if (master_hooks or self.updatecache_flg or
+ not repo.sync_hooks_only_on_change):
+ hooks_enabled = True
+ self.perform_post_sync_hook(
+ repo.name, repo.sync_uri, repo.location)
+
+ return self.exitcode, None, self.updatecache_flg, hooks_enabled
+
+
+ def do_callback(self, result):
+ #print("result:", result, "callback()", self.callback)
+ exitcode, updatecache_flg = result
+ self.exitcode = exitcode
+ self.updatecache_flg = updatecache_flg
+ if exitcode == 0:
+ msg = "=== Sync completed for %s" % self.repo.name
+ self.logger(self.xterm_titles, msg)
+ writemsg_level(msg + "\n")
+ if self.callback:
+ self.callback(exitcode, updatecache_flg)
+ return
+
+
+ def perform_post_sync_hook(self, reponame, dosyncuri='', repolocation=''):
+ succeeded = os.EX_OK
+ if reponame:
+ _hooks = self.hooks["repo.postsync.d"]
+ else:
+ _hooks = self.hooks["postsync.d"]
+			# Overlay priorities are assigned in PORTDIR_OVERLAY order so
+			# that overlays take precedence over the main repo.
+ writemsg_level("Spawning post_sync hook: %s\n"
+ % (_unicode_decode(_hooks[filepath])),
+ level=logging.ERROR, noiselevel=4)
+ if reponame:
+ retval = portage.process.spawn(
+ [filepath, reponame, dosyncuri, repolocation],
+ env=self.settings.environ())
+ else:
+ retval = portage.process.spawn([filepath],
+ env=self.settings.environ())
+ if retval != os.EX_OK:
+ writemsg_level(" %s Spawn failed for: %s, %s\n" % (bad("*"),
+ _unicode_decode(_hooks[filepath]), filepath),
+ level=logging.ERROR, noiselevel=-1)
+ succeeded = retval
+ return succeeded
+
+
+ def pre_sync(self, repo):
+ msg = ">>> Syncing repository '%s' into '%s'..." \
+ % (repo.name, repo.location)
+ self.logger(self.xterm_titles, msg)
+ writemsg_level(msg + "\n")
+ try:
+ st = os.stat(repo.location)
+ except OSError:
+ st = None
+
+ self.usersync_uid = None
+ spawn_kwargs = {}
+ # Redirect command stderr to stdout, in order to prevent
+ # spurious cron job emails (bug 566132).
+ spawn_kwargs["fd_pipes"] = {
+ 0: sys.__stdin__.fileno(),
+ 1: sys.__stdout__.fileno(),
+ 2: sys.__stdout__.fileno()
+ }
+ spawn_kwargs["env"] = self.settings.environ()
+ if repo.sync_user is not None:
+ def get_sync_user_data(sync_user):
+ user = None
+ group = None
+ home = None
+ logname = None
+
+ spl = sync_user.split(':', 1)
+ if spl[0]:
+ username = spl[0]
+ try:
+ try:
+ pw = pwd.getpwnam(username)
+ except KeyError:
+ pw = pwd.getpwuid(int(username))
+ except (ValueError, KeyError):
+ writemsg("!!! User '%s' invalid or does not exist\n"
+ % username, noiselevel=-1)
+ return (logname, user, group, home)
+ user = pw.pw_uid
+ group = pw.pw_gid
+ home = pw.pw_dir
+ logname = pw.pw_name
+
+ if len(spl) > 1:
+ groupname = spl[1]
+ try:
+ try:
+ gp = grp.getgrnam(groupname)
+ except KeyError:
+							gp = grp.getgrgid(int(groupname))
+ except (ValueError, KeyError):
+ writemsg("!!! Group '%s' invalid or does not exist\n"
+ % groupname, noiselevel=-1)
+ return (logname, user, group, home)
+
+ group = gp.gr_gid
+
+ return (logname, user, group, home)
+
+ # user or user:group
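+			# Hypothetical examples: sync-user = "portage" takes uid, gid,
+			# home and logname from the passwd entry, while
+			# sync-user = "portage:portage" additionally overrides the group.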
+ (logname, uid, gid, home) = get_sync_user_data(
+ repo.sync_user)
+ if uid is not None:
+ spawn_kwargs["uid"] = uid
+ self.usersync_uid = uid
+ if gid is not None:
+ spawn_kwargs["gid"] = gid
+ spawn_kwargs["groups"] = [gid]
+ if home is not None:
+ spawn_kwargs["env"]["HOME"] = home
+ if logname is not None:
+ spawn_kwargs["env"]["LOGNAME"] = logname
+
+ if st is None:
+ perms = {'mode': 0o755}
+ # respect sync-user if set
+ if 'umask' in spawn_kwargs:
+ perms['mode'] &= ~spawn_kwargs['umask']
+ if 'uid' in spawn_kwargs:
+ perms['uid'] = spawn_kwargs['uid']
+ if 'gid' in spawn_kwargs:
+ perms['gid'] = spawn_kwargs['gid']
+
+ portage.util.ensure_dirs(repo.location, **perms)
+ st = os.stat(repo.location)
+
+ if (repo.sync_user is None and
+ 'usersync' in self.settings.features and
+ portage.data.secpass >= 2 and
+ (st.st_uid != os.getuid() and st.st_mode & 0o700 or
+ st.st_gid != os.getgid() and st.st_mode & 0o070)):
+ try:
+ pw = pwd.getpwuid(st.st_uid)
+ except KeyError:
+ pass
+ else:
+ # Drop privileges when syncing, in order to match
+ # existing uid/gid settings.
+ self.usersync_uid = st.st_uid
+ spawn_kwargs["uid"] = st.st_uid
+ spawn_kwargs["gid"] = st.st_gid
+ spawn_kwargs["groups"] = [st.st_gid]
+ spawn_kwargs["env"]["HOME"] = pw.pw_dir
+ spawn_kwargs["env"]["LOGNAME"] = pw.pw_name
+ umask = 0o002
+ if not st.st_mode & 0o020:
+ umask = umask | 0o020
+ spawn_kwargs["umask"] = umask
+ # override the defaults when sync_umask is set
+ if repo.sync_umask is not None:
+ spawn_kwargs["umask"] = int(repo.sync_umask, 8)
+ self.spawn_kwargs = spawn_kwargs
+
+ if self.usersync_uid is not None:
+ # PORTAGE_TMPDIR is used below, so validate it and
+ # bail out if necessary.
+ rval = _check_temp_dir(self.settings)
+ if rval != os.EX_OK:
+ return rval
+
+ os.umask(0o022)
+ return os.EX_OK
+
+ def _sync_callback(self, proc):
+ """
+ This is called in the parent process, serially, for each of the
+ sync jobs when they complete. Some cache backends such as sqlite
+ may require that cache access be performed serially in the
+ parent process like this.
+ """
+ repo = proc.kwargs['repo']
+ exitcode = proc.returncode
+ updatecache_flg = False
+ if proc.returncode == os.EX_OK:
+ exitcode, message, updatecache_flg, hooks_enabled = proc.result
+
+ if updatecache_flg and "metadata-transfer" not in self.settings.features:
+ updatecache_flg = False
+
+ if updatecache_flg and \
+ os.path.exists(os.path.join(
+ repo.location, 'metadata', 'md5-cache')):
+
+ # Only update cache for repo.location since that's
+ # the only one that's been synced here.
+ action_metadata(self.settings, self.portdb, self.emerge_config.opts,
+ porttrees=[repo.location])
+
+
+class SyncRepo(CompositeTask):
+ """
+ Encapsulates a sync operation and the callback which executes afterwards,
+ so both can be considered as a single composite task. This is useful
+ since we don't want to consider a particular repo's sync operation as
+ complete until after the callback has executed (bug 562264).
+
+ The kwargs and result properties expose attributes that are accessed
+ by SyncScheduler.
+ """
+
+ __slots__ = ('sync_task', 'sync_callback')
+
+ @property
+ def kwargs(self):
+ return self.sync_task.kwargs
+
+ @property
+ def result(self):
+ return self.sync_task.result
+
+ def _start(self):
+ self._start_task(self.sync_task, self._sync_task_exit)
+
+ def _sync_task_exit(self, sync_task):
+ self._current_task = None
+ self.returncode = sync_task.returncode
+ self.sync_callback(self.sync_task)
+ self._async_wait()
+
diff --git a/lib/portage/sync/getaddrinfo_validate.py b/lib/portage/sync/getaddrinfo_validate.py
new file mode 100644
index 000000000..5e6009c74
--- /dev/null
+++ b/lib/portage/sync/getaddrinfo_validate.py
@@ -0,0 +1,29 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+def getaddrinfo_validate(addrinfos):
+ """
+ Validate structures returned from getaddrinfo(),
+ since they may be corrupt, especially when python
+ has IPv6 support disabled (bug #340899).
+ """
+ valid_addrinfos = []
+ for addrinfo in addrinfos:
+ try:
+ if len(addrinfo) != 5:
+ continue
+ if len(addrinfo[4]) < 2:
+ continue
+ if not isinstance(addrinfo[4][0], basestring):
+ continue
+ except TypeError:
+ continue
+
+ valid_addrinfos.append(addrinfo)
+
+ return valid_addrinfos
diff --git a/lib/portage/sync/modules/__init__.py b/lib/portage/sync/modules/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/sync/modules/__init__.py
diff --git a/lib/portage/sync/modules/cvs/__init__.py b/lib/portage/sync/modules/cvs/__init__.py
new file mode 100644
index 000000000..8025a2907
--- /dev/null
+++ b/lib/portage/sync/modules/cvs/__init__.py
@@ -0,0 +1,47 @@
+# Copyright 2014-2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+doc = """CVS plug-in module for portage.
+Performs a cvs up on repositories."""
+__doc__ = doc[:]
+
+from portage.localization import _
+from portage.sync.config_checks import CheckSyncConfig
+from portage.util import writemsg_level
+
+
+class CheckCVSConfig(CheckSyncConfig):
+
+ def __init__(self, repo, logger):
+ CheckSyncConfig.__init__(self, repo, logger)
+ self.checks.append('check_cvs_repo')
+
+
+ def check_cvs_repo(self):
+ if self.repo.module_specific_options.get('sync-cvs-repo') is None:
+ writemsg_level("!!! %s\n" %
+ _("Repository '%s' has sync-type=cvs, but is missing sync-cvs-repo attribute")
+ % self.repo.name, level=self.logger.ERROR, noiselevel=-1)
+
+
+module_spec = {
+ 'name': 'cvs',
+ 'description': doc,
+ 'provides':{
+ 'cvs-module': {
+ 'name': "cvs",
+ 'sourcefile': "cvs",
+ 'class': "CVSSync",
+ 'description': doc,
+ 'functions': ['sync', 'new', 'exists'],
+ 'func_desc': {
+ 'sync': 'Performs a cvs up on the repository',
+ 'new': 'Creates the new repository at the specified location',
+ 'exists': 'Returns a boolean of whether the specified dir ' +
+ 'exists and is a valid CVS repository',
+ },
+ 'validate_config': CheckCVSConfig,
+ 'module_specific_options': ("sync-cvs-repo",),
+ }
+ }
+}
diff --git a/lib/portage/sync/modules/cvs/cvs.py b/lib/portage/sync/modules/cvs/cvs.py
new file mode 100644
index 000000000..e202560c3
--- /dev/null
+++ b/lib/portage/sync/modules/cvs/cvs.py
@@ -0,0 +1,67 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+
+import portage
+from portage import os
+from portage.util import writemsg_level
+from portage.sync.syncbase import NewBase
+
+
+class CVSSync(NewBase):
+ '''CVS sync module'''
+
+ short_desc = "Perform sync operations on CVS repositories"
+
+ @staticmethod
+ def name():
+ return "CVSSync"
+
+
+ def __init__(self):
+ NewBase.__init__(self, "cvs", portage.const.CVS_PACKAGE_ATOM)
+
+
+ def exists(self, **kwargs):
+ '''Tests whether the repo is checked out'''
+ return os.path.exists(os.path.join(self.repo.location, 'CVS'))
+
+
+ def new(self, **kwargs):
+ if kwargs:
+ self._kwargs(kwargs)
+ #initial checkout
+ cvs_root = self.repo.sync_uri
+ if portage.process.spawn_bash(
+ "cd %s; exec cvs -z0 -d %s co -P -d %s %s" %
+ (portage._shell_quote(os.path.dirname(self.repo.location)), portage._shell_quote(cvs_root),
+ portage._shell_quote(os.path.basename(self.repo.location)),
+ portage._shell_quote(self.repo.module_specific_options["sync-cvs-repo"])),
+ **self.spawn_kwargs) != os.EX_OK:
+ msg = "!!! cvs checkout error; exiting."
+ self.logger(self.xterm_titles, msg)
+ writemsg_level(msg + "\n", noiselevel=-1, level=logging.ERROR)
+ return (1, False)
+ return (0, False)
+
+
+ def update(self):
+ """
+ Internal function to update an existing CVS repository
+
+ @return: tuple of return code (0=success), whether the cache
+ needs to be updated
+ @rtype: (int, bool)
+ """
+
+ #cvs update
+ exitcode = portage.process.spawn_bash(
+ "cd %s; exec cvs -z0 -q update -dP" % \
+ (portage._shell_quote(self.repo.location),),
+ **self.spawn_kwargs)
+ if exitcode != os.EX_OK:
+ msg = "!!! cvs update error; exiting."
+ self.logger(self.xterm_titles, msg)
+ writemsg_level(msg + "\n", noiselevel=-1, level=logging.ERROR)
+ return (exitcode, False)
diff --git a/lib/portage/sync/modules/git/__init__.py b/lib/portage/sync/modules/git/__init__.py
new file mode 100644
index 000000000..270d97186
--- /dev/null
+++ b/lib/portage/sync/modules/git/__init__.py
@@ -0,0 +1,65 @@
+# Copyright 2014-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+doc = """Git plug-in module for portage.
+Performs a git pull on repositories."""
+__doc__ = doc[:]
+
+from portage.localization import _
+from portage.sync.config_checks import CheckSyncConfig
+from portage.util import writemsg_level
+
+
+class CheckGitConfig(CheckSyncConfig):
+ def __init__(self, repo, logger):
+ CheckSyncConfig.__init__(self, repo, logger)
+ self.checks.append('check_depth')
+
+ def check_depth(self):
+ for attr in ('clone_depth', 'sync_depth'):
+ self._check_depth(attr)
+
+ def _check_depth(self, attr):
+ d = getattr(self.repo, attr)
+
+ if d is not None:
+ try:
+ d = int(d)
+ except ValueError:
+ writemsg_level("!!! %s\n" %
+ _("%s value is not a number: '%s'")
+ % (attr.replace('_', '-'), d),
+ level=self.logger.ERROR, noiselevel=-1)
+ else:
+ setattr(self.repo, attr, d)
+
+
+module_spec = {
+ 'name': 'git',
+ 'description': doc,
+ 'provides':{
+ 'git-module': {
+ 'name': "git",
+ 'sourcefile': "git",
+ 'class': "GitSync",
+ 'description': doc,
+ 'functions': ['sync', 'new', 'exists', 'retrieve_head'],
+ 'func_desc': {
+ 'sync': 'Performs a git pull on the repository',
+ 'new': 'Creates the new repository at the specified location',
+ 'exists': 'Returns a boolean of whether the specified dir ' +
+ 'exists and is a valid Git repository',
+ 'retrieve_head': 'Returns the head commit hash',
+ },
+ 'validate_config': CheckGitConfig,
+ 'module_specific_options': (
+ 'sync-git-clone-env',
+ 'sync-git-clone-extra-opts',
+ 'sync-git-env',
+ 'sync-git-pull-env',
+ 'sync-git-pull-extra-opts',
+ 'sync-git-verify-commit-signature',
+ ),
+ }
+ }
+}
diff --git a/lib/portage/sync/modules/git/git.py b/lib/portage/sync/modules/git/git.py
new file mode 100644
index 000000000..2fb82c600
--- /dev/null
+++ b/lib/portage/sync/modules/git/git.py
@@ -0,0 +1,286 @@
+# Copyright 2005-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import logging
+import subprocess
+
+import portage
+from portage import os
+from portage.util import writemsg_level, shlex_split
+from portage.util.futures import asyncio
+from portage.output import create_color_func, EOutput
+good = create_color_func("GOOD")
+bad = create_color_func("BAD")
+warn = create_color_func("WARN")
+from portage.sync.syncbase import NewBase
+
+try:
+ from gemato.exceptions import GematoException
+ import gemato.openpgp
+except ImportError:
+ gemato = None
+
+
+class GitSync(NewBase):
+ '''Git sync class'''
+
+ short_desc = "Perform sync operations on git based repositories"
+
+ @staticmethod
+ def name():
+ return "GitSync"
+
+
+ def __init__(self):
+ NewBase.__init__(self, "git", portage.const.GIT_PACKAGE_ATOM)
+
+
+ def exists(self, **kwargs):
+ '''Tests whether the repo actually exists'''
+ return os.path.exists(os.path.join(self.repo.location, '.git'))
+
+
+ def new(self, **kwargs):
+ '''Do the initial clone of the repository'''
+ if kwargs:
+ self._kwargs(kwargs)
+ if not self.has_bin:
+ return (1, False)
+ try:
+ if not os.path.exists(self.repo.location):
+ os.makedirs(self.repo.location)
+ self.logger(self.xterm_titles,
+ 'Created new directory %s' % self.repo.location)
+ except IOError:
+ return (1, False)
+
+ sync_uri = self.repo.sync_uri
+ if sync_uri.startswith("file://"):
+ sync_uri = sync_uri[7:]
+
+ git_cmd_opts = ""
+ if self.repo.module_specific_options.get('sync-git-env'):
+ shlexed_env = shlex_split(self.repo.module_specific_options['sync-git-env'])
+ env = dict((k, v) for k, _, v in (assignment.partition('=') for assignment in shlexed_env) if k)
+ self.spawn_kwargs['env'].update(env)
+
+ if self.repo.module_specific_options.get('sync-git-clone-env'):
+ shlexed_env = shlex_split(self.repo.module_specific_options['sync-git-clone-env'])
+ clone_env = dict((k, v) for k, _, v in (assignment.partition('=') for assignment in shlexed_env) if k)
+ self.spawn_kwargs['env'].update(clone_env)
+
+ if self.settings.get("PORTAGE_QUIET") == "1":
+ git_cmd_opts += " --quiet"
+ if self.repo.clone_depth is not None:
+ if self.repo.clone_depth != 0:
+ git_cmd_opts += " --depth %d" % self.repo.clone_depth
+ elif self.repo.sync_depth is not None:
+ if self.repo.sync_depth != 0:
+ git_cmd_opts += " --depth %d" % self.repo.sync_depth
+ else:
+ # default: create a shallow clone with depth 1
+ git_cmd_opts += " --depth 1"
+
+ if self.repo.module_specific_options.get('sync-git-clone-extra-opts'):
+ git_cmd_opts += " %s" % self.repo.module_specific_options['sync-git-clone-extra-opts']
+ git_cmd = "%s clone%s %s ." % (self.bin_command, git_cmd_opts,
+ portage._shell_quote(sync_uri))
+ writemsg_level(git_cmd + "\n")
+
+ exitcode = portage.process.spawn_bash("cd %s ; exec %s" % (
+ portage._shell_quote(self.repo.location), git_cmd),
+ **self.spawn_kwargs)
+ if exitcode != os.EX_OK:
+ msg = "!!! git clone error in %s" % self.repo.location
+ self.logger(self.xterm_titles, msg)
+ writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
+ return (exitcode, False)
+ if not self.verify_head():
+ return (1, False)
+ return (os.EX_OK, True)
+
+
+ def update(self):
+ ''' Update existing git repository, and ignore the syncuri. We are
+ going to trust the user and assume that the user is in the branch
+ that he/she wants updated. We'll let the user manage branches with
+ git directly.
+ '''
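+ # Descriptive outline of the steps below: resolve the upstream branch
+ # with `git rev-parse --abbrev-ref --symbolic-full-name @{upstream}`,
+ # run `git gc --auto` to prune unreachable objects, `git fetch` from the
+ # upstream remote, verify the fetched head if configured, and finally
+ # merge (or `git reset --merge` for shallow repositories).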
+ if not self.has_bin:
+ return (1, False)
+ git_cmd_opts = ""
+ quiet = self.settings.get("PORTAGE_QUIET") == "1"
+ if self.repo.module_specific_options.get('sync-git-env'):
+ shlexed_env = shlex_split(self.repo.module_specific_options['sync-git-env'])
+ env = dict((k, v) for k, _, v in (assignment.partition('=') for assignment in shlexed_env) if k)
+ self.spawn_kwargs['env'].update(env)
+
+ if self.repo.module_specific_options.get('sync-git-pull-env'):
+ shlexed_env = shlex_split(self.repo.module_specific_options['sync-git-pull-env'])
+ pull_env = dict((k, v) for k, _, v in (assignment.partition('=') for assignment in shlexed_env) if k)
+ self.spawn_kwargs['env'].update(pull_env)
+
+ if self.settings.get("PORTAGE_QUIET") == "1":
+ git_cmd_opts += " --quiet"
+ if self.repo.module_specific_options.get('sync-git-pull-extra-opts'):
+ git_cmd_opts += " %s" % self.repo.module_specific_options['sync-git-pull-extra-opts']
+
+ try:
+ remote_branch = portage._unicode_decode(
+ subprocess.check_output([self.bin_command, 'rev-parse',
+ '--abbrev-ref', '--symbolic-full-name', '@{upstream}'],
+ cwd=portage._unicode_encode(self.repo.location))).rstrip('\n')
+ except subprocess.CalledProcessError as e:
+ msg = "!!! git rev-parse error in %s" % self.repo.location
+ self.logger(self.xterm_titles, msg)
+ writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
+ return (e.returncode, False)
+
+ shallow = self.repo.sync_depth is not None and self.repo.sync_depth != 0
+ if shallow:
+ git_cmd_opts += " --depth %d" % self.repo.sync_depth
+
+ # For shallow fetch, unreachable objects may need to be pruned
+ # manually, in order to prevent automatic git gc calls from
+ # eventually failing (see bug 599008).
+ gc_cmd = ['git', '-c', 'gc.autodetach=false', 'gc', '--auto']
+ if quiet:
+ gc_cmd.append('--quiet')
+ exitcode = subprocess.call(gc_cmd,
+ cwd=portage._unicode_encode(self.repo.location))
+ if exitcode != os.EX_OK:
+ msg = "!!! git gc error in %s" % self.repo.location
+ self.logger(self.xterm_titles, msg)
+ writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
+ return (exitcode, False)
+
+ git_cmd = "%s fetch %s%s" % (self.bin_command,
+ remote_branch.partition('/')[0], git_cmd_opts)
+
+ writemsg_level(git_cmd + "\n")
+
+ rev_cmd = [self.bin_command, "rev-list", "--max-count=1", "HEAD"]
+ previous_rev = subprocess.check_output(rev_cmd,
+ cwd=portage._unicode_encode(self.repo.location))
+
+ exitcode = portage.process.spawn_bash("cd %s ; exec %s" % (
+ portage._shell_quote(self.repo.location), git_cmd),
+ **self.spawn_kwargs)
+
+ if exitcode != os.EX_OK:
+ msg = "!!! git fetch error in %s" % self.repo.location
+ self.logger(self.xterm_titles, msg)
+ writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
+ return (exitcode, False)
+
+ if not self.verify_head(revision='refs/remotes/%s' % remote_branch):
+ return (1, False)
+
+ if shallow:
+ # Since the default merge strategy typically fails when
+ # the depth is not unlimited, use `git reset --merge` instead.
+ merge_cmd = [self.bin_command, 'reset', '--merge']
+ else:
+ merge_cmd = [self.bin_command, 'merge']
+ merge_cmd.append('refs/remotes/%s' % remote_branch)
+ if quiet:
+ merge_cmd.append('--quiet')
+ exitcode = subprocess.call(merge_cmd,
+ cwd=portage._unicode_encode(self.repo.location))
+
+ if exitcode != os.EX_OK:
+ msg = "!!! git merge error in %s" % self.repo.location
+ self.logger(self.xterm_titles, msg)
+ writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
+ return (exitcode, False)
+
+ current_rev = subprocess.check_output(rev_cmd,
+ cwd=portage._unicode_encode(self.repo.location))
+
+ return (os.EX_OK, current_rev != previous_rev)
+
+ def verify_head(self, revision='-1'):
+ if (self.repo.module_specific_options.get(
+ 'sync-git-verify-commit-signature', 'false') != 'true'):
+ return True
+
+ if self.repo.sync_openpgp_key_path is not None:
+ if gemato is None:
+ writemsg_level("!!! Verifying against specified key requires gemato-11.0+ installed\n",
+ level=logging.ERROR, noiselevel=-1)
+ return False
+ openpgp_env = gemato.openpgp.OpenPGPEnvironment()
+ else:
+ openpgp_env = None
+
+ try:
+ out = EOutput()
+ env = None
+ if openpgp_env is not None:
+ try:
+ out.einfo('Using keys from %s' % (self.repo.sync_openpgp_key_path,))
+ with io.open(self.repo.sync_openpgp_key_path, 'rb') as f:
+ openpgp_env.import_key(f)
+ self._refresh_keys(openpgp_env)
+ except (GematoException, asyncio.TimeoutError) as e:
+ writemsg_level("!!! Verification impossible due to keyring problem:\n%s\n"
+ % (e,),
+ level=logging.ERROR, noiselevel=-1)
+ return False
+
+ env = os.environ.copy()
+ env['GNUPGHOME'] = openpgp_env.home
+
+ rev_cmd = [self.bin_command, "log", "-n1", "--pretty=format:%G?", revision]
+ try:
+ status = (portage._unicode_decode(
+ subprocess.check_output(rev_cmd,
+ cwd=portage._unicode_encode(self.repo.location),
+ env=env))
+ .strip())
+ except subprocess.CalledProcessError:
+ return False
+
+ if status == 'G': # good signature is good
+ out.einfo('Trusted signature found on top commit')
+ return True
+ elif status == 'U': # untrusted
+ out.ewarn('Top commit signature is valid but not trusted')
+ return True
+ else:
+ if status == 'B':
+ expl = 'bad signature'
+ elif status == 'X':
+ expl = 'expired signature'
+ elif status == 'Y':
+ expl = 'expired key'
+ elif status == 'R':
+ expl = 'revoked key'
+ elif status == 'E':
+ expl = 'unable to verify signature (missing key?)'
+ elif status == 'N':
+ expl = 'no signature'
+ else:
+ expl = 'unknown issue'
+ out.eerror('No valid signature found: %s' % (expl,))
+ return False
+ finally:
+ if openpgp_env is not None:
+ openpgp_env.close()
+
+ def retrieve_head(self, **kwargs):
+ '''Get information about the head commit'''
+ if kwargs:
+ self._kwargs(kwargs)
+ if self.bin_command is None:
+ # return quietly so that we don't pollute emerge --info output
+ return (1, False)
+ rev_cmd = [self.bin_command, "rev-list", "--max-count=1", "HEAD"]
+ try:
+ ret = (os.EX_OK,
+ portage._unicode_decode(subprocess.check_output(rev_cmd,
+ cwd=portage._unicode_encode(self.repo.location))))
+ except subprocess.CalledProcessError:
+ ret = (1, False)
+ return ret
diff --git a/lib/portage/sync/modules/rsync/__init__.py b/lib/portage/sync/modules/rsync/__init__.py
new file mode 100644
index 000000000..cb80f6d66
--- /dev/null
+++ b/lib/portage/sync/modules/rsync/__init__.py
@@ -0,0 +1,37 @@
+# Copyright 2014-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+doc = """Rsync plug-in module for portage.
+ Performs rsync transfers on repositories."""
+__doc__ = doc[:]
+
+from portage.sync.config_checks import CheckSyncConfig
+
+
+module_spec = {
+ 'name': 'rsync',
+ 'description': doc,
+ 'provides':{
+ 'rsync-module': {
+ 'name': "rsync",
+ 'sourcefile': "rsync",
+ 'class': "RsyncSync",
+ 'description': doc,
+ 'functions': ['sync', 'new', 'exists', 'retrieve_head'],
+ 'func_desc': {
+ 'sync': 'Performs rsync transfers on the repository',
+ 'new': 'Creates the new repository at the specified location',
+ 'exists': 'Returns a boolean indicating whether the specified directory exists',
+ 'retrieve_head': 'Returns the head commit based on metadata/timestamp.commit',
+ },
+ 'validate_config': CheckSyncConfig,
+ 'module_specific_options': (
+ 'sync-rsync-extra-opts',
+ 'sync-rsync-vcs-ignore',
+ 'sync-rsync-verify-jobs',
+ 'sync-rsync-verify-max-age',
+ 'sync-rsync-verify-metamanifest',
+ ),
+ }
+ }
+ }
diff --git a/lib/portage/sync/modules/rsync/rsync.py b/lib/portage/sync/modules/rsync/rsync.py
new file mode 100644
index 000000000..fb1960a3c
--- /dev/null
+++ b/lib/portage/sync/modules/rsync/rsync.py
@@ -0,0 +1,782 @@
+# Copyright 1999-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+import logging
+import time
+import signal
+import socket
+import datetime
+import io
+import re
+import random
+import subprocess
+import tempfile
+
+import portage
+from portage import os
+from portage import _unicode_decode
+from portage.exception import CommandNotFound
+from portage.util import writemsg_level
+from portage.output import create_color_func, yellow, blue, bold
+good = create_color_func("GOOD")
+bad = create_color_func("BAD")
+warn = create_color_func("WARN")
+from portage.const import VCS_DIRS, TIMESTAMP_FORMAT, RSYNC_PACKAGE_ATOM
+from portage.util import writemsg, writemsg_stdout
+from portage.util.futures import asyncio
+from portage.sync.getaddrinfo_validate import getaddrinfo_validate
+from _emerge.UserQuery import UserQuery
+from portage.sync.syncbase import NewBase
+
+try:
+ from gemato.exceptions import GematoException
+ import gemato.openpgp
+ import gemato.recursiveloader
+except ImportError:
+ gemato = None
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ _unicode = str
+else:
+ _unicode = unicode
+
+SERVER_OUT_OF_DATE = -1
+EXCEEDED_MAX_RETRIES = -2
+
+
+class RsyncSync(NewBase):
+ '''Rsync sync module'''
+
+ short_desc = "Perform sync operations on rsync based repositories"
+
+ @staticmethod
+ def name():
+ return "RsyncSync"
+
+
+ def __init__(self):
+ NewBase.__init__(self, "rsync", RSYNC_PACKAGE_ATOM)
+
+ def _select_download_dir(self):
+ '''
+ Select and return the download directory. It's desirable to be able
+ to create shared hardlinks between the download directory to the
+ normal repository, and this is facilitated by making the download
+ directory be a subdirectory of the normal repository location
+ (ensuring that no mountpoints are crossed). Shared hardlinks are
+ created by using the rsync --link-dest option.
+
+ Since the download is initially unverified, it is safest to save
+ it in a quarantine directory. The quarantine directory is also
+ useful for making the repository update more atomic, so that it
+ is less likely that the normal repository location will be observed
+ in a partially synced state.
+
+ This method returns a quarantine directory if sync-allow-hardlinks
+ is enabled in repos.conf, and otherwise it returns the normal
+ repository location.
+ '''
+ if self.repo.sync_allow_hardlinks:
+ return os.path.join(self.repo.location, '.tmp-unverified-download-quarantine')
+ else:
+ return self.repo.location
+
+ def _commit_download(self, download_dir):
+ '''
+ Commit changes from download_dir if it does not refer to the
+ normal repository location.
+ '''
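+ # Illustrative only (hypothetical paths): with the quarantine layout
+ # from _select_download_dir() this runs something like
+ # rsync <opts> --exclude=/.tmp-unverified-download-quarantine \
+ # /var/db/repos/gentoo/.tmp-unverified-download-quarantine/ /var/db/repos/gentoo/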
+ exitcode = 0
+ if self.repo.location != download_dir:
+ rsynccommand = [self.bin_command] + self.rsync_opts + self.extra_rsync_opts
+ rsynccommand.append('--exclude=/%s' % os.path.basename(download_dir))
+ rsynccommand.append('%s/' % download_dir.rstrip('/'))
+ rsynccommand.append('%s/' % self.repo.location)
+ exitcode = subprocess.call(rsynccommand)
+
+ return exitcode
+
+ def _remove_download(self, download_dir):
+ """
+ Remove download_dir if it does not refer to the normal repository
+ location.
+ """
+ exitcode = 0
+ if self.repo.location != download_dir:
+ exitcode = subprocess.call(['rm', '-rf', download_dir])
+ return exitcode
+
+ def update(self):
+ '''Internal update function which performs the transfer'''
+ opts = self.options.get('emerge_config').opts
+ self.usersync_uid = self.options.get('usersync_uid', None)
+ enter_invalid = '--ask-enter-invalid' in opts
+ quiet = '--quiet' in opts
+ out = portage.output.EOutput(quiet=quiet)
+ syncuri = self.repo.sync_uri
+ if self.repo.module_specific_options.get(
+ 'sync-rsync-vcs-ignore', 'false').lower() == 'true':
+ vcs_dirs = ()
+ else:
+ vcs_dirs = frozenset(VCS_DIRS)
+ vcs_dirs = vcs_dirs.intersection(os.listdir(self.repo.location))
+
+ for vcs_dir in vcs_dirs:
+ writemsg_level(("!!! %s appears to be under revision " + \
+ "control (contains %s).\n!!! Aborting rsync sync "
+ "(override with \"sync-rsync-vcs-ignore = true\" in repos.conf).\n") % \
+ (self.repo.location, vcs_dir), level=logging.ERROR, noiselevel=-1)
+ return (1, False)
+ self.timeout=180
+
+ rsync_opts = []
+ if self.settings["PORTAGE_RSYNC_OPTS"] == "":
+ rsync_opts = self._set_rsync_defaults()
+ else:
+ rsync_opts = self._validate_rsync_opts(rsync_opts, syncuri)
+ self.rsync_opts = self._rsync_opts_extend(opts, rsync_opts)
+
+ self.extra_rsync_opts = list()
+ if self.repo.module_specific_options.get('sync-rsync-extra-opts'):
+ self.extra_rsync_opts.extend(portage.util.shlex_split(
+ self.repo.module_specific_options['sync-rsync-extra-opts']))
+
+ download_dir = self._select_download_dir()
+ exitcode = 0
+
+ # Process GLEP74 verification options.
+ # Default verification to 'no'; it's enabled for ::gentoo
+ # via default repos.conf though.
+ self.verify_metamanifest = (
+ self.repo.module_specific_options.get(
+ 'sync-rsync-verify-metamanifest', 'no') in ('yes', 'true'))
+ # Support overriding job count.
+ self.verify_jobs = self.repo.module_specific_options.get(
+ 'sync-rsync-verify-jobs', None)
+ if self.verify_jobs is not None:
+ try:
+ self.verify_jobs = int(self.verify_jobs)
+ if self.verify_jobs < 0:
+ raise ValueError(self.verify_jobs)
+ except ValueError:
+ writemsg_level("!!! sync-rsync-verify-jobs not a positive integer: %s\n" % (self.verify_jobs,),
+ level=logging.WARNING, noiselevel=-1)
+ self.verify_jobs = None
+ else:
+ if self.verify_jobs == 0:
+ # Use the apparent number of processors if gemato
+ # supports it.
+ self.verify_jobs = None
+ # Support overriding max age.
+ self.max_age = self.repo.module_specific_options.get(
+ 'sync-rsync-verify-max-age', '')
+ if self.max_age:
+ try:
+ self.max_age = int(self.max_age)
+ if self.max_age < 0:
+ raise ValueError(self.max_age)
+ except ValueError:
+ writemsg_level("!!! sync-rsync-max-age must be a non-negative integer: %s\n" % (self.max_age,),
+ level=logging.WARNING, noiselevel=-1)
+ self.max_age = 0
+ else:
+ self.max_age = 0
+
+ openpgp_env = None
+ if self.verify_metamanifest and gemato is not None:
+ # Use isolated environment if key is specified,
+ # system environment otherwise
+ if self.repo.sync_openpgp_key_path is not None:
+ openpgp_env = gemato.openpgp.OpenPGPEnvironment()
+ else:
+ openpgp_env = gemato.openpgp.OpenPGPSystemEnvironment()
+
+ try:
+ # Load and update the keyring early. If it fails, then verification
+ # will not be performed and the user will have to fix it and try again,
+ # so we may as well bail out before actual rsync happens.
+ if openpgp_env is not None and self.repo.sync_openpgp_key_path is not None:
+ try:
+ out.einfo('Using keys from %s' % (self.repo.sync_openpgp_key_path,))
+ with io.open(self.repo.sync_openpgp_key_path, 'rb') as f:
+ openpgp_env.import_key(f)
+ self._refresh_keys(openpgp_env)
+ except (GematoException, asyncio.TimeoutError) as e:
+ writemsg_level("!!! Manifest verification impossible due to keyring problem:\n%s\n"
+ % (e,),
+ level=logging.ERROR, noiselevel=-1)
+ return (1, False)
+
+ # Real local timestamp file.
+ self.servertimestampfile = os.path.join(
+ self.repo.location, "metadata", "timestamp.chk")
+
+ content = portage.util.grabfile(self.servertimestampfile)
+ timestamp = 0
+ if content:
+ try:
+ timestamp = time.mktime(time.strptime(content[0],
+ TIMESTAMP_FORMAT))
+ except (OverflowError, ValueError):
+ pass
+ del content
+
+ try:
+ self.rsync_initial_timeout = \
+ int(self.settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
+ except ValueError:
+ self.rsync_initial_timeout = 15
+
+ try:
+ maxretries=int(self.settings["PORTAGE_RSYNC_RETRIES"])
+ except SystemExit as e:
+ raise # Needed else can't exit
+ except:
+ maxretries = -1 #default number of retries
+
+ if syncuri.startswith("file://"):
+ self.proto = "file"
+ dosyncuri = syncuri[7:]
+ unchanged, is_synced, exitcode, updatecache_flg = self._do_rsync(
+ dosyncuri, timestamp, opts, download_dir)
+ self._process_exitcode(exitcode, dosyncuri, out, 1)
+ if exitcode == 0 and not unchanged:
+ self._commit_download(download_dir)
+ return (exitcode, updatecache_flg)
+
+ retries=0
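+ # The regex below splits a sync URI such as
+ # rsync://user@host.example.org:873/gentoo-portage (hypothetical) into
+ # proto='rsync', user_name='user@', hostname='host.example.org', port=':873'.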
+ try:
+ self.proto, user_name, hostname, port = re.split(
+ r"(rsync|ssh)://([^:/]+@)?(\[[:\da-fA-F]*\]|[^:/]*)(:[0-9]+)?",
+ syncuri, maxsplit=4)[1:5]
+ except ValueError:
+ writemsg_level("!!! sync-uri is invalid: %s\n" % syncuri,
+ noiselevel=-1, level=logging.ERROR)
+ return (1, False)
+
+ self.ssh_opts = self.settings.get("PORTAGE_SSH_OPTS")
+
+ if port is None:
+ port=""
+ if user_name is None:
+ user_name=""
+ if re.match(r"^\[[:\da-fA-F]*\]$", hostname) is None:
+ getaddrinfo_host = hostname
+ else:
+ # getaddrinfo needs the brackets stripped
+ getaddrinfo_host = hostname[1:-1]
+ updatecache_flg = False
+ all_rsync_opts = set(self.rsync_opts)
+ all_rsync_opts.update(self.extra_rsync_opts)
+
+ family = socket.AF_UNSPEC
+ if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
+ family = socket.AF_INET
+ elif socket.has_ipv6 and \
+ ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
+ family = socket.AF_INET6
+
+ addrinfos = None
+ uris = []
+
+ try:
+ addrinfos = getaddrinfo_validate(
+ socket.getaddrinfo(getaddrinfo_host, None,
+ family, socket.SOCK_STREAM))
+ except socket.error as e:
+ writemsg_level(
+ "!!! getaddrinfo failed for '%s': %s\n"
+ % (_unicode_decode(hostname), _unicode(e)),
+ noiselevel=-1, level=logging.ERROR)
+
+ if addrinfos:
+
+ AF_INET = socket.AF_INET
+ AF_INET6 = None
+ if socket.has_ipv6:
+ AF_INET6 = socket.AF_INET6
+
+ ips_v4 = []
+ ips_v6 = []
+
+ for addrinfo in addrinfos:
+ if addrinfo[0] == AF_INET:
+ ips_v4.append("%s" % addrinfo[4][0])
+ elif AF_INET6 is not None and addrinfo[0] == AF_INET6:
+ # IPv6 addresses need to be enclosed in square brackets
+ ips_v6.append("[%s]" % addrinfo[4][0])
+
+ random.shuffle(ips_v4)
+ random.shuffle(ips_v6)
+
+ # Give priority to the address family that
+ # getaddrinfo() returned first.
+ if AF_INET6 is not None and addrinfos and \
+ addrinfos[0][0] == AF_INET6:
+ ips = ips_v6 + ips_v4
+ else:
+ ips = ips_v4 + ips_v6
+
+ for ip in ips:
+ uris.append(syncuri.replace(
+ "//" + user_name + hostname + port + "/",
+ "//" + user_name + ip + port + "/", 1))
+
+ if not uris:
+ # With some configurations we need to use the plain hostname
+ # rather than try to resolve the ip addresses (bug #340817).
+ uris.append(syncuri)
+
+ # reverse, for use with pop()
+ uris.reverse()
+ uris_orig = uris[:]
+
+ effective_maxretries = maxretries
+ if effective_maxretries < 0:
+ effective_maxretries = len(uris) - 1
+
+ local_state_unchanged = True
+ while (1):
+ if uris:
+ dosyncuri = uris.pop()
+ elif maxretries < 0 or retries > maxretries:
+ writemsg("!!! Exhausted addresses for %s\n"
+ % _unicode_decode(hostname), noiselevel=-1)
+ return (1, False)
+ else:
+ uris.extend(uris_orig)
+ dosyncuri = uris.pop()
+
+ if (retries==0):
+ if "--ask" in opts:
+ uq = UserQuery(opts)
+ if uq.query("Do you want to sync your Portage tree " + \
+ "with the mirror at\n" + blue(dosyncuri) + bold("?"),
+ enter_invalid) == "No":
+ print()
+ print("Quitting.")
+ print()
+ sys.exit(128 + signal.SIGINT)
+ self.logger(self.xterm_titles,
+ ">>> Starting rsync with " + dosyncuri)
+ if "--quiet" not in opts:
+ print(">>> Starting rsync with "+dosyncuri+"...")
+ else:
+ self.logger(self.xterm_titles,
+ ">>> Starting retry %d of %d with %s" % \
+ (retries, effective_maxretries, dosyncuri))
+ writemsg_stdout(
+ "\n\n>>> Starting retry %d of %d with %s\n" % \
+ (retries, effective_maxretries, dosyncuri), noiselevel=-1)
+
+ if dosyncuri.startswith('ssh://'):
+ dosyncuri = dosyncuri[6:].replace('/', ':/', 1)
+
+ unchanged, is_synced, exitcode, updatecache_flg = self._do_rsync(
+ dosyncuri, timestamp, opts, download_dir)
+ if not unchanged:
+ local_state_unchanged = False
+ if is_synced:
+ break
+
+ retries=retries+1
+
+ if maxretries < 0 or retries <= maxretries:
+ print(">>> Retrying...")
+ else:
+ # over retries
+ # exit loop
+ exitcode = EXCEEDED_MAX_RETRIES
+ break
+ self._process_exitcode(exitcode, dosyncuri, out, maxretries)
+
+ if local_state_unchanged:
+ # The quarantine download_dir is not intended to exist
+ # in this case, so refer gemato to the normal repository
+ # location.
+ download_dir = self.repo.location
+
+ # if synced successfully, verify now
+ if exitcode == 0 and self.verify_metamanifest:
+ if gemato is None:
+ writemsg_level("!!! Unable to verify: gemato-11.0+ is required\n",
+ level=logging.ERROR, noiselevel=-1)
+ exitcode = 127
+ else:
+ try:
+ # we always verify the Manifest signature, in case
+ # we had to deal with key revocation case
+ m = gemato.recursiveloader.ManifestRecursiveLoader(
+ os.path.join(download_dir, 'Manifest'),
+ verify_openpgp=True,
+ openpgp_env=openpgp_env,
+ max_jobs=self.verify_jobs)
+ if not m.openpgp_signed:
+ raise RuntimeError('OpenPGP signature not found on Manifest')
+
+ ts = m.find_timestamp()
+ if ts is None:
+ raise RuntimeError('Timestamp not found in Manifest')
+ if (self.max_age != 0 and
+ (datetime.datetime.utcnow() - ts.ts).days > self.max_age):
+ out.quiet = False
+ out.ewarn('Manifest is over %d days old, this is suspicious!' % (self.max_age,))
+ out.ewarn('You may want to try using another mirror and/or reporting this one:')
+ out.ewarn(' %s' % (dosyncuri,))
+ out.ewarn('')
+ out.quiet = quiet
+
+ out.einfo('Manifest timestamp: %s UTC' % (ts.ts,))
+ out.einfo('Valid OpenPGP signature found:')
+ out.einfo('- primary key: %s' % (
+ m.openpgp_signature.primary_key_fingerprint))
+ out.einfo('- subkey: %s' % (
+ m.openpgp_signature.fingerprint))
+ out.einfo('- timestamp: %s UTC' % (
+ m.openpgp_signature.timestamp))
+
+ # if nothing has changed, skip the actual Manifest
+ # verification
+ if not local_state_unchanged:
+ out.ebegin('Verifying %s' % (download_dir,))
+ m.assert_directory_verifies()
+ out.eend(0)
+ except GematoException as e:
+ writemsg_level("!!! Manifest verification failed:\n%s\n"
+ % (e,),
+ level=logging.ERROR, noiselevel=-1)
+ exitcode = 1
+
+ if exitcode == 0 and not local_state_unchanged:
+ exitcode = self._commit_download(download_dir)
+
+ return (exitcode, updatecache_flg)
+ finally:
+ if exitcode == 0:
+ self._remove_download(download_dir)
+ if openpgp_env is not None:
+ openpgp_env.close()
+
+ def _process_exitcode(self, exitcode, syncuri, out, maxretries):
+ if (exitcode==0):
+ pass
+ elif exitcode == SERVER_OUT_OF_DATE:
+ exitcode = 1
+ elif exitcode == EXCEEDED_MAX_RETRIES:
+ sys.stderr.write(
+ ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
+ exitcode = 1
+ elif (exitcode>0):
+ msg = []
+ if exitcode==1:
+ msg.append("Rsync has reported that there is a syntax error. Please ensure")
+ msg.append("that sync-uri attribute for repository '%s' is proper." % self.repo.name)
+ msg.append("sync-uri: '%s'" % self.repo.sync_uri)
+ elif exitcode==11:
+ msg.append("Rsync has reported that there is a File IO error. Normally")
+ msg.append("this means your disk is full, but can be caused by corruption")
+ msg.append("on the filesystem that contains repository '%s'. Please investigate" % self.repo.name)
+ msg.append("and try again after the problem has been fixed.")
+ msg.append("Location of repository: '%s'" % self.repo.location)
+ elif exitcode==20:
+ msg.append("Rsync was killed before it finished.")
+ else:
+ msg.append("Rsync has not successfully finished. It is recommended that you keep")
+ msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
+ msg.append("to use rsync due to firewall or other restrictions. This should be a")
+ msg.append("temporary problem unless complications exist with your network")
+ msg.append("(and possibly your system's filesystem) configuration.")
+ for line in msg:
+ out.eerror(line)
+
+
+ def new(self, **kwargs):
+ if kwargs:
+ self._kwargs(kwargs)
+ try:
+ if not os.path.exists(self.repo.location):
+ os.makedirs(self.repo.location)
+ self.logger(self.xterm_titles,
+ 'Created new directory %s' % self.repo.location)
+ except IOError:
+ return (1, False)
+ return self.update()
+
+ def retrieve_head(self, **kwargs):
+ '''Get information about the head commit'''
+ if kwargs:
+ self._kwargs(kwargs)
+ last_sync = portage.grabfile(os.path.join(self.repo.location, "metadata", "timestamp.commit"))
+ ret = (1, False)
+ if last_sync:
+ try:
+ ret = (os.EX_OK, last_sync[0].split()[0])
+ except IndexError:
+ pass
+ return ret
+
+ def _set_rsync_defaults(self):
+ portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
+ rsync_opts = [
+ "--recursive", # Recurse directories
+ "--links", # Consider symlinks
+ "--safe-links", # Ignore links outside of tree
+ "--perms", # Preserve permissions
+ "--times", # Preserive mod times
+ "--omit-dir-times",
+ "--compress", # Compress the data transmitted
+ "--force", # Force deletion on non-empty dirs
+ "--whole-file", # Don't do block transfers, only entire files
+ "--delete", # Delete files that aren't in the master tree
+ "--stats", # Show final statistics about what was transfered
+ "--human-readable",
+ "--timeout="+str(self.timeout), # IO timeout if not done in X seconds
+ "--exclude=/distfiles", # Exclude distfiles from consideration
+ "--exclude=/local", # Exclude local from consideration
+ "--exclude=/packages", # Exclude packages from consideration
+ ]
+ return rsync_opts
+
+
+ def _validate_rsync_opts(self, rsync_opts, syncuri):
+ # The below validation is not needed when using the above hardcoded
+ # defaults.
+
+ portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
+ rsync_opts.extend(portage.util.shlex_split(
+ self.settings.get("PORTAGE_RSYNC_OPTS", "")))
+ for opt in ("--recursive", "--times"):
+ if opt not in rsync_opts:
+ portage.writemsg(yellow("WARNING:") + " adding required option " + \
+ "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
+ rsync_opts.append(opt)
+
+ for exclude in ("distfiles", "local", "packages"):
+ opt = "--exclude=/%s" % exclude
+ if opt not in rsync_opts:
+ portage.writemsg(yellow("WARNING:") + \
+ " adding required option %s not included in " % opt + \
+ "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
+ rsync_opts.append(opt)
+
+ if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
+ def rsync_opt_startswith(opt_prefix):
+ for x in rsync_opts:
+ if x.startswith(opt_prefix):
+ return True
+ return False
+
+ if not rsync_opt_startswith("--timeout="):
+ rsync_opts.append("--timeout=%d" % self.timeout)
+
+ for opt in ("--compress", "--whole-file"):
+ if opt not in rsync_opts:
+ portage.writemsg(yellow("WARNING:") + " adding required option " + \
+ "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
+ rsync_opts.append(opt)
+ return rsync_opts
+
+
+ @staticmethod
+ def _rsync_opts_extend(opts, rsync_opts):
+ if "--quiet" in opts:
+ rsync_opts.append("--quiet") # Shut up a lot
+ else:
+ rsync_opts.append("--verbose") # Print filelist
+
+ if "--verbose" in opts:
+ rsync_opts.append("--progress") # Progress meter for each file
+
+ if "--debug" in opts:
+ rsync_opts.append("--checksum") # Force checksum on all files
+ return rsync_opts
+
+
+ def _do_rsync(self, syncuri, timestamp, opts, download_dir):
+ updatecache_flg = False
+ is_synced = False
+ if timestamp != 0 and "--quiet" not in opts:
+ print(">>> Checking server timestamp ...")
+
+ rsynccommand = [self.bin_command] + self.rsync_opts + self.extra_rsync_opts
+
+ if self.proto == 'ssh' and self.ssh_opts:
+ rsynccommand.append("--rsh=ssh " + self.ssh_opts)
+
+ if "--debug" in opts:
+ print(rsynccommand)
+
+ local_state_unchanged = False
+ exitcode = os.EX_OK
+ servertimestamp = 0
+ # Even if there's no timestamp available locally, fetch the
+ # timestamp anyway as an initial probe to verify that the server is
+ # responsive. This protects us from hanging indefinitely on a
+ # connection attempt to an unresponsive server which rsync's
+ # --timeout option does not prevent.
+
+ #if True:
+ # Temporary file for remote server timestamp comparison.
+ # NOTE: If FEATURES=usersync is enabled then the tempfile
+ # needs to be in a directory that's readable by the usersync
+ # user. We assume that PORTAGE_TMPDIR will satisfy this
+ # requirement, since that's not necessarily true for the
+ # default directory used by the tempfile module.
+ if self.usersync_uid is not None:
+ tmpdir = self.settings['PORTAGE_TMPDIR']
+ else:
+ # use default dir from tempfile module
+ tmpdir = None
+ fd, tmpservertimestampfile = \
+ tempfile.mkstemp(dir=tmpdir)
+ os.close(fd)
+ if self.usersync_uid is not None:
+ portage.util.apply_permissions(tmpservertimestampfile,
+ uid=self.usersync_uid)
+ command = rsynccommand[:]
+ command.append(syncuri.rstrip("/") + \
+ "/metadata/timestamp.chk")
+ command.append(tmpservertimestampfile)
+ content = None
+ pids = []
+ try:
+ # Timeout here in case the server is unresponsive. The
+ # --timeout rsync option doesn't apply to the initial
+ # connection attempt.
+ try:
+ if self.rsync_initial_timeout:
+ portage.exception.AlarmSignal.register(
+ self.rsync_initial_timeout)
+
+ pids.extend(portage.process.spawn(
+ command, returnpid=True,
+ **self.spawn_kwargs))
+ exitcode = os.waitpid(pids[0], 0)[1]
+ if self.usersync_uid is not None:
+ portage.util.apply_permissions(tmpservertimestampfile,
+ uid=os.getuid())
+ content = portage.grabfile(tmpservertimestampfile)
+ finally:
+ if self.rsync_initial_timeout:
+ portage.exception.AlarmSignal.unregister()
+ try:
+ os.unlink(tmpservertimestampfile)
+ except OSError:
+ pass
+ except portage.exception.AlarmSignal:
+ # timed out
+ print('timed out')
+ # With waitpid and WNOHANG, only check the
+ # first element of the tuple since the second
+ # element may vary (bug #337465).
+ if pids and os.waitpid(pids[0], os.WNOHANG)[0] == 0:
+ os.kill(pids[0], signal.SIGTERM)
+ os.waitpid(pids[0], 0)
+ # This is the same code rsync uses for timeout.
+ exitcode = 30
+ else:
+ if exitcode != os.EX_OK:
+ if exitcode & 0xff:
+ exitcode = (exitcode & 0xff) << 8
+ else:
+ exitcode = exitcode >> 8
+
+ if content:
+ try:
+ servertimestamp = time.mktime(time.strptime(
+ content[0], TIMESTAMP_FORMAT))
+ except (OverflowError, ValueError):
+ pass
+ del command, pids, content
+
+ if exitcode == os.EX_OK:
+ if (servertimestamp != 0) and (servertimestamp == timestamp):
+ local_state_unchanged = True
+ is_synced = True
+ self.logger(self.xterm_titles,
+ ">>> Cancelling sync -- Already current.")
+ print()
+ print(">>>")
+ print(">>> Timestamps on the server and in the local repository are the same.")
+ print(">>> Cancelling all further sync action. You are already up to date.")
+ print(">>>")
+ print(">>> In order to force sync, remove '%s'." % self.servertimestampfile)
+ print(">>>")
+ print()
+ elif (servertimestamp != 0) and (servertimestamp < timestamp):
+ self.logger(self.xterm_titles,
+ ">>> Server out of date: %s" % syncuri)
+ print()
+ print(">>>")
+ print(">>> SERVER OUT OF DATE: %s" % syncuri)
+ print(">>>")
+ print(">>> In order to force sync, remove '%s'." % self.servertimestampfile)
+ print(">>>")
+ print()
+ exitcode = SERVER_OUT_OF_DATE
+ elif (servertimestamp == 0) or (servertimestamp > timestamp):
+ # actual sync
+ command = rsynccommand[:]
+
+ if self.repo.location != download_dir:
+ # Use shared hardlinks for files that are identical
+ # in the previous snapshot of the repository.
+ command.append('--link-dest=%s' % self.repo.location)
+
+ submodule_paths = self._get_submodule_paths()
+ if submodule_paths:
+ # The only way to select multiple directories to
+ # sync, without calling rsync multiple times, is
+ # to use --relative.
+ command.append("--relative")
+ for path in submodule_paths:
+ # /./ is special syntax supported with the
+ # rsync --relative option.
+ command.append(syncuri + "/./" + path)
+ else:
+ command.append(syncuri + "/")
+
+ command.append(download_dir)
+
+ exitcode = None
+ try:
+ exitcode = portage.process.spawn(command,
+ **self.spawn_kwargs)
+ finally:
+ if exitcode is None:
+ # interrupted
+ exitcode = 128 + signal.SIGINT
+
+ # 0 Success
+ # 1 Syntax or usage error
+ # 2 Protocol incompatibility
+ # 5 Error starting client-server protocol
+ # 35 Timeout waiting for daemon connection
+ if exitcode not in (0, 1, 2, 5, 35):
+ # If the exit code is not among those listed above,
+ # then we may have a partial/inconsistent sync
+ # state, so our previously read timestamp as well
+ # as the corresponding file can no longer be
+ # trusted.
+ timestamp = 0
+ try:
+ os.unlink(self.servertimestampfile)
+ except OSError:
+ pass
+ else:
+ updatecache_flg = True
+
+ if exitcode in [0,1,3,4,11,14,20,21]:
+ is_synced = True
+ else:
+ # Code 2 indicates protocol incompatibility, which is expected
+ # for servers with protocol < 29 that don't support
+ # --prune-empty-directories. Retry for a server that supports
+ # at least rsync protocol version 29 (>=rsync-2.6.4).
+ pass
+
+ return local_state_unchanged, is_synced, exitcode, updatecache_flg
diff --git a/lib/portage/sync/modules/svn/__init__.py b/lib/portage/sync/modules/svn/__init__.py
new file mode 100644
index 000000000..c7ae3b87c
--- /dev/null
+++ b/lib/portage/sync/modules/svn/__init__.py
@@ -0,0 +1,33 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+doc = """SVN plug-in module for portage.
+Performs a svn up on repositories."""
+__doc__ = doc[:]
+
+from portage.localization import _
+from portage.sync.config_checks import CheckSyncConfig
+from portage.util import writemsg_level
+
+
+module_spec = {
+ 'name': 'svn',
+ 'description': doc,
+ 'provides':{
+ 'svn-module': {
+ 'name': "svn",
+ 'sourcefile': "svn",
+ 'class': "SVNSync",
+ 'description': doc,
+ 'functions': ['sync', 'new', 'exists'],
+ 'func_desc': {
+ 'sync': 'Performs a svn up on the repository',
+ 'new': 'Creates the new repository at the specified location',
+ 'exists': 'Returns a boolean indicating whether the specified dir ' +
+ 'exists and is a valid SVN repository',
+ },
+ 'validate_config': CheckSyncConfig,
+ 'module_specific_options': (),
+ }
+ }
+}
diff --git a/lib/portage/sync/modules/svn/svn.py b/lib/portage/sync/modules/svn/svn.py
new file mode 100644
index 000000000..723beedcb
--- /dev/null
+++ b/lib/portage/sync/modules/svn/svn.py
@@ -0,0 +1,89 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import logging
+
+import portage
+from portage import os
+from portage.util import writemsg_level
+from portage.sync.syncbase import NewBase
+
+
+class SVNSync(NewBase):
+ '''SVN sync module'''
+
+ short_desc = "Perform sync operations on SVN repositories"
+
+ @staticmethod
+ def name():
+ return "SVNSync"
+
+
+ def __init__(self):
+ NewBase.__init__(self, "svn", "dev-vcs/subversion")
+
+
+ def exists(self, **kwargs):
+ '''Tests whether the repo actually exists'''
+ return os.path.exists(os.path.join(self.repo.location, '.svn'))
+
+
+ def new(self, **kwargs):
+ if kwargs:
+ self._kwargs(kwargs)
+ #initial checkout
+ svn_root = self.repo.sync_uri
+ exitcode = portage.process.spawn_bash(
+ "cd %s; exec svn co %s ." %
+ (portage._shell_quote(self.repo.location),
+ portage._shell_quote(svn_root)),
+ **self.spawn_kwargs)
+ if exitcode != os.EX_OK:
+ msg = "!!! svn checkout error; exiting."
+ self.logger(self.xterm_titles, msg)
+ writemsg_level(msg + "\n", noiselevel=-1, level=logging.ERROR)
+ return (exitcode, False)
+
+
+ def update(self):
+ """
+ Internal function to update an existing SVN repository
+
+ @return: tuple of return code (0=success), whether the cache
+ needs to be updated
+ @rtype: (int, bool)
+ """
+
+ exitcode = self._svn_upgrade()
+ if exitcode != os.EX_OK:
+ return (exitcode, False)
+
+ #svn update
+ exitcode = portage.process.spawn_bash(
+ "cd %s; exec svn update" % \
+ (portage._shell_quote(self.repo.location),),
+ **self.spawn_kwargs)
+ if exitcode != os.EX_OK:
+ msg = "!!! svn update error; exiting."
+ self.logger(self.xterm_titles, msg)
+ writemsg_level(msg + "\n", noiselevel=-1, level=logging.ERROR)
+ return (exitcode, False)
+
+
+ def _svn_upgrade(self):
+ """
+ Internal function which performs an svn upgrade on the repo
+
+ @return: tuple of return code (0=success), whether the cache
+ needs to be updated
+ @rtype: (int, bool)
+ """
+ exitcode = portage.process.spawn_bash(
+ "cd %s; exec svn upgrade" %
+ (portage._shell_quote(self.repo.location),),
+ **self.spawn_kwargs)
+ if exitcode != os.EX_OK:
+ msg = "!!! svn upgrade error; exiting."
+ self.logger(self.xterm_titles, msg)
+ writemsg_level(msg + "\n", noiselevel=-1, level=logging.ERROR)
+ return exitcode
diff --git a/lib/portage/sync/modules/webrsync/__init__.py b/lib/portage/sync/modules/webrsync/__init__.py
new file mode 100644
index 000000000..dc7def20c
--- /dev/null
+++ b/lib/portage/sync/modules/webrsync/__init__.py
@@ -0,0 +1,51 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+doc = """WebRSync plug-in module for portage.
+Performs a http download of a portage snapshot, verifies and
+unpacks it to the repo location."""
+__doc__ = doc[:]
+
+
+import os
+
+from portage.sync.config_checks import CheckSyncConfig
+
+
+DEFAULT_CLASS = "WebRsync"
+AVAILABLE_CLASSES = [ "WebRsync", "PyWebsync"]
+options = {"1": "WebRsync", "2": "PyWebsync"}
+
+
+config_class = DEFAULT_CLASS
+try:
+ test_param = os.environ["TESTIT"]
+ if test_param in options:
+ config_class = options[test_param]
+except KeyError:
+ pass
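+# For example (illustrative): running with TESTIT=2 in the environment selects
+# the PyWebRsync class instead of the default WebRsync class.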
+
+
+module_spec = {
+ 'name': 'webrsync',
+ 'description': doc,
+ 'provides':{
+ 'webrsync-module': {
+ 'name': "webrsync",
+ 'sourcefile': "webrsync",
+ 'class': config_class,
+ 'description': doc,
+ 'functions': ['sync', 'new', 'exists'],
+ 'func_desc': {
+ 'sync': 'Performs an archived http download of the ' +
+ 'repository, then unpacks it. Optionally it performs a ' +
+ 'gpg verification of the downloaded file(s)',
+ 'new': 'Creates the new repository at the specified location',
+ 'exists': 'Returns a boolean indicating whether the specified dir ' +
+ 'exists and is a valid repository',
+ },
+ 'validate_config': CheckSyncConfig,
+ 'module_specific_options': (),
+ },
+ }
+}
diff --git a/lib/portage/sync/modules/webrsync/webrsync.py b/lib/portage/sync/modules/webrsync/webrsync.py
new file mode 100644
index 000000000..3d79f4557
--- /dev/null
+++ b/lib/portage/sync/modules/webrsync/webrsync.py
@@ -0,0 +1,70 @@
+
+'''WebRsync module for portage'''
+
+import logging
+
+import portage
+from portage import os
+from portage.util import writemsg_level
+from portage.output import create_color_func
+good = create_color_func("GOOD")
+bad = create_color_func("BAD")
+warn = create_color_func("WARN")
+from portage.sync.syncbase import SyncBase
+
+
+class WebRsync(SyncBase):
+ '''WebRSync sync class'''
+
+ short_desc = "Perform sync operations on webrsync based repositories"
+
+ @staticmethod
+ def name():
+ return "WebRSync"
+
+
+ def __init__(self):
+ SyncBase.__init__(self, 'emerge-webrsync', '>=sys-apps/portage-2.3')
+
+
+ def sync(self, **kwargs):
+ '''Sync the repository'''
+ if kwargs:
+ self._kwargs(kwargs)
+
+ if not self.has_bin:
+ return (1, False)
+
+ # filter these out to prevent gpg errors
+ for var in ['uid', 'gid', 'groups']:
+ self.spawn_kwargs.pop(var, None)
+
+ exitcode = portage.process.spawn_bash("%s" % \
+ (self.bin_command),
+ **self.spawn_kwargs)
+ if exitcode != os.EX_OK:
+ msg = "!!! emerge-webrsync error in %s" % self.repo.location
+ self.logger(self.xterm_titles, msg)
+ writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
+ return (exitcode, False)
+ return (exitcode, True)
+
+
+class PyWebRsync(SyncBase):
+ '''WebRSync sync class'''
+
+ short_desc = "Perform sync operations on webrsync based repositories"
+
+ @staticmethod
+ def name():
+ return "WebRSync"
+
+
+ def __init__(self):
+ SyncBase.__init__(self, None, '>=sys-apps/portage-2.3')
+
+
+ def sync(self, **kwargs):
+ '''Sync the repository'''
+ pass
+
diff --git a/lib/portage/sync/old_tree_timestamp.py b/lib/portage/sync/old_tree_timestamp.py
new file mode 100644
index 000000000..aaed18b56
--- /dev/null
+++ b/lib/portage/sync/old_tree_timestamp.py
@@ -0,0 +1,101 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division
+
+import locale
+import logging
+import time
+
+from portage import os, _unicode_decode
+from portage.exception import PortageException
+from portage.localization import _
+from portage.output import EOutput
+from portage.util import grabfile, writemsg_level
+
+def have_english_locale():
+ lang, enc = locale.getdefaultlocale()
+ if lang is not None:
+ lang = lang.lower()
+ lang = lang.split('_', 1)[0]
+ return lang is None or lang in ('c', 'en')
+
+def whenago(seconds):
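+ '''Return a compact age string for the given number of seconds,
+ e.g. whenago(90061) == "1d 1h 1m 1s" (86400 + 3600 + 60 + 1 seconds).'''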
+ sec = int(seconds)
+ mins = 0
+ days = 0
+ hrs = 0
+ years = 0
+ out = []
+
+ if sec > 60:
+ mins = sec // 60
+ sec = sec % 60
+ if mins > 60:
+ hrs = mins // 60
+ mins = mins % 60
+ if hrs > 24:
+ days = hrs // 24
+ hrs = hrs % 24
+ if days > 365:
+ years = days // 365
+ days = days % 365
+
+ if years:
+ out.append("%dy " % years)
+ if days:
+ out.append("%dd " % days)
+ if hrs:
+ out.append("%dh " % hrs)
+ if mins:
+ out.append("%dm " % mins)
+ if sec:
+ out.append("%ds " % sec)
+
+ return "".join(out).strip()
+
+def old_tree_timestamp_warn(portdir, settings):
+ unixtime = time.time()
+ default_warnsync = 30
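+ # PORTAGE_SYNC_STALE sets the warning threshold in days (default 30);
+ # a value of 0 or less disables the check below.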
+
+ timestamp_file = os.path.join(portdir, "metadata/timestamp.x")
+ try:
+ lastsync = grabfile(timestamp_file)
+ except PortageException:
+ return False
+
+ if not lastsync:
+ return False
+
+ lastsync = lastsync[0].split()
+ if not lastsync:
+ return False
+
+ try:
+ lastsync = int(lastsync[0])
+ except ValueError:
+ return False
+
+ var_name = 'PORTAGE_SYNC_STALE'
+ try:
+ warnsync = float(settings.get(var_name, default_warnsync))
+ except ValueError:
+ writemsg_level("!!! %s contains non-numeric value: %s\n" % \
+ (var_name, settings[var_name]),
+ level=logging.ERROR, noiselevel=-1)
+ return False
+
+ if warnsync <= 0:
+ return False
+
+ if (unixtime - 86400 * warnsync) > lastsync:
+ out = EOutput()
+ if have_english_locale():
+ out.ewarn("Last emerge --sync was %s ago." % \
+ whenago(unixtime - lastsync))
+ else:
+ out.ewarn(_("Last emerge --sync was %s.") % \
+ _unicode_decode(time.strftime(
+ '%c', time.localtime(lastsync))))
+ return True
+ return False
diff --git a/lib/portage/sync/syncbase.py b/lib/portage/sync/syncbase.py
new file mode 100644
index 000000000..ce69a4fc0
--- /dev/null
+++ b/lib/portage/sync/syncbase.py
@@ -0,0 +1,263 @@
+# Copyright 2014-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+'''
+Base class for performing sync operations.
+This class contains common initialization code and functions.
+'''
+
+from __future__ import unicode_literals
+import functools
+import logging
+import os
+
+import portage
+from portage.util import writemsg_level
+from portage.util._eventloop.global_event_loop import global_event_loop
+from portage.util.backoff import RandomExponentialBackoff
+from portage.util.futures.retry import retry
+from portage.util.futures.executor.fork import ForkExecutor
+from . import _SUBMODULE_PATH_MAP
+
+class SyncBase(object):
+ '''Base Sync class for subclassing'''
+
+ short_desc = "Perform sync operations on repositories"
+
+ @staticmethod
+ def name():
+ return "BlankSync"
+
+
+ def can_progressbar(self, func):
+ return False
+
+
+ def __init__(self, bin_command, bin_pkg):
+ self.options = None
+ self.settings = None
+ self.logger = None
+ self.repo = None
+ self.xterm_titles = None
+ self.spawn_kwargs = None
+ self.bin_command = None
+ self._bin_command = bin_command
+ self.bin_pkg = bin_pkg
+ if bin_command:
+ self.bin_command = portage.process.find_binary(bin_command)
+
+
+ @property
+ def has_bin(self):
+ '''Checks for existence of the external binary.
+
+ MUST only be called after _kwargs() has set the logger
+ '''
+ if self.bin_command is None:
+ msg = ["Command not found: %s" % self._bin_command,
+ "Type \"emerge %s\" to enable %s support."
+ % (self.bin_pkg, self._bin_command)]
+ for l in msg:
+ writemsg_level("!!! %s\n" % l,
+ level=logging.ERROR, noiselevel=-1)
+ return False
+ return True
+
+
+ def _kwargs(self, kwargs):
+ '''Sets internal variables from kwargs'''
+ self.options = kwargs.get('options', {})
+ self.settings = self.options.get('settings', None)
+ self.logger = self.options.get('logger', None)
+ self.repo = self.options.get('repo', None)
+ self.xterm_titles = self.options.get('xterm_titles', False)
+ self.spawn_kwargs = self.options.get('spawn_kwargs', None)
+
+
+ def exists(self, **kwargs):
+ '''Tests whether the repo actually exists'''
+ if kwargs:
+ self._kwargs(kwargs)
+ elif not self.repo:
+ return False
+ if not os.path.exists(self.repo.location):
+ return False
+ return True
+
+
+ def sync(self, **kwargs):
+ '''Sync the repository'''
+ raise NotImplementedError
+
+
+ def post_sync(self, portdb, location, emerge_config):
+ '''Perform any post-sync operations for this repository.
+
+ NOTE: This hook runs after reloading the config, in case the
+ repository did not exist prior to sync, so that the config and
+ portdb properly account for its existence.
+ '''
+ pass
+
+
+ def _get_submodule_paths(self):
+ paths = []
+ emerge_config = self.options.get('emerge_config')
+ if emerge_config is not None:
+ for name in emerge_config.opts.get('--sync-submodule', []):
+ paths.extend(_SUBMODULE_PATH_MAP[name])
+ return tuple(paths)
+
+ def retrieve_head(self, **kwargs):
+ '''Get information about the head commit'''
+ raise NotImplementedError
+
+ def _key_refresh_retry_decorator(self):
+ '''
+ Return a retry decorator, or None if retry is disabled.
+
+ If retry fails, the function reraises the exception raised
+ by the decorated function. If retry times out and no exception
+ is available to reraise, the function raises TimeoutError.
+ '''
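+ # The retry behaviour is driven by the repos.conf settings parsed below:
+ # sync-openpgp-key-refresh-retry-count,
+ # sync-openpgp-key-refresh-retry-overall-timeout,
+ # sync-openpgp-key-refresh-retry-delay-mult and
+ # sync-openpgp-key-refresh-retry-delay-exp-base.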
+ errors = []
+
+ if self.repo.sync_openpgp_key_refresh_retry_count is None:
+ return None
+ try:
+ retry_count = int(self.repo.sync_openpgp_key_refresh_retry_count)
+ except Exception as e:
+ errors.append('sync-openpgp-key-refresh-retry-count: {}'.format(e))
+ else:
+ if retry_count <= 0:
+ return None
+
+ if self.repo.sync_openpgp_key_refresh_retry_overall_timeout is None:
+ retry_overall_timeout = None
+ else:
+ try:
+ retry_overall_timeout = float(self.repo.sync_openpgp_key_refresh_retry_overall_timeout)
+ except Exception as e:
+ errors.append('sync-openpgp-key-refresh-retry-overall-timeout: {}'.format(e))
+ else:
+ if retry_overall_timeout < 0:
+ errors.append('sync-openpgp-key-refresh-retry-overall-timeout: '
+ 'value must be greater than or equal to zero: {}'.format(retry_overall_timeout))
+ elif retry_overall_timeout == 0:
+ retry_overall_timeout = None
+
+ if self.repo.sync_openpgp_key_refresh_retry_delay_mult is None:
+ retry_delay_mult = None
+ else:
+ try:
+ retry_delay_mult = float(self.repo.sync_openpgp_key_refresh_retry_delay_mult)
+ except Exception as e:
+ errors.append('sync-openpgp-key-refresh-retry-delay-mult: {}'.format(e))
+ else:
+ if retry_delay_mult <= 0:
+ errors.append('sync-openpgp-key-refresh-retry-delay-mult: '
+ 'value must be greater than zero: {}'.format(retry_delay_mult))
+
+ if self.repo.sync_openpgp_key_refresh_retry_delay_exp_base is None:
+ retry_delay_exp_base = None
+ else:
+ try:
+ retry_delay_exp_base = float(self.repo.sync_openpgp_key_refresh_retry_delay_exp_base)
+ except Exception as e:
+ errors.append('sync-openpgp-key-refresh-retry-delay-exp-base: {}'.format(e))
+ else:
+ if retry_delay_exp_base <= 0:
+ errors.append('sync-openpgp-key-refresh-retry-delay-exp-base: '
+ 'value must be greater than zero: {}'.format(retry_delay_exp_base))
+
+ if errors:
+ lines = []
+ lines.append('')
+ lines.append('!!! Retry disabled for openpgp key refresh:')
+ lines.append('')
+ for msg in errors:
+ lines.append(' {}'.format(msg))
+ lines.append('')
+
+ for line in lines:
+ writemsg_level("{}\n".format(line),
+ level=logging.ERROR, noiselevel=-1)
+
+ return None
+
+ return retry(
+ reraise=True,
+ try_max=retry_count,
+ # retry_overall_timeout has been normalized above to None or a positive float
+ overall_timeout=retry_overall_timeout,
+ delay_func=RandomExponentialBackoff(
+ multiplier=(1 if retry_delay_mult is None else retry_delay_mult),
+ base=(2 if retry_delay_exp_base is None else retry_delay_exp_base)))
+
+ def _refresh_keys(self, openpgp_env):
+ """
+ Refresh keys stored in openpgp_env. Raises gemato.exceptions.GematoException
+ or asyncio.TimeoutError on failure.
+
+ @param openpgp_env: openpgp environment
+ @type openpgp_env: gemato.openpgp.OpenPGPEnvironment
+ """
+ out = portage.output.EOutput(quiet=('--quiet' in self.options['emerge_config'].opts))
+ out.ebegin('Refreshing keys from keyserver')
+ retry_decorator = self._key_refresh_retry_decorator()
+ if retry_decorator is None:
+ openpgp_env.refresh_keys()
+ else:
+ def noisy_refresh_keys():
+ """
+ Since retry does not help for some types of
+ errors, display errors as soon as they occur.
+ """
+ try:
+ openpgp_env.refresh_keys()
+ except Exception as e:
+ writemsg_level("%s\n" % (e,),
+ level=logging.ERROR, noiselevel=-1)
+ raise # retry
+
+ # The ThreadPoolExecutor that asyncio uses by default
+ # does not support cancellation of tasks, therefore
+ # use ForkExecutor for task cancellation support, in
+ # order to enforce timeouts.
+ loop = global_event_loop()
+ with ForkExecutor(loop=loop) as executor:
+ func_coroutine = functools.partial(loop.run_in_executor,
+ executor, noisy_refresh_keys)
+ decorated_func = retry_decorator(func_coroutine, loop=loop)
+ loop.run_until_complete(decorated_func())
+ out.eend(0)
+
+
+class NewBase(SyncBase):
+ '''Subclasses Syncbase adding a new() and runs it
+ instead of update() if the repository does not exist()'''
+
+
+ def __init__(self, bin_command, bin_pkg):
+ SyncBase.__init__(self, bin_command, bin_pkg)
+
+
+ def sync(self, **kwargs):
+ '''Sync the repository'''
+ if kwargs:
+ self._kwargs(kwargs)
+
+ if not self.has_bin:
+ return (1, False)
+
+ if not self.exists():
+ return self.new()
+ return self.update()
+
+
+ def new(self, **kwargs):
+ '''Do the initial download and install of the repository'''
+ raise NotImplementedError
+
+ def update(self):
+ '''Update existing repository
+ '''
+ raise NotImplementedError
diff --git a/lib/portage/tests/__init__.py b/lib/portage/tests/__init__.py
new file mode 100644
index 000000000..e149b5c0c
--- /dev/null
+++ b/lib/portage/tests/__init__.py
@@ -0,0 +1,353 @@
+# tests/__init__.py -- Portage Unit Test functionality
+# Copyright 2006-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import argparse
+import sys
+import time
+import unittest
+
+try:
+ from unittest.runner import _TextTestResult # new in python-2.7
+except ImportError:
+ from unittest import _TextTestResult
+
+try:
+ # They added the skip framework to python-2.7.
+ # Drop this once we drop python-2.6 support.
+ unittest_skip_shims = False
+ from unittest import SkipTest # new in python-2.7
+except ImportError:
+ unittest_skip_shims = True
+
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage.const import (EPREFIX, GLOBAL_CONFIG_PATH, PORTAGE_BASE_PATH,
+ PORTAGE_BIN_PATH)
+
+
+if portage._not_installed:
+ cnf_path = os.path.join(PORTAGE_BASE_PATH, 'cnf')
+ cnf_etc_path = cnf_path
+ cnf_bindir = PORTAGE_BIN_PATH
+ cnf_sbindir = cnf_bindir
+else:
+ cnf_path = os.path.join(EPREFIX or '/', GLOBAL_CONFIG_PATH)
+ cnf_etc_path = os.path.join(EPREFIX or '/', 'etc')
+ cnf_eprefix = EPREFIX
+ cnf_bindir = os.path.join(EPREFIX or '/', 'usr', 'bin')
+ cnf_sbindir = os.path.join(EPREFIX or '/', 'usr', 'sbin')
+
+
+def main():
+ suite = unittest.TestSuite()
+ basedir = os.path.dirname(os.path.realpath(__file__))
+
+ usage = "usage: %s [options] [tests to run]" % os.path.basename(sys.argv[0])
+ parser = argparse.ArgumentParser(usage=usage)
+ parser.add_argument("-l", "--list", help="list all tests",
+ action="store_true", dest="list_tests")
+ options, args = parser.parse_known_args(args=sys.argv)
+
+ if (os.environ.get('NOCOLOR') in ('yes', 'true') or
+ os.environ.get('TERM') == 'dumb' or
+ not sys.stdout.isatty()):
+ portage.output.nocolor()
+
+ if options.list_tests:
+ testdir = os.path.dirname(sys.argv[0])
+ for mydir in getTestDirs(basedir):
+ testsubdir = os.path.basename(mydir)
+ for name in getTestNames(mydir):
+ print("%s/%s/%s.py" % (testdir, testsubdir, name))
+ return os.EX_OK
+
+ if len(args) > 1:
+ suite.addTests(getTestFromCommandLine(args[1:], basedir))
+ else:
+ for mydir in getTestDirs(basedir):
+ suite.addTests(getTests(os.path.join(basedir, mydir), basedir))
+
+ result = TextTestRunner(verbosity=2).run(suite)
+ if not result.wasSuccessful():
+ return 1
+ return os.EX_OK
+
+def my_import(name):
+ mod = __import__(name)
+ components = name.split('.')
+ for comp in components[1:]:
+ mod = getattr(mod, comp)
+ return mod
+
+def getTestFromCommandLine(args, base_path):
+ result = []
+ for arg in args:
+ realpath = os.path.realpath(arg)
+ path = os.path.dirname(realpath)
+ f = realpath[len(path)+1:]
+
+ if not f.startswith("test") or not f.endswith(".py"):
+ raise Exception("Invalid argument: '%s'" % arg)
+
+ mymodule = f[:-3]
+ result.extend(getTestsFromFiles(path, base_path, [mymodule]))
+ return result
+
+def getTestDirs(base_path):
+ TEST_FILE = b'__test__.py'
+ testDirs = []
+
+ # the os.walk help mentions relative paths as being quirky
+ # I was tired of adding dirs to the list, so now we add __test__.py
+ # to each dir we want tested.
+ for root, dirs, files in os.walk(base_path):
+ try:
+ root = _unicode_decode(root,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+
+ if TEST_FILE in files:
+ testDirs.append(root)
+
+ testDirs.sort()
+ return testDirs
+
+def getTestNames(path):
+ files = os.listdir(path)
+ files = [f[:-3] for f in files if f.startswith("test") and f.endswith(".py")]
+ files.sort()
+ return files
+
+def getTestsFromFiles(path, base_path, files):
+ parent_path = path[len(base_path)+1:]
+ parent_module = ".".join(("portage", "tests", parent_path))
+ parent_module = parent_module.replace('/', '.')
+ result = []
+ for mymodule in files:
+ # Make the trailing / a . for module importing
+ modname = ".".join((parent_module, mymodule))
+ mod = my_import(modname)
+ result.append(unittest.TestLoader().loadTestsFromModule(mod))
+ return result
+
+def getTests(path, base_path):
+ """
+
+ path is the path to a given subdir ( 'portage/' for example)
+ This does a simple filter on files in that dir to give us modules
+ to import
+
+ """
+ return getTestsFromFiles(path, base_path, getTestNames(path))
+
+class TextTestResult(_TextTestResult):
+ """
+ We need a subclass of unittest._TextTestResult to handle tests with TODO
+
+ This just adds an addTodo method that can be used to add tests
+ that are marked TODO; these can be displayed later
+ by the test runner.
+ """
+
+ def __init__(self, stream, descriptions, verbosity):
+ super(TextTestResult, self).__init__(stream, descriptions, verbosity)
+ self.todoed = []
+ self.portage_skipped = []
+
+ def addTodo(self, test, info):
+ self.todoed.append((test, info))
+ if self.showAll:
+ self.stream.writeln("TODO")
+ elif self.dots:
+ self.stream.write(".")
+
+ def addPortageSkip(self, test, info):
+ self.portage_skipped.append((test, info))
+ if self.showAll:
+ self.stream.writeln("SKIP")
+ elif self.dots:
+ self.stream.write(".")
+
+ def printErrors(self):
+ if self.dots or self.showAll:
+ self.stream.writeln()
+ self.printErrorList('ERROR', self.errors)
+ self.printErrorList('FAIL', self.failures)
+ self.printErrorList('TODO', self.todoed)
+ self.printErrorList('SKIP', self.portage_skipped)
+
+class TestCase(unittest.TestCase):
+ """
+ We need a way to mark a unit test as "ok to fail", so that someone can
+ add a known-broken test now and fix the code later. This may not be a
+ great approach (shipping broken code is never ideal), but it does
+ happen at times.
+ """
+
+ def __init__(self, *pargs, **kwargs):
+ unittest.TestCase.__init__(self, *pargs, **kwargs)
+ self.todo = False
+ self.portage_skip = None
+ self.cnf_path = cnf_path
+ self.cnf_etc_path = cnf_etc_path
+ self.bindir = cnf_bindir
+ self.sbindir = cnf_sbindir
+
+ def defaultTestResult(self):
+ return TextTestResult()
+
+ def run(self, result=None):
+ if result is None: result = self.defaultTestResult()
+ result.startTest(self)
+ testMethod = getattr(self, self._testMethodName)
+ try:
+ ok = False
+ try:
+ try:
+ self.setUp()
+ except KeyboardInterrupt:
+ raise
+ except SkipTest:
+ raise
+ except Exception:
+ result.addError(self, sys.exc_info())
+ return
+
+ testMethod()
+ ok = True
+ except SkipTest as e:
+ result.addPortageSkip(self, "%s: SKIP: %s" %
+ (testMethod, str(e)))
+ except self.failureException:
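+ # A failure in a test marked portage_skip or todo is recorded as a
+ # skip/todo rather than as a failure.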
+ if self.portage_skip is not None:
+ if self.portage_skip is True:
+ result.addPortageSkip(self, "%s: SKIP" % testMethod)
+ else:
+ result.addPortageSkip(self, "%s: SKIP: %s" %
+ (testMethod, self.portage_skip))
+ elif self.todo:
+ result.addTodo(self, "%s: TODO" % testMethod)
+ else:
+ result.addFailure(self, sys.exc_info())
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ result.addError(self, sys.exc_info())
+
+ try:
+ self.tearDown()
+ except SystemExit:
+ raise
+ except KeyboardInterrupt:
+ raise
+ except:
+ result.addError(self, sys.exc_info())
+ ok = False
+ if ok:
+ result.addSuccess(self)
+ finally:
+ result.stopTest(self)
+
+ def assertRaisesMsg(self, msg, excClass, callableObj, *args, **kwargs):
+ """Fail unless an exception of class excClass is thrown
+ by callableObj when invoked with arguments args and keyword
+ arguments kwargs. If a different type of exception is
+ thrown, it will not be caught, and the test case will be
+ deemed to have suffered an error, exactly as for an
+ unexpected exception.
+ """
+ try:
+ callableObj(*args, **kwargs)
+ except excClass:
+ return
+ else:
+ if hasattr(excClass, '__name__'): excName = excClass.__name__
+ else: excName = str(excClass)
+ raise self.failureException("%s not raised: %s" % (excName, msg))
+
+ def assertExists(self, path):
+ """Make sure |path| exists"""
+ if not os.path.exists(path):
+ msg = ['path is missing: %s' % (path,)]
+ while path != '/':
+ path = os.path.dirname(path)
+ if not path:
+ # If we're given something like "foo", abort once we get to "".
+ break
+ result = os.path.exists(path)
+ msg.append('\tos.path.exists(%s): %s' % (path, result))
+ if result:
+ msg.append('\tcontents: %r' % os.listdir(path))
+ break
+ raise self.failureException('\n'.join(msg))
+
+ def assertNotExists(self, path):
+ """Make sure |path| does not exist"""
+ if os.path.exists(path):
+ raise self.failureException('path exists when it should not: %s' % path)
+
+if unittest_skip_shims:
+ # Shim code for <python-2.7.
+ class SkipTest(Exception):
+ """unittest.SkipTest shim for <python-2.7"""
+
+ def skipTest(self, reason):
+ raise SkipTest(reason)
+ setattr(TestCase, 'skipTest', skipTest)
+
+ def assertIn(self, member, container, msg=None):
+ self.assertTrue(member in container, msg=msg)
+ setattr(TestCase, 'assertIn', assertIn)
+
+ def assertNotIn(self, member, container, msg=None):
+ self.assertFalse(member in container, msg=msg)
+ setattr(TestCase, 'assertNotIn', assertNotIn)
+
+class TextTestRunner(unittest.TextTestRunner):
+ """
+ We subclass unittest.TextTestRunner to output SKIP for tests that fail but are skippable
+ """
+
+ def _makeResult(self):
+ return TextTestResult(self.stream, self.descriptions, self.verbosity)
+
+ def run(self, test):
+ """
+ Run the given test case or test suite.
+ """
+ result = self._makeResult()
+ startTime = time.time()
+ test(result)
+ stopTime = time.time()
+ timeTaken = stopTime - startTime
+ result.printErrors()
+ self.stream.writeln(result.separator2)
+ run = result.testsRun
+ self.stream.writeln("Ran %d test%s in %.3fs" %
+ (run, run != 1 and "s" or "", timeTaken))
+ self.stream.writeln()
+ if not result.wasSuccessful():
+ self.stream.write("FAILED (")
+ failed = len(result.failures)
+ errored = len(result.errors)
+ if failed:
+ self.stream.write("failures=%d" % failed)
+ if errored:
+ if failed: self.stream.write(", ")
+ self.stream.write("errors=%d" % errored)
+ self.stream.writeln(")")
+ else:
+ self.stream.writeln("OK")
+ return result
+
+test_cps = ['sys-apps/portage', 'virtual/portage']
+test_versions = ['1.0', '1.0-r1', '2.3_p4', '1.0_alpha57']
+test_slots = [None, '1', 'gentoo-sources-2.6.17', 'spankywashere']
+test_usedeps = ['foo', '-bar', ('foo', 'bar'),
+ ('foo', '-bar'), ('foo?', '!bar?')]
diff --git a/lib/portage/tests/bin/__init__.py b/lib/portage/tests/bin/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/bin/__init__.py
diff --git a/lib/portage/tests/bin/__test__.py b/lib/portage/tests/bin/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/bin/__test__.py
diff --git a/lib/portage/tests/bin/setup_env.py b/lib/portage/tests/bin/setup_env.py
new file mode 100644
index 000000000..9cc26df08
--- /dev/null
+++ b/lib/portage/tests/bin/setup_env.py
@@ -0,0 +1,87 @@
+# setup_env.py -- Make sure bin subdir has sane env for testing
+# Copyright 2007-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import tempfile
+
+from portage import os
+from portage import shutil
+from portage.const import PORTAGE_BIN_PATH
+from portage.const import PORTAGE_PYM_PATH
+from portage.tests import TestCase
+from portage.process import spawn
+
+bindir = PORTAGE_BIN_PATH
+basedir = None
+env = None
+
+def binTestsCleanup():
+ global basedir
+ if basedir is None:
+ return
+ if os.access(basedir, os.W_OK):
+ shutil.rmtree(basedir)
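+ # Each entry below is either a 1-tuple holding a callable that must
+ # return True, or an argv tuple that is run as a subprocess and must
+ # exit successfully.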
+ basedir = None
+
+def binTestsInit():
+ binTestsCleanup()
+ global basedir, env
+ basedir = tempfile.mkdtemp()
+ env = {}
+ env['EAPI'] = '0'
+ env['D'] = os.path.join(basedir, 'image')
+ env['T'] = os.path.join(basedir, 'temp')
+ env['S'] = os.path.join(basedir, 'workdir')
+ env['PF'] = 'portage-tests-0.09-r1'
+ env['PATH'] = bindir + ':' + os.environ['PATH']
+ env['PORTAGE_BIN_PATH'] = bindir
+ env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
+ env['PORTAGE_INST_UID'] = str(os.getuid())
+ env['PORTAGE_INST_GID'] = str(os.getgid())
+ env['DESTTREE'] = '/usr'
+ os.mkdir(env['D'])
+ os.mkdir(env['T'])
+ os.mkdir(env['S'])
+
+class BinTestCase(TestCase):
+ def init(self):
+ binTestsInit()
+ def cleanup(self):
+ binTestsCleanup()
+
+def _exists_in_D(path):
+ # Note: do not use os.path.join() here, since we assume D ends in /
+ return os.access(env['D'] + path, os.W_OK)
+def exists_in_D(path):
+ if not _exists_in_D(path):
+ raise TestCase.failureException
+def xexists_in_D(path):
+ if _exists_in_D(path):
+ raise TestCase.failureException
+
+def portage_func(func, args, exit_status=0):
+ # we don't care about the output of the programs,
+ # just their exit value and the state of $D
+ global env
+ f = open('/dev/null', 'wb')
+ fd_pipes = {0:0,1:f.fileno(),2:f.fileno()}
+ def pre_exec():
+ os.chdir(env['S'])
+ spawn([func] + args.split(), env=env,
+ fd_pipes=fd_pipes, pre_exec=pre_exec)
+ f.close()
+
+def create_portage_wrapper(bin):
+ def derived_func(*args):
+ newargs = list(args)
+ newargs.insert(0, bin)
+ return portage_func(*newargs)
+ return derived_func
+
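+# Expose wrappers for selected ebuild helpers (dobin, doins, dodir, ...) as
+# module-level functions so that tests can import and call them by name.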
+for bin in os.listdir(os.path.join(bindir, 'ebuild-helpers')):
+ if bin.startswith('do') or \
+ bin.startswith('new') or \
+ bin.startswith('prep') or \
+ bin in ('ecompress', 'ecompressdir', 'fowners', 'fperms'):
+ globals()[bin] = create_portage_wrapper(
+ os.path.join(bindir, 'ebuild-helpers', bin))
diff --git a/lib/portage/tests/bin/test_dobin.py b/lib/portage/tests/bin/test_dobin.py
new file mode 100644
index 000000000..6f50d7aba
--- /dev/null
+++ b/lib/portage/tests/bin/test_dobin.py
@@ -0,0 +1,16 @@
+# test_dobin.py -- Portage Unit Testing Functionality
+# Copyright 2007-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests.bin.setup_env import BinTestCase, dobin, xexists_in_D
+
+class DoBin(BinTestCase):
+ def testDoBin(self):
+ self.init()
+ try:
+ dobin("does-not-exist", 1)
+ xexists_in_D("does-not-exist")
+ xexists_in_D("/bin/does-not-exist")
+ xexists_in_D("/usr/bin/does-not-exist")
+ finally:
+ self.cleanup()
diff --git a/lib/portage/tests/bin/test_dodir.py b/lib/portage/tests/bin/test_dodir.py
new file mode 100644
index 000000000..5d4018161
--- /dev/null
+++ b/lib/portage/tests/bin/test_dodir.py
@@ -0,0 +1,18 @@
+# test_dodir.py -- Portage Unit Testing Functionality
+# Copyright 2007-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests.bin.setup_env import BinTestCase, dodir, exists_in_D
+
+class DoDir(BinTestCase):
+ def testDoDir(self):
+ self.init()
+ try:
+ dodir("usr /usr")
+ exists_in_D("/usr")
+ dodir("boot")
+ exists_in_D("/boot")
+ dodir("/var/lib/moocow")
+ exists_in_D("/var/lib/moocow")
+ finally:
+ self.cleanup()
diff --git a/lib/portage/tests/bin/test_doins.py b/lib/portage/tests/bin/test_doins.py
new file mode 100644
index 000000000..e3d5153b3
--- /dev/null
+++ b/lib/portage/tests/bin/test_doins.py
@@ -0,0 +1,355 @@
+# test_doins.py -- Portage Unit Testing Functionality
+# Copyright 2007-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import grp
+import os
+import pwd
+import stat
+
+from portage.tests.bin import setup_env
+from portage import tests
+
+doins = setup_env.doins
+exists_in_D = setup_env.exists_in_D
+
+
+class DoIns(setup_env.BinTestCase):
+ def testDoIns(self):
+ """Tests the most basic senario."""
+ self.init()
+ try:
+ env = setup_env.env
+ # Create a file to be installed.
+ test_path = os.path.join(env['S'], 'test')
+ with open(test_path, 'w'):
+ pass
+ doins('test')
+ exists_in_D('/test')
+ st = os.lstat(env['D'] + '/test')
+ # By default, `install`'s permission is 755.
+ if stat.S_IMODE(st.st_mode) != 0o755:
+ raise tests.TestCase.failureException
+ finally:
+ self.cleanup()
+
+ def testDoInsOption(self):
+ """Tests with INSOPTIONS doins.py understands."""
+ self.init()
+ try:
+ env = setup_env.env
+ env['INSOPTIONS'] = '-pm0644'
+ with open(os.path.join(env['S'], 'test'), 'w'):
+ pass
+ doins('test')
+ st = os.lstat(env['D'] + '/test')
+ if stat.S_IMODE(st.st_mode) != 0o644:
+ raise tests.TestCase.failureException
+ self.assertEqual(
+ os.stat(os.path.join(env['S'], 'test'))[stat.ST_MTIME],
+ st[stat.ST_MTIME])
+ finally:
+ self.cleanup()
+
+ def testDoInsOptionUnsupportedMode(self):
+ """Tests with INSOPTIONS in the format doins.py doesn't know."""
+ self.init()
+ try:
+ env = setup_env.env
+ # Pass -m with a mode format that doins.py cannot parse itself.
+ # It should fall back to the `install` command.
+ env['INSOPTIONS'] = '-m u+r'
+ with open(os.path.join(env['S'], 'test'), 'w'):
+ pass
+ doins('test')
+ st = os.lstat(env['D'] + '/test')
+ if stat.S_IMODE(st.st_mode) != 0o400:
+ raise tests.TestCase.failureException
+ finally:
+ self.cleanup()
+
+ def testDoInsOptionUid(self):
+ """Tests setting owner by uid works."""
+ self.init()
+ try:
+ env = setup_env.env
+ with open(os.path.join(env['S'], 'test'), 'w'):
+ pass
+ uid = os.lstat(os.path.join(env['S'], 'test')).st_uid
+ # Set the owner option with a uid. There is no guarantee that
+ # this runs with the needed capability, so we use the current
+ # UID so that chown should succeed, although it is difficult to
+ # check whether chown actually runs or not.
+ env['INSOPTIONS'] = '-o %d' % uid
+ doins('test')
+ st = os.lstat(env['D'] + '/test')
+ if st.st_uid != uid:
+ raise tests.TestCase.failureException
+ finally:
+ self.cleanup()
+
+ def testDoInsOptionUserName(self):
+ """Tests setting owner by name works."""
+ self.init()
+ try:
+ env = setup_env.env
+ with open(os.path.join(env['S'], 'test'), 'w'):
+ pass
+ uid = os.lstat(os.path.join(env['S'], 'test')).st_uid
+ pw = pwd.getpwuid(uid)
+ # Similarly to testDoInsOptionUid, use the user name.
+ env['INSOPTIONS'] = '-o %s' % pw.pw_name
+ doins('test')
+ st = os.lstat(env['D'] + '/test')
+ if st.st_uid != uid:
+ raise tests.TestCase.failureException
+ finally:
+ self.cleanup()
+
+ def testDoInsOptionGid(self):
+ """Tests setting group by gid works."""
+ self.init()
+ try:
+ env = setup_env.env
+ with open(os.path.join(env['S'], 'test'), 'w'):
+ pass
+ gid = os.lstat(os.path.join(env['S'], 'test')).st_gid
+ # Similarly to testDoInsOptionUid, use a gid.
+ env['INSOPTIONS'] = '-g %d' % gid
+ doins('test')
+ st = os.lstat(env['D'] + '/test')
+ if st.st_gid != gid:
+ raise tests.TestCase.failureException
+ finally:
+ self.cleanup()
+
+ def testDoInsOptionGroupName(self):
+ """Tests setting group by name works."""
+ self.init()
+ try:
+ env = setup_env.env
+ with open(os.path.join(env['S'], 'test'), 'w'):
+ pass
+ gid = os.lstat(os.path.join(env['S'], 'test')).st_gid
+ gr = grp.getgrgid(gid)
+ # Similarly to testDoInsOptionUid, use the group name.
+ env['INSOPTIONS'] = '-g %s' % gr.gr_name
+ doins('test')
+ st = os.lstat(env['D'] + '/test')
+ if st.st_gid != gid:
+ raise tests.TestCase.failureException
+ finally:
+ self.cleanup()
+
+ def testDoInsFallback(self):
+ """Tests if falling back to the `install` command works."""
+ self.init()
+ try:
+ env = setup_env.env
+ # Use an option which doins.py does not know,
+ # so falling back to the `install` command is expected.
+ env['INSOPTIONS'] = '-b'
+ with open(os.path.join(env['S'], 'test'), 'w'):
+ pass
+ doins('test')
+ # So `install` should still work.
+ exists_in_D('/test')
+ finally:
+ self.cleanup()
+
+ def testDoInsRecursive(self):
+ """Tests installing a directory recursively."""
+ self.init()
+ try:
+ env = setup_env.env
+ os.mkdir(os.path.join(env['S'], 'testdir'))
+ with open(os.path.join(env['S'], 'testdir/test'), 'w'):
+ pass
+ doins('-r testdir')
+ exists_in_D('/testdir/test')
+ finally:
+ self.cleanup()
+
+ def testDoDirOption(self):
+ """Tests with DIROPTIONS."""
+ self.init()
+ try:
+ env = setup_env.env
+ # Use an option which doins.py knows.
+ env['DIROPTIONS'] = '-m0755'
+ os.mkdir(os.path.join(env['S'], 'testdir'))
+ with open(os.path.join(env['S'], 'testdir/test'), 'w'):
+ pass
+ doins('-r testdir')
+ st = os.lstat(env['D'] + '/testdir')
+ if stat.S_IMODE(st.st_mode) != 0o755:
+ raise tests.TestCase.failureException
+ finally:
+ self.cleanup()
+
+ def testDoDirFallback(self):
+ """Tests with DIROPTIONS which doins.py doesn't understand."""
+ self.init()
+ try:
+ env = setup_env.env
+ # Use an option which doins.py does not know,
+ # so falling back to the `install` command is expected.
+ env['DIROPTIONS'] = '-p'
+ os.mkdir(os.path.join(env['S'], 'testdir'))
+ with open(os.path.join(env['S'], 'testdir/test'), 'w'):
+ pass
+ doins('-r testdir')
+ # So, `install` should still work.
+ exists_in_D('/testdir/test')
+ finally:
+ self.cleanup()
+
+ def testSymlinkFile(self):
+ """Tests if installing a symlink works.
+
+ In EAPI=4 and later, installing a symlink should create a
+ symlink at the destination.
+ """
+ self.init()
+ try:
+ env = setup_env.env
+ env['EAPI'] = '4' # Enable symlink.
+ env['ED'] = env['D']
+ env['PORTAGE_ACTUAL_DISTDIR'] = '/foo'
+ # Create a file to be installed.
+ test_path = os.path.join(env['S'], 'test')
+ with open(test_path, 'w'):
+ pass
+ symlink_path = os.path.join(env['S'], 'symlink')
+ os.symlink('test', symlink_path)
+ doins('test symlink')
+ exists_in_D('/symlink')
+ # Make sure installed symlink is actually a symbolic
+ # link pointing to test.
+ if not os.path.islink(env['D'] + '/symlink'):
+ raise tests.TestCase.failureException
+ if os.readlink(env['D'] + '/symlink') != 'test':
+ raise tests.TestCase.failureException
+ finally:
+ self.cleanup()
+
+ def testSymlinkFileRecursive(self):
+ """Tests if installing a symlink in a directory works."""
+ self.init()
+ try:
+ env = setup_env.env
+ env['EAPI'] = '4' # Enable symlink.
+ env['ED'] = env['D']
+ env['PORTAGE_ACTUAL_DISTDIR'] = '/foo'
+ # Create a file to be installed.
+ parent_path = os.path.join(env['S'], 'test')
+ os.mkdir(parent_path)
+ with open(os.path.join(parent_path, 'test'), 'w'):
+ pass
+ symlink_path = os.path.join(
+ env['S'], 'test', 'symlink')
+ os.symlink('test', symlink_path)
+ doins('-r test')
+ exists_in_D('/test/symlink')
+ # Make sure installed symlink is actually a symbolic
+ # link pointing to test.
+ if not os.path.islink(env['D'] + '/test/symlink'):
+ raise tests.TestCase.failureException
+ if os.readlink(env['D'] + '/test/symlink') != 'test':
+ raise tests.TestCase.failureException
+ finally:
+ self.cleanup()
+
+ def testSymlinkDir(self):
+ """Tests installing a symlink to a directory."""
+ self.init()
+ try:
+ env = setup_env.env
+ env['EAPI'] = '4' # Enable symlink.
+ env['ED'] = env['D']
+ env['PORTAGE_ACTUAL_DISTDIR'] = '/foo'
+ # Create a dir to be installed.
+ os.mkdir(os.path.join(env['S'], 'test'))
+ symlink_path = os.path.join(env['S'], 'symlink')
+ os.symlink('test', symlink_path)
+ doins('test symlink')
+ # Make sure installed symlink is actually a symbolic
+ # link pointing to test.
+ if not os.path.islink(env['D'] + '/symlink'):
+ raise tests.TestCase.failureException
+ if os.readlink(env['D'] + '/symlink') != 'test':
+ raise tests.TestCase.failureException
+ finally:
+ self.cleanup()
+
+ def testSymlinkDirRecursive(self):
+ """Tests installing a symlink to a directory under a directory.
+ """
+ self.init()
+ try:
+ env = setup_env.env
+ env['EAPI'] = '4' # Enable symlink.
+ env['ED'] = env['D']
+ env['PORTAGE_ACTUAL_DISTDIR'] = '/foo'
+ # Create a file to be installed.
+ parent_path = os.path.join(env['S'], 'test')
+ os.mkdir(parent_path)
+ os.mkdir(os.path.join(parent_path, 'test'))
+ symlink_path = os.path.join(
+ env['S'], 'test', 'symlink')
+ os.symlink('test', symlink_path)
+ doins('-r test')
+ # Make sure installed symlink is actually a symbolic
+ # link pointing to test.
+ if not os.path.islink(env['D'] + '/test/symlink'):
+ raise tests.TestCase.failureException
+ if not os.path.isdir(env['D'] + '/test/symlink'):
+ raise tests.TestCase.failureException
+ if os.readlink(env['D'] + '/test/symlink') != 'test':
+ raise tests.TestCase.failureException
+ finally:
+ self.cleanup()
+
+ def testSymlinkOverwrite(self):
+ """Tests installing a file to overwrite the existing file.
+
+ Specifically, if the existing file is a symlink, it should be
+ removed first, and the file content should be copied.
+ """
+ self.init()
+ try:
+ env = setup_env.env
+ test_path = os.path.join(env['S'], 'test')
+ with open(test_path, 'w'):
+ pass
+ # Create a dangling symlink. If removal does not work,
+ # this would easily cause an ENOENT error.
+ os.symlink('foo/bar', env['D'] + '/test')
+ doins('test')
+ # Actual file should be installed.
+ if os.path.islink(env['D'] + '/test'):
+ raise tests.TestCase.failureException
+ finally:
+ self.cleanup()
+
+ def testHardlinkOverwrite(self):
+ """Tests installing a file to overwrite the hardlink.
+
+ If the existing file is a hardlink, it should be removed first,
+ and the file content should be copied.
+ """
+ self.init()
+ try:
+ env = setup_env.env
+ test_path = os.path.join(env['S'], 'test')
+ with open(test_path, 'w'):
+ pass
+ # Create hardlink at the dest.
+ os.link(test_path, env['D'] + '/test')
+ doins('test')
+ # The hardlink should be unlinked, and then a copy
+ # should be created.
+ if os.path.samefile(test_path, env['D'] + '/test'):
+ raise tests.TestCase.failureException
+ finally:
+ self.cleanup()
diff --git a/lib/portage/tests/bin/test_eapi7_ver_funcs.py b/lib/portage/tests/bin/test_eapi7_ver_funcs.py
new file mode 100644
index 000000000..408975298
--- /dev/null
+++ b/lib/portage/tests/bin/test_eapi7_ver_funcs.py
@@ -0,0 +1,240 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+import tempfile
+
+from portage.const import PORTAGE_BIN_PATH
+from portage.tests import TestCase
+
+
+class TestEAPI7VerFuncs(TestCase):
+ def _test_output(self, test_cases):
+ """
+ Test that commands in test_cases produce expected output.
+ """
+ with tempfile.NamedTemporaryFile('w') as test_script:
+ test_script.write('source "%s"/eapi7-ver-funcs.sh\n'
+ % (PORTAGE_BIN_PATH,))
+ for cmd, exp in test_cases:
+ test_script.write('%s\n' % (cmd,))
+ test_script.flush()
+
+ s = subprocess.Popen(['bash', test_script.name],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ sout, serr = s.communicate()
+ self.assertEqual(s.returncode, 0)
+
+ for test_case, result in zip(test_cases, sout.decode().splitlines()):
+ cmd, exp = test_case
+ self.assertEqual(result, exp,
+ '%s -> %s; expected: %s' % (cmd, result, exp))
+
+ def _test_return(self, test_cases):
+ """
+ Test that commands in test_cases give appropriate exit codes.
+ """
+ with tempfile.NamedTemporaryFile('w+') as test_script:
+ test_script.write('source "%s"/eapi7-ver-funcs.sh\n'
+ % (PORTAGE_BIN_PATH,))
+ for cmd, exp in test_cases:
+ test_script.write('%s; echo $?\n' % (cmd,))
+ test_script.flush()
+
+ s = subprocess.Popen(['bash', test_script.name],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ sout, serr = s.communicate()
+ self.assertEqual(s.returncode, 0)
+
+ for test_case, result in zip(test_cases, sout.decode().splitlines()):
+ cmd, exp = test_case
+ self.assertEqual(result, exp,
+ '%s -> %s; expected: %s' % (cmd, result, exp))
+
+ def _test_fail(self, test_cases):
+ """
+ Test that commands in test_cases fail.
+ """
+
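+ # die() is stubbed to exit with status 1, so a failing ver_* call
+ # terminates the shell with the status checked below.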
+ for cmd in test_cases:
+ test = '''
+source "%s"/eapi7-ver-funcs.sh
+die() { exit 1; }
+%s''' % (PORTAGE_BIN_PATH, cmd)
+
+ s = subprocess.Popen(['bash', '-c', test],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ sout, serr = s.communicate()
+ self.assertEqual(s.returncode, 1,
+ '"%s" did not fail; output: %s; %s)'
+ % (cmd, sout.decode(), serr.decode()))
+
+ def test_ver_cut(self):
+ test_cases = [
+ # (command, output)
+ ('ver_cut 1 1.2.3', '1'),
+ ('ver_cut 1-1 1.2.3', '1'),
+ ('ver_cut 1-2 1.2.3', '1.2'),
+ ('ver_cut 2- 1.2.3', '2.3'),
+ ('ver_cut 1- 1.2.3', '1.2.3'),
+ ('ver_cut 3-4 1.2.3b_alpha4', '3b'),
+ ('ver_cut 5 1.2.3b_alpha4', 'alpha'),
+ ('ver_cut 1-2 .1.2.3', '1.2'),
+ ('ver_cut 0-2 .1.2.3', '.1.2'),
+ ('ver_cut 2-3 1.2.3.', '2.3'),
+ ('ver_cut 2- 1.2.3.', '2.3.'),
+ ('ver_cut 2-4 1.2.3.', '2.3.'),
+ ]
+ self._test_output(test_cases)
+
+ def test_ver_rs(self):
+ test_cases = [
+ # (command, output)
+ ('ver_rs 1 - 1.2.3', '1-2.3'),
+ ('ver_rs 2 - 1.2.3', '1.2-3'),
+ ('ver_rs 1-2 - 1.2.3.4', '1-2-3.4'),
+ ('ver_rs 2- - 1.2.3.4', '1.2-3-4'),
+ ('ver_rs 2 . 1.2-3', '1.2.3'),
+ ('ver_rs 3 . 1.2.3a', '1.2.3.a'),
+ ('ver_rs 2-3 - 1.2_alpha4', '1.2-alpha-4'),
+ ('ver_rs 3 - 2 "" 1.2.3b_alpha4', '1.23-b_alpha4'),
+ ('ver_rs 3-5 _ 4-6 - a1b2c3d4e5', 'a1b_2-c-3-d4e5'),
+ ('ver_rs 1 - .1.2.3', '.1-2.3'),
+ ('ver_rs 0 - .1.2.3', '-1.2.3'),
+ ]
+ self._test_output(test_cases)
+
+ def test_truncated_range(self):
+ test_cases = [
+ # (command, output)
+ ('ver_cut 0-2 1.2.3', '1.2'),
+ ('ver_cut 2-5 1.2.3', '2.3'),
+ ('ver_cut 4 1.2.3', ''),
+ ('ver_cut 0 1.2.3', ''),
+ ('ver_cut 4- 1.2.3', ''),
+ ('ver_rs 0 - 1.2.3', '1.2.3'),
+ ('ver_rs 3 . 1.2.3', '1.2.3'),
+ ('ver_rs 3- . 1.2.3', '1.2.3'),
+ ('ver_rs 3-5 . 1.2.3', '1.2.3'),
+ ]
+ self._test_output(test_cases)
+
+ def test_invalid_range(self):
+ test_cases = [
+ 'ver_cut foo 1.2.3',
+ 'ver_rs -3 _ a1b2c3d4e5',
+ 'ver_rs 5-3 _ a1b2c3d4e5',
+ ]
+ self._test_fail(test_cases)
+
+ def test_ver_test(self):
+ test_cases = [
+ # Tests from Portage's test_vercmp.py
+ ('ver_test 6.0 -gt 5.0', '0'),
+ ('ver_test 5.0 -gt 5', '0'),
+ ('ver_test 1.0-r1 -gt 1.0-r0', '0'),
+ ('ver_test 999999999999999999 -gt 999999999999999998', '0'), # 18 digits
+ ('ver_test 1.0.0 -gt 1.0', '0'),
+ ('ver_test 1.0.0 -gt 1.0b', '0'),
+ ('ver_test 1b -gt 1', '0'),
+ ('ver_test 1b_p1 -gt 1_p1', '0'),
+ ('ver_test 1.1b -gt 1.1', '0'),
+ ('ver_test 12.2.5 -gt 12.2b', '0'),
+ ('ver_test 4.0 -lt 5.0', '0'),
+ ('ver_test 5 -lt 5.0', '0'),
+ ('ver_test 1.0_pre2 -lt 1.0_p2', '0'),
+ ('ver_test 1.0_alpha2 -lt 1.0_p2', '0'),
+ ('ver_test 1.0_alpha1 -lt 1.0_beta1', '0'),
+ ('ver_test 1.0_beta3 -lt 1.0_rc3', '0'),
+ ('ver_test 1.001000000000000001 -lt 1.001000000000000002', '0'),
+ ('ver_test 1.00100000000 -lt 1.001000000000000001', '0'),
+ ('ver_test 999999999999999998 -lt 999999999999999999', '0'),
+ ('ver_test 1.01 -lt 1.1', '0'),
+ ('ver_test 1.0-r0 -lt 1.0-r1', '0'),
+ ('ver_test 1.0 -lt 1.0-r1', '0'),
+ ('ver_test 1.0 -lt 1.0.0', '0'),
+ ('ver_test 1.0b -lt 1.0.0', '0'),
+ ('ver_test 1_p1 -lt 1b_p1', '0'),
+ ('ver_test 1 -lt 1b', '0'),
+ ('ver_test 1.1 -lt 1.1b', '0'),
+ ('ver_test 12.2b -lt 12.2.5', '0'),
+ ('ver_test 4.0 -eq 4.0', '0'),
+ ('ver_test 1.0 -eq 1.0', '0'),
+ ('ver_test 1.0-r0 -eq 1.0', '0'),
+ ('ver_test 1.0 -eq 1.0-r0', '0'),
+ ('ver_test 1.0-r0 -eq 1.0-r0', '0'),
+ ('ver_test 1.0-r1 -eq 1.0-r1', '0'),
+ ('ver_test 1 -eq 2', '1'),
+ ('ver_test 1.0_alpha -eq 1.0_pre', '1'),
+ ('ver_test 1.0_beta -eq 1.0_alpha', '1'),
+ ('ver_test 1 -eq 0.0', '1'),
+ ('ver_test 1.0-r0 -eq 1.0-r1', '1'),
+ ('ver_test 1.0-r1 -eq 1.0-r0', '1'),
+ ('ver_test 1.0 -eq 1.0-r1', '1'),
+ ('ver_test 1.0-r1 -eq 1.0', '1'),
+ ('ver_test 1.0 -eq 1.0.0', '1'),
+ ('ver_test 1_p1 -eq 1b_p1', '1'),
+ ('ver_test 1b -eq 1', '1'),
+ ('ver_test 1.1b -eq 1.1', '1'),
+ ('ver_test 12.2b -eq 12.2', '1'),
+
+ # A subset of tests from Paludis
+ ('ver_test 1.0_alpha -gt 1_alpha', '0'),
+ ('ver_test 1.0_alpha -gt 1', '0'),
+ ('ver_test 1.0_alpha -lt 1.0', '0'),
+ ('ver_test 1.2.0.0_alpha7-r4 -gt 1.2_alpha7-r4', '0'),
+ ('ver_test 0001 -eq 1', '0'),
+ ('ver_test 01 -eq 001', '0'),
+ ('ver_test 0001.1 -eq 1.1', '0'),
+ ('ver_test 01.01 -eq 1.01', '0'),
+ ('ver_test 1.010 -eq 1.01', '0'),
+ ('ver_test 1.00 -eq 1.0', '0'),
+ ('ver_test 1.0100 -eq 1.010', '0'),
+ ('ver_test 1-r00 -eq 1-r0', '0'),
+
+ # Additional tests
+ ('ver_test 0_rc99 -lt 0', '0'),
+ ('ver_test 011 -eq 11', '0'),
+ ('ver_test 019 -eq 19', '0'),
+ ('ver_test 1.2 -eq 001.2', '0'),
+ ('ver_test 1.2 -gt 1.02', '0'),
+ ('ver_test 1.2a -lt 1.2b', '0'),
+ ('ver_test 1.2_pre1 -gt 1.2_pre1_beta2', '0'),
+ ('ver_test 1.2_pre1 -lt 1.2_pre1_p2', '0'),
+ ('ver_test 1.00 -lt 1.0.0', '0'),
+ ('ver_test 1.010 -eq 1.01', '0'),
+ ('ver_test 1.01 -lt 1.1', '0'),
+ ('ver_test 1.2_pre08-r09 -eq 1.2_pre8-r9', '0'),
+ ('ver_test 0 -lt 576460752303423488', '0'), # 2**59
+ ('ver_test 0 -lt 9223372036854775808', '0'), # 2**63
+ ]
+ self._test_return(test_cases)
+
+ def test_invalid_test(self):
+ test_cases = [
+ # Bad number or ordering of arguments
+ 'ver_test 1',
+ 'ver_test 1 -lt 2 3',
+ 'ver_test -lt 1 2',
+
+ # Bad operators
+ 'ver_test 1 "<" 2',
+ 'ver_test 1 lt 2',
+ 'ver_test 1 -foo 2',
+
+ # Malformed versions
+ 'ver_test "" -ne 1',
+ 'ver_test 1. -ne 1',
+ 'ver_test 1ab -ne 1',
+ 'ver_test b -ne 1',
+ 'ver_test 1-r1_pre -ne 1',
+ 'ver_test 1-pre1 -ne 1',
+ 'ver_test 1_foo -ne 1',
+ 'ver_test 1_pre1.1 -ne 1',
+ 'ver_test 1-r1.0 -ne 1',
+ 'ver_test cvs.9999 -ne 9999',
+ ]
+ self._test_fail(test_cases)
diff --git a/lib/portage/tests/bin/test_filter_bash_env.py b/lib/portage/tests/bin/test_filter_bash_env.py
new file mode 100644
index 000000000..d906ea793
--- /dev/null
+++ b/lib/portage/tests/bin/test_filter_bash_env.py
@@ -0,0 +1,115 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import difflib
+import os
+import subprocess
+
+import portage
+from portage.const import PORTAGE_BIN_PATH
+from portage.tests import TestCase
+
+
+class TestFilterBashEnv(TestCase):
+ def testTestFilterBashEnv(self):
+
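+ # Each case: (patterns of variables/functions to filter out, input
+ # environment dump, expected filtered output).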
+ test_cases = (
+ (
+ 'RDEPEND BASH.* _EPATCH_ECLASS',
+ br'''declare -ir BASHPID="28997"
+declare -rx A="portage-2.3.24.tar.bz2"
+declare -- DESKTOP_DATABASE_DIR="/usr/share/applications"
+declare PDEPEND="
+ !build? (
+ >=net-misc/rsync-2.6.4
+ userland_GNU? ( >=sys-apps/coreutils-6.4 )
+ ) "
+declare RDEPEND="
+ >=app-arch/tar-1.27
+ dev-lang/python-exec:2"
+declare -x PF="portage-2.3.24"
+declare -a PYTHON_COMPAT=([0]="pypy" [1]="python3_4" [2]="python3_5" [3]="python3_6" [4]="python2_7")
+declare -- _EPATCH_ECLASS="1"
+declare -- _EUTILS_ECLASS="1"
+declare -- f
+get_libdir ()
+{
+ local CONF_LIBDIR;
+ if [ -n "${CONF_LIBDIR_OVERRIDE}" ]; then
+ echo ${CONF_LIBDIR_OVERRIDE};
+ else
+ get_abi_LIBDIR;
+ fi
+}
+make_wrapper ()
+{
+ cat <<-EOF
+export ${var}="\${${var}}:${EPREFIX}${libdir}"
+EOF
+}
+use_if_iuse ()
+{
+ in_iuse $1 || return 1;
+ use $1
+}
+''',
+ br'''declare -x A="portage-2.3.24.tar.bz2"
+declare -- DESKTOP_DATABASE_DIR="/usr/share/applications"
+declare PDEPEND="
+ !build? (
+ >=net-misc/rsync-2.6.4
+ userland_GNU? ( >=sys-apps/coreutils-6.4 )
+ ) "
+declare -x PF="portage-2.3.24"
+declare -a PYTHON_COMPAT=([0]="pypy" [1]="python3_4" [2]="python3_5" [3]="python3_6" [4]="python2_7")
+declare -- _EUTILS_ECLASS="1"
+declare -- f
+get_libdir ()
+{
+ local CONF_LIBDIR;
+ if [ -n "${CONF_LIBDIR_OVERRIDE}" ]; then
+ echo ${CONF_LIBDIR_OVERRIDE};
+ else
+ get_abi_LIBDIR;
+ fi
+}
+make_wrapper ()
+{
+ cat <<-EOF
+export ${var}="\${${var}}:${EPREFIX}${libdir}"
+EOF
+}
+use_if_iuse ()
+{
+ in_iuse $1 || return 1;
+ use $1
+}
+'''),
+ )
+
+ for filter_vars, env_in, env_out in test_cases:
+ proc = None
+ try:
+ proc = subprocess.Popen(
+ [
+ portage._python_interpreter,
+ os.path.join(PORTAGE_BIN_PATH, 'filter-bash-environment.py'),
+ filter_vars,
+ ],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ )
+ proc.stdin.write(env_in)
+ proc.stdin.close()
+ result = proc.stdout.read()
+ finally:
+ if proc is not None:
+ proc.stdin.close()
+ proc.wait()
+ proc.stdout.close()
+
+ diff = list(difflib.unified_diff(
+ env_out.decode('utf_8').splitlines(),
+ result.decode('utf_8').splitlines()))
+
+ self.assertEqual(diff, [])
diff --git a/lib/portage/tests/dbapi/__init__.py b/lib/portage/tests/dbapi/__init__.py
new file mode 100644
index 000000000..532918b6a
--- /dev/null
+++ b/lib/portage/tests/dbapi/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/tests/dbapi/__test__.py b/lib/portage/tests/dbapi/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/dbapi/__test__.py
diff --git a/lib/portage/tests/dbapi/test_fakedbapi.py b/lib/portage/tests/dbapi/test_fakedbapi.py
new file mode 100644
index 000000000..19ea9cd00
--- /dev/null
+++ b/lib/portage/tests/dbapi/test_fakedbapi.py
@@ -0,0 +1,92 @@
+# Copyright 2011-2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import tempfile
+
+import portage
+from portage import os
+from portage import shutil
+from portage.dbapi.virtual import fakedbapi
+from portage.package.ebuild.config import config
+from portage.tests import TestCase
+
+class TestFakedbapi(TestCase):
+
+ def testFakedbapi(self):
+ packages = (
+ ("app-misc/foo-1", {
+ "EAPI" : "2", # does not support IUSE_EFFECTIVE
+ "IUSE" : "",
+ "repository" : "gentoo",
+ "SLOT" : "1",
+ "USE" : "missing-iuse",
+ }),
+ ("app-misc/foo-2", {
+ "EAPI" : "5", # supports IUSE_EFFECTIVE
+ "IUSE" : "",
+ "repository" : "gentoo",
+ "SLOT" : "2",
+ "USE" : "missing-iuse",
+ }),
+ ("sys-apps/portage-2.1.10", {
+ "EAPI" : "2",
+ "IUSE" : "ipc doc",
+ "repository" : "gentoo",
+ "SLOT" : "0",
+ "USE" : "ipc missing-iuse",
+ }),
+ ("virtual/package-manager-0", {
+ "EAPI" : "0",
+ "repository" : "gentoo",
+ "SLOT" : "0",
+ }),
+ )
+
+ match_tests = (
+ # The missing-iuse match is only intended to work for binary
+ # packages with EAPIs that support IUSE_EFFECTIVE (bug 640318).
+ ("app-misc/foo[missing-iuse]", ["app-misc/foo-2"]),
+ ("app-misc/foo[-missing-iuse]", []),
+ ("app-misc/foo", ["app-misc/foo-1", "app-misc/foo-2"]),
+
+ ("sys-apps/portage:0[ipc]", ["sys-apps/portage-2.1.10"]),
+ ("sys-apps/portage:0[-ipc]", []),
+ ("sys-apps/portage:0[doc]", []),
+ ("sys-apps/portage:0[-doc]", ["sys-apps/portage-2.1.10"]),
+ ("sys-apps/portage:0", ["sys-apps/portage-2.1.10"]),
+ ("sys-apps/portage:0[missing-iuse]", []),
+ ("sys-apps/portage:0[-missing-iuse]", []),
+ ("sys-apps/portage:0::gentoo[ipc]", ["sys-apps/portage-2.1.10"]),
+ ("sys-apps/portage:0::multilib[ipc]", []),
+ ("virtual/package-manager", ["virtual/package-manager-0"]),
+ )
+
+ tempdir = tempfile.mkdtemp()
+ try:
+ test_repo = os.path.join(tempdir, "var", "repositories", "test_repo")
+ os.makedirs(os.path.join(test_repo, "profiles"))
+ with open(os.path.join(test_repo, "profiles", "repo_name"), "w") as f:
+ f.write("test_repo")
+ env = {
+ "PORTAGE_REPOSITORIES": "[DEFAULT]\nmain-repo = test_repo\n[test_repo]\nlocation = %s" % test_repo
+ }
+
+ # Tests may override portage.const.EPREFIX in order to
+ # simulate a prefix installation. It's reasonable to do
+ # this because tests should be self-contained such that
+ # the "real" value of portage.const.EPREFIX is entirely
+ # irrelevant (see bug #492932).
+ portage.const.EPREFIX = tempdir
+
+ fakedb = fakedbapi(settings=config(config_profile_path="",
+ env=env, eprefix=tempdir))
+ for cpv, metadata in packages:
+ fakedb.cpv_inject(cpv, metadata=metadata)
+
+ for atom, expected_result in match_tests:
+ result = fakedb.match(atom)
+ self.assertEqual(fakedb.match(atom), expected_result,
+ "fakedb.match('%s') = %s != %s" %
+ (atom, result, expected_result))
+ finally:
+ shutil.rmtree(tempdir)
diff --git a/lib/portage/tests/dbapi/test_portdb_cache.py b/lib/portage/tests/dbapi/test_portdb_cache.py
new file mode 100644
index 000000000..d3101b120
--- /dev/null
+++ b/lib/portage/tests/dbapi/test_portdb_cache.py
@@ -0,0 +1,184 @@
+# Copyright 2012-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+import sys
+import textwrap
+
+import portage
+from portage import os
+from portage import _unicode_decode
+from portage.const import (BASH_BINARY, PORTAGE_PYM_PATH, USER_CONFIG_PATH)
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.util import ensure_dirs
+
+class PortdbCacheTestCase(TestCase):
+
+ def testPortdbCache(self):
+ debug = False
+
+ ebuilds = {
+ "dev-libs/A-1": {},
+ "dev-libs/A-2": {},
+ "sys-apps/B-1": {},
+ "sys-apps/B-2": {},
+ }
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=debug)
+ settings = playground.settings
+ eprefix = settings["EPREFIX"]
+ test_repo_location = settings.repositories["test_repo"].location
+ user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH)
+ metadata_dir = os.path.join(test_repo_location, "metadata")
+ md5_cache_dir = os.path.join(metadata_dir, "md5-cache")
+ pms_cache_dir = os.path.join(metadata_dir, "cache")
+ layout_conf_path = os.path.join(metadata_dir, "layout.conf")
+
+ portage_python = portage._python_interpreter
+ egencache_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.bindir, "egencache"),
+ "--update-manifests", "--sign-manifests=n",
+ "--repo", "test_repo",
+ "--repositories-configuration", settings.repositories.config_string())
+ python_cmd = (portage_python, "-b", "-Wd", "-c")
+
+ test_commands = (
+ (lambda: not os.path.exists(pms_cache_dir),),
+ (lambda: not os.path.exists(md5_cache_dir),),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ if portage.portdb.repositories['test_repo'].location in portage.portdb._pregen_auxdb:
+ sys.exit(1)
+ """),),
+
+ egencache_cmd + ("--update",),
+ (lambda: not os.path.exists(pms_cache_dir),),
+ (lambda: os.path.exists(md5_cache_dir),),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ if portage.portdb.repositories['test_repo'].location not in portage.portdb._pregen_auxdb:
+ sys.exit(1)
+ """),),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ from portage.cache.flat_hash import md5_database
+ if not isinstance(portage.portdb._pregen_auxdb[portage.portdb.repositories['test_repo'].location], md5_database):
+ sys.exit(1)
+ """),),
+
+ (BASH_BINARY, "-c", "echo %s > %s" %
+ tuple(map(portage._shell_quote,
+ ("cache-formats = md5-dict pms", layout_conf_path,)))),
+ egencache_cmd + ("--update",),
+ (lambda: os.path.exists(md5_cache_dir),),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ if portage.portdb.repositories['test_repo'].location not in portage.portdb._pregen_auxdb:
+ sys.exit(1)
+ """),),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ from portage.cache.flat_hash import md5_database
+ if not isinstance(portage.portdb._pregen_auxdb[portage.portdb.repositories['test_repo'].location], md5_database):
+ sys.exit(1)
+ """),),
+
+ # Disable DeprecationWarnings, since the pms format triggers them
+ # in portdbapi._create_pregen_cache().
+ (BASH_BINARY, "-c", "echo %s > %s" %
+ tuple(map(portage._shell_quote,
+ ("cache-formats = pms md5-dict", layout_conf_path,)))),
+ (portage_python, "-b", "-Wd", "-Wi::DeprecationWarning", "-c") + (textwrap.dedent("""
+ import os, sys, portage
+ if portage.portdb.repositories['test_repo'].location not in portage.portdb._pregen_auxdb:
+ sys.exit(1)
+ """),),
+ (portage_python, "-b", "-Wd", "-Wi::DeprecationWarning", "-c") + (textwrap.dedent("""
+ import os, sys, portage
+ from portage.cache.metadata import database as pms_database
+ if not isinstance(portage.portdb._pregen_auxdb[portage.portdb.repositories['test_repo'].location], pms_database):
+ sys.exit(1)
+ """),),
+
+ # Test auto-detection and preference for md5-cache when both
+ # cache formats are available but layout.conf is absent.
+ (BASH_BINARY, "-c", "rm %s" % portage._shell_quote(layout_conf_path)),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ if portage.portdb.repositories['test_repo'].location not in portage.portdb._pregen_auxdb:
+ sys.exit(1)
+ """),),
+ python_cmd + (textwrap.dedent("""
+ import os, sys, portage
+ from portage.cache.flat_hash import md5_database
+ if not isinstance(portage.portdb._pregen_auxdb[portage.portdb.repositories['test_repo'].location], md5_database):
+ sys.exit(1)
+ """),),
+ )
+
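+ # Make sure the subprocesses import portage from this source tree by
+ # ensuring that PORTAGE_PYM_PATH comes first in PYTHONPATH.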
+ pythonpath = os.environ.get("PYTHONPATH")
+ if pythonpath is not None and not pythonpath.strip():
+ pythonpath = None
+ if pythonpath is not None and \
+ pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
+ pass
+ else:
+ if pythonpath is None:
+ pythonpath = ""
+ else:
+ pythonpath = ":" + pythonpath
+ pythonpath = PORTAGE_PYM_PATH + pythonpath
+
+ env = {
+ "PATH" : os.environ.get("PATH", ""),
+ "PORTAGE_OVERRIDE_EPREFIX" : eprefix,
+ "PORTAGE_PYTHON" : portage_python,
+ "PORTAGE_REPOSITORIES" : settings.repositories.config_string(),
+ "PYTHONDONTWRITEBYTECODE" : os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
+ "PYTHONPATH" : pythonpath,
+ }
+
+ if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
+ env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
+
+ dirs = [user_config_dir]
+
+ try:
+ for d in dirs:
+ ensure_dirs(d)
+
+ if debug:
+ # The subprocess inherits both stdout and stderr, for
+ # debugging purposes.
+ stdout = None
+ else:
+ # The subprocess inherits stderr so that any warnings
+ # triggered by python -Wd will be visible.
+ stdout = subprocess.PIPE
+
+ for i, args in enumerate(test_commands):
+
+ if hasattr(args[0], '__call__'):
+ self.assertTrue(args[0](),
+ "callable at index %s failed" % (i,))
+ continue
+
+ proc = subprocess.Popen(args,
+ env=env, stdout=stdout)
+
+ if debug:
+ proc.wait()
+ else:
+ output = proc.stdout.readlines()
+ proc.wait()
+ proc.stdout.close()
+ if proc.returncode != os.EX_OK:
+ for line in output:
+ sys.stderr.write(_unicode_decode(line))
+
+ self.assertEqual(os.EX_OK, proc.returncode,
+ "command %d failed with args %s" % (i, args,))
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/dep/__init__.py b/lib/portage/tests/dep/__init__.py
new file mode 100644
index 000000000..9c3f52476
--- /dev/null
+++ b/lib/portage/tests/dep/__init__.py
@@ -0,0 +1,3 @@
+# tests/portage.dep/__init__.py -- Portage Unit Test functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/tests/dep/__test__.py b/lib/portage/tests/dep/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/dep/__test__.py
diff --git a/lib/portage/tests/dep/testAtom.py b/lib/portage/tests/dep/testAtom.py
new file mode 100644
index 000000000..da58be27c
--- /dev/null
+++ b/lib/portage/tests/dep/testAtom.py
@@ -0,0 +1,341 @@
+# Copyright 2006-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import Atom
+from portage.exception import InvalidAtom
+
+class TestAtom(TestCase):
+
+ def testAtom(self):
+
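+ # Each entry: (atom, (operator, cp, version, slot, use, repo),
+ # allow_wildcard, allow_repo).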
+ tests = (
+ ("=sys-apps/portage-2.1-r1:0[doc,a=,!b=,c?,!d?,-e]",
+ ('=', 'sys-apps/portage', '2.1-r1', '0', '[doc,a=,!b=,c?,!d?,-e]', None), False, False),
+ ("=sys-apps/portage-2.1-r1*:0[doc]",
+ ('=*', 'sys-apps/portage', '2.1-r1', '0', '[doc]', None), False, False),
+ ("sys-apps/portage:0[doc]",
+ (None, 'sys-apps/portage', None, '0', '[doc]', None), False, False),
+ ("sys-apps/portage:0[doc]",
+ (None, 'sys-apps/portage', None, '0', '[doc]', None), False, False),
+ ("*/*",
+ (None, '*/*', None, None, None, None), True, False),
+ ("=*/*-*9999*",
+ ('=*', '*/*', '*9999*', None, None, None), True, False),
+ ("=*/*-*9999*:0::repo_name",
+ ('=*', '*/*', '*9999*', '0', None, 'repo_name'), True, True),
+ ("=*/*-*_beta*",
+ ('=*', '*/*', '*_beta*', None, None, None), True, False),
+ ("=*/*-*_beta*:0::repo_name",
+ ('=*', '*/*', '*_beta*', '0', None, 'repo_name'), True, True),
+ ("sys-apps/*",
+ (None, 'sys-apps/*', None, None, None, None), True, False),
+ ("*/portage",
+ (None, '*/portage', None, None, None, None), True, False),
+ ("s*s-*/portage:1",
+ (None, 's*s-*/portage', None, '1', None, None), True, False),
+ ("*/po*ge:2",
+ (None, '*/po*ge', None, '2', None, None), True, False),
+ ("!dev-libs/A",
+ (None, 'dev-libs/A', None, None, None, None), True, True),
+ ("!!dev-libs/A",
+ (None, 'dev-libs/A', None, None, None, None), True, True),
+ ("!!dev-libs/A",
+ (None, 'dev-libs/A', None, None, None, None), True, True),
+ ("dev-libs/A[foo(+)]",
+ (None, 'dev-libs/A', None, None, "[foo(+)]", None), True, True),
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]",
+ (None, 'dev-libs/A', None, None, "[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", None), True, True),
+ ("dev-libs/A:2[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]",
+ (None, 'dev-libs/A', None, "2", "[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", None), True, True),
+
+ ("=sys-apps/portage-2.1-r1:0::repo_name[doc,a=,!b=,c?,!d?,-e]",
+ ('=', 'sys-apps/portage', '2.1-r1', '0', '[doc,a=,!b=,c?,!d?,-e]', 'repo_name'), False, True),
+ ("=sys-apps/portage-2.1-r1*:0::repo_name[doc]",
+ ('=*', 'sys-apps/portage', '2.1-r1', '0', '[doc]', 'repo_name'), False, True),
+ ("sys-apps/portage:0::repo_name[doc]",
+ (None, 'sys-apps/portage', None, '0', '[doc]', 'repo_name'), False, True),
+
+ ("*/*::repo_name",
+ (None, '*/*', None, None, None, 'repo_name'), True, True),
+ ("sys-apps/*::repo_name",
+ (None, 'sys-apps/*', None, None, None, 'repo_name'), True, True),
+ ("*/portage::repo_name",
+ (None, '*/portage', None, None, None, 'repo_name'), True, True),
+ ("s*s-*/portage:1::repo_name",
+ (None, 's*s-*/portage', None, '1', None, 'repo_name'), True, True),
+ )
+
+ tests_xfail = (
+ (Atom("sys-apps/portage"), False, False),
+ ("cat/pkg[a!]", False, False),
+ ("cat/pkg[!a]", False, False),
+ ("cat/pkg[!a!]", False, False),
+ ("cat/pkg[!a-]", False, False),
+ ("cat/pkg[-a=]", False, False),
+ ("cat/pkg[-a?]", False, False),
+ ("cat/pkg[-a!]", False, False),
+ ("cat/pkg[=a]", False, False),
+ ("cat/pkg[=a=]", False, False),
+ ("cat/pkg[=a?]", False, False),
+ ("cat/pkg[=a!]", False, False),
+ ("cat/pkg[=a-]", False, False),
+ ("cat/pkg[?a]", False, False),
+ ("cat/pkg[?a=]", False, False),
+ ("cat/pkg[?a?]", False, False),
+ ("cat/pkg[?a!]", False, False),
+ ("cat/pkg[?a-]", False, False),
+ ("sys-apps/portage[doc]:0", False, False),
+ ("*/*", False, False),
+ ("sys-apps/*", False, False),
+ ("*/portage", False, False),
+ ("*/**", True, False),
+ ("*/portage[use]", True, False),
+ ("cat/pkg[a()]", False, False),
+ ("cat/pkg[a(]", False, False),
+ ("cat/pkg[a)]", False, False),
+ ("cat/pkg[a(,b]", False, False),
+ ("cat/pkg[a),b]", False, False),
+ ("cat/pkg[a(*)]", False, False),
+ ("cat/pkg[a(*)]", True, False),
+ ("cat/pkg[a(+-)]", False, False),
+ ("cat/pkg[a()]", False, False),
+ ("cat/pkg[(+)a]", False, False),
+ ("cat/pkg[a=(+)]", False, False),
+ ("cat/pkg[!(+)a=]", False, False),
+ ("cat/pkg[!a=(+)]", False, False),
+ ("cat/pkg[a?(+)]", False, False),
+ ("cat/pkg[!a?(+)]", False, False),
+ ("cat/pkg[!(+)a?]", False, False),
+ ("cat/pkg[-(+)a]", False, False),
+ ("cat/pkg[a(+),-a]", False, False),
+ ("cat/pkg[a(-),-a]", False, False),
+ ("cat/pkg[-a,a(+)]", False, False),
+ ("cat/pkg[-a,a(-)]", False, False),
+ ("cat/pkg[-a(+),a(-)]", False, False),
+ ("cat/pkg[-a(-),a(+)]", False, False),
+ ("sys-apps/portage[doc]::repo_name", False, False),
+ ("sys-apps/portage:0[doc]::repo_name", False, False),
+ ("sys-apps/portage[doc]:0::repo_name", False, False),
+ ("=sys-apps/portage-2.1-r1:0::repo_name[doc,a=,!b=,c?,!d?,-e]", False, False),
+ ("=sys-apps/portage-2.1-r1*:0::repo_name[doc]", False, False),
+ ("sys-apps/portage:0::repo_name[doc]", False, False),
+ ("*/*::repo_name", True, False),
+ )
+
+ for atom, parts, allow_wildcard, allow_repo in tests:
+ a = Atom(atom, allow_wildcard=allow_wildcard, allow_repo=allow_repo)
+ op, cp, ver, slot, use, repo = parts
+ self.assertEqual(op, a.operator,
+ msg="Atom('%s').operator = %s == '%s'" % (atom, a.operator, op))
+ self.assertEqual(cp, a.cp,
+ msg="Atom('%s').cp = %s == '%s'" % (atom, a.cp, cp))
+ if ver is not None:
+ cpv = "%s-%s" % (cp, ver)
+ else:
+ cpv = cp
+ self.assertEqual(cpv, a.cpv,
+ msg="Atom('%s').cpv = %s == '%s'" % (atom, a.cpv, cpv))
+ self.assertEqual(slot, a.slot,
+ msg="Atom('%s').slot = %s == '%s'" % (atom, a.slot, slot))
+ self.assertEqual(repo, a.repo,
+ msg="Atom('%s').repo == %s == '%s'" % (atom, a.repo, repo))
+
+ if a.use:
+ returned_use = str(a.use)
+ else:
+ returned_use = None
+ self.assertEqual(use, returned_use,
+ msg="Atom('%s').use = %s == '%s'" % (atom, returned_use, use))
+
+ for atom, allow_wildcard, allow_repo in tests_xfail:
+ self.assertRaisesMsg(atom, (InvalidAtom, TypeError), Atom, atom,
+ allow_wildcard=allow_wildcard, allow_repo=allow_repo)
+
+ def testSlotAbiAtom(self):
+ tests = (
+ ("virtual/ffmpeg:0/53", "4-slot-abi", {"slot": "0", "sub_slot": "53", "slot_operator": None}),
+ ("virtual/ffmpeg:0/53=", "4-slot-abi", {"slot": "0", "sub_slot": "53", "slot_operator": "="}),
+ ("virtual/ffmpeg:=", "4-slot-abi", {"slot": None, "sub_slot": None, "slot_operator": "="}),
+ ("virtual/ffmpeg:0=", "4-slot-abi", {"slot": "0", "sub_slot": None, "slot_operator": "="}),
+ ("virtual/ffmpeg:*", "4-slot-abi", {"slot": None, "sub_slot": None, "slot_operator": "*"}),
+ ("virtual/ffmpeg:0", "4-slot-abi", {"slot": "0", "sub_slot": None, "slot_operator": None}),
+ ("virtual/ffmpeg", "4-slot-abi", {"slot": None, "sub_slot": None, "slot_operator": None}),
+ )
+
+ for atom, eapi, parts in tests:
+ a = Atom(atom, eapi=eapi)
+ for k, v in parts.items():
+ self.assertEqual(v, getattr(a, k),
+ msg="Atom('%s').%s = %s == '%s'" %
+ (atom, k, getattr(a, k), v))
+
+ def test_intersects(self):
+ test_cases = (
+ ("dev-libs/A", "dev-libs/A", True),
+ ("dev-libs/A", "dev-libs/B", False),
+ ("dev-libs/A", "sci-libs/A", False),
+ ("dev-libs/A[foo]", "sci-libs/A[bar]", False),
+ ("dev-libs/A[foo(+)]", "sci-libs/A[foo(-)]", False),
+ ("=dev-libs/A-1", "=dev-libs/A-1-r1", False),
+ ("~dev-libs/A-1", "=dev-libs/A-1", False),
+ ("=dev-libs/A-1:1", "=dev-libs/A-1", True),
+ ("=dev-libs/A-1:1", "=dev-libs/A-1:1", True),
+ ("=dev-libs/A-1:1", "=dev-libs/A-1:2", False),
+ )
+
+ for atom, other, expected_result in test_cases:
+ self.assertEqual(Atom(atom).intersects(Atom(other)), expected_result,
+ "%s and %s should intersect: %s" % (atom, other, expected_result))
+
+ def test_violated_conditionals(self):
+ test_cases = (
+ ("dev-libs/A", ["foo"], ["foo"], None, "dev-libs/A"),
+ ("dev-libs/A[foo]", [], ["foo"], None, "dev-libs/A[foo]"),
+ ("dev-libs/A[foo]", ["foo"], ["foo"], None, "dev-libs/A"),
+ ("dev-libs/A[foo]", [], ["foo"], [], "dev-libs/A[foo]"),
+ ("dev-libs/A[foo]", ["foo"], ["foo"], [], "dev-libs/A"),
+
+ ("dev-libs/A:0[foo]", ["foo"], ["foo"], [], "dev-libs/A:0"),
+
+ ("dev-libs/A[foo,-bar]", [], ["foo", "bar"], None, "dev-libs/A[foo]"),
+ ("dev-libs/A[-foo,bar]", [], ["foo", "bar"], None, "dev-libs/A[bar]"),
+
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=]"),
+
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["a"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[!c=]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["b"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,b=,!c=]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["c"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["e"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=,!e?]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["f"], ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a,!c=,-f]"),
+
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["a"], ["a", "b", "c", "d", "e", "f"], ["a"], "dev-libs/A[!c=]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["b"], ["a", "b", "c", "d", "e", "f"], ["b"], "dev-libs/A[a,!c=]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["c"], ["a", "b", "c", "d", "e", "f"], ["c"], "dev-libs/A[a,!c=]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d"], ["a", "b", "c", "d", "e", "f"], ["d"], "dev-libs/A[a,!c=]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["e"], ["a", "b", "c", "d", "e", "f"], ["e"], "dev-libs/A[a,!c=]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["f"], ["a", "b", "c", "d", "e", "f"], ["f"], "dev-libs/A[a,!c=,-f]"),
+
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["a"], ["a", "b", "c", "d", "e", "f"], ["a"], "dev-libs/A[!c(+)=]"),
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["b"], ["a", "b", "c", "d", "e", "f"], ["b"], "dev-libs/A[a(-),!c(-)=]"),
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["c"], ["a", "b", "c", "d", "e", "f"], ["c"], "dev-libs/A[a(+),!c(+)=]"),
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["d"], ["a", "b", "c", "d", "e", "f"], ["d"], "dev-libs/A[a(-),!c(-)=]"),
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["e"], ["a", "b", "c", "d", "e", "f"], ["e"], "dev-libs/A[a(+),!c(+)=]"),
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["f"], ["a", "b", "c", "d", "e", "f"], ["f"], "dev-libs/A[a(-),!c(-)=,-f(+)]"),
+
+ ("dev-libs/A[a(+),b(+)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["a"], ["a"], ["a"], "dev-libs/A[b(+)=,!e(+)?]"),
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["b"], ["b"], ["b"], "dev-libs/A[a(-),!c(-)=,-f(+)]"),
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["c"], ["c"], ["c"], "dev-libs/A[!c(+)=,!e(+)?]"),
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["d"], ["d"], ["d"], "dev-libs/A[a(-),b(+)=,!c(-)=,-f(+)]"),
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["e"], ["e"], ["e"], "dev-libs/A"),
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["f"], ["f"], ["f"], "dev-libs/A[a(-),b(+)=,!c(-)=,-f(+)]"),
+
+ # Some more test cases to trigger all remaining code paths
+ ("dev-libs/B[x?]", [], ["x"], ["x"], "dev-libs/B[x?]"),
+ ("dev-libs/B[x(+)?]", [], [], ["x"], "dev-libs/B"),
+ ("dev-libs/B[x(-)?]", [], [], ["x"], "dev-libs/B[x(-)?]"),
+
+ ("dev-libs/C[x=]", [], ["x"], ["x"], "dev-libs/C[x=]"),
+ ("dev-libs/C[x(+)=]", [], [], ["x"], "dev-libs/C"),
+ ("dev-libs/C[x(-)=]", [], [], ["x"], "dev-libs/C[x(-)=]"),
+
+ ("dev-libs/D[!x=]", [], ["x"], ["x"], "dev-libs/D"),
+ ("dev-libs/D[!x(+)=]", [], [], ["x"], "dev-libs/D[!x(+)=]"),
+ ("dev-libs/D[!x(-)=]", [], [], ["x"], "dev-libs/D"),
+
+ # Missing IUSE test cases
+ ("dev-libs/B[x]", [], [], [], "dev-libs/B[x]"),
+ ("dev-libs/B[-x]", [], [], [], "dev-libs/B[-x]"),
+ ("dev-libs/B[x?]", [], [], [], "dev-libs/B[x?]"),
+ ("dev-libs/B[x=]", [], [], [], "dev-libs/B[x=]"),
+ ("dev-libs/B[!x=]", [], [], ["x"], "dev-libs/B[!x=]"),
+ ("dev-libs/B[!x?]", [], [], ["x"], "dev-libs/B[!x?]"),
+ )
+
+ test_cases_xfail = (
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], ["a", "b", "c", "d", "e", "f"], None),
+ )
+
+ class use_flag_validator(object):
+ def __init__(self, iuse):
+ self.iuse = iuse
+
+ def is_valid_flag(self, flag):
+ return flag in iuse
+
+ for atom, other_use, iuse, parent_use, expected_violated_atom in test_cases:
+ a = Atom(atom)
+ validator = use_flag_validator(iuse)
+ violated_atom = a.violated_conditionals(other_use, validator.is_valid_flag, parent_use)
+ if parent_use is None:
+ fail_msg = "Atom: %s, other_use: %s, iuse: %s, parent_use: %s, got: %s, expected: %s" % \
+ (atom, " ".join(other_use), " ".join(iuse), "None", str(violated_atom), expected_violated_atom)
+ else:
+ fail_msg = "Atom: %s, other_use: %s, iuse: %s, parent_use: %s, got: %s, expected: %s" % \
+ (atom, " ".join(other_use), " ".join(iuse), " ".join(parent_use), str(violated_atom), expected_violated_atom)
+ self.assertEqual(str(violated_atom), expected_violated_atom, fail_msg)
+
+ for atom, other_use, iuse, parent_use in test_cases_xfail:
+ a = Atom(atom)
+ validator = use_flag_validator(iuse)
+ self.assertRaisesMsg(atom, InvalidAtom,
+ a.violated_conditionals, other_use, validator.is_valid_flag, parent_use)
+
+ def test_evaluate_conditionals(self):
+ test_cases = (
+ ("dev-libs/A[foo]", [], "dev-libs/A[foo]"),
+ ("dev-libs/A[foo]", ["foo"], "dev-libs/A[foo]"),
+
+ ("dev-libs/A:0[foo=]", ["foo"], "dev-libs/A:0[foo]"),
+
+ ("dev-libs/A[foo,-bar]", [], "dev-libs/A[foo,-bar]"),
+ ("dev-libs/A[-foo,bar]", [], "dev-libs/A[-foo,bar]"),
+
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], "dev-libs/A[a,-b,c,-e,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["a"], "dev-libs/A[a,-b,c,-e,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["b"], "dev-libs/A[a,b,c,-e,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["c"], "dev-libs/A[a,-b,-c,-e,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d"], "dev-libs/A[a,-b,c,d,-e,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["e"], "dev-libs/A[a,-b,c,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["f"], "dev-libs/A[a,-b,c,-e,-f]"),
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]", ["d"], "dev-libs/A[a(-),-b(+),c(-),d(+),-e(-),-f(+)]"),
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]", ["f"], "dev-libs/A[a(+),-b(-),c(+),-e(+),-f(-)]"),
+ )
+
+ for atom, use, expected_atom in test_cases:
+ a = Atom(atom)
+ b = a.evaluate_conditionals(use)
+ self.assertEqual(str(b), expected_atom)
+ self.assertEqual(str(b.unevaluated_atom), atom)
+
+ def test__eval_qa_conditionals(self):
+ test_cases = (
+ ("dev-libs/A[foo]", [], [], "dev-libs/A[foo]"),
+ ("dev-libs/A[foo]", ["foo"], [], "dev-libs/A[foo]"),
+ ("dev-libs/A[foo]", [], ["foo"], "dev-libs/A[foo]"),
+
+ ("dev-libs/A:0[foo]", [], [], "dev-libs/A:0[foo]"),
+ ("dev-libs/A:0[foo]", ["foo"], [], "dev-libs/A:0[foo]"),
+ ("dev-libs/A:0[foo]", [], ["foo"], "dev-libs/A:0[foo]"),
+ ("dev-libs/A:0[foo=]", [], ["foo"], "dev-libs/A:0[foo]"),
+
+ ("dev-libs/A[foo,-bar]", ["foo"], ["bar"], "dev-libs/A[foo,-bar]"),
+ ("dev-libs/A[-foo,bar]", ["foo", "bar"], [], "dev-libs/A[-foo,bar]"),
+
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["a", "b", "c"], [], "dev-libs/A[a,-b,c,d,-e,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], ["a", "b", "c"], "dev-libs/A[a,b,-c,d,-e,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", ["d", "e", "f"], [], "dev-libs/A[a,b,-b,c,-c,-e,-f]"),
+ ("dev-libs/A[a,b=,!c=,d?,!e?,-f]", [], ["d", "e", "f"], "dev-libs/A[a,b,-b,c,-c,d,-f]"),
+
+ ("dev-libs/A[a(-),b(+)=,!c(-)=,d(+)?,!e(-)?,-f(+)]",
+ ["a", "b", "c", "d", "e", "f"], [], "dev-libs/A[a(-),-b(+),c(-),-e(-),-f(+)]"),
+ ("dev-libs/A[a(+),b(-)=,!c(+)=,d(-)?,!e(+)?,-f(-)]",
+ [], ["a", "b", "c", "d", "e", "f"], "dev-libs/A[a(+),b(-),-c(+),d(-),-f(-)]"),
+ )
+
+ for atom, use_mask, use_force, expected_atom in test_cases:
+ a = Atom(atom)
+ b = a._eval_qa_conditionals(use_mask, use_force)
+ self.assertEqual(str(b), expected_atom)
+ self.assertEqual(str(b.unevaluated_atom), atom)
diff --git a/lib/portage/tests/dep/testCheckRequiredUse.py b/lib/portage/tests/dep/testCheckRequiredUse.py
new file mode 100644
index 000000000..c4128c29a
--- /dev/null
+++ b/lib/portage/tests/dep/testCheckRequiredUse.py
@@ -0,0 +1,234 @@
+# Copyright 2010-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import check_required_use
+from portage.exception import InvalidDependString
+
+class TestCheckRequiredUse(TestCase):
+
+ def testCheckRequiredUse(self):
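+ # Each case is (REQUIRED_USE, enabled USE flags, IUSE, whether check_required_use() should consider it satisfied).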
+ test_cases = (
+ ("|| ( a b )", [], ["a", "b"], False),
+ ("|| ( a b )", ["a"], ["a", "b"], True),
+ ("|| ( a b )", ["b"], ["a", "b"], True),
+ ("|| ( a b )", ["a", "b"], ["a", "b"], True),
+
+ ("^^ ( a b )", [], ["a", "b"], False),
+ ("^^ ( a b )", ["a"], ["a", "b"], True),
+ ("^^ ( a b )", ["b"], ["a", "b"], True),
+ ("^^ ( a b )", ["a", "b"], ["a", "b"], False),
+ ("?? ( a b )", ["a", "b"], ["a", "b"], False),
+ ("?? ( a b )", ["a"], ["a", "b"], True),
+ ("?? ( a b )", ["b"], ["a", "b"], True),
+ ("?? ( a b )", [], ["a", "b"], True),
+ ("?? ( )", [], [], True),
+
+ ("^^ ( || ( a b ) c )", [], ["a", "b", "c"], False),
+ ("^^ ( || ( a b ) c )", ["a"], ["a", "b", "c"], True),
+
+ ("^^ ( || ( ( a b ) ) ( c ) )", [], ["a", "b", "c"], False),
+ ("( ^^ ( ( || ( ( a ) ( b ) ) ) ( ( c ) ) ) )", ["a"], ["a", "b", "c"], True),
+
+ ("a || ( b c )", ["a"], ["a", "b", "c"], False),
+ ("|| ( b c ) a", ["a"], ["a", "b", "c"], False),
+
+ ("|| ( a b c )", ["a"], ["a", "b", "c"], True),
+ ("|| ( a b c )", ["b"], ["a", "b", "c"], True),
+ ("|| ( a b c )", ["c"], ["a", "b", "c"], True),
+
+ ("^^ ( a b c )", ["a"], ["a", "b", "c"], True),
+ ("^^ ( a b c )", ["b"], ["a", "b", "c"], True),
+ ("^^ ( a b c )", ["c"], ["a", "b", "c"], True),
+ ("^^ ( a b c )", ["a", "b"], ["a", "b", "c"], False),
+ ("^^ ( a b c )", ["b", "c"], ["a", "b", "c"], False),
+ ("^^ ( a b c )", ["a", "c"], ["a", "b", "c"], False),
+ ("^^ ( a b c )", ["a", "b", "c"], ["a", "b", "c"], False),
+
+ ("a? ( ^^ ( b c ) )", [], ["a", "b", "c"], True),
+ ("a? ( ^^ ( b c ) )", ["a"], ["a", "b", "c"], False),
+ ("a? ( ^^ ( b c ) )", ["b"], ["a", "b", "c"], True),
+ ("a? ( ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
+ ("a? ( ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], True),
+ ("a? ( ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], False),
+
+ ("^^ ( a? ( !b ) !c? ( d ) )", [], ["a", "b", "c", "d"], False),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["a"], ["a", "b", "c", "d"], True),
+ # Note: this case is EAPI-dependent; it evaluated to True in EAPIs before 7.
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["c"], ["a", "b", "c", "d"], False),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["a", "c"], ["a", "b", "c", "d"], True),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["a", "b", "c"], ["a", "b", "c", "d"], False),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["a", "b", "d"], ["a", "b", "c", "d"], True),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["a", "b", "d"], ["a", "b", "c", "d"], True),
+ ("^^ ( a? ( !b ) !c? ( d ) )", ["a", "d"], ["a", "b", "c", "d"], False),
+
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"], False),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["a"], ["a", "b", "c"], True),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["b"], ["a", "b", "c"], True),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], True),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "c"], ["a", "b", "c"], True),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["b", "c"], ["a", "b", "c"], True),
+ ("|| ( ^^ ( a b ) ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], False),
+
+ ("^^ ( || ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"], False),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["a"], ["a", "b", "c"], True),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["b"], ["a", "b", "c"], False),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["c"], ["a", "b", "c"], True),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["a", "b"], ["a", "b", "c"], False),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["a", "c"], ["a", "b", "c"], False),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["b", "c"], ["a", "b", "c"], True),
+ ("^^ ( || ( a b ) ^^ ( b c ) )", ["a", "b", "c"], ["a", "b", "c"], True),
+
+ ("|| ( ( a b ) c )", ["a", "b", "c"], ["a", "b", "c"], True),
+ ("|| ( ( a b ) c )", ["b", "c"], ["a", "b", "c"], True),
+ ("|| ( ( a b ) c )", ["a", "c"], ["a", "b", "c"], True),
+ ("|| ( ( a b ) c )", ["a", "b"], ["a", "b", "c"], True),
+ ("|| ( ( a b ) c )", ["a"], ["a", "b", "c"], False),
+ ("|| ( ( a b ) c )", ["b"], ["a", "b", "c"], False),
+ ("|| ( ( a b ) c )", ["c"], ["a", "b", "c"], True),
+ ("|| ( ( a b ) c )", [], ["a", "b", "c"], False),
+
+ ("^^ ( ( a b ) c )", ["a", "b", "c"], ["a", "b", "c"], False),
+ ("^^ ( ( a b ) c )", ["b", "c"], ["a", "b", "c"], True),
+ ("^^ ( ( a b ) c )", ["a", "c"], ["a", "b", "c"], True),
+ ("^^ ( ( a b ) c )", ["a", "b"], ["a", "b", "c"], True),
+ ("^^ ( ( a b ) c )", ["a"], ["a", "b", "c"], False),
+ ("^^ ( ( a b ) c )", ["b"], ["a", "b", "c"], False),
+ ("^^ ( ( a b ) c )", ["c"], ["a", "b", "c"], True),
+ ("^^ ( ( a b ) c )", [], ["a", "b", "c"], False),
+ )
+
+ test_cases_xfail = (
+ ("^^ ( || ( a b ) ^^ ( b c ) )", [], ["a", "b"]),
+ ("^^ ( || ( a b ) ^^ ( b c )", [], ["a", "b", "c"]),
+ ("^^( || ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"]),
+ ("^^ || ( a b ) ^^ ( b c )", [], ["a", "b", "c"]),
+ ("^^ ( ( || ) ( a b ) ^^ ( b c ) )", [], ["a", "b", "c"]),
+ ("^^ ( || ( a b ) ) ^^ ( b c ) )", [], ["a", "b", "c"]),
+ )
+
+ test_cases_xfail_eapi = (
+ ("?? ( a b )", [], ["a", "b"], "4"),
+ )
+
+ for required_use, use, iuse, expected in test_cases:
+ self.assertEqual(bool(check_required_use(required_use, use, iuse.__contains__)), \
+ expected, required_use + ", USE = " + " ".join(use))
+
+ for required_use, use, iuse in test_cases_xfail:
+ self.assertRaisesMsg(required_use + ", USE = " + " ".join(use), \
+ InvalidDependString, check_required_use, required_use, use, iuse.__contains__)
+
+ for required_use, use, iuse, eapi in test_cases_xfail_eapi:
+ self.assertRaisesMsg(required_use + ", USE = " + " ".join(use), \
+ InvalidDependString, check_required_use, required_use, use,
+ iuse.__contains__, eapi=eapi)
+
+ def testCheckRequiredUseFilterSatisfied(self):
+ """
+ Test filtering of satisfied parts of REQUIRED_USE,
+ in order to reduce noise for bug #353234.
+ """
+ test_cases = (
+ (
+ "bindist? ( !amr !faac !win32codecs ) cdio? ( !cdparanoia !cddb ) dvdnav? ( dvd )",
+ ("cdio", "cdparanoia"),
+ "cdio? ( !cdparanoia )"
+ ),
+ (
+ "|| ( !amr !faac !win32codecs ) cdio? ( !cdparanoia !cddb ) ^^ ( foo bar )",
+ ["cdio", "cdparanoia", "foo"],
+ "cdio? ( !cdparanoia )"
+ ),
+ (
+ "^^ ( || ( a b ) c )",
+ ("a", "b", "c"),
+ "^^ ( || ( a b ) c )"
+ ),
+ (
+ "^^ ( || ( ( a b ) ) ( c ) )",
+ ("a", "b", "c"),
+ "^^ ( ( a b ) c )"
+ ),
+ (
+ "a? ( ( c e ) ( b d ) )",
+ ("a", "c", "e"),
+ "a? ( b d )"
+ ),
+ (
+ "a? ( ( c e ) ( b d ) )",
+ ("a", "b", "c", "e"),
+ "a? ( d )"
+ ),
+ (
+ "a? ( ( c e ) ( c e b c d e c ) )",
+ ("a", "c", "e"),
+ "a? ( b d )"
+ ),
+ (
+ "^^ ( || ( a b ) ^^ ( b c ) )",
+ ("a", "b"),
+ "^^ ( || ( a b ) ^^ ( b c ) )"
+ ),
+ (
+ "^^ ( || ( a b ) ^^ ( b c ) )",
+ ["a", "c"],
+ "^^ ( || ( a b ) ^^ ( b c ) )"
+ ),
+ (
+ "^^ ( || ( a b ) ^^ ( b c ) )",
+ ["b", "c"],
+ ""
+ ),
+ (
+ "^^ ( || ( a b ) ^^ ( b c ) )",
+ ["a", "b", "c"],
+ ""
+ ),
+ (
+ "^^ ( ( a b c ) ( b c d ) )",
+ ["a", "b", "c"],
+ ""
+ ),
+ (
+ "^^ ( ( a b c ) ( b c d ) )",
+ ["a", "b", "c", "d"],
+ "^^ ( ( a b c ) ( b c d ) )"
+ ),
+ (
+ "^^ ( ( a b c ) ( b c !d ) )",
+ ["a", "b", "c"],
+ "^^ ( ( a b c ) ( b c !d ) )"
+ ),
+ (
+ "^^ ( ( a b c ) ( b c !d ) )",
+ ["a", "b", "c", "d"],
+ ""
+ ),
+ (
+ "( ( ( a ) ) ( ( ( b c ) ) ) )",
+ [""],
+ "a b c"
+ ),
+ (
+ "|| ( ( ( ( a ) ) ( ( ( b c ) ) ) ) )",
+ [""],
+ "a b c"
+ ),
+ (
+ "|| ( ( a ( ( ) ( ) ) ( ( ) ) ( b ( ) c ) ) )",
+ [""],
+ "a b c"
+ ),
+ (
+ "|| ( ( a b c ) ) || ( ( d e f ) )",
+ [""],
+ "a b c d e f"
+ ),
+ )
+ for required_use, use, expected in test_cases:
+ result = check_required_use(required_use, use, lambda k: True).tounicode()
+ self.assertEqual(result, expected,
+ "REQUIRED_USE = '%s', USE = '%s', '%s' != '%s'" % \
+ (required_use, " ".join(use), result, expected))
diff --git a/lib/portage/tests/dep/testExtendedAtomDict.py b/lib/portage/tests/dep/testExtendedAtomDict.py
new file mode 100644
index 000000000..69d092e38
--- /dev/null
+++ b/lib/portage/tests/dep/testExtendedAtomDict.py
@@ -0,0 +1,18 @@
+# testExtendedAtomDict.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import ExtendedAtomDict
+
+class TestExtendedAtomDict(TestCase):
+
+ def testExtendedAtomDict(self):
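+ # Lookups on specific packages are expected to merge in the values registered for matching extended (wildcard) atoms.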
+ d = ExtendedAtomDict(dict)
+ d["*/*"] = { "test1": "x" }
+ d["dev-libs/*"] = { "test2": "y" }
+ d.setdefault("sys-apps/portage", {})["test3"] = "z"
+ self.assertEqual(d.get("dev-libs/A"), { "test1": "x", "test2": "y" })
+ self.assertEqual(d.get("sys-apps/portage"), { "test1": "x", "test3": "z" })
+ self.assertEqual(d["dev-libs/*"], { "test2": "y" })
+ self.assertEqual(d["sys-apps/portage"], {'test1': 'x', 'test3': 'z'})
diff --git a/lib/portage/tests/dep/testExtractAffectingUSE.py b/lib/portage/tests/dep/testExtractAffectingUSE.py
new file mode 100644
index 000000000..026a55274
--- /dev/null
+++ b/lib/portage/tests/dep/testExtractAffectingUSE.py
@@ -0,0 +1,75 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import extract_affecting_use
+from portage.exception import InvalidDependString
+
+class TestExtractAffectingUSE(TestCase):
+
+ def testExtractAffectingUSE(self):
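+ # Each case is (dependency string, atom, expected set of USE flags whose conditionals enclose the atom).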
+ test_cases = (
+ ("a? ( A ) !b? ( B ) !c? ( C ) d? ( D )", "A", ("a",)),
+ ("a? ( A ) !b? ( B ) !c? ( C ) d? ( D )", "B", ("b",)),
+ ("a? ( A ) !b? ( B ) !c? ( C ) d? ( D )", "C", ("c",)),
+ ("a? ( A ) !b? ( B ) !c? ( C ) d? ( D )", "D", ("d",)),
+
+ ("a? ( b? ( AB ) )", "AB", ("a", "b")),
+ ("a? ( b? ( c? ( ABC ) ) )", "ABC", ("a", "b", "c")),
+
+ ("a? ( A b? ( c? ( ABC ) AB ) )", "A", ("a",)),
+ ("a? ( A b? ( c? ( ABC ) AB ) )", "AB", ("a", "b")),
+ ("a? ( A b? ( c? ( ABC ) AB ) )", "ABC", ("a", "b", "c")),
+ ("a? ( A b? ( c? ( ABC ) AB ) ) X", "X", []),
+ ("X a? ( A b? ( c? ( ABC ) AB ) )", "X", []),
+
+ ("ab? ( || ( A B ) )", "A", ("ab",)),
+ ("!ab? ( || ( A B ) )", "B", ("ab",)),
+ ("ab? ( || ( A || ( b? ( || ( B C ) ) ) ) )", "A", ("ab",)),
+ ("ab? ( || ( A || ( b? ( || ( B C ) ) ) ) )", "B", ("ab", "b")),
+ ("ab? ( || ( A || ( b? ( || ( B C ) ) ) ) )", "C", ("ab", "b")),
+
+ ("( ab? ( || ( ( A ) || ( b? ( ( ( || ( B ( C ) ) ) ) ) ) ) ) )", "A", ("ab",)),
+ ("( ab? ( || ( ( A ) || ( b? ( ( ( || ( B ( C ) ) ) ) ) ) ) ) )", "B", ("ab", "b")),
+ ("( ab? ( || ( ( A ) || ( b? ( ( ( || ( B ( C ) ) ) ) ) ) ) ) )", "C", ("ab", "b")),
+
+ ("a? ( A )", "B", []),
+
+ ("a? ( || ( A B ) )", "B", ["a"]),
+
+ # test USE dep defaults for bug #363073
+ ("a? ( >=dev-lang/php-5.2[pcre(+)] )", ">=dev-lang/php-5.2[pcre(+)]", ["a"]),
+ )
+
+ test_cases_xfail = (
+ ("? ( A )", "A"),
+ ("!? ( A )", "A"),
+ ("( A", "A"),
+ ("A )", "A"),
+
+ ("||( A B )", "A"),
+ ("|| (A B )", "A"),
+ ("|| ( A B)", "A"),
+ ("|| ( A B", "A"),
+ ("|| A B )", "A"),
+ ("|| A B", "A"),
+ ("|| ( A B ) )", "A"),
+ ("|| || B C", "A"),
+ ("|| ( A B || )", "A"),
+ ("a? A", "A"),
+ ("( || ( || || ( A ) foo? ( B ) ) )", "A"),
+ ("( || ( || bar? ( A ) foo? ( B ) ) )", "A"),
+ )
+
+ for dep, atom, expected in test_cases:
+ expected = set(expected)
+ result = extract_affecting_use(dep, atom, eapi="0")
+ fail_msg = "dep: " + dep + ", atom: " + atom + ", got: " + \
+ " ".join(sorted(result)) + ", expected: " + " ".join(sorted(expected))
+ self.assertEqual(result, expected, fail_msg)
+
+ for dep, atom in test_cases_xfail:
+ fail_msg = "dep: " + dep + ", atom: " + atom + ", got: " + \
+ " ".join(sorted(result)) + ", expected: " + " ".join(sorted(expected))
+ self.assertRaisesMsg(fail_msg, \
+ InvalidDependString, extract_affecting_use, dep, atom, eapi="0")
diff --git a/lib/portage/tests/dep/testStandalone.py b/lib/portage/tests/dep/testStandalone.py
new file mode 100644
index 000000000..88e3f39f8
--- /dev/null
+++ b/lib/portage/tests/dep/testStandalone.py
@@ -0,0 +1,37 @@
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import cpvequal
+from portage.exception import PortageException
+
+class TestStandalone(TestCase):
+ """ Test some small functions portage.dep
+ """
+
+ def testCPVequal(self):
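+ # Each case is (cpv1, cpv2, expected result); the xfail pairs are not valid cpvs and should raise PortageException.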
+
+ test_cases = (
+ ("sys-apps/portage-2.1", "sys-apps/portage-2.1", True),
+ ("sys-apps/portage-2.1", "sys-apps/portage-2.0", False),
+ ("sys-apps/portage-2.1", "sys-apps/portage-2.1-r1", False),
+ ("sys-apps/portage-2.1-r1", "sys-apps/portage-2.1", False),
+ ("sys-apps/portage-2.1_alpha3", "sys-apps/portage-2.1", False),
+ ("sys-apps/portage-2.1_alpha3_p6", "sys-apps/portage-2.1_alpha3", False),
+ ("sys-apps/portage-2.1_alpha3", "sys-apps/portage-2.1", False),
+ ("sys-apps/portage-2.1", "sys-apps/X-2.1", False),
+ ("sys-apps/portage-2.1", "portage-2.1", False),
+ )
+
+ test_cases_xfail = (
+ ("sys-apps/portage", "sys-apps/portage"),
+ ("sys-apps/portage-2.1-6", "sys-apps/portage-2.1-6"),
+ )
+
+ for cpv1, cpv2, expected_result in test_cases:
+ self.assertEqual(cpvequal(cpv1, cpv2), expected_result,
+ "cpvequal('%s', '%s') != %s" % (cpv1, cpv2, expected_result))
+
+ for cpv1, cpv2 in test_cases_xfail:
+ self.assertRaisesMsg("cpvequal(%s, %s)" % (cpv1, cpv2),
+ PortageException, cpvequal, cpv1, cpv2)
diff --git a/lib/portage/tests/dep/test_best_match_to_list.py b/lib/portage/tests/dep/test_best_match_to_list.py
new file mode 100644
index 000000000..586c8bc50
--- /dev/null
+++ b/lib/portage/tests/dep/test_best_match_to_list.py
@@ -0,0 +1,63 @@
+# test_best_match_to_list.py -- Portage Unit Testing Functionality
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from itertools import permutations
+
+from portage.tests import TestCase
+from portage.dep import Atom, best_match_to_list
+
+class Test_best_match_to_list(TestCase):
+
+ def best_match_to_list_wrapper(self, mypkg, mylist):
+ """
+ This function uses best_match_to_list to create sorted
+ list of matching atoms.
+ """
+ ret = []
+ mylist = list(mylist)
+ while mylist:
+ m = best_match_to_list(mypkg, mylist)
+ if m is not None:
+ ret.append(m)
+ mylist.remove(m)
+ else:
+ break
+
+ return ret
+
+ def testBest_match_to_list(self):
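+ # Each case is (pkg, atom list, expected atoms ordered best match first, whether to repeat the check for every permutation of the input list).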
+ tests = [
+ ("dev-libs/A-4", [Atom(">=dev-libs/A-3"), Atom(">=dev-libs/A-2")],
+ [Atom(">=dev-libs/A-3"), Atom(">=dev-libs/A-2")], True),
+ ("dev-libs/A-4", [Atom("<=dev-libs/A-5"), Atom("<=dev-libs/A-6")],
+ [Atom("<=dev-libs/A-5"), Atom("<=dev-libs/A-6")], True),
+ ("dev-libs/A-1", [Atom("dev-libs/A"), Atom("=dev-libs/A-1")],
+ [Atom("=dev-libs/A-1"), Atom("dev-libs/A")], True),
+ ("dev-libs/A-1", [Atom("dev-libs/B"), Atom("=dev-libs/A-1:0")],
+ [Atom("=dev-libs/A-1:0")], True),
+ ("dev-libs/A-1", [Atom("dev-libs/*", allow_wildcard=True), Atom("=dev-libs/A-1:0")],
+ [Atom("=dev-libs/A-1:0"), Atom("dev-libs/*", allow_wildcard=True)], True),
+ ("dev-libs/A-4.9999-r1", [Atom("dev-libs/*", allow_wildcard=True), Atom("=*/*-*9999*", allow_wildcard=True)],
+ [Atom("=*/*-*9999*", allow_wildcard=True), Atom("dev-libs/*", allow_wildcard=True)], True),
+ ("dev-libs/A-4_beta-r1", [Atom("dev-libs/*", allow_wildcard=True), Atom("=*/*-*_beta*", allow_wildcard=True)],
+ [Atom("=*/*-*_beta*", allow_wildcard=True), Atom("dev-libs/*", allow_wildcard=True)], True),
+ ("dev-libs/A-4_beta1-r1", [Atom("dev-libs/*", allow_wildcard=True), Atom("=*/*-*_beta*", allow_wildcard=True)],
+ [Atom("=*/*-*_beta*", allow_wildcard=True), Atom("dev-libs/*", allow_wildcard=True)], True),
+ ("dev-libs/A-1:0", [Atom("dev-*/*", allow_wildcard=True), Atom("dev-*/*:0", allow_wildcard=True),
+ Atom("dev-libs/A"), Atom("<=dev-libs/A-2"), Atom("dev-libs/A:0"),
+ Atom("=dev-libs/A-1*"), Atom("~dev-libs/A-1"), Atom("=dev-libs/A-1")],
+ [Atom("=dev-libs/A-1"), Atom("~dev-libs/A-1"), Atom("=dev-libs/A-1*"),
+ Atom("dev-libs/A:0"), Atom("<=dev-libs/A-2"), Atom("dev-libs/A"),
+ Atom("dev-*/*:0", allow_wildcard=True), Atom("dev-*/*", allow_wildcard=True)], False)
+ ]
+
+ for pkg, atom_list, result, all_permutations in tests:
+ if all_permutations:
+ atom_lists = permutations(atom_list)
+ else:
+ atom_lists = [atom_list]
+ for atom_list in atom_lists:
+ self.assertEqual(
+ self.best_match_to_list_wrapper(pkg, atom_list),
+ result)
diff --git a/lib/portage/tests/dep/test_dep_getcpv.py b/lib/portage/tests/dep/test_dep_getcpv.py
new file mode 100644
index 000000000..79c1514a1
--- /dev/null
+++ b/lib/portage/tests/dep/test_dep_getcpv.py
@@ -0,0 +1,37 @@
+# test_dep_getcpv.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import dep_getcpv
+
+class DepGetCPV(TestCase):
+ """ A simple testcase for isvalidatom
+ """
+
+ def testDepGetCPV(self):
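+ # dep_getcpv() is expected to strip any operator prefix/postfix and slot, returning the bare cpv.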
+
+ prefix_ops = [
+ "<", ">", "=", "~", "<=",
+ ">=", "!=", "!<", "!>", "!~"
+ ]
+
+ bad_prefix_ops = [">~", "<~", "~>", "~<"]
+ postfix_ops = [("=", "*"),]
+
+ cpvs = ["sys-apps/portage-2.1", "sys-apps/portage-2.1",
+ "sys-apps/portage-2.1"]
+ slots = [None, ":foo", ":2"]
+ for cpv in cpvs:
+ for slot in slots:
+ for prefix in prefix_ops:
+ mycpv = prefix + cpv
+ if slot:
+ mycpv += slot
+ self.assertEqual(dep_getcpv(mycpv), cpv)
+
+ for prefix, postfix in postfix_ops:
+ mycpv = prefix + cpv + postfix
+ if slot:
+ mycpv += slot
+ self.assertEqual(dep_getcpv(mycpv), cpv)
diff --git a/lib/portage/tests/dep/test_dep_getrepo.py b/lib/portage/tests/dep/test_dep_getrepo.py
new file mode 100644
index 000000000..6c17d3cf7
--- /dev/null
+++ b/lib/portage/tests/dep/test_dep_getrepo.py
@@ -0,0 +1,29 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import dep_getrepo
+
+class DepGetRepo(TestCase):
+ """ A simple testcase for isvalidatom
+ """
+
+ def testDepGetRepo(self):
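+ # dep_getrepo() is expected to return the repository name after '::', or None when no repository is given.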
+
+ repo_char = "::"
+ repos = ("a", "repo-name", "repo_name", "repo123", None)
+ cpvs = ["sys-apps/portage"]
+ versions = ["2.1.1", "2.1-r1", None]
+ uses = ["[use]", None]
+ for cpv in cpvs:
+ for version in versions:
+ for use in uses:
+ for repo in repos:
+ pkg = cpv
+ if version:
+ pkg = '=' + pkg + '-' + version
+ if repo is not None:
+ pkg = pkg + repo_char + repo
+ if use:
+ pkg = pkg + use
+ self.assertEqual(dep_getrepo(pkg), repo)
diff --git a/lib/portage/tests/dep/test_dep_getslot.py b/lib/portage/tests/dep/test_dep_getslot.py
new file mode 100644
index 000000000..84828648b
--- /dev/null
+++ b/lib/portage/tests/dep/test_dep_getslot.py
@@ -0,0 +1,28 @@
+# test_dep_getslot.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import dep_getslot
+
+class DepGetSlot(TestCase):
+ """ A simple testcase for isvalidatom
+ """
+
+ def testDepGetSlot(self):
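+ # dep_getslot() is expected to return the slot after ':', or None when no slot is given.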
+
+ slot_char = ":"
+ slots = ("a", "1.2", "1", "IloveVapier", None)
+ cpvs = ["sys-apps/portage"]
+ versions = ["2.1.1", "2.1-r1"]
+ for cpv in cpvs:
+ for version in versions:
+ for slot in slots:
+ mycpv = cpv
+ if version:
+ mycpv = '=' + mycpv + '-' + version
+ if slot is not None:
+ self.assertEqual(dep_getslot(
+ mycpv + slot_char + slot), slot)
+ else:
+ self.assertEqual(dep_getslot(mycpv), slot)
diff --git a/lib/portage/tests/dep/test_dep_getusedeps.py b/lib/portage/tests/dep/test_dep_getusedeps.py
new file mode 100644
index 000000000..cd58eab35
--- /dev/null
+++ b/lib/portage/tests/dep/test_dep_getusedeps.py
@@ -0,0 +1,35 @@
+# test_dep_getusedeps.py -- Portage Unit Testing Functionality
+# Copyright 2007-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import dep_getusedeps
+
+from portage.tests import test_cps, test_slots, test_versions, test_usedeps
+
+class DepGetUseDeps(TestCase):
+ """ A simple testcase for dep_getusedeps
+ """
+
+ def testDepGetUseDeps(self):
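+ # dep_getusedeps() is expected to return the USE dependencies from the trailing '[...]' as a tuple.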
+
+ for mycpv in test_cps:
+ for version in test_versions:
+ for slot in test_slots:
+ for use in test_usedeps:
+ cpv = mycpv[:]
+ if version:
+ cpv += version
+ if slot:
+ cpv += ":" + slot
+ if isinstance(use, tuple):
+ cpv += "[%s]" % (",".join(use),)
+ self.assertEqual(dep_getusedeps(
+ cpv), use)
+ else:
+ if len(use):
+ self.assertEqual(dep_getusedeps(
+ cpv + "[" + use + "]"), (use,))
+ else:
+ self.assertEqual(dep_getusedeps(
+ cpv + "[" + use + "]"), ())
diff --git a/lib/portage/tests/dep/test_dnf_convert.py b/lib/portage/tests/dep/test_dnf_convert.py
new file mode 100644
index 000000000..b92778d4a
--- /dev/null
+++ b/lib/portage/tests/dep/test_dnf_convert.py
@@ -0,0 +1,48 @@
+# Copyright 2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import use_reduce
+from portage.dep._dnf import dnf_convert
+
+class DNFConvertTestCase(TestCase):
+
+ def testDNFConvert(self):
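+ # Each case maps a dependency string (parsed with use_reduce(opconvert=True)) to its expected disjunctive normal form.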
+
+ test_cases = (
+ (
+ '|| ( A B ) || ( C D )',
+ [['||', ['A', 'C'], ['A', 'D'], ['B', 'C'], ['B', 'D']]],
+ ),
+ (
+ '|| ( A B ) || ( B C )',
+ [['||', ['A', 'B'], ['A', 'C'], ['B', 'B'], ['B', 'C']]],
+ ),
+ (
+ '|| ( A ( B C D ) )',
+ [['||', 'A', ['B', 'C', 'D']]],
+ ),
+ (
+ '|| ( A ( B C D ) ) E',
+ [['||', ['E', 'A'], ['E', 'B', 'C', 'D']]],
+ ),
+ (
+ '|| ( A ( B C ) ) || ( D E ) F',
+ [['||', ['F', 'A', 'D'], ['F', 'A', 'E'], ['F', 'B', 'C', 'D'], ['F', 'B', 'C', 'E']]],
+ ),
+ (
+ '|| ( A ( B C || ( D E ) ) ( F G ) H )',
+ [['||', 'A', ['B', 'C', 'D'], ['B', 'C', 'E'], ['F', 'G'], 'H']],
+ ),
+ (
+ '|| ( A ( B C || ( D E ) ) F )',
+ [['||', 'A', ['B', 'C', 'D'], ['B', 'C', 'E'], 'F']],
+ ),
+ (
+ '|| ( A ( C || ( D E ) || ( F G ) ) H )',
+ [['||', 'A', ['C', 'D', 'F'], ['C', 'D', 'G'], ['C', 'E', 'F'], ['C', 'E', 'G'], 'H']],
+ ),
+ )
+
+ for dep_str, result in test_cases:
+ self.assertEqual(dnf_convert(use_reduce(dep_str, opconvert=True)), result)
diff --git a/lib/portage/tests/dep/test_get_operator.py b/lib/portage/tests/dep/test_get_operator.py
new file mode 100644
index 000000000..5076e2107
--- /dev/null
+++ b/lib/portage/tests/dep/test_get_operator.py
@@ -0,0 +1,37 @@
+# test_get_operator.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import get_operator
+
+class GetOperator(TestCase):
+
+ def testGetOperator(self):
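+ # Each pair maps an operator prefix to the value get_operator() should return for it.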
+
+ # get_operator does not validate operators
+ tests = [
+ ("~", "~"),
+ ("=", "="),
+ (">", ">"),
+ (">=", ">="),
+ ("<=", "<="),
+ ]
+
+ test_cpvs = ["sys-apps/portage-2.1"]
+ slots = [None, "1", "linux-2.5.6"]
+ for cpv in test_cpvs:
+ for test in tests:
+ for slot in slots:
+ atom = cpv[:]
+ if slot:
+ atom += ":" + slot
+ result = get_operator(test[0] + atom)
+ self.assertEqual(result, test[1],
+ msg="get_operator(%s) != %s" % (test[0] + atom, test[1]))
+
+ result = get_operator("sys-apps/portage")
+ self.assertEqual(result, None)
+
+ result = get_operator("=sys-apps/portage-2.1*")
+ self.assertEqual(result, "=*")
diff --git a/lib/portage/tests/dep/test_get_required_use_flags.py b/lib/portage/tests/dep/test_get_required_use_flags.py
new file mode 100644
index 000000000..90e096c78
--- /dev/null
+++ b/lib/portage/tests/dep/test_get_required_use_flags.py
@@ -0,0 +1,44 @@
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import get_required_use_flags
+from portage.exception import InvalidDependString
+
+class TestGetRequiredUseFlags(TestCase):
+
+ def testGetRequiredUseFlags(self):
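+ # Each case is (REQUIRED_USE, expected set of USE flags it references); malformed strings should raise InvalidDependString.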
+ test_cases = (
+ ("a b c", ["a", "b", "c"]),
+
+ ("|| ( a b c )", ["a", "b", "c"]),
+ ("^^ ( a b c )", ["a", "b", "c"]),
+ ("?? ( a b c )", ["a", "b", "c"]),
+ ("?? ( )", []),
+
+ ("|| ( a b ^^ ( d e f ) )", ["a", "b", "d", "e", "f"]),
+ ("^^ ( a b || ( d e f ) )", ["a", "b", "d", "e", "f"]),
+
+ ("( ^^ ( a ( b ) ( || ( ( d e ) ( f ) ) ) ) )", ["a", "b", "d", "e", "f"]),
+
+ ("a? ( ^^ ( b c ) )", ["a", "b", "c"]),
+ ("a? ( ^^ ( !b !d? ( c ) ) )", ["a", "b", "c", "d"]),
+ )
+
+ test_cases_xfail = (
+ ("^^ ( || ( a b ) ^^ ( b c )"),
+ ("^^( || ( a b ) ^^ ( b c ) )"),
+ ("^^ || ( a b ) ^^ ( b c )"),
+ ("^^ ( ( || ) ( a b ) ^^ ( b c ) )"),
+ ("^^ ( || ( a b ) ) ^^ ( b c ) )"),
+ )
+
+ for required_use, expected in test_cases:
+ result = get_required_use_flags(required_use)
+ expected = set(expected)
+ self.assertEqual(result, expected, \
+ "REQUIRED_USE: '%s', expected: '%s', got: '%s'" % (required_use, expected, result))
+
+ for required_use in test_cases_xfail:
+ self.assertRaisesMsg("REQUIRED_USE: '%s'" % (required_use,), \
+ InvalidDependString, get_required_use_flags, required_use)
diff --git a/lib/portage/tests/dep/test_isjustname.py b/lib/portage/tests/dep/test_isjustname.py
new file mode 100644
index 000000000..9b95bcd0f
--- /dev/null
+++ b/lib/portage/tests/dep/test_isjustname.py
@@ -0,0 +1,24 @@
+# test_isjustname.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import isjustname
+
+class IsJustName(TestCase):
+
+ def testIsJustName(self):
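+ # isjustname() is expected to be False whenever a version suffix is attached, and True otherwise.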
+
+ cats = ("", "sys-apps/", "foo/", "virtual/")
+ pkgs = ("portage", "paludis", "pkgcore", "notARealPkg")
+ vers = ("", "-2.0-r3", "-1.0_pre2", "-3.1b")
+
+ for pkg in pkgs:
+ for cat in cats:
+ for ver in vers:
+ if len(ver):
+ self.assertFalse(isjustname(cat + pkg + ver),
+ msg="isjustname(%s) is True!" % (cat + pkg + ver))
+ else:
+ self.assertTrue(isjustname(cat + pkg + ver),
+ msg="isjustname(%s) is False!" % (cat + pkg + ver))
diff --git a/lib/portage/tests/dep/test_isvalidatom.py b/lib/portage/tests/dep/test_isvalidatom.py
new file mode 100644
index 000000000..9d3367aab
--- /dev/null
+++ b/lib/portage/tests/dep/test_isvalidatom.py
@@ -0,0 +1,162 @@
+# Copyright 2006-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import isvalidatom
+
+class IsValidAtomTestCase(object):
+ def __init__(self, atom, expected, allow_wildcard=False,
+ allow_repo=False, allow_build_id=False):
+ self.atom = atom
+ self.expected = expected
+ self.allow_wildcard = allow_wildcard
+ self.allow_repo = allow_repo
+ self.allow_build_id = allow_build_id
+
+class IsValidAtom(TestCase):
+
+ def testIsValidAtom(self):
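+ # Each test case pairs an atom with the expected isvalidatom() result and the allow_* keyword arguments to pass through.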
+
+ test_cases = (
+ IsValidAtomTestCase("sys-apps/portage", True),
+ IsValidAtomTestCase("=sys-apps/portage-2.1", True),
+ IsValidAtomTestCase("=sys-apps/portage-2.1*", True),
+ IsValidAtomTestCase(">=sys-apps/portage-2.1", True),
+ IsValidAtomTestCase("<=sys-apps/portage-2.1", True),
+ IsValidAtomTestCase(">sys-apps/portage-2.1", True),
+ IsValidAtomTestCase("<sys-apps/portage-2.1", True),
+ IsValidAtomTestCase("~sys-apps/portage-2.1", True),
+ IsValidAtomTestCase("sys-apps/portage:foo", True),
+ IsValidAtomTestCase("sys-apps/portage-2.1:foo", False),
+ IsValidAtomTestCase("sys-apps/portage-2.1:", False),
+ IsValidAtomTestCase("sys-apps/portage-2.1:", False),
+ IsValidAtomTestCase("sys-apps/portage-2.1:[foo]", False),
+ IsValidAtomTestCase("sys-apps/portage", True),
+ IsValidAtomTestCase("sys-apps/portage", True),
+ IsValidAtomTestCase("sys-apps/portage", True),
+ IsValidAtomTestCase("sys-apps/portage", True),
+ IsValidAtomTestCase("sys-apps/portage", True),
+ IsValidAtomTestCase("sys-apps/portage", True),
+ IsValidAtomTestCase("sys-apps/portage", True),
+
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar?,!baz?,!doc=,build=]", True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[doc?]", True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!doc?]", True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[doc=]", True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!doc=]", True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!doc]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!-doc]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!-doc=]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[!-doc?]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[-doc?]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[-doc=]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[-doc!=]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[-doc=]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar][-baz][doc?][!build?]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar,-baz,doc?,!build?]", True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar,-baz,doc?,!build?,]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[,bar,-baz,doc?,!build?]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar,-baz][doc?,!build?]", False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo[bar][doc,build]", False),
+ IsValidAtomTestCase(">~cate-gory/foo-1.0", False),
+ IsValidAtomTestCase(">~category/foo-1.0", False),
+ IsValidAtomTestCase("<~category/foo-1.0", False),
+ IsValidAtomTestCase("###cat/foo-1.0", False),
+ IsValidAtomTestCase("~sys-apps/portage", False),
+ IsValidAtomTestCase("portage", False),
+ IsValidAtomTestCase("=portage", False),
+ IsValidAtomTestCase(">=portage-2.1", False),
+ IsValidAtomTestCase("~portage-2.1", False),
+ IsValidAtomTestCase("=portage-2.1*", False),
+ IsValidAtomTestCase("null/portage", True),
+ IsValidAtomTestCase("null/portage*:0", False),
+ IsValidAtomTestCase(">=null/portage-2.1", True),
+ IsValidAtomTestCase(">=null/portage", False),
+ IsValidAtomTestCase(">null/portage", False),
+ IsValidAtomTestCase("=null/portage*", False),
+ IsValidAtomTestCase("=null/portage", False),
+ IsValidAtomTestCase("~null/portage", False),
+ IsValidAtomTestCase("<=null/portage", False),
+ IsValidAtomTestCase("<null/portage", False),
+ IsValidAtomTestCase("~null/portage-2.1", True),
+ IsValidAtomTestCase("=null/portage-2.1*", True),
+ IsValidAtomTestCase("null/portage-2.1*", False),
+ IsValidAtomTestCase("app-doc/php-docs-20071125", False),
+ IsValidAtomTestCase("app-doc/php-docs-20071125-r2", False),
+ IsValidAtomTestCase("=foo/bar-1-r1-1-r1", False),
+ IsValidAtomTestCase("foo/-z-1", False),
+
+ # These are invalid because a package name must not end in a
+ # hyphen followed by digits (it would look like a version)
+ IsValidAtomTestCase("=foo/bar-1-r1-1-r1", False),
+ IsValidAtomTestCase("=foo/bar-123-1", False),
+ IsValidAtomTestCase("=foo/bar-123-1*", False),
+ IsValidAtomTestCase("foo/bar-123", False),
+ IsValidAtomTestCase("=foo/bar-123-1-r1", False),
+ IsValidAtomTestCase("=foo/bar-123-1-r1*", False),
+ IsValidAtomTestCase("foo/bar-123-r1", False),
+ IsValidAtomTestCase("foo/bar-1", False),
+
+ IsValidAtomTestCase("=foo/bar--baz-1-r1", True),
+ IsValidAtomTestCase("=foo/bar-baz--1-r1", True),
+ IsValidAtomTestCase("=foo/bar-baz---1-r1", True),
+ IsValidAtomTestCase("=foo/bar-baz---1", True),
+ IsValidAtomTestCase("=foo/bar-baz-1--r1", False),
+ IsValidAtomTestCase("games-strategy/ufo2000", True),
+ IsValidAtomTestCase("~games-strategy/ufo2000-0.1", True),
+ IsValidAtomTestCase("=media-libs/x264-20060810", True),
+ IsValidAtomTestCase("foo/b", True),
+ IsValidAtomTestCase("app-text/7plus", True),
+ IsValidAtomTestCase("foo/666", True),
+ IsValidAtomTestCase("=dev-libs/poppler-qt3-0.11*", True),
+
+ # Testing atoms with repositories
+ IsValidAtomTestCase("sys-apps/portage::repo_123-name", True, allow_repo=True),
+ IsValidAtomTestCase("=sys-apps/portage-2.1::repo", True, allow_repo=True),
+ IsValidAtomTestCase("=sys-apps/portage-2.1*::repo", True, allow_repo=True),
+ IsValidAtomTestCase("sys-apps/portage:foo::repo", True, allow_repo=True),
+ IsValidAtomTestCase("sys-apps/portage-2.1:foo::repo", False, allow_repo=True),
+ IsValidAtomTestCase("sys-apps/portage-2.1:::repo", False, allow_repo=True),
+ IsValidAtomTestCase("sys-apps/portage-2.1:::repo[foo]", False, allow_repo=True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[bar?,!baz?,!doc=,build=]", True, allow_repo=True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[doc?]", True, allow_repo=True),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[!doc]", False, allow_repo=True),
+ IsValidAtomTestCase("###cat/foo-1.0::repo", False, allow_repo=True),
+ IsValidAtomTestCase("~sys-apps/portage::repo", False, allow_repo=True),
+ IsValidAtomTestCase("portage::repo", False, allow_repo=True),
+ IsValidAtomTestCase("=portage::repo", False, allow_repo=True),
+ IsValidAtomTestCase("null/portage::repo", True, allow_repo=True),
+ IsValidAtomTestCase("app-doc/php-docs-20071125::repo", False, allow_repo=True),
+ IsValidAtomTestCase("=foo/bar-1-r1-1-r1::repo", False, allow_repo=True),
+
+ IsValidAtomTestCase("sys-apps/portage::repo_123-name", False, allow_repo=False),
+ IsValidAtomTestCase("=sys-apps/portage-2.1::repo", False, allow_repo=False),
+ IsValidAtomTestCase("=sys-apps/portage-2.1*::repo", False, allow_repo=False),
+ IsValidAtomTestCase("sys-apps/portage:foo::repo", False, allow_repo=False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[bar?,!baz?,!doc=,build=]", False, allow_repo=False),
+ IsValidAtomTestCase("=sys-apps/portage-2.2*:foo::repo[doc?]", False, allow_repo=False),
+ IsValidAtomTestCase("null/portage::repo", False, allow_repo=False),
+
+ IsValidAtomTestCase("virtual/ffmpeg:0/53", True),
+ IsValidAtomTestCase("virtual/ffmpeg:0/53=", True),
+ IsValidAtomTestCase("virtual/ffmpeg:0/53*", False),
+ IsValidAtomTestCase("virtual/ffmpeg:=", True),
+ IsValidAtomTestCase("virtual/ffmpeg:0=", True),
+ IsValidAtomTestCase("virtual/ffmpeg:*", True),
+ IsValidAtomTestCase("virtual/ffmpeg:0*", False),
+ IsValidAtomTestCase("virtual/ffmpeg:0", True),
+
+ # Wildcard atoms
+ IsValidAtomTestCase("*/portage-2.1", False, allow_wildcard=True),
+ )
+
+ for test_case in test_cases:
+ if test_case.expected:
+ atom_type = "valid"
+ else:
+ atom_type = "invalid"
+ self.assertEqual(bool(isvalidatom(test_case.atom, allow_wildcard=test_case.allow_wildcard,
+ allow_repo=test_case.allow_repo,
+ allow_build_id=test_case.allow_build_id)),
+ test_case.expected,
+ msg="isvalidatom(%s) != %s" % (test_case.atom, test_case.expected))
diff --git a/lib/portage/tests/dep/test_match_from_list.py b/lib/portage/tests/dep/test_match_from_list.py
new file mode 100644
index 000000000..3080479c2
--- /dev/null
+++ b/lib/portage/tests/dep/test_match_from_list.py
@@ -0,0 +1,146 @@
+# Copyright 2006-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import sys
+from portage.tests import TestCase
+from portage.dep import Atom, match_from_list, _repo_separator
+from portage.versions import catpkgsplit, _pkg_str
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+class Package(object):
+ """
+ Provides a minimal subset of attributes of _emerge.Package.Package
+ """
+ def __init__(self, atom):
+ atom = Atom(atom, allow_repo=True)
+ self.cp = atom.cp
+ slot = atom.slot
+ if atom.sub_slot:
+ slot = "%s/%s" % (slot, atom.sub_slot)
+ if not slot:
+ slot = '0'
+ self.cpv = _pkg_str(atom.cpv, slot=slot, repo=atom.repo)
+ self.cpv_split = catpkgsplit(self.cpv)
+ self.slot = self.cpv.slot
+ self.sub_slot = self.cpv.sub_slot
+ self.repo = atom.repo
+ if atom.use:
+ self.use = self._use_class(atom.use.enabled)
+ self.iuse = self._iuse_class(atom.use.required)
+ else:
+ self.use = self._use_class([])
+ self.iuse = self._iuse_class([])
+
+ class _use_class(object):
+ def __init__(self, use):
+ self.enabled = frozenset(use)
+
+ class _iuse_class(object):
+ def __init__(self, iuse):
+ self.all = frozenset(iuse)
+
+ def is_valid_flag(self, flags):
+ if isinstance(flags, basestring):
+ flags = [flags]
+ for flag in flags:
+ if not flag in self.all:
+ return False
+ return True
+
+class Test_match_from_list(TestCase):
+
+ def testMatch_from_list(self):
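+ # Each case is (atom, list of candidate cpvs or Package objects, expected matches).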
+ tests = (
+ ("=sys-apps/portage-45*", [], []),
+ ("=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ ("!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ ("!!=sys-apps/portage-45*", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ ("=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ ("=sys-apps/portage-045", ["sys-apps/portage-046"], []),
+ ("~sys-apps/portage-045", ["sys-apps/portage-045-r1"], ["sys-apps/portage-045-r1"]),
+ ("~sys-apps/portage-045", ["sys-apps/portage-046-r1"], []),
+ ("<=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ ("<=sys-apps/portage-045", ["sys-apps/portage-046"], []),
+ ("<sys-apps/portage-046", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ ("<sys-apps/portage-046", ["sys-apps/portage-046"], []),
+ (">=sys-apps/portage-045", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ (">=sys-apps/portage-047", ["sys-apps/portage-046-r1"], []),
+ (">sys-apps/portage-044", ["sys-apps/portage-045"], ["sys-apps/portage-045"]),
+ (">sys-apps/portage-047", ["sys-apps/portage-046-r1"], []),
+ ("sys-apps/portage:0", [Package("=sys-apps/portage-045:0")], ["sys-apps/portage-045"]),
+ ("sys-apps/portage:0", [Package("=sys-apps/portage-045:1")], []),
+ ("=cat/pkg-1-r1*", ["cat/pkg-1_alpha1"], []),
+ # =* glob matches only on boundaries between version parts,
+ # so 1* does not match 10 (bug 560466).
+ ("=cat/pkg-1.1*", ["cat/pkg-1.1-r1", "cat/pkg-1.10-r1"], ["cat/pkg-1.1-r1"]),
+ ("=cat/pkg-1-r1*", ["cat/pkg-1-r11"], []),
+ ("=cat/pkg-1_pre*", ["cat/pkg-1_pre1"], ["cat/pkg-1_pre1"]),
+ ("=cat/pkg-1-r1*", ["cat/pkg-1-r1"], ["cat/pkg-1-r1"]),
+ ("=cat/pkg-1-r11*", ["cat/pkg-1-r11"], ["cat/pkg-1-r11"]),
+ ("=cat/pkg-1-r11*", ["cat/pkg-01-r11"], ["cat/pkg-01-r11"]),
+ ("=cat/pkg-01-r11*", ["cat/pkg-1-r11"], ["cat/pkg-1-r11"]),
+ ("=cat/pkg-01-r11*", ["cat/pkg-001-r11"], ["cat/pkg-001-r11"]),
+ ("=sys-fs/udev-1*", ["sys-fs/udev-123", "sys-fs/udev-123-r1"], []),
+ ("=sys-fs/udev-123*", ["sys-fs/udev-123"], ["sys-fs/udev-123"]),
+ ("=sys-fs/udev-123*", ["sys-fs/udev-123-r1"], ["sys-fs/udev-123-r1"]),
+ ("=sys-fs/udev-4*", ["sys-fs/udev-456", "sys-fs/udev-456-r1"], []),
+ ("=sys-fs/udev-456*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
+ ("*/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
+ ("*/*:0", ["sys-fs/udev-456:0"], ["sys-fs/udev-456:0"]),
+ ("*/*:1", ["sys-fs/udev-456:0"], []),
+ ("sys-fs/*", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
+ ("*/udev", ["sys-fs/udev-456"], ["sys-fs/udev-456"]),
+ ("=sys-apps/portage-2*", ["sys-apps/portage-2.1"], ["sys-apps/portage-2.1"]),
+ ("=sys-apps/portage-2.1*", ["sys-apps/portage-2.1.2"], ["sys-apps/portage-2.1.2"]),
+ ("dev-libs/*", ["sys-apps/portage-2.1.2"], []),
+ ("*/tar", ["sys-apps/portage-2.1.2"], []),
+ ("*/*", ["dev-libs/A-1", "dev-libs/B-1"], ["dev-libs/A-1", "dev-libs/B-1"]),
+ ("dev-libs/*", ["dev-libs/A-1", "sci-libs/B-1"], ["dev-libs/A-1"]),
+
+ ("dev-libs/A[foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], ["dev-libs/A-1"]),
+ ("dev-libs/A[-foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], ["dev-libs/A-2"]),
+ ("dev-libs/A[-foo]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2")], []),
+ ("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo]")], []),
+ ("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[-foo,bar]")], []),
+ ("dev-libs/A[foo,bar]", [Package("=dev-libs/A-1[foo]"), Package("=dev-libs/A-2[foo,bar]")], ["dev-libs/A-2"]),
+ ("dev-libs/A[foo,bar(+)]", [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")], ["dev-libs/A-2"]),
+ ("dev-libs/A[foo,bar(-)]", [Package("=dev-libs/A-1[-foo]"), Package("=dev-libs/A-2[foo]")], []),
+ ("dev-libs/A[foo,-bar(-)]", [Package("=dev-libs/A-1[-foo,bar]"), Package("=dev-libs/A-2[foo]")], ["dev-libs/A-2"]),
+
+ ("dev-libs/A::repo1", [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")], ["dev-libs/A-1::repo1"]),
+ ("dev-libs/A::repo2", [Package("=dev-libs/A-1::repo1"), Package("=dev-libs/A-1::repo2")], ["dev-libs/A-1::repo2"]),
+ ("dev-libs/A::repo2[foo]", [Package("=dev-libs/A-1::repo1[foo]"), Package("=dev-libs/A-1::repo2[-foo]")], []),
+ ("dev-libs/A::repo2[foo]", [Package("=dev-libs/A-1::repo1[-foo]"), Package("=dev-libs/A-1::repo2[foo]")], ["dev-libs/A-1::repo2"]),
+ ("dev-libs/A:1::repo2[foo]", [Package("=dev-libs/A-1:1::repo1"), Package("=dev-libs/A-1:2::repo2")], []),
+ ("dev-libs/A:1::repo2[foo]", [Package("=dev-libs/A-1:2::repo1"), Package("=dev-libs/A-1:1::repo2[foo]")], ["dev-libs/A-1::repo2"]),
+
+ ("virtual/ffmpeg:0/53", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
+ ("virtual/ffmpeg:0/53=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
+ ("virtual/ffmpeg:0/52", [Package("=virtual/ffmpeg-0.10.3:0/53")], []),
+ ("virtual/ffmpeg:=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
+ ("virtual/ffmpeg:0=", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
+ ("virtual/ffmpeg:*", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
+ ("virtual/ffmpeg:0", [Package("=virtual/ffmpeg-0.10.3:0/53")], ["virtual/ffmpeg-0.10.3"]),
+
+ ("sys-libs/db:4.8/4.8", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
+ ("sys-libs/db:4.8/4.8=", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
+ ("sys-libs/db:4.8=", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
+ ("sys-libs/db:*", [Package("=sys-libs/db-4.8.30:4.8")], ["sys-libs/db-4.8.30"]),
+ ("sys-libs/db:4.8/0", [Package("=sys-libs/db-4.8.30:4.8")], []),
+ ("sys-libs/db:4.8/0=", [Package("=sys-libs/db-4.8.30:4.8")], []),
+ )
+
+ for atom, cpv_list, expected_result in tests:
+ result = []
+ for pkg in match_from_list(atom, cpv_list):
+ if isinstance(pkg, Package):
+ if pkg.repo:
+ result.append(pkg.cpv + _repo_separator + pkg.repo)
+ else:
+ result.append(pkg.cpv)
+ else:
+ result.append(pkg)
+ self.assertEqual(result, expected_result)
diff --git a/lib/portage/tests/dep/test_overlap_dnf.py b/lib/portage/tests/dep/test_overlap_dnf.py
new file mode 100644
index 000000000..ee48e5556
--- /dev/null
+++ b/lib/portage/tests/dep/test_overlap_dnf.py
@@ -0,0 +1,28 @@
+# Copyright 2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import Atom, use_reduce
+from portage.dep.dep_check import _overlap_dnf
+
+class OverlapDNFTestCase(TestCase):
+
+ def testOverlapDNF(self):
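+ # In these cases, only || groups that share an atom (cat/B) are combined into DNF; unrelated groups pass through unchanged.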
+
+ test_cases = (
+ (
+ '|| ( cat/A cat/B ) cat/E || ( cat/C cat/D )',
+ [['||', 'cat/A', 'cat/B'], 'cat/E', ['||', 'cat/C', 'cat/D']],
+ ),
+ (
+ '|| ( cat/A cat/B ) cat/D || ( cat/B cat/C )',
+ ['cat/D', ['||', ['cat/A', 'cat/B'], ['cat/A', 'cat/C'], ['cat/B', 'cat/B'], ['cat/B', 'cat/C']]],
+ ),
+ (
+ '|| ( cat/A cat/B ) || ( cat/C cat/D ) || ( ( cat/B cat/E ) cat/F )',
+ [['||', ['cat/A', 'cat/B', 'cat/E'], ['cat/A', 'cat/F'], ['cat/B', 'cat/B', 'cat/E'], ['cat/B', 'cat/F']], ['||', 'cat/C', 'cat/D']],
+ ),
+ )
+
+ for dep_str, result in test_cases:
+ self.assertEqual(_overlap_dnf(use_reduce(dep_str, token_class=Atom, opconvert=True)), result)
diff --git a/lib/portage/tests/dep/test_paren_reduce.py b/lib/portage/tests/dep/test_paren_reduce.py
new file mode 100644
index 000000000..324465289
--- /dev/null
+++ b/lib/portage/tests/dep/test_paren_reduce.py
@@ -0,0 +1,69 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.dep import paren_reduce
+from portage.exception import InvalidDependString
+
+class TestParenReduce(TestCase):
+
+ def testParenReduce(self):
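+ # Each case is (dependency string, expected paren_reduce() result); the xfail strings are malformed and should raise InvalidDependString.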
+
+ test_cases = (
+ ("A", ["A"]),
+ ("( A )", ["A"]),
+ ("|| ( A B )", ["||", ["A", "B"]]),
+ ("|| ( A || ( B C ) )", ["||", ["A", "||", ["B", "C"]]]),
+ ("|| ( A || ( B C D ) )", ["||", ["A", "||", ["B", "C", "D"]]]),
+ ("|| ( A || ( B || ( C D ) E ) )", ["||", ["A", "||", ["B", "||", ["C", "D"], "E"]]]),
+ ("a? ( A )", ["a?", ["A"]]),
+
+ ("( || ( ( ( A ) B ) ) )", ["A", "B"]),
+ ("( || ( || ( ( A ) B ) ) )", ["||", ["A", "B"]]),
+ ("|| ( A )", ["A"]),
+ ("( || ( || ( || ( A ) foo? ( B ) ) ) )", ["||", ["A", "foo?", ["B"]]]),
+ ("( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )", ["||", ["bar?", ["A"], "foo?", ["B"]]]),
+ ("A || ( ) foo? ( ) B", ["A", "B"]),
+
+ ("|| ( A ) || ( B )", ["A", "B"]),
+ ("foo? ( A ) foo? ( B )", ["foo?", ["A"], "foo?", ["B"]]),
+
+ ("|| ( ( A B ) C )", ["||", [["A", "B"], "C"]]),
+ ("|| ( ( A B ) ( C ) )", ["||", [["A", "B"], "C"]]),
+ # test USE dep defaults for bug #354003
+ (">=dev-lang/php-5.2[pcre(+)]", [">=dev-lang/php-5.2[pcre(+)]"]),
+ )
+
+ test_cases_xfail = (
+ "( A",
+ "A )",
+
+ "||( A B )",
+ "|| (A B )",
+ "|| ( A B)",
+ "|| ( A B",
+ "|| A B )",
+
+ "|| A B",
+ "|| ( A B ) )",
+ "|| || B C",
+
+ "|| ( A B || )",
+
+ "a? A",
+
+ "( || ( || || ( A ) foo? ( B ) ) )",
+ "( || ( || bar? ( A ) foo? ( B ) ) )",
+ )
+
+ for dep_str, expected_result in test_cases:
+ self.assertEqual(paren_reduce(dep_str, _deprecation_warn=False),
+ expected_result,
+ "input: '%s' result: %s != %s" % (dep_str,
+ paren_reduce(dep_str, _deprecation_warn=False),
+ expected_result))
+
+ for dep_str in test_cases_xfail:
+ self.assertRaisesMsg(dep_str,
+ InvalidDependString, paren_reduce, dep_str,
+ _deprecation_warn=False)
diff --git a/lib/portage/tests/dep/test_use_reduce.py b/lib/portage/tests/dep/test_use_reduce.py
new file mode 100644
index 000000000..4f65567cf
--- /dev/null
+++ b/lib/portage/tests/dep/test_use_reduce.py
@@ -0,0 +1,626 @@
+# Copyright 2009-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.exception import InvalidDependString
+from portage.dep import Atom, use_reduce
+
+class UseReduceTestCase(object):
+ def __init__(self, deparray, uselist=[], masklist=[],
+ matchall=0, excludeall=[], is_src_uri=False,
+ eapi='0', opconvert=False, flat=False, expected_result=None,
+ is_valid_flag=None, token_class=None):
+ self.deparray = deparray
+ self.uselist = uselist
+ self.masklist = masklist
+ self.matchall = matchall
+ self.excludeall = excludeall
+ self.is_src_uri = is_src_uri
+ self.eapi = eapi
+ self.opconvert = opconvert
+ self.flat = flat
+ self.is_valid_flag = is_valid_flag
+ self.token_class = token_class
+ self.expected_result = expected_result
+
+ def run(self):
+ try:
+ return use_reduce(self.deparray, self.uselist, self.masklist,
+ self.matchall, self.excludeall, self.is_src_uri, self.eapi,
+ self.opconvert, self.flat, self.is_valid_flag, self.token_class)
+ except InvalidDependString as e:
+ raise InvalidDependString("%s: %s" % (e, self.deparray))
+
+class UseReduce(TestCase):
+
+ def always_true(self, unused_parameter):
+ return True
+
+ def always_false(self, unused_parameter):
+ return False
+
+ def testUseReduce(self):
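+ # Each UseReduceTestCase bundles a dependency string and use_reduce() keyword arguments with the expected reduced result.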
+
+ EAPI_WITH_SRC_URI_ARROWS = "2"
+ EAPI_WITHOUT_SRC_URI_ARROWS = "0"
+
+ test_cases = (
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ uselist=["a", "b", "c", "d"],
+ expected_result=["A", "B"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ uselist=["a", "b", "c"],
+ expected_result=["A", "B", "D"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ uselist=["b", "c"],
+ expected_result=["B", "D"]
+ ),
+
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ matchall=True,
+ expected_result=["A", "B", "C", "D"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ masklist=["a", "c"],
+ expected_result=["C", "D"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ matchall=True,
+ masklist=["a", "c"],
+ expected_result=["B", "C", "D"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ uselist=["a", "b"],
+ masklist=["a", "c"],
+ expected_result=["B", "C", "D"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ excludeall=["a", "c"],
+ expected_result=["D"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ uselist=["b"],
+ excludeall=["a", "c"],
+ expected_result=["B", "D"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ matchall=True,
+ excludeall=["a", "c"],
+ expected_result=["A", "B", "D"]
+ ),
+ UseReduceTestCase(
+ "a? ( A ) b? ( B ) !c? ( C ) !d? ( D )",
+ matchall=True,
+ excludeall=["a", "c"],
+ masklist=["b"],
+ expected_result=["A", "D"]
+ ),
+
+ UseReduceTestCase(
+ "a? ( b? ( AB ) )",
+ uselist=["a", "b"],
+ expected_result=["AB"]
+ ),
+ UseReduceTestCase(
+ "a? ( b? ( AB ) C )",
+ uselist=["a"],
+ expected_result=["C"]
+ ),
+ UseReduceTestCase(
+ "a? ( b? ( || ( AB CD ) ) )",
+ uselist=["a", "b"],
+ expected_result=["||", ["AB", "CD"]]
+ ),
+ UseReduceTestCase(
+ "|| ( || ( a? ( A ) b? ( B ) ) )",
+ uselist=["a", "b"],
+ expected_result=["||", ["A", "B"]]
+ ),
+ UseReduceTestCase(
+ "|| ( || ( a? ( A ) b? ( B ) ) )",
+ uselist=["a"],
+ expected_result=["A"]
+ ),
+ UseReduceTestCase(
+ "|| ( || ( a? ( A ) b? ( B ) ) )",
+ uselist=[],
+ expected_result=[]
+ ),
+ UseReduceTestCase(
+ "|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
+ uselist=[],
+ expected_result=[]
+ ),
+ UseReduceTestCase(
+ "|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
+ uselist=["a"],
+ expected_result=["A"]
+ ),
+ UseReduceTestCase(
+ "|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
+ uselist=["b"],
+ expected_result=["B"]
+ ),
+ UseReduceTestCase(
+ "|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
+ uselist=["c"],
+ expected_result=[]
+ ),
+ UseReduceTestCase(
+ "|| ( || ( a? ( || ( A c? ( C ) ) ) b? ( B ) ) )",
+ uselist=["a", "c"],
+ expected_result=["||", ["A", "C"]]
+ ),
+
+ # paren_reduce tests
+ UseReduceTestCase(
+ "A",
+ expected_result=["A"]),
+ UseReduceTestCase(
+ "( A )",
+ expected_result=["A"]),
+ UseReduceTestCase(
+ "|| ( A B )",
+ expected_result=["||", ["A", "B"]]),
+ UseReduceTestCase(
+ "|| ( ( A B ) C )",
+ expected_result=["||", [["A", "B"], "C"]]),
+ UseReduceTestCase(
+ "|| ( ( A B ) ( C ) )",
+ expected_result=["||", [["A", "B"], "C"]]),
+ UseReduceTestCase(
+ "|| ( A || ( B C ) )",
+ expected_result=["||", ["A", "B", "C"]]),
+ UseReduceTestCase(
+ "|| ( A || ( B C D ) )",
+ expected_result=["||", ["A", "B", "C", "D"]]),
+ UseReduceTestCase(
+ "|| ( A || ( B || ( C D ) E ) )",
+ expected_result=["||", ["A", "B", "C", "D", "E"]]),
+ UseReduceTestCase(
+ "( || ( ( ( A ) B ) ) )",
+ expected_result=["A", "B"]),
+ UseReduceTestCase(
+ "( || ( || ( ( A ) B ) ) )",
+ expected_result=["||", ["A", "B"]]),
+ UseReduceTestCase(
+ "( || ( || ( ( A ) B ) ) )",
+ expected_result=["||", ["A", "B"]]),
+ UseReduceTestCase(
+ "|| ( A )",
+ expected_result=["A"]),
+ UseReduceTestCase(
+ "( || ( || ( || ( A ) foo? ( B ) ) ) )",
+ expected_result=["A"]),
+ UseReduceTestCase(
+ "( || ( || ( || ( A ) foo? ( B ) ) ) )",
+ uselist=["foo"],
+ expected_result=["||", ["A", "B"]]),
+ UseReduceTestCase(
+ "( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
+ expected_result=[]),
+ UseReduceTestCase(
+ "( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
+ uselist=["foo", "bar"],
+ expected_result=["||", ["A", "B"]]),
+ UseReduceTestCase(
+ "A || ( bar? ( C ) ) foo? ( bar? ( C ) ) B",
+ expected_result=["A", "B"]),
+ UseReduceTestCase(
+ "|| ( A ) || ( B )",
+ expected_result=["A", "B"]),
+ UseReduceTestCase(
+ "foo? ( A ) foo? ( B )",
+ expected_result=[]),
+ UseReduceTestCase(
+ "foo? ( A ) foo? ( B )",
+ uselist=["foo"],
+ expected_result=["A", "B"]),
+ UseReduceTestCase(
+ "|| ( A B ) C",
+ expected_result=['||', ['A', 'B'], 'C']),
+ UseReduceTestCase(
+ "A || ( B C )",
+ expected_result=['A', '||', ['B', 'C']]),
+
+ # SRC_URI stuff
+ UseReduceTestCase(
+ "http://foo/bar -> blah.tbz2",
+ is_src_uri=True,
+ eapi=EAPI_WITH_SRC_URI_ARROWS,
+ expected_result=["http://foo/bar", "->", "blah.tbz2"]),
+ UseReduceTestCase(
+ "foo? ( http://foo/bar -> blah.tbz2 )",
+ uselist=[],
+ is_src_uri=True,
+ eapi=EAPI_WITH_SRC_URI_ARROWS,
+ expected_result=[]),
+ UseReduceTestCase(
+ "foo? ( http://foo/bar -> blah.tbz2 )",
+ uselist=["foo"],
+ is_src_uri=True,
+ eapi=EAPI_WITH_SRC_URI_ARROWS,
+ expected_result=["http://foo/bar", "->", "blah.tbz2"]),
+ UseReduceTestCase(
+ "http://foo/bar -> bar.tbz2 foo? ( ftp://foo/a )",
+ uselist=[],
+ is_src_uri=True,
+ eapi=EAPI_WITH_SRC_URI_ARROWS,
+ expected_result=["http://foo/bar", "->", "bar.tbz2"]),
+ UseReduceTestCase(
+ "http://foo/bar -> bar.tbz2 foo? ( ftp://foo/a )",
+ uselist=["foo"],
+ is_src_uri=True,
+ eapi=EAPI_WITH_SRC_URI_ARROWS,
+ expected_result=["http://foo/bar", "->", "bar.tbz2", "ftp://foo/a"]),
+ UseReduceTestCase(
+ "http://foo.com/foo http://foo/bar -> blah.tbz2",
+ uselist=["foo"],
+ is_src_uri=True,
+ eapi=EAPI_WITH_SRC_URI_ARROWS,
+ expected_result=["http://foo.com/foo", "http://foo/bar", "->", "blah.tbz2"]),
+
+ # opconvert tests
+ UseReduceTestCase(
+ "A",
+ opconvert=True,
+ expected_result=["A"]),
+ UseReduceTestCase(
+ "( A )",
+ opconvert=True,
+ expected_result=["A"]),
+ UseReduceTestCase(
+ "|| ( A B )",
+ opconvert=True,
+ expected_result=[['||', 'A', 'B']]),
+ UseReduceTestCase(
+ "|| ( ( A B ) C )",
+ opconvert=True,
+ expected_result=[['||', ['A', 'B'], 'C']]),
+ UseReduceTestCase(
+ "|| ( A || ( B C ) )",
+ opconvert=True,
+ expected_result=[['||', 'A', 'B', 'C']]),
+ UseReduceTestCase(
+ "|| ( A || ( B C D ) )",
+ opconvert=True,
+ expected_result=[['||', 'A', 'B', 'C', 'D']]),
+ UseReduceTestCase(
+ "|| ( A || ( B || ( C D ) E ) )",
+ expected_result=["||", ["A", "B", "C", "D", "E"]]),
+ UseReduceTestCase(
+ "( || ( ( ( A ) B ) ) )",
+ opconvert=True,
+ expected_result=['A', 'B']),
+ UseReduceTestCase(
+ "( || ( || ( ( A ) B ) ) )",
+ opconvert=True,
+ expected_result=[['||', 'A', 'B']]),
+ UseReduceTestCase(
+ "|| ( A B ) C",
+ opconvert=True,
+ expected_result=[['||', 'A', 'B'], 'C']),
+ UseReduceTestCase(
+ "A || ( B C )",
+ opconvert=True,
+ expected_result=['A', ['||', 'B', 'C']]),
+ UseReduceTestCase(
+ "A foo? ( || ( B || ( bar? ( || ( C D E ) ) !bar? ( F ) ) ) ) G",
+ uselist=["foo", "bar"],
+ opconvert=True,
+ expected_result=['A', ['||', 'B', 'C', 'D', 'E'], 'G']),
+ UseReduceTestCase(
+ "A foo? ( || ( B || ( bar? ( || ( C D E ) ) !bar? ( F ) ) ) ) G",
+ uselist=["foo", "bar"],
+ opconvert=False,
+ expected_result=['A', '||', ['B', 'C', 'D', 'E'], 'G']),
+
+ UseReduceTestCase(
+ "|| ( A )",
+ opconvert=True,
+ expected_result=["A"]),
+ UseReduceTestCase(
+ "( || ( || ( || ( A ) foo? ( B ) ) ) )",
+ expected_result=["A"]),
+ UseReduceTestCase(
+ "( || ( || ( || ( A ) foo? ( B ) ) ) )",
+ uselist=["foo"],
+ opconvert=True,
+ expected_result=[['||', 'A', 'B']]),
+ UseReduceTestCase(
+ "( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
+ opconvert=True,
+ expected_result=[]),
+ UseReduceTestCase(
+ "( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
+ uselist=["foo", "bar"],
+ opconvert=True,
+ expected_result=[['||', 'A', 'B']]),
+ UseReduceTestCase(
+ "A || ( bar? ( C ) ) foo? ( bar? ( C ) ) B",
+ opconvert=True,
+ expected_result=["A", "B"]),
+ UseReduceTestCase(
+ "|| ( A ) || ( B )",
+ opconvert=True,
+ expected_result=["A", "B"]),
+ UseReduceTestCase(
+ "foo? ( A ) foo? ( B )",
+ opconvert=True,
+ expected_result=[]),
+ UseReduceTestCase(
+ "foo? ( A ) foo? ( B )",
+ uselist=["foo"],
+ opconvert=True,
+ expected_result=["A", "B"]),
+ UseReduceTestCase(
+ "|| ( foo? ( || ( A B ) ) )",
+ uselist=["foo"],
+ opconvert=True,
+ expected_result=[['||', 'A', 'B']]),
+
+ UseReduceTestCase(
+ "|| ( ( A B ) foo? ( || ( C D ) ) )",
+ uselist=["foo"],
+ opconvert=True,
+ expected_result=[['||', ['A', 'B'], 'C', 'D']]),
+
+ UseReduceTestCase(
+ "|| ( ( A B ) foo? ( || ( C D ) ) )",
+ uselist=["foo"],
+ opconvert=False,
+ expected_result=['||', [['A', 'B'], 'C', 'D']]),
+
+ UseReduceTestCase(
+ "|| ( ( A B ) || ( C D ) )",
+ expected_result=['||', [['A', 'B'], 'C', 'D']]),
+
+ UseReduceTestCase(
+ "|| ( ( A B ) || ( C D || ( E ( F G ) || ( H ) ) ) )",
+ expected_result=['||', [['A', 'B'], 'C', 'D', 'E', ['F', 'G'], 'H']]),
+
+ UseReduceTestCase(
+ "|| ( ( A B ) || ( C D || ( E ( F G ) || ( H ) ) ) )",
+ opconvert=True,
+ expected_result=[['||', ['A', 'B'], 'C', 'D', 'E', ['F', 'G'], 'H']]),
+
+ UseReduceTestCase(
+ "|| ( foo? ( A B ) )",
+ uselist=["foo"],
+ expected_result=['A', 'B']),
+
+ UseReduceTestCase(
+ "|| ( || ( foo? ( A B ) ) )",
+ uselist=["foo"],
+ expected_result=['A', 'B']),
+
+ UseReduceTestCase(
+ "|| ( || ( || ( a? ( b? ( c? ( || ( || ( || ( d? ( e? ( f? ( A B ) ) ) ) ) ) ) ) ) ) ) )",
+ uselist=["a", "b", "c", "d", "e", "f"],
+ expected_result=['A', 'B']),
+
+ UseReduceTestCase(
+ "|| ( || ( ( || ( a? ( ( b? ( c? ( || ( || ( || ( ( d? ( e? ( f? ( A B ) ) ) ) ) ) ) ) ) ) ) ) ) ) )",
+ uselist=["a", "b", "c", "d", "e", "f"],
+ expected_result=['A', 'B']),
+
+ UseReduceTestCase(
+ "|| ( ( A ( || ( B ) ) ) )",
+ expected_result=['A', 'B']),
+
+ UseReduceTestCase(
+ "|| ( ( A B ) || ( foo? ( bar? ( ( C D || ( baz? ( E ) ( F G ) || ( H ) ) ) ) ) ) )",
+ uselist=["foo", "bar", "baz"],
+ expected_result=['||', [['A', 'B'], ['C', 'D', '||', ['E', ['F', 'G'], 'H']]]]),
+
+ UseReduceTestCase(
+ "|| ( ( A B ) || ( foo? ( bar? ( ( C D || ( baz? ( E ) ( F G ) || ( H ) ) ) ) ) ) )",
+ uselist=["foo", "bar", "baz"],
+ opconvert=True,
+ expected_result=[['||', ['A', 'B'], ['C', 'D', ['||', 'E', ['F', 'G'], 'H']]]]),
+
+ UseReduceTestCase(
+ "|| ( foo? ( A B ) )",
+ uselist=["foo"],
+ opconvert=True,
+ expected_result=['A', 'B']),
+
+ UseReduceTestCase(
+ "|| ( || ( foo? ( A B ) ) )",
+ uselist=["foo"],
+ opconvert=True,
+ expected_result=['A', 'B']),
+
+ UseReduceTestCase(
+ "|| ( || ( || ( a? ( b? ( c? ( || ( || ( || ( d? ( e? ( f? ( A B ) ) ) ) ) ) ) ) ) ) ) )",
+ uselist=["a", "b", "c", "d", "e", "f"],
+ opconvert=True,
+ expected_result=['A', 'B']),
+
+ # flat test
+ UseReduceTestCase(
+ "A",
+ flat=True,
+ expected_result=["A"]),
+ UseReduceTestCase(
+ "( A )",
+ flat=True,
+ expected_result=["A"]),
+ UseReduceTestCase(
+ "|| ( A B )",
+ flat=True,
+ expected_result=["||", "A", "B"]),
+ UseReduceTestCase(
+ "|| ( A || ( B C ) )",
+ flat=True,
+ expected_result=["||", "A", "||", "B", "C"]),
+ UseReduceTestCase(
+ "|| ( A || ( B C D ) )",
+ flat=True,
+ expected_result=["||", "A", "||", "B", "C", "D"]),
+ UseReduceTestCase(
+ "|| ( A || ( B || ( C D ) E ) )",
+ flat=True,
+ expected_result=["||", "A", "||", "B", "||", "C", "D", "E"]),
+ UseReduceTestCase(
+ "( || ( ( ( A ) B ) ) )",
+ flat=True,
+ expected_result=["||", "A", "B"]),
+ UseReduceTestCase(
+ "( || ( || ( ( A ) B ) ) )",
+ flat=True,
+ expected_result=["||", "||", "A", "B"]),
+ UseReduceTestCase(
+ "( || ( || ( ( A ) B ) ) )",
+ flat=True,
+ expected_result=["||", "||", "A", "B"]),
+ UseReduceTestCase(
+ "|| ( A )",
+ flat=True,
+ expected_result=["||", "A"]),
+ UseReduceTestCase(
+ "( || ( || ( || ( A ) foo? ( B ) ) ) )",
+ expected_result=["A"]),
+ UseReduceTestCase(
+ "( || ( || ( || ( A ) foo? ( B ) ) ) )",
+ uselist=["foo"],
+ flat=True,
+ expected_result=["||", "||", "||", "A", "B"]),
+ UseReduceTestCase(
+ "( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
+ flat=True,
+ expected_result=["||", "||", "||"]),
+ UseReduceTestCase(
+ "( || ( || ( bar? ( A ) || ( foo? ( B ) ) ) ) )",
+ uselist=["foo", "bar"],
+ flat=True,
+ expected_result=["||", "||", "A", "||", "B"]),
+ UseReduceTestCase(
+ "A || ( bar? ( C ) ) foo? ( bar? ( C ) ) B",
+ flat=True,
+ expected_result=["A", "||", "B"]),
+ UseReduceTestCase(
+ "|| ( A ) || ( B )",
+ flat=True,
+ expected_result=["||", "A", "||", "B"]),
+ UseReduceTestCase(
+ "foo? ( A ) foo? ( B )",
+ flat=True,
+ expected_result=[]),
+ UseReduceTestCase(
+ "foo? ( A ) foo? ( B )",
+ uselist=["foo"],
+ flat=True,
+ expected_result=["A", "B"]),
+
+ # use flag validation
+ UseReduceTestCase(
+ "foo? ( A )",
+ uselist=["foo"],
+ is_valid_flag=self.always_true,
+ expected_result=["A"]),
+ UseReduceTestCase(
+ "foo? ( A )",
+ is_valid_flag=self.always_true,
+ expected_result=[]),
+
+ # token_class
+ UseReduceTestCase(
+ "foo? ( dev-libs/A )",
+ uselist=["foo"],
+ token_class=Atom,
+ expected_result=["dev-libs/A"]),
+ UseReduceTestCase(
+ "foo? ( dev-libs/A )",
+ token_class=Atom,
+ expected_result=[]),
+ )
+
+ test_cases_xfail = (
+ UseReduceTestCase("? ( A )"),
+ UseReduceTestCase("!? ( A )"),
+ UseReduceTestCase("( A"),
+ UseReduceTestCase("A )"),
+ UseReduceTestCase("||( A B )"),
+ UseReduceTestCase("|| (A B )"),
+ UseReduceTestCase("|| ( A B)"),
+ UseReduceTestCase("|| ( A B"),
+ UseReduceTestCase("|| A B )"),
+ UseReduceTestCase("|| A B"),
+ UseReduceTestCase("|| ( A B ) )"),
+ UseReduceTestCase("|| || B C"),
+ UseReduceTestCase("|| ( A B || )"),
+ UseReduceTestCase("a? A"),
+ UseReduceTestCase("( || ( || || ( A ) foo? ( B ) ) )"),
+ UseReduceTestCase("( || ( || bar? ( A ) foo? ( B ) ) )"),
+ UseReduceTestCase("foo?"),
+ UseReduceTestCase("foo? || ( A )"),
+ UseReduceTestCase("|| ( )"),
+ UseReduceTestCase("foo? ( )"),
+
+ # invalid SRC_URI arrow syntax
+ UseReduceTestCase("http://foo/bar -> blah.tbz2", is_src_uri=True, eapi=EAPI_WITHOUT_SRC_URI_ARROWS),
+ UseReduceTestCase("|| ( http://foo/bar -> blah.tbz2 )", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar -> foo? ( ftp://foo/a )", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar blah.tbz2 ->", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("-> http://foo/bar blah.tbz2 )", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar ->", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar -> foo? ( http://foo.com/foo )", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("foo? ( http://foo/bar -> ) blah.tbz2", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar -> foo/blah.tbz2", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+ UseReduceTestCase("http://foo/bar -> -> bar.tbz2 foo? ( ftp://foo/a )", is_src_uri=True, eapi=EAPI_WITH_SRC_URI_ARROWS),
+
+ UseReduceTestCase("http://foo/bar -> bar.tbz2 foo? ( ftp://foo/a )", is_src_uri=False, eapi=EAPI_WITH_SRC_URI_ARROWS),
+
+ UseReduceTestCase(
+ "A",
+ opconvert=True,
+ flat=True),
+
+ # use flag validation
+ UseReduceTestCase("1.0? ( A )"),
+ UseReduceTestCase("!1.0? ( A )"),
+ UseReduceTestCase("!? ( A )"),
+ UseReduceTestCase("!?? ( A )"),
+ UseReduceTestCase(
+ "foo? ( A )",
+ is_valid_flag=self.always_false,
+ ),
+ UseReduceTestCase(
+ "foo? ( A )",
+ uselist=["foo"],
+ is_valid_flag=self.always_false,
+ ),
+
+ # token_class
+ UseReduceTestCase(
+ "foo? ( A )",
+ uselist=["foo"],
+ token_class=Atom),
+ UseReduceTestCase(
+ "A(B",
+ token_class=Atom),
+ )
+
+ for test_case in test_cases:
+ # If it fails then show the input, since lots of our
+ # test cases have the same output but different input,
+ # making it difficult to deduce which test has failed.
+ self.assertEqual(test_case.run(), test_case.expected_result,
+ "input: '%s' result: %s != %s" % (test_case.deparray,
+ test_case.run(), test_case.expected_result))
+
+ for test_case in test_cases_xfail:
+ self.assertRaisesMsg(test_case.deparray, (InvalidDependString, ValueError), test_case.run)
diff --git a/lib/portage/tests/ebuild/__init__.py b/lib/portage/tests/ebuild/__init__.py
new file mode 100644
index 000000000..e2d487ecb
--- /dev/null
+++ b/lib/portage/tests/ebuild/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 1998-2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/tests/ebuild/__test__.py b/lib/portage/tests/ebuild/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/ebuild/__test__.py
diff --git a/lib/portage/tests/ebuild/test_array_fromfile_eof.py b/lib/portage/tests/ebuild/test_array_fromfile_eof.py
new file mode 100644
index 000000000..1f2a1ab7b
--- /dev/null
+++ b/lib/portage/tests/ebuild/test_array_fromfile_eof.py
@@ -0,0 +1,47 @@
+# Copyright 2009-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import array
+import tempfile
+
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.tests import TestCase
+
+class ArrayFromfileEofTestCase(TestCase):
+
+ def testArrayFromfileEof(self):
+ # This tests if the following python issue is fixed
+ # in the currently running version of python:
+ # https://bugs.python.org/issue5334
+
+ input_data = "an arbitrary string"
+ input_bytes = _unicode_encode(input_data,
+ encoding='utf_8', errors='strict')
+ f = tempfile.TemporaryFile()
+ f.write(input_bytes)
+
+ f.seek(0)
+ data = []
+ eof = False
+ while not eof:
+ a = array.array('B')
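+ # Ask for one byte more than the file holds so the final read
+ # hits end-of-file and raises EOFError (or IOError).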
+ try:
+ a.fromfile(f, len(input_bytes) + 1)
+ except (EOFError, IOError):
+ # python-3.0 lost data here
+ eof = True
+
+ if not a:
+ eof = True
+ else:
+ try:
+ # Python >=3.2
+ data.append(a.tobytes())
+ except AttributeError:
+ data.append(a.tostring())
+
+ f.close()
+
+ self.assertEqual(input_data, _unicode_decode(b''.join(data),
+ encoding='utf_8', errors='strict'))
diff --git a/lib/portage/tests/ebuild/test_config.py b/lib/portage/tests/ebuild/test_config.py
new file mode 100644
index 000000000..dcb5ffe0d
--- /dev/null
+++ b/lib/portage/tests/ebuild/test_config.py
@@ -0,0 +1,346 @@
+# Copyright 2010-2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import io
+import tempfile
+
+import portage
+from portage import os, shutil, _encodings
+from portage.const import USER_CONFIG_PATH
+from portage.dep import Atom
+from portage.package.ebuild.config import config
+from portage.package.ebuild._config.LicenseManager import LicenseManager
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+from portage.util import normalize_path
+
+class ConfigTestCase(TestCase):
+
+ def testClone(self):
+ """
+ Test the clone via constructor.
+ """
+
+ ebuilds = {
+ "dev-libs/A-1": { },
+ }
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ settings = config(clone=playground.settings)
+ result = playground.run(["=dev-libs/A-1"])
+ pkg, existing_node = result.depgraph._select_package(
+ playground.eroot, Atom("=dev-libs/A-1"))
+ settings.setcpv(pkg)
+
+ # clone after setcpv tests deepcopy of LazyItemsDict
+ settings2 = config(clone=settings)
+ finally:
+ playground.cleanup()
+
+ def testFeaturesMutation(self):
+ """
+ Test whether mutation of config.features updates the FEATURES
+ variable and persists through config.regenerate() calls. Also
+ verify that features_set._prune_overrides() works correctly.
+ """
+ playground = ResolverPlayground()
+ try:
+ settings = config(clone=playground.settings)
+
+ settings.features.add('noclean')
+ self.assertEqual('noclean' in settings['FEATURES'].split(), True)
+ settings.regenerate()
+ self.assertEqual('noclean' in settings['FEATURES'].split(), True)
+
+ settings.features.discard('noclean')
+ self.assertEqual('noclean' in settings['FEATURES'].split(), False)
+ settings.regenerate()
+ self.assertEqual('noclean' in settings['FEATURES'].split(), False)
+
+ settings.features.add('noclean')
+ self.assertEqual('noclean' in settings['FEATURES'].split(), True)
+ settings.regenerate()
+ self.assertEqual('noclean' in settings['FEATURES'].split(), True)
+
+ # before: ['noclean', '-noclean', 'noclean']
+ settings.features._prune_overrides()
+ # after: ['noclean']
+ self.assertEqual(settings._features_overrides.count('noclean'), 1)
+ self.assertEqual(settings._features_overrides.count('-noclean'), 0)
+
+ settings.features.remove('noclean')
+
+ # before: ['noclean', '-noclean']
+ settings.features._prune_overrides()
+ # after: ['-noclean']
+ self.assertEqual(settings._features_overrides.count('noclean'), 0)
+ self.assertEqual(settings._features_overrides.count('-noclean'), 1)
+ finally:
+ playground.cleanup()
+
+ def testLicenseManager(self):
+
+ user_config = {
+ "package.license":
+ (
+ "dev-libs/* TEST",
+ "dev-libs/A -TEST2",
+ "=dev-libs/A-2 TEST3 @TEST",
+ "*/* @EULA TEST2",
+ "=dev-libs/C-1 *",
+ "=dev-libs/C-2 -*",
+ ),
+ }
+
+ playground = ResolverPlayground(user_config=user_config)
+ try:
+ portage.util.noiselimit = -2
+
+ license_group_locations = (os.path.join(playground.settings.repositories["test_repo"].location, "profiles"),)
+ pkg_license = os.path.join(playground.eroot, "etc", "portage")
+
+ lic_man = LicenseManager(license_group_locations, pkg_license)
+
+ self.assertEqual(lic_man._accept_license_str, None)
+ self.assertEqual(lic_man._accept_license, None)
+ self.assertEqual(lic_man._license_groups, {"EULA": frozenset(["TEST"])})
+ self.assertEqual(lic_man._undef_lic_groups, set(["TEST"]))
+
+ self.assertEqual(lic_man.extract_global_changes(), "TEST TEST2")
+ self.assertEqual(lic_man.extract_global_changes(), "")
+
+ lic_man.set_accept_license_str("TEST TEST2")
+ self.assertEqual(lic_man._getPkgAcceptLicense("dev-libs/B-1", "0", None), ["TEST", "TEST2", "TEST"])
+ self.assertEqual(lic_man._getPkgAcceptLicense("dev-libs/A-1", "0", None), ["TEST", "TEST2", "TEST", "-TEST2"])
+ self.assertEqual(lic_man._getPkgAcceptLicense("dev-libs/A-2", "0", None), ["TEST", "TEST2", "TEST", "-TEST2", "TEST3", "@TEST"])
+
+ self.assertEqual(lic_man.get_prunned_accept_license("dev-libs/B-1", [], "TEST", "0", None), "TEST")
+ self.assertEqual(lic_man.get_prunned_accept_license("dev-libs/A-1", [], "-TEST2", "0", None), "")
+ self.assertEqual(lic_man.get_prunned_accept_license("dev-libs/A-2", [], "|| ( TEST TEST2 )", "0", None), "TEST")
+ self.assertEqual(lic_man.get_prunned_accept_license("dev-libs/C-1", [], "TEST5", "0", None), "TEST5")
+ self.assertEqual(lic_man.get_prunned_accept_license("dev-libs/C-2", [], "TEST2", "0", None), "")
+
+ self.assertEqual(lic_man.getMissingLicenses("dev-libs/B-1", [], "TEST", "0", None), [])
+ self.assertEqual(lic_man.getMissingLicenses("dev-libs/A-1", [], "-TEST2", "0", None), ["-TEST2"])
+ self.assertEqual(lic_man.getMissingLicenses("dev-libs/A-2", [], "|| ( TEST TEST2 )", "0", None), [])
+ self.assertEqual(lic_man.getMissingLicenses("dev-libs/A-3", [], "|| ( TEST2 || ( TEST3 TEST4 ) )", "0", None), ["TEST2", "TEST3", "TEST4"])
+ self.assertEqual(lic_man.getMissingLicenses("dev-libs/C-1", [], "TEST5", "0", None), [])
+ self.assertEqual(lic_man.getMissingLicenses("dev-libs/C-2", [], "TEST2", "0", None), ["TEST2"])
+ self.assertEqual(lic_man.getMissingLicenses("dev-libs/D-1", [], "", "0", None), [])
+ finally:
+ portage.util.noiselimit = 0
+ playground.cleanup()
+
+ def testPackageMaskOrder(self):
+
+ ebuilds = {
+ "dev-libs/A-1": { },
+ "dev-libs/B-1": { },
+ "dev-libs/C-1": { },
+ "dev-libs/D-1": { },
+ "dev-libs/E-1": { },
+ }
+
+ repo_configs = {
+ "test_repo": {
+ "package.mask":
+ (
+ "dev-libs/A",
+ "dev-libs/C",
+ ),
+ }
+ }
+
+ profile = {
+ "package.mask":
+ (
+ "-dev-libs/A",
+ "dev-libs/B",
+ "-dev-libs/B",
+ "dev-libs/D",
+ ),
+ }
+
+ user_config = {
+ "package.mask":
+ (
+ "-dev-libs/C",
+ "-dev-libs/D",
+ "dev-libs/E",
+ ),
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = { "--autounmask": 'n' },
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ success = True,
+ mergelist = ["dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ success = True,
+ mergelist = ["dev-libs/C-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/D"],
+ success = True,
+ mergelist = ["dev-libs/D-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E"],
+ options = { "--autounmask": 'n' },
+ success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, repo_configs=repo_configs, \
+ profile=profile, user_config=user_config)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testManifest(self):
+
+ distfiles = {
+ 'B-2.tar.bz2': b'binary\0content',
+ 'C-2.zip': b'binary\0content',
+ 'C-2.tar.bz2': b'binary\0content',
+ }
+
+ ebuilds = {
+ "dev-libs/A-1::old_repo": { },
+ "dev-libs/A-2::new_repo": { },
+ "dev-libs/B-2::new_repo": {"SRC_URI" : "B-2.tar.bz2"},
+ "dev-libs/C-2::new_repo": {"SRC_URI" : "C-2.zip C-2.tar.bz2"},
+ }
+
+ repo_configs = {
+ "new_repo": {
+ "layout.conf":
+ (
+ "profile-formats = pms",
+ "thin-manifests = true",
+ "manifest-hashes = SHA256 SHA512 WHIRLPOOL",
+ "manifest-required-hashes = SHA512",
+ "# use implicit masters"
+ ),
+ }
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1"],
+ mergelist= ["dev-libs/A-1"],
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-2"],
+ mergelist= ["dev-libs/A-2"],
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ repo_configs=repo_configs, distfiles=distfiles)
+ settings = playground.settings
+
+ new_repo_config = settings.repositories["new_repo"]
+ old_repo_config = settings.repositories["old_repo"]
+ self.assertTrue(len(new_repo_config.masters) > 0, "new_repo has no default master")
+ self.assertEqual(new_repo_config.masters[0].location, playground.settings.repositories["test_repo"].location,
+ "new_repo default master is not test_repo")
+ self.assertEqual(new_repo_config.thin_manifest, True,
+ "new_repo_config.thin_manifest != True")
+
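+ # With thin-manifests enabled, a package without distfiles (dev-libs/A)
+ # gets no Manifest at all, while B and C get one DIST entry per distfile.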
+ new_manifest_file = os.path.join(new_repo_config.location, "dev-libs", "A", "Manifest")
+ self.assertNotExists(new_manifest_file)
+
+ new_manifest_file = os.path.join(new_repo_config.location, "dev-libs", "B", "Manifest")
+ f = open(new_manifest_file)
+ self.assertEqual(len(list(f)), 1)
+ f.close()
+
+ new_manifest_file = os.path.join(new_repo_config.location, "dev-libs", "C", "Manifest")
+ f = open(new_manifest_file)
+ self.assertEqual(len(list(f)), 2)
+ f.close()
+
+ old_manifest_file = os.path.join(old_repo_config.location, "dev-libs", "A", "Manifest")
+ f = open(old_manifest_file)
+ self.assertEqual(len(list(f)), 1)
+ f.close()
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testSetCpv(self):
+ """
+ Test setcpv and per-package package.env handling.
+ """
+
+ ebuilds = {
+ "dev-libs/A-1": {"IUSE": "static-libs"},
+ "dev-libs/B-1": {"IUSE": "static-libs"},
+ }
+
+ env_files = {
+ "A" : ("USE=\"static-libs\"",)
+ }
+
+ package_env = (
+ "dev-libs/A A",
+ )
+
+ eprefix = normalize_path(tempfile.mkdtemp())
+ playground = None
+ try:
+ user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH)
+ os.makedirs(user_config_dir)
+
+ with io.open(os.path.join(user_config_dir, "package.env"),
+ mode='w', encoding=_encodings['content']) as f:
+ for line in package_env:
+ f.write(line + "\n")
+
+ env_dir = os.path.join(user_config_dir, "env")
+ os.makedirs(env_dir)
+ for k, v in env_files.items():
+ with io.open(os.path.join(env_dir, k), mode='w',
+ encoding=_encodings['content']) as f:
+ for line in v:
+ f.write(line + "\n")
+
+ playground = ResolverPlayground(eprefix=eprefix, ebuilds=ebuilds)
+ settings = config(clone=playground.settings)
+
+ result = playground.run(["=dev-libs/A-1"])
+ pkg, existing_node = result.depgraph._select_package(
+ playground.eroot, Atom("=dev-libs/A-1"))
+ settings.setcpv(pkg)
+ self.assertTrue("static-libs" in
+ settings["PORTAGE_USE"].split())
+
+ # Test bug #522362, where a USE=static-libs package.env
+ # setting leaked from one setcpv call to the next.
+ pkg, existing_node = result.depgraph._select_package(
+ playground.eroot, Atom("=dev-libs/B-1"))
+ settings.setcpv(pkg)
+ self.assertTrue("static-libs" not in
+ settings["PORTAGE_USE"].split())
+
+ finally:
+ if playground is None:
+ shutil.rmtree(eprefix)
+ else:
+ playground.cleanup()
diff --git a/lib/portage/tests/ebuild/test_doebuild_fd_pipes.py b/lib/portage/tests/ebuild/test_doebuild_fd_pipes.py
new file mode 100644
index 000000000..b89421822
--- /dev/null
+++ b/lib/portage/tests/ebuild/test_doebuild_fd_pipes.py
@@ -0,0 +1,138 @@
+# Copyright 2013-2016 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage import os
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.package.ebuild._ipc.QueryCommand import QueryCommand
+from portage.util._async.ForkProcess import ForkProcess
+from portage.util._async.TaskScheduler import TaskScheduler
+from _emerge.Package import Package
+from _emerge.PipeReader import PipeReader
+
+class DoebuildProcess(ForkProcess):
+
+ __slots__ = ('doebuild_kwargs', 'doebuild_pargs')
+
+ def _run(self):
+ return portage.doebuild(*self.doebuild_pargs, **self.doebuild_kwargs)
+
+class DoebuildFdPipesTestCase(TestCase):
+
+ def testDoebuild(self):
+ """
+ Invoke portage.doebuild() with the fd_pipes parameter, and
+ check that the expected output appears in the pipe. This
+ functionality is not used by portage internally, but it is
+ supported for API consumers (see bug #475812).
+ """
+
+ output_fd = 200
+ ebuild_body = ['S=${WORKDIR}']
+ for phase_func in ('pkg_info', 'pkg_nofetch', 'pkg_pretend',
+ 'pkg_setup', 'src_unpack', 'src_prepare', 'src_configure',
+ 'src_compile', 'src_test', 'src_install'):
+ ebuild_body.append(('%s() { echo ${EBUILD_PHASE}'
+ ' 1>&%s; }') % (phase_func, output_fd))
+
+ ebuild_body.append('')
+ ebuild_body = '\n'.join(ebuild_body)
+
+ ebuilds = {
+ 'app-misct/foo-1': {
+ 'EAPI' : '5',
+ "MISC_CONTENT": ebuild_body,
+ }
+ }
+
+ # Override things that may be unavailable, or may have portability
+ # issues when running tests in exotic environments.
+ # prepstrip - bug #447810 (bash read builtin EINTR problem)
+ true_symlinks = ("find", "prepstrip", "sed", "scanelf")
+ true_binary = portage.process.find_binary("true")
+ self.assertEqual(true_binary is None, False,
+ "true command not found")
+
+ dev_null = open(os.devnull, 'wb')
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ QueryCommand._db = playground.trees
+ root_config = playground.trees[playground.eroot]['root_config']
+ portdb = root_config.trees["porttree"].dbapi
+ settings = portage.config(clone=playground.settings)
+ if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
+ settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
+ settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS")
+
+ settings.features.add("noauto")
+ settings.features.add("test")
+ settings['PORTAGE_PYTHON'] = portage._python_interpreter
+ settings['PORTAGE_QUIET'] = "1"
+ settings['PYTHONDONTWRITEBYTECODE'] = os.environ.get("PYTHONDONTWRITEBYTECODE", "")
+
+ fake_bin = os.path.join(settings["EPREFIX"], "bin")
+ portage.util.ensure_dirs(fake_bin)
+ for x in true_symlinks:
+ os.symlink(true_binary, os.path.join(fake_bin, x))
+
+ settings["__PORTAGE_TEST_PATH_OVERRIDE"] = fake_bin
+ settings.backup_changes("__PORTAGE_TEST_PATH_OVERRIDE")
+
+ cpv = 'app-misct/foo-1'
+ metadata = dict(zip(Package.metadata_keys,
+ portdb.aux_get(cpv, Package.metadata_keys)))
+
+ pkg = Package(built=False, cpv=cpv, installed=False,
+ metadata=metadata, root_config=root_config,
+ type_name='ebuild')
+ settings.setcpv(pkg)
+ ebuildpath = portdb.findname(cpv)
+ self.assertNotEqual(ebuildpath, None)
+
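+ # Run each phase with fd_pipes redirecting fd 200 into a pipe, and
+ # verify that the phase name echoed by the stub phase functions
+ # above arrives at the PipeReader.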
+ for phase in ('info', 'nofetch',
+ 'pretend', 'setup', 'unpack', 'prepare', 'configure',
+ 'compile', 'test', 'install', 'qmerge', 'clean', 'merge'):
+
+ pr, pw = os.pipe()
+
+ producer = DoebuildProcess(doebuild_pargs=(ebuildpath, phase),
+ doebuild_kwargs={"settings" : settings,
+ "mydbapi": portdb, "tree": "porttree",
+ "vartree": root_config.trees["vartree"],
+ "fd_pipes": {
+ 1: dev_null.fileno(),
+ 2: dev_null.fileno(),
+ output_fd: pw,
+ },
+ "prev_mtimes": {}})
+
+ consumer = PipeReader(
+ input_files={"producer" : pr})
+
+ task_scheduler = TaskScheduler(iter([producer, consumer]),
+ max_jobs=2)
+
+ try:
+ task_scheduler.start()
+ finally:
+ # PipeReader closes pr
+ os.close(pw)
+
+ task_scheduler.wait()
+ output = portage._unicode_decode(
+ consumer.getvalue()).rstrip("\n")
+
+ if task_scheduler.returncode != os.EX_OK:
+ portage.writemsg(output, noiselevel=-1)
+
+ self.assertEqual(task_scheduler.returncode, os.EX_OK)
+
+ if phase not in ('clean', 'merge', 'qmerge'):
+ self.assertEqual(phase, output)
+
+ finally:
+ dev_null.close()
+ playground.cleanup()
+ QueryCommand._db = None
diff --git a/lib/portage/tests/ebuild/test_doebuild_spawn.py b/lib/portage/tests/ebuild/test_doebuild_spawn.py
new file mode 100644
index 000000000..6b344658f
--- /dev/null
+++ b/lib/portage/tests/ebuild/test_doebuild_spawn.py
@@ -0,0 +1,106 @@
+# Copyright 2010-2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import textwrap
+
+from portage import os
+from portage import _python_interpreter
+from portage import _shell_quote
+from portage.const import EBUILD_SH_BINARY
+from portage.package.ebuild.config import config
+from portage.package.ebuild.doebuild import spawn as doebuild_spawn
+from portage.package.ebuild._spawn_nofetch import spawn_nofetch
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.global_event_loop import global_event_loop
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
+from _emerge.Package import Package
+
+class DoebuildSpawnTestCase(TestCase):
+ """
+ Invoke portage.package.ebuild.doebuild.spawn() with a
+ minimal environment. This gives coverage to some of
+ the ebuild execution internals, like ebuild.sh,
+ AbstractEbuildProcess, and EbuildIpcDaemon.
+ """
+
+ def testDoebuildSpawn(self):
+
+ ebuild_body = textwrap.dedent("""
+ pkg_nofetch() { : ; }
+ """)
+
+ ebuilds = {
+ 'sys-apps/portage-2.1': {
+ 'EAPI' : '2',
+ 'IUSE' : 'build doc epydoc python3 selinux',
+ 'KEYWORDS' : 'x86',
+ 'LICENSE' : 'GPL-2',
+ 'RDEPEND' : '>=app-shells/bash-3.2_p17 >=dev-lang/python-2.6',
+ 'SLOT' : '0',
+ "MISC_CONTENT": ebuild_body,
+ }
+ }
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ root_config = playground.trees[playground.eroot]['root_config']
+ portdb = root_config.trees["porttree"].dbapi
+ settings = config(clone=playground.settings)
+ if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
+ settings["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
+ settings.backup_changes("__PORTAGE_TEST_HARDLINK_LOCKS")
+
+ cpv = 'sys-apps/portage-2.1'
+ metadata = dict(zip(Package.metadata_keys,
+ portdb.aux_get(cpv, Package.metadata_keys)))
+
+ pkg = Package(built=False, cpv=cpv, installed=False,
+ metadata=metadata, root_config=root_config,
+ type_name='ebuild')
+ settings.setcpv(pkg)
+ settings['PORTAGE_PYTHON'] = _python_interpreter
+ settings['PORTAGE_BUILDDIR'] = os.path.join(
+ settings['PORTAGE_TMPDIR'], cpv)
+ settings['PYTHONDONTWRITEBYTECODE'] = os.environ.get('PYTHONDONTWRITEBYTECODE', '')
+ settings['T'] = os.path.join(
+ settings['PORTAGE_BUILDDIR'], 'temp')
+ for x in ('PORTAGE_BUILDDIR', 'T'):
+ os.makedirs(settings[x])
+ # Create a fake environment file to pretend that the ebuild
+ # has already been sourced.
+ open(os.path.join(settings['T'], 'environment'), 'wb').close()
+
+ scheduler = SchedulerInterface(global_event_loop())
+ for phase in ('_internal_test',):
+
+ # Test EbuildSpawnProcess by calling doebuild.spawn() with
+ # returnpid=False. This case is no longer used by portage
+ # internals since EbuildPhase is used instead and that passes
+ # returnpid=True to doebuild.spawn().
+ rval = doebuild_spawn("%s %s" % (_shell_quote(
+ os.path.join(settings["PORTAGE_BIN_PATH"],
+ os.path.basename(EBUILD_SH_BINARY))), phase),
+ settings, free=1)
+ self.assertEqual(rval, os.EX_OK)
+
+ ebuild_phase = EbuildPhase(background=False,
+ phase=phase, scheduler=scheduler,
+ settings=settings)
+ ebuild_phase.start()
+ ebuild_phase.wait()
+ self.assertEqual(ebuild_phase.returncode, os.EX_OK)
+
+ ebuild_phase = MiscFunctionsProcess(background=False,
+ commands=['success_hooks'],
+ scheduler=scheduler, settings=settings)
+ ebuild_phase.start()
+ ebuild_phase.wait()
+ self.assertEqual(ebuild_phase.returncode, os.EX_OK)
+
+ spawn_nofetch(portdb, portdb.findname(cpv), settings=settings)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/ebuild/test_ipc_daemon.py b/lib/portage/tests/ebuild/test_ipc_daemon.py
new file mode 100644
index 000000000..e6da51a76
--- /dev/null
+++ b/lib/portage/tests/ebuild/test_ipc_daemon.py
@@ -0,0 +1,162 @@
+# Copyright 2010-2016 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import tempfile
+import time
+from portage import os
+from portage import shutil
+from portage import _python_interpreter
+from portage.tests import TestCase
+from portage.const import PORTAGE_BIN_PATH
+from portage.const import PORTAGE_PYM_PATH
+from portage.const import BASH_BINARY
+from portage.locks import hardlock_cleanup
+from portage.package.ebuild._ipc.ExitCommand import ExitCommand
+from portage.util import ensure_dirs
+from portage.util._async.ForkProcess import ForkProcess
+from portage.util._async.TaskScheduler import TaskScheduler
+from portage.util._eventloop.global_event_loop import global_event_loop
+from _emerge.SpawnProcess import SpawnProcess
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.EbuildIpcDaemon import EbuildIpcDaemon
+
+class SleepProcess(ForkProcess):
+ """
+ Emulate the sleep command, in order to ensure a consistent
+ return code when it is killed by SIGTERM (see bug #437180).
+ """
+ __slots__ = ('seconds',)
+ def _run(self):
+ time.sleep(self.seconds)
+
+class IpcDaemonTestCase(TestCase):
+
+ _SCHEDULE_TIMEOUT = 40 # seconds
+
+ def testIpcDaemon(self):
+ event_loop = global_event_loop()
+ tmpdir = tempfile.mkdtemp()
+ build_dir = None
+ try:
+ env = {}
+
+ # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
+ # need to be inherited by ebuild subprocesses.
+ if 'PORTAGE_USERNAME' in os.environ:
+ env['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
+ if 'PORTAGE_GRPNAME' in os.environ:
+ env['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']
+
+ env['PORTAGE_PYTHON'] = _python_interpreter
+ env['PORTAGE_BIN_PATH'] = PORTAGE_BIN_PATH
+ env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
+ env['PORTAGE_BUILDDIR'] = os.path.join(tmpdir, 'cat', 'pkg-1')
+ env['PYTHONDONTWRITEBYTECODE'] = os.environ.get('PYTHONDONTWRITEBYTECODE', '')
+
+ if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
+ env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
+
+ build_dir = EbuildBuildDir(
+ scheduler=event_loop,
+ settings=env)
+ event_loop.run_until_complete(build_dir.async_lock())
+ ensure_dirs(env['PORTAGE_BUILDDIR'])
+
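+ # The daemon and the ebuild-ipc client communicate over a pair of
+ # named pipes inside PORTAGE_BUILDDIR.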
+ input_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_in')
+ output_fifo = os.path.join(env['PORTAGE_BUILDDIR'], '.ipc_out')
+ os.mkfifo(input_fifo)
+ os.mkfifo(output_fifo)
+
+ for exitcode in (0, 1, 2):
+ exit_command = ExitCommand()
+ commands = {'exit' : exit_command}
+ daemon = EbuildIpcDaemon(commands=commands,
+ input_fifo=input_fifo,
+ output_fifo=output_fifo)
+ proc = SpawnProcess(
+ args=[BASH_BINARY, "-c",
+ '"$PORTAGE_BIN_PATH"/ebuild-ipc exit %d' % exitcode],
+ env=env)
+ task_scheduler = TaskScheduler(iter([daemon, proc]),
+ max_jobs=2, event_loop=event_loop)
+
+ self.received_command = False
+ def exit_command_callback():
+ self.received_command = True
+ task_scheduler.cancel()
+
+ exit_command.reply_hook = exit_command_callback
+ start_time = time.time()
+ self._run(event_loop, task_scheduler, self._SCHEDULE_TIMEOUT)
+
+ hardlock_cleanup(env['PORTAGE_BUILDDIR'],
+ remove_all_locks=True)
+
+ self.assertEqual(self.received_command, True,
+ "command not received after %d seconds" % \
+ (time.time() - start_time,))
+ self.assertEqual(proc.isAlive(), False)
+ self.assertEqual(daemon.isAlive(), False)
+ self.assertEqual(exit_command.exitcode, exitcode)
+
+ # Intentionally short timeout test for EventLoop/AsyncScheduler.
+ # Use a ridiculously long sleep_time_s in case the user's
+ # system is heavily loaded (see bug #436334).
+ sleep_time_s = 600 # seconds
+ short_timeout_s = 0.010 # seconds
+
+ for i in range(3):
+ exit_command = ExitCommand()
+ commands = {'exit' : exit_command}
+ daemon = EbuildIpcDaemon(commands=commands,
+ input_fifo=input_fifo,
+ output_fifo=output_fifo)
+ proc = SleepProcess(seconds=sleep_time_s)
+ task_scheduler = TaskScheduler(iter([daemon, proc]),
+ max_jobs=2, event_loop=event_loop)
+
+ self.received_command = False
+ def exit_command_callback():
+ self.received_command = True
+ task_scheduler.cancel()
+
+ exit_command.reply_hook = exit_command_callback
+ start_time = time.time()
+ self._run(event_loop, task_scheduler, short_timeout_s)
+
+ hardlock_cleanup(env['PORTAGE_BUILDDIR'],
+ remove_all_locks=True)
+
+ self.assertEqual(self.received_command, False,
+ "command received after %d seconds" % \
+ (time.time() - start_time,))
+ self.assertEqual(proc.isAlive(), False)
+ self.assertEqual(daemon.isAlive(), False)
+ self.assertEqual(proc.returncode == os.EX_OK, False)
+
+ finally:
+ if build_dir is not None:
+ event_loop.run_until_complete(build_dir.async_unlock())
+ shutil.rmtree(tmpdir)
+
+ def _timeout_callback(self, task_scheduler):
+ task_scheduler.cancel()
+ self._exit_callback(task_scheduler)
+
+ def _exit_callback(self, task_scheduler):
+ if not self._run_done.done():
+ self._run_done.set_result(True)
+
+ def _run(self, event_loop, task_scheduler, timeout):
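+ # Run the scheduler until it either exits on its own or the timeout
+ # fires; the timeout handle is always cancelled on the way out.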
+ self._run_done = event_loop.create_future()
+ timeout_handle = event_loop.call_later(timeout,
+ self._timeout_callback, task_scheduler)
+ task_scheduler.addExitListener(self._exit_callback)
+
+ try:
+ task_scheduler.start()
+ event_loop.run_until_complete(self._run_done)
+ event_loop.run_until_complete(task_scheduler.async_wait())
+ finally:
+ timeout_handle.cancel()
diff --git a/lib/portage/tests/ebuild/test_spawn.py b/lib/portage/tests/ebuild/test_spawn.py
new file mode 100644
index 000000000..a38e10972
--- /dev/null
+++ b/lib/portage/tests/ebuild/test_spawn.py
@@ -0,0 +1,57 @@
+# Copyright 1998-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import io
+import sys
+import tempfile
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage.const import BASH_BINARY
+from portage.tests import TestCase
+from portage.util._eventloop.global_event_loop import global_event_loop
+from _emerge.SpawnProcess import SpawnProcess
+
+class SpawnTestCase(TestCase):
+
+ def testLogfile(self):
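+ # Spawn bash with SpawnProcess's logfile support and verify that
+ # the log captures the echoed test string verbatim.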
+ logfile = None
+ try:
+ fd, logfile = tempfile.mkstemp()
+ os.close(fd)
+ null_fd = os.open('/dev/null', os.O_RDWR)
+ test_string = 2 * "blah blah blah\n"
+ proc = SpawnProcess(
+ args=[BASH_BINARY, "-c",
+ "echo -n '%s'" % test_string],
+ env={},
+ fd_pipes={
+ 0: portage._get_stdin().fileno(),
+ 1: null_fd,
+ 2: null_fd
+ },
+ scheduler=global_event_loop(),
+ logfile=logfile)
+ proc.start()
+ os.close(null_fd)
+ self.assertEqual(proc.wait(), os.EX_OK)
+ f = io.open(_unicode_encode(logfile,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='strict')
+ log_content = f.read()
+ f.close()
+ # When logging passes through a pty, this comparison will fail
+ # unless the oflag terminal attributes have the termios.OPOST
+ # bit disabled. Otherwise, transformations such as \n -> \r\n
+ # may occur.
+ self.assertEqual(test_string, log_content)
+ finally:
+ if logfile:
+ try:
+ os.unlink(logfile)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
diff --git a/lib/portage/tests/ebuild/test_use_expand_incremental.py b/lib/portage/tests/ebuild/test_use_expand_incremental.py
new file mode 100644
index 000000000..a58f08cb9
--- /dev/null
+++ b/lib/portage/tests/ebuild/test_use_expand_incremental.py
@@ -0,0 +1,132 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import io
+
+from portage import os, _encodings
+from portage.dep import Atom
+from portage.package.ebuild.config import config
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.util import ensure_dirs
+
+class UseExpandIncrementalTestCase(TestCase):
+
+ def testUseExpandIncremental(self):
+
+ profiles = (
+ (
+ 'base',
+ {
+ "eapi": ("5",),
+ "parent": ("..",),
+ "make.defaults": (
+ "INPUT_DEVICES=\"keyboard mouse\"",
+ "PYTHON_TARGETS=\"python2_7 python3_3\"",
+ ("USE_EXPAND=\"INPUT_DEVICES PYTHON_TARGETS "
+ "VIDEO_CARDS\""),
+ )
+ }
+ ),
+ (
+ 'default/linux',
+ {
+ "eapi": ("5",),
+ "make.defaults": (
+ "VIDEO_CARDS=\"dummy fbdev v4l\"",
+ )
+ }
+ ),
+ (
+ 'default/linux/x86',
+ {
+ "eapi": ("5",),
+ "make.defaults": (
+ # Test negative incremental for bug 530222.
+ "PYTHON_TARGETS=\"-python3_3\"",
+ ),
+ "parent": ("../../../base",
+ "../../../mixins/python/3.4",
+ ".."
+ )
+ }
+ ),
+ (
+ 'mixins/python/3.4',
+ {
+ "eapi": ("5",),
+ "make.defaults": (
+ "PYTHON_TARGETS=\"python3_4\"",
+ )
+ }
+ ),
+ )
+
+ # USE_EXPAND variable settings in make.conf will cause
+ # profile settings for the same variable to be discarded
+ # (non-incremental behavior). PMS does not govern make.conf
+ # behavior.
+ user_config = {
+ "make.conf" : (
+ "VIDEO_CARDS=\"intel\"",
+ )
+ }
+
+ ebuilds = {
+ "x11-base/xorg-drivers-1.15": {
+ "EAPI": "5",
+ "IUSE": ("input_devices_keyboard input_devices_mouse "
+ "videos_cards_dummy video_cards_fbdev "
+ "video_cards_v4l video_cards_intel")
+ },
+ "sys-apps/portage-2.2.14": {
+ "EAPI": "5",
+ "IUSE": ("python_targets_python2_7 "
+ "python_targets_python3_3 python_targets_python3_4")
+ },
+ }
+
+ package_expected_use = (
+ ("x11-base/xorg-drivers-1.15", ("input_devices_keyboard",
+ "input_devices_mouse", "video_cards_intel",)),
+ ("sys-apps/portage-2.2.14", ("python_targets_python2_7",
+ "python_targets_python3_4"))
+ )
+
+ playground = ResolverPlayground(debug=False,
+ ebuilds=ebuilds, user_config=user_config)
+ try:
+ repo_dir = (playground.settings.repositories.
+ get_location_for_name("test_repo"))
+ profile_root = os.path.join(repo_dir, "profiles")
+
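+ # Write the profile hierarchy defined above into the test
+ # repository before reloading the config.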
+ for p, data in profiles:
+ prof_path = os.path.join(profile_root, p)
+ ensure_dirs(prof_path)
+ for k, v in data.items():
+ with io.open(os.path.join(prof_path, k), mode="w",
+ encoding=_encodings["repo.content"]) as f:
+ for line in v:
+ f.write("%s\n" % line)
+
+ # The config must be reloaded in order to account
+ # for the above profile customizations.
+ playground.reload_config()
+
+ depgraph = playground.run(
+ ["=x11-base/xorg-drivers-1.15"]).depgraph
+ settings = config(clone=playground.settings)
+
+ for cpv, expected_use in package_expected_use:
+ pkg, existing_node = depgraph._select_package(
+ playground.eroot, Atom("=" + cpv))
+ settings.setcpv(pkg)
+ expected = frozenset(expected_use)
+ got = frozenset(settings["PORTAGE_USE"].split())
+ self.assertEqual(got, expected,
+ "%s != %s" % (got, expected))
+
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/emerge/__init__.py b/lib/portage/tests/emerge/__init__.py
new file mode 100644
index 000000000..532918b6a
--- /dev/null
+++ b/lib/portage/tests/emerge/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/tests/emerge/__test__.py b/lib/portage/tests/emerge/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/emerge/__test__.py
diff --git a/lib/portage/tests/emerge/test_config_protect.py b/lib/portage/tests/emerge/test_config_protect.py
new file mode 100644
index 000000000..06ab059ee
--- /dev/null
+++ b/lib/portage/tests/emerge/test_config_protect.py
@@ -0,0 +1,293 @@
+# Copyright 2014-2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import io
+from functools import partial
+import shutil
+import stat
+import subprocess
+import sys
+import time
+
+import portage
+from portage import os
+from portage import _encodings, _unicode_decode
+from portage.const import BASH_BINARY, PORTAGE_PYM_PATH
+from portage.process import find_binary
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.util import (ensure_dirs, find_updated_config_files,
+ shlex_split)
+
+class ConfigProtectTestCase(TestCase):
+
+ def testConfigProtect(self):
+ """
+ Demonstrates many different scenarios. For example:
+
+ * regular file replaces regular file
+ * regular file replaces symlink
+ * regular file replaces directory
+ * symlink replaces symlink
+ * symlink replaces regular file
+ * symlink replaces directory
+ * directory replaces regular file
+ * directory replaces symlink
+ """
+
+ debug = False
+
+ content_A_1 = """
+S="${WORKDIR}"
+
+src_install() {
+ insinto /etc/A
+ keepdir /etc/A/dir_a
+ keepdir /etc/A/symlink_replaces_dir
+ keepdir /etc/A/regular_replaces_dir
+ echo regular_a_1 > "${T}"/regular_a
+ doins "${T}"/regular_a
+ echo regular_b_1 > "${T}"/regular_b
+ doins "${T}"/regular_b
+ dosym regular_a /etc/A/regular_replaces_symlink
+ dosym regular_b /etc/A/symlink_replaces_symlink
+ echo regular_replaces_regular_1 > \
+ "${T}"/regular_replaces_regular
+ doins "${T}"/regular_replaces_regular
+ echo symlink_replaces_regular > \
+ "${T}"/symlink_replaces_regular
+ doins "${T}"/symlink_replaces_regular
+}
+
+"""
+
+ content_A_2 = """
+S="${WORKDIR}"
+
+src_install() {
+ insinto /etc/A
+ keepdir /etc/A/dir_a
+ dosym dir_a /etc/A/symlink_replaces_dir
+ echo regular_replaces_dir > "${T}"/regular_replaces_dir
+ doins "${T}"/regular_replaces_dir
+ echo regular_a_2 > "${T}"/regular_a
+ doins "${T}"/regular_a
+ echo regular_b_2 > "${T}"/regular_b
+ doins "${T}"/regular_b
+ echo regular_replaces_symlink > \
+ "${T}"/regular_replaces_symlink
+ doins "${T}"/regular_replaces_symlink
+ dosym regular_b /etc/A/symlink_replaces_symlink
+ echo regular_replaces_regular_2 > \
+ "${T}"/regular_replaces_regular
+ doins "${T}"/regular_replaces_regular
+ dosym regular_a /etc/A/symlink_replaces_regular
+}
+
+"""
+
+ ebuilds = {
+ "dev-libs/A-1": {
+ "EAPI" : "5",
+ "IUSE" : "+flag",
+ "KEYWORDS": "x86",
+ "LICENSE": "GPL-2",
+ "MISC_CONTENT": content_A_1,
+ },
+ "dev-libs/A-2": {
+ "EAPI" : "5",
+ "IUSE" : "+flag",
+ "KEYWORDS": "x86",
+ "LICENSE": "GPL-2",
+ "MISC_CONTENT": content_A_2,
+ },
+ }
+
+ playground = ResolverPlayground(
+ ebuilds=ebuilds, debug=debug)
+ settings = playground.settings
+ eprefix = settings["EPREFIX"]
+ eroot = settings["EROOT"]
+ var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
+
+ portage_python = portage._python_interpreter
+ dispatch_conf_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.sbindir, "dispatch-conf"))
+ emerge_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.bindir, "emerge"))
+ etc_update_cmd = (BASH_BINARY,
+ os.path.join(self.sbindir, "etc-update"))
+ etc_update_auto = etc_update_cmd + ("--automode", "-5",)
+
+ config_protect = "/etc"
+
+ def modify_files(dir_path):
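+ # Append to every regular file and re-point every symlink under
+ # dir_path, so the next merge treats them as locally modified.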
+ for name in os.listdir(dir_path):
+ path = os.path.join(dir_path, name)
+ st = os.lstat(path)
+ if stat.S_ISREG(st.st_mode):
+ with io.open(path, mode='a',
+ encoding=_encodings["stdio"]) as f:
+ f.write("modified at %d\n" % time.time())
+ elif stat.S_ISLNK(st.st_mode):
+ old_dest = os.readlink(path)
+ os.unlink(path)
+ os.symlink(old_dest +
+ " modified at %d" % time.time(), path)
+
+ def updated_config_files(count):
+ self.assertEqual(count,
+ sum(len(x[1]) for x in find_updated_config_files(eroot,
+ shlex_split(config_protect))))
+
+ test_commands = (
+ etc_update_cmd,
+ dispatch_conf_cmd,
+ emerge_cmd + ("-1", "=dev-libs/A-1"),
+ partial(updated_config_files, 0),
+ emerge_cmd + ("-1", "=dev-libs/A-2"),
+ partial(updated_config_files, 2),
+ etc_update_auto,
+ partial(updated_config_files, 0),
+ emerge_cmd + ("-1", "=dev-libs/A-2"),
+ partial(updated_config_files, 0),
+ # Test bug #523684, where a file renamed or removed by the
+ # admin forces replacement files to be merged with config
+ # protection.
+ partial(shutil.rmtree,
+ os.path.join(eprefix, "etc", "A")),
+ emerge_cmd + ("-1", "=dev-libs/A-2"),
+ partial(updated_config_files, 8),
+ etc_update_auto,
+ partial(updated_config_files, 0),
+ # Modify some config files, and verify that it triggers
+ # config protection.
+ partial(modify_files,
+ os.path.join(eroot, "etc", "A")),
+ emerge_cmd + ("-1", "=dev-libs/A-2"),
+ partial(updated_config_files, 6),
+ etc_update_auto,
+ partial(updated_config_files, 0),
+ # Modify some config files, downgrade to A-1, and verify
+ # that config protection works properly when the file
+ # types are changing.
+ partial(modify_files,
+ os.path.join(eroot, "etc", "A")),
+ emerge_cmd + ("-1", "--noconfmem", "=dev-libs/A-1"),
+ partial(updated_config_files, 6),
+ etc_update_auto,
+ partial(updated_config_files, 0),
+ )
+
+ distdir = playground.distdir
+ fake_bin = os.path.join(eprefix, "bin")
+ portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
+
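+ # Put the fake bin directory first in PATH, and the portage modules
+ # under test first in PYTHONPATH, so the subprocesses use them
+ # instead of anything installed on the host.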
+ path = os.environ.get("PATH")
+ if path is not None and not path.strip():
+ path = None
+ if path is None:
+ path = ""
+ else:
+ path = ":" + path
+ path = fake_bin + path
+
+ pythonpath = os.environ.get("PYTHONPATH")
+ if pythonpath is not None and not pythonpath.strip():
+ pythonpath = None
+ if pythonpath is not None and \
+ pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
+ pass
+ else:
+ if pythonpath is None:
+ pythonpath = ""
+ else:
+ pythonpath = ":" + pythonpath
+ pythonpath = PORTAGE_PYM_PATH + pythonpath
+
+ env = {
+ "PORTAGE_OVERRIDE_EPREFIX" : eprefix,
+ "CLEAN_DELAY" : "0",
+ "CONFIG_PROTECT": config_protect,
+ "DISTDIR" : distdir,
+ "EMERGE_DEFAULT_OPTS": "-v",
+ "EMERGE_WARNING_DELAY" : "0",
+ "INFODIR" : "",
+ "INFOPATH" : "",
+ "PATH" : path,
+ "PORTAGE_INST_GID" : str(portage.data.portage_gid),
+ "PORTAGE_INST_UID" : str(portage.data.portage_uid),
+ "PORTAGE_PYTHON" : portage_python,
+ "PORTAGE_REPOSITORIES" : settings.repositories.config_string(),
+ "PORTAGE_TMPDIR" : portage_tmpdir,
+ "PYTHONDONTWRITEBYTECODE" : os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
+ "PYTHONPATH" : pythonpath,
+ "__PORTAGE_TEST_PATH_OVERRIDE" : fake_bin,
+ }
+
+ if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
+ env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
+
+ dirs = [distdir, fake_bin, portage_tmpdir,
+ var_cache_edb]
+ etc_symlinks = ("dispatch-conf.conf", "etc-update.conf")
+ # Override things that may be unavailable, or may have portability
+ # issues when running tests in exotic environments.
+ # prepstrip - bug #447810 (bash read builtin EINTR problem)
+ true_symlinks = ["prepstrip", "scanelf"]
+ true_binary = find_binary("true")
+ self.assertEqual(true_binary is None, False,
+ "true command not found")
+ try:
+ for d in dirs:
+ ensure_dirs(d)
+ for x in true_symlinks:
+ os.symlink(true_binary, os.path.join(fake_bin, x))
+ for x in etc_symlinks:
+ os.symlink(os.path.join(self.cnf_etc_path, x),
+ os.path.join(eprefix, "etc", x))
+ with open(os.path.join(var_cache_edb, "counter"), 'wb') as f:
+ f.write(b"100")
+
+ if debug:
+ # The subprocess inherits both stdout and stderr, for
+ # debugging purposes.
+ stdout = None
+ else:
+ # The subprocess inherits stderr so that any warnings
+ # triggered by python -Wd will be visible.
+ stdout = subprocess.PIPE
+
+ for args in test_commands:
+
+ if hasattr(args, '__call__'):
+ args()
+ continue
+
+ if isinstance(args[0], dict):
+ local_env = env.copy()
+ local_env.update(args[0])
+ args = args[1:]
+ else:
+ local_env = env
+
+ proc = subprocess.Popen(args,
+ env=local_env, stdout=stdout)
+
+ if debug:
+ proc.wait()
+ else:
+ output = proc.stdout.readlines()
+ proc.wait()
+ proc.stdout.close()
+ if proc.returncode != os.EX_OK:
+ for line in output:
+ sys.stderr.write(_unicode_decode(line))
+
+ self.assertEqual(os.EX_OK, proc.returncode,
+ "emerge failed with args %s" % (args,))
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/emerge/test_emerge_blocker_file_collision.py b/lib/portage/tests/emerge/test_emerge_blocker_file_collision.py
new file mode 100644
index 000000000..10d09d843
--- /dev/null
+++ b/lib/portage/tests/emerge/test_emerge_blocker_file_collision.py
@@ -0,0 +1,168 @@
+# Copyright 2016 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+import sys
+
+import portage
+from portage import os
+from portage import _unicode_decode
+from portage.const import PORTAGE_PYM_PATH, USER_CONFIG_PATH
+from portage.process import find_binary
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.util import ensure_dirs
+
+class BlockerFileCollisionEmergeTestCase(TestCase):
+
+ def testBlockerFileCollision(self):
+
+ debug = False
+
+ install_something = """
+S="${WORKDIR}"
+
+src_install() {
+ einfo "installing something..."
+ insinto /usr/lib
+ echo "${PN}" > "${T}/file-collision"
+ doins "${T}/file-collision"
+}
+"""
+
+ ebuilds = {
+ "dev-libs/A-1" : {
+ "EAPI": "6",
+ "MISC_CONTENT": install_something,
+ "RDEPEND": "!dev-libs/B",
+ },
+ "dev-libs/B-1" : {
+ "EAPI": "6",
+ "MISC_CONTENT": install_something,
+ "RDEPEND": "!dev-libs/A",
+ },
+ }
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=debug)
+ settings = playground.settings
+ eprefix = settings["EPREFIX"]
+ eroot = settings["EROOT"]
+ var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
+ user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH)
+
+ portage_python = portage._python_interpreter
+ emerge_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.bindir, "emerge"))
+
+ file_collision = os.path.join(eroot, 'usr/lib/file-collision')
+
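+ # Alternately merge the two mutually blocking packages (with and
+ # without FEATURES=parallel-install) and check that the shared file
+ # is always owned by whichever package was merged last.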
+ test_commands = (
+ emerge_cmd + ("--oneshot", "dev-libs/A",),
+ (lambda: portage.util.grablines(file_collision) == ["A\n"],),
+ emerge_cmd + ("--oneshot", "dev-libs/B",),
+ (lambda: portage.util.grablines(file_collision) == ["B\n"],),
+ emerge_cmd + ("--oneshot", "dev-libs/A",),
+ (lambda: portage.util.grablines(file_collision) == ["A\n"],),
+ ({"FEATURES":"parallel-install"},) + emerge_cmd + ("--oneshot", "dev-libs/B",),
+ (lambda: portage.util.grablines(file_collision) == ["B\n"],),
+ ({"FEATURES":"parallel-install"},) + emerge_cmd + ("-Cq", "dev-libs/B",),
+ (lambda: not os.path.exists(file_collision),),
+ )
+
+ fake_bin = os.path.join(eprefix, "bin")
+ portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
+ profile_path = settings.profile_path
+
+ path = os.environ.get("PATH")
+ if path is not None and not path.strip():
+ path = None
+ if path is None:
+ path = ""
+ else:
+ path = ":" + path
+ path = fake_bin + path
+
+ pythonpath = os.environ.get("PYTHONPATH")
+ if pythonpath is not None and not pythonpath.strip():
+ pythonpath = None
+ if pythonpath is not None and \
+ pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
+ pass
+ else:
+ if pythonpath is None:
+ pythonpath = ""
+ else:
+ pythonpath = ":" + pythonpath
+ pythonpath = PORTAGE_PYM_PATH + pythonpath
+
+ env = {
+ "PORTAGE_OVERRIDE_EPREFIX" : eprefix,
+ "PATH" : path,
+ "PORTAGE_PYTHON" : portage_python,
+ "PORTAGE_REPOSITORIES" : settings.repositories.config_string(),
+ "PYTHONDONTWRITEBYTECODE" : os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
+ "PYTHONPATH" : pythonpath,
+ }
+
+ if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
+ env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
+
+ dirs = [playground.distdir, fake_bin, portage_tmpdir,
+ user_config_dir, var_cache_edb]
+ true_symlinks = ["chown", "chgrp"]
+ true_binary = find_binary("true")
+ self.assertEqual(true_binary is None, False,
+ "true command not found")
+ try:
+ for d in dirs:
+ ensure_dirs(d)
+ for x in true_symlinks:
+ os.symlink(true_binary, os.path.join(fake_bin, x))
+ with open(os.path.join(var_cache_edb, "counter"), 'wb') as f:
+ f.write(b"100")
+ # non-empty system set keeps --depclean quiet
+ with open(os.path.join(profile_path, "packages"), 'w') as f:
+ f.write("*dev-libs/token-system-pkg")
+
+ if debug:
+ # The subprocess inherits both stdout and stderr, for
+ # debugging purposes.
+ stdout = None
+ else:
+ # The subprocess inherits stderr so that any warnings
+ # triggered by python -Wd will be visible.
+ stdout = subprocess.PIPE
+
+ for i, args in enumerate(test_commands):
+
+ if hasattr(args[0], '__call__'):
+ self.assertTrue(args[0](),
+ "callable at index %s failed" % (i,))
+ continue
+
+ if isinstance(args[0], dict):
+ local_env = env.copy()
+ local_env.update(args[0])
+ args = args[1:]
+ else:
+ local_env = env
+
+ proc = subprocess.Popen(args,
+ env=local_env, stdout=stdout)
+
+ if debug:
+ proc.wait()
+ else:
+ output = proc.stdout.readlines()
+ proc.wait()
+ proc.stdout.close()
+ if proc.returncode != os.EX_OK:
+ for line in output:
+ sys.stderr.write(_unicode_decode(line))
+
+ self.assertEqual(os.EX_OK, proc.returncode,
+ "emerge failed with args %s" % (args,))
+ finally:
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/emerge/test_emerge_slot_abi.py b/lib/portage/tests/emerge/test_emerge_slot_abi.py
new file mode 100644
index 000000000..200699396
--- /dev/null
+++ b/lib/portage/tests/emerge/test_emerge_slot_abi.py
@@ -0,0 +1,179 @@
+# Copyright 2012-2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+import sys
+
+import portage
+from portage import os
+from portage import _unicode_decode
+from portage.const import (BASH_BINARY, PORTAGE_PYM_PATH, USER_CONFIG_PATH)
+from portage.process import find_binary
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.util import ensure_dirs
+
+class SlotAbiEmergeTestCase(TestCase):
+
+ def testSlotAbiEmerge(self):
+
+ debug = False
+
+ ebuilds = {
+ "dev-libs/glib-1.2.10" : {
+ "SLOT": "1"
+ },
+ "dev-libs/glib-2.30.2" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "2/2.30"
+ },
+ "dev-libs/glib-2.32.3" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "2/2.32"
+ },
+ "dev-libs/dbus-glib-0.98" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/glib:2=",
+ "RDEPEND": "dev-libs/glib:2="
+ },
+ }
+ installed = {
+ "dev-libs/glib-1.2.10" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "1"
+ },
+ "dev-libs/glib-2.30.2" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "2/2.30"
+ },
+ "dev-libs/dbus-glib-0.98" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/glib:2/2.30=",
+ "RDEPEND": "dev-libs/glib:2/2.30="
+ },
+ }
+
+ world = ["dev-libs/glib:1", "dev-libs/dbus-glib"]
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=debug)
+ settings = playground.settings
+ eprefix = settings["EPREFIX"]
+ eroot = settings["EROOT"]
+ trees = playground.trees
+ portdb = trees[eroot]["porttree"].dbapi
+ vardb = trees[eroot]["vartree"].dbapi
+ var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
+ user_config_dir = os.path.join(eprefix, USER_CONFIG_PATH)
+ package_mask_path = os.path.join(user_config_dir, "package.mask")
+
+ portage_python = portage._python_interpreter
+ ebuild_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.bindir, "ebuild"))
+ emerge_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.bindir, "emerge"))
+
+ test_ebuild = portdb.findname("dev-libs/dbus-glib-0.98")
+ self.assertFalse(test_ebuild is None)
+
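+ # Emerging dev-libs/glib should trigger a slot-operator rebuild of
+ # dbus-glib, updating its recorded := dependency to 2/2.32; after
+ # masking >=glib-2.32, a second emerge should bring it back to 2/2.30.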
+ test_commands = (
+ emerge_cmd + ("--oneshot", "dev-libs/glib",),
+ (lambda: "dev-libs/glib:2/2.32=" in vardb.aux_get("dev-libs/dbus-glib-0.98", ["RDEPEND"])[0],),
+ (BASH_BINARY, "-c", "echo %s >> %s" %
+ tuple(map(portage._shell_quote,
+ (">=dev-libs/glib-2.32", package_mask_path,)))),
+ emerge_cmd + ("--oneshot", "dev-libs/glib",),
+ (lambda: "dev-libs/glib:2/2.30=" in vardb.aux_get("dev-libs/dbus-glib-0.98", ["RDEPEND"])[0],),
+ )
+
+ distdir = playground.distdir
+ pkgdir = playground.pkgdir
+ fake_bin = os.path.join(eprefix, "bin")
+ portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
+ profile_path = settings.profile_path
+
+ path = os.environ.get("PATH")
+ if path is not None and not path.strip():
+ path = None
+ if path is None:
+ path = ""
+ else:
+ path = ":" + path
+ path = fake_bin + path
+
+ pythonpath = os.environ.get("PYTHONPATH")
+ if pythonpath is not None and not pythonpath.strip():
+ pythonpath = None
+ if pythonpath is not None and \
+ pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
+ pass
+ else:
+ if pythonpath is None:
+ pythonpath = ""
+ else:
+ pythonpath = ":" + pythonpath
+ pythonpath = PORTAGE_PYM_PATH + pythonpath
+
+ env = {
+ "PORTAGE_OVERRIDE_EPREFIX" : eprefix,
+ "PATH" : path,
+ "PORTAGE_PYTHON" : portage_python,
+ "PORTAGE_REPOSITORIES" : settings.repositories.config_string(),
+ "PYTHONDONTWRITEBYTECODE" : os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
+ "PYTHONPATH" : pythonpath,
+ }
+
+ if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
+ env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
+
+ dirs = [distdir, fake_bin, portage_tmpdir,
+ user_config_dir, var_cache_edb]
+ true_symlinks = ["chown", "chgrp"]
+ true_binary = find_binary("true")
+ self.assertEqual(true_binary is None, False,
+ "true command not found")
+ try:
+ for d in dirs:
+ ensure_dirs(d)
+ for x in true_symlinks:
+ os.symlink(true_binary, os.path.join(fake_bin, x))
+ with open(os.path.join(var_cache_edb, "counter"), 'wb') as f:
+ f.write(b"100")
+ # non-empty system set keeps --depclean quiet
+ with open(os.path.join(profile_path, "packages"), 'w') as f:
+ f.write("*dev-libs/token-system-pkg")
+
+ if debug:
+ # The subprocess inherits both stdout and stderr, for
+ # debugging purposes.
+ stdout = None
+ else:
+ # The subprocess inherits stderr so that any warnings
+ # triggered by python -Wd will be visible.
+ stdout = subprocess.PIPE
+
+ for i, args in enumerate(test_commands):
+
+ if hasattr(args[0], '__call__'):
+ self.assertTrue(args[0](),
+ "callable at index %s failed" % (i,))
+ continue
+
+ proc = subprocess.Popen(args,
+ env=env, stdout=stdout)
+
+ if debug:
+ proc.wait()
+ else:
+ output = proc.stdout.readlines()
+ proc.wait()
+ proc.stdout.close()
+ if proc.returncode != os.EX_OK:
+ for line in output:
+ sys.stderr.write(_unicode_decode(line))
+
+ self.assertEqual(os.EX_OK, proc.returncode,
+ "emerge failed with args %s" % (args,))
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/emerge/test_global_updates.py b/lib/portage/tests/emerge/test_global_updates.py
new file mode 100644
index 000000000..eb5431059
--- /dev/null
+++ b/lib/portage/tests/emerge/test_global_updates.py
@@ -0,0 +1,41 @@
+# Copyright 2011-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.update import parse_updates
+from portage.dep import Atom
+
+class ParseUpdatesTestCase(TestCase):
+
+ def testParseUpdates(self):
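+		# Each case is (updates file content, expected parsed entries,
+		# expected number of parse errors).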
+ test_cases = (
+ (
+ """
+slotmove invalid_atom 0 3
+slotmove !=invalid/blocker-3* 0 3
+slotmove =valid/atom-3* 0 3 invalid_extra_token
+slotmove =valid/atom-3* 0 3
+slotmove =valid/atom-3* 0 3/3.1
+slotmove =valid/atom-3* 0/0 3
+move valid/atom1 valid/atom2 invalid_extra_token
+move valid/atom1 invalid_atom2
+move invalid_atom1 valid/atom2
+move !invalid/blocker1 valid/atom2
+move valid/atom1 !invalid/blocker2
+move =invalid/operator-1* valid/atom2
+move valid/atom1 =invalid/operator-2*
+move valid/atom1 valid/atom2
+""",
+ [
+ ['slotmove', Atom('=valid/atom-3*'), '0', '3'],
+ ['move', Atom('valid/atom1'), Atom('valid/atom2')],
+ ],
+ 12,
+ ),
+
+ )
+
+ for input_content, expected_output, expected_error_count in test_cases:
+ output_data, errors = parse_updates(input_content)
+ self.assertEqual(output_data, expected_output)
+ self.assertEqual(len(errors), expected_error_count)
diff --git a/lib/portage/tests/emerge/test_simple.py b/lib/portage/tests/emerge/test_simple.py
new file mode 100644
index 000000000..b1402ddd5
--- /dev/null
+++ b/lib/portage/tests/emerge/test_simple.py
@@ -0,0 +1,505 @@
+# Copyright 2011-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+import sys
+
+import portage
+from portage import os
+from portage import _unicode_decode
+from portage.const import (BASH_BINARY, PORTAGE_BASE_PATH,
+ PORTAGE_PYM_PATH, USER_CONFIG_PATH)
+from portage.process import find_binary
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.util import (ensure_dirs, find_updated_config_files,
+ shlex_split)
+
+class SimpleEmergeTestCase(TestCase):
+
+ def _have_python_xml(self):
+ try:
+ __import__("xml.etree.ElementTree")
+ __import__("xml.parsers.expat").parsers.expat.ExpatError
+ except (AttributeError, ImportError):
+ return False
+ return True
+
+ def testSimple(self):
+
+ debug = False
+
+ install_something = """
+S="${WORKDIR}"
+
+pkg_pretend() {
+ einfo "called pkg_pretend for $CATEGORY/$PF"
+}
+
+src_install() {
+ einfo "installing something..."
+ insinto /usr/lib/${P}
+ echo "blah blah blah" > "${T}"/regular-file
+ doins "${T}"/regular-file
+ dosym regular-file /usr/lib/${P}/symlink || die
+
+ # Test CONFIG_PROTECT
+ insinto /etc
+ newins "${T}"/regular-file ${PN}-${SLOT%/*}
+
+ # Test code for bug #381629, using a copyright symbol encoded with latin-1.
+ # We use $(printf "\\xa9") rather than $'\\xa9', since printf apparently
+ # works in any case, while $'\\xa9' transforms to \\xef\\xbf\\xbd under
+ # some conditions. TODO: Find out why it transforms to \\xef\\xbf\\xbd when
+ # running tests for Python 3.2 (even though it's bash that is ultimately
+ # responsible for performing the transformation).
+ local latin_1_dir=/usr/lib/${P}/latin-1-$(printf "\\xa9")-directory
+ insinto "${latin_1_dir}"
+ echo "blah blah blah" > "${T}"/latin-1-$(printf "\\xa9")-regular-file || die
+ doins "${T}"/latin-1-$(printf "\\xa9")-regular-file
+ dosym latin-1-$(printf "\\xa9")-regular-file ${latin_1_dir}/latin-1-$(printf "\\xa9")-symlink || die
+
+ call_has_and_best_version
+}
+
+pkg_config() {
+ einfo "called pkg_config for $CATEGORY/$PF"
+}
+
+pkg_info() {
+ einfo "called pkg_info for $CATEGORY/$PF"
+}
+
+pkg_preinst() {
+ if ! ___eapi_best_version_and_has_version_support_-b_-d_-r; then
+ # The BROOT variable is unset during pkg_* phases for EAPI 7,
+ # therefore best/has_version -b is expected to fail if we attempt
+ # to call it for EAPI 7 here.
+ call_has_and_best_version
+ fi
+}
+
+call_has_and_best_version() {
+ local root_arg
+ if ___eapi_best_version_and_has_version_support_-b_-d_-r; then
+ root_arg="-b"
+ else
+ root_arg="--host-root"
+ fi
+ einfo "called ${EBUILD_PHASE_FUNC} for $CATEGORY/$PF"
+ einfo "EPREFIX=${EPREFIX}"
+ einfo "PORTAGE_OVERRIDE_EPREFIX=${PORTAGE_OVERRIDE_EPREFIX}"
+ einfo "ROOT=${ROOT}"
+ einfo "EROOT=${EROOT}"
+ einfo "SYSROOT=${SYSROOT}"
+ einfo "ESYSROOT=${ESYSROOT}"
+ einfo "BROOT=${BROOT}"
+ # Test that has_version and best_version work correctly with
+ # prefix (involves internal ROOT -> EROOT calculation in order
+ # to support ROOT override via the environment with EAPIs 3
+ # and later which support prefix).
+ if has_version $CATEGORY/$PN:$SLOT ; then
+ einfo "has_version detects an installed instance of $CATEGORY/$PN:$SLOT"
+ einfo "best_version reports that the installed instance is $(best_version $CATEGORY/$PN:$SLOT)"
+ else
+ einfo "has_version does not detect an installed instance of $CATEGORY/$PN:$SLOT"
+ fi
+ if [[ ${EPREFIX} != ${PORTAGE_OVERRIDE_EPREFIX} ]] ; then
+ if has_version ${root_arg} $CATEGORY/$PN:$SLOT ; then
+ einfo "has_version ${root_arg} detects an installed instance of $CATEGORY/$PN:$SLOT"
+ einfo "best_version ${root_arg} reports that the installed instance is $(best_version ${root_arg} $CATEGORY/$PN:$SLOT)"
+ else
+ einfo "has_version ${root_arg} does not detect an installed instance of $CATEGORY/$PN:$SLOT"
+ fi
+ fi
+}
+
+"""
+
+ ebuilds = {
+ "dev-libs/A-1": {
+ "EAPI" : "5",
+ "IUSE" : "+flag",
+ "KEYWORDS": "x86",
+ "LICENSE": "GPL-2",
+ "MISC_CONTENT": install_something,
+ "RDEPEND": "flag? ( dev-libs/B[flag] )",
+ },
+ "dev-libs/B-1": {
+ "EAPI" : "5",
+ "IUSE" : "+flag",
+ "KEYWORDS": "x86",
+ "LICENSE": "GPL-2",
+ "MISC_CONTENT": install_something,
+ },
+ "dev-libs/C-1": {
+ "EAPI" : "7",
+ "KEYWORDS": "~x86",
+ "RDEPEND": "dev-libs/D[flag]",
+ "MISC_CONTENT": install_something,
+ },
+ "dev-libs/D-1": {
+ "EAPI" : "7",
+ "KEYWORDS": "~x86",
+ "IUSE" : "flag",
+ "MISC_CONTENT": install_something,
+ },
+ "virtual/foo-0": {
+ "EAPI" : "5",
+ "KEYWORDS": "x86",
+ "LICENSE": "GPL-2",
+ },
+ }
+
+ installed = {
+ "dev-libs/A-1": {
+ "EAPI" : "5",
+ "IUSE" : "+flag",
+ "KEYWORDS": "x86",
+ "LICENSE": "GPL-2",
+ "RDEPEND": "flag? ( dev-libs/B[flag] )",
+ "USE": "flag",
+ },
+ "dev-libs/B-1": {
+ "EAPI" : "5",
+ "IUSE" : "+flag",
+ "KEYWORDS": "x86",
+ "LICENSE": "GPL-2",
+ "USE": "flag",
+ },
+ "dev-libs/depclean-me-1": {
+ "EAPI" : "5",
+ "IUSE" : "",
+ "KEYWORDS": "x86",
+ "LICENSE": "GPL-2",
+ "USE": "",
+ },
+ "app-misc/depclean-me-1": {
+ "EAPI" : "5",
+ "IUSE" : "",
+ "KEYWORDS": "x86",
+ "LICENSE": "GPL-2",
+ "RDEPEND": "dev-libs/depclean-me",
+ "USE": "",
+ },
+ }
+
+ metadata_xml_files = (
+ (
+ "dev-libs/A",
+ {
+ "flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
+ },
+ ),
+ (
+ "dev-libs/B",
+ {
+ "flags" : "<flag name='flag'>Description of how USE='flag' affects this package</flag>",
+ },
+ ),
+ )
+
+ playground = ResolverPlayground(
+ ebuilds=ebuilds, installed=installed, debug=debug)
+ settings = playground.settings
+ eprefix = settings["EPREFIX"]
+ eroot = settings["EROOT"]
+ trees = playground.trees
+ portdb = trees[eroot]["porttree"].dbapi
+ test_repo_location = settings.repositories["test_repo"].location
+ var_cache_edb = os.path.join(eprefix, "var", "cache", "edb")
+ cachedir = os.path.join(var_cache_edb, "dep")
+ cachedir_pregen = os.path.join(test_repo_location, "metadata", "md5-cache")
+
+ portage_python = portage._python_interpreter
+ dispatch_conf_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.sbindir, "dispatch-conf"))
+ ebuild_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.bindir, "ebuild"))
+ egencache_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.bindir, "egencache"),
+ "--repo", "test_repo",
+ "--repositories-configuration", settings.repositories.config_string())
+ emerge_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.bindir, "emerge"))
+ emaint_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.sbindir, "emaint"))
+ env_update_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.sbindir, "env-update"))
+ etc_update_cmd = (BASH_BINARY,
+ os.path.join(self.sbindir, "etc-update"))
+ fixpackages_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.sbindir, "fixpackages"))
+ portageq_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.bindir, "portageq"))
+ quickpkg_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.bindir, "quickpkg"))
+ regenworld_cmd = (portage_python, "-b", "-Wd",
+ os.path.join(self.sbindir, "regenworld"))
+
+ rm_binary = find_binary("rm")
+ self.assertEqual(rm_binary is None, False,
+ "rm command not found")
+ rm_cmd = (rm_binary,)
+
+ egencache_extra_args = []
+ if self._have_python_xml():
+ egencache_extra_args.append("--update-use-local-desc")
+
+ test_ebuild = portdb.findname("dev-libs/A-1")
+ self.assertFalse(test_ebuild is None)
+
+ cross_prefix = os.path.join(eprefix, "cross_prefix")
+ cross_root = os.path.join(eprefix, "cross_root")
+ cross_eroot = os.path.join(cross_root, eprefix.lstrip(os.sep))
+
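+		# Each test_commands entry is a command argument tuple, optionally
+		# prefixed by a dict of environment overrides, or a callable that
+		# performs an in-process assertion.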
+ test_commands = (
+ env_update_cmd,
+ portageq_cmd + ("envvar", "-v", "CONFIG_PROTECT", "EROOT",
+ "PORTAGE_CONFIGROOT", "PORTAGE_TMPDIR", "USERLAND"),
+ etc_update_cmd,
+ dispatch_conf_cmd,
+ emerge_cmd + ("--version",),
+ emerge_cmd + ("--info",),
+ emerge_cmd + ("--info", "--verbose"),
+ emerge_cmd + ("--list-sets",),
+ emerge_cmd + ("--check-news",),
+ rm_cmd + ("-rf", cachedir),
+ rm_cmd + ("-rf", cachedir_pregen),
+ emerge_cmd + ("--regen",),
+ rm_cmd + ("-rf", cachedir),
+ ({"FEATURES" : "metadata-transfer"},) + \
+ emerge_cmd + ("--regen",),
+ rm_cmd + ("-rf", cachedir),
+ ({"FEATURES" : "metadata-transfer"},) + \
+ emerge_cmd + ("--regen",),
+ rm_cmd + ("-rf", cachedir),
+ egencache_cmd + ("--update",) + tuple(egencache_extra_args),
+ ({"FEATURES" : "metadata-transfer"},) + \
+ emerge_cmd + ("--metadata",),
+ rm_cmd + ("-rf", cachedir),
+ ({"FEATURES" : "metadata-transfer"},) + \
+ emerge_cmd + ("--metadata",),
+ emerge_cmd + ("--metadata",),
+ rm_cmd + ("-rf", cachedir),
+ emerge_cmd + ("--oneshot", "virtual/foo"),
+ lambda: self.assertFalse(os.path.exists(
+ os.path.join(pkgdir, "virtual", "foo-0.tbz2"))),
+ ({"FEATURES" : "unmerge-backup"},) + \
+ emerge_cmd + ("--unmerge", "virtual/foo"),
+ lambda: self.assertTrue(os.path.exists(
+ os.path.join(pkgdir, "virtual", "foo-0.tbz2"))),
+ emerge_cmd + ("--pretend", "dev-libs/A"),
+ ebuild_cmd + (test_ebuild, "manifest", "clean", "package", "merge"),
+ emerge_cmd + ("--pretend", "--tree", "--complete-graph", "dev-libs/A"),
+ emerge_cmd + ("-p", "dev-libs/B"),
+ emerge_cmd + ("-p", "--newrepo", "dev-libs/B"),
+ emerge_cmd + ("-B", "dev-libs/B",),
+ emerge_cmd + ("--oneshot", "--usepkg", "dev-libs/B",),
+
+ # trigger clean prior to pkg_pretend as in bug #390711
+ ebuild_cmd + (test_ebuild, "unpack"),
+ emerge_cmd + ("--oneshot", "dev-libs/A",),
+
+ emerge_cmd + ("--noreplace", "dev-libs/A",),
+ emerge_cmd + ("--config", "dev-libs/A",),
+ emerge_cmd + ("--info", "dev-libs/A", "dev-libs/B"),
+ emerge_cmd + ("--pretend", "--depclean", "--verbose", "dev-libs/B"),
+ emerge_cmd + ("--pretend", "--depclean",),
+ emerge_cmd + ("--depclean",),
+ quickpkg_cmd + ("--include-config", "y", "dev-libs/A",),
+ # Test bug #523684, where a file renamed or removed by the
+ # admin forces replacement files to be merged with config
+ # protection.
+ lambda: self.assertEqual(0,
+ len(list(find_updated_config_files(eroot,
+ shlex_split(settings["CONFIG_PROTECT"]))))),
+ lambda: os.unlink(os.path.join(eprefix, "etc", "A-0")),
+ emerge_cmd + ("--usepkgonly", "dev-libs/A"),
+ lambda: self.assertEqual(1,
+ len(list(find_updated_config_files(eroot,
+ shlex_split(settings["CONFIG_PROTECT"]))))),
+ emaint_cmd + ("--check", "all"),
+ emaint_cmd + ("--fix", "all"),
+ fixpackages_cmd,
+ regenworld_cmd,
+ portageq_cmd + ("match", eroot, "dev-libs/A"),
+ portageq_cmd + ("best_visible", eroot, "dev-libs/A"),
+ portageq_cmd + ("best_visible", eroot, "binary", "dev-libs/A"),
+ portageq_cmd + ("contents", eroot, "dev-libs/A-1"),
+ portageq_cmd + ("metadata", eroot, "ebuild", "dev-libs/A-1", "EAPI", "IUSE", "RDEPEND"),
+ portageq_cmd + ("metadata", eroot, "binary", "dev-libs/A-1", "EAPI", "USE", "RDEPEND"),
+ portageq_cmd + ("metadata", eroot, "installed", "dev-libs/A-1", "EAPI", "USE", "RDEPEND"),
+ portageq_cmd + ("owners", eroot, eroot + "usr"),
+ emerge_cmd + ("-p", eroot + "usr"),
+ emerge_cmd + ("-p", "--unmerge", "-q", eroot + "usr"),
+ emerge_cmd + ("--unmerge", "--quiet", "dev-libs/A"),
+ emerge_cmd + ("-C", "--quiet", "dev-libs/B"),
+
+ # If EMERGE_DEFAULT_OPTS contains --autounmask=n, then --autounmask
+ # must be specified with --autounmask-continue.
+ ({"EMERGE_DEFAULT_OPTS" : "--autounmask=n"},) + \
+ emerge_cmd + ("--autounmask", "--autounmask-continue", "dev-libs/C",),
+ # Verify that the above --autounmask-continue command caused
+ # USE=flag to be applied correctly to dev-libs/D.
+ portageq_cmd + ("match", eroot, "dev-libs/D[flag]"),
+
+ # Test cross-prefix usage, including chpathtool for binpkgs.
+ # EAPI 7
+ ({"EPREFIX" : cross_prefix},) + \
+ emerge_cmd + ("dev-libs/C",),
+ ({"EPREFIX" : cross_prefix},) + \
+ portageq_cmd + ("has_version", cross_prefix, "dev-libs/C"),
+ ({"EPREFIX" : cross_prefix},) + \
+ portageq_cmd + ("has_version", cross_prefix, "dev-libs/D"),
+ ({"ROOT": cross_root},) + emerge_cmd + ("dev-libs/D",),
+ portageq_cmd + ("has_version", cross_eroot, "dev-libs/D"),
+ # EAPI 5
+ ({"EPREFIX" : cross_prefix},) + \
+ emerge_cmd + ("--usepkgonly", "dev-libs/A"),
+ ({"EPREFIX" : cross_prefix},) + \
+ portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"),
+ ({"EPREFIX" : cross_prefix},) + \
+ portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"),
+ ({"EPREFIX" : cross_prefix},) + \
+ emerge_cmd + ("-C", "--quiet", "dev-libs/B"),
+ ({"EPREFIX" : cross_prefix},) + \
+ emerge_cmd + ("-C", "--quiet", "dev-libs/A"),
+ ({"EPREFIX" : cross_prefix},) + \
+ emerge_cmd + ("dev-libs/A",),
+ ({"EPREFIX" : cross_prefix},) + \
+ portageq_cmd + ("has_version", cross_prefix, "dev-libs/A"),
+ ({"EPREFIX" : cross_prefix},) + \
+ portageq_cmd + ("has_version", cross_prefix, "dev-libs/B"),
+
+ # Test ROOT support
+ ({"ROOT": cross_root},) + emerge_cmd + ("dev-libs/B",),
+ portageq_cmd + ("has_version", cross_eroot, "dev-libs/B"),
+ )
+
+ distdir = playground.distdir
+ pkgdir = playground.pkgdir
+ fake_bin = os.path.join(eprefix, "bin")
+ portage_tmpdir = os.path.join(eprefix, "var", "tmp", "portage")
+ profile_path = settings.profile_path
+ user_config_dir = os.path.join(os.sep, eprefix, USER_CONFIG_PATH)
+
+ path = os.environ.get("PATH")
+ if path is not None and not path.strip():
+ path = None
+ if path is None:
+ path = ""
+ else:
+ path = ":" + path
+ path = fake_bin + path
+
+ pythonpath = os.environ.get("PYTHONPATH")
+ if pythonpath is not None and not pythonpath.strip():
+ pythonpath = None
+ if pythonpath is not None and \
+ pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
+ pass
+ else:
+ if pythonpath is None:
+ pythonpath = ""
+ else:
+ pythonpath = ":" + pythonpath
+ pythonpath = PORTAGE_PYM_PATH + pythonpath
+
+ env = {
+ "PORTAGE_OVERRIDE_EPREFIX" : eprefix,
+ "CLEAN_DELAY" : "0",
+ "DISTDIR" : distdir,
+ "EMERGE_WARNING_DELAY" : "0",
+ "INFODIR" : "",
+ "INFOPATH" : "",
+ "PATH" : path,
+ "PKGDIR" : pkgdir,
+ "PORTAGE_INST_GID" : str(portage.data.portage_gid),
+ "PORTAGE_INST_UID" : str(portage.data.portage_uid),
+ "PORTAGE_PYTHON" : portage_python,
+ "PORTAGE_REPOSITORIES" : settings.repositories.config_string(),
+ "PORTAGE_TMPDIR" : portage_tmpdir,
+ "PORT_LOGDIR" : portage_tmpdir,
+ "PYTHONDONTWRITEBYTECODE" : os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
+ "PYTHONPATH" : pythonpath,
+ "__PORTAGE_TEST_PATH_OVERRIDE" : fake_bin,
+ }
+
+ if "__PORTAGE_TEST_HARDLINK_LOCKS" in os.environ:
+ env["__PORTAGE_TEST_HARDLINK_LOCKS"] = \
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"]
+
+ updates_dir = os.path.join(test_repo_location, "profiles", "updates")
+ dirs = [cachedir, cachedir_pregen, cross_eroot, cross_prefix,
+ distdir, fake_bin, portage_tmpdir, updates_dir,
+ user_config_dir, var_cache_edb]
+ etc_symlinks = ("dispatch-conf.conf", "etc-update.conf")
+ # Override things that may be unavailable, or may have portability
+ # issues when running tests in exotic environments.
+ # prepstrip - bug #447810 (bash read builtin EINTR problem)
+ true_symlinks = ["find", "prepstrip", "sed", "scanelf"]
+ true_binary = find_binary("true")
+ self.assertEqual(true_binary is None, False,
+ "true command not found")
+ try:
+ for d in dirs:
+ ensure_dirs(d)
+ for x in true_symlinks:
+ os.symlink(true_binary, os.path.join(fake_bin, x))
+ for x in etc_symlinks:
+ os.symlink(os.path.join(self.cnf_etc_path, x),
+ os.path.join(eprefix, "etc", x))
+ with open(os.path.join(var_cache_edb, "counter"), 'wb') as f:
+ f.write(b"100")
+ # non-empty system set keeps --depclean quiet
+ with open(os.path.join(profile_path, "packages"), 'w') as f:
+ f.write("*dev-libs/token-system-pkg")
+ for cp, xml_data in metadata_xml_files:
+ with open(os.path.join(test_repo_location, cp, "metadata.xml"), 'w') as f:
+ f.write(playground.metadata_xml_template % xml_data)
+ with open(os.path.join(updates_dir, "1Q-2010"), 'w') as f:
+ f.write("""
+slotmove =app-doc/pms-3 2 3
+move dev-util/git dev-vcs/git
+""")
+
+ if debug:
+ # The subprocess inherits both stdout and stderr, for
+ # debugging purposes.
+ stdout = None
+ else:
+ # The subprocess inherits stderr so that any warnings
+ # triggered by python -Wd will be visible.
+ stdout = subprocess.PIPE
+
+ for args in test_commands:
+
+ if hasattr(args, '__call__'):
+ args()
+ continue
+
+ if isinstance(args[0], dict):
+ local_env = env.copy()
+ local_env.update(args[0])
+ args = args[1:]
+ else:
+ local_env = env
+
+ proc = subprocess.Popen(args,
+ env=local_env, stdout=stdout)
+
+ if debug:
+ proc.wait()
+ else:
+ output = proc.stdout.readlines()
+ proc.wait()
+ proc.stdout.close()
+ if proc.returncode != os.EX_OK:
+ for line in output:
+ sys.stderr.write(_unicode_decode(line))
+
+ self.assertEqual(os.EX_OK, proc.returncode,
+ "emerge failed with args %s" % (args,))
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/env/__init__.py b/lib/portage/tests/env/__init__.py
new file mode 100644
index 000000000..cbeabe5c6
--- /dev/null
+++ b/lib/portage/tests/env/__init__.py
@@ -0,0 +1,4 @@
+# tests/portage/env/__init__.py -- Portage Unit Test functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
diff --git a/lib/portage/tests/env/__test__.py b/lib/portage/tests/env/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/env/__test__.py
diff --git a/lib/portage/tests/env/config/__init__.py b/lib/portage/tests/env/config/__init__.py
new file mode 100644
index 000000000..ef5cc43b6
--- /dev/null
+++ b/lib/portage/tests/env/config/__init__.py
@@ -0,0 +1,4 @@
+# tests/portage/env/config/__init__.py -- Portage Unit Test functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
diff --git a/lib/portage/tests/env/config/__test__.py b/lib/portage/tests/env/config/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/env/config/__test__.py
diff --git a/lib/portage/tests/env/config/test_PackageKeywordsFile.py b/lib/portage/tests/env/config/test_PackageKeywordsFile.py
new file mode 100644
index 000000000..609c0fda2
--- /dev/null
+++ b/lib/portage/tests/env/config/test_PackageKeywordsFile.py
@@ -0,0 +1,40 @@
+# test_PackageKeywordsFile.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.tests import TestCase
+from portage.env.config import PackageKeywordsFile
+from tempfile import mkstemp
+
+class PackageKeywordsFileTestCase(TestCase):
+
+ cpv = ['sys-apps/portage']
+ keywords = ['~x86', 'amd64', '-mips']
+
+ def testPackageKeywordsFile(self):
+ """
+ A simple test to ensure the load works properly
+ """
+
+ self.BuildFile()
+ try:
+ f = PackageKeywordsFile(self.fname)
+ f.load()
+ i = 0
+ for cpv, keyword in f.items():
+ self.assertEqual(cpv, self.cpv[i])
+				for k in keyword:
+					self.assertTrue(k in self.keywords)
+ i = i + 1
+ finally:
+ self.NukeFile()
+
+ def BuildFile(self):
+ fd, self.fname = mkstemp()
+ f = os.fdopen(fd, 'w')
+ for c in self.cpv:
+ f.write("%s %s\n" % (c, ' '.join(self.keywords)))
+ f.close()
+
+ def NukeFile(self):
+ os.unlink(self.fname)
diff --git a/lib/portage/tests/env/config/test_PackageMaskFile.py b/lib/portage/tests/env/config/test_PackageMaskFile.py
new file mode 100644
index 000000000..0c5b30f53
--- /dev/null
+++ b/lib/portage/tests/env/config/test_PackageMaskFile.py
@@ -0,0 +1,29 @@
+# test_PackageMaskFile.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.env.config import PackageMaskFile
+from portage.tests import TestCase, test_cps
+from tempfile import mkstemp
+
+class PackageMaskFileTestCase(TestCase):
+
+ def testPackageMaskFile(self):
+ self.BuildFile()
+ try:
+ f = PackageMaskFile(self.fname)
+ f.load()
+ for atom in f:
+ self.assertTrue(atom in test_cps)
+ finally:
+ self.NukeFile()
+
+ def BuildFile(self):
+ fd, self.fname = mkstemp()
+ f = os.fdopen(fd, 'w')
+ f.write("\n".join(test_cps))
+ f.close()
+
+ def NukeFile(self):
+ os.unlink(self.fname)
diff --git a/lib/portage/tests/env/config/test_PackageUseFile.py b/lib/portage/tests/env/config/test_PackageUseFile.py
new file mode 100644
index 000000000..b1a6ccbde
--- /dev/null
+++ b/lib/portage/tests/env/config/test_PackageUseFile.py
@@ -0,0 +1,37 @@
+# test_PackageUseFile.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.tests import TestCase
+from portage.env.config import PackageUseFile
+from tempfile import mkstemp
+
+
+class PackageUseFileTestCase(TestCase):
+
+ cpv = 'sys-apps/portage'
+ useflags = ['cdrom', 'far', 'boo', 'flag', 'blat']
+
+ def testPackageUseFile(self):
+ """
+ A simple test to ensure the load works properly
+ """
+ self.BuildFile()
+ try:
+ f = PackageUseFile(self.fname)
+ f.load()
+ for cpv, use in f.items():
+ self.assertEqual(cpv, self.cpv)
+				for flag in use:
+					self.assertTrue(flag in self.useflags)
+ finally:
+ self.NukeFile()
+
+ def BuildFile(self):
+ fd, self.fname = mkstemp()
+ f = os.fdopen(fd, 'w')
+ f.write("%s %s" % (self.cpv, ' '.join(self.useflags)))
+ f.close()
+
+ def NukeFile(self):
+ os.unlink(self.fname)
diff --git a/lib/portage/tests/env/config/test_PortageModulesFile.py b/lib/portage/tests/env/config/test_PortageModulesFile.py
new file mode 100644
index 000000000..05584a5f8
--- /dev/null
+++ b/lib/portage/tests/env/config/test_PortageModulesFile.py
@@ -0,0 +1,38 @@
+# Copyright 2006-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.tests import TestCase
+from portage.env.config import PortageModulesFile
+from tempfile import mkstemp
+
+class PortageModulesFileTestCase(TestCase):
+
+ keys = ['foo.bar', 'baz', 'bob', 'extra_key']
+ invalid_keys = ['', ""]
+ modules = ['spanky', 'zmedico', 'antarus', 'ricer', '5', '6']
+
+ def setUp(self):
+ self.items = {}
+ for k, v in zip(self.keys + self.invalid_keys, self.modules):
+ self.items[k] = v
+
+ def testPortageModulesFile(self):
+ self.BuildFile()
+ f = PortageModulesFile(self.fname)
+ f.load()
+ for k in self.keys:
+ self.assertEqual(f[k], self.items[k])
+ for ik in self.invalid_keys:
+ self.assertEqual(False, ik in f)
+ self.NukeFile()
+
+ def BuildFile(self):
+ fd, self.fname = mkstemp()
+ f = os.fdopen(fd, 'w')
+ for k, v in self.items.items():
+ f.write('%s=%s\n' % (k, v))
+ f.close()
+
+ def NukeFile(self):
+ os.unlink(self.fname)
diff --git a/lib/portage/tests/glsa/__init__.py b/lib/portage/tests/glsa/__init__.py
new file mode 100644
index 000000000..6cde9320b
--- /dev/null
+++ b/lib/portage/tests/glsa/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/tests/glsa/__test__.py b/lib/portage/tests/glsa/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/glsa/__test__.py
diff --git a/lib/portage/tests/glsa/test_security_set.py b/lib/portage/tests/glsa/test_security_set.py
new file mode 100644
index 000000000..e73deaba9
--- /dev/null
+++ b/lib/portage/tests/glsa/test_security_set.py
@@ -0,0 +1,144 @@
+# Copyright 2013-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import io
+
+import portage
+from portage import os, _encodings
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SecuritySetTestCase(TestCase):
+
+ glsa_template = """\
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet href="/xsl/glsa.xsl" type="text/xsl"?>
+<?xml-stylesheet href="/xsl/guide.xsl" type="text/xsl"?>
+<!DOCTYPE glsa SYSTEM "http://www.gentoo.org/dtd/glsa.dtd">
+<glsa id="%(glsa_id)s">
+ <title>%(pkgname)s: Multiple vulnerabilities</title>
+ <synopsis>Multiple vulnerabilities have been found in %(pkgname)s.
+ </synopsis>
+ <product type="ebuild">%(pkgname)s</product>
+ <announced>January 18, 2013</announced>
+ <revised>January 18, 2013: 1</revised>
+ <bug>55555</bug>
+ <access>remote</access>
+ <affected>
+ <package name="%(cp)s" auto="yes" arch="*">
+ <unaffected range="ge">%(unaffected_version)s</unaffected>
+ <vulnerable range="lt">%(unaffected_version)s</vulnerable>
+ </package>
+ </affected>
+ <background>
+    <p>%(pkgname)s is a software package.</p>
+ </background>
+ <description>
+ <p>Multiple vulnerabilities have been discovered in %(pkgname)s.
+ </p>
+ </description>
+ <impact type="normal">
+ <p>A remote attacker could exploit these vulnerabilities.</p>
+ </impact>
+ <workaround>
+ <p>There is no known workaround at this time.</p>
+ </workaround>
+ <resolution>
+ <p>All %(pkgname)s users should upgrade to the latest version:</p>
+ <code>
+ # emerge --sync
+ # emerge --ask --oneshot --verbose "&gt;=%(cp)s-%(unaffected_version)s"
+ </code>
+ </resolution>
+ <references>
+ </references>
+</glsa>
+"""
+
+ def _must_skip(self):
+ try:
+ __import__("xml.etree.ElementTree")
+ __import__("xml.parsers.expat").parsers.expat.ExpatError
+ except (AttributeError, ImportError):
+ return "python is missing xml support"
+
+ def testSecuritySet(self):
+
+ skip_reason = self._must_skip()
+ if skip_reason:
+ self.portage_skip = skip_reason
+ self.assertFalse(True, skip_reason)
+ return
+
+ ebuilds = {
+ "cat/A-vulnerable-2.2": {
+ "KEYWORDS": "x86"
+ },
+ "cat/B-not-vulnerable-4.5": {
+ "KEYWORDS": "x86"
+ },
+ }
+
+ installed = {
+ "cat/A-vulnerable-2.1": {
+ "KEYWORDS": "x86"
+ },
+ "cat/B-not-vulnerable-4.4": {
+ "KEYWORDS": "x86"
+ },
+ }
+
+ glsas = (
+ {
+ "glsa_id": "201301-01",
+ "pkgname": "A-vulnerable",
+ "cp": "cat/A-vulnerable",
+ "unaffected_version": "2.2"
+ },
+ {
+ "glsa_id": "201301-02",
+ "pkgname": "B-not-vulnerable",
+ "cp": "cat/B-not-vulnerable",
+ "unaffected_version": "4.4"
+ },
+ {
+ "glsa_id": "201301-03",
+ "pkgname": "NotInstalled",
+ "cp": "cat/NotInstalled",
+ "unaffected_version": "3.5"
+ },
+ )
+
+ world = ["cat/A"]
+
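+		# Only cat/A is installed at a vulnerable version, so @security
+		# should pull in the fixed cat/A-vulnerable-2.2 and nothing else.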
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["@security"],
+ options = {},
+ success = True,
+ mergelist = ["cat/A-vulnerable-2.2"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+
+ try:
+
+ portdb = playground.trees[playground.eroot]["porttree"].dbapi
+ glsa_dir = os.path.join(portdb.repositories['test_repo'].location, 'metadata', 'glsa')
+ portage.util.ensure_dirs(glsa_dir)
+ for glsa in glsas:
+ with io.open(os.path.join(glsa_dir,
+ 'glsa-' + glsa["glsa_id"] + '.xml'),
+ encoding=_encodings['repo.content'], mode='w') as f:
+ f.write(self.glsa_template % glsa)
+
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/lafilefixer/__init__.py b/lib/portage/tests/lafilefixer/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/lafilefixer/__init__.py
diff --git a/lib/portage/tests/lafilefixer/__test__.py b/lib/portage/tests/lafilefixer/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/lafilefixer/__test__.py
diff --git a/lib/portage/tests/lafilefixer/test_lafilefixer.py b/lib/portage/tests/lafilefixer/test_lafilefixer.py
new file mode 100644
index 000000000..0bcffaada
--- /dev/null
+++ b/lib/portage/tests/lafilefixer/test_lafilefixer.py
@@ -0,0 +1,145 @@
+# test_lafilefixer.py -- Portage Unit Testing Functionality
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.exception import InvalidData
+
+class test_lafilefixer(TestCase):
+
+ def get_test_cases_clean(self):
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' -lm'\n" + \
+ b"current=6\n" + \
+ b"age=0\n" + \
+ b"revision=2\n" + \
+ b"installed=yes\n" + \
+ b"dlopen=''\n" + \
+ b"dlpreopen=''\n" + \
+ b"libdir='/usr/lib64'\n"
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' -lm'\n" + \
+ b"current=6\n" + \
+ b"age=0\n" + \
+ b"revision=2\n" + \
+ b"installed=yes\n" + \
+ b"dlopen=''\n" + \
+ b"dlpreopen=''\n" + \
+ b"libdir='/usr/lib64'\n"
+ yield b"dependency_libs=' liba.la /usr/lib64/bar.la -lc'\n"
+
+ def get_test_cases_update(self):
+ #.la -> -l*
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc'\n", \
+ b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' -L/usr/lib64 -la -lb -lc'\n"
+ #move stuff into inherited_linker_flags
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' /usr/lib64/liba.la -pthread /usr/lib64/libb.la -lc'\n" + \
+ b"inherited_linker_flags=''\n", \
+ b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' -L/usr/lib64 -la -lb -lc'\n" + \
+ b"inherited_linker_flags=' -pthread'\n"
+ #reorder
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' /usr/lib64/liba.la -R/usr/lib64 /usr/lib64/libb.la -lc'\n", \
+ b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' -R/usr/lib64 -L/usr/lib64 -la -lb -lc'\n"
+ #remove duplicates from dependency_libs (the original version didn't do it for inherited_linker_flags)
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libc.la -pthread -mt" + \
+ b" -L/usr/lib -R/usr/lib64 -lc /usr/lib64/libb.la -lc'\n" +\
+ b"inherited_linker_flags=' -pthread -pthread'\n", \
+ b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' -R/usr/lib64 -L/usr/lib64 -L/usr/lib -la -lc -lb'\n" +\
+ b"inherited_linker_flags=' -pthread -pthread -mt'\n"
+ #-L rewriting
+ yield b"dependency_libs=' -L/usr/X11R6/lib'\n", \
+ b"dependency_libs=' -L/usr/lib'\n"
+ yield b"dependency_libs=' -L/usr/local/lib'\n", \
+ b"dependency_libs=' -L/usr/lib'\n"
+ yield b"dependency_libs=' -L/usr/lib64/pkgconfig/../..'\n", \
+ b"dependency_libs=' -L/usr'\n"
+ yield b"dependency_libs=' -L/usr/lib/pkgconfig/..'\n", \
+ b"dependency_libs=' -L/usr/lib'\n"
+ yield b"dependency_libs=' -L/usr/lib/pkgconfig/../.. -L/usr/lib/pkgconfig/..'\n", \
+ b"dependency_libs=' -L/usr -L/usr/lib'\n"
+ #we once got a backtrace on this one
+ yield b"dependency_libs=' /usr/lib64/libMagickCore.la -L/usr/lib64 -llcms2 /usr/lib64/libtiff.la " + \
+ b"-ljbig -lc /usr/lib64/libfreetype.la /usr/lib64/libjpeg.la /usr/lib64/libXext.la " + \
+ b"/usr/lib64/libXt.la /usr/lib64/libSM.la -lICE -luuid /usr/lib64/libICE.la /usr/lib64/libX11.la " + \
+ b"/usr/lib64/libxcb.la /usr/lib64/libXau.la /usr/lib64/libXdmcp.la -lbz2 -lz -lm " + \
+ b"/usr/lib/gcc/x86_64-pc-linux-gnu/4.4.4/libgomp.la -lrt -lpthread /usr/lib64/libltdl.la -ldl " + \
+ b"/usr/lib64/libfpx.la -lstdc++'", \
+ b"dependency_libs=' -L/usr/lib64 -L/usr/lib/gcc/x86_64-pc-linux-gnu/4.4.4 -lMagickCore -llcms2 " + \
+ b"-ltiff -ljbig -lc -lfreetype -ljpeg -lXext -lXt -lSM -lICE -luuid -lX11 -lxcb -lXau -lXdmcp " + \
+ b"-lbz2 -lz -lm -lgomp -lrt -lpthread -lltdl -ldl -lfpx -lstdc++'"
+
+
+ def get_test_cases_broken(self):
+ yield b""
+ #no dependency_libs
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"current=6\n" + \
+ b"age=0\n" + \
+ b"revision=2\n"
+		#broken dependency_libs
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc' \n"
+		#broken dependency_libs
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc\n"
+ #crap in dependency_libs
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc /-lstdc++'\n"
+ #dependency_libs twice
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc /-lstdc++'\n" +\
+ b"dependency_libs=' /usr/lib64/liba.la /usr/lib64/libb.la -lc /-lstdc++'\n"
+ #inherited_linker_flags twice
+ yield b"dlname='libfoo.so.1'\n" + \
+ b"library_names='libfoo.so.1.0.2 libfoo.so.1 libfoo.so'\n" + \
+ b"old_library='libpdf.a'\n" + \
+ b"inherited_linker_flags=''\n" +\
+ b"inherited_linker_flags=''\n"
+
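+	# The generators above cover three categories: la files that need no
+	# rewriting, files that must be rewritten, and malformed files that
+	# should raise InvalidData.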
+ def testlafilefixer(self):
+ from portage.util.lafilefixer import _parse_lafile_contents, rewrite_lafile
+
+ for clean_contents in self.get_test_cases_clean():
+ self.assertEqual(rewrite_lafile(clean_contents), (False, None))
+
+ for original_contents, fixed_contents in self.get_test_cases_update():
+ self.assertEqual(rewrite_lafile(original_contents), (True, fixed_contents))
+
+ for broken_contents in self.get_test_cases_broken():
+ self.assertRaises(InvalidData, rewrite_lafile, broken_contents)
diff --git a/lib/portage/tests/lazyimport/__init__.py b/lib/portage/tests/lazyimport/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/lazyimport/__init__.py
diff --git a/lib/portage/tests/lazyimport/__test__.py b/lib/portage/tests/lazyimport/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/lazyimport/__test__.py
diff --git a/lib/portage/tests/lazyimport/test_lazy_import_portage_baseline.py b/lib/portage/tests/lazyimport/test_lazy_import_portage_baseline.py
new file mode 100644
index 000000000..080cf3f98
--- /dev/null
+++ b/lib/portage/tests/lazyimport/test_lazy_import_portage_baseline.py
@@ -0,0 +1,81 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import re
+import portage
+from portage import os
+from portage.const import PORTAGE_PYM_PATH
+from portage.tests import TestCase
+from portage.util._eventloop.global_event_loop import global_event_loop
+
+from _emerge.PipeReader import PipeReader
+from _emerge.SpawnProcess import SpawnProcess
+
+class LazyImportPortageBaselineTestCase(TestCase):
+
+ _module_re = re.compile(r'^(portage|repoman|_emerge)\.')
+
+ _baseline_imports = frozenset([
+ 'portage.const', 'portage.localization',
+ 'portage.proxy', 'portage.proxy.lazyimport',
+ 'portage.proxy.objectproxy',
+ 'portage._selinux',
+ ])
+
+ _baseline_import_cmd = [portage._python_interpreter, '-c', '''
+import os
+import sys
+sys.path.insert(0, os.environ["PORTAGE_PYM_PATH"])
+import portage
+sys.stdout.write(" ".join(k for k in sys.modules
+ if sys.modules[k] is not None))
+''']
+
+ def testLazyImportPortageBaseline(self):
+ """
+ Check what modules are imported by a baseline module import.
+ """
+
+ env = os.environ.copy()
+ pythonpath = env.get('PYTHONPATH')
+ if pythonpath is not None and not pythonpath.strip():
+ pythonpath = None
+ if pythonpath is None:
+ pythonpath = ''
+ else:
+ pythonpath = ':' + pythonpath
+ pythonpath = PORTAGE_PYM_PATH + pythonpath
+ env['PYTHONPATH'] = pythonpath
+
+ # If python is patched to insert the path of the
+ # currently installed portage module into sys.path,
+ # then the above PYTHONPATH override doesn't help.
+ env['PORTAGE_PYM_PATH'] = PORTAGE_PYM_PATH
+
+ scheduler = global_event_loop()
+ master_fd, slave_fd = os.pipe()
+ master_file = os.fdopen(master_fd, 'rb', 0)
+ slave_file = os.fdopen(slave_fd, 'wb')
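+		# The producer writes the names of all imported modules to the pipe,
+		# and the PipeReader consumer collects them for comparison against
+		# the baseline set.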
+ producer = SpawnProcess(
+ args=self._baseline_import_cmd,
+ env=env, fd_pipes={1:slave_fd},
+ scheduler=scheduler)
+ producer.start()
+ slave_file.close()
+
+ consumer = PipeReader(
+ input_files={"producer" : master_file},
+ scheduler=scheduler)
+
+ consumer.start()
+ consumer.wait()
+ self.assertEqual(producer.wait(), os.EX_OK)
+ self.assertEqual(consumer.wait(), os.EX_OK)
+
+ output = consumer.getvalue().decode('ascii', 'replace').split()
+
+ unexpected_modules = " ".join(sorted(x for x in output \
+ if self._module_re.match(x) is not None and \
+ x not in self._baseline_imports))
+
+ self.assertEqual("", unexpected_modules)
diff --git a/lib/portage/tests/lazyimport/test_preload_portage_submodules.py b/lib/portage/tests/lazyimport/test_preload_portage_submodules.py
new file mode 100644
index 000000000..9d20ebacf
--- /dev/null
+++ b/lib/portage/tests/lazyimport/test_preload_portage_submodules.py
@@ -0,0 +1,16 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage.tests import TestCase
+
+class PreloadPortageSubmodulesTestCase(TestCase):
+
+ def testPreloadPortageSubmodules(self):
+ """
+ Verify that _preload_portage_submodules() doesn't leave any
+ remaining proxies that refer to the portage.* namespace.
+ """
+ portage.proxy.lazyimport._preload_portage_submodules()
+ for name in portage.proxy.lazyimport._module_proxies:
+ self.assertEqual(name.startswith('portage.'), False)
diff --git a/lib/portage/tests/lint/__init__.py b/lib/portage/tests/lint/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/lint/__init__.py
diff --git a/lib/portage/tests/lint/__test__.py b/lib/portage/tests/lint/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/lint/__test__.py
diff --git a/lib/portage/tests/lint/metadata.py b/lib/portage/tests/lint/metadata.py
new file mode 100644
index 000000000..e3f90cbf2
--- /dev/null
+++ b/lib/portage/tests/lint/metadata.py
@@ -0,0 +1,11 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+module_metadata = {
+}
+
+script_metadata = {
+ 'socks5-server.py': {
+ 'required_python': '3.3',
+ },
+}
diff --git a/lib/portage/tests/lint/test_bash_syntax.py b/lib/portage/tests/lint/test_bash_syntax.py
new file mode 100644
index 000000000..fdbb6fe88
--- /dev/null
+++ b/lib/portage/tests/lint/test_bash_syntax.py
@@ -0,0 +1,54 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from itertools import chain
+import stat
+import subprocess
+import sys
+
+from portage.const import BASH_BINARY, PORTAGE_BASE_PATH, PORTAGE_BIN_PATH
+from portage.tests import TestCase
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode, _unicode_encode
+
+class BashSyntaxTestCase(TestCase):
+
+ def testBashSyntax(self):
+ locations = [PORTAGE_BIN_PATH]
+ misc_dir = os.path.join(PORTAGE_BASE_PATH, "misc")
+ if os.path.isdir(misc_dir):
+ locations.append(misc_dir)
+ for parent, dirs, files in \
+ chain.from_iterable(os.walk(x) for x in locations):
+ parent = _unicode_decode(parent,
+ encoding=_encodings['fs'], errors='strict')
+ for x in files:
+ x = _unicode_decode(x,
+ encoding=_encodings['fs'], errors='strict')
+ ext = x.split('.')[-1]
+				if ext in ('py', 'pyc', 'pyo'):
+ continue
+ x = os.path.join(parent, x)
+ st = os.lstat(x)
+ if not stat.S_ISREG(st.st_mode):
+ continue
+
+ # Check for bash shebang
+ f = open(_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ line = _unicode_decode(f.readline(),
+ encoding=_encodings['content'], errors='replace')
+ f.close()
+ if line[:2] == '#!' and \
+ 'bash' in line:
+ cmd = [BASH_BINARY, "-n", x]
+ cmd = [_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict') for x in cmd]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ output = _unicode_decode(proc.communicate()[0],
+ encoding=_encodings['fs'])
+ status = proc.wait()
+ self.assertEqual(os.WIFEXITED(status) and \
+ os.WEXITSTATUS(status) == os.EX_OK, True, msg=output)
diff --git a/lib/portage/tests/lint/test_compile_modules.py b/lib/portage/tests/lint/test_compile_modules.py
new file mode 100644
index 000000000..51eb8cd8a
--- /dev/null
+++ b/lib/portage/tests/lint/test_compile_modules.py
@@ -0,0 +1,67 @@
+# Copyright 2009-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import itertools
+import stat
+import sys
+
+from portage.const import PORTAGE_BIN_PATH, PORTAGE_PYM_PATH, PORTAGE_PYM_PACKAGES
+from portage.tests import TestCase
+from portage.tests.lint.metadata import module_metadata, script_metadata
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode, _unicode_encode
+
+class CompileModulesTestCase(TestCase):
+
+ def testCompileModules(self):
+ iters = [os.walk(os.path.join(PORTAGE_PYM_PATH, x))
+ for x in PORTAGE_PYM_PACKAGES]
+ iters.append(os.walk(PORTAGE_BIN_PATH))
+
+ for parent, _dirs, files in itertools.chain(*iters):
+ parent = _unicode_decode(parent,
+ encoding=_encodings['fs'], errors='strict')
+ for x in files:
+ x = _unicode_decode(x,
+ encoding=_encodings['fs'], errors='strict')
+ if x[-4:] in ('.pyc', '.pyo'):
+ continue
+ x = os.path.join(parent, x)
+ st = os.lstat(x)
+ if not stat.S_ISREG(st.st_mode):
+ continue
+
+ bin_path = os.path.relpath(x, PORTAGE_BIN_PATH)
+ mod_path = os.path.relpath(x, PORTAGE_PYM_PATH)
+
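+			# Skip scripts or modules whose metadata declares a
+			# required_python newer than the running interpreter.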
+ meta = module_metadata.get(mod_path) or script_metadata.get(bin_path)
+ if meta:
+ req_py = tuple(int(x) for x
+ in meta.get('required_python', '0.0').split('.'))
+ if sys.version_info < req_py:
+ continue
+
+ do_compile = False
+ if x[-3:] == '.py':
+ do_compile = True
+ else:
+ # Check for python shebang.
+ try:
+ with open(_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict'), 'rb') as f:
+ line = _unicode_decode(f.readline(),
+ encoding=_encodings['content'], errors='replace')
+ except IOError as e:
+ # Some tests create files that are unreadable by the
+ # user (by design), so ignore EACCES issues.
+ if e.errno != errno.EACCES:
+ raise
+ continue
+ if line[:2] == '#!' and 'python' in line:
+ do_compile = True
+ if do_compile:
+ with open(_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict'), 'rb') as f:
+ compile(f.read(), x, 'exec')
diff --git a/lib/portage/tests/lint/test_import_modules.py b/lib/portage/tests/lint/test_import_modules.py
new file mode 100644
index 000000000..fcdcb3b33
--- /dev/null
+++ b/lib/portage/tests/lint/test_import_modules.py
@@ -0,0 +1,44 @@
+# Copyright 2011-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from itertools import chain
+
+from portage.const import PORTAGE_PYM_PATH, PORTAGE_PYM_PACKAGES
+from portage.tests import TestCase
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+
+class ImportModulesTestCase(TestCase):
+
+ def testImportModules(self):
+ expected_failures = frozenset((
+ ))
+
+ iters = (self._iter_modules(os.path.join(PORTAGE_PYM_PATH, x))
+ for x in PORTAGE_PYM_PACKAGES)
+ for mod in chain(*iters):
+ try:
+ __import__(mod)
+ except ImportError as e:
+ if mod not in expected_failures:
+ self.assertTrue(False, "failed to import '%s': %s" % (mod, e))
+ del e
+
+ def _iter_modules(self, base_dir):
+ for parent, dirs, files in os.walk(base_dir):
+ parent = _unicode_decode(parent,
+ encoding=_encodings['fs'], errors='strict')
+ parent_mod = parent[len(PORTAGE_PYM_PATH)+1:]
+ parent_mod = parent_mod.replace("/", ".")
+ for x in files:
+ x = _unicode_decode(x,
+ encoding=_encodings['fs'], errors='strict')
+ if x[-3:] != '.py':
+ continue
+ x = x[:-3]
+ if x[-8:] == '__init__':
+ x = parent_mod
+ else:
+ x = parent_mod + "." + x
+ yield x
diff --git a/lib/portage/tests/locks/__init__.py b/lib/portage/tests/locks/__init__.py
new file mode 100644
index 000000000..21a391aee
--- /dev/null
+++ b/lib/portage/tests/locks/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/tests/locks/__test__.py b/lib/portage/tests/locks/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/locks/__test__.py
diff --git a/lib/portage/tests/locks/test_asynchronous_lock.py b/lib/portage/tests/locks/test_asynchronous_lock.py
new file mode 100644
index 000000000..338d91e09
--- /dev/null
+++ b/lib/portage/tests/locks/test_asynchronous_lock.py
@@ -0,0 +1,181 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import signal
+import tempfile
+
+try:
+ import dummy_threading
+except ImportError:
+ dummy_threading = None
+
+from portage import os
+from portage import shutil
+from portage.tests import TestCase
+from portage.util._eventloop.global_event_loop import global_event_loop
+from _emerge.AsynchronousLock import AsynchronousLock
+
+class AsynchronousLockTestCase(TestCase):
+
+ def _testAsynchronousLock(self):
+ scheduler = global_event_loop()
+ tempdir = tempfile.mkdtemp()
+ try:
+ path = os.path.join(tempdir, 'lock_me')
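+			# Exercise the thread-based (including dummy_threading) and
+			# process-based lock implementations, both with and without
+			# forcing the async code path.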
+ for force_async in (True, False):
+ for force_dummy in ((False,) if dummy_threading is None
+ else (True, False)):
+ async_lock = AsynchronousLock(path=path,
+ scheduler=scheduler, _force_async=force_async,
+ _force_thread=True,
+ _force_dummy=force_dummy)
+ async_lock.start()
+ self.assertEqual(async_lock.wait(), os.EX_OK)
+ self.assertEqual(async_lock.returncode, os.EX_OK)
+ scheduler.run_until_complete(async_lock.async_unlock())
+
+ async_lock = AsynchronousLock(path=path,
+ scheduler=scheduler, _force_async=force_async,
+ _force_process=True)
+ async_lock.start()
+ self.assertEqual(async_lock.wait(), os.EX_OK)
+ self.assertEqual(async_lock.returncode, os.EX_OK)
+ scheduler.run_until_complete(async_lock.async_unlock())
+ finally:
+ shutil.rmtree(tempdir)
+
+ def testAsynchronousLock(self):
+ self._testAsynchronousLock()
+
+ def testAsynchronousLockHardlink(self):
+ prev_state = os.environ.pop("__PORTAGE_TEST_HARDLINK_LOCKS", None)
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = "1"
+ try:
+ self._testAsynchronousLock()
+ finally:
+ os.environ.pop("__PORTAGE_TEST_HARDLINK_LOCKS", None)
+ if prev_state is not None:
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = prev_state
+
+ def _testAsynchronousLockWait(self):
+ scheduler = global_event_loop()
+ tempdir = tempfile.mkdtemp()
+ try:
+ path = os.path.join(tempdir, 'lock_me')
+ lock1 = AsynchronousLock(path=path, scheduler=scheduler)
+ lock1.start()
+ self.assertEqual(lock1.wait(), os.EX_OK)
+ self.assertEqual(lock1.returncode, os.EX_OK)
+
+ # lock2 requires _force_async=True since the portage.locks
+ # module is not designed to work as intended here if the
+ # same process tries to lock the same file more than
+ # one time concurrently.
+ lock2 = AsynchronousLock(path=path, scheduler=scheduler,
+ _force_async=True, _force_process=True)
+ lock2.start()
+ # lock2 should be waiting for lock1 to release
+ self.assertEqual(lock2.poll(), None)
+ self.assertEqual(lock2.returncode, None)
+
+ scheduler.run_until_complete(lock1.async_unlock())
+ self.assertEqual(lock2.wait(), os.EX_OK)
+ self.assertEqual(lock2.returncode, os.EX_OK)
+ scheduler.run_until_complete(lock2.async_unlock())
+ finally:
+ shutil.rmtree(tempdir)
+
+ def testAsynchronousLockWait(self):
+ self._testAsynchronousLockWait()
+
+ def testAsynchronousLockWaitHardlink(self):
+ prev_state = os.environ.pop("__PORTAGE_TEST_HARDLINK_LOCKS", None)
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = "1"
+ try:
+ self._testAsynchronousLockWait()
+ finally:
+ os.environ.pop("__PORTAGE_TEST_HARDLINK_LOCKS", None)
+ if prev_state is not None:
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = prev_state
+
+ def _testAsynchronousLockWaitCancel(self):
+ scheduler = global_event_loop()
+ tempdir = tempfile.mkdtemp()
+ try:
+ path = os.path.join(tempdir, 'lock_me')
+ lock1 = AsynchronousLock(path=path, scheduler=scheduler)
+ lock1.start()
+ self.assertEqual(lock1.wait(), os.EX_OK)
+ self.assertEqual(lock1.returncode, os.EX_OK)
+ lock2 = AsynchronousLock(path=path, scheduler=scheduler,
+ _force_async=True, _force_process=True)
+ lock2.start()
+ # lock2 should be waiting for lock1 to release
+ self.assertEqual(lock2.poll(), None)
+ self.assertEqual(lock2.returncode, None)
+
+ # Cancel lock2 and then check wait() and returncode results.
+ lock2.cancel()
+ self.assertEqual(lock2.wait() == os.EX_OK, False)
+ self.assertEqual(lock2.returncode == os.EX_OK, False)
+ self.assertEqual(lock2.returncode is None, False)
+ scheduler.run_until_complete(lock1.async_unlock())
+ finally:
+ shutil.rmtree(tempdir)
+
+ def testAsynchronousLockWaitCancel(self):
+ self._testAsynchronousLockWaitCancel()
+
+ def testAsynchronousLockWaitCancelHardlink(self):
+ prev_state = os.environ.pop("__PORTAGE_TEST_HARDLINK_LOCKS", None)
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = "1"
+ try:
+ self._testAsynchronousLockWaitCancel()
+ finally:
+ os.environ.pop("__PORTAGE_TEST_HARDLINK_LOCKS", None)
+ if prev_state is not None:
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = prev_state
+
+ def _testAsynchronousLockWaitKill(self):
+ scheduler = global_event_loop()
+ tempdir = tempfile.mkdtemp()
+ try:
+ path = os.path.join(tempdir, 'lock_me')
+ lock1 = AsynchronousLock(path=path, scheduler=scheduler)
+ lock1.start()
+ self.assertEqual(lock1.wait(), os.EX_OK)
+ self.assertEqual(lock1.returncode, os.EX_OK)
+ lock2 = AsynchronousLock(path=path, scheduler=scheduler,
+ _force_async=True, _force_process=True)
+ lock2.start()
+ # lock2 should be waiting for lock1 to release
+ self.assertEqual(lock2.poll(), None)
+ self.assertEqual(lock2.returncode, None)
+
+ # Kill lock2's process and then check wait() and
+ # returncode results. This is intended to simulate
+ # a SIGINT sent via the controlling tty.
+ self.assertEqual(lock2._imp is not None, True)
+ self.assertEqual(lock2._imp._proc is not None, True)
+ self.assertEqual(lock2._imp._proc.pid is not None, True)
+ lock2._imp._kill_test = True
+ os.kill(lock2._imp._proc.pid, signal.SIGTERM)
+ self.assertEqual(lock2.wait() == os.EX_OK, False)
+ self.assertEqual(lock2.returncode == os.EX_OK, False)
+ self.assertEqual(lock2.returncode is None, False)
+ scheduler.run_until_complete(lock1.async_unlock())
+ finally:
+ shutil.rmtree(tempdir)
+
+ def testAsynchronousLockWaitKill(self):
+ self._testAsynchronousLockWaitKill()
+
+ def testAsynchronousLockWaitKillHardlink(self):
+ prev_state = os.environ.pop("__PORTAGE_TEST_HARDLINK_LOCKS", None)
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = "1"
+ try:
+ self._testAsynchronousLockWaitKill()
+ finally:
+ os.environ.pop("__PORTAGE_TEST_HARDLINK_LOCKS", None)
+ if prev_state is not None:
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = prev_state
diff --git a/lib/portage/tests/locks/test_lock_nonblock.py b/lib/portage/tests/locks/test_lock_nonblock.py
new file mode 100644
index 000000000..2ff7b3527
--- /dev/null
+++ b/lib/portage/tests/locks/test_lock_nonblock.py
@@ -0,0 +1,62 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import tempfile
+import traceback
+
+import portage
+from portage import os
+from portage import shutil
+from portage.tests import TestCase
+
+class LockNonblockTestCase(TestCase):
+
+ def _testLockNonblock(self):
+ tempdir = tempfile.mkdtemp()
+ try:
+ path = os.path.join(tempdir, 'lock_me')
+ lock1 = portage.locks.lockfile(path)
+ pid = os.fork()
+ if pid == 0:
+ portage.locks._close_fds()
+ # Disable close_fds since we don't exec
+ # (see _setup_pipes docstring).
+ portage.process._setup_pipes({0:0, 1:1, 2:2}, close_fds=False)
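+				# Child exit codes: EX_OK if the non-blocking lock attempt
+				# raises TryAgain as expected, 1 if the lock is unexpectedly
+				# acquired, and 2 if an unexpected exception occurs.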
+ rval = 2
+ try:
+ try:
+ lock2 = portage.locks.lockfile(path, flags=os.O_NONBLOCK)
+ except portage.exception.TryAgain:
+ rval = os.EX_OK
+ else:
+ rval = 1
+ portage.locks.unlockfile(lock2)
+ except SystemExit:
+ raise
+ except:
+ traceback.print_exc()
+ finally:
+ os._exit(rval)
+
+ self.assertEqual(pid > 0, True)
+ pid, status = os.waitpid(pid, 0)
+ self.assertEqual(os.WIFEXITED(status), True)
+ self.assertEqual(os.WEXITSTATUS(status), os.EX_OK)
+
+ portage.locks.unlockfile(lock1)
+ finally:
+ shutil.rmtree(tempdir)
+
+ def testLockNonblock(self):
+ self._testLockNonblock()
+
+ def testLockNonblockHardlink(self):
+ prev_state = os.environ.pop("__PORTAGE_TEST_HARDLINK_LOCKS", None)
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = "1"
+ try:
+ self._testLockNonblock()
+ finally:
+ os.environ.pop("__PORTAGE_TEST_HARDLINK_LOCKS", None)
+ if prev_state is not None:
+ os.environ["__PORTAGE_TEST_HARDLINK_LOCKS"] = prev_state
+
diff --git a/lib/portage/tests/news/__init__.py b/lib/portage/tests/news/__init__.py
new file mode 100644
index 000000000..28a753f9c
--- /dev/null
+++ b/lib/portage/tests/news/__init__.py
@@ -0,0 +1,3 @@
+# tests/portage.news/__init__.py -- Portage Unit Test functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/tests/news/__test__.py b/lib/portage/tests/news/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/news/__test__.py
diff --git a/lib/portage/tests/news/test_NewsItem.py b/lib/portage/tests/news/test_NewsItem.py
new file mode 100644
index 000000000..2f183a7e0
--- /dev/null
+++ b/lib/portage/tests/news/test_NewsItem.py
@@ -0,0 +1,96 @@
+# test_NewsItem.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.tests import TestCase
+from portage.news import NewsItem
+from portage.dbapi.virtual import testdbapi
+from tempfile import mkstemp
+# TODO(antarus) Make newsitem use a loader so we can load using a string instead of a tempfile
+
+class NewsItemTestCase(TestCase):
+	"""These tests are fragile: they use your running config instead of creating their own"""
+ fakeItem = """
+Title: YourSQL Upgrades from 4.0 to 4.1
+Author: Ciaran McCreesh <ciaranm@gentoo.org>
+Content-Type: text/plain
+Posted: 01-Nov-2005
+Revision: 1
+News-Item-Format: 1.0
+#Display-If-Installed:
+#Display-If-Profile:
+#Display-If-Keyword:
+
+YourSQL databases created using YourSQL version 4.0 are incompatible
+with YourSQL version 4.1 or later. There is no reliable way to
+automate the database format conversion, so action from the system
+administrator is required before an upgrade can take place.
+
+Please see the Gentoo YourSQL Upgrade Guide for instructions:
+
+ http://www.gentoo.org/doc/en/yoursql-upgrading.xml
+
+Also see the official YourSQL documentation:
+
+ http://dev.yoursql.com/doc/refman/4.1/en/upgrading-from-4-0.html
+
+After upgrading, you should also recompile any packages which link
+against YourSQL:
+
+ revdep-rebuild --library=libyoursqlclient.so.12
+
+The revdep-rebuild tool is provided by app-portage/gentoolkit.
+"""
+ def setUp(self):
+ self.profile = "/usr/portage/profiles/default-linux/x86/2007.0/"
+ self.keywords = "x86"
+ # Use fake/test dbapi to avoid slow tests
+ self.vardb = testdbapi()
+ # self.vardb.inject_cpv('sys-apps/portage-2.0', { 'SLOT' : 0 })
+ # Consumers only use ARCH, so avoid portage.settings by using a dict
+ self.settings = { 'ARCH' : 'x86' }
+
+ def testDisplayIfProfile(self):
+ tmpItem = self.fakeItem[:].replace("#Display-If-Profile:", "Display-If-Profile: %s" %
+ self.profile)
+
+ item = self._processItem(tmpItem)
+ try:
+ self.assertTrue(item.isRelevant(self.vardb, self.settings, self.profile),
+ msg="Expected %s to be relevant, but it was not!" % tmpItem)
+ finally:
+ os.unlink(item.path)
+
+ def testDisplayIfInstalled(self):
+ tmpItem = self.fakeItem[:].replace("#Display-If-Installed:", "Display-If-Installed: %s" %
+ "sys-apps/portage")
+
+ try:
+ item = self._processItem(tmpItem)
+ self.assertTrue(item.isRelevant(self.vardb, self.settings, self.profile),
+ msg="Expected %s to be relevant, but it was not!" % tmpItem)
+ finally:
+ os.unlink(item.path)
+
+ def testDisplayIfKeyword(self):
+ tmpItem = self.fakeItem[:].replace("#Display-If-Keyword:", "Display-If-Keyword: %s" %
+ self.keywords)
+
+ try:
+ item = self._processItem(tmpItem)
+ self.assertTrue(item.isRelevant(self.vardb, self.settings, self.profile),
+ msg="Expected %s to be relevant, but it was not!" % tmpItem)
+ finally:
+ os.unlink(item.path)
+
+ def _processItem(self, item):
+ filename = None
+ fd, filename = mkstemp()
+ f = os.fdopen(fd, 'w')
+ f.write(item)
+ f.close()
+ try:
+ return NewsItem(filename, 0)
+ except TypeError:
+ self.fail("Error while processing news item %s" % filename)
diff --git a/lib/portage/tests/process/__init__.py b/lib/portage/tests/process/__init__.py
new file mode 100644
index 000000000..d19e353cc
--- /dev/null
+++ b/lib/portage/tests/process/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 1998-2008 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/tests/process/__test__.py b/lib/portage/tests/process/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/process/__test__.py
diff --git a/lib/portage/tests/process/test_PopenProcess.py b/lib/portage/tests/process/test_PopenProcess.py
new file mode 100644
index 000000000..88da0b354
--- /dev/null
+++ b/lib/portage/tests/process/test_PopenProcess.py
@@ -0,0 +1,85 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+import tempfile
+
+from portage import os
+from portage.tests import TestCase
+from portage.util._async.PipeLogger import PipeLogger
+from portage.util._async.PopenProcess import PopenProcess
+from portage.util._eventloop.global_event_loop import global_event_loop
+from _emerge.PipeReader import PipeReader
+
+class PopenPipeTestCase(TestCase):
+ """
+ Test PopenProcess, which can be useful for Jython support, since it
+ uses the subprocess.Popen instead of os.fork().
+ """
+
+ _echo_cmd = "echo -n '%s'"
+
+ def _testPipeReader(self, test_string):
+ """
+ Use a poll loop to read data from a pipe and assert that
+ the data written to the pipe is identical to the data
+ read from the pipe.
+ """
+
+ producer = PopenProcess(proc=subprocess.Popen(
+ ["bash", "-c", self._echo_cmd % test_string],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
+ pipe_reader=PipeReader(), scheduler=global_event_loop())
+
+ consumer = producer.pipe_reader
+ consumer.input_files = {"producer" : producer.proc.stdout}
+
+ producer.start()
+ producer.wait()
+
+ self.assertEqual(producer.returncode, os.EX_OK)
+ self.assertEqual(consumer.returncode, os.EX_OK)
+
+ return consumer.getvalue().decode('ascii', 'replace')
+
+ def _testPipeLogger(self, test_string):
+
+ producer = PopenProcess(proc=subprocess.Popen(
+ ["bash", "-c", self._echo_cmd % test_string],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
+ scheduler=global_event_loop())
+
+ fd, log_file_path = tempfile.mkstemp()
+ try:
+
+ consumer = PipeLogger(background=True,
+ input_fd=producer.proc.stdout,
+ log_file_path=log_file_path)
+
+ producer.pipe_reader = consumer
+
+ producer.start()
+ producer.wait()
+
+ self.assertEqual(producer.returncode, os.EX_OK)
+ self.assertEqual(consumer.returncode, os.EX_OK)
+
+ with open(log_file_path, 'rb') as f:
+ content = f.read()
+
+ finally:
+ os.close(fd)
+ os.unlink(log_file_path)
+
+ return content.decode('ascii', 'replace')
+
+ def testPopenPipe(self):
+ for x in (1, 2, 5, 6, 7, 8, 2**5, 2**10, 2**12, 2**13, 2**14):
+ test_string = x * "a"
+ output = self._testPipeReader(test_string)
+ self.assertEqual(test_string, output,
+ "x = %s, len(output) = %s" % (x, len(output)))
+
+ output = self._testPipeLogger(test_string)
+ self.assertEqual(test_string, output,
+ "x = %s, len(output) = %s" % (x, len(output)))
diff --git a/lib/portage/tests/process/test_PopenProcessBlockingIO.py b/lib/portage/tests/process/test_PopenProcessBlockingIO.py
new file mode 100644
index 000000000..9ee291a39
--- /dev/null
+++ b/lib/portage/tests/process/test_PopenProcessBlockingIO.py
@@ -0,0 +1,63 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+
+try:
+ import threading
+except ImportError:
+ # dummy_threading will not suffice
+ threading = None
+
+from portage import os
+from portage.tests import TestCase
+from portage.util._async.PopenProcess import PopenProcess
+from portage.util._eventloop.global_event_loop import global_event_loop
+from portage.util._async.PipeReaderBlockingIO import PipeReaderBlockingIO
+
+class PopenPipeBlockingIOTestCase(TestCase):
+ """
+ Test PopenProcess, which can be useful for Jython support:
+ * use subprocess.Popen since Jython does not support os.fork()
+ * use blocking IO with threads, since Jython does not support
+		  fcntl non-blocking IO
+ """
+
+ _echo_cmd = "echo -n '%s'"
+
+ def _testPipeReader(self, test_string):
+ """
+ Use a poll loop to read data from a pipe and assert that
+ the data written to the pipe is identical to the data
+ read from the pipe.
+ """
+
+ producer = PopenProcess(proc=subprocess.Popen(
+ ["bash", "-c", self._echo_cmd % test_string],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT),
+ pipe_reader=PipeReaderBlockingIO(), scheduler=global_event_loop())
+
+ consumer = producer.pipe_reader
+ consumer.input_files = {"producer" : producer.proc.stdout}
+
+ producer.start()
+ producer.wait()
+
+ self.assertEqual(producer.returncode, os.EX_OK)
+ self.assertEqual(consumer.returncode, os.EX_OK)
+
+ return consumer.getvalue().decode('ascii', 'replace')
+
+ def testPopenPipeBlockingIO(self):
+
+ if threading is None:
+ skip_reason = "threading disabled"
+			skip_reason = "threading disabled"
+			self.portage_skip = skip_reason
+ return
+
+ for x in (1, 2, 5, 6, 7, 8, 2**5, 2**10, 2**12, 2**13, 2**14):
+ test_string = x * "a"
+ output = self._testPipeReader(test_string)
+ self.assertEqual(test_string, output,
+ "x = %s, len(output) = %s" % (x, len(output)))
diff --git a/lib/portage/tests/process/test_poll.py b/lib/portage/tests/process/test_poll.py
new file mode 100644
index 000000000..f700a5585
--- /dev/null
+++ b/lib/portage/tests/process/test_poll.py
@@ -0,0 +1,111 @@
+# Copyright 1998-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import functools
+import pty
+import shutil
+import socket
+import sys
+import subprocess
+import tempfile
+
+from portage import os
+from portage.tests import TestCase
+from portage.util._async.PopenProcess import PopenProcess
+from portage.util._eventloop.global_event_loop import global_event_loop
+from _emerge.PipeReader import PipeReader
+
+class PipeReaderTestCase(TestCase):
+
+ _use_array = False
+ _echo_cmd = "echo -n '%s'"
+
+ def test_pipe(self):
+ def make_pipes():
+ return os.pipe(), None
+ self._do_test(make_pipes)
+
+ def test_pty_device(self):
+ def make_pipes():
+ try:
+ return pty.openpty(), None
+ except EnvironmentError:
+ self.skipTest('pty not available')
+ self._do_test(make_pipes)
+
+ def test_domain_socket(self):
+ def make_pipes():
+ if sys.version_info >= (3, 2):
+ read_end, write_end = socket.socketpair()
+ return (read_end.detach(), write_end.detach()), None
+ else:
+ self.skipTest('socket detach not supported')
+ self._do_test(make_pipes)
+
+ def test_named_pipe(self):
+ def make_pipes():
+ tempdir = tempfile.mkdtemp()
+ fifo_path = os.path.join(tempdir, 'fifo')
+ os.mkfifo(fifo_path)
+ return ((os.open(fifo_path, os.O_NONBLOCK|os.O_RDONLY),
+ os.open(fifo_path, os.O_NONBLOCK|os.O_WRONLY)),
+ functools.partial(shutil.rmtree, tempdir))
+ self._do_test(make_pipes)
+
+ def _testPipeReader(self, master_fd, slave_fd, test_string):
+ """
+ Use a poll loop to read data from a pipe and assert that
+ the data written to the pipe is identical to the data
+ read from the pipe.
+ """
+
+ # WARNING: It is very important to use unbuffered mode here,
+ # in order to avoid issue 5380 with python3.
+ master_file = os.fdopen(master_fd, 'rb', 0)
+ scheduler = global_event_loop()
+
+ consumer = PipeReader(
+ input_files={"producer" : master_file},
+ _use_array=self._use_array,
+ scheduler=scheduler)
+
+ producer = PopenProcess(
+ pipe_reader=consumer,
+ proc=subprocess.Popen(["bash", "-c", self._echo_cmd % test_string],
+ stdout=slave_fd),
+ scheduler=scheduler)
+
+ producer.start()
+ os.close(slave_fd)
+ producer.wait()
+ consumer.wait()
+
+ self.assertEqual(producer.returncode, os.EX_OK)
+ self.assertEqual(consumer.returncode, os.EX_OK)
+
+ return consumer.getvalue().decode('ascii', 'replace')
+
+ def _do_test(self, make_pipes):
+ for x in (1, 2, 5, 6, 7, 8, 2**5, 2**10, 2**12, 2**13, 2**14):
+ test_string = x * "a"
+ (read_end, write_end), cleanup = make_pipes()
+ try:
+ output = self._testPipeReader(read_end, write_end, test_string)
+ self.assertEqual(test_string, output,
+ "x = %s, len(output) = %s" % (x, len(output)))
+ finally:
+ if cleanup is not None:
+ cleanup()
+
+
+class PipeReaderArrayTestCase(PipeReaderTestCase):
+
+ _use_array = True
+ # sleep allows reliable triggering of the failure mode on fast computers
+ _echo_cmd = "sleep 0.1 ; echo -n '%s'"
+
+ def __init__(self, *args, **kwargs):
+ super(PipeReaderArrayTestCase, self).__init__(*args, **kwargs)
+ # https://bugs.python.org/issue5380
+ # https://bugs.pypy.org/issue956
+ self.todo = True
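Each make_pipes factory above follows the same contract: it returns ((read_fd, write_fd), cleanup), where cleanup is either None or a callable that releases whatever the factory created. A hypothetical additional factory (a sketch only, not part of the file) would have the same shape:

    import os

    def make_nonblocking_pipe():
        # Hypothetical factory following the ((read_fd, write_fd), cleanup)
        # contract consumed by _do_test(); os.pipe2 is POSIX-only, and the
        # fds themselves are closed by the test machinery, so no extra
        # cleanup callable is needed here.
        read_fd, write_fd = os.pipe2(os.O_NONBLOCK)
        return (read_fd, write_fd), None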
diff --git a/lib/portage/tests/resolver/ResolverPlayground.py b/lib/portage/tests/resolver/ResolverPlayground.py
new file mode 100644
index 000000000..e2e061669
--- /dev/null
+++ b/lib/portage/tests/resolver/ResolverPlayground.py
@@ -0,0 +1,842 @@
+# Copyright 2010-2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from itertools import permutations
+import fnmatch
+import sys
+import tempfile
+import portage
+from portage import os
+from portage import shutil
+from portage.const import (GLOBAL_CONFIG_PATH, PORTAGE_BASE_PATH,
+ USER_CONFIG_PATH)
+from portage.dep import Atom, _repo_separator
+from portage.package.ebuild.config import config
+from portage.package.ebuild.digestgen import digestgen
+from portage._sets import load_default_config
+from portage._sets.base import InternalPackageSet
+from portage.tests import cnf_path
+from portage.util import ensure_dirs, normalize_path
+from portage.versions import catsplit
+
+import _emerge
+from _emerge.actions import calc_depclean
+from _emerge.Blocker import Blocker
+from _emerge.create_depgraph_params import create_depgraph_params
+from _emerge.depgraph import backtrack_depgraph
+from _emerge.RootConfig import RootConfig
+
+try:
+ from repoman.tests import cnf_path_repoman
+except ImportError:
+ cnf_path_repoman = None
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+class ResolverPlayground(object):
+ """
+ This class helps to create the necessary files on disk and
+ the needed settings instances, etc. for the resolver to do
+ its work.
+ """
+
+ config_files = frozenset(("eapi", "layout.conf", "make.conf", "package.accept_keywords",
+ "package.keywords", "package.license", "package.mask", "package.properties",
+ "package.provided", "packages",
+ "package.unmask", "package.use", "package.use.aliases", "package.use.stable.mask",
+ "soname.provided",
+ "unpack_dependencies", "use.aliases", "use.force", "use.mask", "layout.conf"))
+
+ metadata_xml_template = """<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE pkgmetadata SYSTEM "https://www.gentoo.org/dtd/metadata.dtd">
+<pkgmetadata>
+<maintainer type="person">
+<email>maintainer-needed@gentoo.org</email>
+<description>Description of the maintainership</description>
+</maintainer>
+<longdescription>Long description of the package</longdescription>
+<use>
+%(flags)s
+</use>
+</pkgmetadata>
+"""
+
+ def __init__(self, ebuilds={}, binpkgs={}, installed={}, profile={}, repo_configs={}, \
+ user_config={}, sets={}, world=[], world_sets=[], distfiles={},
+ eprefix=None, targetroot=False, debug=False):
+ """
+ ebuilds: cpv -> metadata mapping simulating available ebuilds.
+ installed: cpv -> metadata mapping simulating installed packages.
+ If a metadata key is missing, it gets a default value.
+ profile: settings defined by the profile.
+ """
+
+ self.debug = debug
+ if eprefix is None:
+ self.eprefix = normalize_path(tempfile.mkdtemp())
+ else:
+ self.eprefix = normalize_path(eprefix)
+
+ # Tests may override portage.const.EPREFIX in order to
+ # simulate a prefix installation. It's reasonable to do
+ # this because tests should be self-contained such that
+ # the "real" value of portage.const.EPREFIX is entirely
+ # irrelevant (see bug #492932).
+ portage.const.EPREFIX = self.eprefix.rstrip(os.sep)
+
+ self.eroot = self.eprefix + os.sep
+ if targetroot:
+ self.target_root = os.path.join(self.eroot, 'target_root')
+ else:
+ self.target_root = os.sep
+ self.distdir = os.path.join(self.eroot, "var", "portage", "distfiles")
+ self.pkgdir = os.path.join(self.eprefix, "pkgdir")
+ self.vdbdir = os.path.join(self.eroot, "var/db/pkg")
+ os.makedirs(self.vdbdir)
+
+ if not debug:
+ portage.util.noiselimit = -2
+
+ self._repositories = {}
+ #Make sure the main repo is always created
+ self._get_repo_dir("test_repo")
+
+ self._create_distfiles(distfiles)
+ self._create_ebuilds(ebuilds)
+ self._create_binpkgs(binpkgs)
+ self._create_installed(installed)
+ self._create_profile(ebuilds, installed, profile, repo_configs, user_config, sets)
+ self._create_world(world, world_sets)
+
+ self.settings, self.trees = self._load_config()
+
+ self._create_ebuild_manifests(ebuilds)
+
+ portage.util.noiselimit = 0
+
+ def reload_config(self):
+ """
+ Reload configuration from disk, which is useful if it has
+ been modified after the constructor has been called.
+ """
+ for eroot in self.trees:
+ portdb = self.trees[eroot]["porttree"].dbapi
+ portdb.close_caches()
+ self.settings, self.trees = self._load_config()
+
+ def _get_repo_dir(self, repo):
+ """
+ Create the repo directory if needed.
+ """
+ if repo not in self._repositories:
+ if repo == "test_repo":
+ self._repositories["DEFAULT"] = {"main-repo": repo}
+
+ repo_path = os.path.join(self.eroot, "var", "repositories", repo)
+ self._repositories[repo] = {"location": repo_path}
+ profile_path = os.path.join(repo_path, "profiles")
+
+ try:
+ os.makedirs(profile_path)
+ except os.error:
+ pass
+
+ repo_name_file = os.path.join(profile_path, "repo_name")
+ with open(repo_name_file, "w") as f:
+ f.write("%s\n" % repo)
+
+ return self._repositories[repo]["location"]
+
+ def _create_distfiles(self, distfiles):
+ os.makedirs(self.distdir)
+ for k, v in distfiles.items():
+ with open(os.path.join(self.distdir, k), 'wb') as f:
+ f.write(v)
+
+ def _create_ebuilds(self, ebuilds):
+ for cpv in ebuilds:
+ a = Atom("=" + cpv, allow_repo=True)
+ repo = a.repo
+ if repo is None:
+ repo = "test_repo"
+
+ metadata = ebuilds[cpv].copy()
+ copyright_header = metadata.pop("COPYRIGHT_HEADER", None)
+ eapi = metadata.pop("EAPI", "0")
+ misc_content = metadata.pop("MISC_CONTENT", None)
+ metadata.setdefault("DEPEND", "")
+ metadata.setdefault("SLOT", "0")
+ metadata.setdefault("KEYWORDS", "x86")
+ metadata.setdefault("IUSE", "")
+
+ unknown_keys = set(metadata).difference(
+ portage.dbapi.dbapi._known_keys)
+ if unknown_keys:
+ raise ValueError("metadata of ebuild '%s' contains unknown keys: %s" %
+ (cpv, sorted(unknown_keys)))
+
+ repo_dir = self._get_repo_dir(repo)
+ ebuild_dir = os.path.join(repo_dir, a.cp)
+ ebuild_path = os.path.join(ebuild_dir, a.cpv.split("/")[1] + ".ebuild")
+ try:
+ os.makedirs(ebuild_dir)
+ except os.error:
+ pass
+
+ with open(ebuild_path, "w") as f:
+ if copyright_header is not None:
+ f.write(copyright_header)
+ f.write('EAPI="%s"\n' % eapi)
+ for k, v in metadata.items():
+ f.write('%s="%s"\n' % (k, v))
+ if misc_content is not None:
+ f.write(misc_content)
+
+ def _create_ebuild_manifests(self, ebuilds):
+ tmpsettings = config(clone=self.settings)
+ tmpsettings['PORTAGE_QUIET'] = '1'
+ for cpv in ebuilds:
+ a = Atom("=" + cpv, allow_repo=True)
+ repo = a.repo
+ if repo is None:
+ repo = "test_repo"
+
+ repo_dir = self._get_repo_dir(repo)
+ ebuild_dir = os.path.join(repo_dir, a.cp)
+ ebuild_path = os.path.join(ebuild_dir, a.cpv.split("/")[1] + ".ebuild")
+
+ portdb = self.trees[self.eroot]["porttree"].dbapi
+ tmpsettings['O'] = ebuild_dir
+ if not digestgen(mysettings=tmpsettings, myportdb=portdb):
+ raise AssertionError('digest creation failed for %s' % ebuild_path)
+
+ def _create_binpkgs(self, binpkgs):
+		# When using BUILD_ID, there can be multiple instances for the
+ # same cpv. Therefore, binpkgs may be an iterable instead of
+ # a dict.
+ items = getattr(binpkgs, 'items', None)
+ items = items() if items is not None else binpkgs
+ for cpv, metadata in items:
+ a = Atom("=" + cpv, allow_repo=True)
+ repo = a.repo
+ if repo is None:
+ repo = "test_repo"
+
+ pn = catsplit(a.cp)[1]
+ cat, pf = catsplit(a.cpv)
+ metadata = metadata.copy()
+ metadata.setdefault("SLOT", "0")
+ metadata.setdefault("KEYWORDS", "x86")
+ metadata.setdefault("BUILD_TIME", "0")
+ metadata["repository"] = repo
+ metadata["CATEGORY"] = cat
+ metadata["PF"] = pf
+
+ repo_dir = self.pkgdir
+ category_dir = os.path.join(repo_dir, cat)
+ if "BUILD_ID" in metadata:
+ binpkg_path = os.path.join(category_dir, pn,
+ "%s-%s.xpak"% (pf, metadata["BUILD_ID"]))
+ else:
+ binpkg_path = os.path.join(category_dir, pf + ".tbz2")
+
+ ensure_dirs(os.path.dirname(binpkg_path))
+ t = portage.xpak.tbz2(binpkg_path)
+ t.recompose_mem(portage.xpak.xpak_mem(metadata))
+
+ def _create_installed(self, installed):
+ for cpv in installed:
+ a = Atom("=" + cpv, allow_repo=True)
+ repo = a.repo
+ if repo is None:
+ repo = "test_repo"
+
+ vdb_pkg_dir = os.path.join(self.vdbdir, a.cpv)
+ try:
+ os.makedirs(vdb_pkg_dir)
+ except os.error:
+ pass
+
+ metadata = installed[cpv].copy()
+ metadata.setdefault("SLOT", "0")
+ metadata.setdefault("BUILD_TIME", "0")
+ metadata.setdefault("COUNTER", "0")
+ metadata.setdefault("KEYWORDS", "~x86")
+
+ unknown_keys = set(metadata).difference(
+ portage.dbapi.dbapi._known_keys)
+ unknown_keys.discard("BUILD_TIME")
+ unknown_keys.discard("BUILD_ID")
+ unknown_keys.discard("COUNTER")
+ unknown_keys.discard("repository")
+ unknown_keys.discard("USE")
+ unknown_keys.discard("PROVIDES")
+ unknown_keys.discard("REQUIRES")
+ if unknown_keys:
+ raise ValueError("metadata of installed '%s' contains unknown keys: %s" %
+ (cpv, sorted(unknown_keys)))
+
+ metadata["repository"] = repo
+ for k, v in metadata.items():
+ with open(os.path.join(vdb_pkg_dir, k), "w") as f:
+ f.write("%s\n" % v)
+
+ def _create_profile(self, ebuilds, installed, profile, repo_configs, user_config, sets):
+
+ user_config_dir = os.path.join(self.eroot, USER_CONFIG_PATH)
+
+ try:
+ os.makedirs(user_config_dir)
+ except os.error:
+ pass
+
+ for repo in self._repositories:
+ if repo == "DEFAULT":
+ continue
+
+ repo_dir = self._get_repo_dir(repo)
+ profile_dir = os.path.join(repo_dir, "profiles")
+ metadata_dir = os.path.join(repo_dir, "metadata")
+ os.makedirs(metadata_dir)
+
+ #Create $REPO/profiles/categories
+ categories = set()
+ for cpv in ebuilds:
+ ebuilds_repo = Atom("="+cpv, allow_repo=True).repo
+ if ebuilds_repo is None:
+ ebuilds_repo = "test_repo"
+ if ebuilds_repo == repo:
+ categories.add(catsplit(cpv)[0])
+
+ categories_file = os.path.join(profile_dir, "categories")
+ with open(categories_file, "w") as f:
+ for cat in categories:
+ f.write(cat + "\n")
+
+ #Create $REPO/profiles/license_groups
+ license_file = os.path.join(profile_dir, "license_groups")
+ with open(license_file, "w") as f:
+ f.write("EULA TEST\n")
+
+ repo_config = repo_configs.get(repo)
+ if repo_config:
+ for config_file, lines in repo_config.items():
+ if config_file not in self.config_files and not any(fnmatch.fnmatch(config_file, os.path.join(x, "*")) for x in self.config_files):
+ raise ValueError("Unknown config file: '%s'" % config_file)
+
+ if config_file in ("layout.conf",):
+ file_name = os.path.join(repo_dir, "metadata", config_file)
+ else:
+ file_name = os.path.join(profile_dir, config_file)
+ if "/" in config_file and not os.path.isdir(os.path.dirname(file_name)):
+ os.makedirs(os.path.dirname(file_name))
+ with open(file_name, "w") as f:
+ for line in lines:
+ f.write("%s\n" % line)
+ # Temporarily write empty value of masters until it becomes default.
+ # TODO: Delete all references to "# use implicit masters" when empty value becomes default.
+ if config_file == "layout.conf" and not any(line.startswith(("masters =", "# use implicit masters")) for line in lines):
+ f.write("masters =\n")
+
+ #Create $profile_dir/eclass (we fail to digest the ebuilds if it's not there)
+ os.makedirs(os.path.join(repo_dir, "eclass"))
+
+ # Temporarily write empty value of masters until it becomes default.
+ if not repo_config or "layout.conf" not in repo_config:
+ layout_conf_path = os.path.join(repo_dir, "metadata", "layout.conf")
+ with open(layout_conf_path, "w") as f:
+ f.write("masters =\n")
+
+ if repo == "test_repo":
+ #Create a minimal profile in /usr/portage
+ sub_profile_dir = os.path.join(profile_dir, "default", "linux", "x86", "test_profile")
+ os.makedirs(sub_profile_dir)
+
+ if not (profile and "eapi" in profile):
+ eapi_file = os.path.join(sub_profile_dir, "eapi")
+ with open(eapi_file, "w") as f:
+ f.write("0\n")
+
+ make_defaults_file = os.path.join(sub_profile_dir, "make.defaults")
+ with open(make_defaults_file, "w") as f:
+ f.write("ARCH=\"x86\"\n")
+ f.write("ACCEPT_KEYWORDS=\"x86\"\n")
+
+ use_force_file = os.path.join(sub_profile_dir, "use.force")
+ with open(use_force_file, "w") as f:
+ f.write("x86\n")
+
+ parent_file = os.path.join(sub_profile_dir, "parent")
+ with open(parent_file, "w") as f:
+ f.write("..\n")
+
+ if profile:
+ for config_file, lines in profile.items():
+ if config_file not in self.config_files:
+ raise ValueError("Unknown config file: '%s'" % config_file)
+
+ file_name = os.path.join(sub_profile_dir, config_file)
+ with open(file_name, "w") as f:
+ for line in lines:
+ f.write("%s\n" % line)
+
+ #Create profile symlink
+ os.symlink(sub_profile_dir, os.path.join(user_config_dir, "make.profile"))
+
+ make_conf = {
+ "ACCEPT_KEYWORDS": "x86",
+ "CLEAN_DELAY": "0",
+ "DISTDIR" : self.distdir,
+ "EMERGE_WARNING_DELAY": "0",
+ "PKGDIR": self.pkgdir,
+ "PORTAGE_INST_GID": str(portage.data.portage_gid),
+ "PORTAGE_INST_UID": str(portage.data.portage_uid),
+ "PORTAGE_TMPDIR": os.path.join(self.eroot, 'var/tmp'),
+ }
+
+ if os.environ.get("NOCOLOR"):
+ make_conf["NOCOLOR"] = os.environ["NOCOLOR"]
+
+ # Pass along PORTAGE_USERNAME and PORTAGE_GRPNAME since they
+ # need to be inherited by ebuild subprocesses.
+ if 'PORTAGE_USERNAME' in os.environ:
+ make_conf['PORTAGE_USERNAME'] = os.environ['PORTAGE_USERNAME']
+ if 'PORTAGE_GRPNAME' in os.environ:
+ make_conf['PORTAGE_GRPNAME'] = os.environ['PORTAGE_GRPNAME']
+
+ make_conf_lines = []
+ for k_v in make_conf.items():
+ make_conf_lines.append('%s="%s"' % k_v)
+
+ if "make.conf" in user_config:
+ make_conf_lines.extend(user_config["make.conf"])
+
+ if not portage.process.sandbox_capable or \
+ os.environ.get("SANDBOX_ON") == "1":
+ # avoid problems from nested sandbox instances
+ make_conf_lines.append('FEATURES="${FEATURES} -sandbox -usersandbox"')
+
+ configs = user_config.copy()
+ configs["make.conf"] = make_conf_lines
+
+ for config_file, lines in configs.items():
+ if config_file not in self.config_files:
+ raise ValueError("Unknown config file: '%s'" % config_file)
+
+ file_name = os.path.join(user_config_dir, config_file)
+ with open(file_name, "w") as f:
+ for line in lines:
+ f.write("%s\n" % line)
+
+ #Create /usr/share/portage/config/make.globals
+ make_globals_path = os.path.join(self.eroot,
+ GLOBAL_CONFIG_PATH.lstrip(os.sep), "make.globals")
+ ensure_dirs(os.path.dirname(make_globals_path))
+ os.symlink(os.path.join(cnf_path, "make.globals"),
+ make_globals_path)
+
+ #Create /usr/share/portage/config/sets/portage.conf
+ default_sets_conf_dir = os.path.join(self.eroot, "usr/share/portage/config/sets")
+
+ try:
+ os.makedirs(default_sets_conf_dir)
+ except os.error:
+ pass
+
+ provided_sets_portage_conf = (
+ os.path.join(cnf_path, "sets", "portage.conf"))
+ os.symlink(provided_sets_portage_conf, os.path.join(default_sets_conf_dir, "portage.conf"))
+
+ set_config_dir = os.path.join(user_config_dir, "sets")
+
+ try:
+ os.makedirs(set_config_dir)
+ except os.error:
+ pass
+
+ for sets_file, lines in sets.items():
+ file_name = os.path.join(set_config_dir, sets_file)
+ with open(file_name, "w") as f:
+ for line in lines:
+ f.write("%s\n" % line)
+
+ if cnf_path_repoman is not None:
+ #Create /usr/share/repoman
+ repoman_share_dir = os.path.join(self.eroot, 'usr', 'share', 'repoman')
+ os.symlink(cnf_path_repoman, repoman_share_dir)
+
+ def _create_world(self, world, world_sets):
+ #Create /var/lib/portage/world
+ var_lib_portage = os.path.join(self.eroot, "var", "lib", "portage")
+ os.makedirs(var_lib_portage)
+
+ world_file = os.path.join(var_lib_portage, "world")
+ world_set_file = os.path.join(var_lib_portage, "world_sets")
+
+ with open(world_file, "w") as f:
+ for atom in world:
+ f.write("%s\n" % atom)
+
+ with open(world_set_file, "w") as f:
+ for atom in world_sets:
+ f.write("%s\n" % atom)
+
+ def _load_config(self):
+
+ create_trees_kwargs = {}
+ if self.target_root != os.sep:
+ create_trees_kwargs["target_root"] = self.target_root
+
+ env = {
+ "PORTAGE_REPOSITORIES": "\n".join("[%s]\n%s" % (repo_name, "\n".join("%s = %s" % (k, v) for k, v in repo_config.items())) for repo_name, repo_config in self._repositories.items())
+ }
+
+ trees = portage.create_trees(env=env, eprefix=self.eprefix,
+ **create_trees_kwargs)
+
+ for root, root_trees in trees.items():
+ settings = root_trees["vartree"].settings
+ settings._init_dirs()
+ setconfig = load_default_config(settings, root_trees)
+ root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
+
+ return trees[trees._target_eroot]["vartree"].settings, trees
+
+ def run(self, atoms, options={}, action=None):
+ options = options.copy()
+ options["--pretend"] = True
+ if self.debug:
+ options["--debug"] = True
+
+ if action is None:
+ if options.get("--depclean"):
+ action = "depclean"
+ elif options.get("--prune"):
+ action = "prune"
+
+ if "--usepkgonly" in options:
+ options["--usepkg"] = True
+
+ global_noiselimit = portage.util.noiselimit
+ global_emergelog_disable = _emerge.emergelog._disable
+ try:
+
+ if not self.debug:
+ portage.util.noiselimit = -2
+ _emerge.emergelog._disable = True
+
+ if action in ("depclean", "prune"):
+ rval, cleanlist, ordered, req_pkg_count = \
+ calc_depclean(self.settings, self.trees, None,
+ options, action, InternalPackageSet(initial_atoms=atoms, allow_wildcard=True), None)
+ result = ResolverPlaygroundDepcleanResult(
+ atoms, rval, cleanlist, ordered, req_pkg_count)
+ else:
+ params = create_depgraph_params(options, action)
+ success, depgraph, favorites = backtrack_depgraph(
+ self.settings, self.trees, options, params, action, atoms, None)
+ depgraph._show_merge_list()
+ depgraph.display_problems()
+ result = ResolverPlaygroundResult(atoms, success, depgraph, favorites)
+ finally:
+ portage.util.noiselimit = global_noiselimit
+ _emerge.emergelog._disable = global_emergelog_disable
+
+ return result
+
+ def run_TestCase(self, test_case):
+ if not isinstance(test_case, ResolverPlaygroundTestCase):
+ raise TypeError("ResolverPlayground needs a ResolverPlaygroundTestCase")
+ for atoms in test_case.requests:
+ result = self.run(atoms, test_case.options, test_case.action)
+ if not test_case.compare_with_result(result):
+ return
+
+ def cleanup(self):
+ for eroot in self.trees:
+ portdb = self.trees[eroot]["porttree"].dbapi
+ portdb.close_caches()
+ if self.debug:
+ print("\nEROOT=%s" % self.eroot)
+ else:
+ shutil.rmtree(self.eroot)
+
+class ResolverPlaygroundTestCase(object):
+
+ def __init__(self, request, **kwargs):
+ self.all_permutations = kwargs.pop("all_permutations", False)
+ self.ignore_mergelist_order = kwargs.pop("ignore_mergelist_order", False)
+ self.ignore_cleanlist_order = kwargs.pop("ignore_cleanlist_order", False)
+ self.ambiguous_merge_order = kwargs.pop("ambiguous_merge_order", False)
+ self.ambiguous_slot_collision_solutions = kwargs.pop("ambiguous_slot_collision_solutions", False)
+ self.check_repo_names = kwargs.pop("check_repo_names", False)
+ self.merge_order_assertions = kwargs.pop("merge_order_assertions", False)
+
+ if self.all_permutations:
+ self.requests = list(permutations(request))
+ else:
+ self.requests = [request]
+
+ self.options = kwargs.pop("options", {})
+ self.action = kwargs.pop("action", None)
+ self.test_success = True
+ self.fail_msg = None
+ self._checks = kwargs.copy()
+
+ def compare_with_result(self, result):
+ checks = dict.fromkeys(result.checks)
+ for key, value in self._checks.items():
+			if key not in checks:
+ raise KeyError("Not an available check: '%s'" % key)
+ checks[key] = value
+
+ fail_msgs = []
+ for key, value in checks.items():
+ got = getattr(result, key)
+ expected = value
+
+ if key in result.optional_checks and expected is None:
+ continue
+
+ if key == "mergelist":
+ if not self.check_repo_names:
+ #Strip repo names if we don't check them
+ if got:
+ new_got = []
+ for cpv in got:
+ if cpv[:1] == "!":
+ new_got.append(cpv)
+ continue
+ new_got.append(cpv.split(_repo_separator)[0])
+ got = new_got
+ if expected:
+ new_expected = []
+ for obj in expected:
+ if isinstance(obj, basestring):
+ if obj[:1] == "!":
+ new_expected.append(obj)
+ continue
+ new_expected.append(
+ obj.split(_repo_separator)[0])
+ continue
+ new_expected.append(set())
+ for cpv in obj:
+ if cpv[:1] != "!":
+ cpv = cpv.split(_repo_separator)[0]
+ new_expected[-1].add(cpv)
+ expected = new_expected
+ if self.ignore_mergelist_order and got is not None:
+ got = set(got)
+ expected = set(expected)
+
+ if self.ambiguous_merge_order and got:
+ expected_stack = list(reversed(expected))
+ got_stack = list(reversed(got))
+ new_expected = []
+ match = True
+ while got_stack and expected_stack:
+ got_token = got_stack.pop()
+ expected_obj = expected_stack.pop()
+ if isinstance(expected_obj, basestring):
+ new_expected.append(expected_obj)
+ if got_token == expected_obj:
+ continue
+ # result doesn't match, so stop early
+ match = False
+ break
+ expected_obj = set(expected_obj)
+ try:
+ expected_obj.remove(got_token)
+ except KeyError:
+ # result doesn't match, so stop early
+ match = False
+ break
+ new_expected.append(got_token)
+ while got_stack and expected_obj:
+ got_token = got_stack.pop()
+ try:
+ expected_obj.remove(got_token)
+ except KeyError:
+ match = False
+ break
+ new_expected.append(got_token)
+ if not match:
+ # result doesn't match, so stop early
+ break
+ if expected_obj:
+ # result does not match, so stop early
+ match = False
+ new_expected.append(tuple(expected_obj))
+ break
+ if expected_stack:
+ # result does not match, add leftovers to new_expected
+ match = False
+ expected_stack.reverse()
+ new_expected.extend(expected_stack)
+ expected = new_expected
+
+ if match and self.merge_order_assertions:
+ for node1, node2 in self.merge_order_assertions:
+ if not (got.index(node1) < got.index(node2)):
+ fail_msgs.append("atoms: (" + \
+ ", ".join(result.atoms) + "), key: " + \
+ ("merge_order_assertions, expected: %s" % \
+ str((node1, node2))) + \
+ ", got: " + str(got))
+
+ elif key == "cleanlist" and self.ignore_cleanlist_order:
+ got = set(got)
+ expected = set(expected)
+
+ elif key == "slot_collision_solutions" and \
+ self.ambiguous_slot_collision_solutions:
+ # Tests that use all_permutations can have multiple
+ # outcomes here.
+ for x in expected:
+ if x == got:
+ expected = x
+ break
+ elif key in ("unstable_keywords", "needed_p_mask_changes",
+ "unsatisfied_deps", "required_use_unsatisfied") and \
+ expected is not None:
+ expected = set(expected)
+
+ elif key == "forced_rebuilds" and expected is not None:
+ expected = dict((k, set(v)) for k, v in expected.items())
+
+ if got != expected:
+ fail_msgs.append("atoms: (" + ", ".join(result.atoms) + "), key: " + \
+ key + ", expected: " + str(expected) + ", got: " + str(got))
+ if fail_msgs:
+ self.test_success = False
+ self.fail_msg = "\n".join(fail_msgs)
+ return False
+ return True
+
+class ResolverPlaygroundResult(object):
+
+ checks = (
+ "success", "mergelist", "use_changes", "license_changes",
+ "unstable_keywords", "slot_collision_solutions",
+ "circular_dependency_solutions", "needed_p_mask_changes",
+ "unsatisfied_deps", "forced_rebuilds", "required_use_unsatisfied"
+ )
+ optional_checks = (
+ "forced_rebuilds",
+ "required_use_unsatisfied",
+ "unsatisfied_deps"
+ )
+
+ def __init__(self, atoms, success, mydepgraph, favorites):
+ self.atoms = atoms
+ self.success = success
+ self.depgraph = mydepgraph
+ self.favorites = favorites
+ self.mergelist = None
+ self.use_changes = None
+ self.license_changes = None
+ self.unstable_keywords = None
+ self.needed_p_mask_changes = None
+ self.slot_collision_solutions = None
+ self.circular_dependency_solutions = None
+ self.unsatisfied_deps = frozenset()
+ self.forced_rebuilds = None
+ self.required_use_unsatisfied = None
+
+ if self.depgraph._dynamic_config._serialized_tasks_cache is not None:
+ self.mergelist = []
+ host_root = self.depgraph._frozen_config._running_root.root
+ for x in self.depgraph._dynamic_config._serialized_tasks_cache:
+ if isinstance(x, Blocker):
+ self.mergelist.append(x.atom)
+ else:
+ repo_str = ""
+ if x.repo != "test_repo":
+ repo_str = _repo_separator + x.repo
+ build_id_str = ""
+ if (x.type_name == "binary" and
+ x.cpv.build_id is not None):
+ build_id_str = "-%s" % x.cpv.build_id
+ mergelist_str = x.cpv + build_id_str + repo_str
+ if x.built:
+ if x.operation == "merge":
+ desc = x.type_name
+ else:
+ desc = x.operation
+ mergelist_str = "[%s]%s" % (desc, mergelist_str)
+ if x.root != host_root:
+ mergelist_str += "{targetroot}"
+ self.mergelist.append(mergelist_str)
+
+ if self.depgraph._dynamic_config._needed_use_config_changes:
+ self.use_changes = {}
+ for pkg, needed_use_config_changes in \
+ self.depgraph._dynamic_config._needed_use_config_changes.items():
+ new_use, changes = needed_use_config_changes
+ self.use_changes[pkg.cpv] = changes
+
+ if self.depgraph._dynamic_config._needed_unstable_keywords:
+ self.unstable_keywords = set()
+ for pkg in self.depgraph._dynamic_config._needed_unstable_keywords:
+ self.unstable_keywords.add(pkg.cpv)
+
+ if self.depgraph._dynamic_config._needed_p_mask_changes:
+ self.needed_p_mask_changes = set()
+ for pkg in self.depgraph._dynamic_config._needed_p_mask_changes:
+ self.needed_p_mask_changes.add(pkg.cpv)
+
+ if self.depgraph._dynamic_config._needed_license_changes:
+ self.license_changes = {}
+ for pkg, missing_licenses in self.depgraph._dynamic_config._needed_license_changes.items():
+ self.license_changes[pkg.cpv] = missing_licenses
+
+ if self.depgraph._dynamic_config._slot_conflict_handler is not None:
+ self.slot_collision_solutions = []
+ handler = self.depgraph._dynamic_config._slot_conflict_handler
+
+ for change in handler.changes:
+ new_change = {}
+ for pkg in change:
+ new_change[pkg.cpv] = change[pkg]
+ self.slot_collision_solutions.append(new_change)
+
+ if self.depgraph._dynamic_config._circular_dependency_handler is not None:
+ handler = self.depgraph._dynamic_config._circular_dependency_handler
+ sol = handler.solutions
+ self.circular_dependency_solutions = dict(zip([x.cpv for x in sol.keys()], sol.values()))
+
+ if self.depgraph._dynamic_config._unsatisfied_deps_for_display:
+ self.unsatisfied_deps = set(dep_info[0][1]
+ for dep_info in self.depgraph._dynamic_config._unsatisfied_deps_for_display)
+
+ if self.depgraph._forced_rebuilds:
+ self.forced_rebuilds = dict(
+ (child.cpv, set(parent.cpv for parent in parents))
+ for child_dict in self.depgraph._forced_rebuilds.values()
+ for child, parents in child_dict.items())
+
+ required_use_unsatisfied = []
+ for pargs, kwargs in \
+ self.depgraph._dynamic_config._unsatisfied_deps_for_display:
+ if "show_req_use" in kwargs:
+ required_use_unsatisfied.append(pargs[1])
+ if required_use_unsatisfied:
+ self.required_use_unsatisfied = set(required_use_unsatisfied)
+
+class ResolverPlaygroundDepcleanResult(object):
+
+ checks = (
+ "success", "cleanlist", "ordered", "req_pkg_count",
+ )
+ optional_checks = (
+ "ordered", "req_pkg_count",
+ )
+
+ def __init__(self, atoms, rval, cleanlist, ordered, req_pkg_count):
+ self.atoms = atoms
+ self.success = rval == 0
+ self.cleanlist = cleanlist
+ self.ordered = ordered
+ self.req_pkg_count = req_pkg_count
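The resolver test modules that follow all drive ResolverPlayground through the same pattern; the condensed sketch below shows its shape (app-misc/A and dev-libs/B are placeholder packages chosen for illustration, not taken from any particular test):

    from portage.tests import TestCase
    from portage.tests.resolver.ResolverPlayground import (
        ResolverPlayground, ResolverPlaygroundTestCase)

    class ExampleTestCase(TestCase):

        def testExample(self):
            # cpv -> metadata mappings; missing keys receive defaults.
            # Placeholder packages for illustration only.
            ebuilds = {
                "app-misc/A-1": {"RDEPEND": "dev-libs/B"},
                "dev-libs/B-1": {},
            }
            test_case = ResolverPlaygroundTestCase(
                ["app-misc/A"],
                options={"--oneshot": True},
                success=True,
                mergelist=["dev-libs/B-1", "app-misc/A-1"])
            playground = ResolverPlayground(ebuilds=ebuilds)
            try:
                playground.run_TestCase(test_case)
                self.assertEqual(test_case.test_success, True,
                    test_case.fail_msg)
            finally:
                playground.cleanup()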
diff --git a/lib/portage/tests/resolver/__init__.py b/lib/portage/tests/resolver/__init__.py
new file mode 100644
index 000000000..21a391aee
--- /dev/null
+++ b/lib/portage/tests/resolver/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/tests/resolver/__test__.py b/lib/portage/tests/resolver/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/resolver/__test__.py
diff --git a/lib/portage/tests/resolver/binpkg_multi_instance/__init__.py b/lib/portage/tests/resolver/binpkg_multi_instance/__init__.py
new file mode 100644
index 000000000..4725d3317
--- /dev/null
+++ b/lib/portage/tests/resolver/binpkg_multi_instance/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/tests/resolver/binpkg_multi_instance/__test__.py b/lib/portage/tests/resolver/binpkg_multi_instance/__test__.py
new file mode 100644
index 000000000..4725d3317
--- /dev/null
+++ b/lib/portage/tests/resolver/binpkg_multi_instance/__test__.py
@@ -0,0 +1,2 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/tests/resolver/binpkg_multi_instance/test_build_id_profile_format.py b/lib/portage/tests/resolver/binpkg_multi_instance/test_build_id_profile_format.py
new file mode 100644
index 000000000..0397509f8
--- /dev/null
+++ b/lib/portage/tests/resolver/binpkg_multi_instance/test_build_id_profile_format.py
@@ -0,0 +1,134 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class BuildIdProfileFormatTestCase(TestCase):
+
+ def testBuildIdProfileFormat(self):
+
+ profile = {
+ "packages": ("=app-misc/A-1-2",),
+ "package.provided": ("sys-libs/zlib-1.2.8-r1",),
+ }
+
+ repo_configs = {
+ "test_repo": {
+ "layout.conf": (
+ "profile-formats = build-id profile-set",
+ ),
+ }
+ }
+
+ user_config = {
+ "make.conf":
+ (
+ "FEATURES=\"binpkg-multi-instance\"",
+ ),
+ }
+
+ ebuilds = {
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "RDEPEND": "sys-libs/zlib dev-libs/B[foo]",
+ "DEPEND": "sys-libs/zlib dev-libs/B[foo]",
+ },
+ "dev-libs/B-1" : {
+ "EAPI": "5",
+ "IUSE": "foo",
+ },
+ }
+
+ binpkgs = (
+ ("app-misc/A-1", {
+ "EAPI": "5",
+ "BUILD_ID": "1",
+ "BUILD_TIME": "1",
+ "RDEPEND": "sys-libs/zlib dev-libs/B[foo]",
+ "DEPEND": "sys-libs/zlib dev-libs/B[foo]",
+ }),
+ ("app-misc/A-1", {
+ "EAPI": "5",
+ "BUILD_ID": "2",
+ "BUILD_TIME": "2",
+ "RDEPEND": "sys-libs/zlib dev-libs/B[foo]",
+ "DEPEND": "sys-libs/zlib dev-libs/B[foo]",
+ }),
+ ("app-misc/A-1", {
+ "EAPI": "5",
+ "BUILD_ID": "3",
+ "BUILD_TIME": "3",
+ "RDEPEND": "sys-libs/zlib dev-libs/B[foo]",
+ "DEPEND": "sys-libs/zlib dev-libs/B[foo]",
+ }),
+ ("dev-libs/B-1", {
+ "EAPI": "5",
+ "IUSE": "foo",
+ "USE": "",
+ "BUILD_ID": "1",
+ "BUILD_TIME": "1",
+ }),
+ ("dev-libs/B-1", {
+ "EAPI": "5",
+ "IUSE": "foo",
+ "USE": "foo",
+ "BUILD_ID": "2",
+ "BUILD_TIME": "2",
+ }),
+ ("dev-libs/B-1", {
+ "EAPI": "5",
+ "IUSE": "foo",
+ "USE": "",
+ "BUILD_ID": "3",
+ "BUILD_TIME": "3",
+ }),
+ )
+
+ installed = {
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "BUILD_ID": "1",
+ "BUILD_TIME": "1",
+ "RDEPEND": "sys-libs/zlib",
+ "DEPEND": "sys-libs/zlib",
+ },
+ "dev-libs/B-1" : {
+ "EAPI": "5",
+ "IUSE": "foo",
+ "USE": "foo",
+ "BUILD_ID": "2",
+ "BUILD_TIME": "2",
+ },
+ }
+
+ world = ()
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--emptytree": True, "--usepkgonly": True},
+ success = True,
+ mergelist = [
+ "[binary]dev-libs/B-1-2",
+ "[binary]app-misc/A-1-2"
+ ]
+ ),
+
+ )
+
+ playground = ResolverPlayground(debug=False,
+ binpkgs=binpkgs, ebuilds=ebuilds, installed=installed,
+ repo_configs=repo_configs, profile=profile,
+ user_config=user_config, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ # Disable debug so that cleanup works.
+			playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/binpkg_multi_instance/test_rebuilt_binaries.py b/lib/portage/tests/resolver/binpkg_multi_instance/test_rebuilt_binaries.py
new file mode 100644
index 000000000..5729df465
--- /dev/null
+++ b/lib/portage/tests/resolver/binpkg_multi_instance/test_rebuilt_binaries.py
@@ -0,0 +1,101 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class RebuiltBinariesCase(TestCase):
+
+ def testRebuiltBinaries(self):
+
+ user_config = {
+ "make.conf":
+ (
+ "FEATURES=\"binpkg-multi-instance\"",
+ ),
+ }
+
+ binpkgs = (
+ ("app-misc/A-1", {
+ "EAPI": "5",
+ "BUILD_ID": "1",
+ "BUILD_TIME": "1",
+ }),
+ ("app-misc/A-1", {
+ "EAPI": "5",
+ "BUILD_ID": "2",
+ "BUILD_TIME": "2",
+ }),
+ ("app-misc/A-1", {
+ "EAPI": "5",
+ "BUILD_ID": "3",
+ "BUILD_TIME": "3",
+ }),
+ ("dev-libs/B-1", {
+ "EAPI": "5",
+ "BUILD_ID": "1",
+ "BUILD_TIME": "1",
+ }),
+ ("dev-libs/B-1", {
+ "EAPI": "5",
+ "BUILD_ID": "2",
+ "BUILD_TIME": "2",
+ }),
+ ("dev-libs/B-1", {
+ "EAPI": "5",
+ "BUILD_ID": "3",
+ "BUILD_TIME": "3",
+ }),
+ )
+
+ installed = {
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "BUILD_ID": "1",
+ "BUILD_TIME": "1",
+ },
+ "dev-libs/B-1" : {
+ "EAPI": "5",
+ "BUILD_ID": "2",
+ "BUILD_TIME": "2",
+ },
+ }
+
+ world = (
+ "app-misc/A",
+ "dev-libs/B",
+ )
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {
+ "--deep": True,
+ "--rebuilt-binaries": True,
+ "--update": True,
+ "--usepkgonly": True,
+ },
+ success = True,
+ ignore_mergelist_order=True,
+ mergelist = [
+ "[binary]dev-libs/B-1-3",
+ "[binary]app-misc/A-1-3"
+ ]
+ ),
+
+ )
+
+ playground = ResolverPlayground(debug=False,
+ binpkgs=binpkgs, installed=installed,
+ user_config=user_config, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ # Disable debug so that cleanup works.
+			playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/soname/__init__.py b/lib/portage/tests/resolver/soname/__init__.py
new file mode 100644
index 000000000..4725d3317
--- /dev/null
+++ b/lib/portage/tests/resolver/soname/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/tests/resolver/soname/__test__.py b/lib/portage/tests/resolver/soname/__test__.py
new file mode 100644
index 000000000..4725d3317
--- /dev/null
+++ b/lib/portage/tests/resolver/soname/__test__.py
@@ -0,0 +1,2 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/tests/resolver/soname/test_autounmask.py b/lib/portage/tests/resolver/soname/test_autounmask.py
new file mode 100644
index 000000000..be0f94e17
--- /dev/null
+++ b/lib/portage/tests/resolver/soname/test_autounmask.py
@@ -0,0 +1,103 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (
+ ResolverPlayground, ResolverPlaygroundTestCase)
+
+class SonameAutoUnmaskTestCase(TestCase):
+
+ def testSonameAutoUnmask(self):
+
+ binpkgs = {
+ "dev-libs/icu-49" : {
+ "KEYWORDS": "x86",
+ "PROVIDES": "x86_32: libicu.so.49",
+ },
+ "dev-libs/icu-4.8" : {
+ "KEYWORDS": "x86",
+ "PROVIDES": "x86_32: libicu.so.48",
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "KEYWORDS": "~x86",
+ "DEPEND": "dev-libs/icu",
+ "RDEPEND": "dev-libs/icu",
+ "REQUIRES": "x86_32: libicu.so.49",
+ },
+ }
+
+ installed = {
+ "dev-libs/icu-4.8" : {
+ "KEYWORDS": "x86",
+ "PROVIDES": "x86_32: libicu.so.48",
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "KEYWORDS": "~x86",
+ "DEPEND": "dev-libs/icu",
+ "RDEPEND": "dev-libs/icu",
+ "REQUIRES": "x86_32: libicu.so.48",
+ },
+ }
+
+ world = ["dev-libs/libxml2"]
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {
+ "--autounmask": True,
+ "--ignore-soname-deps": "n",
+ "--oneshot": True,
+ "--usepkgonly": True,
+ },
+ success = False,
+ mergelist = [
+ "[binary]dev-libs/icu-49",
+ "[binary]dev-libs/libxml2-2.7.8"
+ ],
+ unstable_keywords = ['dev-libs/libxml2-2.7.8'],
+ ),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {
+ "--autounmask": True,
+ "--ignore-soname-deps": "y",
+ "--oneshot": True,
+ "--usepkgonly": True,
+ },
+ success = True,
+ mergelist = [
+ "[binary]dev-libs/icu-49"
+ ]
+ ),
+
+ # Test that dev-libs/icu-49 update is skipped due to
+ # dev-libs/libxml2-2.7.8 being masked by KEYWORDS. Note
+ # that this result is questionable, since the installed
+ # dev-libs/libxml2-2.7.8 instance is also masked!
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {
+ "--autounmask": True,
+ "--deep": True,
+ "--ignore-soname-deps": "n",
+ "--update": True,
+ "--usepkgonly": True,
+ },
+ success = True,
+ mergelist = [],
+ ),
+
+ )
+
+ playground = ResolverPlayground(binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/soname/test_depclean.py b/lib/portage/tests/resolver/soname/test_depclean.py
new file mode 100644
index 000000000..50cc169e3
--- /dev/null
+++ b/lib/portage/tests/resolver/soname/test_depclean.py
@@ -0,0 +1,61 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SonameDepcleanTestCase(TestCase):
+
+ def testSonameDepclean(self):
+
+ installed = {
+ "app-misc/A-1" : {
+ "RDEPEND": "dev-libs/B",
+ "DEPEND": "dev-libs/B",
+ "REQUIRES": "x86_32: libB.so.1 libc.so.6",
+ },
+ "dev-libs/B-1" : {
+ "PROVIDES": "x86_32: libB.so.1",
+ },
+ "sys-libs/glibc-2.19-r1" : {
+ "PROVIDES": "x86_32: libc.so.6"
+ },
+ }
+
+ world = ("app-misc/A",)
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ [],
+ options={
+ "--depclean": True,
+ "--ignore-soname-deps": "n",
+ },
+ success=True,
+ cleanlist=[]
+ ),
+
+ ResolverPlaygroundTestCase(
+ [],
+ options={
+ "--depclean": True,
+ "--ignore-soname-deps": "y",
+ },
+ success=True,
+ cleanlist=["sys-libs/glibc-2.19-r1"]
+ ),
+ )
+
+ playground = ResolverPlayground(debug=False,
+ installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ # Disable debug so that cleanup works.
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/soname/test_downgrade.py b/lib/portage/tests/resolver/soname/test_downgrade.py
new file mode 100644
index 000000000..a95be3406
--- /dev/null
+++ b/lib/portage/tests/resolver/soname/test_downgrade.py
@@ -0,0 +1,240 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SonameDowngradeTestCase(TestCase):
+
+ def testSingleSlot(self):
+
+ ebuilds = {
+ "dev-libs/icu-49" : {
+ },
+ "dev-libs/icu-4.8" : {
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "DEPEND": "dev-libs/icu",
+ "RDEPEND": "dev-libs/icu",
+ },
+ }
+
+ binpkgs = {
+ "dev-libs/icu-49" : {
+ "PROVIDES": "x86_32: libicu.so.49",
+ },
+ "dev-libs/icu-4.8" : {
+ "PROVIDES": "x86_32: libicu.so.48",
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "DEPEND": "dev-libs/icu",
+ "RDEPEND": "dev-libs/icu",
+ "REQUIRES": "x86_32: libicu.so.48",
+ },
+ }
+ installed = {
+ "dev-libs/icu-49" : {
+ "PROVIDES": "x86_32: libicu.so.49",
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "DEPEND": "dev-libs/icu",
+ "RDEPEND": "dev-libs/icu",
+ "REQUIRES": "x86_32: libicu.so.49",
+ },
+ }
+
+ user_config = {
+ "package.mask" : (
+ ">=dev-libs/icu-49",
+ ),
+ }
+
+ world = ["dev-libs/libxml2"]
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {
+ "--autounmask": "n",
+ "--ignore-soname-deps": "n",
+ "--oneshot": True,
+ "--usepkgonly": True
+ },
+ success = True,
+ mergelist = [
+ "[binary]dev-libs/icu-4.8",
+ "[binary]dev-libs/libxml2-2.7.8"
+ ]
+ ),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {
+ "--autounmask": "n",
+ "--ignore-soname-deps": "y",
+ "--oneshot": True,
+ "--usepkgonly": True
+ },
+ success = True,
+ mergelist = [
+ "[binary]dev-libs/icu-4.8",
+ ]
+ ),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {
+ "--autounmask": "n",
+ "--deep": True,
+ "--ignore-soname-deps": "n",
+ "--update": True,
+ "--usepkgonly": True,
+ },
+ success = True,
+ mergelist = [
+ "[binary]dev-libs/icu-4.8",
+ "[binary]dev-libs/libxml2-2.7.8"
+ ]
+ ),
+
+ # In this case, soname dependencies are not respected,
+ # because --usepkgonly is not enabled. This could be
+ # handled differently, by respecting soname dependencies
+ # as long as no unbuilt ebuilds get pulled into the graph.
+ # However, that kind of conditional dependency accounting
+ # would add a significant amount of complexity.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {
+ "--deep": True,
+ "--ignore-soname-deps": "n",
+ "--update": True,
+ "--usepkg": True,
+ },
+ success = True,
+ mergelist = [
+ "[binary]dev-libs/icu-4.8",
+ ]
+ ),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {
+ "--deep": True,
+ "--update": True,
+ },
+ success = True,
+ mergelist = [
+ "dev-libs/icu-4.8",
+ ]
+ ),
+ )
+
+ playground = ResolverPlayground(binpkgs=binpkgs,
+ ebuilds=ebuilds, installed=installed,
+ user_config=user_config, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ # Disable debug so that cleanup works.
+ playground.debug = False
+ playground.cleanup()
+
+ def testTwoSlots(self):
+
+ ebuilds = {
+ "dev-libs/glib-1.2.10" : {
+ "SLOT": "1"
+ },
+ "dev-libs/glib-2.30.2" : {
+ "SLOT": "2"
+ },
+ "dev-libs/dbus-glib-0.98" : {
+ "EAPI": "1",
+ "DEPEND": "dev-libs/glib:2",
+ "RDEPEND": "dev-libs/glib:2"
+ },
+ }
+ binpkgs = {
+ "dev-libs/glib-1.2.10" : {
+ "SLOT": "1",
+ "PROVIDES": "x86_32: libglib-1.0.so.0",
+ },
+ "dev-libs/glib-2.30.2" : {
+ "PROVIDES": "x86_32: libglib-2.0.so.30",
+ "SLOT": "2",
+ },
+ "dev-libs/glib-2.32.3" : {
+ "PROVIDES": "x86_32: libglib-2.0.so.32",
+ "SLOT": "2",
+ },
+ "dev-libs/dbus-glib-0.98" : {
+ "EAPI": "1",
+ "DEPEND": "dev-libs/glib:2",
+ "RDEPEND": "dev-libs/glib:2",
+ "REQUIRES": "x86_32: libglib-2.0.so.30",
+ },
+ }
+ installed = {
+ "dev-libs/glib-1.2.10" : {
+ "PROVIDES": "x86_32: libglib-1.0.so.0",
+ "SLOT": "1",
+ },
+ "dev-libs/glib-2.32.3" : {
+ "PROVIDES": "x86_32: libglib-2.0.so.32",
+ "SLOT": "2",
+ },
+ "dev-libs/dbus-glib-0.98" : {
+ "EAPI": "1",
+ "DEPEND": "dev-libs/glib:2",
+ "RDEPEND": "dev-libs/glib:2",
+ "REQUIRES": "x86_32: libglib-2.0.so.32",
+ },
+ }
+
+ user_config = {
+ "package.mask" : (
+ ">=dev-libs/glib-2.32",
+ ),
+ }
+
+ world = [
+ "dev-libs/glib:1",
+ "dev-libs/dbus-glib",
+ ]
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {
+ "--autounmask": "n",
+ "--deep": True,
+ "--ignore-soname-deps": "n",
+ "--update": True,
+ "--usepkgonly": True,
+ },
+ success = True,
+ mergelist = [
+ "[binary]dev-libs/glib-2.30.2",
+ "[binary]dev-libs/dbus-glib-0.98"
+ ]
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, user_config=user_config, world=world,
+ debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ # Disable debug so that cleanup works.
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/soname/test_or_choices.py b/lib/portage/tests/resolver/soname/test_or_choices.py
new file mode 100644
index 000000000..2420cd399
--- /dev/null
+++ b/lib/portage/tests/resolver/soname/test_or_choices.py
@@ -0,0 +1,92 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SonameOrChoicesTestCase(TestCase):
+
+ def testSonameConflictMissedUpdate(self):
+
+ binpkgs = {
+ "dev-lang/ocaml-4.02.1" : {
+ "EAPI": "5",
+ "PROVIDES": "x86_32: libocaml-4.02.1.so",
+ },
+
+ "dev-lang/ocaml-4.01.0" : {
+ "EAPI": "5",
+ "PROVIDES": "x86_32: libocaml-4.01.0.so",
+ },
+
+ "dev-ml/lablgl-1.05" : {
+ "DEPEND": (">=dev-lang/ocaml-3.10.2 "
+ "|| ( dev-ml/labltk <dev-lang/ocaml-4.02 )"),
+ "RDEPEND": (">=dev-lang/ocaml-3.10.2 "
+ "|| ( dev-ml/labltk <dev-lang/ocaml-4.02 )"),
+ "REQUIRES": "x86_32: libocaml-4.02.1.so",
+ },
+
+ "dev-ml/labltk-8.06.0" : {
+ "EAPI": "5",
+ "SLOT": "0/8.06.0",
+ "DEPEND": ">=dev-lang/ocaml-4.02",
+ "RDEPEND": ">=dev-lang/ocaml-4.02",
+ "REQUIRES": "x86_32: libocaml-4.02.1.so",
+ },
+ }
+
+ installed = {
+ "dev-lang/ocaml-4.01.0" : {
+ "EAPI": "5",
+ "PROVIDES": "x86_32: libocaml-4.01.0.so",
+ },
+
+ "dev-ml/lablgl-1.05" : {
+ "DEPEND": (">=dev-lang/ocaml-3.10.2 "
+ "|| ( dev-ml/labltk <dev-lang/ocaml-4.02 )"),
+ "RDEPEND": (">=dev-lang/ocaml-3.10.2 "
+ "|| ( dev-ml/labltk <dev-lang/ocaml-4.02 )"),
+ "REQUIRES": "x86_32: libocaml-4.01.0.so",
+ },
+ }
+
+ world = (
+ "dev-lang/ocaml",
+ "dev-ml/lablgl",
+ )
+
+ test_cases = (
+
+ # bug #531656: If an ocaml update is desirable,
+ # then we need to pull in dev-ml/labltk.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {
+ "--deep": True,
+ "--ignore-soname-deps": "n",
+ "--update": True,
+ "--usepkgonly": True
+ },
+ success = True,
+ mergelist = [
+ "[binary]dev-lang/ocaml-4.02.1",
+ "[binary]dev-ml/labltk-8.06.0",
+ "[binary]dev-ml/lablgl-1.05",
+ ]
+ ),
+
+ )
+
+ playground = ResolverPlayground(debug=False,
+ binpkgs=binpkgs, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ # Disable debug so that cleanup works.
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/soname/test_reinstall.py b/lib/portage/tests/resolver/soname/test_reinstall.py
new file mode 100644
index 000000000..b8f2d2c60
--- /dev/null
+++ b/lib/portage/tests/resolver/soname/test_reinstall.py
@@ -0,0 +1,87 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SonameReinstallTestCase(TestCase):
+
+ def testSonameReinstall(self):
+
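+		# Note: the PROVIDES/REQUIRES values below use Portage's soname
+		# dependency metadata format, "<multilib category>: <sonames>",
+		# e.g. "x86_32: libB.so.2".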
+ binpkgs = {
+ "app-misc/A-1" : {
+ "RDEPEND": "dev-libs/B",
+ "DEPEND": "dev-libs/B",
+ "REQUIRES": "x86_32: libB.so.2",
+ },
+ "dev-libs/B-2" : {
+ "PROVIDES": "x86_32: libB.so.2",
+ },
+ "dev-libs/B-1" : {
+ "PROVIDES": "x86_32: libB.so.1",
+ },
+ }
+
+ installed = {
+ "app-misc/A-1" : {
+ "RDEPEND": "dev-libs/B",
+ "DEPEND": "dev-libs/B",
+ "REQUIRES": "x86_32: libB.so.1",
+ },
+ "dev-libs/B-1" : {
+ "PROVIDES": "x86_32: libB.so.1",
+ },
+ }
+
+ world = ("app-misc/A",)
+
+ test_cases = (
+
+			# Test that the upgrade to dev-libs/B-2 triggers a reinstall
+			# of app-misc/A-1, whose installed instance requires libB.so.1
+			# while the available binary requires libB.so.2.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {
+ "--deep": True,
+ "--ignore-soname-deps": "n",
+ "--update": True,
+ "--usepkgonly": True
+ },
+ success = True,
+ mergelist = [
+ "[binary]dev-libs/B-2",
+ "[binary]app-misc/A-1",
+ ]
+ ),
+
+ # Test that --ignore-soname-deps prevents the above
+ # reinstall from being triggered.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {
+ "--deep": True,
+ "--ignore-soname-deps": "y",
+ "--update": True,
+ "--usepkgonly": True
+ },
+ success = True,
+ mergelist = [
+ "[binary]dev-libs/B-2",
+ ]
+ ),
+
+ )
+
+ playground = ResolverPlayground(debug=False,
+ binpkgs=binpkgs, installed=installed,
+ world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ # Disable debug so that cleanup works.
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/soname/test_skip_update.py b/lib/portage/tests/resolver/soname/test_skip_update.py
new file mode 100644
index 000000000..67e1e0242
--- /dev/null
+++ b/lib/portage/tests/resolver/soname/test_skip_update.py
@@ -0,0 +1,86 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SonameSkipUpdateTestCase(TestCase):
+
+ def testSonameSkipUpdate(self):
+
+ binpkgs = {
+ "app-misc/A-1" : {
+ "RDEPEND": "dev-libs/B",
+ "DEPEND": "dev-libs/B",
+ "REQUIRES": "x86_32: libB.so.1",
+ },
+ "dev-libs/B-2" : {
+ "PROVIDES": "x86_32: libB.so.2",
+ },
+ "dev-libs/B-1" : {
+ "PROVIDES": "x86_32: libB.so.1",
+ },
+ }
+
+ installed = {
+ "app-misc/A-1" : {
+ "RDEPEND": "dev-libs/B",
+ "DEPEND": "dev-libs/B",
+ "REQUIRES": "x86_32: libB.so.1",
+ },
+ "dev-libs/B-1" : {
+ "PROVIDES": "x86_32: libB.so.1",
+ },
+ }
+
+ world = ("app-misc/A",)
+
+ test_cases = (
+
+ # Test that --ignore-soname-deps allows the upgrade,
+ # even though it will break an soname dependency of
+ # app-misc/A-1.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {
+ "--deep": True,
+ "--ignore-soname-deps": "y",
+ "--update": True,
+ "--usepkgonly": True
+ },
+ success = True,
+ mergelist = [
+ "[binary]dev-libs/B-2",
+ ]
+ ),
+
+			# Test that the upgrade to B-2 is skipped with --usepkgonly
+ # because it will break an soname dependency that
+ # cannot be satisfied by the available binary packages.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {
+ "--deep": True,
+ "--ignore-soname-deps": "n",
+ "--update": True,
+ "--usepkgonly": True
+ },
+ success = True,
+ mergelist = []
+ ),
+
+ )
+
+ playground = ResolverPlayground(debug=False,
+ binpkgs=binpkgs, installed=installed,
+ world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ # Disable debug so that cleanup works.
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/soname/test_slot_conflict_reinstall.py b/lib/portage/tests/resolver/soname/test_slot_conflict_reinstall.py
new file mode 100644
index 000000000..f7154442e
--- /dev/null
+++ b/lib/portage/tests/resolver/soname/test_slot_conflict_reinstall.py
@@ -0,0 +1,357 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (
+ ResolverPlayground, ResolverPlaygroundTestCase)
+
+class SonameSlotConflictReinstallTestCase(TestCase):
+
+ def testSonameSlotConflictReinstall(self):
+
+ binpkgs = {
+
+ "app-misc/A-1" : {
+ "PROVIDES": "x86_32: libA-1.so",
+ },
+
+ "app-misc/A-2" : {
+ "PROVIDES": "x86_32: libA-2.so",
+ },
+
+ "app-misc/B-0" : {
+ "DEPEND": "app-misc/A",
+ "RDEPEND": "app-misc/A",
+ "REQUIRES": "x86_32: libA-2.so",
+ },
+
+ "app-misc/C-0" : {
+ "EAPI": "5",
+ "DEPEND": "<app-misc/A-2",
+ "RDEPEND": "<app-misc/A-2"
+ },
+
+ "app-misc/D-1" : {
+ "PROVIDES": "x86_32: libD-1.so",
+ },
+
+ "app-misc/D-2" : {
+ "PROVIDES": "x86_32: libD-2.so",
+ },
+
+ "app-misc/E-0" : {
+ "DEPEND": "app-misc/D",
+ "RDEPEND": "app-misc/D",
+ "REQUIRES": "x86_32: libD-2.so",
+ },
+
+ }
+
+ installed = {
+
+ "app-misc/A-1" : {
+ "PROVIDES": "x86_32: libA-1.so",
+ },
+
+ "app-misc/B-0" : {
+ "DEPEND": "app-misc/A",
+ "RDEPEND": "app-misc/A",
+ "REQUIRES": "x86_32: libA-1.so",
+ },
+
+ "app-misc/C-0" : {
+ "DEPEND": "<app-misc/A-2",
+ "RDEPEND": "<app-misc/A-2"
+ },
+
+ "app-misc/D-1" : {
+ "PROVIDES": "x86_32: libD-1.so",
+ },
+
+ "app-misc/E-0" : {
+ "DEPEND": "app-misc/D",
+ "RDEPEND": "app-misc/D",
+ "REQUIRES": "x86_32: libD-1.so",
+ },
+
+ }
+
+ world = ["app-misc/B", "app-misc/C", "app-misc/E"]
+
+ test_cases = (
+
+ # Test bug #439688, where a slot conflict prevents an
+ # upgrade and we don't want to trigger unnecessary rebuilds.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {
+ "--deep": True,
+ "--ignore-soname-deps": "n",
+ "--update": True,
+ "--usepkgonly": True,
+ "--backtrack": 10,
+ },
+ success = True,
+ mergelist = [
+ "[binary]app-misc/D-2",
+ "[binary]app-misc/E-0"
+ ]
+ ),
+
+ )
+
+ playground = ResolverPlayground(binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success,
+ True, test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
+
+ def testSonameSlotConflictMassRebuild(self):
+ """
+ Bug 486580
+ Before this bug was fixed, emerge would backtrack for each
+ package that needs a rebuild. This could cause it to hit the
+ backtrack limit and not rebuild all needed packages.
+ """
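+		# The loop below adds five binary consumers (app-misc/C0C..C4C)
+		# that all require libB-2.so, while --backtrack is capped at 3,
+		# so the resolver has to schedule all of the rebuilds without
+		# backtracking once per consumer.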
+ binpkgs = {
+
+ "app-misc/A-1" : {
+ "DEPEND": "app-misc/B",
+ "RDEPEND": "app-misc/B",
+ "REQUIRES": "x86_32: libB-2.so",
+ },
+
+ "app-misc/B-1" : {
+ "SLOT": "1",
+ "PROVIDES": "x86_32: libB-1.so",
+ },
+
+ "app-misc/B-2" : {
+ "SLOT": "2",
+ "PROVIDES": "x86_32: libB-2.so",
+ },
+ }
+
+ installed = {
+ "app-misc/B-1" : {
+ "SLOT": "1",
+ "PROVIDES": "x86_32: libB-1.so",
+ },
+ }
+
+ expected_mergelist = [
+ '[binary]app-misc/A-1',
+ '[binary]app-misc/B-2'
+ ]
+
+ for i in range(5):
+ binpkgs["app-misc/C%sC-1" % i] = {
+ "DEPEND": "app-misc/B",
+ "RDEPEND": "app-misc/B",
+ "REQUIRES": "x86_32: libB-2.so",
+ }
+
+ installed["app-misc/C%sC-1" % i] = {
+ "DEPEND": "app-misc/B",
+ "RDEPEND": "app-misc/B",
+ "REQUIRES": "x86_32: libB-1.so",
+ }
+ for x in ("DEPEND", "RDEPEND"):
+ binpkgs["app-misc/A-1"][x] += " app-misc/C%sC" % i
+
+ expected_mergelist.append("[binary]app-misc/C%sC-1" % i)
+
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ ignore_mergelist_order=True,
+ all_permutations=True,
+ options = {
+ "--backtrack": 3,
+ "--deep": True,
+ "--ignore-soname-deps": "n",
+ "--update": True,
+ "--usepkgonly": True,
+ },
+ success = True,
+ mergelist = expected_mergelist),
+ )
+
+ world = []
+
+ playground = ResolverPlayground(binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success,
+ True, test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
+
+ def testSonameSlotConflictForgottenChild(self):
+ """
+		Similar to testSonameSlotConflictMassRebuild above, but this
+		time the rebuilds are scheduled while the package causing the
+		rebuild (the child) is not installed.
+ """
+ binpkgs = {
+
+ "app-misc/A-2" : {
+ "DEPEND": "app-misc/B app-misc/C",
+ "RDEPEND": "app-misc/B app-misc/C",
+ "REQUIRES": "x86_32: libB-2.so",
+ },
+
+ "app-misc/B-2" : {
+ "PROVIDES": "x86_32: libB-2.so",
+ "SLOT": "2",
+ },
+
+ "app-misc/C-1": {
+ "DEPEND": "app-misc/B",
+ "RDEPEND": "app-misc/B",
+ "REQUIRES": "x86_32: libB-2.so",
+ },
+ }
+
+ installed = {
+ "app-misc/A-1" : {
+ "DEPEND": "app-misc/B app-misc/C",
+ "RDEPEND": "app-misc/B app-misc/C",
+ "REQUIRES": "x86_32: libB-1.so",
+ },
+
+ "app-misc/B-1" : {
+ "PROVIDES": "x86_32: libB-1.so",
+ "SLOT": "1",
+ },
+
+ "app-misc/C-1": {
+ "DEPEND": "app-misc/B",
+ "RDEPEND": "app-misc/B",
+ "REQUIRES": "x86_32: libB-1.so",
+ },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ options = {
+ "--ignore-soname-deps": "n",
+ "--usepkgonly": True,
+ },
+ success = True,
+ mergelist = [
+ '[binary]app-misc/B-2',
+ '[binary]app-misc/A-2',
+ ]
+ ),
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {
+ "--ignore-soname-deps": "n",
+ "--usepkgonly": True,
+ "--update": True,
+ "--deep": True,
+ },
+ success = True,
+ mergelist = [
+ '[binary]app-misc/B-2',
+ '[binary]app-misc/C-1',
+ '[binary]app-misc/A-2',
+ ]
+ ),
+ )
+
+ world = ['app-misc/A']
+
+ playground = ResolverPlayground(binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
+
+ def testSonameSlotConflictMixedDependencies(self):
+ """
+ Bug 487198
+ For parents with mixed >= and < dependencies, we scheduled
+ reinstalls for the >= atom, but in the end didn't install the
+ child update because of the < atom.
+ """
+ binpkgs = {
+ "cat/slotted-lib-1" : {
+ "PROVIDES": "x86_32: lib1.so",
+ "SLOT": "1",
+ },
+ "cat/slotted-lib-2" : {
+ "PROVIDES": "x86_32: lib2.so",
+ "SLOT": "2",
+ },
+ "cat/slotted-lib-3" : {
+ "PROVIDES": "x86_32: lib3.so",
+ "SLOT": "3",
+ },
+ "cat/slotted-lib-4" : {
+ "PROVIDES": "x86_32: lib4.so",
+ "SLOT": "4",
+ },
+ "cat/slotted-lib-5" : {
+ "PROVIDES": "x86_32: lib5.so",
+ "SLOT": "5",
+ },
+ "cat/user-1" : {
+ "DEPEND": ">=cat/slotted-lib-2 <cat/slotted-lib-4",
+ "RDEPEND": ">=cat/slotted-lib-2 <cat/slotted-lib-4",
+ "REQUIRES": "x86_32: lib3.so",
+ },
+ }
+
+ installed = {
+ "cat/slotted-lib-3" : {
+ "PROVIDES": "x86_32: lib3.so",
+ "SLOT": "3",
+ },
+ "cat/user-1" : {
+ "DEPEND": ">=cat/slotted-lib-2 <cat/slotted-lib-4",
+ "RDEPEND": ">=cat/slotted-lib-2 <cat/slotted-lib-4",
+ "REQUIRES": "x86_32: lib3.so",
+ },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["cat/user"],
+ options = {
+ "--deep": True,
+ "--ignore-soname-deps": "n",
+ "--update": True,
+ "--usepkgonly": True,
+ },
+ success = True,
+ mergelist = []),
+ )
+
+ world = []
+
+ playground = ResolverPlayground(binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success,
+ True, test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/soname/test_slot_conflict_update.py b/lib/portage/tests/resolver/soname/test_slot_conflict_update.py
new file mode 100644
index 000000000..c6074967a
--- /dev/null
+++ b/lib/portage/tests/resolver/soname/test_slot_conflict_update.py
@@ -0,0 +1,117 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (
+ ResolverPlayground, ResolverPlaygroundTestCase)
+
+class SonameSlotConflictUpdateTestCase(TestCase):
+
+ def testSonameSlotConflictUpdate(self):
+
+ binpkgs = {
+
+ "app-text/podofo-0.9.2" : {
+ "RDEPEND" : "dev-util/boost-build",
+ },
+
+ "dev-cpp/libcmis-0.3.1" : {
+ "DEPEND": "dev-libs/boost",
+ "RDEPEND": "dev-libs/boost",
+ "REQUIRES": "x86_32: libboost-1.53.so",
+ },
+
+ "dev-libs/boost-1.53.0" : {
+ "PROVIDES": "x86_32: libboost-1.53.so",
+ "RDEPEND" : "=dev-util/boost-build-1.53.0",
+ },
+
+ "dev-libs/boost-1.52.0" : {
+ "PROVIDES": "x86_32: libboost-1.52.so",
+ "RDEPEND" : "=dev-util/boost-build-1.52.0",
+ },
+
+ "dev-util/boost-build-1.53.0" : {
+ },
+
+ "dev-util/boost-build-1.52.0" : {
+ },
+
+
+ }
+
+ installed = {
+
+ "app-text/podofo-0.9.2" : {
+ "RDEPEND" : "dev-util/boost-build",
+ },
+
+ "dev-cpp/libcmis-0.3.1" : {
+ "DEPEND": "dev-libs/boost",
+ "RDEPEND": "dev-libs/boost",
+ "REQUIRES": "x86_32: libboost-1.52.so",
+ },
+
+ "dev-util/boost-build-1.52.0" : {
+ },
+
+ "dev-libs/boost-1.52.0" : {
+ "PROVIDES": "x86_32: libboost-1.52.so",
+ "RDEPEND" : "=dev-util/boost-build-1.52.0",
+ },
+
+ }
+
+ world = [
+ "dev-cpp/libcmis",
+ "dev-libs/boost",
+ "app-text/podofo",
+ ]
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ world,
+ all_permutations = True,
+ options = {
+ "--deep": True,
+ "--ignore-soname-deps": "n",
+ "--update": True,
+ "--usepkgonly": True,
+ },
+ success = True,
+ mergelist = [
+ '[binary]dev-util/boost-build-1.53.0',
+ '[binary]dev-libs/boost-1.53.0',
+ '[binary]dev-cpp/libcmis-0.3.1'
+ ]
+ ),
+
+ ResolverPlaygroundTestCase(
+ world,
+ all_permutations = True,
+ options = {
+ "--deep": True,
+ "--ignore-soname-deps": "y",
+ "--update": True,
+ "--usepkgonly": True,
+ },
+ success = True,
+ mergelist = [
+ '[binary]dev-util/boost-build-1.53.0',
+ '[binary]dev-libs/boost-1.53.0',
+ ]
+ ),
+
+ )
+
+ playground = ResolverPlayground(binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success,
+ True, test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/soname/test_soname_provided.py b/lib/portage/tests/resolver/soname/test_soname_provided.py
new file mode 100644
index 000000000..162da47d1
--- /dev/null
+++ b/lib/portage/tests/resolver/soname/test_soname_provided.py
@@ -0,0 +1,78 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (
+ ResolverPlayground, ResolverPlaygroundTestCase)
+
+class SonameProvidedTestCase(TestCase):
+
+ def testSonameProvided(self):
+
+ binpkgs = {
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "PROVIDES": "x86_32: libA.so.1",
+ },
+ "app-misc/B-1" : {
+ "DEPEND": "app-misc/A",
+ "RDEPEND": "app-misc/A",
+ "REQUIRES": "x86_32: libA.so.2",
+ },
+ "app-misc/B-0" : {
+ "DEPEND": "app-misc/A",
+ "RDEPEND": "app-misc/A",
+ "REQUIRES": "x86_32: libA.so.1",
+ },
+ }
+
+ installed = {
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "PROVIDES": "x86_32: libA.so.1",
+ },
+
+ "app-misc/B-0" : {
+ "DEPEND": "app-misc/A",
+ "RDEPEND": "app-misc/A",
+ "REQUIRES": "x86_32: libA.so.1",
+ },
+ }
+
+ world = ["app-misc/B"]
+
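+		# soname.provided entries use the "<multilib category> <sonames>"
+		# format and mark sonames as provided outside of any installed
+		# package.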
+ profile = {
+ "soname.provided": (
+ "x86_32 libA.so.2",
+ ),
+ }
+
+ test_cases = (
+
+ # Allow update due to soname dependency satisfied by
+ # soname.provided.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {
+ "--deep": True,
+ "--ignore-soname-deps": "n",
+ "--update": True,
+ "--usepkgonly": True,
+ },
+ success = True,
+ mergelist = ["[binary]app-misc/B-1"],
+ ),
+
+ )
+
+ playground = ResolverPlayground(binpkgs=binpkgs, debug=False,
+ profile=profile, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(
+ test_case.test_success, True, test_case.fail_msg)
+ finally:
+ # Disable debug so that cleanup works.
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/soname/test_unsatisfiable.py b/lib/portage/tests/resolver/soname/test_unsatisfiable.py
new file mode 100644
index 000000000..039a9df26
--- /dev/null
+++ b/lib/portage/tests/resolver/soname/test_unsatisfiable.py
@@ -0,0 +1,71 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (
+ ResolverPlayground, ResolverPlaygroundTestCase)
+
+class SonameUnsatisfiableTestCase(TestCase):
+
+ def testSonameUnsatisfiable(self):
+
+ binpkgs = {
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "PROVIDES": "x86_32: libA.so.1",
+ },
+ "app-misc/B-1" : {
+ "DEPEND": "app-misc/A",
+ "RDEPEND": "app-misc/A",
+ "REQUIRES": "x86_32: libA.so.2",
+ },
+ "app-misc/B-0" : {
+ "DEPEND": "app-misc/A",
+ "RDEPEND": "app-misc/A",
+ "REQUIRES": "x86_32: libA.so.1",
+ },
+ }
+
+ installed = {
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "PROVIDES": "x86_32: libA.so.1",
+ },
+
+ "app-misc/B-0" : {
+ "DEPEND": "app-misc/A",
+ "RDEPEND": "app-misc/A",
+ "REQUIRES": "x86_32: libA.so.1",
+ },
+ }
+
+ world = ["app-misc/B"]
+
+ test_cases = (
+
+ # Skip update due to unsatisfied soname dependency.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {
+ "--deep": True,
+ "--ignore-soname-deps": "n",
+ "--update": True,
+ "--usepkgonly": True,
+ },
+ success = True,
+ mergelist = [],
+ ),
+
+ )
+
+ playground = ResolverPlayground(binpkgs=binpkgs, debug=False,
+ installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(
+ test_case.test_success, True, test_case.fail_msg)
+ finally:
+ # Disable debug so that cleanup works.
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/soname/test_unsatisfied.py b/lib/portage/tests/resolver/soname/test_unsatisfied.py
new file mode 100644
index 000000000..27cdcc440
--- /dev/null
+++ b/lib/portage/tests/resolver/soname/test_unsatisfied.py
@@ -0,0 +1,87 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SonameUnsatisfiedTestCase(TestCase):
+
+ def testSonameUnsatisfied(self):
+
+ binpkgs = {
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "PROVIDES": "x86_32: libA.so.1",
+ },
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "PROVIDES": "x86_32: libA.so.2",
+ },
+ "app-misc/B-0" : {
+ "DEPEND": "app-misc/A",
+ "RDEPEND": "app-misc/A",
+ "REQUIRES": "x86_32: libA.so.2",
+ }
+ }
+
+ installed = {
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "PROVIDES": "x86_32: libA.so.2",
+ },
+
+ "app-misc/B-0" : {
+ "DEPEND": "app-misc/A",
+ "RDEPEND": "app-misc/A",
+ "REQUIRES": "x86_32: libA.so.1",
+ }
+ }
+
+ world = ["app-misc/B"]
+
+ test_cases = (
+
+ # Demonstrate bug #439694, where a broken
+ # soname dependency needs to trigger a reinstall.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {
+ "--deep": True,
+ "--ignore-soname-deps": "n",
+ "--update": True,
+ "--usepkgonly": True,
+ },
+ success = True,
+ mergelist = [
+ "[binary]app-misc/B-0"
+ ]
+ ),
+
+ # This doesn't trigger a reinstall, since there's no version
+ # change to trigger complete graph mode, and initially
+ # unsatisfied deps are ignored in complete graph mode anyway.
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ options = {
+ "--ignore-soname-deps": "n",
+ "--oneshot": True,
+ "--usepkgonly": True,
+ },
+ success = True,
+ mergelist = [
+ "[binary]app-misc/A-2"
+ ]
+ ),
+ )
+
+ playground = ResolverPlayground(binpkgs=binpkgs, debug=False,
+ installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ # Disable debug so that cleanup works.
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_autounmask.py b/lib/portage/tests/resolver/test_autounmask.py
new file mode 100644
index 000000000..809d42104
--- /dev/null
+++ b/lib/portage/tests/resolver/test_autounmask.py
@@ -0,0 +1,599 @@
+# Copyright 2010-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class AutounmaskTestCase(TestCase):
+
+ def testAutounmask(self):
+
+ ebuilds = {
+ #ebuilds to test use changes
+ "dev-libs/A-1": { "SLOT": 1, "DEPEND": "dev-libs/B[foo]", "EAPI": 2},
+ "dev-libs/A-2": { "SLOT": 2, "DEPEND": "dev-libs/B[bar]", "EAPI": 2},
+ "dev-libs/B-1": { "DEPEND": "foo? ( dev-libs/C ) bar? ( dev-libs/D )", "IUSE": "foo bar"},
+ "dev-libs/C-1": {},
+ "dev-libs/D-1": {},
+
+ #ebuilds to test if we allow changing of masked or forced flags
+ "dev-libs/E-1": { "SLOT": 1, "DEPEND": "dev-libs/F[masked-flag]", "EAPI": 2},
+ "dev-libs/E-2": { "SLOT": 2, "DEPEND": "dev-libs/G[-forced-flag]", "EAPI": 2},
+ "dev-libs/F-1": { "IUSE": "masked-flag"},
+ "dev-libs/G-1": { "IUSE": "forced-flag"},
+
+ #ebuilds to test keyword changes
+ "app-misc/Z-1": { "KEYWORDS": "~x86", "DEPEND": "app-misc/Y" },
+ "app-misc/Y-1": { "KEYWORDS": "~x86" },
+ "app-misc/W-1": {},
+ "app-misc/W-2": { "KEYWORDS": "~x86" },
+ "app-misc/V-1": { "KEYWORDS": "~x86", "DEPEND": ">=app-misc/W-2"},
+
+ #ebuilds to test mask and keyword changes
+ "app-text/A-1": {},
+ "app-text/B-1": { "KEYWORDS": "~x86" },
+ "app-text/C-1": { "KEYWORDS": "" },
+ "app-text/D-1": { "KEYWORDS": "~x86" },
+ "app-text/D-2": { "KEYWORDS": "" },
+
+ #ebuilds for mixed test for || dep handling
+ "sci-libs/K-1": { "DEPEND": " || ( sci-libs/L[bar] || ( sci-libs/M sci-libs/P ) )", "EAPI": 2},
+ "sci-libs/K-2": { "DEPEND": " || ( sci-libs/L[bar] || ( sci-libs/P sci-libs/M ) )", "EAPI": 2},
+ "sci-libs/K-3": { "DEPEND": " || ( sci-libs/M || ( sci-libs/L[bar] sci-libs/P ) )", "EAPI": 2},
+ "sci-libs/K-4": { "DEPEND": " || ( sci-libs/M || ( sci-libs/P sci-libs/L[bar] ) )", "EAPI": 2},
+ "sci-libs/K-5": { "DEPEND": " || ( sci-libs/P || ( sci-libs/L[bar] sci-libs/M ) )", "EAPI": 2},
+ "sci-libs/K-6": { "DEPEND": " || ( sci-libs/P || ( sci-libs/M sci-libs/L[bar] ) )", "EAPI": 2},
+ "sci-libs/K-7": { "DEPEND": " || ( sci-libs/M sci-libs/L[bar] )", "EAPI": 2},
+ "sci-libs/K-8": { "DEPEND": " || ( sci-libs/L[bar] sci-libs/M )", "EAPI": 2},
+
+ "sci-libs/L-1": { "IUSE": "bar" },
+ "sci-libs/M-1": { "KEYWORDS": "~x86" },
+ "sci-libs/P-1": { },
+
+ #ebuilds to test these nice "required by cat/pkg[foo]" messages
+ "dev-util/Q-1": { "DEPEND": "foo? ( dev-util/R[bar] )", "IUSE": "+foo", "EAPI": 2 },
+ "dev-util/Q-2": { "RDEPEND": "!foo? ( dev-util/R[bar] )", "IUSE": "foo", "EAPI": 2 },
+ "dev-util/R-1": { "IUSE": "bar" },
+
+ #ebuilds to test interaction with REQUIRED_USE
+ "app-portage/A-1": { "DEPEND": "app-portage/B[foo]", "EAPI": 2 },
+ "app-portage/A-2": { "DEPEND": "app-portage/B[foo=]", "IUSE": "+foo", "REQUIRED_USE": "foo", "EAPI": "4" },
+
+ "app-portage/B-1": { "IUSE": "foo +bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
+ "app-portage/C-1": { "IUSE": "+foo +bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
+
+ "sci-mathematics/octave-4.2.2": {
+ "EAPI": 6,
+ "RDEPEND": ">=x11-libs/qscintilla-2.9.3-r2:=[qt5(+)]",
+ },
+ "x11-libs/qscintilla-2.9.4": {
+ "EAPI": 6,
+ "IUSE": "+qt4 qt5",
+ "REQUIRED_USE": "^^ ( qt4 qt5 )",
+ },
+ "x11-libs/qscintilla-2.10": {
+ "EAPI": 6,
+ "KEYWORDS": "~x86",
+ "IUSE": "qt4 +qt5",
+ },
+ }
+
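+		# Note: when autounmask suggests configuration changes, the
+		# resolve is reported as unsuccessful (success=False); these
+		# cases verify the suggested changes and the pending mergelist.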
+ test_cases = (
+ #Test USE changes.
+ #The simple case.
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A:1"],
+ options={"--autounmask": "n"},
+ success=False),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A:1"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1"],
+ use_changes={ "dev-libs/B-1": {"foo": True} }),
+
+ #Make sure we restart if needed.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A:1", "dev-libs/B"],
+ options={"--autounmask": True, "--autounmask-backtrack": "y"},
+ all_permutations=True,
+ success=False,
+ mergelist=["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1"],
+ use_changes={ "dev-libs/B-1": {"foo": True} }),
+
+ # With --autounmask-backtrack=y:
+ #[ebuild N ] dev-libs/C-1
+ #[ebuild N ] dev-libs/B-1 USE="foo -bar"
+ #[ebuild N ] dev-libs/A-1
+ #
+ #The following USE changes are necessary to proceed:
+ # (see "package.use" in the portage(5) man page for more details)
+ ## required by dev-libs/A-1::test_repo
+ ## required by dev-libs/A:1 (argument)
+ #>=dev-libs/B-1 foo
+
+ # Without --autounmask-backtrack=y:
+ #[ebuild N ] dev-libs/B-1 USE="foo -bar"
+ #[ebuild N ] dev-libs/A-1
+ #
+ #The following USE changes are necessary to proceed:
+ # (see "package.use" in the portage(5) man page for more details)
+ ## required by dev-libs/A-1::test_repo
+ ## required by dev-libs/A:1 (argument)
+ #>=dev-libs/B-1 foo
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A:1", "dev-libs/A:2", "dev-libs/B"],
+ options={"--autounmask": True, "--autounmask-backtrack": "y"},
+ all_permutations=True,
+ success=False,
+ mergelist=["dev-libs/D-1", "dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1", "dev-libs/A-2"],
+ ignore_mergelist_order=True,
+ use_changes={ "dev-libs/B-1": {"foo": True, "bar": True} }),
+
+ # With --autounmask-backtrack=y:
+ #[ebuild N ] dev-libs/C-1
+ #[ebuild N ] dev-libs/D-1
+ #[ebuild N ] dev-libs/B-1 USE="bar foo"
+ #[ebuild N ] dev-libs/A-2
+ #[ebuild N ] dev-libs/A-1
+ #
+ #The following USE changes are necessary to proceed:
+ # (see "package.use" in the portage(5) man page for more details)
+ ## required by dev-libs/A-2::test_repo
+ ## required by dev-libs/A:2 (argument)
+ #>=dev-libs/B-1 bar foo
+
+ # Without --autounmask-backtrack=y:
+ #[ebuild N ] dev-libs/B-1 USE="bar foo"
+ #[ebuild N ] dev-libs/A-1
+ #[ebuild N ] dev-libs/A-2
+ #
+ #The following USE changes are necessary to proceed:
+ # (see "package.use" in the portage(5) man page for more details)
+ ## required by dev-libs/A-1::test_repo
+ ## required by dev-libs/A:1 (argument)
+ #>=dev-libs/B-1 foo bar
+
+ # NOTE: The --autounmask-backtrack=n behavior is acceptable, but
+ # it would be nicer if it added the dev-libs/C-1 and dev-libs/D-1
+ # deps to the depgraph without backtracking. It could add two
+ # instances of dev-libs/B-1 to the graph with different USE flags,
+ # and then use _solve_non_slot_operator_slot_conflicts to eliminate
+ # the redundant instance.
+
+ #Test keywording.
+ #The simple case.
+
+ ResolverPlaygroundTestCase(
+ ["app-misc/Z"],
+ options={"--autounmask": "n"},
+ success=False),
+ ResolverPlaygroundTestCase(
+ ["app-misc/Z"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["app-misc/Y-1", "app-misc/Z-1"],
+ unstable_keywords=["app-misc/Y-1", "app-misc/Z-1"]),
+
+ #Make sure that the backtracking for slot conflicts handles our mess.
+
+ ResolverPlaygroundTestCase(
+ ["=app-misc/V-1", "app-misc/W"],
+ options={"--autounmask": True},
+ all_permutations=True,
+ success=False,
+ mergelist=["app-misc/W-2", "app-misc/V-1"],
+ unstable_keywords=["app-misc/W-2", "app-misc/V-1"]),
+
+ #Mixed testing
+ #Make sure we don't change use for something in a || dep if there is another choice
+ #that needs no change.
+
+ ResolverPlaygroundTestCase(
+ ["=sci-libs/K-1"],
+ options={"--autounmask": True},
+ success=True,
+ mergelist=["sci-libs/P-1", "sci-libs/K-1"]),
+ ResolverPlaygroundTestCase(
+ ["=sci-libs/K-2"],
+ options={"--autounmask": True},
+ success=True,
+ mergelist=["sci-libs/P-1", "sci-libs/K-2"]),
+ ResolverPlaygroundTestCase(
+ ["=sci-libs/K-3"],
+ options={"--autounmask": True},
+ success=True,
+ mergelist=["sci-libs/P-1", "sci-libs/K-3"]),
+ ResolverPlaygroundTestCase(
+ ["=sci-libs/K-4"],
+ options={"--autounmask": True},
+ success=True,
+ mergelist=["sci-libs/P-1", "sci-libs/K-4"]),
+ ResolverPlaygroundTestCase(
+ ["=sci-libs/K-5"],
+ options={"--autounmask": True},
+ success=True,
+ mergelist=["sci-libs/P-1", "sci-libs/K-5"]),
+ ResolverPlaygroundTestCase(
+ ["=sci-libs/K-6"],
+ options={"--autounmask": True},
+ success=True,
+ mergelist=["sci-libs/P-1", "sci-libs/K-6"]),
+
+ #Make sure we prefer use changes over keyword changes.
+ ResolverPlaygroundTestCase(
+ ["=sci-libs/K-7"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["sci-libs/L-1", "sci-libs/K-7"],
+ use_changes={ "sci-libs/L-1": { "bar": True } }),
+ ResolverPlaygroundTestCase(
+ ["=sci-libs/K-8"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["sci-libs/L-1", "sci-libs/K-8"],
+ use_changes={ "sci-libs/L-1": { "bar": True } }),
+
+ #Test these nice "required by cat/pkg[foo]" messages.
+ ResolverPlaygroundTestCase(
+ ["=dev-util/Q-1"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-util/R-1", "dev-util/Q-1"],
+ use_changes={ "dev-util/R-1": { "bar": True } }),
+ ResolverPlaygroundTestCase(
+ ["=dev-util/Q-2"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-util/R-1", "dev-util/Q-2"],
+ use_changes={ "dev-util/R-1": { "bar": True } }),
+
+ #Test interaction with REQUIRED_USE.
+ # Some of these cases trigger USE change(s) that violate
+ # REQUIRED_USE, so the USE changes are shown along with
+ # the REQUIRED_USE violation that they would trigger.
+
+ # The following USE changes are necessary to proceed:
+ # (see "package.use" in the portage(5) man page for more details)
+ # # required by app-portage/A-1::test_repo
+ # # required by =app-portage/A-1 (argument)
+ # >=app-portage/B-1 foo
+ #
+ # !!! The ebuild selected to satisfy "app-portage/B[foo]" has unmet requirements.
+ # - app-portage/B-1::test_repo USE="bar (forced-flag) -foo"
+ #
+ # The following REQUIRED_USE flag constraints are unsatisfied:
+ # exactly-one-of ( foo bar )
+ ResolverPlaygroundTestCase(
+ ["=app-portage/A-1"],
+ options={ "--autounmask": True },
+ use_changes={"app-portage/B-1": {"foo": True}},
+ success=False),
+
+ # The following USE changes are necessary to proceed:
+ # (see "package.use" in the portage(5) man page for more details)
+ # # required by app-portage/A-2::test_repo
+ # # required by =app-portage/A-2 (argument)
+ # >=app-portage/B-1 foo
+ #
+ # !!! The ebuild selected to satisfy "app-portage/B[foo=]" has unmet requirements.
+ # - app-portage/B-1::test_repo USE="bar (forced-flag) -foo"
+ #
+ # The following REQUIRED_USE flag constraints are unsatisfied:
+ # exactly-one-of ( foo bar )
+ ResolverPlaygroundTestCase(
+ ["=app-portage/A-2"],
+ options={ "--autounmask": True },
+ use_changes={"app-portage/B-1": {"foo": True}},
+ success=False),
+ ResolverPlaygroundTestCase(
+ ["=app-portage/C-1"],
+ options={ "--autounmask": True },
+ use_changes=None,
+ success=False),
+
+			# Test bug 622462, where autounmask inappropriately unmasked a
+			# newer version rather than reporting the unsatisfied REQUIRED_USE.
+ #
+ # The following USE changes are necessary to proceed:
+ # (see "package.use" in the portage(5) man page for more details)
+ # # required by sci-mathematics/octave-4.2.2::test_repo
+ # # required by sci-mathematics/octave (argument)
+ # >=x11-libs/qscintilla-2.9.4 qt5
+ #
+ # !!! The ebuild selected to satisfy ">=x11-libs/qscintilla-2.9.3-r2:=[qt5(+)]" has unmet requirements.
+ # - x11-libs/qscintilla-2.9.4::test_repo USE="qt4 -qt5"
+ #
+ # The following REQUIRED_USE flag constraints are unsatisfied:
+ # exactly-one-of ( qt4 qt5 )
+ #
+ # (dependency required by "sci-mathematics/octave-4.2.2::test_repo" [ebuild])
+ # (dependency required by "sci-mathematics/octave" [argument])
+ ResolverPlaygroundTestCase(
+ ["sci-mathematics/octave"],
+ options={"--autounmask": True},
+ use_changes={"x11-libs/qscintilla-2.9.4": {"qt5": True}},
+ success=False),
+
+ #Make sure we don't change masked/forced flags.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E:1"],
+ options={"--autounmask": True},
+ use_changes=None,
+ success=False),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E:2"],
+ options={"--autounmask": True},
+ use_changes=None,
+ success=False),
+
+ #Test mask and keyword changes.
+ ResolverPlaygroundTestCase(
+ ["app-text/A"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["app-text/A-1"],
+ needed_p_mask_changes=["app-text/A-1"]),
+ ResolverPlaygroundTestCase(
+ ["app-text/B"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["app-text/B-1"],
+ unstable_keywords=["app-text/B-1"],
+ needed_p_mask_changes=["app-text/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["app-text/C"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["app-text/C-1"],
+ unstable_keywords=["app-text/C-1"],
+ needed_p_mask_changes=["app-text/C-1"]),
+ #Make sure unstable keyword is preferred over missing keyword
+ ResolverPlaygroundTestCase(
+ ["app-text/D"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["app-text/D-1"],
+ unstable_keywords=["app-text/D-1"]),
+ #Test missing keyword
+ ResolverPlaygroundTestCase(
+ ["=app-text/D-2"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["app-text/D-2"],
+ unstable_keywords=["app-text/D-2"])
+ )
+
+ profile = {
+ "use.mask":
+ (
+ "masked-flag",
+ ),
+ "use.force":
+ (
+ "forced-flag",
+ ),
+ "package.mask":
+ (
+ "app-text/A",
+ "app-text/B",
+ "app-text/C",
+ ),
+ }
+
+ playground = ResolverPlayground(ebuilds=ebuilds, profile=profile)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testAutounmaskForLicenses(self):
+
+ ebuilds = {
+ "dev-libs/A-1": { "LICENSE": "TEST" },
+ "dev-libs/B-1": { "LICENSE": "TEST", "IUSE": "foo", "KEYWORDS": "~x86"},
+ "dev-libs/C-1": { "DEPEND": "dev-libs/B[foo]", "EAPI": 2 },
+
+ "dev-libs/D-1": { "DEPEND": "dev-libs/E dev-libs/F", "LICENSE": "TEST" },
+ "dev-libs/E-1": { "LICENSE": "TEST" },
+ "dev-libs/E-2": { "LICENSE": "TEST" },
+ "dev-libs/F-1": { "DEPEND": "=dev-libs/E-1", "LICENSE": "TEST" },
+
+ "dev-java/sun-jdk-1.6.0.32": { "LICENSE": "TEST", "KEYWORDS": "~x86" },
+ "dev-java/sun-jdk-1.6.0.31": { "LICENSE": "TEST", "KEYWORDS": "x86" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1"],
+ options={"--autounmask": 'n'},
+ success=False),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-libs/A-1"],
+ license_changes={ "dev-libs/A-1": set(["TEST"]) }),
+
+ #Test license+keyword+use change at once.
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/C-1"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-libs/B-1", "dev-libs/C-1"],
+ license_changes={ "dev-libs/B-1": set(["TEST"]) },
+ unstable_keywords=["dev-libs/B-1"],
+ use_changes={ "dev-libs/B-1": { "foo": True } }),
+
+ #Test license with backtracking.
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/D-1"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-libs/E-1", "dev-libs/F-1", "dev-libs/D-1"],
+ license_changes={ "dev-libs/D-1": set(["TEST"]), "dev-libs/E-1": set(["TEST"]), "dev-libs/E-2": set(["TEST"]), "dev-libs/F-1": set(["TEST"]) }),
+
+ #Test license only for bug #420847
+ ResolverPlaygroundTestCase(
+ ["dev-java/sun-jdk"],
+ options={"--autounmask": True},
+ success=False,
+ mergelist=["dev-java/sun-jdk-1.6.0.31"],
+ license_changes={ "dev-java/sun-jdk-1.6.0.31": set(["TEST"]) }),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testAutounmaskAndSets(self):
+
+ ebuilds = {
+ #ebuilds to test use changes
+ "dev-libs/A-1": { },
+ "dev-libs/A-2": { "KEYWORDS": "~x86" },
+ "dev-libs/B-1": { "DEPEND": "dev-libs/A" },
+ "dev-libs/C-1": { "DEPEND": ">=dev-libs/A-2" },
+ "dev-libs/D-1": { "DEPEND": "dev-libs/A" },
+ }
+
+ world_sets = ["@test-set"]
+ sets = {
+ "test-set": (
+ "dev-libs/A", "dev-libs/B", "dev-libs/C", "dev-libs/D",
+ ),
+ }
+
+ test_cases = (
+ #Test USE changes.
+ #The simple case.
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B", "dev-libs/C", "dev-libs/D"],
+ all_permutations=True,
+ options={"--autounmask": "y"},
+ mergelist=["dev-libs/A-2", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
+ ignore_mergelist_order=True,
+ unstable_keywords=["dev-libs/A-2"],
+ success=False),
+
+ ResolverPlaygroundTestCase(
+ ["@test-set"],
+ all_permutations=True,
+ options={"--autounmask": "y"},
+ mergelist=["dev-libs/A-2", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
+ ignore_mergelist_order=True,
+ unstable_keywords=["dev-libs/A-2"],
+ success=False),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ all_permutations=True,
+ options={"--autounmask": "y"},
+ mergelist=["dev-libs/A-2", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
+ ignore_mergelist_order=True,
+ unstable_keywords=["dev-libs/A-2"],
+ success=False),
+ )
+
+
+ playground = ResolverPlayground(ebuilds=ebuilds, world_sets=world_sets, sets=sets)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testAutounmaskKeepMasks(self):
+ """
+		Ensure that --autounmask-keep-masks=y prevents autounmask from
+		suggesting package.mask changes, while --autounmask-keep-masks=n
+		allows them.
+ """
+ ebuilds = {
+ "app-text/A-1": {},
+ }
+
+ test_cases = (
+ #Test mask and keyword changes.
+ ResolverPlaygroundTestCase(
+ ["app-text/A"],
+ options={"--autounmask": True,
+ "--autounmask-keep-masks": "y"},
+ success=False),
+ ResolverPlaygroundTestCase(
+ ["app-text/A"],
+ options={"--autounmask": True,
+ "--autounmask-keep-masks": "n"},
+ success=False,
+ mergelist=["app-text/A-1"],
+ needed_p_mask_changes=["app-text/A-1"]),
+ )
+
+ profile = {
+ "package.mask":
+ (
+ "app-text/A",
+ ),
+ }
+
+ playground = ResolverPlayground(ebuilds=ebuilds, profile=profile)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testAutounmask9999(self):
+
+ ebuilds = {
+ "dev-libs/A-1": { },
+ "dev-libs/A-2": { },
+ "dev-libs/A-9999": { "KEYWORDS": "" },
+ "dev-libs/B-1": { "DEPEND": ">=dev-libs/A-2" },
+ "dev-libs/C-1": { "DEPEND": ">=dev-libs/A-3" },
+ }
+
+ profile = {
+ "package.mask":
+ (
+ ">=dev-libs/A-2",
+ ),
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ success=False,
+ mergelist=["dev-libs/A-2", "dev-libs/B-1"],
+ needed_p_mask_changes=set(["dev-libs/A-2"])),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ success=False,
+ mergelist=["dev-libs/A-9999", "dev-libs/C-1"],
+ unstable_keywords=set(["dev-libs/A-9999"]),
+ needed_p_mask_changes=set(["dev-libs/A-9999"])),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, profile=profile)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_autounmask_binpkg_use.py b/lib/portage/tests/resolver/test_autounmask_binpkg_use.py
new file mode 100644
index 000000000..1ca4bf3d9
--- /dev/null
+++ b/lib/portage/tests/resolver/test_autounmask_binpkg_use.py
@@ -0,0 +1,64 @@
+# Copyright 2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class AutounmaskBinpkgUseTestCase(TestCase):
+
+ def testAutounmaskBinpkgUse(self):
+ ebuilds = {
+ "dev-libs/A-1": {
+ "EAPI": "6",
+ "DEPEND": "dev-libs/B[foo]",
+ "RDEPEND": "dev-libs/B[foo]",
+ },
+ "dev-libs/B-1": {
+ "EAPI": "6",
+ "IUSE": "foo",
+ },
+ }
+ binpkgs = {
+ "dev-libs/A-1": {
+ "EAPI": "6",
+ "DEPEND": "dev-libs/B[foo]",
+ "RDEPEND": "dev-libs/B[foo]",
+ },
+ "dev-libs/B-1": {
+ "EAPI": "6",
+ "IUSE": "foo",
+ "USE": "foo",
+ },
+ }
+ installed = {
+ }
+
+ test_cases = (
+ # Bug 619626: Test for unnecessary rebuild due
+ # to rejection of binary packages that would
+			# be acceptable after application of autounmask
+ # USE changes.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ all_permutations = True,
+ success = True,
+ options = {
+ "--usepkg": True,
+ },
+ mergelist = [
+ "[binary]dev-libs/B-1",
+ "[binary]dev-libs/A-1",
+ ],
+ use_changes = {"dev-libs/B-1": {"foo": True}}
+ ),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ binpkgs=binpkgs, installed=installed, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_autounmask_keep_keywords.py b/lib/portage/tests/resolver/test_autounmask_keep_keywords.py
new file mode 100644
index 000000000..79a4837a5
--- /dev/null
+++ b/lib/portage/tests/resolver/test_autounmask_keep_keywords.py
@@ -0,0 +1,72 @@
+# Copyright 2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (
+ ResolverPlayground,
+ ResolverPlaygroundTestCase,
+)
+
+class AutounmaskKeepKeywordsTestCase(TestCase):
+
+ def testAutounmaskKeepKeywordsTestCase(self):
+ ebuilds = {
+ 'app-misc/A-2': {
+ 'EAPI': '6',
+ 'RDEPEND': 'app-misc/B',
+ },
+ 'app-misc/A-1': {
+ 'EAPI': '6',
+ 'RDEPEND': 'app-misc/C[foo]',
+ },
+ 'app-misc/B-1': {
+ 'EAPI': '6',
+ 'KEYWORDS': '~x86',
+ },
+ 'app-misc/C-1': {
+ 'EAPI': '6',
+ 'IUSE': 'foo',
+ },
+ }
+ installed = {
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ['app-misc/A'],
+ success = False,
+ options = {
+ '--autounmask-keep-keywords': 'n',
+ },
+ mergelist = [
+ 'app-misc/B-1',
+ 'app-misc/A-2',
+ ],
+ unstable_keywords={'app-misc/B-1'},
+ ),
+ # --autounmask-keep-keywords prefers app-misc/A-1 because
+ # it can be installed without accepting unstable
+ # keywords
+ ResolverPlaygroundTestCase(
+ ['app-misc/A'],
+ success = False,
+ options = {
+ '--autounmask-keep-keywords': 'y',
+ },
+ mergelist = [
+ 'app-misc/C-1',
+ 'app-misc/A-1',
+ ],
+ use_changes = {'app-misc/C-1': {'foo': True}},
+ ),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success,
+ True, test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_autounmask_multilib_use.py b/lib/portage/tests/resolver/test_autounmask_multilib_use.py
new file mode 100644
index 000000000..e160c77ce
--- /dev/null
+++ b/lib/portage/tests/resolver/test_autounmask_multilib_use.py
@@ -0,0 +1,85 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class AutounmaskMultilibUseTestCase(TestCase):
+
+ def testAutounmaskMultilibUse(self):
+
+ self.todo = True
+
+ ebuilds = {
+ "x11-proto/xextproto-7.2.1-r1": {
+ "EAPI": "5",
+ "IUSE": "abi_x86_32 abi_x86_64",
+ },
+ "x11-libs/libXaw-1.0.11-r2": {
+ "EAPI": "5",
+ "IUSE": "abi_x86_32 abi_x86_64",
+ "RDEPEND": "x11-proto/xextproto[abi_x86_32(-)?,abi_x86_64(-)?]"
+ },
+ "app-emulation/emul-linux-x86-xlibs-20130224-r2": {
+ "EAPI": "5",
+ "RDEPEND": "x11-libs/libXaw[abi_x86_32]"
+ },
+ "games-util/steam-client-meta-0-r20130514": {
+ "EAPI": "5",
+ "RDEPEND": "app-emulation/emul-linux-x86-xlibs"
+ }
+ }
+
+ installed = {
+ "x11-proto/xextproto-7.2.1-r1": {
+ "EAPI": "5",
+ "IUSE": "abi_x86_32 abi_x86_64",
+ "USE": "abi_x86_32 abi_x86_64"
+ },
+ "x11-libs/libXaw-1.0.11-r2": {
+ "EAPI": "5",
+ "IUSE": "abi_x86_32 abi_x86_64",
+ "RDEPEND": "x11-proto/xextproto[abi_x86_32(-)?,abi_x86_64(-)?]",
+ "USE": "abi_x86_32 abi_x86_64"
+ },
+ "app-emulation/emul-linux-x86-xlibs-20130224-r2": {
+ "EAPI": "5",
+ "RDEPEND": "x11-libs/libXaw[abi_x86_32]"
+ },
+ "games-util/steam-client-meta-0-r20130514": {
+ "EAPI": "5",
+ "RDEPEND": "app-emulation/emul-linux-x86-xlibs"
+ }
+ }
+
+ user_config = {
+ #"make.conf" : ("USE=\"abi_x86_32 abi_x86_64\"",)
+ "make.conf" : ("USE=\"abi_x86_64\"",)
+ }
+
+ world = ("games-util/steam-client-meta",)
+
+ test_cases = (
+
+ # Test autounmask solving of multilib use deps for bug #481628.
+ # We would like it to suggest some USE changes, but instead it
+ # currently fails with a SLOT conflict.
+
+ ResolverPlaygroundTestCase(
+ ["x11-proto/xextproto", "x11-libs/libXaw"],
+ options = {"--oneshot": True, "--autounmask": True,
+ "--backtrack": 30},
+ mergelist = ["x11-proto/xextproto-7.2.1-r1", "x11-libs/libXaw-1.0.11-r2"],
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed,
+ user_config=user_config, world=world, debug=False)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_autounmask_parent.py b/lib/portage/tests/resolver/test_autounmask_parent.py
new file mode 100644
index 000000000..042acabb0
--- /dev/null
+++ b/lib/portage/tests/resolver/test_autounmask_parent.py
@@ -0,0 +1,43 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (
+ ResolverPlayground,
+ ResolverPlaygroundTestCase,
+)
+
+class AutounmaskParentTestCase(TestCase):
+
+ def testAutounmaskParentUse(self):
+
+ ebuilds = {
+ "dev-libs/B-1": {
+ "EAPI": "5",
+ "DEPEND": "dev-libs/D[foo(-)?,bar(-)?]",
+ "IUSE": "+bar +foo",
+ },
+ "dev-libs/D-1": {},
+ }
+
+ test_cases = (
+ # Test bug 566704
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/B-1"],
+ options={"--autounmask": True},
+ success=False,
+ use_changes={
+ "dev-libs/B-1": {
+ "foo": False,
+ "bar": False,
+ }
+ }),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_autounmask_use_backtrack.py b/lib/portage/tests/resolver/test_autounmask_use_backtrack.py
new file mode 100644
index 000000000..83edeafa4
--- /dev/null
+++ b/lib/portage/tests/resolver/test_autounmask_use_backtrack.py
@@ -0,0 +1,86 @@
+# Copyright 2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (
+ ResolverPlayground,
+ ResolverPlaygroundTestCase,
+)
+
+class AutounmaskUseBacktrackTestCase(TestCase):
+
+ def testAutounmaskUseBacktrack(self):
+ ebuilds = {
+ 'dev-libs/A-1': {
+ 'EAPI': '6',
+ 'RDEPEND': 'dev-libs/C',
+ },
+ 'dev-libs/A-2': {
+ 'EAPI': '6',
+ 'RDEPEND': 'dev-libs/C[y]',
+ },
+ 'dev-libs/A-3': {
+ 'EAPI': '6',
+ 'RDEPEND': 'dev-libs/C',
+ },
+ 'dev-libs/B-1': {
+ 'EAPI': '6',
+ 'RDEPEND': '<dev-libs/A-3',
+ },
+ 'dev-libs/C-1': {
+ 'EAPI': '6',
+ 'IUSE': 'x y z',
+ },
+ 'dev-libs/D-1': {
+ 'EAPI': '6',
+ 'RDEPEND': '>=dev-libs/A-2 dev-libs/C[x]',
+ },
+ }
+
+ installed = {
+ 'dev-libs/A-1': {
+ 'EAPI': '6',
+ 'RDEPEND': 'dev-libs/C',
+ },
+ 'dev-libs/B-1': {
+ 'EAPI': '6',
+ 'RDEPEND': '<dev-libs/A-3',
+ },
+ 'dev-libs/C-1': {
+ 'EAPI': '6',
+ 'IUSE': 'x y z',
+ },
+ }
+
+ world = ['dev-libs/B']
+
+ test_cases = (
+ # Test bug 632598, where autounmask USE changes triggered
+ # unnecessary backtracking. The following case should
+ # require a --backtrack setting no larger than 2.
+ ResolverPlaygroundTestCase(
+ ['dev-libs/D'],
+ options={
+ '--autounmask-backtrack': 'y',
+ '--backtrack': 2,
+ },
+ success=False,
+ ambiguous_merge_order=True,
+ mergelist=[
+ ('dev-libs/C-1', 'dev-libs/A-2'),
+ 'dev-libs/D-1',
+ ],
+ use_changes={'dev-libs/C-1': {'y': True, 'x': True}},
+ ),
+ )
+
+ playground = ResolverPlayground(
+ ebuilds=ebuilds, installed=installed, world=world)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_autounmask_use_breakage.py b/lib/portage/tests/resolver/test_autounmask_use_breakage.py
new file mode 100644
index 000000000..173941629
--- /dev/null
+++ b/lib/portage/tests/resolver/test_autounmask_use_breakage.py
@@ -0,0 +1,103 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class AutounmaskUseBreakageTestCase(TestCase):
+
+ def testAutounmaskUseBreakage(self):
+
+ ebuilds = {
+
+ "app-misc/A-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/D[-foo]",
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/D[foo]"
+ },
+
+ "app-misc/C-0" : {
+ "EAPI": "5",
+ "RDEPEND": ">=app-misc/D-1"
+ },
+
+ "app-misc/D-0" : {
+ "EAPI": "5",
+ "IUSE": "foo"
+ },
+
+ "app-misc/D-1" : {
+ "EAPI": "5",
+ "IUSE": "bar"
+ },
+
+ }
+
+ test_cases = (
+
+ # Bug 510270
+ # _solve_non_slot_operator_slot_conflicts throws
+ # IndexError: tuple index out of range
+ # due to autounmask USE breakage.
+ ResolverPlaygroundTestCase(
+ ["app-misc/C", "app-misc/B", "app-misc/A"],
+ options={"--autounmask-backtrack": "y"},
+ all_permutations = True,
+ success = False,
+ ambiguous_slot_collision_solutions = True,
+ slot_collision_solutions = [None, []]
+ ),
+
+ # With --autounmask-backtrack=y:
+ #emerge: there are no ebuilds built with USE flags to satisfy "app-misc/D[foo]".
+ #!!! One of the following packages is required to complete your request:
+ #- app-misc/D-0::test_repo (Change USE: +foo)
+ #(dependency required by "app-misc/B-0::test_repo" [ebuild])
+ #(dependency required by "app-misc/B" [argument])
+
+ # Without --autounmask-backtrack=y:
+ #[ebuild N ] app-misc/D-0 USE="foo"
+ #[ebuild N ] app-misc/D-1 USE="-bar"
+ #[ebuild N ] app-misc/C-0
+ #[ebuild N ] app-misc/B-0
+ #[ebuild N ] app-misc/A-0
+ #
+ #!!! Multiple package instances within a single package slot have been pulled
+ #!!! into the dependency graph, resulting in a slot conflict:
+ #
+ #app-misc/D:0
+ #
+ # (app-misc/D-0:0/0::test_repo, ebuild scheduled for merge) pulled in by
+ # app-misc/D[-foo] required by (app-misc/A-0:0/0::test_repo, ebuild scheduled for merge)
+ # ^^^^
+ # app-misc/D[foo] required by (app-misc/B-0:0/0::test_repo, ebuild scheduled for merge)
+ # ^^^
+ #
+ # (app-misc/D-1:0/0::test_repo, ebuild scheduled for merge) pulled in by
+ # >=app-misc/D-1 required by (app-misc/C-0:0/0::test_repo, ebuild scheduled for merge)
+ # ^^ ^
+ #
+ #The following USE changes are necessary to proceed:
+ # (see "package.use" in the portage(5) man page for more details)
+ ## required by app-misc/B-0::test_repo
+ ## required by app-misc/B (argument)
+ #=app-misc/D-0 foo
+
+ # NOTE: The --autounmask-backtrack=n output is preferable here,
+ # because it highlights the unsolvable dependency conflict.
+ # It would be better if it eliminated the autounmask suggestion,
+ # since that suggestion won't solve the conflict.
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_backtracking.py b/lib/portage/tests/resolver/test_backtracking.py
new file mode 100644
index 000000000..656715347
--- /dev/null
+++ b/lib/portage/tests/resolver/test_backtracking.py
@@ -0,0 +1,179 @@
+# Copyright 2010-2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class BacktrackingTestCase(TestCase):
+
+ def testBacktracking(self):
+ ebuilds = {
+ "dev-libs/A-1": {},
+ "dev-libs/A-2": {},
+ "dev-libs/B-1": { "DEPEND": "dev-libs/A" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1", "dev-libs/B"],
+ all_permutations = True,
+ mergelist = ["dev-libs/A-1", "dev-libs/B-1"],
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testBacktrackNotNeeded(self):
+ ebuilds = {
+ "dev-libs/A-1": {},
+ "dev-libs/A-2": {},
+ "dev-libs/B-1": {},
+ "dev-libs/B-2": {},
+ "dev-libs/C-1": { "DEPEND": "dev-libs/A dev-libs/B" },
+ "dev-libs/D-1": { "DEPEND": "=dev-libs/A-1 =dev-libs/B-1" },
+ }
+
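+		# With --backtrack limited to 1, the resolver is expected to
+		# satisfy dev-libs/D's =A-1 =B-1 deps without relying on
+		# backtracking.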
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C", "dev-libs/D"],
+ all_permutations = True,
+ options = { "--backtrack": 1 },
+ mergelist = ["dev-libs/A-1", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
+ ignore_mergelist_order = True,
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testBacktrackWithoutUpdates(self):
+ """
+		If --update is not given, we may have to mask the old installed
+		version of dev-libs/Z later, in order to satisfy the >=dev-libs/Z-2
+		dependency of dev-libs/B.
+ """
+
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "dev-libs/Z" },
+ "dev-libs/B-1": { "DEPEND": ">=dev-libs/Z-2" },
+ "dev-libs/Z-1": { },
+ "dev-libs/Z-2": { },
+ }
+
+ installed = {
+ "dev-libs/Z-1": { "USE": "" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B", "dev-libs/A"],
+ all_permutations = True,
+ mergelist = ["dev-libs/Z-2", "dev-libs/B-1", "dev-libs/A-1",],
+ ignore_mergelist_order = True,
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testBacktrackMissedUpdates(self):
+ """
+ An update is missed due to a dependency on an older version.
+ """
+
+ ebuilds = {
+ "dev-libs/A-1": { },
+ "dev-libs/A-2": { },
+ "dev-libs/B-1": { "RDEPEND": "<=dev-libs/A-1" },
+ }
+
+ installed = {
+ "dev-libs/A-1": { "USE": "" },
+ "dev-libs/B-1": { "USE": "", "RDEPEND": "<=dev-libs/A-1" },
+ }
+
+ options = {'--update' : True, '--deep' : True, '--selective' : True}
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B"],
+ options = options,
+ all_permutations = True,
+ mergelist = [],
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testBacktrackNoWrongRebuilds(self):
+ """
+ Ensure we remove backtrack masks if the reason for the mask gets masked itself.
+ """
+
+ ebuilds = {
+ "dev-libs/A-1": { },
+ "dev-libs/A-2": { },
+ "dev-libs/B-1": { "RDEPEND": "dev-libs/D"},
+ "dev-libs/C-1": { },
+ "dev-libs/C-2": { "RDEPEND": ">=dev-libs/A-2" },
+ "dev-libs/D-1": { "RDEPEND": "<dev-libs/A-2" },
+ }
+
+ installed = {
+ "dev-libs/A-1": { },
+ "dev-libs/B-1": { "RDEPEND": "dev-libs/D" },
+ "dev-libs/C-1": { },
+ "dev-libs/D-1": { "RDEPEND": "<dev-libs/A-2" },
+ }
+
+ world = ["dev-libs/B", "dev-libs/C"]
+
+ options = {
+ '--backtrack': 6,
+ '--deep' : True,
+ '--selective' : True,
+ '--update' : True,
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = options,
+ mergelist = [],
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_bdeps.py b/lib/portage/tests/resolver/test_bdeps.py
new file mode 100644
index 000000000..c0d64991c
--- /dev/null
+++ b/lib/portage/tests/resolver/test_bdeps.py
@@ -0,0 +1,215 @@
+# Copyright 2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (
+ ResolverPlayground,
+ ResolverPlaygroundTestCase,
+)
+
+class BdepsTestCase(TestCase):
+
+ def testImageMagickUpdate(self):
+
+ ebuilds = {
+ "app-misc/A-1" : {
+ "EAPI": "6",
+ "DEPEND": "app-misc/B",
+ "RDEPEND": "app-misc/C",
+ },
+
+ "app-misc/B-1" : {
+ "EAPI": "6"
+ },
+ "app-misc/B-2" : {
+ "EAPI": "6",
+ },
+
+ "app-misc/C-1" : {
+ "EAPI": "6",
+ "DEPEND": "app-misc/D",
+ },
+ "app-misc/C-2" : {
+ "EAPI": "6",
+ "DEPEND": "app-misc/D",
+ },
+
+ "app-misc/D-1" : {
+ "EAPI": "6",
+ },
+ "app-misc/D-2" : {
+ "EAPI": "6",
+ },
+ }
+
+ installed = {
+ "app-misc/A-1" : {
+ "EAPI": "6",
+ "DEPEND": "app-misc/B",
+ "RDEPEND": "app-misc/C",
+ },
+
+ "app-misc/B-1" : {
+ "EAPI": "6",
+ },
+ "app-misc/C-1" : {
+ "EAPI": "6",
+ "DEPEND": "app-misc/D",
+ },
+
+ "app-misc/D-1" : {
+ "EAPI": "6",
+ },
+ }
+
+ binpkgs = {
+ "app-misc/A-1" : {
+ "EAPI": "6",
+ "DEPEND": "app-misc/B",
+ "RDEPEND": "app-misc/C",
+ },
+
+ "app-misc/B-1" : {
+ "EAPI": "6",
+ },
+ "app-misc/B-2" : {
+ "EAPI": "6",
+ },
+
+ "app-misc/C-1" : {
+ "EAPI": "6",
+ "DEPEND": "app-misc/D",
+ },
+ "app-misc/C-2" : {
+ "EAPI": "6",
+ "DEPEND": "app-misc/D",
+ },
+
+ "app-misc/D-1" : {
+ "EAPI": "6",
+ },
+ "app-misc/D-2" : {
+ "EAPI": "6",
+ },
+ }
+
+ world = (
+ "app-misc/A",
+ )
+
+ test_cases = (
+
+ # Enable --with-bdeps automatically when
+ # --usepkg has not been specified.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {
+ "--update": True,
+ "--deep": True,
+ },
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = [
+ "app-misc/D-2",
+ ("app-misc/B-2", "app-misc/C-2"),
+ ]
+ ),
+
+ # Use --with-bdeps-auto=n to prevent --with-bdeps
+ # from being enabled automatically.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {
+ "--update": True,
+ "--deep": True,
+ "--with-bdeps-auto": "n",
+ },
+ success = True,
+ mergelist = [
+ "app-misc/D-2",
+ "app-misc/C-2",
+ ]
+ ),
+
+ # Do not enable --with-bdeps automatically when
+ # --usepkg has been specified, since many users of binary
+ # packages do not want unnecessary build time dependencies
+ # installed. In this case we miss an update to
+ # app-misc/D-2, since DEPEND is not pulled in for
+ # the [binary]app-misc/C-2 update.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {
+ "--update": True,
+ "--deep": True,
+ "--usepkg": True,
+ },
+ success = True,
+ mergelist = [
+ "[binary]app-misc/C-2",
+ ]
+ ),
+
+ # Use --with-bdeps=y to pull in build-time dependencies of
+ # binary packages.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {
+ "--update": True,
+ "--deep": True,
+ "--usepkg": True,
+ "--with-bdeps": "y",
+ },
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = [
+ (
+ "[binary]app-misc/D-2",
+ "[binary]app-misc/B-2",
+ "[binary]app-misc/C-2",
+ ),
+ ]
+ ),
+
+ # For --depclean, do not remove build-time dependencies by
+ # default. Specify --with-bdeps-auto=n, in order to
+ # demonstrate that it does not affect removal actions.
+ ResolverPlaygroundTestCase(
+ [],
+ options = {
+ "--depclean": True,
+ "--with-bdeps-auto": "n",
+ },
+ success = True,
+ cleanlist = [],
+ ),
+
+ # For --depclean, remove build-time dependencies if
+ # --with-bdeps=n has been specified.
+ ResolverPlaygroundTestCase(
+ [],
+ options = {
+ "--depclean": True,
+ "--with-bdeps": "n",
+ },
+ success = True,
+ ignore_cleanlist_order = True,
+ cleanlist = [
+ "app-misc/D-1",
+ "app-misc/B-1",
+ ],
+ ),
+ )
+
+ playground = ResolverPlayground(debug=False,
+ ebuilds=ebuilds, installed=installed,
+ binpkgs=binpkgs, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ # Disable debug so that cleanup works.
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_binary_pkg_ebuild_visibility.py b/lib/portage/tests/resolver/test_binary_pkg_ebuild_visibility.py
new file mode 100644
index 000000000..0d01d0696
--- /dev/null
+++ b/lib/portage/tests/resolver/test_binary_pkg_ebuild_visibility.py
@@ -0,0 +1,144 @@
+# Copyright 2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (
+ ResolverPlayground,
+ ResolverPlaygroundTestCase,
+)
+
+class BinaryPkgEbuildVisibilityTestCase(TestCase):
+
+ def testBinaryPkgEbuildVisibility(self):
+
+ binpkgs = {
+ "app-misc/foo-3" : {},
+ "app-misc/foo-2" : {},
+ "app-misc/foo-1" : {},
+ }
+
+ ebuilds = {
+ "app-misc/foo-2" : {},
+ "app-misc/foo-1" : {},
+ }
+
+ installed = {
+ "app-misc/foo-1" : {},
+ }
+
+ world = ["app-misc/foo"]
+
+ test_cases = (
+
+ # Test bug #612960, where --use-ebuild-visibility failed
+ # to reject binary packages for which ebuilds were not
+ # available.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {
+ "--update": True,
+ "--deep": True,
+ "--use-ebuild-visibility": 'y',
+ "--usepkgonly": True,
+ },
+ success = True,
+ mergelist = [
+ '[binary]app-misc/foo-2',
+ ],
+ ),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {
+ "--update": True,
+ "--deep": True,
+ "--usepkgonly": True,
+ },
+ success = True,
+ mergelist = [
+ '[binary]app-misc/foo-3',
+ ],
+ ),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {
+ "--update": True,
+ "--deep": True,
+ "--usepkg": True,
+ },
+ success = True,
+ mergelist = [
+ '[binary]app-misc/foo-2',
+ ],
+ ),
+
+ ResolverPlaygroundTestCase(
+ ["=app-misc/foo-3"],
+ options = {
+ "--use-ebuild-visibility": 'y',
+ "--usepkgonly": True,
+ },
+ success = False,
+ ),
+
+ ResolverPlaygroundTestCase(
+ ["app-misc/foo"],
+ options = {
+ "--use-ebuild-visibility": 'y',
+ "--usepkgonly": True,
+ },
+ success = True,
+ mergelist = [
+ '[binary]app-misc/foo-2',
+ ],
+ ),
+
+ ResolverPlaygroundTestCase(
+ ["app-misc/foo"],
+ options = {
+ "--usepkgonly": True,
+ },
+ success = True,
+ mergelist = [
+ '[binary]app-misc/foo-3',
+ ],
+ ),
+
+ # The default behavior is to enforce ebuild visibility as
+ # long as a visible package is available to satisfy the
+ # current atom. In the following test case, ebuild visibility
+ # is ignored in order to satisfy the =app-misc/foo-3 atom.
+ ResolverPlaygroundTestCase(
+ ["=app-misc/foo-3"],
+ options = {
+ "--usepkg": True,
+ },
+ success = True,
+ mergelist = [
+ '[binary]app-misc/foo-3',
+ ],
+ ),
+
+ # Verify that --use-ebuild-visibility works with --usepkg
+ # when no other visible package is available.
+ ResolverPlaygroundTestCase(
+ ["=app-misc/foo-3"],
+ options = {
+ "--use-ebuild-visibility": "y",
+ "--usepkg": True,
+ },
+ success = False,
+ ),
+ )
+
+ playground = ResolverPlayground(binpkgs=binpkgs, ebuilds=ebuilds,
+ installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
diff --git a/lib/portage/tests/resolver/test_blocker.py b/lib/portage/tests/resolver/test_blocker.py
new file mode 100644
index 000000000..94a88b8b4
--- /dev/null
+++ b/lib/portage/tests/resolver/test_blocker.py
@@ -0,0 +1,48 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class SlotConflictWithBlockerTestCase(TestCase):
+
+ def testBlocker(self):
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "dev-libs/X" },
+ "dev-libs/B-1": { "DEPEND": "<dev-libs/X-2" },
+ "dev-libs/C-1": { "DEPEND": "<dev-libs/X-3" },
+
+ "dev-libs/X-1": { "EAPI": "2", "RDEPEND": "!=dev-libs/Y-1" },
+ "dev-libs/X-2": { "EAPI": "2", "RDEPEND": "!=dev-libs/Y-2" },
+ "dev-libs/X-3": { "EAPI": "2", "RDEPEND": "!=dev-libs/Y-3" },
+
+ "dev-libs/Y-1": { "SLOT": "1" },
+ "dev-libs/Y-2": { "SLOT": "2" },
+ "dev-libs/Y-3": { "SLOT": "3" },
+ }
+
+ installed = {
+ "dev-libs/Y-1": { "SLOT": "1" },
+ "dev-libs/Y-2": { "SLOT": "2" },
+ "dev-libs/Y-3": { "SLOT": "3" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B", "dev-libs/C"],
+ options = { "--backtrack": 0 },
+ all_permutations = True,
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = ["dev-libs/X-1", "[uninstall]dev-libs/Y-1", "!=dev-libs/Y-1", \
+ ("dev-libs/A-1", "dev-libs/B-1", "dev-libs/C-1")]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_changed_deps.py b/lib/portage/tests/resolver/test_changed_deps.py
new file mode 100644
index 000000000..420a00172
--- /dev/null
+++ b/lib/portage/tests/resolver/test_changed_deps.py
@@ -0,0 +1,121 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (
+ ResolverPlayground, ResolverPlaygroundTestCase)
+
+class ChangedDepsTestCase(TestCase):
+
+ def testChangedDeps(self):
+
+ ebuilds = {
+ "app-misc/A-0": {
+ "DEPEND": "app-misc/B",
+ "RDEPEND": "app-misc/B",
+ },
+ "app-misc/B-0": {
+ }
+ }
+
+ binpkgs = {
+ "app-misc/A-0": {},
+ }
+
+ installed = {
+ "app-misc/A-0": {},
+ }
+
+ world = (
+ "app-misc/A",
+ )
+
+ test_cases = (
+
+ # --dynamic-deps=n causes the original deps to be respected
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ success = True,
+ options = {
+ "--update": True,
+ "--deep": True,
+ "--dynamic-deps": "n",
+ "--usepkg": True,
+ },
+ mergelist = []
+ ),
+
+ # --dynamic-deps causes app-misc/B to get pulled in
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ success = True,
+ options = {
+ "--update": True,
+ "--deep": True,
+ "--dynamic-deps": "y",
+ "--usepkg": True,
+ },
+ mergelist = ["app-misc/B-0"]
+ ),
+
+ # --changed-deps causes app-misc/A to be rebuilt
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ success = True,
+ options = {
+ "--update": True,
+ "--deep": True,
+ "--changed-deps": "y",
+ "--usepkg": True,
+ },
+ mergelist = ["app-misc/B-0", "app-misc/A-0"]
+ ),
+
+ # --usepkgonly prevents automatic --binpkg-changed-deps
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ success = True,
+ options = {
+ "--changed-deps": "y",
+ "--usepkgonly": True,
+ },
+ mergelist = ["[binary]app-misc/A-0"]
+ ),
+
+ # Test automatic --binpkg-changed-deps, which causes the
+ # binpkg with stale deps to be ignored (with warning
+ # message)
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ success = True,
+ options = {
+ "--usepkg": True,
+ },
+ mergelist = ["app-misc/B-0", "app-misc/A-0"]
+ ),
+ ),
+
+ # Forcibly disable --binpkg-changed-deps, so that the
+ # explicit setting overrides --changed-deps for the binary
+ # package and the binpkg with stale deps is used.
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ success = True,
+ options = {
+ "--binpkg-changed-deps": "n",
+ "--changed-deps": "y",
+ "--usepkg": True,
+ },
+ mergelist = ["[binary]app-misc/A-0"]
+ ),
+ )
+
+ playground = ResolverPlayground(debug=False, ebuilds=ebuilds,
+ binpkgs=binpkgs, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success,
+ True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_circular_choices.py b/lib/portage/tests/resolver/test_circular_choices.py
new file mode 100644
index 000000000..33b730627
--- /dev/null
+++ b/lib/portage/tests/resolver/test_circular_choices.py
@@ -0,0 +1,61 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class CircularChoicesTestCase(TestCase):
+
+ def testDirectCircularDependency(self):
+
+ ebuilds = {
+ "dev-lang/gwydion-dylan-2.4.0": {"DEPEND": "|| ( dev-lang/gwydion-dylan dev-lang/gwydion-dylan-bin )" },
+ "dev-lang/gwydion-dylan-bin-2.4.0": {},
+ }
+
+ test_cases = (
+ # Automatically pull in gwydion-dylan-bin to solve a circular dep
+ ResolverPlaygroundTestCase(
+ ["dev-lang/gwydion-dylan"],
+ mergelist = ['dev-lang/gwydion-dylan-bin-2.4.0', 'dev-lang/gwydion-dylan-2.4.0'],
+ success = True,
+ ),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+class VirtualCircularChoicesTestCase(TestCase):
+ def testDirectVirtualCircularDependency(self):
+
+ # Bug #384107
+ self.todo = True
+
+ ebuilds = {
+ "dev-java/icedtea-6.1.10.3": { "SLOT" : "6", "DEPEND": "virtual/jdk" },
+ "dev-java/icedtea6-bin-1.10.3": {},
+ "virtual/jdk-1.6.0": { "SLOT" : "1.6", "RDEPEND": "|| ( dev-java/icedtea6-bin =dev-java/icedtea-6* )" },
+ }
+
+ test_cases = (
+ # Automatically pull in icedtea6-bin to solve a circular dep
+ ResolverPlaygroundTestCase(
+ ["dev-java/icedtea"],
+ mergelist = ["dev-java/icedtea6-bin-1.10.3", "virtual/jdk-1.6.0", "dev-java/icedtea-6.1.10.3"],
+ success = True,
+ ),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_circular_dependencies.py b/lib/portage/tests/resolver/test_circular_dependencies.py
new file mode 100644
index 000000000..f8331ac4e
--- /dev/null
+++ b/lib/portage/tests/resolver/test_circular_dependencies.py
@@ -0,0 +1,84 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class CircularDependencyTestCase(TestCase):
+
+ #TODO:
+ # use config change by autounmask
+ # conflict on parent's parent
+ # difference in RDEPEND and DEPEND
+ # is there anything other than buildtime and runtime priority?
+ # play with use.{mask,force}
+ # play with REQUIRED_USE
+
+
+ def testCircularDependency(self):
+
+ ebuilds = {
+ "dev-libs/Z-1": { "DEPEND": "foo? ( !bar? ( dev-libs/Y ) )", "IUSE": "+foo bar", "EAPI": 1 },
+ "dev-libs/Z-2": { "DEPEND": "foo? ( dev-libs/Y ) !bar? ( dev-libs/Y )", "IUSE": "+foo bar", "EAPI": 1 },
+ "dev-libs/Z-3": { "DEPEND": "foo? ( !bar? ( dev-libs/Y ) ) foo? ( dev-libs/Y ) !bar? ( dev-libs/Y )", "IUSE": "+foo bar", "EAPI": 1 },
+ "dev-libs/Y-1": { "DEPEND": "dev-libs/Z" },
+ "dev-libs/W-1": { "DEPEND": "dev-libs/Z[foo] dev-libs/Y", "EAPI": 2 },
+ "dev-libs/W-2": { "DEPEND": "dev-libs/Z[foo=] dev-libs/Y", "IUSE": "+foo", "EAPI": 2 },
+ "dev-libs/W-3": { "DEPEND": "dev-libs/Z[bar] dev-libs/Y", "EAPI": 2 },
+
+ "app-misc/A-1": { "DEPEND": "foo? ( =app-misc/B-1 )", "IUSE": "+foo bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
+ "app-misc/A-2": { "DEPEND": "foo? ( =app-misc/B-2 ) bar? ( =app-misc/B-2 )", "IUSE": "+foo bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
+ "app-misc/B-1": { "DEPEND": "=app-misc/A-1" },
+ "app-misc/B-2": { "DEPEND": "=app-misc/A-2" },
+ }
+
+ test_cases = (
+ #Simple tests
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/Z-1"],
+ circular_dependency_solutions = { "dev-libs/Y-1": frozenset([frozenset([("foo", False)]), frozenset([("bar", True)])])},
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/Z-2"],
+ circular_dependency_solutions = { "dev-libs/Y-1": frozenset([frozenset([("foo", False), ("bar", True)])])},
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/Z-3"],
+ circular_dependency_solutions = { "dev-libs/Y-1": frozenset([frozenset([("foo", False), ("bar", True)])])},
+ success = False),
+
+ #Conflict on parent
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/W-1"],
+ circular_dependency_solutions = {},
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/W-2"],
+ circular_dependency_solutions = { "dev-libs/Y-1": frozenset([frozenset([("foo", False), ("bar", True)])])},
+ success = False),
+
+ #Conflict with autounmask
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/W-3"],
+ circular_dependency_solutions = { "dev-libs/Y-1": frozenset([frozenset([("foo", False)])])},
+ use_changes = { "dev-libs/Z-3": {"bar": True}},
+ success = False),
+
+ #Conflict with REQUIRED_USE
+ ResolverPlaygroundTestCase(
+ ["=app-misc/B-1"],
+ circular_dependency_solutions = { "app-misc/B-1": frozenset([frozenset([("foo", False), ("bar", True)])])},
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["=app-misc/B-2"],
+ circular_dependency_solutions = {},
+ success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_complete_graph.py b/lib/portage/tests/resolver/test_complete_graph.py
new file mode 100644
index 000000000..6b5f54a3a
--- /dev/null
+++ b/lib/portage/tests/resolver/test_complete_graph.py
@@ -0,0 +1,148 @@
+# Copyright 2011-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class CompleteGraphTestCase(TestCase):
+
+ def testCompleteGraphUseChange(self):
+ """
+ Prevent reverse dependency breakage triggered by USE changes.
+ """
+
+ ebuilds = {
+ "dev-libs/libxml2-2.8.0": {
+ "EAPI": "2",
+ "IUSE": "+icu",
+ "SLOT": "2",
+ },
+ "x11-libs/qt-webkit-4.8.2": {
+ "EAPI": "2",
+ "IUSE": "icu",
+ "RDEPEND" : "dev-libs/libxml2:2[!icu?]",
+ },
+ }
+
+ installed = {
+ "dev-libs/libxml2-2.8.0": {
+ "EAPI": "2",
+ "IUSE": "+icu",
+ "USE": "",
+ "SLOT": "2",
+ },
+ "x11-libs/qt-webkit-4.8.2": {
+ "EAPI": "2",
+ "IUSE": "icu",
+ "RDEPEND" : "dev-libs/libxml2:2[-icu]",
+ "USE": "",
+ }
+ }
+
+ world = ["x11-libs/qt-webkit"]
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/libxml2"],
+ options = {"--complete-graph-if-new-use" : "y" },
+ mergelist = ["dev-libs/libxml2-2.8.0"],
+ slot_collision_solutions = [{'dev-libs/libxml2-2.8.0': {'icu': False}}],
+ success = False,
+ ),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/libxml2"],
+ options = {"--complete-graph-if-new-use" : "n" },
+ mergelist = ["dev-libs/libxml2-2.8.0"],
+ success = True,
+ ),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/libxml2"],
+ options = {"--ignore-world" : True},
+ mergelist = ["dev-libs/libxml2-2.8.0"],
+ success = True,
+ ),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testCompleteGraphVersionChange(self):
+ """
+ Prevent reverse dependency breakage triggered by version changes.
+ """
+
+ ebuilds = {
+ "sys-libs/x-0.1": {},
+ "sys-libs/x-1": {},
+ "sys-libs/x-2": {},
+ "sys-apps/a-1": {"RDEPEND" : ">=sys-libs/x-1 <sys-libs/x-2"},
+ }
+
+ installed = {
+ "sys-libs/x-1": {},
+ "sys-apps/a-1": {"RDEPEND" : ">=sys-libs/x-1 <sys-libs/x-2"},
+ }
+
+ world = ["sys-apps/a"]
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ [">=sys-libs/x-2"],
+ options = {"--complete-graph-if-new-ver" : "n", "--rebuild-if-new-slot": "n"},
+ mergelist = ["sys-libs/x-2"],
+ success = True,
+ ),
+ ResolverPlaygroundTestCase(
+ [">=sys-libs/x-2"],
+ options = {"--ignore-world" : True},
+ mergelist = ["sys-libs/x-2"],
+ success = True,
+ ),
+ ResolverPlaygroundTestCase(
+ [">=sys-libs/x-2"],
+ options = {"--complete-graph-if-new-ver" : "y"},
+ mergelist = ["sys-libs/x-2"],
+ slot_collision_solutions = [],
+ success = False,
+ ),
+ ResolverPlaygroundTestCase(
+ ["<sys-libs/x-1"],
+ options = {"--complete-graph-if-new-ver" : "n", "--rebuild-if-new-slot": "n"},
+ mergelist = ["sys-libs/x-0.1"],
+ success = True,
+ ),
+ ResolverPlaygroundTestCase(
+ ["<sys-libs/x-1"],
+ options = {"--ignore-world" : True},
+ mergelist = ["sys-libs/x-0.1"],
+ success = True,
+ ),
+ ResolverPlaygroundTestCase(
+ ["<sys-libs/x-1"],
+ options = {"--complete-graph-if-new-ver" : "y"},
+ mergelist = ["sys-libs/x-0.1"],
+ slot_collision_solutions = [],
+ success = False,
+ ),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_complete_if_new_subslot_without_revbump.py b/lib/portage/tests/resolver/test_complete_if_new_subslot_without_revbump.py
new file mode 100644
index 000000000..fddbead7c
--- /dev/null
+++ b/lib/portage/tests/resolver/test_complete_if_new_subslot_without_revbump.py
@@ -0,0 +1,74 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class CompleteIfNewSubSlotWithoutRevBumpTestCase(TestCase):
+
+ def testCompleteIfNewSubSlotWithoutRevBump(self):
+
+ ebuilds = {
+ "media-libs/libpng-1.5.14" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+
+ "x11-libs/gdk-pixbuf-2.26.5" : {
+ "EAPI": "5",
+ "DEPEND": ">=media-libs/libpng-1.4:=",
+ "RDEPEND": ">=media-libs/libpng-1.4:="
+ },
+ }
+
+ binpkgs = {
+ "x11-libs/gdk-pixbuf-2.26.5" : {
+ "EAPI": "5",
+ "DEPEND": ">=media-libs/libpng-1.4:0/15=",
+ "RDEPEND": ">=media-libs/libpng-1.4:0/15="
+ },
+ }
+
+ installed = {
+ "media-libs/libpng-1.5.14" : {
+ "EAPI": "5",
+ "SLOT": "0/15"
+ },
+
+ "x11-libs/gdk-pixbuf-2.26.5" : {
+ "EAPI": "5",
+ "DEPEND": ">=media-libs/libpng-1.4:0/15=",
+ "RDEPEND": ">=media-libs/libpng-1.4:0/15="
+ },
+ }
+
+ world = ["x11-libs/gdk-pixbuf"]
+
+ test_cases = (
+ # Test that --complete-graph-if-new-ver=y triggers rebuild
+ # when the sub-slot changes without a revbump.
+ ResolverPlaygroundTestCase(
+ ["media-libs/libpng"],
+ options = {
+ "--oneshot": True,
+ "--complete-graph-if-new-ver": "y",
+ "--rebuild-if-new-slot": "n",
+ "--usepkg": True
+ },
+ success = True,
+ mergelist = [
+ "media-libs/libpng-1.5.14",
+ "x11-libs/gdk-pixbuf-2.26.5"
+ ]
+ ),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_depclean.py b/lib/portage/tests/resolver/test_depclean.py
new file mode 100644
index 000000000..f3c4c638c
--- /dev/null
+++ b/lib/portage/tests/resolver/test_depclean.py
@@ -0,0 +1,291 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class SimpleDepcleanTestCase(TestCase):
+
+ def testSimpleDepclean(self):
+ ebuilds = {
+ "dev-libs/A-1": {},
+ "dev-libs/B-1": {},
+ }
+ installed = {
+ "dev-libs/A-1": {},
+ "dev-libs/B-1": {},
+ }
+
+ world = (
+ "dev-libs/A",
+ )
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+class DepcleanWithDepsTestCase(TestCase):
+
+ def testDepcleanWithDeps(self):
+ ebuilds = {
+ "dev-libs/A-1": { "RDEPEND": "dev-libs/C" },
+ "dev-libs/B-1": { "RDEPEND": "dev-libs/D" },
+ "dev-libs/C-1": {},
+ "dev-libs/D-1": { "RDEPEND": "dev-libs/E" },
+ "dev-libs/E-1": { "RDEPEND": "dev-libs/F" },
+ "dev-libs/F-1": {},
+ }
+ installed = {
+ "dev-libs/A-1": { "RDEPEND": "dev-libs/C" },
+ "dev-libs/B-1": { "RDEPEND": "dev-libs/D" },
+ "dev-libs/C-1": {},
+ "dev-libs/D-1": { "RDEPEND": "dev-libs/E" },
+ "dev-libs/E-1": { "RDEPEND": "dev-libs/F" },
+ "dev-libs/F-1": {},
+ }
+
+ world = (
+ "dev-libs/A",
+ )
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/B-1", "dev-libs/D-1",
+ "dev-libs/E-1", "dev-libs/F-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+class DepcleanWithInstalledMaskedTestCase(TestCase):
+
+ def testDepcleanWithInstalledMasked(self):
+ """
+ Test case for bug 332719.
+ emerge --depclean ignores that B is masked by license and removes C.
+ The next emerge -uDN world doesn't pull in B and installs C again.
+ """
+ ebuilds = {
+ "dev-libs/A-1": { "RDEPEND": "|| ( dev-libs/B dev-libs/C )" },
+ "dev-libs/B-1": { "LICENSE": "TEST", "KEYWORDS": "x86" },
+ "dev-libs/C-1": { "KEYWORDS": "x86" },
+ }
+ installed = {
+ "dev-libs/A-1": { "RDEPEND": "|| ( dev-libs/B dev-libs/C )" },
+ "dev-libs/B-1": { "LICENSE": "TEST", "KEYWORDS": "x86" },
+ "dev-libs/C-1": { "KEYWORDS": "x86" },
+ }
+
+ world = (
+ "dev-libs/A",
+ )
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ #cleanlist=["dev-libs/C-1"]),
+ cleanlist=["dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+class DepcleanInstalledKeywordMaskedSlotTestCase(TestCase):
+
+ def testDepcleanInstalledKeywordMaskedSlot(self):
+ """
+ Verify that depclean removes a newer slot
+ masked by KEYWORDS (see bug #350285).
+ """
+ ebuilds = {
+ "dev-libs/A-1": { "RDEPEND": "|| ( =dev-libs/B-2.7* =dev-libs/B-2.6* )" },
+ "dev-libs/B-2.6": { "SLOT":"2.6", "KEYWORDS": "x86" },
+ "dev-libs/B-2.7": { "SLOT":"2.7", "KEYWORDS": "~x86" },
+ }
+ installed = {
+ "dev-libs/A-1": { "EAPI" : "3", "RDEPEND": "|| ( dev-libs/B:2.7 dev-libs/B:2.6 )" },
+ "dev-libs/B-2.6": { "SLOT":"2.6", "KEYWORDS": "x86" },
+ "dev-libs/B-2.7": { "SLOT":"2.7", "KEYWORDS": "~x86" },
+ }
+
+ world = (
+ "dev-libs/A",
+ )
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/B-2.7"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+class DepcleanWithExcludeTestCase(TestCase):
+
+ def testDepcleanWithExclude(self):
+
+ installed = {
+ "dev-libs/A-1": {},
+ "dev-libs/B-1": { "RDEPEND": "dev-libs/A" },
+ }
+
+ # depclean asserts non-empty @world set
+ world = ["non-empty/world-set"]
+
+ test_cases = (
+ #Without --exclude.
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/B-1", "dev-libs/A-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=[]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/B-1"]),
+
+ #With --exclude
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True, "--exclude": ["dev-libs/A"]},
+ success=True,
+ cleanlist=["dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options={"--depclean": True, "--exclude": ["dev-libs/B"]},
+ success=True,
+ cleanlist=[]),
+ )
+
+ playground = ResolverPlayground(installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+class DepcleanWithExcludeAndSlotsTestCase(TestCase):
+
+ def testDepcleanWithExcludeAndSlots(self):
+
+ installed = {
+ "dev-libs/Z-1": { "SLOT": 1},
+ "dev-libs/Z-2": { "SLOT": 2},
+ "dev-libs/Y-1": { "RDEPEND": "=dev-libs/Z-1", "SLOT": 1 },
+ "dev-libs/Y-2": { "RDEPEND": "=dev-libs/Z-2", "SLOT": 2 },
+ }
+
+ world=["dev-libs/Y"]
+
+ test_cases = (
+ #Without --exclude.
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/Y-1", "dev-libs/Z-1"]),
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True, "--exclude": ["dev-libs/Z"]},
+ success=True,
+ cleanlist=["dev-libs/Y-1"]),
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True, "--exclude": ["dev-libs/Y"]},
+ success=True,
+ cleanlist=[]),
+ )
+
+ playground = ResolverPlayground(installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+class DepcleanAndWildcardsTestCase(TestCase):
+
+ def testDepcleanAndWildcards(self):
+
+ installed = {
+ "dev-libs/A-1": { "RDEPEND": "dev-libs/B" },
+ "dev-libs/B-1": {},
+ }
+
+ # depclean asserts non-empty @world set
+ world = ["non-empty/world-set"]
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["*/*"],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/A-1", "dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/*"],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/A-1", "dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["*/A"],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["dev-libs/A-1"]),
+ ResolverPlaygroundTestCase(
+ ["*/B"],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=[]),
+ )
+
+ playground = ResolverPlayground(installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_depclean_order.py b/lib/portage/tests/resolver/test_depclean_order.py
new file mode 100644
index 000000000..9511d292c
--- /dev/null
+++ b/lib/portage/tests/resolver/test_depclean_order.py
@@ -0,0 +1,57 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class SimpleDepcleanTestCase(TestCase):
+
+ def testSimpleDepclean(self):
+
+ ebuilds = {
+ "dev-libs/A-1": {
+ "EAPI": "5",
+ "RDEPEND": "dev-libs/B:=",
+ },
+ "dev-libs/B-1": {
+ "EAPI": "5",
+ "RDEPEND": "dev-libs/A",
+ },
+ "dev-libs/C-1": {},
+ }
+
+ installed = {
+ "dev-libs/A-1": {
+ "EAPI": "5",
+ "RDEPEND": "dev-libs/B:0/0=",
+ },
+ "dev-libs/B-1": {
+ "EAPI": "5",
+ "RDEPEND": "dev-libs/A",
+ },
+ "dev-libs/C-1": {},
+ }
+
+ world = (
+ "dev-libs/C",
+ )
+
+ test_cases = (
+ # Remove dev-libs/A-1 first because of dev-libs/B:0/0= (built
+ # slot-operator dep).
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ ordered=True,
+ cleanlist=["dev-libs/A-1", "dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_depclean_slot_unavailable.py b/lib/portage/tests/resolver/test_depclean_slot_unavailable.py
new file mode 100644
index 000000000..689392bb5
--- /dev/null
+++ b/lib/portage/tests/resolver/test_depclean_slot_unavailable.py
@@ -0,0 +1,78 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class DepcleanUnavailableSlotTestCase(TestCase):
+
+ def testDepcleanUnavailableSlot(self):
+ """
+ Test bug #445506, where we want to remove the slot
+ for which the ebuild is no longer available, even
+ though its version is higher.
+ """
+
+ ebuilds = {
+ "sys-kernel/gentoo-sources-3.0.53": {
+ "SLOT": "3.0.53",
+ "KEYWORDS": "x86"
+ },
+ }
+
+ installed = {
+ "sys-kernel/gentoo-sources-3.0.53": {
+ "SLOT": "3.0.53",
+ "KEYWORDS": "x86"
+ },
+ "sys-kernel/gentoo-sources-3.2.21": {
+ "SLOT": "3.2.21",
+ "KEYWORDS": "x86"
+ },
+ }
+
+ world = ["sys-kernel/gentoo-sources"]
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["sys-kernel/gentoo-sources-3.2.21"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ # Now make the newer version available and verify that
+ # the lower version is depcleaned.
+ ebuilds.update({
+ "sys-kernel/gentoo-sources-3.2.21": {
+ "SLOT": "3.2.21",
+ "KEYWORDS": "x86"
+ },
+ })
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["sys-kernel/gentoo-sources-3.0.53"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_depth.py b/lib/portage/tests/resolver/test_depth.py
new file mode 100644
index 000000000..cb1e2dd5d
--- /dev/null
+++ b/lib/portage/tests/resolver/test_depth.py
@@ -0,0 +1,252 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class ResolverDepthTestCase(TestCase):
+
+ def testResolverDepth(self):
+
+ ebuilds = {
+ "dev-libs/A-1": {"RDEPEND" : "dev-libs/B"},
+ "dev-libs/A-2": {"RDEPEND" : "dev-libs/B"},
+ "dev-libs/B-1": {"RDEPEND" : "dev-libs/C"},
+ "dev-libs/B-2": {"RDEPEND" : "dev-libs/C"},
+ "dev-libs/C-1": {},
+ "dev-libs/C-2": {},
+
+ "virtual/libusb-0" : {"EAPI" :"2", "SLOT" : "0", "RDEPEND" : "|| ( >=dev-libs/libusb-0.1.12-r1:0 dev-libs/libusb-compat >=sys-freebsd/freebsd-lib-8.0[usb] )"},
+ "virtual/libusb-1" : {"EAPI" :"2", "SLOT" : "1", "RDEPEND" : ">=dev-libs/libusb-1.0.4:1"},
+ "dev-libs/libusb-0.1.13" : {},
+ "dev-libs/libusb-1.0.5" : {"SLOT":"1"},
+ "dev-libs/libusb-compat-1" : {},
+ "sys-freebsd/freebsd-lib-8": {"IUSE" : "+usb"},
+
+ "sys-fs/udev-164" : {"EAPI" : "1", "RDEPEND" : "virtual/libusb:0"},
+
+ "virtual/jre-1.5.0" : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jre-bin-1.5.0* =virtual/jdk-1.5.0* )"},
+ "virtual/jre-1.5.0-r1" : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jre-bin-1.5.0* =virtual/jdk-1.5.0* )"},
+ "virtual/jre-1.6.0" : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/sun-jre-bin-1.6.0* =virtual/jdk-1.6.0* )"},
+ "virtual/jre-1.6.0-r1" : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/sun-jre-bin-1.6.0* =virtual/jdk-1.6.0* )"},
+ "virtual/jdk-1.5.0" : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jdk-1.5.0* dev-java/gcj-jdk )"},
+ "virtual/jdk-1.5.0-r1" : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jdk-1.5.0* dev-java/gcj-jdk )"},
+ "virtual/jdk-1.6.0" : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
+ "virtual/jdk-1.6.0-r1" : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
+ "dev-java/gcj-jdk-4.5" : {},
+ "dev-java/gcj-jdk-4.5-r1" : {},
+ "dev-java/icedtea-6.1" : {},
+ "dev-java/icedtea-6.1-r1" : {},
+ "dev-java/sun-jdk-1.5" : {"SLOT" : "1.5"},
+ "dev-java/sun-jdk-1.6" : {"SLOT" : "1.6"},
+ "dev-java/sun-jre-bin-1.5" : {"SLOT" : "1.5"},
+ "dev-java/sun-jre-bin-1.6" : {"SLOT" : "1.6"},
+
+ "dev-java/ant-core-1.8" : {"DEPEND" : ">=virtual/jdk-1.4"},
+ "dev-db/hsqldb-1.8" : {"RDEPEND" : ">=virtual/jre-1.6"},
+ }
+
+ installed = {
+ "dev-libs/A-1": {"RDEPEND" : "dev-libs/B"},
+ "dev-libs/B-1": {"RDEPEND" : "dev-libs/C"},
+ "dev-libs/C-1": {},
+
+ "virtual/jre-1.5.0" : {"SLOT" : "1.5", "RDEPEND" : "|| ( =virtual/jdk-1.5.0* =dev-java/sun-jre-bin-1.5.0* )"},
+ "virtual/jre-1.6.0" : {"SLOT" : "1.6", "RDEPEND" : "|| ( =virtual/jdk-1.6.0* =dev-java/sun-jre-bin-1.6.0* )"},
+ "virtual/jdk-1.5.0" : {"SLOT" : "1.5", "RDEPEND" : "|| ( =dev-java/sun-jdk-1.5.0* dev-java/gcj-jdk )"},
+ "virtual/jdk-1.6.0" : {"SLOT" : "1.6", "RDEPEND" : "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
+ "dev-java/gcj-jdk-4.5" : {},
+ "dev-java/icedtea-6.1" : {},
+
+ "virtual/libusb-0" : {"EAPI" :"2", "SLOT" : "0", "RDEPEND" : "|| ( >=dev-libs/libusb-0.1.12-r1:0 dev-libs/libusb-compat >=sys-freebsd/freebsd-lib-8.0[usb] )"},
+ }
+
+ world = ["dev-libs/A"]
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--update": True, "--deep": 0},
+ success = True,
+ mergelist = ["dev-libs/A-2"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--update": True, "--deep": 1},
+ success = True,
+ mergelist = ["dev-libs/B-2", "dev-libs/A-2"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--update": True, "--deep": 2},
+ success = True,
+ mergelist = ["dev-libs/C-2", "dev-libs/B-2", "dev-libs/A-2"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ["dev-libs/C-2", "dev-libs/B-2", "dev-libs/A-2"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--emptytree": True},
+ success = True,
+ mergelist = ["dev-libs/C-2", "dev-libs/B-2", "dev-libs/A-2"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--selective": True, "--deep": True},
+ success = True,
+ mergelist = []),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--deep": 2},
+ success = True,
+ mergelist = ["dev-libs/A-2"]),
+
+ ResolverPlaygroundTestCase(
+ ["virtual/jre"],
+ options = {},
+ success = True,
+ mergelist = ['virtual/jre-1.6.0-r1']),
+
+ ResolverPlaygroundTestCase(
+ ["virtual/jre"],
+ options = {"--deep" : True},
+ success = True,
+ mergelist = ['virtual/jre-1.6.0-r1']),
+
+ # Test bug #141118, where we avoid pulling in
+ # redundant deps, satisfying nested virtuals
+ # as efficiently as possible.
+ ResolverPlaygroundTestCase(
+ ["virtual/jre"],
+ options = {"--selective" : True, "--deep" : True},
+ success = True,
+ mergelist = []),
+
+ # Test bug #150361, where depgraph._greedy_slots()
+ # is triggered by --update with AtomArg.
+ ResolverPlaygroundTestCase(
+ ["virtual/jre"],
+ options = {"--update" : True},
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = [('virtual/jre-1.6.0-r1', 'virtual/jre-1.5.0-r1')]),
+
+ # Recursively traversed virtual dependencies, and their
+ # direct dependencies, are considered to have the same
+ # depth as direct dependencies.
+ ResolverPlaygroundTestCase(
+ ["virtual/jre"],
+ options = {"--update" : True, "--deep" : 1},
+ success = True,
+ ambiguous_merge_order = True,
+ merge_order_assertions=(('dev-java/icedtea-6.1-r1', 'virtual/jdk-1.6.0-r1'), ('virtual/jdk-1.6.0-r1', 'virtual/jre-1.6.0-r1'),
+ ('dev-java/gcj-jdk-4.5-r1', 'virtual/jdk-1.5.0-r1'), ('virtual/jdk-1.5.0-r1', 'virtual/jre-1.5.0-r1')),
+ mergelist = [('dev-java/icedtea-6.1-r1', 'dev-java/gcj-jdk-4.5-r1', 'virtual/jdk-1.6.0-r1', 'virtual/jdk-1.5.0-r1', 'virtual/jre-1.6.0-r1', 'virtual/jre-1.5.0-r1')]),
+
+ ResolverPlaygroundTestCase(
+ ["virtual/jre:1.5"],
+ options = {"--update" : True},
+ success = True,
+ mergelist = ['virtual/jre-1.5.0-r1']),
+
+ ResolverPlaygroundTestCase(
+ ["virtual/jre:1.6"],
+ options = {"--update" : True},
+ success = True,
+ mergelist = ['virtual/jre-1.6.0-r1']),
+
+ # Test that we don't pull in any unnecessary updates
+ # when --update is not specified, even though we
+ # specified --deep.
+ ResolverPlaygroundTestCase(
+ ["dev-java/ant-core"],
+ options = {"--deep" : True},
+ success = True,
+ mergelist = ["dev-java/ant-core-1.8"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-java/ant-core"],
+ options = {"--update" : True},
+ success = True,
+ mergelist = ["dev-java/ant-core-1.8"]),
+
+ # Recursively traversed virtual dependencies, and their
+ # direct dependencies, are considered to have the same
+ # depth as direct dependencies.
+ ResolverPlaygroundTestCase(
+ ["dev-java/ant-core"],
+ options = {"--update" : True, "--deep" : 1},
+ success = True,
+ mergelist = ['dev-java/icedtea-6.1-r1', 'virtual/jdk-1.6.0-r1', 'dev-java/ant-core-1.8']),
+
+ ResolverPlaygroundTestCase(
+ ["dev-db/hsqldb"],
+ options = {"--deep" : True},
+ success = True,
+ mergelist = ["dev-db/hsqldb-1.8"]),
+
+ # Don't traverse deps of an installed package with --deep=0,
+ # even if it's a virtual.
+ ResolverPlaygroundTestCase(
+ ["virtual/libusb:0"],
+ options = {"--selective" : True, "--deep" : 0},
+ success = True,
+ mergelist = []),
+
+ # Satisfy unsatisfied dep of installed package with --deep=1.
+ ResolverPlaygroundTestCase(
+ ["virtual/libusb:0"],
+ options = {"--selective" : True, "--deep" : 1},
+ success = True,
+ mergelist = ['dev-libs/libusb-0.1.13']),
+
+ # Pull in direct dep of virtual, even with --deep=0.
+ ResolverPlaygroundTestCase(
+ ["sys-fs/udev"],
+ options = {"--deep" : 0},
+ success = True,
+ mergelist = ['dev-libs/libusb-0.1.13', 'sys-fs/udev-164']),
+
+ # Test --nodeps with direct virtual deps.
+ ResolverPlaygroundTestCase(
+ ["sys-fs/udev"],
+ options = {"--nodeps" : True},
+ success = True,
+ mergelist = ["sys-fs/udev-164"]),
+
+ # Test that --nodeps overrides --deep.
+ ResolverPlaygroundTestCase(
+ ["sys-fs/udev"],
+ options = {"--nodeps" : True, "--deep" : True},
+ success = True,
+ mergelist = ["sys-fs/udev-164"]),
+
+ # Test that --nodeps overrides --emptytree.
+ ResolverPlaygroundTestCase(
+ ["sys-fs/udev"],
+ options = {"--nodeps" : True, "--emptytree" : True},
+ success = True,
+ mergelist = ["sys-fs/udev-164"]),
+
+ # Test --emptytree with virtuals.
+ ResolverPlaygroundTestCase(
+ ["sys-fs/udev"],
+ options = {"--emptytree" : True},
+ success = True,
+ mergelist = ['dev-libs/libusb-0.1.13', 'virtual/libusb-0', 'sys-fs/udev-164']),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed,
+ world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_disjunctive_depend_order.py b/lib/portage/tests/resolver/test_disjunctive_depend_order.py
new file mode 100644
index 000000000..88f6dac2d
--- /dev/null
+++ b/lib/portage/tests/resolver/test_disjunctive_depend_order.py
@@ -0,0 +1,87 @@
+# Copyright 2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (
+ ResolverPlayground,
+ ResolverPlaygroundTestCase,
+)
+
+class DisjunctiveDependOrderTestCase(TestCase):
+
+ def testDisjunctiveDependOrderTestCase(self):
+ ebuilds = {
+ 'virtual/jre-1.8': {
+ 'EAPI': '6',
+ 'SLOT' : '1.8',
+ 'RDEPEND' : '|| ( dev-java/oracle-jre-bin:1.8 virtual/jdk:1.8 )',
+ },
+ 'virtual/jdk-1.8': {
+ 'EAPI': '6',
+ 'SLOT' : '1.8',
+ 'RDEPEND' : '|| ( dev-java/icedtea:8 dev-java/oracle-jdk-bin:1.8 )',
+ },
+ 'dev-java/icedtea-3.6': {
+ 'SLOT' : '8',
+ },
+ 'dev-java/oracle-jdk-bin-1.8': {
+ 'SLOT' : '1.8',
+ },
+ 'dev-java/oracle-jre-bin-1.8': {
+ 'SLOT' : '1.8',
+ },
+ 'dev-db/hsqldb-1.8' : {
+ 'DEPEND' : 'virtual/jdk',
+ 'RDEPEND' : 'virtual/jre',
+ },
+ }
+
+ binpkgs = {
+ 'dev-db/hsqldb-1.8' : {
+ 'DEPEND' : 'virtual/jdk',
+ 'RDEPEND' : 'virtual/jre',
+ },
+ }
+
+ test_cases = (
+ # Test bug 639346, where a redundant jre implementation
+ # was pulled in because DEPEND was evaluated after
+ # RDEPEND.
+ ResolverPlaygroundTestCase(
+ ['dev-db/hsqldb'],
+ success=True,
+ mergelist=[
+ 'dev-java/icedtea-3.6',
+ 'virtual/jdk-1.8',
+ 'virtual/jre-1.8',
+ 'dev-db/hsqldb-1.8',
+ ],
+ ),
+
+ # The jdk is not needed with --usepkg, so the jre should
+ # be preferred in this case.
+ ResolverPlaygroundTestCase(
+ ['dev-db/hsqldb'],
+ options = {
+ '--usepkg': True
+ },
+ success=True,
+ mergelist=[
+ 'dev-java/oracle-jre-bin-1.8',
+ 'virtual/jre-1.8',
+ '[binary]dev-db/hsqldb-1.8',
+ ],
+ ),
+ )
+
+ playground = ResolverPlayground(debug=False,
+ binpkgs=binpkgs, ebuilds=ebuilds)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_eapi.py b/lib/portage/tests/resolver/test_eapi.py
new file mode 100644
index 000000000..50b9d90da
--- /dev/null
+++ b/lib/portage/tests/resolver/test_eapi.py
@@ -0,0 +1,122 @@
+# Copyright 2010-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class EAPITestCase(TestCase):
+
+ def testEAPI(self):
+
+ ebuilds = {
+ #EAPI-1: IUSE-defaults
+ "dev-libs/A-1.0": { "EAPI": 0, "IUSE": "+foo" },
+ "dev-libs/A-1.1": { "EAPI": 1, "IUSE": "+foo" },
+ "dev-libs/A-1.2": { "EAPI": 2, "IUSE": "+foo" },
+ "dev-libs/A-1.3": { "EAPI": 3, "IUSE": "+foo" },
+ "dev-libs/A-1.4": { "EAPI": "4", "IUSE": "+foo" },
+
+ #EAPI-1: slot deps
+ "dev-libs/A-2.0": { "EAPI": 0, "DEPEND": "dev-libs/B:0" },
+ "dev-libs/A-2.1": { "EAPI": 1, "DEPEND": "dev-libs/B:0" },
+ "dev-libs/A-2.2": { "EAPI": 2, "DEPEND": "dev-libs/B:0" },
+ "dev-libs/A-2.3": { "EAPI": 3, "DEPEND": "dev-libs/B:0" },
+ "dev-libs/A-2.4": { "EAPI": "4", "DEPEND": "dev-libs/B:0" },
+
+ #EAPI-2: use deps
+ "dev-libs/A-3.0": { "EAPI": 0, "DEPEND": "dev-libs/B[foo]" },
+ "dev-libs/A-3.1": { "EAPI": 1, "DEPEND": "dev-libs/B[foo]" },
+ "dev-libs/A-3.2": { "EAPI": 2, "DEPEND": "dev-libs/B[foo]" },
+ "dev-libs/A-3.3": { "EAPI": 3, "DEPEND": "dev-libs/B[foo]" },
+ "dev-libs/A-3.4": { "EAPI": "4", "DEPEND": "dev-libs/B[foo]" },
+
+ #EAPI-2: strong blocks
+ "dev-libs/A-4.0": { "EAPI": 0, "DEPEND": "!!dev-libs/B" },
+ "dev-libs/A-4.1": { "EAPI": 1, "DEPEND": "!!dev-libs/B" },
+ "dev-libs/A-4.2": { "EAPI": 2, "DEPEND": "!!dev-libs/B" },
+ "dev-libs/A-4.3": { "EAPI": 3, "DEPEND": "!!dev-libs/B" },
+ "dev-libs/A-4.4": { "EAPI": "4", "DEPEND": "!!dev-libs/B" },
+
+ #EAPI-4: slot operator deps
+ #~ "dev-libs/A-5.0": { "EAPI": 0, "DEPEND": "dev-libs/B:*" },
+ #~ "dev-libs/A-5.1": { "EAPI": 1, "DEPEND": "dev-libs/B:*" },
+ #~ "dev-libs/A-5.2": { "EAPI": 2, "DEPEND": "dev-libs/B:*" },
+ #~ "dev-libs/A-5.3": { "EAPI": 3, "DEPEND": "dev-libs/B:*" },
+ #~ "dev-libs/A-5.4": { "EAPI": "4", "DEPEND": "dev-libs/B:*" },
+
+ #EAPI-4: use dep defaults
+ "dev-libs/A-6.0": { "EAPI": 0, "DEPEND": "dev-libs/B[bar(+)]" },
+ "dev-libs/A-6.1": { "EAPI": 1, "DEPEND": "dev-libs/B[bar(+)]" },
+ "dev-libs/A-6.2": { "EAPI": 2, "DEPEND": "dev-libs/B[bar(+)]" },
+ "dev-libs/A-6.3": { "EAPI": 3, "DEPEND": "dev-libs/B[bar(+)]" },
+ "dev-libs/A-6.4": { "EAPI": "4", "DEPEND": "dev-libs/B[bar(+)]" },
+
+ #EAPI-4: REQUIRED_USE
+ "dev-libs/A-7.0": { "EAPI": 0, "IUSE": "foo bar", "REQUIRED_USE": "|| ( foo bar )" },
+ "dev-libs/A-7.1": { "EAPI": 1, "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )" },
+ "dev-libs/A-7.2": { "EAPI": 2, "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )" },
+ "dev-libs/A-7.3": { "EAPI": 3, "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )" },
+ "dev-libs/A-7.4": { "EAPI": "4", "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )" },
+
+ "dev-libs/B-1": {"EAPI": 1, "IUSE": "+foo"},
+
+ #EAPI-7: implicit || ( ) no longer satisfies deps
+ "dev-libs/C-1": { "EAPI": "6", "IUSE": "foo", "RDEPEND": "|| ( foo? ( dev-libs/B ) )" },
+ "dev-libs/C-2": { "EAPI": "7", "IUSE": "foo", "RDEPEND": "|| ( foo? ( dev-libs/B ) )" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(["=dev-libs/A-1.0"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-1.1"], success = True, mergelist = ["dev-libs/A-1.1"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-1.2"], success = True, mergelist = ["dev-libs/A-1.2"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-1.3"], success = True, mergelist = ["dev-libs/A-1.3"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-1.4"], success = True, mergelist = ["dev-libs/A-1.4"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/A-2.0"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-2.1"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-2.1"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-2.2"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-2.2"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-2.3"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-2.3"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-2.4"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-2.4"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/A-3.0"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-3.1"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-3.2"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-3.2"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-3.3"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-3.3"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-3.4"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-3.4"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/A-4.0"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-4.1"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-4.2"], success = True, mergelist = ["dev-libs/A-4.2"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-4.3"], success = True, mergelist = ["dev-libs/A-4.3"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-4.4"], success = True, mergelist = ["dev-libs/A-4.4"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/A-5.0"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-5.1"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-5.2"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-5.3"], success = False),
+ # not implemented: EAPI-4: slot operator deps
+ #~ ResolverPlaygroundTestCase(["=dev-libs/A-5.4"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-5.4"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/A-6.0"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-6.1"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-6.2"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-6.3"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-6.4"], success = True, mergelist = ["dev-libs/B-1", "dev-libs/A-6.4"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/A-7.0"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-7.1"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-7.2"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-7.3"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-7.4"], success = True, mergelist = ["dev-libs/A-7.4"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/C-1"], success = True, mergelist = ["dev-libs/C-1"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-2"], success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_features_test_use.py b/lib/portage/tests/resolver/test_features_test_use.py
new file mode 100644
index 000000000..bdd179d7a
--- /dev/null
+++ b/lib/portage/tests/resolver/test_features_test_use.py
@@ -0,0 +1,68 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class FeaturesTestUse(TestCase):
+
+ def testFeaturesTestUse(self):
+ ebuilds = {
+ "dev-libs/A-1" : {
+ "IUSE": "test"
+ },
+ "dev-libs/B-1" : {
+ "IUSE": "test foo"
+ },
+ }
+
+ installed = {
+ "dev-libs/A-1" : {
+ "USE": "",
+ "IUSE": "test"
+ },
+ "dev-libs/B-1" : {
+ "USE": "foo",
+ "IUSE": "test foo"
+ },
+ }
+
+ user_config = {
+ "make.conf" : ("FEATURES=test", "USE=\"-test -foo\"")
+ }
+
+ test_cases = (
+
+ # USE=test state should not trigger --newuse rebuilds, as
+ # specified in bug #373209, comment #3.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--newuse": True, "--selective": True},
+ success = True,
+ mergelist = []),
+
+ # USE=-test -> USE=test, with USE=test forced by FEATURES=test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {},
+ success = True,
+ mergelist = ["dev-libs/A-1"]),
+
+ # USE=foo -> USE=-foo, with USE=test forced by FEATURES=test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options = {"--newuse": True, "--selective": True},
+ success = True,
+ mergelist = ["dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, user_config=user_config, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_imagemagick_graphicsmagick.py b/lib/portage/tests/resolver/test_imagemagick_graphicsmagick.py
new file mode 100644
index 000000000..e5a3d7d7d
--- /dev/null
+++ b/lib/portage/tests/resolver/test_imagemagick_graphicsmagick.py
@@ -0,0 +1,104 @@
+# Copyright 2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (
+ ResolverPlayground,
+ ResolverPlaygroundTestCase,
+)
+
+class ImageMagickGraphicsMagickTestCase(TestCase):
+
+ def testImageMagickUpdate(self):
+
+ ebuilds = {
+ "media-gfx/imagemagick-6.9.7.0" : {
+ "EAPI": "6",
+ "SLOT": "0/6.9.7.0",
+ },
+
+ "media-gfx/imagemagick-6.9.6.6" : {
+ "EAPI": "6",
+ "SLOT": "0/6.9.6.6",
+ },
+
+ "media-gfx/inkscape-0.91-r3" : {
+ "EAPI": "6",
+ "DEPEND": "media-gfx/imagemagick:=",
+ "RDEPEND": "media-gfx/imagemagick:=",
+ },
+
+ "media-video/dvdrip-0.98.11-r3" : {
+ "EAPI": "6",
+ "DEPEND": "|| ( media-gfx/graphicsmagick[imagemagick] media-gfx/imagemagick )",
+ "RDEPEND": "|| ( media-gfx/graphicsmagick[imagemagick] media-gfx/imagemagick )",
+ },
+
+ "media-gfx/graphicsmagick-1.3.25" : {
+ "EAPI": "6",
+ "SLOT": "0/1.3",
+ "IUSE": "imagemagick",
+ "RDEPEND": "imagemagick? ( !media-gfx/imagemagick )",
+ },
+ }
+
+ installed = {
+ "media-gfx/imagemagick-6.9.6.6" : {
+ "EAPI": "6",
+ "SLOT": "0/6.9.6.6",
+ },
+
+ "media-gfx/inkscape-0.91-r3" : {
+ "EAPI": "6",
+ "DEPEND": "media-gfx/imagemagick:0/6.9.6.6=",
+ "RDEPEND": "media-gfx/imagemagick:0/6.9.6.6=",
+ },
+
+ "media-video/dvdrip-0.98.11-r3" : {
+ "EAPI": "6",
+ "DEPEND": "|| ( media-gfx/graphicsmagick[imagemagick] media-gfx/imagemagick )",
+ "RDEPEND": "|| ( media-gfx/graphicsmagick[imagemagick] media-gfx/imagemagick )",
+ },
+
+ "media-gfx/graphicsmagick-1.3.25" : {
+ "EAPI": "6",
+ "SLOT": "0/1.3",
+ "IUSE": "imagemagick",
+ "USE": "",
+ "RDEPEND": "imagemagick? ( !media-gfx/imagemagick )",
+ },
+ }
+
+ world = (
+ "media-gfx/inkscape",
+ "media-video/dvdrip",
+ "media-gfx/graphicsmagick",
+ )
+
+ test_cases = (
+
+ # bug #554070: imagemagick upgrade triggered erroneous
+ # autounmask USE change for media-gfx/graphicsmagick[imagemagick]
+ ResolverPlaygroundTestCase(
+ ["media-gfx/imagemagick", "@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = [
+ "media-gfx/imagemagick-6.9.7.0",
+ "media-gfx/inkscape-0.91-r3"
+ ]
+ ),
+
+ )
+
+ playground = ResolverPlayground(debug=False,
+ ebuilds=ebuilds, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ # Disable debug so that cleanup works.
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_keywords.py b/lib/portage/tests/resolver/test_keywords.py
new file mode 100644
index 000000000..d59ea5881
--- /dev/null
+++ b/lib/portage/tests/resolver/test_keywords.py
@@ -0,0 +1,356 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class KeywordsTestCase(TestCase):
+
+ def testStableConfig(self):
+ # Only accept stable keywords for a particular ARCH.
+
+ user_config = {
+ 'package.accept_keywords':
+ (
+ '*/* -* x86',
+ ),
+ }
+
+ ebuilds = {
+ 'app-misc/A-1': {'KEYWORDS': 'x86'},
+ 'app-misc/B-1': {'KEYWORDS': '~x86'},
+ 'app-misc/C-1': {'KEYWORDS': '*'},
+ 'app-misc/D-1': {'KEYWORDS': '~*'},
+ 'app-misc/E-1': {'KEYWORDS': 'arm'},
+ 'app-misc/F-1': {'KEYWORDS': '~arm'},
+ 'app-misc/G-1': {'KEYWORDS': ''},
+ }
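+ # For reference (standard keyword semantics, matching the
+ # expectations below): in these fixtures, 'x86' is a stable keyword,
+ # '~x86' is testing, '*'/'~*' mean stable/testing on every arch,
+ # and an empty KEYWORDS string keywords the ebuild for no arch.
+ # In package.accept_keywords, '-*' discards previously accepted
+ # keywords, '*' accepts any stable keyword, '~*' any testing
+ # keyword, and '**' (used by testIgnoreKeywordsConfig) accepts a
+ # package regardless of its KEYWORDS.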
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/A'],
+ success = True,
+ mergelist = ['app-misc/A-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/B'],
+ success = False,
+ unstable_keywords = ('app-misc/B-1',),
+ mergelist = ['app-misc/B-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/C'],
+ success = True,
+ mergelist = ['app-misc/C-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/D'],
+ success = False,
+ unstable_keywords = ('app-misc/D-1',),
+ mergelist = ['app-misc/D-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/E'],
+ success = False,
+ unstable_keywords = ('app-misc/E-1',),
+ mergelist = ['app-misc/E-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/F'],
+ success = False,
+ unstable_keywords = ('app-misc/F-1',),
+ mergelist = ['app-misc/F-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/G'],
+ success = False,
+ unstable_keywords = ('app-misc/G-1',),
+ mergelist = ['app-misc/G-1']),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ user_config=user_config)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testAnyStableConfig(self):
+ # Accept stable keywords for any ARCH.
+
+ user_config = {
+ 'package.accept_keywords':
+ (
+ '*/* -* *',
+ ),
+ }
+
+ ebuilds = {
+ 'app-misc/A-1': {'KEYWORDS': 'x86'},
+ 'app-misc/B-1': {'KEYWORDS': '~x86'},
+ 'app-misc/C-1': {'KEYWORDS': '*'},
+ 'app-misc/D-1': {'KEYWORDS': '~*'},
+ 'app-misc/E-1': {'KEYWORDS': 'arm'},
+ 'app-misc/F-1': {'KEYWORDS': '~arm'},
+ 'app-misc/G-1': {'KEYWORDS': ''},
+ }
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/A'],
+ success = True,
+ mergelist = ['app-misc/A-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/B'],
+ success = False,
+ unstable_keywords = ('app-misc/B-1',),
+ mergelist = ['app-misc/B-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/C'],
+ success = True,
+ mergelist = ['app-misc/C-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/D'],
+ success = False,
+ unstable_keywords = ('app-misc/D-1',),
+ mergelist = ['app-misc/D-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/E'],
+ success = True,
+ mergelist = ['app-misc/E-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/F'],
+ success = False,
+ unstable_keywords = ('app-misc/F-1',),
+ mergelist = ['app-misc/F-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/G'],
+ success = False,
+ unstable_keywords = ('app-misc/G-1',),
+ mergelist = ['app-misc/G-1']),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ user_config=user_config)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testUnstableConfig(self):
+ # Accept stable and unstable keywords for a particular ARCH.
+
+ user_config = {
+ 'package.accept_keywords':
+ (
+ '*/* -* x86 ~x86',
+ ),
+ }
+
+ ebuilds = {
+ 'app-misc/A-1': {'KEYWORDS': 'x86'},
+ 'app-misc/B-1': {'KEYWORDS': '~x86'},
+ 'app-misc/C-1': {'KEYWORDS': '*'},
+ 'app-misc/D-1': {'KEYWORDS': '~*'},
+ 'app-misc/E-1': {'KEYWORDS': 'arm'},
+ 'app-misc/F-1': {'KEYWORDS': '~arm'},
+ 'app-misc/G-1': {'KEYWORDS': ''},
+ }
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/A'],
+ success = True,
+ mergelist = ['app-misc/A-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/B'],
+ success = True,
+ mergelist = ['app-misc/B-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/C'],
+ success = True,
+ mergelist = ['app-misc/C-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/D'],
+ success = True,
+ mergelist = ['app-misc/D-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/E'],
+ success = False,
+ unstable_keywords = ('app-misc/E-1',),
+ mergelist = ['app-misc/E-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/F'],
+ success = False,
+ unstable_keywords = ('app-misc/F-1',),
+ mergelist = ['app-misc/F-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/G'],
+ success = False,
+ unstable_keywords = ('app-misc/G-1',),
+ mergelist = ['app-misc/G-1']),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ user_config=user_config)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testAnyUnstableConfig(self):
+ # Accept unstable keywords for any ARCH.
+
+ user_config = {
+ 'package.accept_keywords':
+ (
+ '*/* -* * ~*',
+ ),
+ }
+
+ ebuilds = {
+ 'app-misc/A-1': {'KEYWORDS': 'x86'},
+ 'app-misc/B-1': {'KEYWORDS': '~x86'},
+ 'app-misc/C-1': {'KEYWORDS': '*'},
+ 'app-misc/D-1': {'KEYWORDS': '~*'},
+ 'app-misc/E-1': {'KEYWORDS': 'arm'},
+ 'app-misc/F-1': {'KEYWORDS': '~arm'},
+ 'app-misc/G-1': {'KEYWORDS': ''},
+ }
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/A'],
+ success = True,
+ mergelist = ['app-misc/A-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/B'],
+ success = True,
+ mergelist = ['app-misc/B-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/C'],
+ success = True,
+ mergelist = ['app-misc/C-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/D'],
+ success = True,
+ mergelist = ['app-misc/D-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/E'],
+ success = True,
+ mergelist = ['app-misc/E-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/F'],
+ success = True,
+ mergelist = ['app-misc/F-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/G'],
+ success = False,
+ unstable_keywords = ('app-misc/G-1',),
+ mergelist = ['app-misc/G-1']),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ user_config=user_config)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testIgnoreKeywordsConfig(self):
+ # Ignore keywords entirely (accept **)
+
+ user_config = {
+ 'package.accept_keywords':
+ (
+ '*/* -* **',
+ ),
+ }
+
+ ebuilds = {
+ 'app-misc/A-1': {'KEYWORDS': 'x86'},
+ 'app-misc/B-1': {'KEYWORDS': '~x86'},
+ 'app-misc/C-1': {'KEYWORDS': '*'},
+ 'app-misc/D-1': {'KEYWORDS': '~*'},
+ 'app-misc/E-1': {'KEYWORDS': 'arm'},
+ 'app-misc/F-1': {'KEYWORDS': '~arm'},
+ 'app-misc/G-1': {'KEYWORDS': ''},
+ }
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/A'],
+ success = True,
+ mergelist = ['app-misc/A-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/B'],
+ success = True,
+ mergelist = ['app-misc/B-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/C'],
+ success = True,
+ mergelist = ['app-misc/C-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/D'],
+ success = True,
+ mergelist = ['app-misc/D-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/E'],
+ success = True,
+ mergelist = ['app-misc/E-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/F'],
+ success = True,
+ mergelist = ['app-misc/F-1']),
+
+ ResolverPlaygroundTestCase(
+ ['app-misc/G'],
+ success = True,
+ mergelist = ['app-misc/G-1']),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ user_config=user_config)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_merge_order.py b/lib/portage/tests/resolver/test_merge_order.py
new file mode 100644
index 000000000..5d000d12b
--- /dev/null
+++ b/lib/portage/tests/resolver/test_merge_order.py
@@ -0,0 +1,478 @@
+# Copyright 2011-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import portage
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class MergeOrderTestCase(TestCase):
+
+ def testMergeOrder(self):
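+ # Blocker syntax used throughout these fixtures (standard EAPI 2+
+ # semantics): '!cat/pkg' is a soft blocker, which the resolver may
+ # satisfy automatically (blocking packages may temporarily overlap),
+ # while '!!cat/pkg' is a hard blocker that cannot be resolved by
+ # temporary overlap and may require manual intervention.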
+ ebuilds = {
+ "app-misc/blocker-buildtime-a-1" : {},
+ "app-misc/blocker-buildtime-unbuilt-a-1" : {
+ "DEPEND" : "!app-misc/installed-blocker-a",
+ },
+ "app-misc/blocker-buildtime-unbuilt-hard-a-1" : {
+ "EAPI" : "2",
+ "DEPEND" : "!!app-misc/installed-blocker-a",
+ },
+ "app-misc/blocker-update-order-a-1" : {},
+ "app-misc/blocker-update-order-hard-a-1" : {},
+ "app-misc/blocker-update-order-hard-unsolvable-a-1" : {},
+ "app-misc/blocker-runtime-a-1" : {},
+ "app-misc/blocker-runtime-b-1" : {},
+ "app-misc/blocker-runtime-hard-a-1" : {},
+ "app-misc/circ-buildtime-a-0": {},
+ "app-misc/circ-buildtime-a-1": {
+ "RDEPEND": "app-misc/circ-buildtime-b",
+ },
+ "app-misc/circ-buildtime-b-1": {
+ "RDEPEND": "app-misc/circ-buildtime-c",
+ },
+ "app-misc/circ-buildtime-c-1": {
+ "DEPEND": "app-misc/circ-buildtime-a",
+ },
+ "app-misc/circ-buildtime-unsolvable-a-1": {
+ "RDEPEND": "app-misc/circ-buildtime-unsolvable-b",
+ },
+ "app-misc/circ-buildtime-unsolvable-b-1": {
+ "RDEPEND": "app-misc/circ-buildtime-unsolvable-c",
+ },
+ "app-misc/circ-buildtime-unsolvable-c-1": {
+ "DEPEND": "app-misc/circ-buildtime-unsolvable-a",
+ },
+ "app-misc/circ-post-runtime-a-1": {
+ "PDEPEND": "app-misc/circ-post-runtime-b",
+ },
+ "app-misc/circ-post-runtime-b-1": {
+ "RDEPEND": "app-misc/circ-post-runtime-c",
+ },
+ "app-misc/circ-post-runtime-c-1": {
+ "RDEPEND": "app-misc/circ-post-runtime-a",
+ },
+ "app-misc/circ-runtime-a-1": {
+ "RDEPEND": "app-misc/circ-runtime-b",
+ },
+ "app-misc/circ-runtime-b-1": {
+ "RDEPEND": "app-misc/circ-runtime-c",
+ },
+ "app-misc/circ-runtime-c-1": {
+ "RDEPEND": "app-misc/circ-runtime-a",
+ },
+ "app-misc/circ-satisfied-a-0": {
+ "RDEPEND": "app-misc/circ-satisfied-b",
+ },
+ "app-misc/circ-satisfied-a-1": {
+ "RDEPEND": "app-misc/circ-satisfied-b",
+ },
+ "app-misc/circ-satisfied-b-0": {
+ "RDEPEND": "app-misc/circ-satisfied-c",
+ },
+ "app-misc/circ-satisfied-b-1": {
+ "RDEPEND": "app-misc/circ-satisfied-c",
+ },
+ "app-misc/circ-satisfied-c-0": {
+ "DEPEND": "app-misc/circ-satisfied-a",
+ "RDEPEND": "app-misc/circ-satisfied-a",
+ },
+ "app-misc/circ-satisfied-c-1": {
+ "DEPEND": "app-misc/circ-satisfied-a",
+ "RDEPEND": "app-misc/circ-satisfied-a",
+ },
+ "app-misc/circ-smallest-a-1": {
+ "RDEPEND": "app-misc/circ-smallest-b",
+ },
+ "app-misc/circ-smallest-b-1": {
+ "RDEPEND": "app-misc/circ-smallest-a",
+ },
+ "app-misc/circ-smallest-c-1": {
+ "RDEPEND": "app-misc/circ-smallest-d",
+ },
+ "app-misc/circ-smallest-d-1": {
+ "RDEPEND": "app-misc/circ-smallest-e",
+ },
+ "app-misc/circ-smallest-e-1": {
+ "RDEPEND": "app-misc/circ-smallest-c",
+ },
+ "app-misc/circ-smallest-f-1": {
+ "RDEPEND": "app-misc/circ-smallest-g app-misc/circ-smallest-a app-misc/circ-smallest-c",
+ },
+ "app-misc/circ-smallest-g-1": {
+ "RDEPEND": "app-misc/circ-smallest-f",
+ },
+ "app-misc/installed-blocker-a-1" : {
+ "EAPI" : "2",
+ "DEPEND" : "!app-misc/blocker-buildtime-a",
+ "RDEPEND" : "!app-misc/blocker-runtime-a !app-misc/blocker-runtime-b !!app-misc/blocker-runtime-hard-a",
+ },
+ "app-misc/installed-old-version-blocks-a-1" : {
+ "RDEPEND" : "!app-misc/blocker-update-order-a",
+ },
+ "app-misc/installed-old-version-blocks-a-2" : {},
+ "app-misc/installed-old-version-blocks-hard-a-1" : {
+ "EAPI" : "2",
+ "RDEPEND" : "!!app-misc/blocker-update-order-hard-a",
+ },
+ "app-misc/installed-old-version-blocks-hard-a-2" : {},
+ "app-misc/installed-old-version-blocks-hard-unsolvable-a-1" : {
+ "EAPI" : "2",
+ "RDEPEND" : "!!app-misc/blocker-update-order-hard-unsolvable-a",
+ },
+ "app-misc/installed-old-version-blocks-hard-unsolvable-a-2" : {
+ "DEPEND" : "app-misc/blocker-update-order-hard-unsolvable-a",
+ "RDEPEND" : "",
+ },
+ "app-misc/some-app-a-1": {
+ "RDEPEND": "app-misc/circ-runtime-a app-misc/circ-runtime-b",
+ },
+ "app-misc/some-app-b-1": {
+ "RDEPEND": "app-misc/circ-post-runtime-a app-misc/circ-post-runtime-b",
+ },
+ "app-misc/some-app-c-1": {
+ "RDEPEND": "app-misc/circ-buildtime-a app-misc/circ-buildtime-b",
+ },
+ "app-admin/eselect-python-20100321" : {},
+ "sys-apps/portage-2.1.9.42" : {
+ "DEPEND" : "dev-lang/python",
+ "RDEPEND" : "dev-lang/python",
+ },
+ "sys-apps/portage-2.1.9.49" : {
+ "DEPEND" : "dev-lang/python >=app-admin/eselect-python-20091230",
+ "RDEPEND" : "dev-lang/python",
+ },
+ "dev-lang/python-3.1" : {},
+ "dev-lang/python-3.2" : {},
+ "virtual/libc-0" : {
+ "RDEPEND" : "sys-libs/glibc",
+ },
+ "sys-devel/gcc-4.5.2" : {},
+ "sys-devel/binutils-2.18" : {},
+ "sys-devel/binutils-2.20.1" : {},
+ "sys-libs/glibc-2.11" : {
+ "DEPEND" : "virtual/os-headers sys-devel/gcc sys-devel/binutils",
+ "RDEPEND": "",
+ },
+ "sys-libs/glibc-2.13" : {
+ "DEPEND" : "virtual/os-headers sys-devel/gcc sys-devel/binutils",
+ "RDEPEND": "",
+ },
+ "virtual/os-headers-0" : {
+ "RDEPEND" : "sys-kernel/linux-headers",
+ },
+ "sys-kernel/linux-headers-2.6.38": {
+ "DEPEND" : "app-arch/xz-utils",
+ "RDEPEND": "",
+ },
+ "sys-kernel/linux-headers-2.6.39": {
+ "DEPEND" : "app-arch/xz-utils",
+ "RDEPEND": "",
+ },
+ "app-arch/xz-utils-5.0.1" : {},
+ "app-arch/xz-utils-5.0.2" : {},
+ "dev-util/pkgconfig-0.25-r2" : {},
+ "kde-base/kdelibs-3.5.7" : {
+ "PDEPEND" : "kde-misc/kdnssd-avahi",
+ },
+ "kde-misc/kdnssd-avahi-0.1.2" : {
+ "DEPEND" : "kde-base/kdelibs app-arch/xz-utils dev-util/pkgconfig",
+ "RDEPEND" : "kde-base/kdelibs",
+ },
+ "kde-base/kdnssd-3.5.7" : {
+ "DEPEND" : "kde-base/kdelibs",
+ "RDEPEND" : "kde-base/kdelibs",
+ },
+ "kde-base/libkdegames-3.5.7" : {
+ "DEPEND" : "kde-base/kdelibs",
+ "RDEPEND" : "kde-base/kdelibs",
+ },
+ "kde-base/kmines-3.5.7" : {
+ "DEPEND" : "kde-base/libkdegames",
+ "RDEPEND" : "kde-base/libkdegames",
+ },
+ "media-libs/mesa-9.1.3" : {
+ "EAPI" : "5",
+ "IUSE" : "+xorg",
+ "DEPEND" : "xorg? ( x11-base/xorg-server:= )",
+ "RDEPEND" : "xorg? ( x11-base/xorg-server:= )",
+ },
+ "media-video/libav-0.7_pre20110327" : {
+ "EAPI" : "2",
+ "IUSE" : "X +encode",
+ "RDEPEND" : "!media-video/ffmpeg",
+ },
+ "media-video/ffmpeg-0.7_rc1" : {
+ "EAPI" : "2",
+ "IUSE" : "X +encode",
+ },
+ "virtual/ffmpeg-0.6.90" : {
+ "EAPI" : "2",
+ "IUSE" : "X +encode",
+ "RDEPEND" : "|| ( >=media-video/ffmpeg-0.6.90_rc0-r2[X=,encode=] >=media-video/libav-0.6.90_rc[X=,encode=] )",
+ },
+ "x11-base/xorg-server-1.14.1" : {
+ "EAPI" : "5",
+ "SLOT": "0/1.14.1",
+ "DEPEND" : "media-libs/mesa",
+ "RDEPEND" : "media-libs/mesa",
+ },
+ }
+
+ installed = {
+ "app-misc/circ-buildtime-a-0": {},
+ "app-misc/circ-satisfied-a-0": {
+ "RDEPEND": "app-misc/circ-satisfied-b",
+ },
+ "app-misc/circ-satisfied-b-0": {
+ "RDEPEND": "app-misc/circ-satisfied-c",
+ },
+ "app-misc/circ-satisfied-c-0": {
+ "DEPEND": "app-misc/circ-satisfied-a",
+ "RDEPEND": "app-misc/circ-satisfied-a",
+ },
+ "app-misc/installed-blocker-a-1" : {
+ "EAPI" : "2",
+ "DEPEND" : "!app-misc/blocker-buildtime-a",
+ "RDEPEND" : "!app-misc/blocker-runtime-a !app-misc/blocker-runtime-b !!app-misc/blocker-runtime-hard-a",
+ },
+ "app-misc/installed-old-version-blocks-a-1" : {
+ "RDEPEND" : "!app-misc/blocker-update-order-a",
+ },
+ "app-misc/installed-old-version-blocks-hard-a-1" : {
+ "EAPI" : "2",
+ "RDEPEND" : "!!app-misc/blocker-update-order-hard-a",
+ },
+ "app-misc/installed-old-version-blocks-hard-unsolvable-a-1" : {
+ "EAPI" : "2",
+ "RDEPEND" : "!!app-misc/blocker-update-order-hard-unsolvable-a",
+ },
+ "sys-apps/portage-2.1.9.42" : {
+ "DEPEND" : "dev-lang/python",
+ "RDEPEND" : "dev-lang/python",
+ },
+ "dev-lang/python-3.1" : {},
+ "virtual/libc-0" : {
+ "RDEPEND" : "sys-libs/glibc",
+ },
+ "sys-devel/binutils-2.18" : {},
+ "sys-libs/glibc-2.11" : {
+ "DEPEND" : "virtual/os-headers sys-devel/gcc sys-devel/binutils",
+ "RDEPEND": "",
+ },
+ "virtual/os-headers-0" : {
+ "RDEPEND" : "sys-kernel/linux-headers",
+ },
+ "sys-kernel/linux-headers-2.6.38": {
+ "DEPEND" : "app-arch/xz-utils",
+ "RDEPEND": "",
+ },
+ "app-arch/xz-utils-5.0.1" : {},
+ "media-libs/mesa-9.1.3" : {
+ "EAPI" : "5",
+ "IUSE" : "+xorg",
+ "USE": "xorg",
+ "DEPEND" : "x11-base/xorg-server:0/1.14.1=",
+ "RDEPEND" : "x11-base/xorg-server:0/1.14.1=",
+ },
+ "media-video/ffmpeg-0.7_rc1" : {
+ "EAPI" : "2",
+ "IUSE" : "X +encode",
+ "USE" : "encode",
+ },
+ "virtual/ffmpeg-0.6.90" : {
+ "EAPI" : "2",
+ "IUSE" : "X +encode",
+ "USE" : "encode",
+ "RDEPEND" : "|| ( >=media-video/ffmpeg-0.6.90_rc0-r2[X=,encode=] >=media-video/libav-0.6.90_rc[X=,encode=] )",
+ },
+ "x11-base/xorg-server-1.14.1" : {
+ "EAPI" : "5",
+ "SLOT": "0/1.14.1",
+ "DEPEND" : "media-libs/mesa",
+ "RDEPEND" : "media-libs/mesa",
+ },
+ }
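+ # Note on the ':=' deps above (standard slot-operator semantics):
+ # when a consumer is installed, ':=' is rewritten to bind to the
+ # provider's SLOT/sub-slot (e.g. 'x11-base/xorg-server:0/1.14.1='),
+ # so a sub-slot change in the provider forces a rebuild of the
+ # consumer; the last test case below relies on this when ordering
+ # the rebuilds.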
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["app-misc/some-app-a"],
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = [("app-misc/circ-runtime-a-1", "app-misc/circ-runtime-b-1", "app-misc/circ-runtime-c-1"), "app-misc/some-app-a-1"]),
+ ResolverPlaygroundTestCase(
+ ["app-misc/some-app-a"],
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = [("app-misc/circ-runtime-c-1", "app-misc/circ-runtime-b-1", "app-misc/circ-runtime-a-1"), "app-misc/some-app-a-1"]),
+ # Test unsolvable circular dep that is RDEPEND in one
+ # direction and DEPEND in the other.
+ ResolverPlaygroundTestCase(
+ ["app-misc/circ-buildtime-unsolvable-a"],
+ success = False,
+ circular_dependency_solutions = {}),
+ # Test optimal merge order for a circular dep that is
+ # RDEPEND in one direction and DEPEND in the other.
+ # This requires an installed instance of the DEPEND
+ # package in order to be solvable.
+ ResolverPlaygroundTestCase(
+ ["app-misc/some-app-c", "app-misc/circ-buildtime-a"],
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = [("app-misc/circ-buildtime-b-1", "app-misc/circ-buildtime-c-1"), "app-misc/circ-buildtime-a-1", "app-misc/some-app-c-1"]),
+ # Test optimal merge order for a circular dep that is
+ # RDEPEND in one direction and PDEPEND in the other.
+ ResolverPlaygroundTestCase(
+ ["app-misc/some-app-b"],
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = ["app-misc/circ-post-runtime-a-1", ("app-misc/circ-post-runtime-b-1", "app-misc/circ-post-runtime-c-1"), "app-misc/some-app-b-1"]),
+ # Test optimal merge order for a circular dep that is
+ # RDEPEND in one direction and DEPEND in the other,
+ # with all dependencies initially satisfied. Optimally,
+ # the DEPEND/buildtime dep should be updated before the
+ # package that depends on it, even though it's feasible
+ # to update it later since it is already satisfied.
+ ResolverPlaygroundTestCase(
+ ["app-misc/circ-satisfied-a", "app-misc/circ-satisfied-b", "app-misc/circ-satisfied-c"],
+ success = True,
+ all_permutations = True,
+ ambiguous_merge_order = True,
+ merge_order_assertions = (("app-misc/circ-satisfied-a-1", "app-misc/circ-satisfied-c-1"),),
+ mergelist = [("app-misc/circ-satisfied-a-1", "app-misc/circ-satisfied-b-1", "app-misc/circ-satisfied-c-1")]),
+ # In the case of multiple runtime cycles, where some cycles
+ # may depend on smaller independent cycles, it's optimal
+ # to merge smaller independent cycles before other cycles
+ # that depend on them.
+ ResolverPlaygroundTestCase(
+ ["app-misc/circ-smallest-a", "app-misc/circ-smallest-c", "app-misc/circ-smallest-f"],
+ success = True,
+ ambiguous_merge_order = True,
+ all_permutations = True,
+ mergelist = [('app-misc/circ-smallest-a-1', 'app-misc/circ-smallest-b-1'),
+ ('app-misc/circ-smallest-c-1', 'app-misc/circ-smallest-d-1', 'app-misc/circ-smallest-e-1'),
+ ('app-misc/circ-smallest-f-1', 'app-misc/circ-smallest-g-1')]),
+ # installed package has buildtime-only blocker
+ # that should be ignored
+ ResolverPlaygroundTestCase(
+ ["app-misc/blocker-buildtime-a"],
+ success = True,
+ mergelist = ["app-misc/blocker-buildtime-a-1"]),
+ # We're installing a package that an old version of
+ # an installed package blocks. However, an update is
+ # available to the old package. The old package should
+ # be updated first, in order to solve the blocker without
+ # any need for blocking packages to temporarily overlap.
+ ResolverPlaygroundTestCase(
+ ["app-misc/blocker-update-order-a", "app-misc/installed-old-version-blocks-a"],
+ success = True,
+ all_permutations = True,
+ mergelist = ["app-misc/installed-old-version-blocks-a-2", "app-misc/blocker-update-order-a-1"]),
+ # This is the same as above but with a hard blocker. The hard
+ # blocker is solved automatically since the update makes it
+ # irrelevant.
+ ResolverPlaygroundTestCase(
+ ["app-misc/blocker-update-order-hard-a", "app-misc/installed-old-version-blocks-hard-a"],
+ success = True,
+ all_permutations = True,
+ mergelist = ["app-misc/installed-old-version-blocks-hard-a-2", "app-misc/blocker-update-order-hard-a-1"]),
+ # This is similar to the above case except that it's unsolvable
+ # due to merge order, unless bug 250286 is implemented so that
+ # the installed blocker will be unmerged before installation
+ # of the package it blocks (rather than after like a soft blocker
+ # would be handled). The "unmerge before" behavior requested
+ # in bug 250286 must be optional since essential programs or
+ # libraries may be temporarily unavailable during a
+ # non-overlapping update like this.
+ ResolverPlaygroundTestCase(
+ ["app-misc/blocker-update-order-hard-unsolvable-a", "app-misc/installed-old-version-blocks-hard-unsolvable-a"],
+ success = False,
+ all_permutations = True,
+ ambiguous_merge_order = True,
+ merge_order_assertions = (('app-misc/blocker-update-order-hard-unsolvable-a-1', 'app-misc/installed-old-version-blocks-hard-unsolvable-a-2'),),
+ mergelist = [('app-misc/blocker-update-order-hard-unsolvable-a-1', 'app-misc/installed-old-version-blocks-hard-unsolvable-a-2', '!!app-misc/blocker-update-order-hard-unsolvable-a')]),
+ # The installed package has runtime blockers that
+ # should cause it to be uninstalled. The uninstall
+ # task is executed only after blocking packages have
+ # been merged.
+ # TODO: distinguish between install/uninstall tasks in mergelist
+ ResolverPlaygroundTestCase(
+ ["app-misc/blocker-runtime-a", "app-misc/blocker-runtime-b"],
+ success = True,
+ all_permutations = True,
+ ambiguous_merge_order = True,
+ mergelist = [("app-misc/blocker-runtime-a-1", "app-misc/blocker-runtime-b-1"), "[uninstall]app-misc/installed-blocker-a-1", ("!app-misc/blocker-runtime-a", "!app-misc/blocker-runtime-b")]),
+ # We have a soft buildtime blocker against an installed
+ # package that should cause it to be uninstalled. Note that with
+ # soft blockers, the blocking packages are allowed to temporarily
+ # overlap. This allows any essential programs/libraries provided
+ # by both packages to be available at all times.
+ # TODO: distinguish between install/uninstall tasks in mergelist
+ ResolverPlaygroundTestCase(
+ ["app-misc/blocker-buildtime-unbuilt-a"],
+ success = True,
+ mergelist = ["app-misc/blocker-buildtime-unbuilt-a-1", "[uninstall]app-misc/installed-blocker-a-1", "!app-misc/installed-blocker-a"]),
+ # We have a hard buildtime blocker against an installed
+ # package that will not resolve automatically (unless
+ # the option requested in bug 250286 is implemented).
+ ResolverPlaygroundTestCase(
+ ["app-misc/blocker-buildtime-unbuilt-hard-a"],
+ success = False,
+ mergelist = ['app-misc/blocker-buildtime-unbuilt-hard-a-1', '!!app-misc/installed-blocker-a']),
+ # An installed package has a hard runtime blocker that
+ # will not resolve automatically (unless the option
+ # requested in bug 250286 is implemented).
+ ResolverPlaygroundTestCase(
+ ["app-misc/blocker-runtime-hard-a"],
+ success = False,
+ mergelist = ['app-misc/blocker-runtime-hard-a-1', '!!app-misc/blocker-runtime-hard-a']),
+ # Test swapping of providers for a new-style virtual package,
+ # which relies on delayed evaluation of disjunctive (virtual
+ # and ||) deps as required to solve bug #264434. Note that
+ # this behavior is not supported for old-style PROVIDE virtuals,
+ # as reported in bug #339164.
+ ResolverPlaygroundTestCase(
+ ["media-video/libav"],
+ success=True,
+ mergelist = ['media-video/libav-0.7_pre20110327', '[uninstall]media-video/ffmpeg-0.7_rc1', '!media-video/ffmpeg']),
+ # Test that OS_HEADERS_PACKAGE_ATOM and LIBC_PACKAGE_ATOM
+ # are merged asap, in order to account for implicit
+ # dependencies. See bug #303567. Optimally, satisfied deps
+ # are always merged after the asap nodes that depend on them.
+ ResolverPlaygroundTestCase(
+ ["app-arch/xz-utils", "sys-kernel/linux-headers", "sys-devel/binutils", "sys-libs/glibc"],
+ options = {"--complete-graph" : True},
+ success = True,
+ all_permutations = True,
+ ambiguous_merge_order = True,
+ mergelist = ['sys-kernel/linux-headers-2.6.39', 'sys-devel/gcc-4.5.2', 'sys-libs/glibc-2.13', ('app-arch/xz-utils-5.0.2', 'sys-devel/binutils-2.20.1')]),
+ # Test asap install of PDEPEND for bug #180045.
+ ResolverPlaygroundTestCase(
+ ["kde-base/kmines", "kde-base/kdnssd", "kde-base/kdelibs", "app-arch/xz-utils"],
+ success = True,
+ all_permutations = True,
+ ambiguous_merge_order = True,
+ merge_order_assertions = (
+ ('dev-util/pkgconfig-0.25-r2', 'kde-misc/kdnssd-avahi-0.1.2'),
+ ('kde-misc/kdnssd-avahi-0.1.2', 'kde-base/libkdegames-3.5.7'),
+ ('kde-misc/kdnssd-avahi-0.1.2', 'kde-base/kdnssd-3.5.7'),
+ ('kde-base/libkdegames-3.5.7', 'kde-base/kmines-3.5.7'),
+ ),
+ mergelist = [('kde-base/kdelibs-3.5.7', 'dev-util/pkgconfig-0.25-r2', 'kde-misc/kdnssd-avahi-0.1.2', 'app-arch/xz-utils-5.0.2', 'kde-base/libkdegames-3.5.7', 'kde-base/kdnssd-3.5.7', 'kde-base/kmines-3.5.7')]),
+ # Test satisfied circular DEPEND/RDEPEND with one := operator.
+ # Both deps are already satisfied by installed packages, but
+ # the := dep is given higher priority in merge order.
+ ResolverPlaygroundTestCase(
+ ["media-libs/mesa", "x11-base/xorg-server"],
+ success=True,
+ all_permutations = True,
+ mergelist = ['x11-base/xorg-server-1.14.1', 'media-libs/mesa-9.1.3']),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_missing_iuse_and_evaluated_atoms.py b/lib/portage/tests/resolver/test_missing_iuse_and_evaluated_atoms.py
new file mode 100644
index 000000000..a860e7bb6
--- /dev/null
+++ b/lib/portage/tests/resolver/test_missing_iuse_and_evaluated_atoms.py
@@ -0,0 +1,31 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class MissingIUSEandEvaluatedAtomsTestCase(TestCase):
+
+ def testMissingIUSEandEvaluatedAtoms(self):
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "dev-libs/B[foo?]", "IUSE": "foo bar", "EAPI": 2 },
+ "dev-libs/A-2": { "DEPEND": "dev-libs/B[foo?,bar]", "IUSE": "foo bar", "EAPI": 2 },
+ "dev-libs/B-1": { "IUSE": "bar" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1"],
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-2"],
+ success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_multirepo.py b/lib/portage/tests/resolver/test_multirepo.py
new file mode 100644
index 000000000..dabec6af9
--- /dev/null
+++ b/lib/portage/tests/resolver/test_multirepo.py
@@ -0,0 +1,398 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class MultirepoTestCase(TestCase):
+
+ def testMultirepo(self):
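+ # The '::repo' suffix on the ebuild keys and atoms below selects a
+ # specific repository; entries without a suffix live in the
+ # playground's default repository, which atoms and sets refer to
+ # as '::test_repo'.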
+ ebuilds = {
+ #Simple repo selection
+ "dev-libs/A-1": { },
+ "dev-libs/A-1::repo1": { },
+ "dev-libs/A-2::repo1": { },
+ "dev-libs/A-1::repo2": { },
+
+ #Packages in exactly one repo
+ "dev-libs/B-1": { },
+ "dev-libs/C-1::repo1": { },
+
+ #Package in repository 1 and 2, but 2 must be used
+ "dev-libs/D-1::repo1": { },
+ "dev-libs/D-1::repo2": { },
+
+ "dev-libs/E-1": { },
+ "dev-libs/E-1::repo1": { },
+ "dev-libs/E-1::repo2": { "SLOT": "1" },
+
+ "dev-libs/F-1::repo1": { "SLOT": "1" },
+ "dev-libs/F-1::repo2": { "SLOT": "1" },
+
+ "dev-libs/G-1::repo1": { "EAPI" : "4", "IUSE":"+x +y", "REQUIRED_USE" : "" },
+ "dev-libs/G-1::repo2": { "EAPI" : "4", "IUSE":"+x +y", "REQUIRED_USE" : "^^ ( x y )" },
+
+ "dev-libs/H-1": { "KEYWORDS": "x86", "EAPI" : "3",
+ "RDEPEND" : "|| ( dev-libs/I:2 dev-libs/I:1 )" },
+
+ "dev-libs/I-1::repo2": { "SLOT" : "1"},
+ "dev-libs/I-2::repo2": { "SLOT" : "2"},
+
+ "dev-libs/K-1::repo2": { },
+ }
+
+ installed = {
+ "dev-libs/H-1": { "RDEPEND" : "|| ( dev-libs/I:2 dev-libs/I:1 )", "EAPI" : "3" },
+ "dev-libs/I-2::repo1": {"SLOT" : "2"},
+ "dev-libs/K-1::repo1": { },
+ }
+
+ binpkgs = {
+ "dev-libs/C-1::repo2": { },
+ "dev-libs/I-2::repo1": {"SLOT" : "2"},
+ "dev-libs/K-1::repo2": { },
+ }
+
+ sets = {
+ "multirepotest":
+ ("dev-libs/A::test_repo",)
+ }
+
+ test_cases = (
+ #Simple repo selection
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-2::repo1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A::test_repo"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A::repo2"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-1::repo2"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1::repo1"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-1::repo1"]),
+ ResolverPlaygroundTestCase(
+ ["@multirepotest"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-1"]),
+
+ #Packages in exactly one repo
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/C-1::repo1"]),
+
+ #Package in repository 1 and 2, but 2 must be used
+ ResolverPlaygroundTestCase(
+ ["dev-libs/D"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/D-1::repo2"]),
+
+ #--usepkg: don't reinstall on new repo without --newrepo
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ options = {"--usepkg": True, "--selective": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["[binary]dev-libs/C-1::repo2"]),
+
+ #--usepkgonly: don't reinstall on new repo without --newrepo
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ options = {"--usepkgonly": True, "--selective": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["[binary]dev-libs/C-1::repo2"]),
+
+ #--newrepo: pick ebuild if binpkg/ebuild have different repo
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ options = {"--usepkg": True, "--newrepo": True, "--selective": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/C-1::repo1"]),
+
+ #--newrepo --usepkgonly: ebuild is ignored
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ options = {"--usepkgonly": True, "--newrepo": True, "--selective": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["[binary]dev-libs/C-1::repo2"]),
+
+ #--newrepo: pick ebuild if binpkg/ebuild have different repo
+ ResolverPlaygroundTestCase(
+ ["dev-libs/I"],
+ options = {"--usepkg": True, "--newrepo": True, "--selective": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/I-2::repo2"]),
+
+ #--newrepo --usepkgonly: if binpkg matches installed, do nothing
+ ResolverPlaygroundTestCase(
+ ["dev-libs/I"],
+ options = {"--usepkgonly": True, "--newrepo": True, "--selective": True},
+ success = True,
+ mergelist = []),
+
+ #--newrepo --usepkgonly: reinstall if binpkg has new repo.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/K"],
+ options = {"--usepkgonly": True, "--newrepo": True, "--selective": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["[binary]dev-libs/K-1::repo2"]),
+
+ #--usepkgonly: don't reinstall on new repo without --newrepo.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/K"],
+ options = {"--usepkgonly": True, "--selective": True},
+ success = True,
+ mergelist = []),
+
+ #Atoms with slots
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/E-1::repo2"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E:1::repo2"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/E-1::repo2"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E:1"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/E-1::repo2"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/F:1"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/F-1::repo2"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/F-1:1"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/F-1::repo2"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/F-1:1::repo1"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/F-1::repo1"]),
+
+ # Dependency on installed dev-libs/I-2, for which no ebuild is
+ # available from the same repo (repo1), should not unnecessarily
+ # reinstall the same version from a different repo.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/H"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = []),
+
+ # Dependency on installed dev-libs/I-2 ebuild should trigger reinstall
+ # when --newrepo flag is used.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/H"],
+ options = {"--update": True, "--deep": True, "--newrepo": True},
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/I-2::repo2"]),
+
+ # Check interaction between repo priority and unsatisfied
+ # REQUIRED_USE, for bug #350254.
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/G-1"],
+ check_repo_names = True,
+ success = False),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ binpkgs=binpkgs, installed=installed, sets=sets)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testMultirepoUserConfig(self):
+ ebuilds = {
+ #package.use test
+ "dev-libs/A-1": { "IUSE": "foo" },
+ "dev-libs/A-2::repo1": { "IUSE": "foo" },
+ "dev-libs/A-3::repo2": { },
+ "dev-libs/B-1": { "DEPEND": "dev-libs/A", "EAPI": 2 },
+ "dev-libs/B-2": { "DEPEND": "dev-libs/A[foo]", "EAPI": 2 },
+ "dev-libs/B-3": { "DEPEND": "dev-libs/A[-foo]", "EAPI": 2 },
+
+ #package.keywords test
+ "dev-libs/C-1": { "KEYWORDS": "~x86" },
+ "dev-libs/C-1::repo1": { "KEYWORDS": "~x86" },
+
+ #package.license
+ "dev-libs/D-1": { "LICENSE": "TEST" },
+ "dev-libs/D-1::repo1": { "LICENSE": "TEST" },
+
+ #package.mask
+ "dev-libs/E-1": { },
+ "dev-libs/E-1::repo1": { },
+ "dev-libs/H-1": { },
+ "dev-libs/H-1::repo1": { },
+ "dev-libs/I-1::repo2": { "SLOT" : "1"},
+ "dev-libs/I-2::repo2": { "SLOT" : "2"},
+ "dev-libs/J-1": { "KEYWORDS": "x86", "EAPI" : "3",
+ "RDEPEND" : "|| ( dev-libs/I:2 dev-libs/I:1 )" },
+
+ #package.properties
+ "dev-libs/F-1": { "PROPERTIES": "bar"},
+ "dev-libs/F-1::repo1": { "PROPERTIES": "bar"},
+
+ #package.unmask
+ "dev-libs/G-1": { },
+ "dev-libs/G-1::repo1": { },
+
+ #package.mask with wildcards
+ "dev-libs/Z-1::repo3": { },
+ }
+
+ installed = {
+ "dev-libs/J-1": { "RDEPEND" : "|| ( dev-libs/I:2 dev-libs/I:1 )", "EAPI" : "3" },
+ "dev-libs/I-2::repo1": {"SLOT" : "2"},
+ }
+
+ user_config = {
+ "package.use":
+ (
+ "dev-libs/A::repo1 foo",
+ ),
+ "package.keywords":
+ (
+ "=dev-libs/C-1::test_repo",
+ ),
+ "package.license":
+ (
+ "=dev-libs/D-1::test_repo TEST",
+ ),
+ "package.mask":
+ (
+ "dev-libs/E::repo1",
+ "dev-libs/H",
+ "dev-libs/I::repo1",
+ #needed for package.unmask test
+ "dev-libs/G",
+ #wildcard test
+ "*/*::repo3",
+ ),
+ "package.properties":
+ (
+ "dev-libs/F::repo1 -bar",
+ ),
+ "package.unmask":
+ (
+ "dev-libs/G::test_repo",
+ ),
+ }
+
+ test_cases = (
+ #package.use test
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/B-1"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-3::repo2", "dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/B-2"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/A-2::repo1", "dev-libs/B-2"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/B-3"],
+ options = { "--autounmask": 'n' },
+ success = False,
+ check_repo_names = True),
+
+ #package.keywords test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/C-1"]),
+
+ #package.license test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/D"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/D-1"]),
+
+ #package.mask test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/E-1"]),
+
+ # Dependency on installed dev-libs/I-2, whose ebuild is masked in
+ # the same repo (repo1), should not unnecessarily pull in a
+ # different slot. It should just pull in the same slot from a
+ # different repo (bug #351828).
+ ResolverPlaygroundTestCase(
+ ["dev-libs/J"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ["dev-libs/I-2"]),
+
+ #package.properties test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/F"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/F-1"]),
+
+ #package.unmask and package.mask test
+ ResolverPlaygroundTestCase(
+ ["dev-libs/G"],
+ success = True,
+ check_repo_names = True,
+ mergelist = ["dev-libs/G-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/H"],
+ options = { "--autounmask": 'n' },
+ success = False),
+
+ #package.mask with wildcards
+ ResolverPlaygroundTestCase(
+ ["dev-libs/Z"],
+ options = { "--autounmask": 'n' },
+ success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, user_config=user_config)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_multislot.py b/lib/portage/tests/resolver/test_multislot.py
new file mode 100644
index 000000000..cbb1beebb
--- /dev/null
+++ b/lib/portage/tests/resolver/test_multislot.py
@@ -0,0 +1,54 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class MultiSlotTestCase(TestCase):
+
+ def testMultiSlotSelective(self):
+ """
+ Test that a package isn't reinstalled due to SLOT dependency
+ interaction with USE=multislot (bug #220341).
+ """
+
+ ebuilds = {
+ "sys-devel/gcc-4.4.4": { "SLOT": "4.4" },
+ "dev-util/nvidia-cuda-toolkit-4.0" : {"EAPI": "1", "RDEPEND": "sys-devel/gcc:4.4"},
+ }
+
+ installed = {
+ "sys-devel/gcc-4.4.4": { "SLOT": "i686-pc-linux-gnu-4.4.4" },
+ "dev-util/nvidia-cuda-toolkit-4.0" : {"EAPI": "1", "RDEPEND": "sys-devel/gcc:4.4"},
+ }
+
+ world = (
+ "dev-util/nvidia-cuda-toolkit",
+ )
+
+ options = {'--update' : True, '--deep' : True, '--selective' : True}
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["sys-devel/gcc:4.4"],
+ options = options,
+ mergelist = [],
+ success = True),
+
+ # depclean test for bug #382823
+ ResolverPlaygroundTestCase(
+ [],
+ options = {"--depclean": True},
+ success = True,
+ cleanlist = []),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed,
+ world=world)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_old_dep_chain_display.py b/lib/portage/tests/resolver/test_old_dep_chain_display.py
new file mode 100644
index 000000000..8aedf5999
--- /dev/null
+++ b/lib/portage/tests/resolver/test_old_dep_chain_display.py
@@ -0,0 +1,35 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class OldDepChainDisplayTestCase(TestCase):
+
+ def testOldDepChainDisplay(self):
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "foo? ( dev-libs/B[-bar] )", "IUSE": "+foo", "EAPI": "2" },
+ "dev-libs/A-2": { "DEPEND": "foo? ( dev-libs/C )", "IUSE": "+foo", "EAPI": "1" },
+ "dev-libs/B-1": { "IUSE": "bar", "DEPEND": "!bar? ( dev-libs/D[-baz] )", "EAPI": "2" },
+ "dev-libs/C-1": { "KEYWORDS": "~x86" },
+ "dev-libs/D-1": { "IUSE": "+baz", "EAPI": "1" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1"],
+ options = { "--autounmask": 'n' },
+ success = False),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-2"],
+ options = { "--autounmask": 'n' },
+ success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_onlydeps.py b/lib/portage/tests/resolver/test_onlydeps.py
new file mode 100644
index 000000000..986769aee
--- /dev/null
+++ b/lib/portage/tests/resolver/test_onlydeps.py
@@ -0,0 +1,34 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class OnlydepsTestCase(TestCase):
+
+ def testOnlydeps(self):
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "dev-libs/B" },
+ "dev-libs/B-1": { },
+ }
+ installed = {
+ "dev-libs/B-1": { },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B"],
+ all_permutations = True,
+ success = True,
+ options = { "--onlydeps": True },
+ mergelist = ["dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_onlydeps_circular.py b/lib/portage/tests/resolver/test_onlydeps_circular.py
new file mode 100644
index 000000000..ce35cee85
--- /dev/null
+++ b/lib/portage/tests/resolver/test_onlydeps_circular.py
@@ -0,0 +1,51 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import \
+ ResolverPlayground, ResolverPlaygroundTestCase
+
+class OnlydepsCircularTestCase(TestCase):
+
+ def testOnlydepsCircular(self):
+ ebuilds = {
+ "app-misc/A-1": {
+ "EAPI": "5",
+ "SLOT": "1",
+ "DEPEND": "|| ( app-misc/B app-misc/A:1 )"
+ },
+ "app-misc/A-2": {
+ "EAPI": "5",
+ "SLOT": "2",
+ },
+ "app-misc/B-0": {
+ "EAPI": "5",
+ }
+ }
+
+ installed = {
+ "app-misc/A-2": {
+ "EAPI": "5",
+ "SLOT": "2",
+ }
+ }
+
+ test_cases = (
+ # bug 524916 - direct circular dep should not pull
+ # in an onlydeps node when possible
+ ResolverPlaygroundTestCase(
+ ["app-misc/A:1"],
+ success = True,
+ options = { "--onlydeps": True },
+ mergelist = ["app-misc/B-0"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success,
+ True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_onlydeps_minimal.py b/lib/portage/tests/resolver/test_onlydeps_minimal.py
new file mode 100644
index 000000000..efda02c59
--- /dev/null
+++ b/lib/portage/tests/resolver/test_onlydeps_minimal.py
@@ -0,0 +1,48 @@
+# Copyright 2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class OnlydepsMinimalTestCase(TestCase):
+
+ def testOnlydepsMinimal(self):
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "dev-libs/B",
+ "RDEPEND": "dev-libs/C",
+ "PDEPEND": "dev-libs/D" },
+ "dev-libs/B-1": { },
+ "dev-libs/C-1": { },
+ "dev-libs/D-1": { },
+ }
+ installed = {
+ }
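+ # --onlydeps-with-rdeps controls whether runtime deps (RDEPEND and
+ # PDEPEND) are included in the --onlydeps graph: with 'y' the deps
+ # B, C and D are all pulled in, while 'n' restricts the graph to
+ # the build-time dep dev-libs/B, as asserted below.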
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ all_permutations = True,
+ success = True,
+ options = { "--onlydeps": True,
+ "--onlydeps-with-rdeps": "y" },
+ ambiguous_merge_order = True,
+ mergelist = [("dev-libs/B-1",
+ "dev-libs/C-1",
+ "dev-libs/D-1")]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ all_permutations = True,
+ success = True,
+ options = { "--onlydeps": True,
+ "--onlydeps-with-rdeps": "n" },
+ mergelist = ["dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_or_choices.py b/lib/portage/tests/resolver/test_or_choices.py
new file mode 100644
index 000000000..63e62d010
--- /dev/null
+++ b/lib/portage/tests/resolver/test_or_choices.py
@@ -0,0 +1,342 @@
+# Copyright 2013-2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class OrChoicesTestCase(TestCase):
+
+ def testOrChoices(self):
+ ebuilds = {
+ "dev-lang/vala-0.20.0" : {
+ "EAPI": "5",
+ "SLOT": "0.20"
+ },
+ "dev-lang/vala-0.18.0" : {
+ "EAPI": "5",
+ "SLOT": "0.18"
+ },
+ #"dev-libs/gobject-introspection-1.36.0" : {
+ # "EAPI": "5",
+ # "RDEPEND" : "!<dev-lang/vala-0.20.0",
+ #},
+ "dev-libs/gobject-introspection-1.34.0" : {
+ "EAPI": "5"
+ },
+ "sys-apps/systemd-ui-2" : {
+ "EAPI": "5",
+ "RDEPEND" : "|| ( dev-lang/vala:0.20 dev-lang/vala:0.18 )"
+ },
+ }
+
+ installed = {
+ "dev-lang/vala-0.18.0" : {
+ "EAPI": "5",
+ "SLOT": "0.18"
+ },
+ "dev-libs/gobject-introspection-1.34.0" : {
+ "EAPI": "5"
+ },
+ "sys-apps/systemd-ui-2" : {
+ "EAPI": "5",
+ "RDEPEND" : "|| ( dev-lang/vala:0.20 dev-lang/vala:0.18 )"
+ },
+ }
+
+ world = ["dev-libs/gobject-introspection", "sys-apps/systemd-ui"]
+
+ test_cases = (
+ # Demonstrate that vala:0.20 update is pulled in, for bug #478188
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success=True,
+ all_permutations = True,
+ mergelist = ['dev-lang/vala-0.20.0']),
+ # Verify that vala:0.20 is not pulled in without --deep
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True},
+ success=True,
+ all_permutations = True,
+ mergelist = []),
+ # Verify that vala:0.20 is not pulled in without --update
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--selective": True, "--deep": True},
+ success=True,
+ all_permutations = True,
+ mergelist = []),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testOrChoicesLibpostproc(self):
+ ebuilds = {
+ "media-video/ffmpeg-0.10" : {
+ "EAPI": "5",
+ "SLOT": "0.10"
+ },
+ "media-video/ffmpeg-1.2.2" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+ "media-libs/libpostproc-0.8.0.20121125" : {
+ "EAPI": "5"
+ },
+ "media-plugins/gst-plugins-ffmpeg-0.10.13_p201211-r1" : {
+ "EAPI": "5",
+ "RDEPEND" : "|| ( media-video/ffmpeg:0 media-libs/libpostproc )"
+ },
+ }
+
+ installed = {
+ "media-video/ffmpeg-0.10" : {
+ "EAPI": "5",
+ "SLOT": "0.10"
+ },
+ "media-libs/libpostproc-0.8.0.20121125" : {
+ "EAPI": "5"
+ },
+ "media-plugins/gst-plugins-ffmpeg-0.10.13_p201211-r1" : {
+ "EAPI": "5",
+ "RDEPEND" : "|| ( media-video/ffmpeg:0 media-libs/libpostproc )"
+ },
+ }
+
+ world = ["media-plugins/gst-plugins-ffmpeg"]
+
+ test_cases = (
+ # Demonstrate that libpostproc is preferred
+ # over ffmpeg:0 for bug #480736.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success=True,
+ all_permutations = True,
+ mergelist = []),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed,
+ world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testInitiallyUnsatisfied(self):
+
+ ebuilds = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/A:="
+ },
+
+ "app-misc/C-0" : {
+ "EAPI": "5",
+ "RDEPEND": "|| ( app-misc/X <app-misc/A-2 )"
+ },
+
+ }
+
+ installed = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/A:0/1="
+ },
+
+ "app-misc/C-0" : {
+ "EAPI": "5",
+ "RDEPEND": "|| ( app-misc/X <app-misc/A-2 )"
+ },
+
+ }
+
+ world = ["app-misc/B", "app-misc/C"]
+
+ test_cases = (
+
+ # Test bug #522652, where the unsatisfiable app-misc/X
+ # atom is selected, and the dependency is placed into
+ # _initially_unsatisfied_deps where it is ignored, causing
+ # upgrade to app-misc/A-2 (breaking a dependency of
+ # app-misc/C-0).
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ options = {},
+ success = True,
+ mergelist = ['app-misc/A-1']
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testUseMask(self):
+
+ profile = {
+ "use.mask":
+ (
+ "abi_ppc_32",
+ ),
+ }
+
+ ebuilds = {
+
+ "sys-libs/A-1" : {
+ "EAPI": "5",
+ "RDEPEND": "|| ( sys-libs/zlib[abi_ppc_32(-)] " + \
+ "sys-libs/zlib[abi_x86_32(-)] )"
+ },
+
+ "sys-libs/zlib-1.2.8-r1" : {
+ "EAPI": "5",
+ "IUSE": "abi_ppc_32 abi_x86_32"
+ },
+
+ "sys-libs/zlib-1.2.8" : {
+ "EAPI": "5",
+ "IUSE": ""
+ },
+ }
+
+ test_cases = (
+
+ # bug #515584: We want to prefer choices that do
+ # not require changes to use.mask or use.force.
+ # In this case, abi_ppc_32 is use.masked in the
+ # profile, so we want to avoid that choice.
+ ResolverPlaygroundTestCase(
+ ["sys-libs/A"],
+ options = {},
+ success = False,
+ use_changes = {
+ 'sys-libs/zlib-1.2.8-r1': {'abi_x86_32': True}
+ },
+ mergelist = ["sys-libs/zlib-1.2.8-r1", "sys-libs/A-1"]
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ profile=profile, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testConflictMissedUpdate(self):
+
+ ebuilds = {
+ "dev-lang/ocaml-4.02.1" : {
+ "EAPI": "5",
+ "SLOT": "0/4.02.1",
+ },
+
+ "dev-lang/ocaml-4.01.0" : {
+ "EAPI": "5",
+ "SLOT": "0/4.01.0",
+ },
+
+ "dev-ml/lablgl-1.05" : {
+ "EAPI": "5",
+ "DEPEND": (">=dev-lang/ocaml-3.10.2:= "
+ "|| ( dev-ml/labltk:= <dev-lang/ocaml-4.02 )"),
+ "RDEPEND": (">=dev-lang/ocaml-3.10.2:= "
+ "|| ( dev-ml/labltk:= <dev-lang/ocaml-4.02 )"),
+ },
+
+ "dev-ml/labltk-8.06.0" : {
+ "EAPI": "5",
+ "SLOT": "0/8.06.0",
+ "DEPEND": ">=dev-lang/ocaml-4.02:=",
+ "RDEPEND": ">=dev-lang/ocaml-4.02:=",
+ },
+ }
+
+ installed = {
+ "dev-lang/ocaml-4.01.0" : {
+ "EAPI": "5",
+ "SLOT": "0/4.01.0",
+ },
+
+ "dev-ml/lablgl-1.05" : {
+ "EAPI": "5",
+ "DEPEND": (">=dev-lang/ocaml-3.10.2:0/4.01.0= "
+ "|| ( dev-ml/labltk:= <dev-lang/ocaml-4.02 )"),
+ "RDEPEND": (">=dev-lang/ocaml-3.10.2:0/4.01.0= "
+ "|| ( dev-ml/labltk:= <dev-lang/ocaml-4.02 )"),
+ },
+ }
+
+ world = (
+ "dev-lang/ocaml",
+ "dev-ml/lablgl",
+ )
+
+ test_cases = (
+
+ # bug #531656: If an ocaml update is desirable,
+ # then we need to pull in dev-ml/labltk.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = [
+ "dev-lang/ocaml-4.02.1",
+ "dev-ml/labltk-8.06.0",
+ "dev-ml/lablgl-1.05",
+ ]
+ ),
+
+ )
+
+ playground = ResolverPlayground(debug=False,
+ ebuilds=ebuilds, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ # Disable debug so that cleanup works.
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_or_downgrade_installed.py b/lib/portage/tests/resolver/test_or_downgrade_installed.py
new file mode 100644
index 000000000..22307a5bc
--- /dev/null
+++ b/lib/portage/tests/resolver/test_or_downgrade_installed.py
@@ -0,0 +1,97 @@
+# Copyright 2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (
+ ResolverPlayground,
+ ResolverPlaygroundTestCase,
+)
+
+class OrDowngradeInstalledTestCase(TestCase):
+
+ def testOrDowngradeInstalled(self):
+ ebuilds = {
+ 'net-misc/foo-1': {
+ 'EAPI': '6',
+ 'RDEPEND': '|| ( sys-libs/glibc[rpc(-)] net-libs/libtirpc )'
+ },
+ 'net-libs/libtirpc-1': {
+ 'EAPI': '6',
+ },
+ 'sys-libs/glibc-2.26': {
+ 'EAPI': '6',
+ 'IUSE': ''
+ },
+ 'sys-libs/glibc-2.24': {
+ 'EAPI': '6',
+ 'IUSE': '+rpc'
+ },
+ }
+
+ installed = {
+ 'sys-libs/glibc-2.26': {
+ 'EAPI': '6',
+ 'IUSE': ''
+ },
+ }
+
+ world = ['sys-libs/glibc']
+
+ test_cases = (
+ # Test bug 635540, where we need to install libtirpc
+ # rather than downgrade glibc.
+ ResolverPlaygroundTestCase(
+ ['net-misc/foo'],
+ success=True,
+ mergelist=[
+ 'net-libs/libtirpc-1',
+ 'net-misc/foo-1',
+ ],
+ ),
+ )
+
+ playground = ResolverPlayground(debug=False,
+ ebuilds=ebuilds, installed=installed, world=world)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
+
+ # In some cases it's necessary to downgrade due to
+ # the installed package being masked (glibc is not
+ # an ideal example, because it's usually not
+ # practical to downgrade it).
+ user_config = {
+ "package.mask" : (
+ ">=sys-libs/glibc-2.26",
+ ),
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ['net-misc/foo'],
+ success=True,
+ mergelist=[
+ 'sys-libs/glibc-2.24',
+ 'net-misc/foo-1',
+ ],
+ ),
+ )
+
+ playground = ResolverPlayground(debug=False,
+ ebuilds=ebuilds, installed=installed, world=world,
+ user_config=user_config)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_or_upgrade_installed.py b/lib/portage/tests/resolver/test_or_upgrade_installed.py
new file mode 100644
index 000000000..7018e08de
--- /dev/null
+++ b/lib/portage/tests/resolver/test_or_upgrade_installed.py
@@ -0,0 +1,160 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (
+ ResolverPlayground,
+ ResolverPlaygroundTestCase,
+)
+
+class OrUpgradeInstalledTestCase(TestCase):
+
+ def testOrUpgradeInstalled(self):
+ ebuilds = {
+ 'net-misc/foo-1': {
+ 'EAPI': '6',
+ 'RDEPEND': '|| ( sys-libs/glibc[rpc(-)] net-libs/libtirpc )'
+ },
+ 'net-libs/libtirpc-1': {
+ 'EAPI': '6',
+ },
+ 'sys-libs/glibc-2.26': {
+ 'EAPI': '6',
+ 'IUSE': ''
+ },
+ 'sys-libs/glibc-2.24': {
+ 'EAPI': '6',
+ 'IUSE': '+rpc'
+ },
+ }
+
+ installed = {
+ 'sys-libs/glibc-2.24': {
+ 'EAPI': '6',
+ 'IUSE': '+rpc',
+ 'USE': 'rpc',
+ },
+ }
+
+ world = ['sys-libs/glibc']
+
+ test_cases = (
+ # Test bug 643974, where we need to install libtirpc
+ # in order to upgrade glibc.
+ ResolverPlaygroundTestCase(
+ ['net-misc/foo', '@world'],
+ options={'--update': True, '--deep': True},
+ success=True,
+ ambiguous_merge_order=True,
+ mergelist=(
+ (
+ 'net-libs/libtirpc-1',
+ 'sys-libs/glibc-2.26',
+ 'net-misc/foo-1',
+ ),
+ )
+ ),
+ )
+
+ playground = ResolverPlayground(debug=False,
+ ebuilds=ebuilds, installed=installed, world=world)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
+
+ # In some cases it's necessary to avoid upgrade due to
+ # the package being masked.
+ user_config = {
+ "package.mask" : (
+ ">=sys-libs/glibc-2.26",
+ ),
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ['net-misc/foo', '@world'],
+ options={'--update': True, '--deep': True},
+ success=True,
+ mergelist=[
+ 'net-misc/foo-1',
+ ]
+ ),
+ )
+
+ playground = ResolverPlayground(debug=False,
+ ebuilds=ebuilds, installed=installed, world=world,
+ user_config=user_config)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
+
+ def testVirtualRust(self):
+ ebuilds = {
+ 'dev-lang/rust-1.19.0': {},
+ 'dev-lang/rust-1.23.0': {},
+ 'dev-lang/rust-bin-1.19.0': {},
+ 'virtual/rust-1.19.0': {
+ 'RDEPEND': '|| ( =dev-lang/rust-1.19.0* =dev-lang/rust-bin-1.19.0* )'
+ },
+ }
+
+ installed = {
+ 'dev-lang/rust-1.19.0': {},
+ 'virtual/rust-1.19.0': {
+ 'RDEPEND': '|| ( =dev-lang/rust-1.19.0* =dev-lang/rust-bin-1.19.0* )'
+ },
+ }
+
+ world = ['virtual/rust']
+
+ test_cases = (
+ # Test bug 645416, where rust-bin-1.19.0 was pulled in
+ # inappropriately due to the rust-1.23.0 update being
+ # available.
+ ResolverPlaygroundTestCase(
+ ['virtual/rust'],
+ options={'--update': True, '--deep': True},
+ success=True,
+ mergelist=[]
+ ),
+ # Test upgrade to rust-1.23.0, which is only possible
+ # if rust-bin-1.19.0 is installed in order to satisfy
+ # virtual/rust-1.19.0.
+ ResolverPlaygroundTestCase(
+ ['=dev-lang/rust-1.23.0', 'virtual/rust'],
+ options={'--update': True, '--deep': True},
+ all_permutations=True,
+ success=True,
+ ambiguous_merge_order=True,
+ mergelist=(
+ (
+ 'dev-lang/rust-1.23.0',
+ 'dev-lang/rust-bin-1.19.0',
+ ),
+ ),
+ ),
+ )
+
+ playground = ResolverPlayground(debug=False,
+ ebuilds=ebuilds, installed=installed, world=world)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_output.py b/lib/portage/tests/resolver/test_output.py
new file mode 100644
index 000000000..34efe9c56
--- /dev/null
+++ b/lib/portage/tests/resolver/test_output.py
@@ -0,0 +1,88 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class MergelistOutputTestCase(TestCase):
+
+ def testMergelistOutput(self):
+ """
+ This test doesn't check whether the output is correct, but makes sure
+ that we don't hit a traceback somewhere in the output code.
+ """
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "dev-libs/B dev-libs/C", "IUSE": "+foo", "EAPI": 1 },
+ "dev-libs/B-1": { "DEPEND": "dev-libs/D", "IUSE": "foo +bar", "EAPI": 1 },
+ "dev-libs/C-1": { "DEPEND": "dev-libs/E", "IUSE": "foo bar" },
+ "dev-libs/D-1": { "IUSE": "" },
+ "dev-libs/E-1": {},
+
+ #reinstall for flags
+ "dev-libs/Z-1": { "IUSE": "+foo", "EAPI": 1 },
+ "dev-libs/Y-1": { "IUSE": "foo", "EAPI": 1 },
+ "dev-libs/X-1": {},
+ "dev-libs/W-1": { "IUSE": "+foo", "EAPI": 1 },
+ }
+
+ installed = {
+ "dev-libs/Z-1": { "USE": "", "IUSE": "foo" },
+ "dev-libs/Y-1": { "USE": "foo", "IUSE": "+foo", "EAPI": 1 },
+ "dev-libs/X-1": { "USE": "foo", "IUSE": "+foo", "EAPI": 1 },
+ "dev-libs/W-1": { },
+ }
+
+ option_combos = (
+ (),
+ ("verbose",),
+ ("tree",),
+ ("tree", "unordered-display",),
+ ("verbose", "tree",),
+ ("verbose", "tree", "unordered-display",),
+ )
+
+ test_cases = []
+ for options in option_combos:
+ testcase_opts = {}
+ for opt in options:
+ testcase_opts["--" + opt] = True
+
+ test_cases.append(ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = testcase_opts,
+ success = True,
+ ignore_mergelist_order=True,
+ mergelist = ["dev-libs/D-1", "dev-libs/E-1", "dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1"]))
+
+ test_cases.append(ResolverPlaygroundTestCase(
+ ["dev-libs/Z"],
+ options = testcase_opts,
+ success = True,
+ mergelist = ["dev-libs/Z-1"]))
+
+ test_cases.append(ResolverPlaygroundTestCase(
+ ["dev-libs/Y"],
+ options = testcase_opts,
+ success = True,
+ mergelist = ["dev-libs/Y-1"]))
+
+ test_cases.append(ResolverPlaygroundTestCase(
+ ["dev-libs/X"],
+ options = testcase_opts,
+ success = True,
+ mergelist = ["dev-libs/X-1"]))
+
+ test_cases.append(ResolverPlaygroundTestCase(
+ ["dev-libs/W"],
+ options = testcase_opts,
+ success = True,
+ mergelist = ["dev-libs/W-1"]))
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_package_tracker.py b/lib/portage/tests/resolver/test_package_tracker.py
new file mode 100644
index 000000000..468c3d8a5
--- /dev/null
+++ b/lib/portage/tests/resolver/test_package_tracker.py
@@ -0,0 +1,261 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import collections
+
+from portage.dep import Atom
+from portage.tests import TestCase
+from _emerge.resolver.package_tracker import PackageTracker, PackageTrackerDbapiWrapper
+
+class PackageTrackerTestCase(TestCase):
+
+ FakePackage = collections.namedtuple("FakePackage",
+ ["root", "cp", "cpv", "slot", "slot_atom", "version", "repo"])
+
+ FakeConflict = collections.namedtuple("FakeConflict",
+ ["description", "root", "pkgs"])
+
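+ # make_pkg builds a lightweight stand-in for a Package instance from
+ # an atom string, providing just the attributes that PackageTracker
+ # and its dbapi wrapper inspect in these tests.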
+ def make_pkg(self, root, atom, repo="test_repo"):
+ atom = Atom(atom)
+ slot_atom = Atom("%s:%s" % (atom.cp, atom.slot))
+ slot = atom.slot
+
+ return self.FakePackage(root=root, cp=atom.cp, cpv=atom.cpv,
+ slot=slot, slot_atom=slot_atom, version=atom.version, repo=repo)
+
+ def make_conflict(self, description, root, pkgs):
+ return self.FakeConflict(description=description, root=root, pkgs=pkgs)
+
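+ # Basic bookkeeping: adding is idempotent, remove_pkg raises KeyError
+ # for untracked packages, and discard_pkg is a silent no-op.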
+ def test_add_remove_discard(self):
+ p = PackageTracker()
+
+ x1 = self.make_pkg("/", "=dev-libs/X-1:0")
+ x2 = self.make_pkg("/", "=dev-libs/X-2:0")
+
+ p.add_pkg(x1)
+ self.assertTrue(x1 in p)
+ self.assertTrue(p.contains(x1, installed=True))
+ self.assertTrue(p.contains(x1, installed=False))
+ p.remove_pkg(x1)
+ self.assertTrue(x1 not in p)
+
+ p.add_pkg(x1)
+ self.assertTrue(x1 in p)
+ p.add_pkg(x1)
+ self.assertTrue(x1 in p)
+
+ self.assertRaises(KeyError, p.remove_pkg, x2)
+
+ p.add_pkg(x2)
+ self.assertTrue(x2 in p)
+ p.remove_pkg(x2)
+ self.assertTrue(x2 not in p)
+ p.discard_pkg(x2)
+ self.assertTrue(x2 not in p)
+ p.add_pkg(x2)
+ self.assertTrue(x2 in p)
+
+ all_pkgs = list(p.all_pkgs("/"))
+ self.assertEqual(len(all_pkgs), 2)
+ self.assertTrue(all_pkgs[0] is x1 and all_pkgs[1] is x2)
+
+ self.assertEqual(len(list(p.all_pkgs("/"))), 2)
+ self.assertEqual(len(list(p.all_pkgs("/xxx"))), 0)
+
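+ # match() yields the tracked packages for a given root and atom,
+ # ordered by version, and yields nothing for unknown roots or atoms.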
+ def test_match(self):
+ p = PackageTracker()
+ x1 = self.make_pkg("/", "=dev-libs/X-1:0")
+ x2 = self.make_pkg("/", "=dev-libs/X-2:0")
+ x3 = self.make_pkg("/", "=dev-libs/X-3:1")
+
+ p.add_pkg(x2)
+ p.add_pkg(x1)
+
+ matches = list(p.match("/", Atom("=dev-libs/X-1")))
+ self.assertTrue(x1 in matches)
+ self.assertEqual(len(matches), 1)
+
+ matches = list(p.match("/", Atom("dev-libs/X")))
+ self.assertTrue(x1 is matches[0] and x2 is matches[1])
+ self.assertEqual(len(matches), 2)
+
+ matches = list(p.match("/xxx", Atom("dev-libs/X")))
+ self.assertEqual(len(matches), 0)
+
+ matches = list(p.match("/", Atom("dev-libs/Y")))
+ self.assertEqual(len(matches), 0)
+
+ p.add_pkg(x3)
+ matches = list(p.match("/", Atom("dev-libs/X")))
+ self.assertTrue(x1 is matches[0] and x2 is matches[1] and x3 is matches[2])
+ self.assertEqual(len(matches), 3)
+
+ p.remove_pkg(x3)
+ matches = list(p.match("/", Atom("dev-libs/X")))
+ self.assertTrue(x1 is matches[0] and x2 is matches[1])
+ self.assertEqual(len(matches), 2)
+
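+ # PackageTrackerDbapiWrapper exposes the tracker contents for a single
+ # root through a dbapi-like interface (iteration, cp_list, match and
+ # cpv_inject).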
+ def test_dbapi_interface(self):
+ p = PackageTracker()
+ dbapi = PackageTrackerDbapiWrapper("/", p)
+ installed = self.make_pkg("/", "=dev-libs/X-0:0")
+ x1 = self.make_pkg("/", "=dev-libs/X-1:0")
+ x2 = self.make_pkg("/", "=dev-libs/X-2:0")
+ x3 = self.make_pkg("/", "=dev-libs/X-3:0")
+ x4 = self.make_pkg("/", "=dev-libs/X-4:6")
+ x5 = self.make_pkg("/xxx", "=dev-libs/X-5:6")
+
+ def check_dbapi(pkgs):
+ all_pkgs = set(dbapi)
+ self.assertEqual(len(all_pkgs), len(pkgs))
+
+ x_atom = "dev-libs/X"
+ y_atom = "dev-libs/Y"
+ matches = dbapi.cp_list(x_atom)
+ for pkg in pkgs:
+ if pkg.root == "/" and pkg.cp == x_atom:
+ self.assertTrue(pkg in matches)
+ self.assertTrue(not dbapi.cp_list(y_atom))
+ matches = dbapi.match(Atom(x_atom))
+ for pkg in pkgs:
+ if pkg.root == "/" and pkg.cp == x_atom:
+ self.assertTrue(pkg in matches)
+ self.assertTrue(not dbapi.match(Atom(y_atom)))
+
+ check_dbapi([])
+
+ p.add_installed_pkg(installed)
+ check_dbapi([installed])
+
+ p.add_pkg(x1)
+ check_dbapi([x1])
+
+ p.remove_pkg(x1)
+ check_dbapi([installed])
+
+ dbapi.cpv_inject(x1)
+ check_dbapi([x1])
+
+ dbapi.cpv_inject(x2)
+ check_dbapi([x1, x2])
+
+ p.remove_pkg(x1)
+ check_dbapi([x2])
+
+ p.add_pkg(x5)
+ check_dbapi([x2])
+
+
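+ # Installed packages are only reported while no non-installed package
+ # occupies the same slot; once the non-installed package is removed,
+ # the installed entries become visible again.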
+ def test_installed(self):
+ p = PackageTracker()
+ x1 = self.make_pkg("/", "=dev-libs/X-1:0")
+ x1b = self.make_pkg("/", "=dev-libs/X-1.1:0")
+ x2 = self.make_pkg("/", "=dev-libs/X-2:0")
+ x3 = self.make_pkg("/", "=dev-libs/X-3:1")
+
+ def check_installed(x, should_contain, num_pkgs):
+ self.assertEqual(x in p, should_contain)
+ self.assertEqual(p.contains(x), should_contain)
+ self.assertEqual(p.contains(x, installed=True), should_contain)
+ self.assertEqual(p.contains(x, installed=False), False)
+ self.assertEqual(len(list(p.all_pkgs("/"))), num_pkgs)
+
+ def check_matches(atom, expected):
+ matches = list(p.match("/", Atom(atom)))
+ self.assertEqual(len(matches), len(expected))
+ for x, y in zip(matches, expected):
+ self.assertTrue(x is y)
+
+ p.add_installed_pkg(x1)
+ check_installed(x1, True, 1)
+ check_matches("dev-libs/X", [x1])
+
+ p.add_installed_pkg(x1)
+ check_installed(x1, True, 1)
+ check_matches("dev-libs/X", [x1])
+
+ p.add_pkg(x2)
+ check_installed(x1, False, 1)
+ check_matches("dev-libs/X", [x2])
+
+ p.add_installed_pkg(x1)
+ check_installed(x1, False, 1)
+ check_matches("dev-libs/X", [x2])
+
+ p.add_installed_pkg(x1b)
+ check_installed(x1, False, 1)
+ check_installed(x1b, False, 1)
+ check_matches("dev-libs/X", [x2])
+
+ p.remove_pkg(x2)
+ check_installed(x1, True, 2)
+ check_installed(x1b, True, 2)
+ check_matches("dev-libs/X", [x1, x1b])
+
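+ # conflicts() reports both slot conflicts (several packages sharing a
+ # slot atom) and cpv conflicts (the same cpv tracked with differing
+ # metadata, here x4 and x4b); slot_conflicts() is limited to the former.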
+ def test_conflicts(self):
+ p = PackageTracker()
+ installed1 = self.make_pkg("/", "=dev-libs/X-0:0")
+ installed2 = self.make_pkg("/", "=dev-libs/X-0.1:0")
+ x1 = self.make_pkg("/", "=dev-libs/X-1:0")
+ x2 = self.make_pkg("/", "=dev-libs/X-2:0")
+ x3 = self.make_pkg("/", "=dev-libs/X-3:0")
+ x4 = self.make_pkg("/", "=dev-libs/X-4:4")
+ x4b = self.make_pkg("/", "=dev-libs/X-4:4b::x-repo")
+
+ def check_conflicts(expected, slot_conflicts_only=False):
+ if slot_conflicts_only:
+ conflicts = list(p.slot_conflicts())
+ else:
+ conflicts = list(p.conflicts())
+ self.assertEqual(len(conflicts), len(expected))
+ for got, exp in zip(conflicts, expected):
+ self.assertEqual(got.description, exp.description)
+ self.assertEqual(got.root, exp.root)
+ self.assertEqual(len(got.pkgs), len(exp.pkgs))
+ self.assertEqual(len(got), len(exp.pkgs))
+ for x, y in zip(got.pkgs, exp.pkgs):
+ self.assertTrue(x is y)
+ for x, y in zip(got, exp.pkgs):
+ self.assertTrue(x is y)
+ for x in exp.pkgs:
+ self.assertTrue(x in got)
+
+ check_conflicts([])
+ check_conflicts([])
+
+ p.add_installed_pkg(installed1)
+ p.add_installed_pkg(installed2)
+ check_conflicts([])
+
+ p.add_pkg(x1)
+ check_conflicts([])
+ p.add_pkg(x2)
+ check_conflicts([self.make_conflict("slot conflict", "/", [x1, x2])])
+ p.add_pkg(x3)
+ check_conflicts([self.make_conflict("slot conflict", "/", [x1, x2, x3])])
+ p.remove_pkg(x3)
+ check_conflicts([self.make_conflict("slot conflict", "/", [x1, x2])])
+ p.remove_pkg(x2)
+ check_conflicts([])
+ p.add_pkg(x3)
+ check_conflicts([self.make_conflict("slot conflict", "/", [x1, x3])])
+ p.add_pkg(x2)
+ check_conflicts([self.make_conflict("slot conflict", "/", [x1, x3, x2])])
+
+ p.add_pkg(x4)
+ check_conflicts([self.make_conflict("slot conflict", "/", [x1, x3, x2])])
+
+ p.add_pkg(x4b)
+ check_conflicts(
+ [
+ self.make_conflict("slot conflict", "/", [x1, x3, x2]),
+ self.make_conflict("cpv conflict", "/", [x4, x4b]),
+ ]
+ )
+
+ check_conflicts(
+ [
+ self.make_conflict("slot conflict", "/", [x1, x3, x2]),
+ ],
+ slot_conflicts_only=True
+ )
diff --git a/lib/portage/tests/resolver/test_profile_default_eapi.py b/lib/portage/tests/resolver/test_profile_default_eapi.py
new file mode 100644
index 000000000..cc5721949
--- /dev/null
+++ b/lib/portage/tests/resolver/test_profile_default_eapi.py
@@ -0,0 +1,126 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import io
+
+from portage import os, _encodings
+from portage.const import USER_CONFIG_PATH
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.dep import ExtendedAtomDict
+from portage.util import ensure_dirs
+
+class ProfileDefaultEAPITestCase(TestCase):
+
+ def testProfileDefaultEAPI(self):
+
+ repo_configs = {
+ "test_repo": {
+ "layout.conf": (
+ "profile-formats = profile-default-eapi",
+ "profile_eapi_when_unspecified = 5"
+ ),
+ }
+ }
+
+ profiles = (
+ (
+ "",
+ {
+ "package.mask": ("sys-libs/A:1",),
+ "package.use": ("sys-libs/A:1 flag",)
+ }
+ ),
+ (
+ "default/linux",
+ {
+ "package.mask": ("sys-libs/B:1",),
+ "package.use": ("sys-libs/B:1 flag",),
+ "package.keywords": ("sys-libs/B:1 x86",)
+ }
+ ),
+ (
+ "default/linux/x86",
+ {
+ "package.mask": ("sys-libs/C:1",),
+ "package.use": ("sys-libs/C:1 flag",),
+ "package.keywords": ("sys-libs/C:1 x86",),
+ "parent": ("..",)
+ }
+ ),
+ )
+
+ user_profile = {
+ "package.mask": ("sys-libs/D:1",),
+ "package.use": ("sys-libs/D:1 flag",),
+ "package.keywords": ("sys-libs/D:1 x86",),
+ }
+
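+ # Each test case pairs an accessor into the parsed configuration with
+ # the data expected once the profiles above have been written out and
+ # the config reloaded.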
+ test_cases = (
+ (lambda x: x._mask_manager._pmaskdict, {
+ "sys-libs/A": ("sys-libs/A:1::test_repo",),
+ "sys-libs/B": ("sys-libs/B:1",),
+ "sys-libs/C": ("sys-libs/C:1",),
+ "sys-libs/D": ("sys-libs/D:1",),
+ }),
+ (lambda x: x._use_manager._repo_puse_dict, {
+ "test_repo": {
+ "sys-libs/A": {
+ "sys-libs/A:1": ("flag",)
+ }
+ }
+ }),
+ (lambda x: x._use_manager._pkgprofileuse, (
+ {"sys-libs/B": {"sys-libs/B:1": "flag"}},
+ {"sys-libs/C": {"sys-libs/C:1": "flag"}},
+ {},
+ {"sys-libs/D": {"sys-libs/D:1": "flag"}},
+ )),
+ (lambda x: x._keywords_manager._pkeywords_list, (
+ {"sys-libs/B": {"sys-libs/B:1": ["x86"]}},
+ {"sys-libs/C": {"sys-libs/C:1": ["x86"]}},
+ {"sys-libs/D": {"sys-libs/D:1": ["x86"]}},
+ )
+ )
+ )
+
+ playground = ResolverPlayground(debug=False,
+ repo_configs=repo_configs)
+ try:
+ repo_dir = (playground.settings.repositories.
+ get_location_for_name("test_repo"))
+ profile_root = os.path.join(repo_dir, "profiles")
+ profile_info = [(os.path.join(profile_root, p), data)
+ for p, data in profiles]
+ profile_info.append((os.path.join(playground.eroot,
+ USER_CONFIG_PATH, "profile"), user_profile))
+
+ for prof_path, data in profile_info:
+ ensure_dirs(prof_path)
+ for k, v in data.items():
+ with io.open(os.path.join(prof_path, k), mode="w",
+ encoding=_encodings["repo.content"]) as f:
+ for line in v:
+ f.write("%s\n" % line)
+
+ # The config must be reloaded in order to account
+ # for the above profile customizations.
+ playground.reload_config()
+
+ for fn, expected in test_cases:
+ result = self._translate_result(fn(playground.settings))
+ self.assertEqual(result, expected)
+
+ finally:
+ playground.cleanup()
+
+
+ @staticmethod
+ def _translate_result(result):
+ if isinstance(result, ExtendedAtomDict):
+ result = dict(result.items())
+ elif isinstance(result, tuple):
+ result = tuple(dict(x.items()) for x in result)
+ return result
diff --git a/lib/portage/tests/resolver/test_profile_package_set.py b/lib/portage/tests/resolver/test_profile_package_set.py
new file mode 100644
index 000000000..88a2a8259
--- /dev/null
+++ b/lib/portage/tests/resolver/test_profile_package_set.py
@@ -0,0 +1,123 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import io
+
+from portage import os, _encodings
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (
+ ResolverPlayground, ResolverPlaygroundTestCase)
+from portage.util import ensure_dirs
+
+class ProfilePackageSetTestCase(TestCase):
+
+ def testProfilePackageSet(self):
+
+ repo_configs = {
+ "test_repo": {
+ "layout.conf": ("profile-formats = profile-set",),
+ }
+ }
+
+ profiles = (
+ (
+ 'default/linux',
+ {
+ "eapi": ("5",),
+ "packages": (
+ "*sys-libs/A",
+ "app-misc/A",
+ "app-misc/B",
+ "app-misc/C",
+ ),
+ }
+ ),
+ (
+ 'default/linux/x86',
+ {
+ "eapi": ("5",),
+ "packages": (
+ "-app-misc/B",
+ ),
+ "parent": ("..",)
+ }
+ ),
+ )
+
+ ebuilds = {
+ "sys-libs/A-1": {
+ "EAPI": "5",
+ },
+ "app-misc/A-1": {
+ "EAPI": "5",
+ },
+ "app-misc/B-1": {
+ "EAPI": "5",
+ },
+ "app-misc/C-1": {
+ "EAPI": "5",
+ },
+ }
+
+ installed = {
+ "sys-libs/A-1": {
+ "EAPI": "5",
+ },
+ "app-misc/A-1": {
+ "EAPI": "5",
+ },
+ "app-misc/B-1": {
+ "EAPI": "5",
+ },
+ "app-misc/C-1": {
+ "EAPI": "5",
+ },
+ }
+
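+ # The x86 subprofile removes app-misc/B from the profile-set packages,
+ # so @world no longer covers it and --depclean is expected to remove it.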
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options={"--update": True, "--deep": True},
+ mergelist = [],
+ success = True,
+ ),
+
+ ResolverPlaygroundTestCase(
+ [],
+ options={"--depclean": True},
+ success=True,
+ cleanlist=["app-misc/B-1"]
+ ),
+
+ )
+
+ playground = ResolverPlayground(debug=False, ebuilds=ebuilds,
+ installed=installed, repo_configs=repo_configs)
+ try:
+ repo_dir = (playground.settings.repositories.
+ get_location_for_name("test_repo"))
+ profile_root = os.path.join(repo_dir, "profiles")
+
+ for p, data in profiles:
+ prof_path = os.path.join(profile_root, p)
+ ensure_dirs(prof_path)
+ for k, v in data.items():
+ with io.open(os.path.join(prof_path, k), mode="w",
+ encoding=_encodings["repo.content"]) as f:
+ for line in v:
+ f.write("%s\n" % line)
+
+ # The config must be reloaded in order to account
+ # for the above profile customizations.
+ playground.reload_config()
+
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_rebuild.py b/lib/portage/tests/resolver/test_rebuild.py
new file mode 100644
index 000000000..6f1a7834b
--- /dev/null
+++ b/lib/portage/tests/resolver/test_rebuild.py
@@ -0,0 +1,143 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class RebuildTestCase(TestCase):
+
+ def testRebuild(self):
+ """
+ Rebuild packages when build-time dependencies are upgraded.
+ """
+
+ ebuilds = {
+ "sys-libs/x-1": { },
+ "sys-libs/x-1-r1": { },
+ "sys-libs/x-2": { },
+ "sys-apps/a-1": { "DEPEND" : "sys-libs/x", "RDEPEND" : ""},
+ "sys-apps/a-2": { "DEPEND" : "sys-libs/x", "RDEPEND" : ""},
+ "sys-apps/b-1": { "DEPEND" : "sys-libs/x", "RDEPEND" : ""},
+ "sys-apps/b-2": { "DEPEND" : "sys-libs/x", "RDEPEND" : ""},
+ "sys-apps/c-1": { "DEPEND" : "sys-libs/x", "RDEPEND" : ""},
+ "sys-apps/c-2": { "DEPEND" : "sys-libs/x", "RDEPEND" : ""},
+ "sys-apps/d-1": { "RDEPEND" : "sys-libs/x"},
+ "sys-apps/d-2": { "RDEPEND" : "sys-libs/x"},
+ "sys-apps/e-2": { "DEPEND" : "sys-libs/x", "RDEPEND" : ""},
+ "sys-apps/f-2": { "DEPEND" : "sys-apps/a", "RDEPEND" : ""},
+ "sys-apps/g-2": { "DEPEND" : "sys-apps/b sys-libs/x",
+ "RDEPEND" : ""},
+ }
+
+ installed = {
+ "sys-libs/x-1": { },
+ "sys-apps/a-1": { "DEPEND" : "sys-libs/x", "RDEPEND" : ""},
+ "sys-apps/b-1": { "DEPEND" : "sys-libs/x", "RDEPEND" : ""},
+ "sys-apps/c-1": { "DEPEND" : "sys-libs/x", "RDEPEND" : ""},
+ "sys-apps/d-1": { "RDEPEND" : "sys-libs/x"},
+ "sys-apps/e-1": { "DEPEND" : "sys-libs/x", "RDEPEND" : ""},
+ "sys-apps/f-1": { "DEPEND" : "sys-apps/a", "RDEPEND" : ""},
+ "sys-apps/g-1": { "DEPEND" : "sys-apps/b",
+ "RDEPEND" : ""},
+ }
+
+ world = ["sys-apps/a", "sys-apps/b", "sys-apps/c", "sys-apps/d",
+ "sys-apps/e", "sys-apps/f", "sys-apps/g"]
+
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["sys-libs/x", "sys-apps/b"],
+ options = {"--rebuild-if-unbuilt" : True,
+ "--rebuild-exclude" : ["sys-apps/c"]},
+ mergelist = ['sys-libs/x-2', 'sys-apps/a-2', 'sys-apps/b-2',
+ 'sys-apps/e-2', 'sys-apps/g-2'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["sys-libs/x", "sys-apps/b"],
+ options = {"--rebuild-if-unbuilt" : True},
+ mergelist = ['sys-libs/x-2', 'sys-apps/a-2', 'sys-apps/b-2',
+ 'sys-apps/c-2', 'sys-apps/e-2', 'sys-apps/g-2'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["sys-libs/x"],
+ options = {"--rebuild-if-unbuilt" : True,
+ "--rebuild-ignore" : ["sys-libs/x"]},
+ mergelist = ['sys-libs/x-2'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["sys-libs/x", "sys-apps/b"],
+ options = {"--rebuild-if-unbuilt" : True,
+ "--rebuild-ignore" : ["sys-apps/b"]},
+ mergelist = ['sys-libs/x-2', 'sys-apps/a-2', 'sys-apps/b-2',
+ 'sys-apps/c-2', 'sys-apps/e-2'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["=sys-libs/x-1-r1", "sys-apps/b"],
+ options = {"--rebuild-if-unbuilt" : True},
+ mergelist = ['sys-libs/x-1-r1', 'sys-apps/a-2',
+ 'sys-apps/b-2', 'sys-apps/c-2', 'sys-apps/e-2',
+ 'sys-apps/g-2'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["=sys-libs/x-1-r1", "sys-apps/b"],
+ options = {"--rebuild-if-new-rev" : True},
+ mergelist = ['sys-libs/x-1-r1', 'sys-apps/a-2',
+ 'sys-apps/b-2', 'sys-apps/c-2', 'sys-apps/e-2',
+ 'sys-apps/g-2'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["=sys-libs/x-1-r1"],
+ options = {"--rebuild-if-new-ver" : True},
+ mergelist = ['sys-libs/x-1-r1'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["sys-libs/x", "sys-apps/b"],
+ options = {"--rebuild-if-new-ver" : True},
+ mergelist = ['sys-libs/x-2', 'sys-apps/a-2',
+ 'sys-apps/b-2', 'sys-apps/c-2', 'sys-apps/e-2',
+ 'sys-apps/g-2'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["=sys-libs/x-1"],
+ options = {"--rebuild-if-new-rev" : True},
+ mergelist = ['sys-libs/x-1'],
+ ignore_mergelist_order = True,
+ success = True),
+
+ ResolverPlaygroundTestCase(
+ ["=sys-libs/x-1", "=sys-apps/b-1"],
+ options = {"--rebuild-if-unbuilt" : True},
+ mergelist = ['sys-libs/x-1', 'sys-apps/a-2',
+ 'sys-apps/b-1', 'sys-apps/c-2', 'sys-apps/e-2',
+ 'sys-apps/g-2'],
+ ignore_mergelist_order = True,
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_regular_slot_change_without_revbump.py b/lib/portage/tests/resolver/test_regular_slot_change_without_revbump.py
new file mode 100644
index 000000000..415277bc7
--- /dev/null
+++ b/lib/portage/tests/resolver/test_regular_slot_change_without_revbump.py
@@ -0,0 +1,59 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class RegularSlotChangeWithoutRevBumpTestCase(TestCase):
+
+ def testRegularSlotChangeWithoutRevBumpTestCase(self):
+
+ ebuilds = {
+ "dev-libs/boost-1.52.0" : {
+ "SLOT": "0"
+ },
+ "app-office/libreoffice-4.0.0.2" : {
+ "EAPI": "5",
+ "DEPEND": ">=dev-libs/boost-1.46:=",
+ "RDEPEND": ">=dev-libs/boost-1.46:=",
+ },
+ }
+
+ binpkgs = {
+ "dev-libs/boost-1.52.0" : {
+ "SLOT": "1.52"
+ },
+ }
+
+ installed = {
+ "dev-libs/boost-1.52.0" : {
+ "SLOT": "1.52"
+ },
+ }
+
+ world = []
+
+ test_cases = (
+ # Test that @__auto_slot_operator_replace_installed__
+ # pulls in the available slot, even though it's
+ # different from the installed slot (0 instead of 1.52).
+ ResolverPlaygroundTestCase(
+ ["app-office/libreoffice"],
+ options = {"--oneshot": True, "--usepkg": True},
+ success = True,
+ mergelist = [
+ 'dev-libs/boost-1.52.0',
+ 'app-office/libreoffice-4.0.0.2'
+ ]
+ ),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_required_use.py b/lib/portage/tests/resolver/test_required_use.py
new file mode 100644
index 000000000..c679ce300
--- /dev/null
+++ b/lib/portage/tests/resolver/test_required_use.py
@@ -0,0 +1,134 @@
+# Copyright 2010-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class RequiredUSETestCase(TestCase):
+
+ def testRequiredUSE(self):
+ """
+ Only simple REQUIRED_USE values here. The parser is tested in dep/testCheckRequiredUse.
+ """
+
+ ebuilds = {
+ "dev-libs/A-1" : {"EAPI": "4", "IUSE": "foo bar", "REQUIRED_USE": "|| ( foo bar )"},
+ "dev-libs/A-2" : {"EAPI": "4", "IUSE": "foo +bar", "REQUIRED_USE": "|| ( foo bar )"},
+ "dev-libs/A-3" : {"EAPI": "4", "IUSE": "+foo bar", "REQUIRED_USE": "|| ( foo bar )"},
+ "dev-libs/A-4" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "|| ( foo bar )"},
+ "dev-libs/A-5" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "|| ( )"},
+
+ "dev-libs/B-1" : {"EAPI": "4", "IUSE": "foo bar", "REQUIRED_USE": "^^ ( foo bar )"},
+ "dev-libs/B-2" : {"EAPI": "4", "IUSE": "foo +bar", "REQUIRED_USE": "^^ ( foo bar )"},
+ "dev-libs/B-3" : {"EAPI": "4", "IUSE": "+foo bar", "REQUIRED_USE": "^^ ( foo bar )"},
+ "dev-libs/B-4" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "^^ ( foo bar )"},
+ "dev-libs/B-5" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "^^ ( )"},
+
+ "dev-libs/C-1" : {"EAPI": "4", "IUSE": "+foo bar", "REQUIRED_USE": "foo? ( !bar )"},
+ "dev-libs/C-2" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "foo? ( !bar )"},
+ "dev-libs/C-3" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "foo? ( bar )"},
+ "dev-libs/C-4" : {"EAPI": "4", "IUSE": "+foo bar", "REQUIRED_USE": "foo? ( bar )"},
+ "dev-libs/C-5" : {"EAPI": "4", "IUSE": "foo bar", "REQUIRED_USE": "foo? ( bar )"},
+ "dev-libs/C-6" : {"EAPI": "4", "IUSE": "foo +bar", "REQUIRED_USE": "foo? ( bar )"},
+ "dev-libs/C-7" : {"EAPI": "4", "IUSE": "foo +bar", "REQUIRED_USE": "!foo? ( bar )"},
+ "dev-libs/C-8" : {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "!foo? ( bar )"},
+ "dev-libs/C-9" : {"EAPI": "4", "IUSE": "+foo bar", "REQUIRED_USE": "!foo? ( bar )"},
+ "dev-libs/C-10": {"EAPI": "4", "IUSE": "foo bar", "REQUIRED_USE": "!foo? ( bar )"},
+ "dev-libs/C-11": {"EAPI": "4", "IUSE": "foo bar", "REQUIRED_USE": "!foo? ( !bar )"},
+ "dev-libs/C-12": {"EAPI": "4", "IUSE": "foo +bar", "REQUIRED_USE": "!foo? ( !bar )"},
+ "dev-libs/C-13": {"EAPI": "4", "IUSE": "+foo +bar", "REQUIRED_USE": "!foo? ( !bar )"},
+ "dev-libs/C-14": {"EAPI": "4", "IUSE": "+foo bar", "REQUIRED_USE": "!foo? ( !bar )"},
+
+ "dev-libs/D-1" : {"EAPI": "4", "IUSE": "+w +x +y z", "REQUIRED_USE": "w? ( x || ( y z ) )"},
+ "dev-libs/D-2" : {"EAPI": "4", "IUSE": "+w +x +y +z", "REQUIRED_USE": "w? ( x || ( y z ) )"},
+ "dev-libs/D-3" : {"EAPI": "4", "IUSE": "+w +x y z", "REQUIRED_USE": "w? ( x || ( y z ) )"},
+ "dev-libs/D-4" : {"EAPI": "4", "IUSE": "+w x +y +z", "REQUIRED_USE": "w? ( x || ( y z ) )"},
+ "dev-libs/D-5" : {"EAPI": "4", "IUSE": "w x y z", "REQUIRED_USE": "w? ( x || ( y z ) )"},
+
+ "dev-libs/E-1" : {"EAPI": "5", "IUSE": "foo bar", "REQUIRED_USE": "?? ( foo bar )"},
+ "dev-libs/E-2" : {"EAPI": "5", "IUSE": "foo +bar", "REQUIRED_USE": "?? ( foo bar )"},
+ "dev-libs/E-3" : {"EAPI": "5", "IUSE": "+foo bar", "REQUIRED_USE": "?? ( foo bar )"},
+ "dev-libs/E-4" : {"EAPI": "5", "IUSE": "+foo +bar", "REQUIRED_USE": "?? ( foo bar )"},
+ "dev-libs/E-5" : {"EAPI": "5", "IUSE": "+foo +bar", "REQUIRED_USE": "?? ( )"},
+
+ "dev-libs/F-1" : {"EAPI": "7", "IUSE": "+foo +bar", "REQUIRED_USE": "|| ( )"},
+ "dev-libs/F-2" : {"EAPI": "7", "IUSE": "+foo +bar", "REQUIRED_USE": "^^ ( )"},
+ "dev-libs/F-3" : {"EAPI": "7", "IUSE": "+foo +bar", "REQUIRED_USE": "?? ( )"},
+ }
+
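+ # The dev-libs/F ebuilds exercise EAPI 7, where empty || ( ) and
+ # ^^ ( ) groups are no longer considered satisfied, in contrast to
+ # the older EAPIs used by A-5, B-5 and E-5 above.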
+ test_cases = (
+ ResolverPlaygroundTestCase(["=dev-libs/A-1"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/A-2"], success = True, mergelist=["dev-libs/A-2"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-3"], success = True, mergelist=["dev-libs/A-3"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-4"], success = True, mergelist=["dev-libs/A-4"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-5"], success = True, mergelist=["dev-libs/A-5"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/B-1"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/B-2"], success = True, mergelist=["dev-libs/B-2"]),
+ ResolverPlaygroundTestCase(["=dev-libs/B-3"], success = True, mergelist=["dev-libs/B-3"]),
+ ResolverPlaygroundTestCase(["=dev-libs/B-4"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/B-5"], success = True, mergelist=["dev-libs/B-5"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/C-1"], success = True, mergelist=["dev-libs/C-1"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-2"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/C-3"], success = True, mergelist=["dev-libs/C-3"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-4"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/C-5"], success = True, mergelist=["dev-libs/C-5"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-6"], success = True, mergelist=["dev-libs/C-6"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-7"], success = True, mergelist=["dev-libs/C-7"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-8"], success = True, mergelist=["dev-libs/C-8"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-9"], success = True, mergelist=["dev-libs/C-9"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-10"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/C-11"], success = True, mergelist=["dev-libs/C-11"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-12"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/C-13"], success = True, mergelist=["dev-libs/C-13"]),
+ ResolverPlaygroundTestCase(["=dev-libs/C-14"], success = True, mergelist=["dev-libs/C-14"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/D-1"], success = True, mergelist=["dev-libs/D-1"]),
+ ResolverPlaygroundTestCase(["=dev-libs/D-2"], success = True, mergelist=["dev-libs/D-2"]),
+ ResolverPlaygroundTestCase(["=dev-libs/D-3"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/D-4"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/D-5"], success = True, mergelist=["dev-libs/D-5"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/E-1"], success = True, mergelist=["dev-libs/E-1"]),
+ ResolverPlaygroundTestCase(["=dev-libs/E-2"], success = True, mergelist=["dev-libs/E-2"]),
+ ResolverPlaygroundTestCase(["=dev-libs/E-3"], success = True, mergelist=["dev-libs/E-3"]),
+ ResolverPlaygroundTestCase(["=dev-libs/E-4"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/E-5"], success = True, mergelist=["dev-libs/E-5"]),
+
+ ResolverPlaygroundTestCase(["=dev-libs/F-1"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/F-2"], success = False),
+ ResolverPlaygroundTestCase(["=dev-libs/F-3"], success = True, mergelist=["dev-libs/F-3"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testRequiredUseOrDeps(self):
+
+ ebuilds = {
+ "dev-libs/A-1": { "IUSE": "+x +y", "REQUIRED_USE": "^^ ( x y )", "EAPI": "4" },
+ "dev-libs/B-1": { "IUSE": "+x +y", "REQUIRED_USE": "", "EAPI": "4" },
+ "app-misc/p-1": { "RDEPEND": "|| ( =dev-libs/A-1 =dev-libs/B-1 )" },
+ }
+
+ test_cases = (
+ # This should fail and show a REQUIRED_USE error for
+ # dev-libs/A-1, since this choice is preferred.
+ ResolverPlaygroundTestCase(
+ ["=app-misc/p-1"],
+ success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_runtime_cycle_merge_order.py b/lib/portage/tests/resolver/test_runtime_cycle_merge_order.py
new file mode 100644
index 000000000..438d9cbfc
--- /dev/null
+++ b/lib/portage/tests/resolver/test_runtime_cycle_merge_order.py
@@ -0,0 +1,72 @@
+# Copyright 2016 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+
+class RuntimeCycleMergeOrderTestCase(TestCase):
+
+ def testRuntimeCycleMergeOrder(self):
+ ebuilds = {
+ 'app-misc/plugins-consumer-1' : {
+ 'EAPI': '6',
+ 'DEPEND' : 'app-misc/plugin-b:=',
+ 'RDEPEND' : 'app-misc/plugin-b:=',
+ },
+ 'app-misc/plugin-b-1' : {
+ 'EAPI': '6',
+ 'RDEPEND' : 'app-misc/runtime-cycle-b',
+ 'PDEPEND': 'app-misc/plugins-consumer',
+ },
+ 'app-misc/runtime-cycle-b-1' : {
+ 'RDEPEND' : 'app-misc/plugin-b app-misc/branch-b',
+ },
+ 'app-misc/branch-b-1' : {
+ 'RDEPEND' : 'app-misc/leaf-b app-misc/branch-c',
+ },
+ 'app-misc/leaf-b-1' : {},
+ 'app-misc/branch-c-1' : {
+ 'RDEPEND' : 'app-misc/runtime-cycle-c app-misc/runtime-c',
+ },
+ 'app-misc/runtime-cycle-c-1' : {
+ 'RDEPEND' : 'app-misc/branch-c',
+ },
+ 'app-misc/runtime-c-1' : {
+ 'RDEPEND' : 'app-misc/branch-d',
+ },
+ 'app-misc/branch-d-1' : {
+ 'RDEPEND' : 'app-misc/leaf-d app-misc/branch-e',
+ },
+ 'app-misc/branch-e-1' : {
+ 'RDEPEND' : 'app-misc/leaf-e',
+ },
+ 'app-misc/leaf-d-1' : {},
+ 'app-misc/leaf-e-1' : {},
+ }
+
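+ # Leaf packages must merge before the branches that depend on them,
+ # while members of a runtime cycle may merge in either order, hence
+ # the grouped entries in the expected mergelist.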
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ['app-misc/plugin-b'],
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = [
+ ('app-misc/leaf-b-1', 'app-misc/leaf-d-1', 'app-misc/leaf-e-1'),
+ ('app-misc/branch-d-1', 'app-misc/branch-e-1'),
+ 'app-misc/runtime-c-1',
+ ('app-misc/runtime-cycle-c-1', 'app-misc/branch-c-1'),
+ 'app-misc/branch-b-1',
+ ('app-misc/runtime-cycle-b-1', 'app-misc/plugin-b-1'),
+ 'app-misc/plugins-consumer-1',
+ ],
+ ),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_simple.py b/lib/portage/tests/resolver/test_simple.py
new file mode 100644
index 000000000..324ffa2a6
--- /dev/null
+++ b/lib/portage/tests/resolver/test_simple.py
@@ -0,0 +1,74 @@
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class SimpleResolverTestCase(TestCase):
+
+ def testSimple(self):
+ ebuilds = {
+ "dev-libs/A-1": { "KEYWORDS": "x86" },
+ "dev-libs/A-2": { "KEYWORDS": "~x86" },
+ "dev-libs/B-1.2": {},
+
+ "app-misc/Z-1": { "DEPEND": "|| ( app-misc/Y ( app-misc/X app-misc/W ) )", "RDEPEND": "" },
+ "app-misc/Y-1": { "KEYWORDS": "~x86" },
+ "app-misc/X-1": {},
+ "app-misc/W-1": {},
+ }
+ binpkgs = {
+ "dev-libs/B-1.2": {},
+ }
+ installed = {
+ "dev-libs/A-1": {},
+ "dev-libs/B-1.1": {},
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(["dev-libs/A"], success = True, mergelist = ["dev-libs/A-1"]),
+ ResolverPlaygroundTestCase(["=dev-libs/A-2"], options = { "--autounmask": 'n' }, success = False),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--noreplace": True},
+ success = True,
+ mergelist = []),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options = {"--noreplace": True},
+ success = True,
+ mergelist = []),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options = {"--update": True},
+ success = True,
+ mergelist = ["dev-libs/B-1.2"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options = {"--update": True, "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/B-1.2"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options = {"--update": True, "--usepkgonly": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/B-1.2"]),
+
+ ResolverPlaygroundTestCase(
+ ["app-misc/Z"],
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = [("app-misc/W-1", "app-misc/X-1"), "app-misc/Z-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ binpkgs=binpkgs, installed=installed)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_slot_abi.py b/lib/portage/tests/resolver/test_slot_abi.py
new file mode 100644
index 000000000..7dbbebe2f
--- /dev/null
+++ b/lib/portage/tests/resolver/test_slot_abi.py
@@ -0,0 +1,457 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotAbiTestCase(TestCase):
+
+ def __init__(self, *args, **kwargs):
+ super(SlotAbiTestCase, self).__init__(*args, **kwargs)
+
+ def testSubSlot(self):
+ ebuilds = {
+ "dev-libs/icu-49" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/49"
+ },
+ "dev-libs/icu-4.8" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/48"
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/icu:=",
+ "RDEPEND": "dev-libs/icu:="
+ },
+ }
+ binpkgs = {
+ "dev-libs/icu-49" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/49"
+ },
+ "dev-libs/icu-4.8" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/48"
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/icu:0/48=",
+ "RDEPEND": "dev-libs/icu:0/48="
+ },
+ }
+ installed = {
+ "dev-libs/icu-4.8" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/48"
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/icu:0/48=",
+ "RDEPEND": "dev-libs/icu:0/48="
+ },
+ }
+
+ world = ["dev-libs/libxml2"]
+
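+ # Upgrading icu from sub-slot 0/48 to 0/49 invalidates libxml2's
+ # built := (slot-operator) dependency, so libxml2 is rebuilt unless
+ # --ignore-built-slot-operator-deps is enabled.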
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True},
+ success = True,
+ mergelist = ["dev-libs/icu-49", "dev-libs/libxml2-2.7.8" ]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["dev-libs/icu-49"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-49", "dev-libs/libxml2-2.7.8" ]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--usepkgonly": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-4.8"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-49"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ["dev-libs/icu-49", "dev-libs/libxml2-2.7.8" ]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["dev-libs/icu-49"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-49", "dev-libs/libxml2-2.7.8" ]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkgonly": True},
+ success = True,
+ mergelist = []),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-49"]),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testWholeSlot(self):
+ ebuilds = {
+ "sys-libs/db-4.8" : {
+ "SLOT": "4.8"
+ },
+ "sys-libs/db-4.7" : {
+ "SLOT": "4.7"
+ },
+ "app-office/libreoffice-3.5.4.2" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": ">=sys-libs/db-4:=",
+ "RDEPEND": ">=sys-libs/db-4:="
+ },
+ }
+ binpkgs = {
+ "sys-libs/db-4.8" : {
+ "SLOT": "4.8"
+ },
+ "sys-libs/db-4.7" : {
+ "SLOT": "4.7"
+ },
+ "app-office/libreoffice-3.5.4.2" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": ">=sys-libs/db-4:4.7/4.7=",
+ "RDEPEND": ">=sys-libs/db-4:4.7/4.7="
+ },
+ }
+ installed = {
+ "sys-libs/db-4.7" : {
+ "SLOT": "4.7"
+ },
+ "app-office/libreoffice-3.5.4.2" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": ">=sys-libs/db-4:4.7/4.7=",
+ "RDEPEND": ">=sys-libs/db-4:4.7/4.7="
+ },
+ }
+
+ world = ["app-office/libreoffice"]
+
+ test_cases = (
+
+ # The sys-libs/db test cases below don't trigger a libreoffice
+ # rebuild because sys-libs/db is the only package requested, and
+ # a rebuild is not necessary because the sys-libs/db:4.7 slot
+ # remains installed.
+ ResolverPlaygroundTestCase(
+ ["sys-libs/db"],
+ options = {"--oneshot": True},
+ success = True,
+ mergelist = ["sys-libs/db-4.8"]),
+
+ ResolverPlaygroundTestCase(
+ ["sys-libs/db"],
+ options = {"--oneshot": True, "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]sys-libs/db-4.8"]),
+
+ ResolverPlaygroundTestCase(
+ ["sys-libs/db"],
+ options = {"--oneshot": True, "--usepkgonly": True},
+ success = True,
+ mergelist = ["[binary]sys-libs/db-4.8"]),
+
+ ResolverPlaygroundTestCase(
+ ["sys-libs/db"],
+ options = {"--oneshot": True, "--rebuild-if-new-slot": "n"},
+ success = True,
+ mergelist = ["sys-libs/db-4.8"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ["sys-libs/db-4.8", "app-office/libreoffice-3.5.4.2"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]sys-libs/db-4.8", "app-office/libreoffice-3.5.4.2"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkg": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["[binary]sys-libs/db-4.8"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkgonly": True},
+ success = True,
+ mergelist = []),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["[binary]sys-libs/db-4.8"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--rebuild-if-new-slot": "n"},
+ success = True,
+ mergelist = []),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testWholeSlotConditional(self):
+ ebuilds = {
+ "dev-libs/libnl-3.2.14" : {
+ "SLOT": "3"
+ },
+ "dev-libs/libnl-1.1-r3" : {
+ "SLOT": "1.1"
+ },
+ "net-misc/networkmanager-0.9.6.4-r1" : {
+ "EAPI": "5",
+ "IUSE": "wimax",
+ "DEPEND": "wimax? ( dev-libs/libnl:1.1= ) !wimax? ( dev-libs/libnl:3= )",
+ "RDEPEND": "wimax? ( dev-libs/libnl:1.1= ) !wimax? ( dev-libs/libnl:3= )"
+ },
+ }
+ installed = {
+ "dev-libs/libnl-1.1-r3" : {
+ "SLOT": "1.1"
+ },
+ "net-misc/networkmanager-0.9.6.4-r1" : {
+ "EAPI": "5",
+ "IUSE": "wimax",
+ "USE": "wimax",
+ "DEPEND": "dev-libs/libnl:1.1/1.1=",
+ "RDEPEND": "dev-libs/libnl:1.1/1.1="
+ },
+ }
+
+ user_config = {
+ "make.conf" : ("USE=\"wimax\"",)
+ }
+
+ world = ["net-misc/networkmanager"]
+
+ test_cases = (
+
+ # Demonstrate bug #460304, where _slot_operator_update_probe needs
+ # to account for USE conditional deps.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = []),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, user_config=user_config, world=world,
+ debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ user_config = {
+ "make.conf" : ("USE=\"-wimax\"",)
+ }
+
+ test_cases = (
+
+ # Demonstrate bug #460304 again, but with inverted USE
+ # settings this time.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ['dev-libs/libnl-3.2.14', 'net-misc/networkmanager-0.9.6.4-r1']),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, user_config=user_config, world=world,
+ debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testWholeSlotSubSlotMix(self):
+ ebuilds = {
+ "dev-libs/glib-1.2.10" : {
+ "SLOT": "1"
+ },
+ "dev-libs/glib-2.30.2" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "2/2.30"
+ },
+ "dev-libs/glib-2.32.3" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "2/2.32"
+ },
+ "dev-libs/dbus-glib-0.98" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/glib:2=",
+ "RDEPEND": "dev-libs/glib:2="
+ },
+ }
+ binpkgs = {
+ "dev-libs/glib-1.2.10" : {
+ "SLOT": "1"
+ },
+ "dev-libs/glib-2.30.2" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "2/2.30"
+ },
+ "dev-libs/glib-2.32.3" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "2/2.32"
+ },
+ "dev-libs/dbus-glib-0.98" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/glib:2/2.30=",
+ "RDEPEND": "dev-libs/glib:2/2.30="
+ },
+ }
+ installed = {
+ "dev-libs/glib-1.2.10" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "1"
+ },
+ "dev-libs/glib-2.30.2" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "2/2.30"
+ },
+ "dev-libs/dbus-glib-0.98" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/glib:2/2.30=",
+ "RDEPEND": "dev-libs/glib:2/2.30="
+ },
+ }
+
+ world = ["dev-libs/glib:1", "dev-libs/dbus-glib"]
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/glib"],
+ options = {"--oneshot": True},
+ success = True,
+ mergelist = ["dev-libs/glib-2.32.3", "dev-libs/dbus-glib-0.98" ]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/glib"],
+ options = {"--oneshot": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["dev-libs/glib-2.32.3"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/glib"],
+ options = {"--oneshot": True, "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/glib-2.32.3", "dev-libs/dbus-glib-0.98" ]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/glib"],
+ options = {"--oneshot": True, "--usepkgonly": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/glib-2.30.2"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/glib"],
+ options = {"--oneshot": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["[binary]dev-libs/glib-2.32.3"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ["dev-libs/glib-2.32.3", "dev-libs/dbus-glib-0.98" ]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["dev-libs/glib-2.32.3"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/glib-2.32.3", "dev-libs/dbus-glib-0.98" ]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkgonly": True},
+ success = True,
+ mergelist = []),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["[binary]dev-libs/glib-2.32.3"]),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_slot_abi_downgrade.py b/lib/portage/tests/resolver/test_slot_abi_downgrade.py
new file mode 100644
index 000000000..08e9a9db2
--- /dev/null
+++ b/lib/portage/tests/resolver/test_slot_abi_downgrade.py
@@ -0,0 +1,225 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotAbiDowngradeTestCase(TestCase):
+
+ def __init__(self, *args, **kwargs):
+ super(SlotAbiDowngradeTestCase, self).__init__(*args, **kwargs)
+
+ def testSubSlot(self):
+ ebuilds = {
+ "dev-libs/icu-4.8" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/48"
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/icu:=",
+ "RDEPEND": "dev-libs/icu:="
+ },
+ }
+ binpkgs = {
+ "dev-libs/icu-49" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/49"
+ },
+ "dev-libs/icu-4.8" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/48"
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/icu:0/49=",
+ "RDEPEND": "dev-libs/icu:0/49="
+ },
+ }
+ installed = {
+ "dev-libs/icu-49" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/49"
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/icu:0/49=",
+ "RDEPEND": "dev-libs/icu:0/49="
+ },
+ }
+
+ world = ["dev-libs/libxml2"]
+
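+ # Here the installed icu (sub-slot 0/49) is only available as a
+ # binary package, so building from ebuilds means downgrading to
+ # 0/48, which likewise forces a libxml2 rebuild via the := dep.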
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True},
+ success = True,
+ mergelist = ["dev-libs/icu-4.8", "dev-libs/libxml2-2.7.8" ]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["dev-libs/icu-4.8"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-4.8", "dev-libs/libxml2-2.7.8" ]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--usepkgonly": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-49"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ["dev-libs/icu-4.8", "dev-libs/libxml2-2.7.8" ]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["dev-libs/icu-4.8"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-4.8", "dev-libs/libxml2-2.7.8" ]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkgonly": True},
+ success = True,
+ mergelist = []),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testWholeSlotSubSlotMix(self):
+ ebuilds = {
+ "dev-libs/glib-1.2.10" : {
+ "SLOT": "1"
+ },
+ "dev-libs/glib-2.30.2" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "2/2.30"
+ },
+ "dev-libs/dbus-glib-0.98" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/glib:2=",
+ "RDEPEND": "dev-libs/glib:2="
+ },
+ }
+ binpkgs = {
+ "dev-libs/glib-1.2.10" : {
+ "SLOT": "1"
+ },
+ "dev-libs/glib-2.30.2" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "2/2.30"
+ },
+ "dev-libs/glib-2.32.3" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "2/2.32"
+ },
+ "dev-libs/dbus-glib-0.98" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/glib:2/2.32=",
+ "RDEPEND": "dev-libs/glib:2/2.32="
+ },
+ }
+ installed = {
+ "dev-libs/glib-1.2.10" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "1"
+ },
+ "dev-libs/glib-2.32.3" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "2/2.32"
+ },
+ "dev-libs/dbus-glib-0.98" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/glib:2/2.32=",
+ "RDEPEND": "dev-libs/glib:2/2.32="
+ },
+ }
+
+ world = ["dev-libs/glib:1", "dev-libs/dbus-glib"]
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/glib"],
+ options = {"--oneshot": True},
+ success = True,
+ mergelist = ["dev-libs/glib-2.30.2", "dev-libs/dbus-glib-0.98" ]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/glib"],
+ options = {"--oneshot": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["dev-libs/glib-2.30.2"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/glib"],
+ options = {"--oneshot": True, "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/glib-2.30.2", "dev-libs/dbus-glib-0.98" ]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/glib"],
+ options = {"--oneshot": True, "--usepkgonly": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/glib-2.32.3"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ["dev-libs/glib-2.30.2", "dev-libs/dbus-glib-0.98" ]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["dev-libs/glib-2.30.2"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/glib-2.30.2", "dev-libs/dbus-glib-0.98" ]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkgonly": True},
+ success = True,
+ mergelist = []),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_slot_change_without_revbump.py b/lib/portage/tests/resolver/test_slot_change_without_revbump.py
new file mode 100644
index 000000000..5cd8c53d1
--- /dev/null
+++ b/lib/portage/tests/resolver/test_slot_change_without_revbump.py
@@ -0,0 +1,88 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotChangeWithoutRevBumpTestCase(TestCase):
+
+ def testSlotChangeWithoutRevBump(self):
+
+ ebuilds = {
+ "app-arch/libarchive-3.1.1" : {
+ "EAPI": "5",
+ "SLOT": "0/13"
+ },
+ "app-arch/libarchive-3.0.4-r1" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+ "kde-base/ark-4.10.0" : {
+ "EAPI": "5",
+ "DEPEND": "app-arch/libarchive:=",
+ "RDEPEND": "app-arch/libarchive:="
+ },
+ }
+
+ binpkgs = {
+ "app-arch/libarchive-3.1.1" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+ }
+
+ installed = {
+ "app-arch/libarchive-3.1.1" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+
+ "kde-base/ark-4.10.0" : {
+ "EAPI": "5",
+ "DEPEND": "app-arch/libarchive:0/0=",
+ "RDEPEND": "app-arch/libarchive:0/0="
+ },
+ }
+
+ world = ["kde-base/ark"]
+
+ test_cases = (
+
+ # Demonstrate bug #456208, where a sub-slot change
+ # without revbump needs to trigger a rebuild.
+ ResolverPlaygroundTestCase(
+ ["kde-base/ark"],
+ options = {"--oneshot": True, "--usepkg": True},
+ success = True,
+ mergelist = ['app-arch/libarchive-3.1.1', "kde-base/ark-4.10.0"]),
+
+ ResolverPlaygroundTestCase(
+ ["app-arch/libarchive"],
+ options = {"--noreplace": True, "--usepkg": True},
+ success = True,
+ mergelist = []),
+
+ ResolverPlaygroundTestCase(
+ ["app-arch/libarchive"],
+ options = {"--usepkg": True},
+ success = True,
+ mergelist = ["[binary]app-arch/libarchive-3.1.1"]),
+
+ # Test --changed-slot
+ ResolverPlaygroundTestCase(
+ ["app-arch/libarchive"],
+ options = {"--changed-slot": True, "--usepkg": True},
+ success = True,
+ mergelist = ["app-arch/libarchive-3.1.1", "kde-base/ark-4.10.0"]),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_slot_collisions.py b/lib/portage/tests/resolver/test_slot_collisions.py
new file mode 100644
index 000000000..430ccaad6
--- /dev/null
+++ b/lib/portage/tests/resolver/test_slot_collisions.py
@@ -0,0 +1,263 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class SlotCollisionTestCase(TestCase):
+
+ def testSlotCollision(self):
+
+ ebuilds = {
+ "dev-libs/A-1": { "PDEPEND": "foo? ( dev-libs/B )", "IUSE": "foo" },
+ "dev-libs/B-1": { "IUSE": "foo" },
+ "dev-libs/C-1": { "DEPEND": "dev-libs/A[foo]", "EAPI": 2 },
+ "dev-libs/D-1": { "DEPEND": "dev-libs/A[foo=] dev-libs/B[foo=]", "IUSE": "foo", "EAPI": 2 },
+ "dev-libs/E-1": { },
+ "dev-libs/E-2": { "IUSE": "foo" },
+
+ "app-misc/Z-1": { },
+ "app-misc/Z-2": { },
+ "app-misc/Y-1": { "DEPEND": "=app-misc/Z-1" },
+ "app-misc/Y-2": { "DEPEND": ">app-misc/Z-1" },
+ "app-misc/X-1": { "DEPEND": "=app-misc/Z-2" },
+ "app-misc/X-2": { "DEPEND": "<app-misc/Z-2" },
+
+ "sci-libs/K-1": { "IUSE": "+foo", "EAPI": 1 },
+ "sci-libs/L-1": { "DEPEND": "sci-libs/K[-foo]", "EAPI": 2 },
+ "sci-libs/M-1": { "DEPEND": "sci-libs/K[foo=]", "IUSE": "+foo", "EAPI": 2 },
+
+ "sci-libs/Q-1": { "SLOT": "1", "IUSE": "+bar foo", "EAPI": 1 },
+ "sci-libs/Q-2": { "SLOT": "2", "IUSE": "+bar +foo", "EAPI": 2, "PDEPEND": "sci-libs/Q:1[bar?,foo?]" },
+ "sci-libs/P-1": { "DEPEND": "sci-libs/Q:1[foo=]", "IUSE": "foo", "EAPI": 2 },
+
+ "sys-libs/A-1": { "RDEPEND": "foo? ( sys-libs/J[foo=] )", "IUSE": "+foo", "EAPI": "4" },
+ "sys-libs/B-1": { "RDEPEND": "bar? ( sys-libs/J[bar=] )", "IUSE": "+bar", "EAPI": "4" },
+ "sys-libs/C-1": { "RDEPEND": "sys-libs/J[bar]", "EAPI": "4" },
+ "sys-libs/D-1": { "RDEPEND": "sys-libs/J[bar?]", "IUSE": "bar", "EAPI": "4" },
+ "sys-libs/E-1": { "RDEPEND": "sys-libs/J[foo(+)?]", "IUSE": "+foo", "EAPI": "4" },
+ "sys-libs/F-1": { "RDEPEND": "sys-libs/J[foo(+)]", "EAPI": "4" },
+ "sys-libs/J-1": { "IUSE": "+foo", "EAPI": "4" },
+ "sys-libs/J-2": { "IUSE": "+bar", "EAPI": "4" },
+
+ "app-misc/A-1": { "IUSE": "foo +bar", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
+ "app-misc/B-1": { "DEPEND": "=app-misc/A-1[foo=]", "IUSE": "foo", "EAPI": 2 },
+ "app-misc/C-1": { "DEPEND": "=app-misc/A-1[foo]", "EAPI": 2 },
+ "app-misc/E-1": { "RDEPEND": "dev-libs/E[foo?]", "IUSE": "foo", "EAPI": "2" },
+ "app-misc/F-1": { "RDEPEND": "=dev-libs/E-1", "IUSE": "foo", "EAPI": "2" },
+
+ "dev-lang/perl-5.12": {"SLOT": "0/5.12", "EAPI": "4-slot-abi"},
+ "dev-lang/perl-5.16": {"SLOT": "0/5.16", "EAPI": "4-slot-abi"},
+ }
+ installed = {
+ "dev-libs/A-1": { "PDEPEND": "foo? ( dev-libs/B )", "IUSE": "foo", "USE": "foo" },
+ "dev-libs/B-1": { "IUSE": "foo", "USE": "foo" },
+ "dev-libs/C-1": { "DEPEND": "dev-libs/A[foo]", "EAPI": 2 },
+ "dev-libs/D-1": { "DEPEND": "dev-libs/A[foo=] dev-libs/B[foo=]", "IUSE": "foo", "USE": "foo", "EAPI": 2 },
+
+ "sci-libs/K-1": { "IUSE": "foo", "USE": "" },
+ "sci-libs/L-1": { "DEPEND": "sci-libs/K[-foo]" },
+
+ "sci-libs/Q-1": { "SLOT": "1", "IUSE": "+bar +foo", "USE": "bar foo", "EAPI": 1 },
+ "sci-libs/Q-2": { "SLOT": "2", "IUSE": "+bar +foo", "USE": "bar foo", "EAPI": 2, "PDEPEND": "sci-libs/Q:1[bar?,foo?]" },
+
+ "app-misc/A-1": { "IUSE": "+foo bar", "USE": "foo", "REQUIRED_USE": "^^ ( foo bar )", "EAPI": "4" },
+ }
+
+ test_cases = (
+ #A qt-*[qt3support]-like mess.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B", "dev-libs/C", "dev-libs/D"],
+ options = { "--autounmask": 'n' },
+ success = False,
+ mergelist = ["dev-libs/A-1", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
+ ignore_mergelist_order = True,
+ slot_collision_solutions = [ {"dev-libs/A-1": {"foo": True}, "dev-libs/D-1": {"foo": True}} ]),
+
+ ResolverPlaygroundTestCase(
+ ["sys-libs/A", "sys-libs/B", "sys-libs/C", "sys-libs/D", "sys-libs/E", "sys-libs/F"],
+ options = { "--autounmask": 'n' },
+ success = False,
+ ignore_mergelist_order = True,
+ slot_collision_solutions = [],
+ mergelist = ['sys-libs/J-2', 'sys-libs/J-1', 'sys-libs/A-1', 'sys-libs/B-1', 'sys-libs/C-1', 'sys-libs/D-1', 'sys-libs/E-1', 'sys-libs/F-1'],
+ ),
+
+ #Version-based conflicts, nothing we can do.
+ ResolverPlaygroundTestCase(
+ ["=app-misc/X-1", "=app-misc/Y-1"],
+ success = False,
+ mergelist = ["app-misc/Z-1", "app-misc/Z-2", "app-misc/X-1", "app-misc/Y-1"],
+ ignore_mergelist_order = True,
+ slot_collision_solutions = []
+ ),
+ ResolverPlaygroundTestCase(
+ ["=app-misc/X-2", "=app-misc/Y-2"],
+ success = False,
+ mergelist = ["app-misc/Z-1", "app-misc/Z-2", "app-misc/X-2", "app-misc/Y-2"],
+ ignore_mergelist_order = True,
+ slot_collision_solutions = []
+ ),
+
+ ResolverPlaygroundTestCase(
+ ["=app-misc/E-1", "=app-misc/F-1"],
+ success = False,
+ mergelist = ["dev-libs/E-1", "dev-libs/E-2", "app-misc/E-1", "app-misc/F-1"],
+ ignore_mergelist_order = True,
+ slot_collision_solutions = []
+ ),
+
+ # sub-slot
+ ResolverPlaygroundTestCase(
+ ["dev-lang/perl:0/5.12", "dev-lang/perl:0/5.16", "=dev-lang/perl-5.12*"],
+ success = False,
+ mergelist = ["dev-lang/perl-5.12", "dev-lang/perl-5.16"],
+ ignore_mergelist_order = True,
+ slot_collision_solutions = []
+ ),
+
+ #Simple cases.
+ ResolverPlaygroundTestCase(
+ ["sci-libs/L", "sci-libs/M"],
+ success = False,
+ mergelist = ["sci-libs/L-1", "sci-libs/M-1", "sci-libs/K-1"],
+ ignore_mergelist_order = True,
+ slot_collision_solutions = [{"sci-libs/K-1": {"foo": False}, "sci-libs/M-1": {"foo": False}}]
+ ),
+
+ #Avoid duplicates.
+ ResolverPlaygroundTestCase(
+ ["sci-libs/P", "sci-libs/Q:2"],
+ success = False,
+ options = { "--update": True, "--complete-graph": True, "--autounmask": 'n' },
+ mergelist = ["sci-libs/P-1", "sci-libs/Q-1"],
+ ignore_mergelist_order = True,
+ all_permutations=True,
+ slot_collision_solutions = [{"sci-libs/Q-1": {"foo": True}, "sci-libs/P-1": {"foo": True}}]
+ ),
+
+ )
+ # NOTE: For this test case, ResolverPlaygroundTestCase attributes
+ # vary randomly between runs, so it's expected to fail randomly.
+ #Conflict with REQUIRED_USE
+ #ResolverPlaygroundTestCase(
+ # ["=app-misc/C-1", "=app-misc/B-1"],
+ # all_permutations = True,
+ # slot_collision_solutions = None,
+ # use_changes={"app-misc/A-1": {"foo": True}},
+ # mergelist = ["app-misc/A-1", "app-misc/C-1", "app-misc/B-1"],
+ # ignore_mergelist_order = True,
+ # success = False),
+ #)
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testConnectedCollision(self):
+ """
+ Ensure that we are able to solve connected slot conflicts
+ which cannot each be solved on their own.
+ """
+ ebuilds = {
+ "dev-libs/A-1": { "RDEPEND": "=dev-libs/X-1" },
+ "dev-libs/B-1": { "RDEPEND": "dev-libs/X" },
+
+ "dev-libs/X-1": { "RDEPEND": "=dev-libs/Y-1" },
+ "dev-libs/X-2": { "RDEPEND": "=dev-libs/Y-2" },
+
+ "dev-libs/Y-1": { "PDEPEND": "=dev-libs/X-1" },
+ "dev-libs/Y-2": { "PDEPEND": "=dev-libs/X-2" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B"],
+ all_permutations = True,
+ options = { "--backtrack": 0 },
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = ["dev-libs/Y-1", "dev-libs/X-1", ("dev-libs/A-1", "dev-libs/B-1")]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testDeeplyConnectedCollision(self):
+ """
+ Like testConnectedCollision, except that there is another
+ level of dependencies between the two conflicts.
+ """
+ ebuilds = {
+ "dev-libs/A-1": { "RDEPEND": "=dev-libs/X-1" },
+ "dev-libs/B-1": { "RDEPEND": "dev-libs/X" },
+
+ "dev-libs/X-1": { "RDEPEND": "dev-libs/K" },
+ "dev-libs/X-2": { "RDEPEND": "dev-libs/L" },
+
+ "dev-libs/K-1": { "RDEPEND": "=dev-libs/Y-1" },
+ "dev-libs/L-1": { "RDEPEND": "=dev-libs/Y-2" },
+
+ "dev-libs/Y-1": { "PDEPEND": "=dev-libs/X-1" },
+ "dev-libs/Y-2": { "PDEPEND": "=dev-libs/X-2" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B"],
+ all_permutations = True,
+ options = { "--backtrack": 0 },
+ success = True,
+ ignore_mergelist_order = True,
+ mergelist = ["dev-libs/Y-1", "dev-libs/X-1", "dev-libs/K-1", \
+ "dev-libs/A-1", "dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testSelfDEPENDRemovalCrash(self):
+ """
+ Make sure we don't try to remove a package twice. This happened
+ in the past when a package had a DEPEND on itself.
+ """
+ ebuilds = {
+ "dev-libs/A-1": { "RDEPEND": "=dev-libs/X-1" },
+ "dev-libs/B-1": { "RDEPEND": "dev-libs/X" },
+
+ "dev-libs/X-1": { },
+ "dev-libs/X-2": { "DEPEND": ">=dev-libs/X-2" },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A", "dev-libs/B"],
+ all_permutations = True,
+ success = True,
+ ignore_mergelist_order = True,
+ mergelist = ["dev-libs/X-1", "dev-libs/A-1", "dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_slot_conflict_force_rebuild.py b/lib/portage/tests/resolver/test_slot_conflict_force_rebuild.py
new file mode 100644
index 000000000..4170bfd9d
--- /dev/null
+++ b/lib/portage/tests/resolver/test_slot_conflict_force_rebuild.py
@@ -0,0 +1,84 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotConflictForceRebuildTestCase(TestCase):
+
+ def testSlotConflictForceRebuild(self):
+
+ ebuilds = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/A:="
+ },
+
+ "app-misc/C-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/A"
+ },
+
+ }
+
+ installed = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/A:0/1="
+ },
+
+ "app-misc/C-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/A:0/1="
+ },
+
+ }
+
+ world = ["app-misc/B", "app-misc/C"]
+
+ test_cases = (
+
+ # Test bug #521990, where forced_rebuilds omits ebuilds that
+ # have had their slot operator atoms removed from the
+ # ebuilds, even though the corresponding installed
+ # instances really did force rebuilds due to being built
+ # with slot-operators in their deps.
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ options = {},
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = ['app-misc/A-2', ('app-misc/B-0', 'app-misc/C-0')],
+ forced_rebuilds = {
+ 'app-misc/A-2': ['app-misc/B-0', 'app-misc/C-0']
+ }
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_slot_conflict_mask_update.py b/lib/portage/tests/resolver/test_slot_conflict_mask_update.py
new file mode 100644
index 000000000..a90eeac29
--- /dev/null
+++ b/lib/portage/tests/resolver/test_slot_conflict_mask_update.py
@@ -0,0 +1,41 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotConflictMaskUpdateTestCase(TestCase):
+
+ def testBacktrackingGoodVersionFirst(self):
+ """
+ When backtracking due to slot conflicts, we used to mask the version that was pulled
+ in first. This is not always a good idea. Mask the highest version instead.
+ """
+
+
+ self.todo = True
+
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "=dev-libs/C-1 dev-libs/B" },
+ "dev-libs/B-1": { "DEPEND": "=dev-libs/C-1" },
+ "dev-libs/B-2": { "DEPEND": "=dev-libs/C-2" },
+ "dev-libs/C-1": { },
+ "dev-libs/C-2": { },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ mergelist = ["dev-libs/C-1", "dev-libs/B-1", "dev-libs/A-1",],
+ success = True),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_slot_conflict_rebuild.py b/lib/portage/tests/resolver/test_slot_conflict_rebuild.py
new file mode 100644
index 000000000..95b6396ba
--- /dev/null
+++ b/lib/portage/tests/resolver/test_slot_conflict_rebuild.py
@@ -0,0 +1,455 @@
+# Copyright 2012-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotConflictRebuildTestCase(TestCase):
+
+ def testSlotConflictRebuild(self):
+
+ ebuilds = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/A:=",
+ "RDEPEND": "app-misc/A:="
+ },
+
+ "app-misc/C-0" : {
+ "EAPI": "5",
+ "DEPEND": "<app-misc/A-2",
+ "RDEPEND": "<app-misc/A-2"
+ },
+
+ "app-misc/D-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/D-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+
+ "app-misc/E-0" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/D:=",
+ "RDEPEND": "app-misc/D:="
+ },
+
+ }
+
+ installed = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/A:0/1=",
+ "RDEPEND": "app-misc/A:0/1="
+ },
+
+ "app-misc/C-0" : {
+ "EAPI": "5",
+ "DEPEND": "<app-misc/A-2",
+ "RDEPEND": "<app-misc/A-2"
+ },
+
+ "app-misc/D-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/E-0" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/D:0/1=",
+ "RDEPEND": "app-misc/D:0/1="
+ },
+
+ }
+
+ world = ["app-misc/B", "app-misc/C", "app-misc/E"]
+
+ test_cases = (
+
+ # Test bug #439688, where a slot conflict prevents an
+ # upgrade and we don't want to trigger unnecessary rebuilds.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--backtrack": 4},
+ success = True,
+ mergelist = ["app-misc/D-2", "app-misc/E-0"]),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testSlotConflictMassRebuild(self):
+ """
+ Bug 486580
+ Before this bug was fixed, emerge would backtrack for each package that needed
+ a rebuild. This could cause it to hit the backtrack limit and not rebuild all
+ needed packages.
+ """
+ ebuilds = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:=",
+ "RDEPEND": "app-misc/B:="
+ },
+
+ "app-misc/B-1" : {
+ "EAPI": "5",
+ "SLOT": "1"
+ },
+
+ "app-misc/B-2" : {
+ "EAPI": "5",
+ "SLOT": "2/2"
+ },
+ }
+
+ installed = {
+ "app-misc/B-1" : {
+ "EAPI": "5",
+ "SLOT": "1"
+ },
+ }
+
+ expected_mergelist = ['app-misc/A-1', 'app-misc/B-2']
+
+ for i in range(5):
+ ebuilds["app-misc/C%sC-1" % i] = {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:=",
+ "RDEPEND": "app-misc/B:="
+ }
+
+ installed["app-misc/C%sC-1" % i] = {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:1/1=",
+ "RDEPEND": "app-misc/B:1/1="
+ }
+ for x in ("DEPEND", "RDEPEND"):
+ ebuilds["app-misc/A-1"][x] += " app-misc/C%sC" % i
+
+ expected_mergelist.append("app-misc/C%sC-1" % i)
+
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ ignore_mergelist_order=True,
+ all_permutations=True,
+ options = {"--backtrack": 3, '--update': True, '--deep': True},
+ success = True,
+ mergelist = expected_mergelist),
+ )
+
+ world = []
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testSlotConflictForgottenChild(self):
+ """
+ Similar to testSlotConflictMassRebuild above, but this time the rebuilds are scheduled
+ while the package causing the rebuild (the child) is not installed.
+ """
+ ebuilds = {
+
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:= app-misc/C",
+ "RDEPEND": "app-misc/B:= app-misc/C",
+ },
+
+ "app-misc/B-2" : {
+ "EAPI": "5",
+ "SLOT": "2"
+ },
+
+ "app-misc/C-1": {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:=",
+ "RDEPEND": "app-misc/B:="
+ },
+ }
+
+ installed = {
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:1/1= app-misc/C",
+ "RDEPEND": "app-misc/B:1/1= app-misc/C",
+ },
+
+ "app-misc/B-1" : {
+ "EAPI": "5",
+ "SLOT": "1"
+ },
+
+ "app-misc/C-1": {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:1/1=",
+ "RDEPEND": "app-misc/B:1/1="
+ },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ success = True,
+ mergelist = ['app-misc/A-2']),
+
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ options={"--update": True, "--deep": True},
+ success = True,
+ mergelist = ['app-misc/B-2', 'app-misc/C-1', 'app-misc/A-2']),
+ )
+
+ world = []
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testSlotConflictDepChange(self):
+ """
+ Bug 490362
+ The dependency in the ebuild was changed from slot operator to
+ no slot operator. The vdb contained the slot operator and emerge
+ would refuse to rebuild.
+ """
+ ebuilds = {
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B",
+ "RDEPEND": "app-misc/B"
+ },
+
+ "app-misc/B-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/B-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+ }
+
+ installed = {
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/B:0/1=",
+ "RDEPEND": "app-misc/B:0/1="
+ },
+ "app-misc/B-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["app-misc/B"],
+ success = True,
+ mergelist = ['app-misc/B-2', 'app-misc/A-1']),
+ )
+
+ world = ["app-misc/A"]
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testSlotConflictMixedDependencies(self):
+ """
+ Bug 487198
+ For parents with mixed >= and < dependencies, we scheduled rebuilds for the
+ >= atom, but in the end didn't install the child update because of the < atom.
+ """
+ ebuilds = {
+ "cat/slotted-lib-1" : {
+ "EAPI": "5",
+ "SLOT": "1"
+ },
+ "cat/slotted-lib-2" : {
+ "EAPI": "5",
+ "SLOT": "2"
+ },
+ "cat/slotted-lib-3" : {
+ "EAPI": "5",
+ "SLOT": "3"
+ },
+ "cat/slotted-lib-4" : {
+ "EAPI": "5",
+ "SLOT": "4"
+ },
+ "cat/slotted-lib-5" : {
+ "EAPI": "5",
+ "SLOT": "5"
+ },
+ "cat/user-1" : {
+ "EAPI": "5",
+ "DEPEND": ">=cat/slotted-lib-2:= <cat/slotted-lib-4:=",
+ "RDEPEND": ">=cat/slotted-lib-2:= <cat/slotted-lib-4:=",
+ },
+ }
+
+ installed = {
+ "cat/slotted-lib-3" : {
+ "EAPI": "5",
+ "SLOT": "3"
+ },
+ "cat/user-1" : {
+ "EAPI": "5",
+ "DEPEND": ">=cat/slotted-lib-2:3/3= <cat/slotted-lib-4:3/3=",
+ "RDEPEND": ">=cat/slotted-lib-2:3/3= <cat/slotted-lib-4:3/3=",
+ },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["cat/user"],
+ options = {"--deep": True, "--update": True},
+ success = True,
+ mergelist = []),
+ )
+
+ world = []
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ def testSlotConflictMultiRepo(self):
+ """
+ Bug 497238
+ Different repositories contain the same cpv with different sub-slots for
+ a slot operator child.
+ Downgrading the slot operator parent would result in a sub-slot change of
+ the installed package by changing the source repository.
+ Make sure we don't perform this undesirable rebuild.
+ """
+ ebuilds = {
+ "net-firewall/iptables-1.4.21::overlay" : { "EAPI": "5", "SLOT": "0/10" },
+ "sys-apps/iproute2-3.11.0::overlay" : { "EAPI": "5", "RDEPEND": "net-firewall/iptables:=" },
+
+ "net-firewall/iptables-1.4.21" : { "EAPI": "5", "SLOT": "0" },
+ "sys-apps/iproute2-3.12.0": { "EAPI": "5", "RDEPEND": "net-firewall/iptables:=" },
+ }
+
+ installed = {
+ "net-firewall/iptables-1.4.21::overlay" : { "EAPI": "5", "SLOT": "0/10" },
+ "sys-apps/iproute2-3.12.0": { "EAPI": "5", "RDEPEND": "net-firewall/iptables:0/10=" },
+ }
+
+ world = ["sys-apps/iproute2"]
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--deep": True, "--update": True, "--verbose": True},
+ success = True,
+ mergelist = []),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testSlotConflictMultiRepoUpdates(self):
+ """
+ Bug 508236 (similar to testSlotConflictMultiRepo)
+ Different repositories contain the same cpv with different sub-slots for
+ a slot operator child, for both the installed version and an updated version.
+
+ """
+ ebuilds = {
+ "net-firewall/iptables-1.4.21::overlay" : { "EAPI": "5", "SLOT": "0/10" },
+ "net-firewall/iptables-1.4.21-r1::overlay" : { "EAPI": "5", "SLOT": "0/10" },
+ "sys-apps/iproute2-3.11.0::overlay" : { "EAPI": "5", "RDEPEND": "net-firewall/iptables:=" },
+
+ "net-firewall/iptables-1.4.21" : { "EAPI": "5", "SLOT": "0" },
+ "net-firewall/iptables-1.4.21-r1" : { "EAPI": "5", "SLOT": "0" },
+ "sys-apps/iproute2-3.12.0": { "EAPI": "5", "RDEPEND": "net-firewall/iptables:=" },
+ }
+
+ installed = {
+ "net-firewall/iptables-1.4.21::overlay" : { "EAPI": "5", "SLOT": "0/10" },
+ "sys-apps/iproute2-3.12.0": { "EAPI": "5", "RDEPEND": "net-firewall/iptables:0/10=" },
+ }
+
+ world = ["sys-apps/iproute2"]
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--deep": True, "--update": True, "--verbose": True},
+ success = True,
+ mergelist = ["net-firewall/iptables-1.4.21-r1::overlay"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_slot_conflict_unsatisfied_deep_deps.py b/lib/portage/tests/resolver/test_slot_conflict_unsatisfied_deep_deps.py
new file mode 100644
index 000000000..846ba0e59
--- /dev/null
+++ b/lib/portage/tests/resolver/test_slot_conflict_unsatisfied_deep_deps.py
@@ -0,0 +1,176 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class SlotConflictUnsatisfiedDeepDepsTestCase(TestCase):
+
+ def testSlotConflictUnsatisfiedDeepDeps(self):
+
+ ebuilds = {
+ "dev-libs/A-1": { },
+ "dev-libs/A-2": { "KEYWORDS": "~x86" },
+ "dev-libs/B-1": { "DEPEND": "dev-libs/A" },
+ "dev-libs/C-1": { "DEPEND": ">=dev-libs/A-2" },
+ "dev-libs/D-1": { "DEPEND": "dev-libs/A" },
+ }
+
+ installed = {
+ "dev-libs/broken-1": {
+ "RDEPEND": "dev-libs/A dev-libs/initially-unsatisfied"
+ },
+ }
+
+ world = (
+ "dev-libs/A",
+ "dev-libs/B",
+ "dev-libs/C",
+ "dev-libs/D",
+ "dev-libs/broken"
+ )
+
+ test_cases = (
+ # Test bug #520950, where unsatisfied deps of installed
+ # packages are supposed to be ignored when they are beyond
+ # the depth requested by the user.
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B", "dev-libs/C", "dev-libs/D"],
+ all_permutations=True,
+ options={
+ "--autounmask": "y",
+ "--complete-graph": True
+ },
+ mergelist=["dev-libs/A-2", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
+ ignore_mergelist_order=True,
+ unstable_keywords=["dev-libs/A-2"],
+ unsatisfied_deps=[],
+ success=False),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options={
+ "--autounmask": "y",
+ "--complete-graph": True
+ },
+ mergelist=["dev-libs/A-2", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
+ ignore_mergelist_order=True,
+ unstable_keywords=["dev-libs/A-2"],
+ unsatisfied_deps=["dev-libs/broken"],
+ success=False),
+
+ # Test --selective with --deep = 0
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options={
+ "--autounmask": "y",
+ "--complete-graph": True,
+ "--selective": True,
+ "--deep": 0
+ },
+ mergelist=["dev-libs/A-2", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
+ ignore_mergelist_order=True,
+ unstable_keywords=["dev-libs/A-2"],
+ unsatisfied_deps=[],
+ success=False),
+
+ # Test --deep = 1
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options={
+ "--autounmask": "y",
+ "--autounmask-backtrack": "y",
+ "--complete-graph": True,
+ "--selective": True,
+ "--deep": 1
+ },
+ mergelist=["dev-libs/A-2", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
+ ignore_mergelist_order=True,
+ unstable_keywords=["dev-libs/A-2"],
+ unsatisfied_deps=["dev-libs/initially-unsatisfied"],
+ success=False),
+
+ # With --autounmask-backtrack=y:
+ #[ebuild N ~] dev-libs/A-2
+ #[ebuild N ] dev-libs/C-1
+ #[ebuild N ] dev-libs/D-1
+ #[ebuild N ] dev-libs/B-1
+ #
+ #The following keyword changes are necessary to proceed:
+ # (see "package.accept_keywords" in the portage(5) man page for more details)
+ ## required by dev-libs/C-1::test_repo
+ ## required by @selected
+ ## required by @world (argument)
+ #=dev-libs/A-2 ~x86
+ #
+ #!!! Problems have been detected with your world file
+ #!!! Please run emaint --check world
+ #
+ #
+ #!!! Ebuilds for the following packages are either all
+ #!!! masked or don't exist:
+ #dev-libs/broken
+ #
+ #emerge: there are no ebuilds to satisfy "dev-libs/initially-unsatisfied".
+ #(dependency required by "dev-libs/broken-1::test_repo" [installed])
+ #(dependency required by "@selected" [set])
+ #(dependency required by "@world" [argument])
+
+ # Without --autounmask-backtrack=y:
+ #!!! Multiple package instances within a single package slot have been pulled
+ #!!! into the dependency graph, resulting in a slot conflict:
+ #
+ #dev-libs/A:0
+ #
+ # (dev-libs/A-1:0/0::test_repo, ebuild scheduled for merge) pulled in by
+ # (no parents that aren't satisfied by other packages in this slot)
+ #
+ # (dev-libs/A-2:0/0::test_repo, ebuild scheduled for merge) pulled in by
+ # >=dev-libs/A-2 required by (dev-libs/C-1:0/0::test_repo, ebuild scheduled for merge)
+ # ^^ ^
+ #
+ #The following keyword changes are necessary to proceed:
+ # (see "package.accept_keywords" in the portage(5) man page for more details)
+ ## required by dev-libs/C-1::test_repo
+ ## required by @selected
+ ## required by @world (argument)
+ #=dev-libs/A-2 ~x86
+ #
+ #emerge: there are no ebuilds to satisfy "dev-libs/initially-unsatisfied".
+ #(dependency required by "dev-libs/broken-1::test_repo" [installed])
+ #(dependency required by "@selected" [set])
+ #(dependency required by "@world" [argument])
+
+ # Test --deep = True
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options={
+ "--autounmask": "y",
+ "--autounmask-backtrack": "y",
+ "--complete-graph": True,
+ "--selective": True,
+ "--deep": True
+ },
+ mergelist=["dev-libs/A-2", "dev-libs/B-1", "dev-libs/C-1", "dev-libs/D-1"],
+ ignore_mergelist_order=True,
+ unstable_keywords=["dev-libs/A-2"],
+ unsatisfied_deps=["dev-libs/initially-unsatisfied"],
+ success=False),
+
+ # The effects of --autounmask-backtrack are the same as in the previous test case.
+ # Both test cases can randomly succeed with --autounmask-backtrack=n, when
+ # "backtracking due to unsatisfied dep" randomly occurs before the autounmask
+ # unstable keyword change. It would be possible to eliminate backtracking here
+ # by recognizing that there are no alternatives to satisfy the dev-libs/broken
+ # atom in the world file. Then the test cases would consistently succeed with
+ # --autounmask-backtrack=n.
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed,
+ world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_slot_conflict_update.py b/lib/portage/tests/resolver/test_slot_conflict_update.py
new file mode 100644
index 000000000..f251d01f1
--- /dev/null
+++ b/lib/portage/tests/resolver/test_slot_conflict_update.py
@@ -0,0 +1,98 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotConflictUpdateTestCase(TestCase):
+
+ def testSlotConflictUpdate(self):
+
+ ebuilds = {
+
+ "app-text/podofo-0.9.2" : {
+ "EAPI": "5",
+ "RDEPEND" : "dev-util/boost-build"
+ },
+
+ "dev-cpp/libcmis-0.3.1" : {
+ "EAPI": "5",
+ "RDEPEND" : "dev-libs/boost:="
+ },
+
+ "dev-libs/boost-1.53.0" : {
+ "EAPI": "5",
+ "SLOT": "0/1.53",
+ "RDEPEND" : "=dev-util/boost-build-1.53.0"
+ },
+
+ "dev-libs/boost-1.52.0" : {
+ "EAPI": "5",
+ "SLOT": "0/1.52",
+ "RDEPEND" : "=dev-util/boost-build-1.52.0"
+ },
+
+ "dev-util/boost-build-1.53.0" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+
+ "dev-util/boost-build-1.52.0" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+
+
+ }
+
+ installed = {
+
+ "app-text/podofo-0.9.2" : {
+ "EAPI": "5",
+ "RDEPEND" : "dev-util/boost-build"
+ },
+
+ "dev-cpp/libcmis-0.3.1" : {
+ "EAPI": "5",
+ "RDEPEND" : "dev-libs/boost:0/1.52="
+ },
+
+ "dev-util/boost-build-1.52.0" : {
+ "EAPI": "5",
+ "SLOT": "0"
+ },
+
+ "dev-libs/boost-1.52.0" : {
+ "EAPI": "5",
+ "SLOT": "0/1.52",
+ "RDEPEND" : "=dev-util/boost-build-1.52.0"
+ }
+
+ }
+
+ world = ["dev-cpp/libcmis", "dev-libs/boost", "app-text/podofo"]
+
+ test_cases = (
+
+ # In order to avoid a missed update, first mask lower
+ # versions that conflict with higher versions. Note that
+ # this behavior makes SlotConflictMaskUpdateTestCase
+ # fail.
+ ResolverPlaygroundTestCase(
+ ['@world'],
+ all_permutations = True,
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ['dev-util/boost-build-1.53.0', 'dev-libs/boost-1.53.0', 'dev-cpp/libcmis-0.3.1']),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_slot_operator_autounmask.py b/lib/portage/tests/resolver/test_slot_operator_autounmask.py
new file mode 100644
index 000000000..624271b39
--- /dev/null
+++ b/lib/portage/tests/resolver/test_slot_operator_autounmask.py
@@ -0,0 +1,120 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotOperatorAutoUnmaskTestCase(TestCase):
+
+ def __init__(self, *args, **kwargs):
+ super(SlotOperatorAutoUnmaskTestCase, self).__init__(*args, **kwargs)
+
+ def testSubSlot(self):
+ ebuilds = {
+ "dev-libs/icu-49" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/49"
+ },
+ "dev-libs/icu-4.8" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/48"
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/icu:=",
+ "RDEPEND": "dev-libs/icu:=",
+ "KEYWORDS": "~x86"
+ },
+ }
+ binpkgs = {
+ "dev-libs/icu-49" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/49"
+ },
+ "dev-libs/icu-4.8" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/48"
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/icu:0/48=",
+ "RDEPEND": "dev-libs/icu:0/48="
+ },
+ }
+ installed = {
+ "dev-libs/icu-4.8" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/48"
+ },
+ "dev-libs/libxml2-2.7.8" : {
+ "EAPI": "4-slot-abi",
+ "DEPEND": "dev-libs/icu:0/48=",
+ "RDEPEND": "dev-libs/icu:0/48="
+ },
+ }
+
+ world = ["dev-libs/libxml2"]
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--autounmask": True, "--oneshot": True},
+ success = False,
+ mergelist = ["dev-libs/icu-49", "dev-libs/libxml2-2.7.8" ],
+ unstable_keywords = ['dev-libs/libxml2-2.7.8']),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["dev-libs/icu-49"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--usepkg": True},
+ success = False,
+ mergelist = ["[binary]dev-libs/icu-49", "dev-libs/libxml2-2.7.8" ],
+ unstable_keywords = ['dev-libs/libxml2-2.7.8']),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--usepkgonly": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-4.8"]),
+
+ ResolverPlaygroundTestCase(
+ ["dev-libs/icu"],
+ options = {"--oneshot": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-49"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["dev-libs/icu-49"]),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkgonly": True},
+ success = True,
+ mergelist = []),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkgonly": True, "--ignore-built-slot-operator-deps": "y"},
+ success = True,
+ mergelist = ["[binary]dev-libs/icu-49"]),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_slot_operator_complete_graph.py b/lib/portage/tests/resolver/test_slot_operator_complete_graph.py
new file mode 100644
index 000000000..1d59bcef1
--- /dev/null
+++ b/lib/portage/tests/resolver/test_slot_operator_complete_graph.py
@@ -0,0 +1,141 @@
+# Copyright 2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (
+ ResolverPlayground,
+ ResolverPlaygroundTestCase,
+)
+
+class SlotOperatorCompleteGraphTestCase(TestCase):
+
+ def testSlotOperatorCompleteGraph(self):
+
+ ebuilds = {
+ "app-misc/meta-pkg-2" : {
+ "EAPI": "6",
+ "DEPEND": "=app-misc/B-2 =app-misc/C-1 =app-misc/D-1 =dev-libs/foo-2",
+ "RDEPEND": "=app-misc/B-2 =app-misc/C-1 =app-misc/D-1 =dev-libs/foo-2",
+ },
+
+ "app-misc/meta-pkg-1" : {
+ "EAPI": "6",
+ "DEPEND": "=app-misc/B-1 =app-misc/C-1 =app-misc/D-1 =dev-libs/foo-1",
+ "RDEPEND": "=app-misc/B-1 =app-misc/C-1 =app-misc/D-1 =dev-libs/foo-1",
+ },
+
+ "app-misc/B-1" : {
+ "EAPI": "6",
+ "DEPEND": "dev-libs/foo:=",
+ "RDEPEND": "dev-libs/foo:=",
+ },
+
+ "app-misc/B-2" : {
+ "EAPI": "6",
+ "DEPEND": "dev-libs/foo:=",
+ "RDEPEND": "dev-libs/foo:=",
+ },
+
+ "app-misc/C-1" : {
+ "EAPI": "6",
+ "DEPEND": "dev-libs/foo:= app-misc/B",
+ "RDEPEND": "dev-libs/foo:= app-misc/B",
+ },
+
+ "app-misc/C-2" : {
+ "EAPI": "6",
+ "DEPEND": "dev-libs/foo:= app-misc/B",
+ "RDEPEND": "dev-libs/foo:= app-misc/B",
+ },
+
+ "app-misc/D-1" : {
+ "EAPI": "6",
+ "DEPEND": "dev-libs/foo:=",
+ "RDEPEND": "dev-libs/foo:=",
+ },
+
+ "app-misc/D-2" : {
+ "EAPI": "6",
+ "DEPEND": "dev-libs/foo:=",
+ "RDEPEND": "dev-libs/foo:=",
+ },
+
+ "dev-libs/foo-1" : {
+ "EAPI": "6",
+ "SLOT": "0/1",
+ },
+
+ "dev-libs/foo-2" : {
+ "EAPI": "6",
+ "SLOT": "0/2",
+ },
+ }
+
+ installed = {
+ "app-misc/meta-pkg-1" : {
+ "EAPI": "6",
+ "DEPEND": "=app-misc/B-1 =app-misc/C-1 =app-misc/D-1 =dev-libs/foo-1",
+ "RDEPEND": "=app-misc/B-1 =app-misc/C-1 =app-misc/D-1 =dev-libs/foo-1",
+ },
+
+ "app-misc/B-1" : {
+ "EAPI": "6",
+ "DEPEND": "dev-libs/foo:0/1=",
+ "RDEPEND": "dev-libs/foo:0/1=",
+ },
+
+ "app-misc/C-1" : {
+ "EAPI": "6",
+ "DEPEND": "dev-libs/foo:0/1= app-misc/B",
+ "RDEPEND": "dev-libs/foo:0/1= app-misc/B",
+ },
+
+ "app-misc/D-1" : {
+ "EAPI": "6",
+ "DEPEND": "dev-libs/foo:0/1=",
+ "RDEPEND": "dev-libs/foo:0/1=",
+ },
+
+ "dev-libs/foo-1" : {
+ "EAPI": "6",
+ "SLOT": "0/1",
+ },
+ }
+
+ world = (
+ "app-misc/meta-pkg",
+ )
+
+ test_cases = (
+ # Test bug 614390, where the depgraph._complete_graph
+ # method pulled in an installed package that had been
+ # scheduled for rebuild by the previous calculation,
+ # triggering an unsolved slot conflict and preventing
+ # slot operator rebuilds.
+ ResolverPlaygroundTestCase(
+ ["=app-misc/meta-pkg-2", "app-misc/C"],
+ options = {
+ "--backtrack": 5,
+ },
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = [
+ 'dev-libs/foo-2',
+ ('app-misc/D-1', 'app-misc/C-1', 'app-misc/B-2'),
+ 'app-misc/meta-pkg-2',
+ ]
+ ),
+ )
+
+ playground = ResolverPlayground(debug=False,
+ ebuilds=ebuilds, installed=installed,
+ world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ # Disable debug so that cleanup works.
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_slot_operator_exclusive_slots.py b/lib/portage/tests/resolver/test_slot_operator_exclusive_slots.py
new file mode 100644
index 000000000..689ed31d0
--- /dev/null
+++ b/lib/portage/tests/resolver/test_slot_operator_exclusive_slots.py
@@ -0,0 +1,148 @@
+# Copyright 2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (
+ ResolverPlayground,
+ ResolverPlaygroundTestCase,
+)
+
+class SlotOperatorExclusiveSlotsTestCase(TestCase):
+
+ def testSlotOperatorExclusiveSlots(self):
+
+ ebuilds = {
+
+ "media-libs/mesa-17.0.1" : {
+ "EAPI": "6",
+ "SLOT": "0",
+ "RDEPEND": "<sys-devel/llvm-5:="
+ },
+
+ "sys-devel/clang-4.0.0" : {
+ "EAPI": "6",
+ "SLOT": "4",
+ "RDEPEND": ("~sys-devel/llvm-4.0.0:4= "
+ "!sys-devel/llvm:0 !sys-devel/clang:0"),
+ },
+
+ "sys-devel/clang-3.9.1-r100" : {
+ "EAPI": "6",
+ "SLOT": "0/3.9.1",
+ "RDEPEND": "~sys-devel/llvm-3.9.1",
+ },
+
+ "sys-devel/llvm-4.0.0" : {
+ "EAPI": "6",
+ "SLOT": "4",
+ "RDEPEND": "!sys-devel/llvm:0",
+ },
+
+ "sys-devel/llvm-3.9.1" : {
+ "EAPI": "6",
+ "SLOT": "0/3.91",
+ "RDEPEND": "!sys-devel/llvm:0",
+ "PDEPEND": "=sys-devel/clang-3.9.1-r100",
+ },
+
+ }
+
+ installed = {
+
+ "media-libs/mesa-17.0.1" : {
+ "EAPI": "6",
+ "SLOT": "0",
+ "RDEPEND": "<sys-devel/llvm-5:0/3.9.1="
+ },
+
+ "sys-devel/clang-3.9.1-r100" : {
+ "EAPI": "6",
+ "SLOT": "0/3.9.1",
+ "RDEPEND": "~sys-devel/llvm-3.9.1",
+ },
+
+ "sys-devel/llvm-3.9.1" : {
+ "EAPI": "6",
+ "SLOT": "0/3.9.1",
+ "RDEPEND": "!sys-devel/llvm:0",
+ "PDEPEND": "=sys-devel/clang-3.9.1-r100",
+ },
+
+ }
+
+ world = ["sys-devel/clang", "media-libs/mesa"]
+
+ test_cases = (
+
+ # Test bug #612772, where slot operator rebuilds are not
+ # properly triggered (for things like mesa) during an
+ # llvm:0 to llvm:4 upgrade with clang, resulting in
+ # unsolved blockers.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = [
+ 'sys-devel/llvm-4.0.0',
+ 'media-libs/mesa-17.0.1',
+ (
+ 'sys-devel/clang-4.0.0',
+ '[uninstall]sys-devel/llvm-3.9.1',
+ '!sys-devel/llvm:0',
+ '[uninstall]sys-devel/clang-3.9.1-r100',
+ '!sys-devel/clang:0',
+ )
+ ],
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+
+ world = ["media-libs/mesa"]
+
+ test_cases = (
+
+ # Test bug #612874, where a direct circular dependency
+ # between llvm-3.9.1 and clang-3.9.1-r100 causes a
+ # missed update from llvm:0 to llvm:4. Since llvm:4 does
+ # not have a dependency on clang, the upgrade from llvm:0
+ # to llvm:4 makes the installed sys-devel/clang-3.9.1-r100
+ # instance eligible for removal by emerge --depclean, which
+ # explains why clang does not appear in the mergelist.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = [
+ 'sys-devel/llvm-4.0.0',
+ (
+ 'media-libs/mesa-17.0.1',
+ '[uninstall]sys-devel/llvm-3.9.1',
+ '!sys-devel/llvm:0',
+ )
+ ],
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_slot_operator_rebuild.py b/lib/portage/tests/resolver/test_slot_operator_rebuild.py
new file mode 100644
index 000000000..381683331
--- /dev/null
+++ b/lib/portage/tests/resolver/test_slot_operator_rebuild.py
@@ -0,0 +1,121 @@
+# Copyright 2014-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotOperatorRebuildTestCase(TestCase):
+
+ def testSlotOperatorRebuild(self):
+
+ ebuilds = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/A:="
+ },
+
+ "app-misc/C-0" : {
+ "EAPI": "5",
+ "RDEPEND": "|| ( app-misc/X app-misc/A:= )"
+ },
+
+ "app-misc/D-1" : {
+ "EAPI": "6",
+ "RDEPEND": "app-misc/E",
+ },
+
+ "app-misc/E-1" : {
+ "EAPI": "6",
+ "RDEPEND": "app-misc/F:=",
+ },
+
+ "app-misc/F-1" : {
+ "EAPI": "6",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/F-2" : {
+ "EAPI": "6",
+ "SLOT": "0/2"
+ },
+ }
+
+ binpkgs = {
+ "app-misc/E-1" : {
+ "EAPI": "6",
+ "RDEPEND": "app-misc/F:0/1=",
+ },
+ }
+
+ installed = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/A:0/1="
+ },
+
+ "app-misc/C-0" : {
+ "EAPI": "5",
+ "RDEPEND": "|| ( app-misc/X app-misc/A:0/1= )"
+ },
+
+ "app-misc/F-2" : {
+ "EAPI": "6",
+ "SLOT": "0/2"
+ },
+ }
+
+ world = ["app-misc/B", "app-misc/C"]
+
+ test_cases = (
+
+ # Test bug #522652, where the unsatisfiable app-misc/X
+ # atom is selected, and the dependency is placed into
+ # _initially_unsatisfied_deps where it is ignored, causing
+ # the app-misc/C-0 rebuild to be missed.
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ options = {"--dynamic-deps": "n"},
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = ['app-misc/A-2', ('app-misc/B-0', 'app-misc/C-0')]
+ ),
+
+ # Test bug #652938, where a binary package built against an
+ # older subslot triggered downgrade of an installed package.
+ # In this case we want to reject the app-misc/E-1 binary
+ # package, and rebuild it against the installed instance of
+ # app-misc/F.
+ ResolverPlaygroundTestCase(
+ ["app-misc/D"],
+ options = {'--usepkg': True},
+ success = True,
+ mergelist = ['app-misc/E-1', 'app-misc/D-1']
+ ),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_slot_operator_required_use.py b/lib/portage/tests/resolver/test_slot_operator_required_use.py
new file mode 100644
index 000000000..9cc6dbad4
--- /dev/null
+++ b/lib/portage/tests/resolver/test_slot_operator_required_use.py
@@ -0,0 +1,72 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotOperatorRequiredUseTestCase(TestCase):
+
+ def testSlotOperatorRequiredUse(self):
+
+ ebuilds = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/A:=",
+ "IUSE": "x y",
+ "REQUIRED_USE": "|| ( x y )"
+ },
+
+ }
+
+ installed = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/A:0/1=",
+ "IUSE": "x y",
+ "USE": "x"
+ },
+
+ }
+
+ world = ["app-misc/B"]
+
+ test_cases = (
+
+ # bug 523048
+ # Ensure that unsatisfied REQUIRED_USE is reported when
+ # it blocks necessary slot-operator rebuilds.
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ success = False,
+ required_use_unsatisfied = ['app-misc/B:0']
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_slot_operator_reverse_deps.py b/lib/portage/tests/resolver/test_slot_operator_reverse_deps.py
new file mode 100644
index 000000000..ce614a4dc
--- /dev/null
+++ b/lib/portage/tests/resolver/test_slot_operator_reverse_deps.py
@@ -0,0 +1,113 @@
+# Copyright 2016 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (
+ ResolverPlayground,
+ ResolverPlaygroundTestCase,
+)
+
+class SlotOperatorReverseDepsTestCase(TestCase):
+
+ def testSlotOperatorReverseDeps(self):
+
+ ebuilds = {
+
+ "media-libs/mesa-11.2.2" : {
+ "EAPI": "6",
+ "SLOT": "0",
+ "RDEPEND": ">=sys-devel/llvm-3.6.0:="
+ },
+
+ "sys-devel/clang-3.7.1-r100" : {
+ "EAPI": "6",
+ "SLOT": "0/3.7",
+ "RDEPEND": "~sys-devel/llvm-3.7.1"
+ },
+
+ "sys-devel/clang-3.8.0-r100" : {
+ "EAPI": "6",
+ "SLOT": "0/3.8",
+ "RDEPEND": "~sys-devel/llvm-3.8.0"
+ },
+
+ "sys-devel/llvm-3.7.1-r2" : {
+ "EAPI": "6",
+ "SLOT": "0/3.7.1",
+ "PDEPEND": "=sys-devel/clang-3.7.1-r100"
+ },
+
+ "sys-devel/llvm-3.8.0-r2" : {
+ "EAPI": "6",
+ "SLOT": "0/3.8.0",
+ "PDEPEND": "=sys-devel/clang-3.8.0-r100"
+ },
+
+ }
+
+ installed = {
+
+ "media-libs/mesa-11.2.2" : {
+ "EAPI": "6",
+ "SLOT": "0",
+ "RDEPEND": ">=sys-devel/llvm-3.6.0:0/3.7.1="
+ },
+
+ "sys-devel/clang-3.7.1-r100" : {
+ "EAPI": "6",
+ "SLOT": "0/3.7",
+ "RDEPEND": "~sys-devel/llvm-3.7.1"
+ },
+
+ "sys-devel/llvm-3.7.1-r2" : {
+ "EAPI": "6",
+ "SLOT": "0/3.7.1",
+ "PDEPEND": "=sys-devel/clang-3.7.1-r100"
+ },
+
+ }
+
+ world = ["media-libs/mesa"]
+
+ test_cases = (
+
+ # Test bug #584626, where an llvm update is missed due to
+ # the check_reverse_dependencies function seeing that
+ # updating llvm will break a dependency of the installed
+ # version of clang (though a clang update is available).
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = [
+ 'sys-devel/llvm-3.8.0-r2',
+ 'sys-devel/clang-3.8.0-r100',
+ 'media-libs/mesa-11.2.2',
+ ],
+ ),
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {
+ "--update": True,
+ "--deep": True,
+ "--ignore-built-slot-operator-deps": "y",
+ },
+ success = True,
+ mergelist = [
+ 'sys-devel/llvm-3.8.0-r2',
+ 'sys-devel/clang-3.8.0-r100',
+ ],
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_slot_operator_runtime_pkg_mask.py b/lib/portage/tests/resolver/test_slot_operator_runtime_pkg_mask.py
new file mode 100644
index 000000000..0a5a7fa78
--- /dev/null
+++ b/lib/portage/tests/resolver/test_slot_operator_runtime_pkg_mask.py
@@ -0,0 +1,136 @@
+# Copyright 2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (
+ ResolverPlayground,
+ ResolverPlaygroundTestCase,
+)
+
+class SlotOperatorRuntimePkgMaskTestCase(TestCase):
+
+ def testSlotOperatorRuntimePkgMask(self):
+
+ ebuilds = {
+ "app-misc/meta-pkg-2" : {
+ "EAPI": "6",
+ "DEPEND": "=app-misc/B-2 =app-misc/C-1 =app-misc/D-1 =dev-libs/foo-2",
+ "RDEPEND": "=app-misc/B-2 =app-misc/C-1 =app-misc/D-1 =dev-libs/foo-2",
+ },
+
+ "app-misc/meta-pkg-1" : {
+ "EAPI": "6",
+ "DEPEND": "=app-misc/B-1 =app-misc/C-1 =app-misc/D-1 =dev-libs/foo-1",
+ "RDEPEND": "=app-misc/B-1 =app-misc/C-1 =app-misc/D-1 =dev-libs/foo-1",
+ },
+
+ "app-misc/B-1" : {
+ "EAPI": "6",
+ "DEPEND": "dev-libs/foo:=",
+ "RDEPEND": "dev-libs/foo:=",
+ },
+
+ "app-misc/B-2" : {
+ "EAPI": "6",
+ "DEPEND": "dev-libs/foo:=",
+ "RDEPEND": "dev-libs/foo:=",
+ },
+
+ "app-misc/C-1" : {
+ "EAPI": "6",
+ "DEPEND": "dev-libs/foo:=",
+ "RDEPEND": "dev-libs/foo:=",
+ },
+
+ "app-misc/C-2" : {
+ "EAPI": "6",
+ "DEPEND": "dev-libs/foo:=",
+ "RDEPEND": "dev-libs/foo:=",
+ },
+
+ "app-misc/D-1" : {
+ "EAPI": "6",
+ "DEPEND": "dev-libs/foo:=",
+ "RDEPEND": "dev-libs/foo:=",
+ },
+
+ "app-misc/D-2" : {
+ "EAPI": "6",
+ "DEPEND": "dev-libs/foo:=",
+ "RDEPEND": "dev-libs/foo:=",
+ },
+
+ "dev-libs/foo-1" : {
+ "EAPI": "6",
+ "SLOT": "0/1",
+ },
+
+ "dev-libs/foo-2" : {
+ "EAPI": "6",
+ "SLOT": "0/2",
+ },
+ }
+
+ installed = {
+ "app-misc/meta-pkg-1" : {
+ "EAPI": "6",
+ "DEPEND": "=app-misc/B-1 =app-misc/C-1 =app-misc/D-1 =dev-libs/foo-1",
+ "RDEPEND": "=app-misc/B-1 =app-misc/C-1 =app-misc/D-1 =dev-libs/foo-1",
+ },
+
+ "app-misc/B-1" : {
+ "EAPI": "6",
+ "DEPEND": "dev-libs/foo:0/1=",
+ "RDEPEND": "dev-libs/foo:0/1=",
+ },
+
+ "app-misc/C-1" : {
+ "EAPI": "6",
+ "DEPEND": "dev-libs/foo:0/1=",
+ "RDEPEND": "dev-libs/foo:0/1=",
+ },
+
+ "app-misc/D-1" : {
+ "EAPI": "6",
+ "DEPEND": "dev-libs/foo:0/1=",
+ "RDEPEND": "dev-libs/foo:0/1=",
+ },
+
+ "dev-libs/foo-1" : {
+ "EAPI": "6",
+ "SLOT": "0/1",
+ },
+ }
+
+ world = (
+ "app-misc/meta-pkg",
+ )
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["=app-misc/meta-pkg-2"],
+ options = {
+ "--backtrack": 5,
+ },
+ success = True,
+ ambiguous_merge_order = True,
+ mergelist = [
+ 'dev-libs/foo-2',
+ ('app-misc/D-1', 'app-misc/C-1', 'app-misc/B-2'),
+ 'app-misc/meta-pkg-2',
+ ]
+ ),
+ )
+
+ playground = ResolverPlayground(debug=False,
+ ebuilds=ebuilds, installed=installed,
+ world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ # Disable debug so that cleanup works.
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_slot_operator_unsatisfied.py b/lib/portage/tests/resolver/test_slot_operator_unsatisfied.py
new file mode 100644
index 000000000..e3b53d159
--- /dev/null
+++ b/lib/portage/tests/resolver/test_slot_operator_unsatisfied.py
@@ -0,0 +1,70 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotOperatorUnsatisfiedTestCase(TestCase):
+
+ def testSlotOperatorUnsatisfied(self):
+
+ ebuilds = {
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1"
+ },
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/A:=",
+ "RDEPEND": "app-misc/A:="
+ },
+ }
+
+ installed = {
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "DEPEND": "app-misc/A:0/1=",
+ "RDEPEND": "app-misc/A:0/1="
+ },
+ }
+
+ world = ["app-misc/B"]
+
+ test_cases = (
+
+ # Demonstrate bug #439694, where a broken slot-operator
+ # sub-slot dependency needs to trigger a rebuild.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ["app-misc/B-0"]),
+
+ # This doesn't trigger a rebuild, since there's no version
+ # change to trigger complete graph mode, and initially
+ # unsatisfied deps are ignored in complete graph mode anyway.
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ options = {"--oneshot": True},
+ success = True,
+ mergelist = ["app-misc/A-2"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_slot_operator_unsolved.py b/lib/portage/tests/resolver/test_slot_operator_unsolved.py
new file mode 100644
index 000000000..c19783ddf
--- /dev/null
+++ b/lib/portage/tests/resolver/test_slot_operator_unsolved.py
@@ -0,0 +1,88 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SlotOperatorUnsolvedTestCase(TestCase):
+ """
+ Demonstrate bug #456340, where an unsolved circular dependency
+ interacts with an unsatisfied built slot-operator dep.
+ """
+ def __init__(self, *args, **kwargs):
+ super(SlotOperatorUnsolvedTestCase, self).__init__(*args, **kwargs)
+
+ def testSlotOperatorUnsolved(self):
+ ebuilds = {
+ "dev-libs/icu-50.1.2" : {
+ "EAPI": "5",
+ "SLOT": "0/50.1.2"
+ },
+ "net-libs/webkit-gtk-1.10.2-r300" : {
+ "EAPI": "5",
+ "DEPEND": ">=dev-libs/icu-3.8.1-r1:=",
+ "RDEPEND": ">=dev-libs/icu-3.8.1-r1:="
+ },
+ "dev-ruby/rdoc-3.12.1" : {
+ "EAPI": "5",
+ "IUSE": "test",
+ "DEPEND": "test? ( >=dev-ruby/hoe-2.7.0 )",
+ },
+ "dev-ruby/hoe-2.13.0" : {
+ "EAPI": "5",
+ "IUSE": "test",
+ "DEPEND": "test? ( >=dev-ruby/rdoc-3.10 )",
+ "RDEPEND": "test? ( >=dev-ruby/rdoc-3.10 )",
+ },
+ }
+
+ binpkgs = {
+ "net-libs/webkit-gtk-1.10.2-r300" : {
+ "EAPI": "5",
+ "DEPEND": ">=dev-libs/icu-3.8.1-r1:0/50=",
+ "RDEPEND": ">=dev-libs/icu-3.8.1-r1:0/50="
+ },
+ }
+
+ installed = {
+ "dev-libs/icu-50.1.2" : {
+ "EAPI": "5",
+ "SLOT": "0/50.1.2"
+ },
+ "net-libs/webkit-gtk-1.10.2-r300" : {
+ "EAPI": "5",
+ "DEPEND": ">=dev-libs/icu-3.8.1-r1:0/50=",
+ "RDEPEND": ">=dev-libs/icu-3.8.1-r1:0/50="
+ },
+ }
+
+ user_config = {
+ "make.conf" : ("FEATURES=test",)
+ }
+
+ world = ["net-libs/webkit-gtk", "dev-ruby/hoe"]
+
+ test_cases = (
+
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--usepkg": True},
+ circular_dependency_solutions = {
+ 'dev-ruby/hoe-2.13.0': frozenset([frozenset([('test', False)])]),
+ 'dev-ruby/rdoc-3.12.1': frozenset([frozenset([('test', False)])])
+ },
+ success = False
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, binpkgs=binpkgs,
+ installed=installed, user_config=user_config,
+ world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_slot_operator_update_probe_parent_downgrade.py b/lib/portage/tests/resolver/test_slot_operator_update_probe_parent_downgrade.py
new file mode 100644
index 000000000..2ec15b602
--- /dev/null
+++ b/lib/portage/tests/resolver/test_slot_operator_update_probe_parent_downgrade.py
@@ -0,0 +1,68 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import \
+ ResolverPlayground, ResolverPlaygroundTestCase
+
+class SlotOperatorUpdateProbeParentDowngradeTestCase(TestCase):
+
+ def testSlotOperatorUpdateProbeParentDowngrade(self):
+
+ ebuilds = {
+ "net-nds/openldap-2.4.40-r3": {
+ "EAPI": "5",
+ "RDEPEND": "<sys-libs/db-6.0:= " + \
+ "|| ( sys-libs/db:5.3 sys-libs/db:5.1 )"
+ },
+ "net-nds/openldap-2.4.40": {
+ "EAPI": "5",
+ "RDEPEND": "sys-libs/db"
+ },
+ "sys-libs/db-6.0": {
+ "SLOT": "6.0",
+ },
+ "sys-libs/db-5.3": {
+ "SLOT": "5.3",
+ },
+ }
+
+ installed = {
+ "net-nds/openldap-2.4.40-r3": {
+ "EAPI": "5",
+ "RDEPEND": "<sys-libs/db-6.0:5.3/5.3= " + \
+ "|| ( sys-libs/db:5.3 sys-libs/db:5.1 )"
+ },
+ "sys-libs/db-6.0": {
+ "SLOT": "6.0",
+ },
+ "sys-libs/db-5.3": {
+ "SLOT": "5.3",
+ },
+ }
+
+ world = (
+ "net-nds/openldap",
+ )
+
+ test_cases = (
+ # bug 528610 - openldap rebuild was triggered
+ # inappropriately, due to slot_operator_update_probe
+ # selecting an inappropriate replacement parent of
+ # a lower version than desired.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ success = True,
+ options = { "--update": True, "--deep": True },
+ mergelist = []),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success,
+ True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_solve_non_slot_operator_slot_conflicts.py b/lib/portage/tests/resolver/test_solve_non_slot_operator_slot_conflicts.py
new file mode 100644
index 000000000..c6024f404
--- /dev/null
+++ b/lib/portage/tests/resolver/test_solve_non_slot_operator_slot_conflicts.py
@@ -0,0 +1,75 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
+ ResolverPlaygroundTestCase)
+
+class SolveNonSlotOperatorSlotConflictsTestCase(TestCase):
+
+ def testSolveNonSlotOperatorSlotConflicts(self):
+
+ ebuilds = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1",
+ "PDEPEND": "app-misc/B"
+ },
+
+ "app-misc/A-2" : {
+ "EAPI": "5",
+ "SLOT": "0/2",
+ "PDEPEND": "app-misc/B"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/A:="
+ },
+
+ }
+
+ installed = {
+
+ "app-misc/A-1" : {
+ "EAPI": "5",
+ "SLOT": "0/1",
+ "PDEPEND": "app-misc/B"
+ },
+
+ "app-misc/B-0" : {
+ "EAPI": "5",
+ "RDEPEND": "app-misc/A:0/1="
+ },
+
+ }
+
+ world = ["app-misc/A"]
+
+ test_cases = (
+
+ # bug 522084
+ # In this case, _solve_non_slot_operator_slot_conflicts
+ # removed both versions of app-misc/A from the graph, since
+ # they didn't have any non-conflict parents (except for
+ # @selected which matched both instances). The result was
+ # a missed update.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True},
+ success = True,
+ mergelist = ['app-misc/A-2', 'app-misc/B-0']
+ ),
+
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ installed=installed, world=world, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_targetroot.py b/lib/portage/tests/resolver/test_targetroot.py
new file mode 100644
index 000000000..db6c60de3
--- /dev/null
+++ b/lib/portage/tests/resolver/test_targetroot.py
@@ -0,0 +1,85 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class TargetRootTestCase(TestCase):
+
+ def testTargetRoot(self):
+ ebuilds = {
+ "dev-lang/python-3.2": {
+ "EAPI": "5-hdepend",
+ "IUSE": "targetroot",
+ "HDEPEND": "targetroot? ( ~dev-lang/python-3.2 )",
+ },
+ "dev-libs/A-1": {
+ "EAPI": "4",
+ "DEPEND": "dev-libs/B",
+ "RDEPEND": "dev-libs/C",
+ },
+ "dev-libs/B-1": {},
+ "dev-libs/C-1": {},
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-lang/python"],
+ options = {},
+ success = True,
+ mergelist = ["dev-lang/python-3.2", "dev-lang/python-3.2{targetroot}"]),
+ ResolverPlaygroundTestCase(
+ ["dev-lang/python"],
+ options = {"--root-deps": True},
+ success = True,
+ mergelist = ["dev-lang/python-3.2", "dev-lang/python-3.2{targetroot}"]),
+ ResolverPlaygroundTestCase(
+ ["dev-lang/python"],
+ options = {"--root-deps": "rdeps"},
+ success = True,
+ mergelist = ["dev-lang/python-3.2", "dev-lang/python-3.2{targetroot}"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {},
+ ambiguous_merge_order = True,
+ success = True,
+ mergelist = [("dev-libs/B-1", "dev-libs/C-1{targetroot}"), "dev-libs/A-1{targetroot}"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--root-deps": True},
+ ambiguous_merge_order = True,
+ success = True,
+ mergelist = [("dev-libs/B-1{targetroot}", "dev-libs/C-1{targetroot}"), "dev-libs/A-1{targetroot}"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--root-deps": "rdeps"},
+ ambiguous_merge_order = True,
+ success = True,
+ mergelist = [("dev-libs/C-1{targetroot}"), "dev-libs/A-1{targetroot}"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, targetroot=True,
+ debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-lang/python"],
+ options = {},
+ success = True,
+ mergelist = ["dev-lang/python-3.2"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, targetroot=False,
+ debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_unpack_dependencies.py b/lib/portage/tests/resolver/test_unpack_dependencies.py
new file mode 100644
index 000000000..cfceff4b1
--- /dev/null
+++ b/lib/portage/tests/resolver/test_unpack_dependencies.py
@@ -0,0 +1,65 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class UnpackDependenciesTestCase(TestCase):
+ def testUnpackDependencies(self):
+ distfiles = {
+ "A-1.tar.gz": b"binary\0content",
+ "B-1.TAR.XZ": b"binary\0content",
+ "B-docs-1.tar.bz2": b"binary\0content",
+ "C-1.TAR.XZ": b"binary\0content",
+ "C-docs-1.tar.bz2": b"binary\0content",
+ }
+
+ ebuilds = {
+ "dev-libs/A-1": {"SRC_URI": "A-1.tar.gz", "EAPI": "5-progress"},
+ "dev-libs/B-1": {"IUSE": "doc", "SRC_URI": "B-1.TAR.XZ doc? ( B-docs-1.tar.bz2 )", "EAPI": "5-progress"},
+ "dev-libs/C-1": {"IUSE": "doc", "SRC_URI": "C-1.TAR.XZ doc? ( C-docs-1.tar.bz2 )", "EAPI": "5-progress"},
+ "app-arch/bzip2-1": {},
+ "app-arch/gzip-1": {},
+ "app-arch/tar-1": {},
+ "app-arch/xz-utils-1": {},
+ }
+
+ repo_configs = {
+ "test_repo": {
+ "unpack_dependencies/5-progress": (
+ "tar.bz2 app-arch/tar app-arch/bzip2",
+ "tar.gz app-arch/tar app-arch/gzip",
+ "tar.xz app-arch/tar app-arch/xz-utils",
+ ),
+ },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ success = True,
+ ignore_mergelist_order = True,
+ mergelist = ["app-arch/tar-1", "app-arch/gzip-1", "dev-libs/A-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ success = True,
+ ignore_mergelist_order = True,
+ mergelist = ["app-arch/tar-1", "app-arch/xz-utils-1", "dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ success = True,
+ ignore_mergelist_order = True,
+ mergelist = ["app-arch/tar-1", "app-arch/xz-utils-1", "app-arch/bzip2-1", "dev-libs/C-1"]),
+ )
+
+ user_config = {
+ "package.use": ("dev-libs/C doc",)
+ }
+
+ playground = ResolverPlayground(distfiles=distfiles, ebuilds=ebuilds, repo_configs=repo_configs, user_config=user_config)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_use_aliases.py b/lib/portage/tests/resolver/test_use_aliases.py
new file mode 100644
index 000000000..7c2debbb1
--- /dev/null
+++ b/lib/portage/tests/resolver/test_use_aliases.py
@@ -0,0 +1,131 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class UseAliasesTestCase(TestCase):
+ def testUseAliases(self):
+ ebuilds = {
+ "dev-libs/A-1": {"DEPEND": "dev-libs/K[x]", "RDEPEND": "dev-libs/K[x]", "EAPI": "5"},
+ "dev-libs/B-1": {"DEPEND": "dev-libs/L[x]", "RDEPEND": "dev-libs/L[x]", "EAPI": "5"},
+ "dev-libs/C-1": {"DEPEND": "dev-libs/M[xx]", "RDEPEND": "dev-libs/M[xx]", "EAPI": "5"},
+ "dev-libs/D-1": {"DEPEND": "dev-libs/N[-x]", "RDEPEND": "dev-libs/N[-x]", "EAPI": "5"},
+ "dev-libs/E-1": {"DEPEND": "dev-libs/O[-xx]", "RDEPEND": "dev-libs/O[-xx]", "EAPI": "5"},
+ "dev-libs/F-1": {"DEPEND": "dev-libs/P[-xx]", "RDEPEND": "dev-libs/P[-xx]", "EAPI": "5"},
+ "dev-libs/G-1": {"DEPEND": "dev-libs/Q[x-y]", "RDEPEND": "dev-libs/Q[x-y]", "EAPI": "5"},
+ "dev-libs/H-1": {"DEPEND": "=dev-libs/R-1*[yy]", "RDEPEND": "=dev-libs/R-1*[yy]", "EAPI": "5"},
+ "dev-libs/H-2": {"DEPEND": "=dev-libs/R-2*[yy]", "RDEPEND": "=dev-libs/R-2*[yy]", "EAPI": "5"},
+ "dev-libs/I-1": {"DEPEND": "dev-libs/S[y-z]", "RDEPEND": "dev-libs/S[y-z]", "EAPI": "5"},
+ "dev-libs/I-2": {"DEPEND": "dev-libs/S[y_z]", "RDEPEND": "dev-libs/S[y_z]", "EAPI": "5"},
+ "dev-libs/J-1": {"DEPEND": "dev-libs/T[x]", "RDEPEND": "dev-libs/T[x]", "EAPI": "5"},
+ "dev-libs/K-1": {"IUSE": "+x", "EAPI": "5"},
+ "dev-libs/K-2::repo1": {"IUSE": "+X", "EAPI": "5-progress"},
+ "dev-libs/L-1": {"IUSE": "+x", "EAPI": "5"},
+ "dev-libs/M-1::repo1": {"IUSE": "X", "EAPI": "5-progress"},
+ "dev-libs/N-1": {"IUSE": "x", "EAPI": "5"},
+ "dev-libs/N-2::repo1": {"IUSE": "X", "EAPI": "5-progress"},
+ "dev-libs/O-1": {"IUSE": "x", "EAPI": "5"},
+ "dev-libs/P-1::repo1": {"IUSE": "+X", "EAPI": "5-progress"},
+ "dev-libs/Q-1::repo2": {"IUSE": "X.Y", "EAPI": "5-progress"},
+ "dev-libs/R-1::repo1": {"IUSE": "Y", "EAPI": "5-progress"},
+ "dev-libs/R-2::repo1": {"IUSE": "y", "EAPI": "5-progress"},
+ "dev-libs/S-1::repo2": {"IUSE": "Y.Z", "EAPI": "5-progress"},
+ "dev-libs/S-2::repo2": {"IUSE": "Y.Z", "EAPI": "5-progress"},
+ "dev-libs/T-1::repo1": {"IUSE": "+X", "EAPI": "5"},
+ }
+
+ installed = {
+ "dev-libs/L-2::repo1": {"IUSE": "+X", "USE": "X", "EAPI": "5-progress"},
+ "dev-libs/O-2::repo1": {"IUSE": "X", "USE": "", "EAPI": "5-progress"},
+ }
+
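+ # use.aliases lines map a real USE flag to its aliases, e.g. "X x xx"
+ # makes "x" and "xx" aliases for "X"; package.use.aliases entries take
+ # the same form with a leading package atom that restricts their scope.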
+ repo_configs = {
+ "repo1": {
+ "use.aliases": ("X x xx",),
+ "package.use.aliases": (
+ "=dev-libs/R-1* Y yy",
+ "=dev-libs/R-2* y yy",
+ )
+ },
+ "repo2": {
+ "eapi": ("5-progress",),
+ "use.aliases": ("X.Y x-y",),
+ "package.use.aliases": (
+ "=dev-libs/S-1* Y.Z y-z",
+ "=dev-libs/S-2* Y.Z y_z",
+ ),
+ },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ success = True,
+ mergelist = ["dev-libs/K-2", "dev-libs/A-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ success = True,
+ mergelist = ["dev-libs/B-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/C"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/M-1", "dev-libs/C-1"],
+ use_changes = {"dev-libs/M-1": {"X": True}}),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/D"],
+ success = True,
+ mergelist = ["dev-libs/N-2", "dev-libs/D-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/E"],
+ success = True,
+ mergelist = ["dev-libs/E-1"]),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/F"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/P-1", "dev-libs/F-1"],
+ use_changes = {"dev-libs/P-1": {"X": False}}),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/G"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/Q-1", "dev-libs/G-1"],
+ use_changes = {"dev-libs/Q-1": {"X.Y": True}}),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/H-1*"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/R-1", "dev-libs/H-1"],
+ use_changes = {"dev-libs/R-1": {"Y": True}}),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/H-2*"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/R-2", "dev-libs/H-2"],
+ use_changes = {"dev-libs/R-2": {"y": True}}),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/I-1*"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/S-1", "dev-libs/I-1"],
+ use_changes = {"dev-libs/S-1": {"Y.Z": True}}),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/I-2*"],
+ options = {"--autounmask": True},
+ success = False,
+ mergelist = ["dev-libs/S-2", "dev-libs/I-2"],
+ use_changes = {"dev-libs/S-2": {"Y.Z": True}}),
+ ResolverPlaygroundTestCase(
+ ["dev-libs/J"],
+ success = False),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, installed=installed, repo_configs=repo_configs)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_use_dep_defaults.py b/lib/portage/tests/resolver/test_use_dep_defaults.py
new file mode 100644
index 000000000..7d171066e
--- /dev/null
+++ b/lib/portage/tests/resolver/test_use_dep_defaults.py
@@ -0,0 +1,40 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class UseDepDefaultsTestCase(TestCase):
+
+ def testUseDepDefaults(self):
+
+ ebuilds = {
+ "dev-libs/A-1": { "DEPEND": "dev-libs/B[foo]", "RDEPEND": "dev-libs/B[foo]", "EAPI": "2" },
+ "dev-libs/A-2": { "DEPEND": "dev-libs/B[foo(+)]", "RDEPEND": "dev-libs/B[foo(+)]", "EAPI": "4" },
+ "dev-libs/A-3": { "DEPEND": "dev-libs/B[foo(-)]", "RDEPEND": "dev-libs/B[foo(-)]", "EAPI": "4" },
+ "dev-libs/B-1": { "IUSE": "+foo", "EAPI": "1" },
+ "dev-libs/B-2": {},
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-1"],
+ success = True,
+ mergelist = ["dev-libs/B-1", "dev-libs/A-1"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-2"],
+ success = True,
+ mergelist = ["dev-libs/B-2", "dev-libs/A-2"]),
+ ResolverPlaygroundTestCase(
+ ["=dev-libs/A-3"],
+ success = True,
+ mergelist = ["dev-libs/B-1", "dev-libs/A-3"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_useflags.py b/lib/portage/tests/resolver/test_useflags.py
new file mode 100644
index 000000000..0a5f3b3ff
--- /dev/null
+++ b/lib/portage/tests/resolver/test_useflags.py
@@ -0,0 +1,78 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class UseFlagsTestCase(TestCase):
+
+ def testUseFlags(self):
+ ebuilds = {
+ "dev-libs/A-1": { "IUSE": "X", },
+ "dev-libs/B-1": { "IUSE": "X Y", },
+ }
+
+ installed = {
+ "dev-libs/A-1": { "IUSE": "X", },
+ "dev-libs/B-1": { "IUSE": "X", },
+ }
+
+ binpkgs = installed
+
+ user_config = {
+ "package.use": ( "dev-libs/A X", ),
+ "use.force": ( "Y", ),
+ }
+
+ test_cases = (
+ #default: don't reinstall on use flag change
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--selective": True, "--usepkg": True},
+ success = True,
+ mergelist = []),
+
+ #default: respect use flags for binpkgs
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--usepkg": True},
+ success = True,
+ mergelist = ["dev-libs/A-1"]),
+
+ #--binpkg-respect-use=n: use binpkgs with different use flags
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--binpkg-respect-use": "n", "--usepkg": True},
+ success = True,
+ mergelist = ["[binary]dev-libs/A-1"]),
+
+ #--reinstall=changed-use: reinstall if use flag changed
+ ResolverPlaygroundTestCase(
+ ["dev-libs/A"],
+ options = {"--reinstall": "changed-use", "--usepkg": True},
+ success = True,
+ mergelist = ["dev-libs/A-1"]),
+
+ #--reinstall=changed-use: don't reinstall on new use flag
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options = {"--reinstall": "changed-use", "--usepkg": True},
+ success = True,
+ mergelist = []),
+
+ #--newuse: reinstall on new use flag
+ ResolverPlaygroundTestCase(
+ ["dev-libs/B"],
+ options = {"--newuse": True, "--usepkg": True},
+ success = True,
+ mergelist = ["dev-libs/B-1"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ binpkgs=binpkgs, installed=installed, user_config=user_config)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_virtual_minimize_children.py b/lib/portage/tests/resolver/test_virtual_minimize_children.py
new file mode 100644
index 000000000..b566cb592
--- /dev/null
+++ b/lib/portage/tests/resolver/test_virtual_minimize_children.py
@@ -0,0 +1,287 @@
+# Copyright 2017-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import (
+ ResolverPlayground,
+ ResolverPlaygroundTestCase,
+)
+
+class VirtualMinimizeChildrenTestCase(TestCase):
+
+ def testVirtualMinimizeChildren(self):
+ ebuilds = {
+ 'app-misc/bar-1': {
+ 'EAPI': '6',
+ 'RDEPEND': 'virtual/foo'
+ },
+ 'virtual/foo-1': {
+ 'EAPI': '6',
+ 'RDEPEND': '|| ( app-misc/A app-misc/B ) || ( app-misc/B app-misc/C )'
+ },
+ 'app-misc/A-1': {
+ 'EAPI': '6',
+ },
+ 'app-misc/B-1': {
+ 'EAPI': '6',
+ },
+ 'app-misc/C-1': {
+ 'EAPI': '6',
+ },
+ }
+
+ test_cases = (
+ # Test bug 632026, where we want to minimize the number of
+ # packages chosen to satisfy overlapping || deps like
+ # "|| ( foo bar ) || ( bar baz )".
+ ResolverPlaygroundTestCase(
+ ['app-misc/bar'],
+ success=True,
+ mergelist=[
+ 'app-misc/B-1',
+ 'virtual/foo-1',
+ 'app-misc/bar-1',
+ ],
+ ),
+ )
+
+ playground = ResolverPlayground(debug=False,
+ ebuilds=ebuilds)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
+
+ # If app-misc/A and app-misc/C are installed then
+ # that choice should be preferred over app-misc/B.
+ installed = {
+ 'app-misc/A-1': {
+ 'EAPI': '6',
+ },
+ 'app-misc/C-1': {
+ 'EAPI': '6',
+ },
+ }
+
+ test_cases = (
+ ResolverPlaygroundTestCase(
+ ['app-misc/bar'],
+ success=True,
+ mergelist=[
+ 'virtual/foo-1',
+ 'app-misc/bar-1',
+ ],
+ ),
+ )
+
+ playground = ResolverPlayground(debug=False,
+ ebuilds=ebuilds, installed=installed)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
+
+ def testOverlapSlotConflict(self):
+ ebuilds = {
+ 'app-misc/bar-1': {
+ 'EAPI': '6',
+ 'RDEPEND': 'virtual/foo'
+ },
+ 'virtual/foo-1': {
+ 'EAPI': '6',
+ 'RDEPEND': '|| ( app-misc/A >=app-misc/B-2 ) || ( <app-misc/B-2 app-misc/C )'
+ },
+ 'app-misc/A-1': {
+ 'EAPI': '6',
+ },
+ 'app-misc/B-2': {
+ 'EAPI': '6',
+ },
+ 'app-misc/B-1': {
+ 'EAPI': '6',
+ },
+ 'app-misc/C-1': {
+ 'EAPI': '6',
+ },
+ }
+
+ test_cases = (
+ # Here the ( >=app-misc/B-2 <app-misc/B-2 ) choice is not satisfiable.
+ ResolverPlaygroundTestCase(
+ ['app-misc/bar'],
+ success=True,
+ ambiguous_merge_order=True,
+ mergelist=[
+ (
+ 'app-misc/C-1',
+ 'app-misc/A-1',
+ ),
+ 'virtual/foo-1',
+ 'app-misc/bar-1',
+ ]
+ ),
+ )
+
+ playground = ResolverPlayground(debug=False,
+ ebuilds=ebuilds)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
+
+ def testVirtualPackageManager(self):
+ ebuilds = {
+ 'app-admin/perl-cleaner-2.25': {
+ 'RDEPEND': '''
+ || (
+ ( sys-apps/portage app-portage/portage-utils )
+ sys-apps/pkgcore
+ sys-apps/paludis
+ )'''
+ },
+ 'app-portage/portage-utils-0.64': {},
+ 'sys-apps/paludis-2.6.0': {},
+ 'sys-apps/portage-2.3.19-r1': {},
+ 'virtual/package-manager-0': {
+ 'RDEPEND': '''
+ || (
+ sys-apps/portage
+ sys-apps/paludis
+ sys-apps/pkgcore
+ )'''
+ },
+ }
+
+ test_cases = (
+ # Test bug 645002, where paludis was selected to satisfy a
+ # perl-cleaner dependency because that choice contained fewer
+ # packages than the ( portage portage-utils ) choice which
+ # should have been preferred according to the order of
+ # choices specified in the ebuild.
+ ResolverPlaygroundTestCase(
+ [
+ 'app-admin/perl-cleaner',
+ 'virtual/package-manager',
+ ],
+ all_permutations=True,
+ success=True,
+ ambiguous_merge_order=True,
+ mergelist=(
+ (
+ 'sys-apps/portage-2.3.19-r1',
+ 'app-portage/portage-utils-0.64',
+ 'app-admin/perl-cleaner-2.25',
+ 'virtual/package-manager-0',
+ ),
+ )
+ ),
+ # Test paludis preference. In this case, if paludis is not
+ # included in the argument atoms then the result varies
+ # depending on whether the app-admin/perl-cleaner or
+ # virtual/package-manager dependencies are evaluated first!
+ # Therefore, include paludis in the argument atoms.
+ ResolverPlaygroundTestCase(
+ [
+ 'app-admin/perl-cleaner',
+ 'virtual/package-manager',
+ 'sys-apps/paludis',
+ ],
+ all_permutations=True,
+ success=True,
+ ambiguous_merge_order=True,
+ mergelist=(
+ 'sys-apps/paludis-2.6.0',
+ (
+ 'app-admin/perl-cleaner-2.25',
+ 'virtual/package-manager-0',
+ ),
+ )
+ ),
+ )
+
+ playground = ResolverPlayground(debug=False, ebuilds=ebuilds)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
+
+ def testVirtualDevManager(self):
+ ebuilds = {
+ 'sys-fs/eudev-3.1.5': {},
+ 'sys-fs/static-dev-0.1': {},
+ 'sys-fs/udev-233': {},
+ 'virtual/dev-manager-0': {
+ 'RDEPEND': '''
+ || (
+ virtual/udev
+ sys-fs/static-dev
+ )'''
+ },
+ 'virtual/udev-0': {
+ 'RDEPEND': '''
+ || (
+ >=sys-fs/eudev-2.1.1
+ >=sys-fs/udev-217
+ )'''
+ },
+ }
+
+ test_cases = (
+ # Test bug 645190, where static-dev was pulled in instead
+ # of eudev.
+ ResolverPlaygroundTestCase(
+ [
+ 'virtual/dev-manager',
+ ],
+ success=True,
+ mergelist=(
+ 'sys-fs/eudev-3.1.5',
+ 'virtual/udev-0',
+ 'virtual/dev-manager-0',
+ ),
+ ),
+ # Test static-dev preference.
+ ResolverPlaygroundTestCase(
+ [
+ 'sys-fs/static-dev',
+ 'virtual/dev-manager',
+ ],
+ all_permutations=True,
+ success=True,
+ mergelist=(
+ 'sys-fs/static-dev-0.1',
+ 'virtual/dev-manager-0',
+ ),
+ ),
+ )
+
+ playground = ResolverPlayground(debug=False, ebuilds=ebuilds)
+
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True,
+ test_case.fail_msg)
+ finally:
+ playground.debug = False
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_virtual_slot.py b/lib/portage/tests/resolver/test_virtual_slot.py
new file mode 100644
index 000000000..ef4bd367a
--- /dev/null
+++ b/lib/portage/tests/resolver/test_virtual_slot.py
@@ -0,0 +1,217 @@
+# Copyright 2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground, ResolverPlaygroundTestCase
+
+class VirtualSlotResolverTestCase(TestCase):
+
+ def testLicenseMaskedVirtualSlotUpdate(self):
+
+ ebuilds = {
+ "dev-java/oracle-jdk-bin-1.7.0" : {"SLOT": "1.7", "LICENSE": "TEST"},
+ "dev-java/sun-jdk-1.6.0" : {"SLOT": "1.6", "LICENSE": "TEST"},
+ "dev-java/icedtea-6.1.10.3" : {"SLOT": "6"},
+ "app-misc/java-app-1": {"RDEPEND": ">=virtual/jdk-1.6.0"},
+ "virtual/jdk-1.6.0": {"SLOT": "1.6", "RDEPEND": "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
+ "virtual/jdk-1.7.0": {"SLOT": "1.7", "RDEPEND": "|| ( =dev-java/oracle-jdk-bin-1.7.0* )"},
+ }
+
+ installed = {
+ "app-misc/java-app-1": {"RDEPEND": ">=virtual/jdk-1.6.0"},
+ "dev-java/icedtea-6.1.10.3" : {"SLOT": "6"},
+ "virtual/jdk-1.6.0": {"SLOT" : "1.6", "RDEPEND": "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
+ }
+
+ world = ("app-misc/java-app",)
+
+ test_cases = (
+ # Bug #382557 - Don't pull in the virtual/jdk-1.7.0 slot update
+ # since its dependencies can only be satisfied by a package that
+ # is masked by license.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update" : True, "--deep" : True},
+ success = True,
+ mergelist = []),
+ )
+
+ playground = ResolverPlayground(
+ ebuilds=ebuilds, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testVirtualSlotUpdate(self):
+
+ ebuilds = {
+ "dev-java/oracle-jdk-bin-1.7.0" : {"SLOT": "1.7", "LICENSE": "TEST"},
+ "dev-java/sun-jdk-1.6.0" : {"SLOT": "1.6", "LICENSE": "TEST"},
+ "dev-java/icedtea-6.1.10.3" : {"SLOT": "6"},
+ "dev-java/icedtea-7" : {"SLOT": "7"},
+ "app-misc/java-app-1": {"RDEPEND": ">=virtual/jdk-1.6.0"},
+ "virtual/jdk-1.6.0": {"SLOT": "1.6", "RDEPEND": "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
+ "virtual/jdk-1.7.0": {"SLOT": "1.7", "RDEPEND": "|| ( =dev-java/icedtea-7* =dev-java/oracle-jdk-bin-1.7.0* )"},
+ }
+
+ installed = {
+ "app-misc/java-app-1": {"RDEPEND": ">=virtual/jdk-1.6.0"},
+ "dev-java/icedtea-6.1.10.3" : {"SLOT": "6"},
+ "virtual/jdk-1.6.0": {"SLOT" : "1.6", "RDEPEND": "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
+ }
+
+ world = ("app-misc/java-app",)
+
+ test_cases = (
+ # Pull in the virtual/jdk-1.7.0 slot update since its dependencies
+ # can only be satisfied by an unmasked package.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update" : True, "--deep" : True},
+ success = True,
+ mergelist = ["dev-java/icedtea-7", "virtual/jdk-1.7.0"]),
+
+ # Bug #275945 - Don't pull in the virtual/jdk-1.7.0 slot update
+ # unless --update is enabled.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--selective" : True, "--deep" : True},
+ success = True,
+ mergelist = []),
+ )
+
+ playground = ResolverPlayground(
+ ebuilds=ebuilds, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testVirtualSubslotUpdate(self):
+
+ ebuilds = {
+ "virtual/pypy-2.3.1" : {
+ "EAPI": "5",
+ "SLOT": "0/2.3",
+ "RDEPEND": "|| ( >=dev-python/pypy-2.3.1:0/2.3 >=dev-python/pypy-bin-2.3.1:0/2.3 ) "
+ },
+ "virtual/pypy-2.4.0" : {
+ "EAPI": "5",
+ "SLOT": "0/2.4",
+ "RDEPEND": "|| ( >=dev-python/pypy-2.4.0:0/2.4 >=dev-python/pypy-bin-2.4.0:0/2.4 ) "
+ },
+ "dev-python/pypy-2.3.1": {
+ "EAPI": "5",
+ "SLOT": "0/2.3"
+ },
+ "dev-python/pypy-2.4.0": {
+ "EAPI": "5",
+ "SLOT": "0/2.4"
+ },
+ "dev-python/pygments-1.6_p20140324-r1": {
+ "EAPI": "5",
+ "DEPEND": "virtual/pypy:0="
+ }
+ }
+
+ installed = {
+ "virtual/pypy-2.3.1" : {
+ "EAPI": "5",
+ "SLOT": "0/2.3",
+ "RDEPEND": "|| ( >=dev-python/pypy-2.3.1:0/2.3 >=dev-python/pypy-bin-2.3.1:0/2.3 ) "
+ },
+ "dev-python/pypy-2.3.1": {
+ "EAPI": "5",
+ "SLOT": "0/2.3"
+ },
+ "dev-python/pygments-1.6_p20140324-r1": {
+ "EAPI": "5",
+ "DEPEND": "virtual/pypy:0/2.3=",
+ "RDEPEND": "virtual/pypy:0/2.3=",
+ }
+ }
+
+ world = ["dev-python/pygments"]
+
+ test_cases = (
+ # bug 526160 - test for missed pypy sub-slot update
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--dynamic-deps": "y"},
+ success=True,
+ mergelist = ['dev-python/pypy-2.4.0',
+ 'virtual/pypy-2.4.0',
+ 'dev-python/pygments-1.6_p20140324-r1']),
+
+ # Repeat above test, but with --dynamic-deps disabled.
+ ResolverPlaygroundTestCase(
+ ["@world"],
+ options = {"--update": True, "--deep": True, "--dynamic-deps": "n"},
+ success=True,
+ mergelist = ['dev-python/pypy-2.4.0',
+ 'virtual/pypy-2.4.0',
+ 'dev-python/pygments-1.6_p20140324-r1']),
+ )
+
+ playground = ResolverPlayground(debug=False, ebuilds=ebuilds,
+ installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
+
+ def testVirtualSlotDepclean(self):
+
+ ebuilds = {
+ "dev-java/oracle-jdk-bin-1.7.0" : {"SLOT": "1.7", "LICENSE": "TEST"},
+ "dev-java/sun-jdk-1.6.0" : {"SLOT": "1.6", "LICENSE": "TEST"},
+ "dev-java/icedtea-6.1.10.3" : {"SLOT": "6"},
+ "dev-java/icedtea-7" : {"SLOT": "7"},
+ "app-misc/java-app-1": {"RDEPEND": ">=virtual/jdk-1.6.0"},
+ "virtual/jdk-1.6.0": {"SLOT": "1.6", "RDEPEND": "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
+ "virtual/jdk-1.7.0": {"SLOT": "1.7", "RDEPEND": "|| ( =dev-java/icedtea-7* =dev-java/oracle-jdk-bin-1.7.0* )"},
+ }
+
+ installed = {
+ "app-misc/java-app-1": {"RDEPEND": ">=virtual/jdk-1.6.0"},
+ "dev-java/icedtea-6.1.10.3" : {"SLOT": "6"},
+ "dev-java/icedtea-7" : {"SLOT": "7"},
+ "virtual/jdk-1.6.0": {"SLOT" : "1.6", "RDEPEND": "|| ( =dev-java/icedtea-6* =dev-java/sun-jdk-1.6.0* )"},
+ "virtual/jdk-1.7.0": {"SLOT": "1.7", "RDEPEND": "|| ( =dev-java/icedtea-7* =dev-java/oracle-jdk-bin-1.7.0* )"},
+ }
+
+ world = ("virtual/jdk:1.6", "app-misc/java-app",)
+
+ test_cases = (
+ # Make sure that depclean doesn't remove a new slot even though
+ # it is redundant in the sense that the older slot will satisfy
+ # all dependencies.
+ ResolverPlaygroundTestCase(
+ [],
+ options = {"--depclean" : True},
+ success = True,
+ cleanlist = []),
+
+ # Prune redundant lower slots, even if they are in world.
+ ResolverPlaygroundTestCase(
+ [],
+ options = {"--prune" : True},
+ success = True,
+ cleanlist = ['virtual/jdk-1.6.0', 'dev-java/icedtea-6.1.10.3']),
+ )
+
+ playground = ResolverPlayground(
+ ebuilds=ebuilds, installed=installed, world=world)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success, True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/resolver/test_with_test_deps.py b/lib/portage/tests/resolver/test_with_test_deps.py
new file mode 100644
index 000000000..5bfc6a8a2
--- /dev/null
+++ b/lib/portage/tests/resolver/test_with_test_deps.py
@@ -0,0 +1,44 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import \
+ ResolverPlayground, ResolverPlaygroundTestCase
+
+class WithTestDepsTestCase(TestCase):
+
+ def testWithTestDeps(self):
+ ebuilds = {
+ "app-misc/A-0": {
+ "EAPI": "5",
+ "IUSE": "test",
+ "DEPEND": "test? ( app-misc/B )"
+ },
+ "app-misc/B-0": {
+ "EAPI": "5",
+ "IUSE": "test",
+ "DEPEND": "test? ( app-misc/C )"
+ },
+ "app-misc/C-0": {
+ "EAPI": "5",
+ }
+ }
+
+ test_cases = (
+ # Test that --with-test-deps only pulls in direct
+ # test deps of packages matched by arguments.
+ ResolverPlaygroundTestCase(
+ ["app-misc/A"],
+ success = True,
+ options = { "--onlydeps": True, "--with-test-deps": True },
+ mergelist = ["app-misc/B-0"]),
+ )
+
+ playground = ResolverPlayground(ebuilds=ebuilds, debug=False)
+ try:
+ for test_case in test_cases:
+ playground.run_TestCase(test_case)
+ self.assertEqual(test_case.test_success,
+ True, test_case.fail_msg)
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/runTests.py b/lib/portage/tests/runTests.py
new file mode 100755
index 000000000..d4d1f7c76
--- /dev/null
+++ b/lib/portage/tests/runTests.py
@@ -0,0 +1,65 @@
+#!/usr/bin/python -bWd
+# runTests.py -- Portage Unit Test Functionality
+# Copyright 2006-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os, sys
+import os.path as osp
+import grp
+import platform
+import pwd
+import signal
+
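+# Install a handler that drops into pdb when the debug signal is received,
+# so a hung test run can be inspected interactively.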
+def debug_signal(signum, frame):
+ import pdb
+ pdb.set_trace()
+
+if platform.python_implementation() == 'Jython':
+ debug_signum = signal.SIGUSR2 # bug #424259
+else:
+ debug_signum = signal.SIGUSR1
+
+signal.signal(debug_signum, debug_signal)
+
+# Pretend that the current user's uid/gid are the 'portage' uid/gid,
+# so things go smoothly regardless of the current user and global
+# user/group configuration.
+os.environ["PORTAGE_USERNAME"] = pwd.getpwuid(os.getuid()).pw_name
+os.environ["PORTAGE_GRPNAME"] = grp.getgrgid(os.getgid()).gr_name
+
+# Insert our parent dir so we can do shiny import "tests"
+# This line courtesy of Marienz and Pkgcore ;)
+sys.path.insert(0, osp.dirname(osp.dirname(osp.dirname(osp.realpath(__file__)))))
+
+import portage
+portage._internal_caller = True
+
+# Ensure that we don't instantiate portage.settings, so that tests should
+# work the same regardless of global configuration file state/existence.
+portage._disable_legacy_globals()
+
+if os.environ.get('NOCOLOR') in ('yes', 'true'):
+ portage.output.nocolor()
+
+import portage.tests as tests
+from portage.util._eventloop.global_event_loop import global_event_loop
+from portage.const import PORTAGE_BIN_PATH
+path = os.environ.get("PATH", "").split(":")
+path = [x for x in path if x]
+
+insert_bin_path = True
+try:
+ insert_bin_path = not path or \
+ not os.path.samefile(path[0], PORTAGE_BIN_PATH)
+except OSError:
+ pass
+
+if insert_bin_path:
+ path.insert(0, PORTAGE_BIN_PATH)
+ os.environ["PATH"] = ":".join(path)
+
+if __name__ == "__main__":
+ try:
+ sys.exit(tests.main())
+ finally:
+ global_event_loop().close()
diff --git a/lib/portage/tests/sets/__init__.py b/lib/portage/tests/sets/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/sets/__init__.py
diff --git a/lib/portage/tests/sets/__test__.py b/lib/portage/tests/sets/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/sets/__test__.py
diff --git a/lib/portage/tests/sets/base/__init__.py b/lib/portage/tests/sets/base/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/sets/base/__init__.py
diff --git a/lib/portage/tests/sets/base/__test__.py b/lib/portage/tests/sets/base/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/sets/base/__test__.py
diff --git a/lib/portage/tests/sets/base/testInternalPackageSet.py b/lib/portage/tests/sets/base/testInternalPackageSet.py
new file mode 100644
index 000000000..e0a347876
--- /dev/null
+++ b/lib/portage/tests/sets/base/testInternalPackageSet.py
@@ -0,0 +1,61 @@
+# testInternalPackageSet.py -- Portage Unit Testing Functionality
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.dep import Atom
+from portage.exception import InvalidAtom
+from portage.tests import TestCase
+from portage._sets.base import InternalPackageSet
+
+class InternalPackageSetTestCase(TestCase):
+ """Simple Test Case for InternalPackageSet"""
+
+ def testInternalPackageSet(self):
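+ # i1 rejects wildcard atoms while i2 allows them; the assertions below
+ # check that wildcard atoms raise InvalidAtom for i1 but are accepted
+ # by i2 for add, update and replace operations.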
+ i1_atoms = set(("dev-libs/A", ">=dev-libs/A-1", "dev-libs/B"))
+ i2_atoms = set(("dev-libs/A", "dev-libs/*", "dev-libs/C"))
+
+ i1 = InternalPackageSet(initial_atoms=i1_atoms)
+ i2 = InternalPackageSet(initial_atoms=i2_atoms, allow_wildcard=True)
+ self.assertRaises(InvalidAtom, InternalPackageSet, initial_atoms=i2_atoms)
+
+ self.assertEqual(i1.getAtoms(), i1_atoms)
+ self.assertEqual(i2.getAtoms(), i2_atoms)
+
+ new_atom = Atom("*/*", allow_wildcard=True)
+ self.assertRaises(InvalidAtom, i1.add, new_atom)
+ i2.add(new_atom)
+
+ i2_atoms.add(new_atom)
+
+ self.assertEqual(i1.getAtoms(), i1_atoms)
+ self.assertEqual(i2.getAtoms(), i2_atoms)
+
+ removed_atom = Atom("dev-libs/A")
+
+ i1.remove(removed_atom)
+ i2.remove(removed_atom)
+
+ i1_atoms.remove(removed_atom)
+ i2_atoms.remove(removed_atom)
+
+ self.assertEqual(i1.getAtoms(), i1_atoms)
+ self.assertEqual(i2.getAtoms(), i2_atoms)
+
+ update_atoms = [Atom("dev-libs/C"), Atom("dev-*/C", allow_wildcard=True)]
+
+ self.assertRaises(InvalidAtom, i1.update, update_atoms)
+ i2.update(update_atoms)
+
+ i2_atoms.update(update_atoms)
+
+ self.assertEqual(i1.getAtoms(), i1_atoms)
+ self.assertEqual(i2.getAtoms(), i2_atoms)
+
+ replace_atoms = [Atom("dev-libs/D"), Atom("*-libs/C", allow_wildcard=True)]
+
+ self.assertRaises(InvalidAtom, i1.replace, replace_atoms)
+ i2.replace(replace_atoms)
+
+ i2_atoms = set(replace_atoms)
+
+ self.assertEqual(i2.getAtoms(), i2_atoms)
diff --git a/lib/portage/tests/sets/files/__init__.py b/lib/portage/tests/sets/files/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/sets/files/__init__.py
diff --git a/lib/portage/tests/sets/files/__test__.py b/lib/portage/tests/sets/files/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/sets/files/__test__.py
diff --git a/lib/portage/tests/sets/files/testConfigFileSet.py b/lib/portage/tests/sets/files/testConfigFileSet.py
new file mode 100644
index 000000000..3ec26a077
--- /dev/null
+++ b/lib/portage/tests/sets/files/testConfigFileSet.py
@@ -0,0 +1,32 @@
+# testConfigFileSet.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import tempfile
+
+from portage import os
+from portage.tests import TestCase, test_cps
+from portage._sets.files import ConfigFileSet
+
+class ConfigFileSetTestCase(TestCase):
+ """Simple Test Case for ConfigFileSet"""
+
+ def setUp(self):
+ fd, self.testfile = tempfile.mkstemp(suffix=".testdata", prefix=self.__class__.__name__, text=True)
+ f = os.fdopen(fd, 'w')
+ for i in range(0, len(test_cps)):
+ atom = test_cps[i]
+ if i % 2 == 0:
+ f.write(atom + ' abc def\n')
+ else:
+ f.write(atom + '\n')
+ f.close()
+
+ def tearDown(self):
+ os.unlink(self.testfile)
+
+ def testConfigStaticFileSet(self):
+ s = ConfigFileSet(self.testfile)
+ s.load()
+ self.assertEqual(set(test_cps), s.getAtoms())
+
diff --git a/lib/portage/tests/sets/files/testStaticFileSet.py b/lib/portage/tests/sets/files/testStaticFileSet.py
new file mode 100644
index 000000000..d515a6728
--- /dev/null
+++ b/lib/portage/tests/sets/files/testStaticFileSet.py
@@ -0,0 +1,27 @@
+# testStaticFileSet.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import tempfile
+
+from portage import os
+from portage.tests import TestCase, test_cps
+from portage._sets.files import StaticFileSet
+
+class StaticFileSetTestCase(TestCase):
+ """Simple Test Case for StaticFileSet"""
+
+ def setUp(self):
+ fd, self.testfile = tempfile.mkstemp(suffix=".testdata", prefix=self.__class__.__name__, text=True)
+ f = os.fdopen(fd, 'w')
+ f.write("\n".join(test_cps))
+ f.close()
+
+ def tearDown(self):
+ os.unlink(self.testfile)
+
+ def testSampleStaticFileSet(self):
+ s = StaticFileSet(self.testfile)
+ s.load()
+ self.assertEqual(set(test_cps), s.getAtoms())
+
diff --git a/lib/portage/tests/sets/shell/__init__.py b/lib/portage/tests/sets/shell/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/sets/shell/__init__.py
diff --git a/lib/portage/tests/sets/shell/__test__.py b/lib/portage/tests/sets/shell/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/sets/shell/__test__.py
diff --git a/lib/portage/tests/sets/shell/testShell.py b/lib/portage/tests/sets/shell/testShell.py
new file mode 100644
index 000000000..2cdd833c3
--- /dev/null
+++ b/lib/portage/tests/sets/shell/testShell.py
@@ -0,0 +1,28 @@
+# testShell.py -- Portage Unit Testing Functionality
+# Copyright 2007 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.process import find_binary
+from portage.tests import TestCase, test_cps
+from portage._sets.shell import CommandOutputSet
+
+class CommandOutputSetTestCase(TestCase):
+ """Simple Test Case for CommandOutputSet"""
+
+ def setUp(self):
+ pass
+
+ def tearDown(self):
+ pass
+
+ def testCommand(self):
+
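+ # Build a bash one-liner that echoes each test atom on its own line and
+ # verify that CommandOutputSet parses the command's output back into the
+ # same set of atoms.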
+ input = set(test_cps)
+ command = find_binary("bash")
+ command += " -c '"
+ for a in input:
+ command += " echo -e \"%s\" ; " % a
+ command += "'"
+ s = CommandOutputSet(command)
+ atoms = s.getAtoms()
+ self.assertEqual(atoms, input)
diff --git a/lib/portage/tests/sync/__init__.py b/lib/portage/tests/sync/__init__.py
new file mode 100644
index 000000000..7cd880e11
--- /dev/null
+++ b/lib/portage/tests/sync/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/tests/sync/__test__.py b/lib/portage/tests/sync/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/sync/__test__.py
diff --git a/lib/portage/tests/sync/test_sync_local.py b/lib/portage/tests/sync/test_sync_local.py
new file mode 100644
index 000000000..17ff6f200
--- /dev/null
+++ b/lib/portage/tests/sync/test_sync_local.py
@@ -0,0 +1,271 @@
+# Copyright 2014-2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+import sys
+import textwrap
+import time
+
+import portage
+from portage import os, shutil, _shell_quote
+from portage import _unicode_decode
+from portage.const import PORTAGE_PYM_PATH, TIMESTAMP_FORMAT
+from portage.process import find_binary
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.util import ensure_dirs
+
+class SyncLocalTestCase(TestCase):
+ """
+ Test sync with rsync and git, using file:// sync-uri.
+ """
+
+ def _must_skip(self):
+ if find_binary("rsync") is None:
+ return "rsync: command not found"
+ if find_binary("git") is None:
+ return "git: command not found"
+
+ def testSyncLocal(self):
+ debug = False
+
+ skip_reason = self._must_skip()
+ if skip_reason:
+ self.portage_skip = skip_reason
+ self.assertFalse(True, skip_reason)
+ return
+
+ repos_conf = textwrap.dedent("""
+ [DEFAULT]
+ %(default_keys)s
+ [test_repo]
+ location = %(EPREFIX)s/var/repositories/test_repo
+ sync-type = %(sync-type)s
+ sync-uri = file://%(EPREFIX)s/var/repositories/test_repo_sync
+ auto-sync = %(auto-sync)s
+ %(repo_extra_keys)s
+ """)
+
+ profile = {
+ "eapi": ("5",),
+ "package.use.stable.mask": ("dev-libs/A flag",)
+ }
+
+ ebuilds = {
+ "dev-libs/A-0": {}
+ }
+
+ user_config = {
+ 'make.conf': ('FEATURES="metadata-transfer"',)
+ }
+
+ playground = ResolverPlayground(ebuilds=ebuilds,
+ profile=profile, user_config=user_config, debug=debug)
+ settings = playground.settings
+ eprefix = settings["EPREFIX"]
+ eroot = settings["EROOT"]
+ homedir = os.path.join(eroot, "home")
+ distdir = os.path.join(eprefix, "distdir")
+ repo = settings.repositories["test_repo"]
+ metadata_dir = os.path.join(repo.location, "metadata")
+
+ cmds = {}
+ for cmd in ("emerge", "emaint"):
+ for bindir in (self.bindir, self.sbindir):
+ path = os.path.join(bindir, cmd)
+ if os.path.exists(path):
+ cmds[cmd] = (portage._python_interpreter,
+ "-b", "-Wd", path)
+ break
+ else:
+ raise AssertionError('%s binary not found in %s or %s' %
+ (cmd, self.bindir, self.sbindir))
+
+ git_binary = find_binary("git")
+ git_cmd = (git_binary,)
+
+ committer_name = "Gentoo Dev"
+ committer_email = "gentoo-dev@gentoo.org"
+
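+ # Rewrite PORTAGE_REPOSITORIES in the subprocess environment so that each
+ # step can switch sync-type and auto-sync settings without editing
+ # repos.conf on disk.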
+ def repos_set_conf(sync_type, dflt_keys=None, xtra_keys=None,
+ auto_sync="yes"):
+ env["PORTAGE_REPOSITORIES"] = repos_conf % {\
+ "EPREFIX": eprefix, "sync-type": sync_type,
+ "auto-sync": auto_sync,
+ "default_keys": "" if dflt_keys is None else dflt_keys,
+ "repo_extra_keys": "" if xtra_keys is None else xtra_keys}
+
+ def alter_ebuild():
+ with open(os.path.join(repo.location + "_sync",
+ "dev-libs", "A", "A-0.ebuild"), "a") as f:
+ f.write("\n")
+ os.unlink(os.path.join(metadata_dir, 'timestamp.chk'))
+
+ sync_cmds = (
+ (homedir, cmds["emerge"] + ("--sync",)),
+ (homedir, lambda: self.assertTrue(os.path.exists(
+ os.path.join(repo.location, "dev-libs", "A")
+ ), "dev-libs/A expected, but missing")),
+ (homedir, cmds["emaint"] + ("sync", "-A")),
+ )
+
+ sync_cmds_auto_sync = (
+ (homedir, lambda: repos_set_conf("rsync", auto_sync="no")),
+ (homedir, cmds["emerge"] + ("--sync",)),
+ (homedir, lambda: self.assertFalse(os.path.exists(
+ os.path.join(repo.location, "dev-libs", "A")
+ ), "dev-libs/A found, expected missing")),
+ (homedir, lambda: repos_set_conf("rsync", auto_sync="yes")),
+ )
+
+ rename_repo = (
+ (homedir, lambda: os.rename(repo.location,
+ repo.location + "_sync")),
+ )
+
+ rsync_opts_repos = (
+ (homedir, alter_ebuild),
+ (homedir, lambda: repos_set_conf("rsync", None,
+ "sync-rsync-extra-opts = --backup --backup-dir=%s" %
+ _shell_quote(repo.location + "_back"))),
+ (homedir, cmds['emerge'] + ("--sync",)),
+ (homedir, lambda: self.assertTrue(os.path.exists(
+ repo.location + "_back"))),
+ (homedir, lambda: shutil.rmtree(repo.location + "_back")),
+ (homedir, lambda: repos_set_conf("rsync")),
+ )
+
+ rsync_opts_repos_default = (
+ (homedir, alter_ebuild),
+ (homedir, lambda: repos_set_conf("rsync",
+ "sync-rsync-extra-opts = --backup --backup-dir=%s" %
+ _shell_quote(repo.location+"_back"))),
+ (homedir, cmds['emerge'] + ("--sync",)),
+ (homedir, lambda: self.assertTrue(os.path.exists(repo.location + "_back"))),
+ (homedir, lambda: shutil.rmtree(repo.location + "_back")),
+ (homedir, lambda: repos_set_conf("rsync")),
+ )
+
+ rsync_opts_repos_default_ovr = (
+ (homedir, alter_ebuild),
+ (homedir, lambda: repos_set_conf("rsync",
+ "sync-rsync-extra-opts = --backup --backup-dir=%s" %
+ _shell_quote(repo.location + "_back_nowhere"),
+ "sync-rsync-extra-opts = --backup --backup-dir=%s" %
+ _shell_quote(repo.location + "_back"))),
+ (homedir, cmds['emerge'] + ("--sync",)),
+ (homedir, lambda: self.assertTrue(os.path.exists(repo.location + "_back"))),
+ (homedir, lambda: shutil.rmtree(repo.location + "_back")),
+ (homedir, lambda: repos_set_conf("rsync")),
+ )
+
+ rsync_opts_repos_default_cancel = (
+ (homedir, alter_ebuild),
+ (homedir, lambda: repos_set_conf("rsync",
+ "sync-rsync-extra-opts = --backup --backup-dir=%s" %
+ _shell_quote(repo.location + "_back_nowhere"),
+ "sync-rsync-extra-opts = ")),
+ (homedir, cmds['emerge'] + ("--sync",)),
+ (homedir, lambda: self.assertFalse(os.path.exists(repo.location + "_back"))),
+ (homedir, lambda: repos_set_conf("rsync")),
+ )
+
+ delete_sync_repo = (
+ (homedir, lambda: shutil.rmtree(
+ repo.location + "_sync")),
+ )
+
+ git_repo_create = (
+ (repo.location, git_cmd +
+ ("config", "--global", "user.name", committer_name,)),
+ (repo.location, git_cmd +
+ ("config", "--global", "user.email", committer_email,)),
+ (repo.location, git_cmd + ("init-db",)),
+ (repo.location, git_cmd + ("add", ".")),
+ (repo.location, git_cmd +
+ ("commit", "-a", "-m", "add whole repo")),
+ )
+
+ sync_type_git = (
+ (homedir, lambda: repos_set_conf("git")),
+ )
+
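+ # Ensure that subprocesses import this portage source tree rather than
+ # any system-installed copy, by putting PORTAGE_PYM_PATH at the front of
+ # PYTHONPATH.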
+ pythonpath = os.environ.get("PYTHONPATH")
+ if pythonpath is not None and not pythonpath.strip():
+ pythonpath = None
+ if pythonpath is not None and \
+ pythonpath.split(":")[0] == PORTAGE_PYM_PATH:
+ pass
+ else:
+ if pythonpath is None:
+ pythonpath = ""
+ else:
+ pythonpath = ":" + pythonpath
+ pythonpath = PORTAGE_PYM_PATH + pythonpath
+
+ env = {
+ "PORTAGE_OVERRIDE_EPREFIX" : eprefix,
+ "DISTDIR" : distdir,
+ "GENTOO_COMMITTER_NAME" : committer_name,
+ "GENTOO_COMMITTER_EMAIL" : committer_email,
+ "HOME" : homedir,
+ "PATH" : os.environ["PATH"],
+ "PORTAGE_GRPNAME" : os.environ["PORTAGE_GRPNAME"],
+ "PORTAGE_USERNAME" : os.environ["PORTAGE_USERNAME"],
+ "PYTHONDONTWRITEBYTECODE" : os.environ.get("PYTHONDONTWRITEBYTECODE", ""),
+ "PYTHONPATH" : pythonpath,
+ }
+ repos_set_conf("rsync")
+
+ if os.environ.get("SANDBOX_ON") == "1":
+ # avoid problems from nested sandbox instances
+ env["FEATURES"] = "-sandbox -usersandbox"
+
+ dirs = [homedir, metadata_dir]
+ try:
+ for d in dirs:
+ ensure_dirs(d)
+
+ timestamp_path = os.path.join(metadata_dir, 'timestamp.chk')
+ with open(timestamp_path, 'w') as f:
+ f.write(time.strftime('%s\n' % TIMESTAMP_FORMAT, time.gmtime()))
+
+ if debug:
+ # The subprocess inherits both stdout and stderr, for
+ # debugging purposes.
+ stdout = None
+ else:
+ # The subprocess inherits stderr so that any warnings
+ # triggered by python -Wd will be visible.
+ stdout = subprocess.PIPE
+
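+ # Each step is a (cwd, cmd) pair: cmd is either an argv tuple executed as
+ # a subprocess in cwd, or a callable invoked in-process (e.g. to assert on
+ # repository state or rewrite the repo configuration).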
+ for cwd, cmd in rename_repo + sync_cmds_auto_sync + sync_cmds + \
+ rsync_opts_repos + rsync_opts_repos_default + \
+ rsync_opts_repos_default_ovr + rsync_opts_repos_default_cancel + \
+ delete_sync_repo + git_repo_create + sync_type_git + \
+ rename_repo + sync_cmds:
+
+ if hasattr(cmd, '__call__'):
+ cmd()
+ continue
+
+ abs_cwd = os.path.join(repo.location, cwd)
+ proc = subprocess.Popen(cmd,
+ cwd=abs_cwd, env=env, stdout=stdout)
+
+ if debug:
+ proc.wait()
+ else:
+ output = proc.stdout.readlines()
+ proc.wait()
+ proc.stdout.close()
+ if proc.returncode != os.EX_OK:
+ for line in output:
+ sys.stderr.write(_unicode_decode(line))
+
+ self.assertEqual(os.EX_OK, proc.returncode,
+ "%s failed in %s" % (cmd, cwd,))
+
+
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/unicode/__init__.py b/lib/portage/tests/unicode/__init__.py
new file mode 100644
index 000000000..21a391aee
--- /dev/null
+++ b/lib/portage/tests/unicode/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/tests/unicode/__test__.py b/lib/portage/tests/unicode/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/unicode/__test__.py
diff --git a/lib/portage/tests/unicode/test_string_format.py b/lib/portage/tests/unicode/test_string_format.py
new file mode 100644
index 000000000..9d4366a91
--- /dev/null
+++ b/lib/portage/tests/unicode/test_string_format.py
@@ -0,0 +1,108 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import sys
+
+from portage import _encodings, _unicode_encode
+from portage.exception import PortageException
+from portage.tests import TestCase
+from _emerge.DependencyArg import DependencyArg
+from _emerge.UseFlagDisplay import UseFlagDisplay
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+STR_IS_UNICODE = sys.hexversion >= 0x3000000
+
+class StringFormatTestCase(TestCase):
+ """
+ Test that string formatting works correctly in the current interpreter,
+ which may be either python2 or python3.
+ """
+
+ # We need unicode_literals in order to get some unicode test strings
+ # in a way that works in both python2 and python3.
+
+ unicode_strings = (
+ '\u2018',
+ '\u2019',
+ )
+
+ def testDependencyArg(self):
+
+ self.assertEqual(_encodings['content'], 'utf_8')
+
+ for arg_unicode in self.unicode_strings:
+ arg_bytes = _unicode_encode(arg_unicode, encoding=_encodings['content'])
+ dependency_arg = DependencyArg(arg=arg_unicode)
+
+ # Use unicode_literals for unicode format string so that
+ # __unicode__() is called in Python 2.
+ formatted_str = "%s" % (dependency_arg,)
+ self.assertEqual(formatted_str, arg_unicode)
+
+ if STR_IS_UNICODE:
+
+ # Test the __str__ method which returns unicode in python3
+ formatted_str = "%s" % (dependency_arg,)
+ self.assertEqual(formatted_str, arg_unicode)
+
+ else:
+
+ # Test the __str__ method which returns encoded bytes in python2
+ formatted_bytes = b"%s" % (dependency_arg,)
+ self.assertEqual(formatted_bytes, arg_bytes)
+
+ def testPortageException(self):
+
+ self.assertEqual(_encodings['content'], 'utf_8')
+
+ for arg_unicode in self.unicode_strings:
+ arg_bytes = _unicode_encode(arg_unicode, encoding=_encodings['content'])
+ e = PortageException(arg_unicode)
+
+ # Use unicode_literals for unicode format string so that
+ # __unicode__() is called in Python 2.
+ formatted_str = "%s" % (e,)
+ self.assertEqual(formatted_str, arg_unicode)
+
+ if STR_IS_UNICODE:
+
+ # Test the __str__ method which returns unicode in python3
+ formatted_str = "%s" % (e,)
+ self.assertEqual(formatted_str, arg_unicode)
+
+ else:
+
+ # Test the __str__ method which returns encoded bytes in python2
+ formatted_bytes = b"%s" % (e,)
+ self.assertEqual(formatted_bytes, arg_bytes)
+
+ def testUseFlagDisplay(self):
+
+ self.assertEqual(_encodings['content'], 'utf_8')
+
+ for enabled in (True, False):
+ for forced in (True, False):
+ for arg_unicode in self.unicode_strings:
+ e = UseFlagDisplay(arg_unicode, enabled, forced)
+
+ # Use unicode_literals for unicode format string so that
+ # __unicode__() is called in Python 2.
+ formatted_str = "%s" % (e,)
+ self.assertEqual(isinstance(formatted_str, basestring), True)
+
+ if STR_IS_UNICODE:
+
+ # Test the __str__ method which returns unicode in python3
+ formatted_str = "%s" % (e,)
+ self.assertEqual(isinstance(formatted_str, str), True)
+
+ else:
+
+ # Test the __str__ method which returns encoded bytes in python2
+ formatted_bytes = b"%s" % (e,)
+ self.assertEqual(isinstance(formatted_bytes, bytes), True)
diff --git a/lib/portage/tests/update/__init__.py b/lib/portage/tests/update/__init__.py
new file mode 100644
index 000000000..418ad862b
--- /dev/null
+++ b/lib/portage/tests/update/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/tests/update/__test__.py b/lib/portage/tests/update/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/update/__test__.py
diff --git a/lib/portage/tests/update/test_move_ent.py b/lib/portage/tests/update/test_move_ent.py
new file mode 100644
index 000000000..d9647a95e
--- /dev/null
+++ b/lib/portage/tests/update/test_move_ent.py
@@ -0,0 +1,109 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import textwrap
+
+import portage
+from portage import os
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.util import ensure_dirs
+from portage._global_updates import _do_global_updates
+
+class MoveEntTestCase(TestCase):
+
+ def testMoveEnt(self):
+
+ ebuilds = {
+
+ "dev-libs/A-2::dont_apply_updates" : {
+ "EAPI": "4",
+ "SLOT": "2",
+ },
+
+ }
+
+ installed = {
+
+ "dev-libs/A-1::test_repo" : {
+ "EAPI": "4",
+ },
+
+ "dev-libs/A-2::dont_apply_updates" : {
+ "EAPI": "4",
+ "SLOT": "2",
+ },
+
+ }
+
+ binpkgs = {
+
+ "dev-libs/A-1::test_repo" : {
+ "EAPI": "4",
+ },
+
+ "dev-libs/A-2::dont_apply_updates" : {
+ "EAPI": "4",
+ "SLOT": "2",
+ },
+
+ }
+
+ updates = textwrap.dedent("""
+ move dev-libs/A dev-libs/A-moved
+ """)
+
+ playground = ResolverPlayground(binpkgs=binpkgs,
+ ebuilds=ebuilds, installed=installed)
+
+ settings = playground.settings
+ trees = playground.trees
+ eroot = settings["EROOT"]
+ test_repo_location = settings.repositories["test_repo"].location
+ portdb = trees[eroot]["porttree"].dbapi
+ vardb = trees[eroot]["vartree"].dbapi
+ bindb = trees[eroot]["bintree"].dbapi
+
+ updates_dir = os.path.join(test_repo_location, "profiles", "updates")
+
+ try:
+ ensure_dirs(updates_dir)
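+ # Package moves are read from profiles/updates/; files are
+ # conventionally named by quarter (here "1Q-2010"), and each line
+ # is a command such as "move <old atom> <new atom>".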
+ with open(os.path.join(updates_dir, "1Q-2010"), 'w') as f:
+ f.write(updates)
+
+ # Create an empty updates directory, so that this
+ # repo doesn't inherit updates from the main repo.
+ ensure_dirs(os.path.join(
+ portdb.getRepositoryPath("dont_apply_updates"),
+ "profiles", "updates"))
+
+ global_noiselimit = portage.util.noiselimit
+ portage.util.noiselimit = -2
+ try:
+ _do_global_updates(trees, {})
+ finally:
+ portage.util.noiselimit = global_noiselimit
+
+ # Workaround for cache validation not working
+ # correctly when filesystem has timestamp precision
+ # of 1 second.
+ vardb._clear_cache()
+
+ # A -> A-moved
+ self.assertRaises(KeyError,
+ vardb.aux_get, "dev-libs/A-1", ["EAPI"])
+ vardb.aux_get("dev-libs/A-moved-1", ["EAPI"])
+ self.assertRaises(KeyError,
+ bindb.aux_get, "dev-libs/A-1", ["EAPI"])
+ bindb.aux_get("dev-libs/A-moved-1", ["EAPI"])
+
+ # dont_apply_updates
+ self.assertRaises(KeyError,
+ vardb.aux_get, "dev-libs/A-moved-2", ["EAPI"])
+ vardb.aux_get("dev-libs/A-2", ["EAPI"])
+ self.assertRaises(KeyError,
+ bindb.aux_get, "dev-libs/A-moved-2", ["EAPI"])
+ bindb.aux_get("dev-libs/A-2", ["EAPI"])
+
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/update/test_move_slot_ent.py b/lib/portage/tests/update/test_move_slot_ent.py
new file mode 100644
index 000000000..3e49e1144
--- /dev/null
+++ b/lib/portage/tests/update/test_move_slot_ent.py
@@ -0,0 +1,154 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import textwrap
+
+import portage
+from portage import os
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.util import ensure_dirs
+from portage._global_updates import _do_global_updates
+
+class MoveSlotEntTestCase(TestCase):
+
+ def testMoveSlotEnt(self):
+
+ ebuilds = {
+
+ "dev-libs/A-2::dont_apply_updates" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/2.30",
+ },
+
+ "dev-libs/B-2::dont_apply_updates" : {
+ "SLOT": "0",
+ },
+
+ "dev-libs/C-2.1::dont_apply_updates" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/2.1",
+ },
+
+ }
+
+ installed = {
+
+ "dev-libs/A-1::test_repo" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/2.30",
+ },
+
+ "dev-libs/B-1::test_repo" : {
+ "SLOT": "0",
+ },
+
+ "dev-libs/C-1::test_repo" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/1",
+ },
+
+ }
+
+ binpkgs = {
+
+ "dev-libs/A-1::test_repo" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/2.30",
+ },
+
+ "dev-libs/A-2::dont_apply_updates" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/2.30",
+ },
+
+ "dev-libs/B-1::test_repo" : {
+ "SLOT": "0",
+ },
+
+ "dev-libs/B-2::dont_apply_updates" : {
+ "SLOT": "0",
+ },
+
+ "dev-libs/C-1::test_repo" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/1",
+ },
+
+ "dev-libs/C-2.1::dont_apply_updates" : {
+ "EAPI": "4-slot-abi",
+ "SLOT": "0/2.1",
+ },
+
+ }
+
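+ # Each "slotmove <atom> <old slot> <new slot>" line rewrites the
+ # SLOT of matching installed and binary packages.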
+ updates = textwrap.dedent("""
+ slotmove dev-libs/A 0 2
+ slotmove dev-libs/B 0 1
+ slotmove dev-libs/C 0 1
+ """)
+
+ playground = ResolverPlayground(binpkgs=binpkgs,
+ ebuilds=ebuilds, installed=installed)
+
+ settings = playground.settings
+ trees = playground.trees
+ eroot = settings["EROOT"]
+ test_repo_location = settings.repositories["test_repo"].location
+ portdb = trees[eroot]["porttree"].dbapi
+ vardb = trees[eroot]["vartree"].dbapi
+ bindb = trees[eroot]["bintree"].dbapi
+
+ updates_dir = os.path.join(test_repo_location, "profiles", "updates")
+
+ try:
+ ensure_dirs(updates_dir)
+ with open(os.path.join(updates_dir, "1Q-2010"), 'w') as f:
+ f.write(updates)
+
+ # Create an empty updates directory, so that this
+ # repo doesn't inherit updates from the main repo.
+ ensure_dirs(os.path.join(
+ portdb.getRepositoryPath("dont_apply_updates"),
+ "profiles", "updates"))
+
+ global_noiselimit = portage.util.noiselimit
+ portage.util.noiselimit = -2
+ try:
+ _do_global_updates(trees, {})
+ finally:
+ portage.util.noiselimit = global_noiselimit
+
+ # Workaround for cache validation not working
+ # correctly when filesystem has timestamp precision
+ # of 1 second.
+ vardb._clear_cache()
+
+ # 0/2.30 -> 2/2.30
+ self.assertEqual("2/2.30",
+ vardb.aux_get("dev-libs/A-1", ["SLOT"])[0])
+ self.assertEqual("2/2.30",
+ bindb.aux_get("dev-libs/A-1", ["SLOT"])[0])
+
+ # 0 -> 1
+ self.assertEqual("1",
+ vardb.aux_get("dev-libs/B-1", ["SLOT"])[0])
+ self.assertEqual("1",
+ bindb.aux_get("dev-libs/B-1", ["SLOT"])[0])
+
+ # 0/1 -> 1 (equivalent to 1/1)
+ self.assertEqual("1",
+ vardb.aux_get("dev-libs/C-1", ["SLOT"])[0])
+ self.assertEqual("1",
+ bindb.aux_get("dev-libs/C-1", ["SLOT"])[0])
+
+ # dont_apply_updates
+ self.assertEqual("0/2.30",
+ bindb.aux_get("dev-libs/A-2", ["SLOT"])[0])
+ self.assertEqual("0",
+ bindb.aux_get("dev-libs/B-2", ["SLOT"])[0])
+ self.assertEqual("0/2.1",
+ bindb.aux_get("dev-libs/C-2.1", ["SLOT"])[0])
+
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/update/test_update_dbentry.py b/lib/portage/tests/update/test_update_dbentry.py
new file mode 100644
index 000000000..88951149a
--- /dev/null
+++ b/lib/portage/tests/update/test_update_dbentry.py
@@ -0,0 +1,277 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import re
+import textwrap
+
+import portage
+from portage import os
+from portage.dep import Atom
+from portage.tests import TestCase
+from portage.tests.resolver.ResolverPlayground import ResolverPlayground
+from portage.update import update_dbentry
+from portage.util import ensure_dirs
+from portage.versions import _pkg_str
+from portage._global_updates import _do_global_updates
+
+class UpdateDbentryTestCase(TestCase):
+
+ def testUpdateDbentryTestCase(self):
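+ # Each case is (update command, EAPI, input dependency string,
+ # expected output string).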
+ cases = (
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "1",
+ " dev-libs/A:0 ", " dev-libs/B:0 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "1",
+ " >=dev-libs/A-1:0 ", " >=dev-libs/B-1:0 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "2",
+ " dev-libs/A[foo] ", " dev-libs/B[foo] "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "5",
+ " dev-libs/A:0/1=[foo] ", " dev-libs/B:0/1=[foo] "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "5",
+ " dev-libs/A:0/1[foo] ", " dev-libs/B:0/1[foo] "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "5",
+ " dev-libs/A:0/0[foo] ", " dev-libs/B:0/0[foo] "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")), "5",
+ " dev-libs/A:0=[foo] ", " dev-libs/B:0=[foo] "),
+
+ (("slotmove", Atom("dev-libs/A"), "0", "1"), "1",
+ " dev-libs/A:0 ", " dev-libs/A:1 "),
+
+ (("slotmove", Atom("dev-libs/A"), "0", "1"), "1",
+ " >=dev-libs/A-1:0 ", " >=dev-libs/A-1:1 "),
+
+ (("slotmove", Atom("dev-libs/A"), "0", "1"), "5",
+ " dev-libs/A:0/1=[foo] ", " dev-libs/A:1/1=[foo] "),
+
+ (("slotmove", Atom("dev-libs/A"), "0", "1"), "5",
+ " dev-libs/A:0/1[foo] ", " dev-libs/A:1/1[foo] "),
+
+ (("slotmove", Atom("dev-libs/A"), "0", "1"), "5",
+ " dev-libs/A:0/0[foo] ", " dev-libs/A:1/1[foo] "),
+
+ (("slotmove", Atom("dev-libs/A"), "0", "1"), "5",
+ " dev-libs/A:0=[foo] ", " dev-libs/A:1=[foo] "),
+ )
+ for update_cmd, eapi, input_str, output_str in cases:
+ result = update_dbentry(update_cmd, input_str, eapi=eapi)
+ self.assertEqual(result, output_str)
+
+
+ def testUpdateDbentryBlockerTestCase(self):
+ """
+ Avoid creating self-blockers for bug #367215.
+ """
+ cases = (
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/B-1", eapi="1", slot="0"),
+ " !dev-libs/A ", " !dev-libs/A "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/C-1", eapi="1", slot="0"),
+ " !dev-libs/A ", " !dev-libs/B "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/B-1", eapi="1", slot="0"),
+ " !dev-libs/A:0 ", " !dev-libs/A:0 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/C-1", eapi="1", slot="0"),
+ " !dev-libs/A:0 ", " !dev-libs/B:0 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/C-1", eapi="1", slot="0"),
+ " !>=dev-libs/A-1:0 ", " !>=dev-libs/B-1:0 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/B-1", eapi="1", slot="0"),
+ " !>=dev-libs/A-1:0 ", " !>=dev-libs/A-1:0 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/C-1", eapi="1", slot="0"),
+ " !>=dev-libs/A-1 ", " !>=dev-libs/B-1 "),
+
+ (("move", Atom("dev-libs/A"), Atom("dev-libs/B")),
+ _pkg_str("dev-libs/B-1", eapi="1", slot="0"),
+ " !>=dev-libs/A-1 ", " !>=dev-libs/A-1 "),
+
+ )
+ for update_cmd, parent, input_str, output_str in cases:
+ result = update_dbentry(update_cmd, input_str, parent=parent)
+ self.assertEqual(result, output_str)
+
+ def testUpdateDbentryDbapiTestCase(self):
+
+ ebuilds = {
+
+ "dev-libs/A-2::dont_apply_updates" : {
+ "RDEPEND" : "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4",
+ "SLOT": "2",
+ },
+
+ "dev-libs/B-2::dont_apply_updates" : {
+ "RDEPEND" : "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4",
+ "SLOT": "2",
+ },
+
+ }
+
+ installed = {
+
+ "dev-libs/A-1::test_repo" : {
+ "RDEPEND" : "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4",
+ },
+
+ "dev-libs/A-2::dont_apply_updates" : {
+ "RDEPEND" : "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4",
+ "SLOT": "2",
+ },
+
+ "dev-libs/B-1::test_repo" : {
+ "RDEPEND" : "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4-python",
+ },
+
+ "dev-libs/M-1::test_repo" : {
+ "EAPI": "4",
+ },
+
+ "dev-libs/N-1::test_repo" : {
+ "EAPI": "4",
+ },
+
+ "dev-libs/N-2::test_repo" : {
+ "EAPI": "4-python",
+ },
+
+ }
+
+ binpkgs = {
+
+ "dev-libs/A-1::test_repo" : {
+ "RDEPEND" : "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4",
+ },
+
+ "dev-libs/A-2::dont_apply_updates" : {
+ "RDEPEND" : "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4",
+ "SLOT": "2",
+ },
+
+ "dev-libs/B-1::test_repo" : {
+ "RDEPEND" : "dev-libs/M dev-libs/N dev-libs/P",
+ "EAPI": "4-python",
+ },
+
+ }
+
+ world = ["dev-libs/M", "dev-libs/N"]
+
+ updates = textwrap.dedent("""
+ move dev-libs/M dev-libs/M-moved
+ move dev-libs/N dev-libs/N.moved
+ """)
+
+ playground = ResolverPlayground(binpkgs=binpkgs,
+ ebuilds=ebuilds, installed=installed, world=world)
+
+ settings = playground.settings
+ trees = playground.trees
+ eroot = settings["EROOT"]
+ test_repo_location = settings.repositories["test_repo"].location
+ portdb = trees[eroot]["porttree"].dbapi
+ vardb = trees[eroot]["vartree"].dbapi
+ bindb = trees[eroot]["bintree"].dbapi
+ setconfig = trees[eroot]["root_config"].setconfig
+ selected_set = setconfig.getSets()["selected"]
+
+ updates_dir = os.path.join(test_repo_location, "profiles", "updates")
+
+ try:
+ ensure_dirs(updates_dir)
+ with open(os.path.join(updates_dir, "1Q-2010"), 'w') as f:
+ f.write(updates)
+
+ # Create an empty updates directory, so that this
+ # repo doesn't inherit updates from the main repo.
+ ensure_dirs(os.path.join(
+ portdb.getRepositoryPath("dont_apply_updates"),
+ "profiles", "updates"))
+
+ global_noiselimit = portage.util.noiselimit
+ portage.util.noiselimit = -2
+ try:
+ _do_global_updates(trees, {})
+ finally:
+ portage.util.noiselimit = global_noiselimit
+
+ # Workaround for cache validation not working
+ # correctly when filesystem has timestamp precision
+ # of 1 second.
+ vardb._clear_cache()
+
+ # M -> M-moved
+ old_pattern = re.compile(r"\bdev-libs/M(\s|$)")
+ rdepend = vardb.aux_get("dev-libs/A-1", ["RDEPEND"])[0]
+ self.assertTrue(old_pattern.search(rdepend) is None)
+ self.assertTrue("dev-libs/M-moved" in rdepend)
+ rdepend = bindb.aux_get("dev-libs/A-1", ["RDEPEND"])[0]
+ self.assertTrue(old_pattern.search(rdepend) is None)
+ self.assertTrue("dev-libs/M-moved" in rdepend)
+ rdepend = vardb.aux_get("dev-libs/B-1", ["RDEPEND"])[0]
+ self.assertTrue(old_pattern.search(rdepend) is None)
+ self.assertTrue("dev-libs/M-moved" in rdepend)
+ rdepend = vardb.aux_get("dev-libs/B-1", ["RDEPEND"])[0]
+ self.assertTrue(old_pattern.search(rdepend) is None)
+ self.assertTrue("dev-libs/M-moved" in rdepend)
+
+ # EAPI 4-python/*-progress N -> N.moved
+ rdepend = vardb.aux_get("dev-libs/B-1", ["RDEPEND"])[0]
+ old_pattern = re.compile(r"\bdev-libs/N(\s|$)")
+ self.assertTrue(old_pattern.search(rdepend) is None)
+ self.assertTrue("dev-libs/N.moved" in rdepend)
+ rdepend = bindb.aux_get("dev-libs/B-1", ["RDEPEND"])[0]
+ self.assertTrue(old_pattern.search(rdepend) is None)
+ self.assertTrue("dev-libs/N.moved" in rdepend)
+ self.assertRaises(KeyError,
+ vardb.aux_get, "dev-libs/N-2", ["EAPI"])
+ vardb.aux_get("dev-libs/N.moved-2", ["RDEPEND"])[0]
+
+ # EAPI 4 does not allow dots in package names for N -> N.moved
+ rdepend = vardb.aux_get("dev-libs/A-1", ["RDEPEND"])[0]
+ self.assertTrue("dev-libs/N" in rdepend)
+ self.assertTrue("dev-libs/N.moved" not in rdepend)
+ rdepend = bindb.aux_get("dev-libs/A-1", ["RDEPEND"])[0]
+ self.assertTrue("dev-libs/N" in rdepend)
+ self.assertTrue("dev-libs/N.moved" not in rdepend)
+ vardb.aux_get("dev-libs/N-1", ["RDEPEND"])[0]
+ self.assertRaises(KeyError,
+ vardb.aux_get, "dev-libs/N.moved-1", ["EAPI"])
+
+ # dont_apply_updates
+ rdepend = vardb.aux_get("dev-libs/A-2", ["RDEPEND"])[0]
+ self.assertTrue("dev-libs/M" in rdepend)
+ self.assertTrue("dev-libs/M-moved" not in rdepend)
+ rdepend = bindb.aux_get("dev-libs/A-2", ["RDEPEND"])[0]
+ self.assertTrue("dev-libs/M" in rdepend)
+ self.assertTrue("dev-libs/M-moved" not in rdepend)
+
+ selected_set.load()
+ self.assertTrue("dev-libs/M" not in selected_set)
+ self.assertTrue("dev-libs/M-moved" in selected_set)
+ self.assertTrue("dev-libs/N" not in selected_set)
+ self.assertTrue("dev-libs/N.moved" in selected_set)
+
+ finally:
+ playground.cleanup()
diff --git a/lib/portage/tests/util/__init__.py b/lib/portage/tests/util/__init__.py
new file mode 100644
index 000000000..69ce1898d
--- /dev/null
+++ b/lib/portage/tests/util/__init__.py
@@ -0,0 +1,4 @@
+# tests/portage.util/__init__.py -- Portage Unit Test functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
diff --git a/lib/portage/tests/util/__test__.py b/lib/portage/tests/util/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/util/__test__.py
diff --git a/lib/portage/tests/util/dyn_libs/__init__.py b/lib/portage/tests/util/dyn_libs/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/util/dyn_libs/__init__.py
diff --git a/lib/portage/tests/util/dyn_libs/__test__.py b/lib/portage/tests/util/dyn_libs/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/util/dyn_libs/__test__.py
diff --git a/lib/portage/tests/util/dyn_libs/test_soname_deps.py b/lib/portage/tests/util/dyn_libs/test_soname_deps.py
new file mode 100644
index 000000000..823890c91
--- /dev/null
+++ b/lib/portage/tests/util/dyn_libs/test_soname_deps.py
@@ -0,0 +1,34 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.util._dyn_libs.NeededEntry import NeededEntry
+from portage.util._dyn_libs.soname_deps import SonameDepsProcessor
+
+
+class SonameDepsProcessorTestCase(TestCase):
+
+ def testInternalLibsWithoutSoname(self):
+ """
+ Test handling of internal libraries that lack an soname, which are
+ resolved via DT_RUNPATH, see ebtables for example (bug 646190).
+ """
+ needed_elf_2 = """
+X86_64;/sbin/ebtables;;/lib64/ebtables;libebt_802_3.so,libebtable_broute.so,libc.so.6;x86_64
+X86_64;/lib64/ebtables/libebtable_broute.so;;;libc.so.6;x86_64
+X86_64;/lib64/ebtables/libebt_802_3.so;;;libc.so.6;x86_64
+"""
+ soname_deps = SonameDepsProcessor('', '')
+
+ for line in needed_elf_2.splitlines():
+ if not line:
+ continue
+ entry = NeededEntry.parse(None, line)
+ soname_deps.add(entry)
+
+ self.assertEqual(soname_deps.provides, None)
+ # Prior to the fix for bug 646190, REQUIRES contained references to
+ # the internal libebt* libraries which are resolved via a DT_RUNPATH
+ # entry referring to the /lib64/ebtables directory that contains the
+ # internal libraries.
+ self.assertEqual(soname_deps.requires, 'x86_64: libc.so.6\n')
diff --git a/lib/portage/tests/util/eventloop/__init__.py b/lib/portage/tests/util/eventloop/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/util/eventloop/__init__.py
diff --git a/lib/portage/tests/util/eventloop/__test__.py b/lib/portage/tests/util/eventloop/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/util/eventloop/__test__.py
diff --git a/lib/portage/tests/util/eventloop/test_call_soon_fifo.py b/lib/portage/tests/util/eventloop/test_call_soon_fifo.py
new file mode 100644
index 000000000..f970c67a1
--- /dev/null
+++ b/lib/portage/tests/util/eventloop/test_call_soon_fifo.py
@@ -0,0 +1,30 @@
+# Copyright 2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import functools
+import random
+
+from portage import os
+from portage.tests import TestCase
+from portage.util._eventloop.global_event_loop import global_event_loop
+
+
+class CallSoonFifoTestCase(TestCase):
+
+ def testCallSoonFifo(self):
+
+ event_loop = global_event_loop()
+ inputs = [random.random() for index in range(10)]
+ outputs = []
+ finished = event_loop.create_future()
+
+ def add_output(value):
+ outputs.append(value)
+ if len(outputs) == len(inputs):
+ finished.set_result(True)
+
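+ # call_soon is expected to run its callbacks in FIFO order, so the
+ # collected outputs should match the inputs exactly.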
+ for value in inputs:
+ event_loop.call_soon(functools.partial(add_output, value))
+
+ event_loop.run_until_complete(finished)
+ self.assertEqual(inputs, outputs)
diff --git a/lib/portage/tests/util/file_copy/__init__.py b/lib/portage/tests/util/file_copy/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/util/file_copy/__init__.py
diff --git a/lib/portage/tests/util/file_copy/__test__.py b/lib/portage/tests/util/file_copy/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/util/file_copy/__test__.py
diff --git a/lib/portage/tests/util/file_copy/test_copyfile.py b/lib/portage/tests/util/file_copy/test_copyfile.py
new file mode 100644
index 000000000..b900fdef0
--- /dev/null
+++ b/lib/portage/tests/util/file_copy/test_copyfile.py
@@ -0,0 +1,71 @@
+# Copyright 2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import shutil
+import tempfile
+
+from portage import os
+from portage.tests import TestCase
+from portage.checksum import perform_md5
+from portage.util.file_copy import copyfile
+
+
+class CopyFileTestCase(TestCase):
+
+ def testCopyFile(self):
+
+ tempdir = tempfile.mkdtemp()
+ try:
+ src_path = os.path.join(tempdir, 'src')
+ dest_path = os.path.join(tempdir, 'dest')
+ content = b'foo'
+
+ with open(src_path, 'wb') as f:
+ f.write(content)
+
+ copyfile(src_path, dest_path)
+
+ self.assertEqual(perform_md5(src_path), perform_md5(dest_path))
+ finally:
+ shutil.rmtree(tempdir)
+
+
+class CopyFileSparseTestCase(TestCase):
+
+ def testCopyFileSparse(self):
+
+ tempdir = tempfile.mkdtemp()
+ try:
+ src_path = os.path.join(tempdir, 'src')
+ dest_path = os.path.join(tempdir, 'dest')
+ content = b'foo'
+
+ # Use seek to create some sparse blocks. Don't make these
+ # files too big, in case the filesystem doesn't support
+ # sparse files.
+ with open(src_path, 'wb') as f:
+ f.write(content)
+ f.seek(2**17, 1)
+ f.write(content)
+ f.seek(2**18, 1)
+ f.write(content)
+ # Test that sparse blocks are handled correctly at
+ # the end of the file (involves seek and truncate).
+ f.seek(2**17, 1)
+
+ copyfile(src_path, dest_path)
+
+ self.assertEqual(perform_md5(src_path), perform_md5(dest_path))
+
+ # This last part of the test is expected to fail when sparse
+ # copy is not implemented, so set the todo flag in order
+ # to tolerate failures.
+ self.todo = True
+
+ # If sparse blocks were preserved, then both files should
+ # consume the same number of blocks.
+ self.assertEqual(
+ os.stat(src_path).st_blocks,
+ os.stat(dest_path).st_blocks)
+ finally:
+ shutil.rmtree(tempdir)
diff --git a/lib/portage/tests/util/futures/__init__.py b/lib/portage/tests/util/futures/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/util/futures/__init__.py
diff --git a/lib/portage/tests/util/futures/__test__.py b/lib/portage/tests/util/futures/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/util/futures/__test__.py
diff --git a/lib/portage/tests/util/futures/asyncio/__init__.py b/lib/portage/tests/util/futures/asyncio/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/util/futures/asyncio/__init__.py
diff --git a/lib/portage/tests/util/futures/asyncio/__test__.py b/lib/portage/tests/util/futures/asyncio/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/util/futures/asyncio/__test__.py
diff --git a/lib/portage/tests/util/futures/asyncio/test_child_watcher.py b/lib/portage/tests/util/futures/asyncio/test_child_watcher.py
new file mode 100644
index 000000000..0fc73ab49
--- /dev/null
+++ b/lib/portage/tests/util/futures/asyncio/test_child_watcher.py
@@ -0,0 +1,50 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+
+from portage.process import find_binary, spawn
+from portage.tests import TestCase
+from portage.util._eventloop.global_event_loop import global_event_loop
+from portage.util.futures import asyncio
+from portage.util.futures.unix_events import DefaultEventLoopPolicy
+
+
+class ChildWatcherTestCase(TestCase):
+ def testChildWatcher(self):
+ true_binary = find_binary("true")
+ self.assertNotEqual(true_binary, None)
+
+ initial_policy = asyncio.get_event_loop_policy()
+ if not isinstance(initial_policy, DefaultEventLoopPolicy):
+ asyncio.set_event_loop_policy(DefaultEventLoopPolicy())
+
+ loop = None
+ try:
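+ # Portage's policy is not expected to support replacing the child
+ # watcher, so set_child_watcher(None) should raise NotImplementedError.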
+ try:
+ asyncio.set_child_watcher(None)
+ except NotImplementedError:
+ pass
+ else:
+ self.assertTrue(False)
+
+ args_tuple = ('hello', 'world')
+
+ loop = asyncio._wrap_loop()
+ future = loop.create_future()
+
+ def callback(pid, returncode, *args):
+ future.set_result((pid, returncode, args))
+
+ with asyncio.get_child_watcher() as watcher:
+ pids = spawn([true_binary], returnpid=True)
+ watcher.add_child_handler(pids[0], callback, *args_tuple)
+
+ self.assertEqual(
+ loop.run_until_complete(future),
+ (pids[0], os.EX_OK, args_tuple))
+ finally:
+ asyncio.set_event_loop_policy(initial_policy)
+ if loop not in (None, global_event_loop()):
+ loop.close()
+ self.assertFalse(global_event_loop().is_closed())
diff --git a/lib/portage/tests/util/futures/asyncio/test_event_loop_in_fork.py b/lib/portage/tests/util/futures/asyncio/test_event_loop_in_fork.py
new file mode 100644
index 000000000..177953437
--- /dev/null
+++ b/lib/portage/tests/util/futures/asyncio/test_event_loop_in_fork.py
@@ -0,0 +1,65 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import multiprocessing
+import os
+
+from portage.tests import TestCase
+from portage.util._eventloop.global_event_loop import global_event_loop
+from portage.util.futures import asyncio
+from portage.util.futures.unix_events import DefaultEventLoopPolicy
+
+
+def fork_main(parent_conn, child_conn):
+ parent_conn.close()
+ loop = asyncio._wrap_loop()
+ # This fails with python's default event loop policy,
+ # see https://bugs.python.org/issue22087.
+ loop.run_until_complete(asyncio.sleep(0.1, loop=loop))
+ loop.close()
+
+
+def async_main(fork_exitcode, loop=None):
+ loop = asyncio._wrap_loop(loop)
+
+ # Since python2.7 does not support Process.sentinel, use Pipe to
+ # monitor for process exit.
+ parent_conn, child_conn = multiprocessing.Pipe()
+
+ def eof_callback(proc):
+ loop.remove_reader(parent_conn.fileno())
+ parent_conn.close()
+ proc.join()
+ fork_exitcode.set_result(proc.exitcode)
+
+ proc = multiprocessing.Process(target=fork_main, args=(parent_conn, child_conn))
+ loop.add_reader(parent_conn.fileno(), eof_callback, proc)
+ proc.start()
+ child_conn.close()
+
+
+class EventLoopInForkTestCase(TestCase):
+ """
+ The default asyncio event loop policy does not support loops
+ running in forks, see https://bugs.python.org/issue22087.
+ Portage's DefaultEventLoopPolicy supports forks.
+ """
+
+ def testEventLoopInForkTestCase(self):
+ initial_policy = asyncio.get_event_loop_policy()
+ if not isinstance(initial_policy, DefaultEventLoopPolicy):
+ asyncio.set_event_loop_policy(DefaultEventLoopPolicy())
+ loop = None
+ try:
+ loop = asyncio._wrap_loop()
+ fork_exitcode = loop.create_future()
+ # Make async_main fork while the loop is running, which would
+ # trigger https://bugs.python.org/issue22087 with asyncio's
+ # default event loop policy.
+ loop.call_soon(async_main, fork_exitcode)
+ self.assertEqual(loop.run_until_complete(fork_exitcode), os.EX_OK)
+ finally:
+ asyncio.set_event_loop_policy(initial_policy)
+ if loop not in (None, global_event_loop()):
+ loop.close()
+ self.assertFalse(global_event_loop().is_closed())
diff --git a/lib/portage/tests/util/futures/asyncio/test_pipe_closed.py b/lib/portage/tests/util/futures/asyncio/test_pipe_closed.py
new file mode 100644
index 000000000..507385c04
--- /dev/null
+++ b/lib/portage/tests/util/futures/asyncio/test_pipe_closed.py
@@ -0,0 +1,151 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import os
+import pty
+import shutil
+import socket
+import sys
+import tempfile
+
+from portage.tests import TestCase
+from portage.util._eventloop.global_event_loop import global_event_loop
+from portage.util.futures import asyncio
+from portage.util.futures.unix_events import (
+ DefaultEventLoopPolicy,
+ _set_nonblocking,
+)
+
+
+class _PipeClosedTestCase(object):
+
+ def test_pipe(self):
+ read_end, write_end = os.pipe()
+ self._do_test(read_end, write_end)
+
+ def test_pty_device(self):
+ try:
+ read_end, write_end = pty.openpty()
+ except EnvironmentError:
+ self.skipTest('pty not available')
+ self._do_test(read_end, write_end)
+
+ def test_domain_socket(self):
+ if sys.version_info >= (3, 2):
+ read_end, write_end = socket.socketpair()
+ else:
+ self.skipTest('socket detach not supported')
+ self._do_test(read_end.detach(), write_end.detach())
+
+ def test_named_pipe(self):
+ tempdir = tempfile.mkdtemp()
+ try:
+ fifo_path = os.path.join(tempdir, 'fifo')
+ os.mkfifo(fifo_path)
+ self._do_test(os.open(fifo_path, os.O_NONBLOCK|os.O_RDONLY),
+ os.open(fifo_path, os.O_NONBLOCK|os.O_WRONLY))
+ finally:
+ shutil.rmtree(tempdir)
+
+
+class ReaderPipeClosedTestCase(_PipeClosedTestCase, TestCase):
+ """
+ Test that a reader callback is called after the other end of
+ the pipe has been closed.
+ """
+ def _do_test(self, read_end, write_end):
+ initial_policy = asyncio.get_event_loop_policy()
+ if not isinstance(initial_policy, DefaultEventLoopPolicy):
+ asyncio.set_event_loop_policy(DefaultEventLoopPolicy())
+
+ loop = asyncio._wrap_loop()
+ read_end = os.fdopen(read_end, 'rb', 0)
+ write_end = os.fdopen(write_end, 'wb', 0)
+ try:
+ def reader_callback():
+ if not reader_callback.called.done():
+ reader_callback.called.set_result(None)
+
+ reader_callback.called = loop.create_future()
+ loop.add_reader(read_end.fileno(), reader_callback)
+
+ # Allow the loop to check for IO events, and assert
+ # that our future is still not done.
+ loop.run_until_complete(asyncio.sleep(0, loop=loop))
+ self.assertFalse(reader_callback.called.done())
+
+ # Demonstrate that the callback is called after the
+ # other end of the pipe has been closed.
+ write_end.close()
+ loop.run_until_complete(reader_callback.called)
+ finally:
+ loop.remove_reader(read_end.fileno())
+ write_end.close()
+ read_end.close()
+ asyncio.set_event_loop_policy(initial_policy)
+ if loop not in (None, global_event_loop()):
+ loop.close()
+ self.assertFalse(global_event_loop().is_closed())
+
+
+class WriterPipeClosedTestCase(_PipeClosedTestCase, TestCase):
+ """
+ Test that a writer callback is called after the other end of
+ the pipe has been closed.
+ """
+ def _do_test(self, read_end, write_end):
+ initial_policy = asyncio.get_event_loop_policy()
+ if not isinstance(initial_policy, DefaultEventLoopPolicy):
+ asyncio.set_event_loop_policy(DefaultEventLoopPolicy())
+
+ loop = asyncio._wrap_loop()
+ read_end = os.fdopen(read_end, 'rb', 0)
+ write_end = os.fdopen(write_end, 'wb', 0)
+ try:
+ def writer_callback():
+ if not writer_callback.called.done():
+ writer_callback.called.set_result(None)
+
+ writer_callback.called = loop.create_future()
+ _set_nonblocking(write_end.fileno())
+ loop.add_writer(write_end.fileno(), writer_callback)
+
+ # With pypy we've seen intermittent spurious writer callbacks
+ # here, so retry until the correct state is achieved.
+ tries = 10
+ while tries:
+ tries -= 1
+
+ # Fill up the pipe, so that no writer callbacks should be
+ # received until the state has changed.
+ while True:
+ try:
+ os.write(write_end.fileno(), 512 * b'0')
+ except EnvironmentError as e:
+ if e.errno != errno.EAGAIN:
+ raise
+ break
+
+ # Allow the loop to check for IO events, and assert
+ # that our future is still not done.
+ loop.run_until_complete(asyncio.sleep(0, loop=loop))
+ if writer_callback.called.done():
+ writer_callback.called = loop.create_future()
+ else:
+ break
+
+ self.assertFalse(writer_callback.called.done())
+
+ # Demonstrate that the callback is called after the
+ # other end of the pipe has been closed.
+ read_end.close()
+ loop.run_until_complete(writer_callback.called)
+ finally:
+ loop.remove_writer(write_end.fileno())
+ write_end.close()
+ read_end.close()
+ asyncio.set_event_loop_policy(initial_policy)
+ if loop not in (None, global_event_loop()):
+ loop.close()
+ self.assertFalse(global_event_loop().is_closed())
diff --git a/lib/portage/tests/util/futures/asyncio/test_policy_wrapper_recursion.py b/lib/portage/tests/util/futures/asyncio/test_policy_wrapper_recursion.py
new file mode 100644
index 000000000..d3cd94b35
--- /dev/null
+++ b/lib/portage/tests/util/futures/asyncio/test_policy_wrapper_recursion.py
@@ -0,0 +1,29 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+try:
+ import asyncio
+except ImportError:
+ asyncio = None
+
+from portage.tests import TestCase
+from portage.util.futures.unix_events import DefaultEventLoopPolicy
+
+
+class PolicyWrapperRecursionTestCase(TestCase):
+ def testPolicyWrapperRecursion(self):
+ if asyncio is None:
+ self.skipTest('asyncio is not available')
+
+ initial_policy = asyncio.get_event_loop_policy()
+ if not isinstance(initial_policy, DefaultEventLoopPolicy):
+ asyncio.set_event_loop_policy(DefaultEventLoopPolicy())
+
+ try:
+ with self.assertRaises(NotImplementedError):
+ asyncio.get_event_loop()
+
+ with self.assertRaises(NotImplementedError):
+ asyncio.get_child_watcher()
+ finally:
+ asyncio.set_event_loop_policy(initial_policy)
diff --git a/lib/portage/tests/util/futures/asyncio/test_run_until_complete.py b/lib/portage/tests/util/futures/asyncio/test_run_until_complete.py
new file mode 100644
index 000000000..c0e86ae5e
--- /dev/null
+++ b/lib/portage/tests/util/futures/asyncio/test_run_until_complete.py
@@ -0,0 +1,34 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.util._eventloop.global_event_loop import global_event_loop
+from portage.util.futures import asyncio
+from portage.util.futures.unix_events import DefaultEventLoopPolicy
+
+
+class RunUntilCompleteTestCase(TestCase):
+ def test_add_done_callback(self):
+ initial_policy = asyncio.get_event_loop_policy()
+ if not isinstance(initial_policy, DefaultEventLoopPolicy):
+ asyncio.set_event_loop_policy(DefaultEventLoopPolicy())
+
+ loop = None
+ try:
+ loop = asyncio._wrap_loop()
+ f1 = loop.create_future()
+ f2 = loop.create_future()
+ f1.add_done_callback(f2.set_result)
+ loop.call_soon(lambda: f1.set_result(None))
+ loop.run_until_complete(f1)
+ self.assertEqual(f1.done(), True)
+
+ # This proves that done callbacks of f1 are executed before
+ # loop.run_until_complete(f1) returns, which is how asyncio's
+ # default event loop behaves.
+ self.assertEqual(f2.done(), True)
+ finally:
+ asyncio.set_event_loop_policy(initial_policy)
+ if loop not in (None, global_event_loop()):
+ loop.close()
+ self.assertFalse(global_event_loop().is_closed())
diff --git a/lib/portage/tests/util/futures/asyncio/test_subprocess_exec.py b/lib/portage/tests/util/futures/asyncio/test_subprocess_exec.py
new file mode 100644
index 000000000..5a812ba6a
--- /dev/null
+++ b/lib/portage/tests/util/futures/asyncio/test_subprocess_exec.py
@@ -0,0 +1,236 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+import subprocess
+
+try:
+ from asyncio import create_subprocess_exec
+except ImportError:
+ create_subprocess_exec = None
+
+from portage.process import find_binary
+from portage.tests import TestCase
+from portage.util._eventloop.global_event_loop import global_event_loop
+from portage.util.futures import asyncio
+from portage.util.futures.executor.fork import ForkExecutor
+from portage.util.futures.unix_events import DefaultEventLoopPolicy
+from _emerge.PipeReader import PipeReader
+
+
+def reader(input_file, loop=None):
+ """
+ Asynchronously read a binary input file.
+
+ @param input_file: binary input file
+ @type input_file: file
+ @param loop: event loop
+ @type loop: EventLoop
+ @return: bytes
+ @rtype: asyncio.Future (or compatible)
+ """
+ loop = asyncio._wrap_loop(loop)
+ future = loop.create_future()
+ _Reader(future, input_file, loop)
+ return future
+
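+# Typical use, as in the tests below:
+#     output = asyncio.ensure_future(reader(stdout_pr, loop=loop), loop=loop)
+#     data = loop.run_until_complete(output)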
+
+class _Reader(object):
+ def __init__(self, future, input_file, loop):
+ self._future = future
+ self._pipe_reader = PipeReader(
+ input_files={'input_file':input_file}, scheduler=loop)
+
+ self._future.add_done_callback(self._cancel_callback)
+ self._pipe_reader.addExitListener(self._eof)
+ self._pipe_reader.start()
+
+ def _cancel_callback(self, future):
+ if future.cancelled():
+ self._cancel()
+
+ def _eof(self, pipe_reader):
+ self._pipe_reader = None
+ self._future.set_result(pipe_reader.getvalue())
+
+ def _cancel(self):
+ if self._pipe_reader is not None and self._pipe_reader.poll() is None:
+ self._pipe_reader.removeExitListener(self._eof)
+ self._pipe_reader.cancel()
+ self._pipe_reader = None
+
+
+class SubprocessExecTestCase(TestCase):
+ def _run_test(self, test):
+ initial_policy = asyncio.get_event_loop_policy()
+ if not isinstance(initial_policy, DefaultEventLoopPolicy):
+ asyncio.set_event_loop_policy(DefaultEventLoopPolicy())
+
+ loop = asyncio._wrap_loop()
+ try:
+ test(loop)
+ finally:
+ asyncio.set_event_loop_policy(initial_policy)
+ if loop not in (None, global_event_loop()):
+ loop.close()
+ self.assertFalse(global_event_loop().is_closed())
+
+ def testEcho(self):
+ if create_subprocess_exec is None:
+ self.skipTest('create_subprocess_exec not implemented for python2')
+
+ args_tuple = (b'hello', b'world')
+ echo_binary = find_binary("echo")
+ self.assertNotEqual(echo_binary, None)
+ echo_binary = echo_binary.encode()
+
+ # Use os.pipe(), since this loop does not implement the
+ # ReadTransport necessary for subprocess.PIPE support.
+ stdout_pr, stdout_pw = os.pipe()
+ stdout_pr = os.fdopen(stdout_pr, 'rb', 0)
+ stdout_pw = os.fdopen(stdout_pw, 'wb', 0)
+ files = [stdout_pr, stdout_pw]
+
+ def test(loop):
+ output = None
+ try:
+ with open(os.devnull, 'rb', 0) as devnull:
+ proc = loop.run_until_complete(
+ create_subprocess_exec(
+ echo_binary, *args_tuple,
+ stdin=devnull, stdout=stdout_pw, stderr=stdout_pw))
+
+ # This belongs exclusively to the subprocess now.
+ stdout_pw.close()
+
+ output = asyncio.ensure_future(
+ reader(stdout_pr, loop=loop), loop=loop)
+
+ self.assertEqual(
+ loop.run_until_complete(proc.wait()), os.EX_OK)
+ self.assertEqual(
+ tuple(loop.run_until_complete(output).split()), args_tuple)
+ finally:
+ if output is not None and not output.done():
+ output.cancel()
+ for f in files:
+ f.close()
+
+ self._run_test(test)
+
+ def testCat(self):
+ if create_subprocess_exec is None:
+ self.skipTest('create_subprocess_exec not implemented for python2')
+
+ stdin_data = b'hello world'
+ cat_binary = find_binary("cat")
+ self.assertNotEqual(cat_binary, None)
+ cat_binary = cat_binary.encode()
+
+ # Use os.pipe(), since this loop does not implement the
+ # ReadTransport necessary for subprocess.PIPE support.
+ stdout_pr, stdout_pw = os.pipe()
+ stdout_pr = os.fdopen(stdout_pr, 'rb', 0)
+ stdout_pw = os.fdopen(stdout_pw, 'wb', 0)
+
+ stdin_pr, stdin_pw = os.pipe()
+ stdin_pr = os.fdopen(stdin_pr, 'rb', 0)
+ stdin_pw = os.fdopen(stdin_pw, 'wb', 0)
+
+ files = [stdout_pr, stdout_pw, stdin_pr, stdin_pw]
+
+ def test(loop):
+ output = None
+ try:
+ proc = loop.run_until_complete(
+ create_subprocess_exec(
+ cat_binary,
+ stdin=stdin_pr, stdout=stdout_pw, stderr=stdout_pw))
+
+ # These belong exclusively to the subprocess now.
+ stdout_pw.close()
+ stdin_pr.close()
+
+ output = asyncio.ensure_future(
+ reader(stdout_pr, loop=loop), loop=loop)
+
+ with ForkExecutor(loop=loop) as executor:
+ writer = asyncio.ensure_future(loop.run_in_executor(
+ executor, stdin_pw.write, stdin_data), loop=loop)
+
+ # This belongs exclusively to the writer now.
+ stdin_pw.close()
+ loop.run_until_complete(writer)
+
+ self.assertEqual(loop.run_until_complete(proc.wait()), os.EX_OK)
+ self.assertEqual(loop.run_until_complete(output), stdin_data)
+ finally:
+ if output is not None and not output.done():
+ output.cancel()
+ for f in files:
+ f.close()
+
+ self._run_test(test)
+
+ def testReadTransport(self):
+ """
+ Test asyncio.create_subprocess_exec(stdout=subprocess.PIPE) which
+ requires an AbstractEventLoop.connect_read_pipe implementation
+ (and a ReadTransport implementation for it to return).
+ """
+ if create_subprocess_exec is None:
+ self.skipTest('create_subprocess_exec not implemented for python2')
+
+ args_tuple = (b'hello', b'world')
+ echo_binary = find_binary("echo")
+ self.assertNotEqual(echo_binary, None)
+ echo_binary = echo_binary.encode()
+
+ def test(loop):
+ with open(os.devnull, 'rb', 0) as devnull:
+ proc = loop.run_until_complete(
+ create_subprocess_exec(
+ echo_binary, *args_tuple,
+ stdin=devnull,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
+
+ self.assertEqual(
+ tuple(loop.run_until_complete(proc.stdout.read()).split()),
+ args_tuple)
+ self.assertEqual(loop.run_until_complete(proc.wait()), os.EX_OK)
+
+ self._run_test(test)
+
+ def testWriteTransport(self):
+ """
+ Test asyncio.create_subprocess_exec(stdin=subprocess.PIPE) which
+ requires an AbstractEventLoop.connect_write_pipe implementation
+ (and a WriteTransport implementation for it to return).
+ """
+ if create_subprocess_exec is None:
+ self.skipTest('create_subprocess_exec not implemented for python2')
+
+ stdin_data = b'hello world'
+ cat_binary = find_binary("cat")
+ self.assertNotEqual(cat_binary, None)
+ cat_binary = cat_binary.encode()
+
+ def test(loop):
+ proc = loop.run_until_complete(
+ create_subprocess_exec(
+ cat_binary,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT))
+
+ # This buffers data when necessary to avoid blocking.
+ proc.stdin.write(stdin_data)
+ # Any buffered data is written asynchronously after the
+ # close method is called.
+ proc.stdin.close()
+
+ self.assertEqual(
+ loop.run_until_complete(proc.stdout.read()),
+ stdin_data)
+ self.assertEqual(loop.run_until_complete(proc.wait()), os.EX_OK)
+
+ self._run_test(test)
diff --git a/lib/portage/tests/util/futures/asyncio/test_wakeup_fd_sigchld.py b/lib/portage/tests/util/futures/asyncio/test_wakeup_fd_sigchld.py
new file mode 100644
index 000000000..abc67c241
--- /dev/null
+++ b/lib/portage/tests/util/futures/asyncio/test_wakeup_fd_sigchld.py
@@ -0,0 +1,76 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+import subprocess
+
+import portage
+from portage.const import PORTAGE_PYM_PATH
+from portage.tests import TestCase
+from portage.util._eventloop.global_event_loop import _asyncio_enabled
+
+
+class WakeupFdSigchldTestCase(TestCase):
+ def testWakeupFdSigchld(self):
+ """
+ This is expected to trigger a bunch of messages like the following
+ unless the fix for bug 655656 works as intended:
+
+ Exception ignored when trying to write to the signal wakeup fd:
+ BlockingIOError: [Errno 11] Resource temporarily unavailable
+ """
+ if not _asyncio_enabled:
+ self.skipTest('asyncio not enabled')
+
+ script = """
+import asyncio as _real_asyncio
+import os
+import signal
+import sys
+
+import portage
+
+# In order to avoid potential interference with API consumers, wakeup
+# fd handling is enabled only when portage._internal_caller is True.
+portage._internal_caller = True
+
+from portage.util.futures import asyncio
+
+loop = asyncio._wrap_loop()
+
+# Cause the loop to register a child watcher.
+proc = loop.run_until_complete(_real_asyncio.create_subprocess_exec('sleep', '0'))
+loop.run_until_complete(proc.wait())
+
+for i in range(8192):
+ os.kill(os.getpid(), signal.SIGCHLD)
+
+# Verify that the child watcher still works correctly
+# (this will hang if it doesn't).
+proc = loop.run_until_complete(_real_asyncio.create_subprocess_exec('sleep', '0'))
+loop.run_until_complete(proc.wait())
+loop.close()
+sys.stdout.write('success')
+sys.exit(os.EX_OK)
+"""
+
+ pythonpath = os.environ.get('PYTHONPATH', '').strip().split(':')
+ if not pythonpath or pythonpath[0] != PORTAGE_PYM_PATH:
+ pythonpath = [PORTAGE_PYM_PATH] + pythonpath
+ pythonpath = ':'.join(filter(None, pythonpath))
+
+ proc = subprocess.Popen(
+ [portage._python_interpreter, '-c', script],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+ env=dict(os.environ, PYTHONPATH=pythonpath))
+
+ out, err = proc.communicate()
+ try:
+ self.assertEqual(out[:100], b'success')
+ except Exception:
+ portage.writemsg(''.join('{}\n'.format(line)
+ for line in out.decode(errors='replace').splitlines()[:50]),
+ noiselevel=-1)
+ raise
+
+ self.assertEqual(proc.wait(), os.EX_OK)
diff --git a/lib/portage/tests/util/futures/test_compat_coroutine.py b/lib/portage/tests/util/futures/test_compat_coroutine.py
new file mode 100644
index 000000000..cbc070869
--- /dev/null
+++ b/lib/portage/tests/util/futures/test_compat_coroutine.py
@@ -0,0 +1,159 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.util.futures import asyncio
+from portage.util.futures.compat_coroutine import (
+ coroutine,
+ coroutine_return,
+)
+from portage.tests import TestCase
+
+
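+# compat_coroutine provides generator-based coroutines for portability to
+# Python 2: functions decorated with @coroutine yield futures, and because a
+# Python 2 generator cannot use "return value", coroutine_return() is used to
+# deliver the result to the caller.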
+class CompatCoroutineTestCase(TestCase):
+
+ def test_returning_coroutine(self):
+ @coroutine
+ def returning_coroutine():
+ yield asyncio.sleep(0)
+ coroutine_return('success')
+
+ self.assertEqual('success',
+ asyncio.get_event_loop().run_until_complete(returning_coroutine()))
+
+ def test_raising_coroutine(self):
+
+ class TestException(Exception):
+ pass
+
+ @coroutine
+ def raising_coroutine():
+ yield asyncio.sleep(0)
+ raise TestException('exception')
+
+ self.assertRaises(TestException,
+ asyncio.get_event_loop().run_until_complete, raising_coroutine())
+
+ def test_catching_coroutine(self):
+
+ class TestException(Exception):
+ pass
+
+ @coroutine
+ def catching_coroutine(loop=None):
+ loop = asyncio._wrap_loop(loop)
+ future = loop.create_future()
+ loop.call_soon(future.set_exception, TestException('exception'))
+ try:
+ yield future
+ except TestException:
+ self.assertTrue(True)
+ else:
+ self.assertTrue(False)
+ coroutine_return('success')
+
+ loop = asyncio.get_event_loop()
+ self.assertEqual('success',
+ loop.run_until_complete(catching_coroutine(loop=loop)))
+
+ def test_cancelled_coroutine(self):
+
+ @coroutine
+ def cancelled_coroutine(loop=None):
+ loop = asyncio._wrap_loop(loop)
+ while True:
+ yield loop.create_future()
+
+ loop = asyncio.get_event_loop()
+ future = cancelled_coroutine(loop=loop)
+ loop.call_soon(future.cancel)
+
+ self.assertRaises(asyncio.CancelledError,
+ loop.run_until_complete, future)
+
+ def test_cancelled_future(self):
+
+ @coroutine
+ def cancelled_future_coroutine(loop=None):
+ loop = asyncio._wrap_loop(loop)
+ while True:
+ future = loop.create_future()
+ loop.call_soon(future.cancel)
+ yield future
+
+ loop = asyncio.get_event_loop()
+ self.assertRaises(asyncio.CancelledError,
+ loop.run_until_complete, cancelled_future_coroutine(loop=loop))
+
+ def test_yield_expression_result(self):
+ @coroutine
+ def yield_expression_coroutine():
+ for i in range(3):
+ x = yield asyncio.sleep(0, result=i)
+ self.assertEqual(x, i)
+
+ asyncio.get_event_loop().run_until_complete(yield_expression_coroutine())
+
+ def test_method_coroutine(self):
+
+ class Cubby(object):
+
+ _empty = object()
+
+ def __init__(self, loop):
+ self._loop = loop
+ self._value = self._empty
+ self._waiters = []
+
+ def _notify(self):
+ waiters = self._waiters
+ self._waiters = []
+ for waiter in waiters:
+ waiter.cancelled() or waiter.set_result(None)
+
+ def _wait(self):
+ waiter = self._loop.create_future()
+ self._waiters.append(waiter)
+ return waiter
+
+ @coroutine
+ def read(self):
+ while self._value is self._empty:
+ yield self._wait()
+
+ value = self._value
+ self._value = self._empty
+ self._notify()
+ coroutine_return(value)
+
+ @coroutine
+ def write(self, value):
+ while self._value is not self._empty:
+ yield self._wait()
+
+ self._value = value
+ self._notify()
+
+ @coroutine
+ def writer_coroutine(cubby, values, sentinel):
+ for value in values:
+ yield cubby.write(value)
+ yield cubby.write(sentinel)
+
+ @coroutine
+ def reader_coroutine(cubby, sentinel):
+ results = []
+ while True:
+ result = yield cubby.read()
+ if result == sentinel:
+ break
+ results.append(result)
+ coroutine_return(results)
+
+ loop = asyncio.get_event_loop()
+ cubby = Cubby(loop)
+ values = list(range(3))
+ writer = asyncio.ensure_future(writer_coroutine(cubby, values, None), loop=loop)
+ reader = asyncio.ensure_future(reader_coroutine(cubby, None), loop=loop)
+ loop.run_until_complete(asyncio.wait([writer, reader]))
+
+ self.assertEqual(reader.result(), values)
diff --git a/lib/portage/tests/util/futures/test_done_callback.py b/lib/portage/tests/util/futures/test_done_callback.py
new file mode 100644
index 000000000..76b727b09
--- /dev/null
+++ b/lib/portage/tests/util/futures/test_done_callback.py
@@ -0,0 +1,35 @@
+# Copyright 2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.util._eventloop.global_event_loop import global_event_loop
+
+
+class FutureDoneCallbackTestCase(TestCase):
+
+ def testFutureDoneCallback(self):
+
+ event_loop = global_event_loop()
+
+ def done_callback(finished):
+ done_callback_called.set_result(True)
+
+ done_callback_called = event_loop.create_future()
+ finished = event_loop.create_future()
+ finished.add_done_callback(done_callback)
+ event_loop.call_soon(finished.set_result, True)
+ event_loop.run_until_complete(done_callback_called)
+
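+ # The second half verifies that a callback removed with
+ # remove_done_callback() is never invoked, while the remaining
+ # callback still fires.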
+ def done_callback2(finished):
+ done_callback2_called.set_result(True)
+
+ done_callback_called = event_loop.create_future()
+ done_callback2_called = event_loop.create_future()
+ finished = event_loop.create_future()
+ finished.add_done_callback(done_callback)
+ finished.add_done_callback(done_callback2)
+ finished.remove_done_callback(done_callback)
+ event_loop.call_soon(finished.set_result, True)
+ event_loop.run_until_complete(done_callback2_called)
+
+ self.assertFalse(done_callback_called.done())
diff --git a/lib/portage/tests/util/futures/test_iter_completed.py b/lib/portage/tests/util/futures/test_iter_completed.py
new file mode 100644
index 000000000..9ab410a9e
--- /dev/null
+++ b/lib/portage/tests/util/futures/test_iter_completed.py
@@ -0,0 +1,86 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import time
+from portage.tests import TestCase
+from portage.util._async.ForkProcess import ForkProcess
+from portage.util._eventloop.global_event_loop import global_event_loop
+from portage.util.futures import asyncio
+from portage.util.futures.iter_completed import (
+ iter_completed,
+ async_iter_completed,
+)
+
+
+class SleepProcess(ForkProcess):
+ __slots__ = ('future', 'seconds')
+ def _start(self):
+ self.addExitListener(self._future_done)
+ ForkProcess._start(self)
+
+ def _future_done(self, task):
+ if not self.future.cancelled():
+ self.future.set_result(self.seconds)
+
+ def _run(self):
+ time.sleep(self.seconds)
+
+
+class IterCompletedTestCase(TestCase):
+
+ def testIterCompleted(self):
+
+ # Mark this as todo, since we don't want to fail if heavy system
+ # load causes the tasks to finish in an unexpected order.
+ self.todo = True
+
+ loop = global_event_loop()
+ tasks = [
+ SleepProcess(seconds=0.200),
+ SleepProcess(seconds=0.100),
+ SleepProcess(seconds=0.001),
+ ]
+
+ expected_order = sorted(task.seconds for task in tasks)
+
+ def future_generator():
+ for task in tasks:
+ task.future = loop.create_future()
+ task.scheduler = loop
+ task.start()
+ yield task.future
+
+ for seconds, future in zip(expected_order, iter_completed(future_generator(),
+ max_jobs=True, max_load=None, loop=loop)):
+ self.assertEqual(seconds, future.result())
+
+ def testAsyncCancel(self):
+
+ loop = global_event_loop()
+ input_futures = set()
+ future_count = 3
+
+ def future_generator():
+ for i in range(future_count):
+ future = loop.create_future()
+ loop.call_soon(lambda future: None if future.done()
+ else future.set_result(None), future)
+ input_futures.add(future)
+ yield future
+
+ for future_done_set in async_iter_completed(future_generator(),
+ max_jobs=True, max_load=True, loop=loop):
+ future_done_set.cancel()
+ break
+
+ # With max_jobs=True, async_iter_completed should have executed
+ # the generator until it raised StopIteration.
+ self.assertEqual(future_count, len(input_futures))
+
+ loop.run_until_complete(asyncio.wait(input_futures, loop=loop))
+
+ # The futures may have results or they may have been cancelled
+ # by TaskScheduler, and behavior varies depending on the python
+ # interpreter.
+ for future in input_futures:
+ future.cancelled() or future.result()
diff --git a/lib/portage/tests/util/futures/test_retry.py b/lib/portage/tests/util/futures/test_retry.py
new file mode 100644
index 000000000..7a1e76280
--- /dev/null
+++ b/lib/portage/tests/util/futures/test_retry.py
@@ -0,0 +1,234 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+try:
+ from concurrent.futures import ThreadPoolExecutor
+except ImportError:
+ ThreadPoolExecutor = None
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+import sys
+
+from portage.tests import TestCase
+from portage.util._eventloop.global_event_loop import global_event_loop
+from portage.util.backoff import RandomExponentialBackoff
+from portage.util.futures import asyncio
+from portage.util.futures.retry import retry
+from portage.util.futures.executor.fork import ForkExecutor
+from portage.util.monotonic import monotonic
+
+
+class SucceedLaterException(Exception):
+ pass
+
+
+class SucceedLater(object):
+ """
+ A callable object that succeeds after a given duration of time has passed.
+ """
+ def __init__(self, duration):
+ self._succeed_time = monotonic() + duration
+
+ def __call__(self):
+ loop = global_event_loop()
+ result = loop.create_future()
+ remaining = self._succeed_time - monotonic()
+ if remaining > 0:
+ loop.call_soon_threadsafe(lambda: None if result.done() else
+ result.set_exception(SucceedLaterException(
+ 'time until success: {} seconds'.format(remaining))))
+ else:
+ loop.call_soon_threadsafe(lambda: None if result.done() else
+ result.set_result('success'))
+ return result
+
+
+class SucceedNeverException(Exception):
+ pass
+
+
+class SucceedNever(object):
+ """
+ A callable object that never succeeds.
+ """
+ def __call__(self):
+ loop = global_event_loop()
+ result = loop.create_future()
+ loop.call_soon_threadsafe(lambda: None if result.done() else
+ result.set_exception(SucceedNeverException('expected failure')))
+ return result
+
+
+class HangForever(object):
+ """
+ A callable object that sleeps forever.
+ """
+ def __call__(self):
+ return global_event_loop().create_future()
+
+
+class RetryTestCase(TestCase):
+
+ def _wrap_coroutine_func(self, coroutine_func):
+ """
+ Derived classes may override this method in order to implement
+ alternative forms of execution.
+ """
+ return coroutine_func
+
+ def testSucceedLater(self):
+ loop = global_event_loop()
+ func_coroutine = self._wrap_coroutine_func(SucceedLater(1))
+ decorator = retry(try_max=9999,
+ delay_func=RandomExponentialBackoff(multiplier=0.1, base=2))
+ decorated_func = decorator(func_coroutine, loop=loop)
+ result = loop.run_until_complete(decorated_func())
+ self.assertEqual(result, 'success')
+
+ def testSucceedNever(self):
+ loop = global_event_loop()
+ func_coroutine = self._wrap_coroutine_func(SucceedNever())
+ decorator = retry(try_max=4, try_timeout=None,
+ delay_func=RandomExponentialBackoff(multiplier=0.1, base=2))
+ decorated_func = decorator(func_coroutine, loop=loop)
+ done, pending = loop.run_until_complete(asyncio.wait([decorated_func()], loop=loop))
+ self.assertEqual(len(done), 1)
+ self.assertTrue(isinstance(done.pop().exception().__cause__, SucceedNeverException))
+
+ def testSucceedNeverReraise(self):
+ loop = global_event_loop()
+ func_coroutine = self._wrap_coroutine_func(SucceedNever())
+ decorator = retry(reraise=True, try_max=4, try_timeout=None,
+ delay_func=RandomExponentialBackoff(multiplier=0.1, base=2))
+ decorated_func = decorator(func_coroutine, loop=loop)
+ done, pending = loop.run_until_complete(asyncio.wait([decorated_func()], loop=loop))
+ self.assertEqual(len(done), 1)
+ self.assertTrue(isinstance(done.pop().exception(), SucceedNeverException))
+
+ def testHangForever(self):
+ loop = global_event_loop()
+ func_coroutine = self._wrap_coroutine_func(HangForever())
+ decorator = retry(try_max=2, try_timeout=0.1,
+ delay_func=RandomExponentialBackoff(multiplier=0.1, base=2))
+ decorated_func = decorator(func_coroutine, loop=loop)
+ done, pending = loop.run_until_complete(asyncio.wait([decorated_func()], loop=loop))
+ self.assertEqual(len(done), 1)
+ self.assertTrue(isinstance(done.pop().exception().__cause__, asyncio.TimeoutError))
+
+ def testHangForeverReraise(self):
+ loop = global_event_loop()
+ func_coroutine = self._wrap_coroutine_func(HangForever())
+ decorator = retry(reraise=True, try_max=2, try_timeout=0.1,
+ delay_func=RandomExponentialBackoff(multiplier=0.1, base=2))
+ decorated_func = decorator(func_coroutine, loop=loop)
+ done, pending = loop.run_until_complete(asyncio.wait([decorated_func()], loop=loop))
+ self.assertEqual(len(done), 1)
+ self.assertTrue(isinstance(done.pop().exception(), asyncio.TimeoutError))
+
+ def testCancelRetry(self):
+ loop = global_event_loop()
+ func_coroutine = self._wrap_coroutine_func(SucceedNever())
+ decorator = retry(try_timeout=0.1,
+ delay_func=RandomExponentialBackoff(multiplier=0.1, base=2))
+ decorated_func = decorator(func_coroutine, loop=loop)
+ future = decorated_func()
+ loop.call_later(0.3, future.cancel)
+ done, pending = loop.run_until_complete(asyncio.wait([future], loop=loop))
+ self.assertEqual(len(done), 1)
+ self.assertTrue(done.pop().cancelled())
+
+ def testOverallTimeoutWithException(self):
+ loop = global_event_loop()
+ func_coroutine = self._wrap_coroutine_func(SucceedNever())
+ decorator = retry(try_timeout=0.1, overall_timeout=0.3,
+ delay_func=RandomExponentialBackoff(multiplier=0.1, base=2))
+ decorated_func = decorator(func_coroutine, loop=loop)
+ done, pending = loop.run_until_complete(asyncio.wait([decorated_func()], loop=loop))
+ self.assertEqual(len(done), 1)
+ self.assertTrue(isinstance(done.pop().exception().__cause__, SucceedNeverException))
+
+ def testOverallTimeoutWithTimeoutError(self):
+ loop = global_event_loop()
+ # results in TimeoutError because it hangs forever
+ func_coroutine = self._wrap_coroutine_func(HangForever())
+ decorator = retry(try_timeout=0.1, overall_timeout=0.3,
+ delay_func=RandomExponentialBackoff(multiplier=0.1, base=2))
+ decorated_func = decorator(func_coroutine, loop=loop)
+ done, pending = loop.run_until_complete(asyncio.wait([decorated_func()], loop=loop))
+ self.assertEqual(len(done), 1)
+ self.assertTrue(isinstance(done.pop().exception().__cause__, asyncio.TimeoutError))
+
+
+class RetryForkExecutorTestCase(RetryTestCase):
+ """
+ Wrap each coroutine function with AbstractEventLoop.run_in_executor,
+ in order to test the event loop's default executor. The executor
+ may use either a thread or a subprocess, and either case is
+ automatically detected and handled.
+ """
+ def __init__(self, *pargs, **kwargs):
+ super(RetryForkExecutorTestCase, self).__init__(*pargs, **kwargs)
+ self._executor = None
+
+ def _setUpExecutor(self):
+ self._executor = ForkExecutor()
+
+ def _tearDownExecutor(self):
+ if self._executor is not None:
+ self._executor.shutdown(wait=True)
+ self._executor = None
+
+ def setUp(self):
+ self._setUpExecutor()
+
+ def tearDown(self):
+ self._tearDownExecutor()
+
+ def _wrap_coroutine_func(self, coroutine_func):
+ parent_loop = global_event_loop()
+
+ # Since ThreadPoolExecutor does not propagate cancellation of a
+ # parent_future to the underlying coroutine, use kill_switch to
+ # propagate task cancellation to wrapper, so that HangForever's
+ # thread returns when retry eventually cancels parent_future.
+ def wrapper(kill_switch):
+ loop = global_event_loop()
+ if loop is parent_loop:
+ # thread in main process
+ result = coroutine_func()
+ event = threading.Event()
+ loop.call_soon_threadsafe(result.add_done_callback,
+ lambda result: event.set())
+ loop.call_soon_threadsafe(kill_switch.add_done_callback,
+ lambda kill_switch: event.set())
+ event.wait()
+ return result.result()
+ else:
+ # child process
+ try:
+ return loop.run_until_complete(coroutine_func())
+ finally:
+ loop.close()
+
+ def execute_wrapper():
+ kill_switch = parent_loop.create_future()
+ parent_future = asyncio.ensure_future(
+ parent_loop.run_in_executor(self._executor, wrapper, kill_switch),
+ loop=parent_loop)
+ parent_future.add_done_callback(
+ lambda parent_future: None if kill_switch.done()
+ else kill_switch.set_result(None))
+ return parent_future
+
+ return execute_wrapper
+
+
+class RetryThreadExecutorTestCase(RetryForkExecutorTestCase):
+ def _setUpExecutor(self):
+ if sys.version_info.major < 3:
+ self.skipTest('ThreadPoolExecutor not supported for python2')
+ self._executor = ThreadPoolExecutor(max_workers=1)
diff --git a/lib/portage/tests/util/test_checksum.py b/lib/portage/tests/util/test_checksum.py
new file mode 100644
index 000000000..01ac8f9d0
--- /dev/null
+++ b/lib/portage/tests/util/test_checksum.py
@@ -0,0 +1,106 @@
+# Copyright 2011-2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+
+from portage.checksum import checksum_str
+from portage.exception import DigestException
+
+class ChecksumTestCase(TestCase):
+ text = b'Some test string used to check if the hash works'
+
+ def test_md5(self):
+ self.assertEqual(checksum_str(b'', 'MD5'),
+ 'd41d8cd98f00b204e9800998ecf8427e')
+ self.assertEqual(checksum_str(self.text, 'MD5'),
+ '094c3bf4732f59b39d577e9726f1e934')
+
+ def test_sha1(self):
+ self.assertEqual(checksum_str(b'', 'SHA1'),
+ 'da39a3ee5e6b4b0d3255bfef95601890afd80709')
+ self.assertEqual(checksum_str(self.text, 'SHA1'),
+ '5c572017d4e4d49e4aa03a2eda12dbb54a1e2e4f')
+
+ def test_sha256(self):
+ self.assertEqual(checksum_str(b'', 'SHA256'),
+ 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')
+ self.assertEqual(checksum_str(self.text, 'SHA256'),
+ 'e3d4a1135181fe156d61455615bb6296198e8ca5b2f20ddeb85cb4cd27f62320')
+
+ def test_sha512(self):
+ self.assertEqual(checksum_str(b'', 'SHA512'),
+ 'cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e')
+ self.assertEqual(checksum_str(self.text, 'SHA512'),
+ 'c8eaa902d48a2c82c2185a92f1c8bab8115c63c8d7a9966a8e8e81b07abcb9762f4707a6b27075e9d720277ba9fec072a59840d6355dd2ee64681d8f39a50856')
+
+ def test_rmd160(self):
+ try:
+ self.assertEqual(checksum_str(b'', 'RMD160'),
+ '9c1185a5c5e9fc54612808977ee8f548b2258d31')
+ self.assertEqual(checksum_str(self.text, 'RMD160'),
+ 'fc453174f63fc011d6f64abd2c45fb6a53c8239b')
+ except DigestException:
+ self.skipTest('RMD160 implementation not available')
+
+ def test_whirlpool(self):
+ try:
+ self.assertEqual(checksum_str(b'', 'WHIRLPOOL'),
+ '19fa61d75522a4669b44e39c1d2e1726c530232130d407f89afee0964997f7a73e83be698b288febcf88e3e03c4f0757ea8964e59b63d93708b138cc42a66eb3')
+ self.assertEqual(checksum_str(self.text, 'WHIRLPOOL'),
+ '8f556a079b87057f19e0880eed6d833e40c916f4b133196f6842281a2517873074d399832470c11ee251696b4844a10197714a069ba3e3415c8a4eced8f91b48')
+ except DigestException:
+ self.skipTest('WHIRLPOOL implementation not available')
+
+ def test_blake2b(self):
+ try:
+ self.assertEqual(checksum_str(b'', 'BLAKE2B'),
+ '786a02f742015903c6c6fd852552d272912f4740e15847618a86e217f71f5419d25e1031afee585313896444934eb04b903a685b1448b755d56f701afe9be2ce')
+ self.assertEqual(checksum_str(self.text, 'BLAKE2B'),
+ '84cb3c88838c7147bc9797c6525f812adcdcb40137f9c075963e3a3ed1fe06aaeeb4d2bb5589bad286864dc1aa834cfc4d66b8d7e4d4a246d91d45ce3a6eee43')
+ except DigestException:
+ self.skipTest('BLAKE2B implementation not available')
+
+ def test_blake2s(self):
+ try:
+ self.assertEqual(checksum_str(b'', 'BLAKE2S'),
+ '69217a3079908094e11121d042354a7c1f55b6482ca1a51e1b250dfd1ed0eef9')
+ self.assertEqual(checksum_str(self.text, 'BLAKE2S'),
+ '823ab2429f27690450efe888b0404d092fe2ee72a9bd63d5342c251b4dbb373d')
+ except DigestException:
+ self.skipTest('BLAKE2S implementation not available')
+
+ def test_sha3_256(self):
+ try:
+ self.assertEqual(checksum_str(b'', 'SHA3_256'),
+ 'a7ffc6f8bf1ed76651c14756a061d662f580ff4de43b49fa82d80a4b80f8434a')
+ self.assertEqual(checksum_str(self.text, 'SHA3_256'),
+ '932fc0498ebb865406f9b6606280939283aa8a148562e39fd095a5d22bdec5c6')
+ except DigestException:
+ self.skipTest('SHA3_256 implementation not available')
+
+ def test_sha3_512(self):
+ try:
+ self.assertEqual(checksum_str(b'', 'SHA3_512'),
+ 'a69f73cca23a9ac5c8b567dc185a756e97c982164fe25859e0d1dcc1475c80a615b2123af1f5f94c11e3e9402c3ac558f500199d95b6d3e301758586281dcd26')
+ self.assertEqual(checksum_str(self.text, 'SHA3_512'),
+ '6634c004dc31822fa65c2f1e2e3bbf0cfa35085653cca1ca9ca42f8f3f13c908405e0b665918146181c9fc9a9d793fc05429d669c35a55517820dfaa071425ca')
+ except DigestException:
+ self.skipTest('SHA3_512 implementation not available')
+
+ def test_streebog256(self):
+ try:
+ self.assertEqual(checksum_str(b'', 'STREEBOG256'),
+ '3f539a213e97c802cc229d474c6aa32a825a360b2a933a949fd925208d9ce1bb')
+ self.assertEqual(checksum_str(self.text, 'STREEBOG256'),
+ '4992f1239c46f15b89e7b83ded4d83fb5966da3692788a4a1a6d118f78c08444')
+ except DigestException:
+ self.skipTest('STREEBOG256 implementation not available')
+
+ def test_streebog512(self):
+ try:
+ self.assertEqual(checksum_str(b'', 'STREEBOG512'),
+ '8e945da209aa869f0455928529bcae4679e9873ab707b55315f56ceb98bef0a7362f715528356ee83cda5f2aac4c6ad2ba3a715c1bcd81cb8e9f90bf4c1c1a8a')
+ self.assertEqual(checksum_str(self.text, 'STREEBOG512'),
+ '330f5c26437f4e22c0163c72b12e93b8c27202f0750627355bdee43a0e0b253c90fbf0a27adbe5414019ff01ed84b7b240a1da1cbe10fae3adffc39c2d87a51f')
+ except DigestException:
+ self.skipTest('STREEBOG512 implementation not available')
diff --git a/lib/portage/tests/util/test_digraph.py b/lib/portage/tests/util/test_digraph.py
new file mode 100644
index 000000000..01e075c99
--- /dev/null
+++ b/lib/portage/tests/util/test_digraph.py
@@ -0,0 +1,241 @@
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.util.digraph import digraph
+#~ from portage.util import noiselimit
+import portage.util
+
+class DigraphTest(TestCase):
+
+ def _assertBFSEqual(self, result, expected):
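+ # Compare a BFS result against an expected sequence in which an element
+ # may be a list of edges whose relative order is not significant
+ # (nodes discovered at the same depth).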
+ result_stack = list(result)
+ result_stack.reverse()
+ expected_stack = list(reversed(expected))
+ result_compared = []
+ expected_compared = []
+ while result_stack:
+ if not expected_stack:
+ result_compared.append(result_stack.pop())
+ self.assertEqual(result_compared, expected_compared)
+ expected_set = expected_stack.pop()
+ if not isinstance(expected_set, list):
+ expected_set = [expected_set]
+ expected_set = set(expected_set)
+ while expected_set:
+ if not result_stack:
+ expected_compared.extend(expected_set)
+ self.assertEqual(result_compared, expected_compared)
+ obj = result_stack.pop()
+ try:
+ expected_set.remove(obj)
+ except KeyError:
+ expected_compared.extend(expected_set)
+ result_compared.append(obj)
+ self.assertEqual(result_compared, expected_compared)
+ else:
+ expected_compared.append(obj)
+ result_compared.append(obj)
+ if expected_stack:
+ expected_set = expected_stack.pop()
+ if not isinstance(expected_set, list):
+ expected_set = [expected_set]
+ expected_compared.extend(expected_set)
+ self.assertEqual(result_compared, expected_compared)
+
+ def testBackwardCompatibility(self):
+ g = digraph()
+ f = g.copy()
+ g.addnode("A", None)
+ self.assertEqual("A" in g, True)
+ self.assertEqual(bool(g), True)
+ self.assertEqual(g.allnodes(), ["A"])
+ self.assertEqual(g.allzeros(), ["A"])
+ self.assertEqual(g.hasnode("A"), True)
+
+ def testDigraphEmptyGraph(self):
+ g = digraph()
+ f = g.clone()
+ for x in g, f:
+ self.assertEqual(bool(x), False)
+ self.assertEqual(x.contains("A"), False)
+ self.assertEqual(x.firstzero(), None)
+ self.assertRaises(KeyError, x.remove, "A")
+ x.delnode("A")
+ self.assertEqual(list(x), [])
+ self.assertEqual(x.get("A"), None)
+ self.assertEqual(x.get("A", "default"), "default")
+ self.assertEqual(x.all_nodes(), [])
+ self.assertEqual(x.leaf_nodes(), [])
+ self.assertEqual(x.root_nodes(), [])
+ self.assertRaises(KeyError, x.child_nodes, "A")
+ self.assertRaises(KeyError, x.parent_nodes, "A")
+ self.assertEqual(x.hasallzeros(), True)
+ self.assertRaises(KeyError, list, x.bfs("A"))
+ self.assertRaises(KeyError, x.shortest_path, "A", "B")
+ self.assertRaises(KeyError, x.remove_edge, "A", "B")
+ self.assertEqual(x.get_cycles(), [])
+ x.difference_update("A")
+ portage.util.noiselimit = -2
+ x.debug_print()
+ portage.util.noiselimit = 0
+
+ def testDigraphCircle(self):
+ g = digraph()
+ g.add("A", "B", -1)
+ g.add("B", "C", 0)
+ g.add("C", "D", 1)
+ g.add("D", "A", 2)
+
+ f = g.clone()
+ h = digraph()
+ h.update(f)
+ for x in g, f, h:
+ self.assertEqual(bool(x), True)
+ self.assertEqual(x.contains("A"), True)
+ self.assertEqual(x.firstzero(), None)
+ self.assertRaises(KeyError, x.remove, "Z")
+ x.delnode("Z")
+ self.assertEqual(list(x), ["A", "B", "C", "D"])
+ self.assertEqual(x.get("A"), "A")
+ self.assertEqual(x.get("A", "default"), "A")
+ self.assertEqual(x.all_nodes(), ["A", "B", "C", "D"])
+ self.assertEqual(x.leaf_nodes(), [])
+ self.assertEqual(x.root_nodes(), [])
+ self.assertEqual(x.child_nodes("A"), ["D"])
+ self.assertEqual(x.child_nodes("A", ignore_priority=2), [])
+ self.assertEqual(x.parent_nodes("A"), ["B"])
+ self.assertEqual(x.parent_nodes("A", ignore_priority=-2), ["B"])
+ self.assertEqual(x.parent_nodes("A", ignore_priority=-1), [])
+ self.assertEqual(x.hasallzeros(), False)
+ self._assertBFSEqual(x.bfs("A"), [(None, "A"), ("A", "D"), ("D", "C"), ("C", "B")])
+ self.assertEqual(x.shortest_path("A", "D"), ["A", "D"])
+ self.assertEqual(x.shortest_path("D", "A"), ["D", "C", "B", "A"])
+ self.assertEqual(x.shortest_path("A", "D", ignore_priority=2), None)
+ self.assertEqual(x.shortest_path("D", "A", ignore_priority=-2), ["D", "C", "B", "A"])
+ cycles = set(tuple(y) for y in x.get_cycles())
+ self.assertEqual(cycles, set([("D", "C", "B", "A"), ("C", "B", "A", "D"), ("B", "A", "D", "C"), \
+ ("A", "D", "C", "B")]))
+ x.remove_edge("A", "B")
+ self.assertEqual(x.get_cycles(), [])
+ x.difference_update(["D"])
+ self.assertEqual(x.all_nodes(), ["A", "B", "C"])
+ portage.util.noiselimit = -2
+ x.debug_print()
+ portage.util.noiselimit = 0
+
+ def testDigraphTree(self):
+ g = digraph()
+ g.add("B", "A", -1)
+ g.add("C", "A", 0)
+ g.add("D", "C", 1)
+ g.add("E", "C", 2)
+
+ f = g.clone()
+ for x in g, f:
+ self.assertEqual(bool(x), True)
+ self.assertEqual(x.contains("A"), True)
+ self.assertEqual(x.has_edge("B", "A"), True)
+ self.assertEqual(x.has_edge("A", "B"), False)
+ self.assertEqual(x.firstzero(), "B")
+ self.assertRaises(KeyError, x.remove, "Z")
+ x.delnode("Z")
+ self.assertEqual(set(x), set(["A", "B", "C", "D", "E"]))
+ self.assertEqual(x.get("A"), "A")
+ self.assertEqual(x.get("A", "default"), "A")
+ self.assertEqual(set(x.all_nodes()), set(["A", "B", "C", "D", "E"]))
+ self.assertEqual(set(x.leaf_nodes()), set(["B", "D", "E"]))
+ self.assertEqual(set(x.leaf_nodes(ignore_priority=0)), set(["A", "B", "D", "E"]))
+ self.assertEqual(x.root_nodes(), ["A"])
+ self.assertEqual(set(x.root_nodes(ignore_priority=0)), set(["A", "B", "C"]))
+ self.assertEqual(set(x.child_nodes("A")), set(["B", "C"]))
+ self.assertEqual(x.child_nodes("A", ignore_priority=2), [])
+ self.assertEqual(x.parent_nodes("B"), ["A"])
+ self.assertEqual(x.parent_nodes("B", ignore_priority=-2), ["A"])
+ self.assertEqual(x.parent_nodes("B", ignore_priority=-1), [])
+ self.assertEqual(x.hasallzeros(), False)
+ self._assertBFSEqual(x.bfs("A"), [(None, "A"), [("A", "C"), ("A", "B")], [("C", "E"), ("C", "D")]])
+ self.assertEqual(x.shortest_path("A", "D"), ["A", "C", "D"])
+ self.assertEqual(x.shortest_path("D", "A"), None)
+ self.assertEqual(x.shortest_path("A", "D", ignore_priority=2), None)
+ cycles = set(tuple(y) for y in x.get_cycles())
+ self.assertEqual(cycles, set())
+ x.remove("D")
+ self.assertEqual(set(x.all_nodes()), set(["A", "B", "C", "E"]))
+ x.remove("C")
+ self.assertEqual(set(x.all_nodes()), set(["A", "B", "E"]))
+ portage.util.noiselimit = -2
+ x.debug_print()
+ portage.util.noiselimit = 0
+ self.assertRaises(KeyError, x.remove_edge, "A", "E")
+
+ def testDigraphCompleteGraph(self):
+ g = digraph()
+ g.add("A", "B", -1)
+ g.add("B", "A", 1)
+ g.add("A", "C", 1)
+ g.add("C", "A", -1)
+ g.add("C", "B", 1)
+ g.add("B", "C", 1)
+
+ f = g.clone()
+ for x in g, f:
+ self.assertEqual(bool(x), True)
+ self.assertEqual(x.contains("A"), True)
+ self.assertEqual(x.firstzero(), None)
+ self.assertRaises(KeyError, x.remove, "Z")
+ x.delnode("Z")
+ self.assertEqual(list(x), ["A", "B", "C"])
+ self.assertEqual(x.get("A"), "A")
+ self.assertEqual(x.get("A", "default"), "A")
+ self.assertEqual(x.all_nodes(), ["A", "B", "C"])
+ self.assertEqual(x.leaf_nodes(), [])
+ self.assertEqual(x.root_nodes(), [])
+ self.assertEqual(set(x.child_nodes("A")), set(["B", "C"]))
+ self.assertEqual(x.child_nodes("A", ignore_priority=0), ["B"])
+ self.assertEqual(set(x.parent_nodes("A")), set(["B", "C"]))
+ self.assertEqual(x.parent_nodes("A", ignore_priority=0), ["C"])
+ self.assertEqual(x.parent_nodes("A", ignore_priority=1), [])
+ self.assertEqual(x.hasallzeros(), False)
+ self._assertBFSEqual(x.bfs("A"), [(None, "A"), [("A", "C"), ("A", "B")]])
+ self.assertEqual(x.shortest_path("A", "C"), ["A", "C"])
+ self.assertEqual(x.shortest_path("C", "A"), ["C", "A"])
+ self.assertEqual(x.shortest_path("A", "C", ignore_priority=0), ["A", "B", "C"])
+ self.assertEqual(x.shortest_path("C", "A", ignore_priority=0), ["C", "A"])
+ cycles = set(frozenset(y) for y in x.get_cycles())
+ self.assertEqual(cycles, set([frozenset(["A", "B"]), frozenset(["A", "C"]), frozenset(["B", "C"])]))
+ x.remove_edge("A", "B")
+ cycles = set(frozenset(y) for y in x.get_cycles())
+ self.assertEqual(cycles, set([frozenset(["A", "C"]), frozenset(["C", "B"])]))
+ x.difference_update(["C"])
+ self.assertEqual(x.all_nodes(), ["A", "B"])
+ portage.util.noiselimit = -2
+ x.debug_print()
+ portage.util.noiselimit = 0
+
+ def testDigraphIgnorePriority(self):
+
+ def always_true(dummy):
+ return True
+
+ def always_false(dummy):
+ return False
+
+ g = digraph()
+ g.add("A", "B")
+
+ self.assertEqual(g.parent_nodes("A"), ["B"])
+ self.assertEqual(g.parent_nodes("A", ignore_priority=always_false), ["B"])
+ self.assertEqual(g.parent_nodes("A", ignore_priority=always_true), [])
+
+ self.assertEqual(g.child_nodes("B"), ["A"])
+ self.assertEqual(g.child_nodes("B", ignore_priority=always_false), ["A"])
+ self.assertEqual(g.child_nodes("B", ignore_priority=always_true), [])
+
+ self.assertEqual(g.leaf_nodes(), ["A"])
+ self.assertEqual(g.leaf_nodes(ignore_priority=always_false), ["A"])
+ self.assertEqual(g.leaf_nodes(ignore_priority=always_true), ["A", "B"])
+
+ self.assertEqual(g.root_nodes(), ["B"])
+ self.assertEqual(g.root_nodes(ignore_priority=always_false), ["B"])
+ self.assertEqual(g.root_nodes(ignore_priority=always_true), ["A", "B"])
diff --git a/lib/portage/tests/util/test_getconfig.py b/lib/portage/tests/util/test_getconfig.py
new file mode 100644
index 000000000..e79fca4b9
--- /dev/null
+++ b/lib/portage/tests/util/test_getconfig.py
@@ -0,0 +1,76 @@
+# Copyright 2010-2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import tempfile
+
+from portage import os
+from portage import shutil
+from portage import _unicode_encode
+from portage.const import PORTAGE_BASE_PATH
+from portage.tests import TestCase
+from portage.util import getconfig
+from portage.exception import ParseError
+
+class GetConfigTestCase(TestCase):
+ """
+ Test that getconfig() produces the same result as bash would when
+ sourcing the same input.
+ """
+
+ _cases = {
+ 'FETCHCOMMAND' : 'wget -t 3 -T 60 --passive-ftp -O "${DISTDIR}/${FILE}" "${URI}"',
+ 'FETCHCOMMAND_RSYNC' : 'rsync -avP "${URI}" "${DISTDIR}/${FILE}"',
+ 'FETCHCOMMAND_SFTP' : 'bash -c "x=\\${2#sftp://} ; host=\\${x%%/*} ; port=\\${host##*:} ; host=\\${host%:*} ; [[ \\${host} = \\${port} ]] && port= ; eval \\"declare -a ssh_opts=(\\${3})\\" ; exec sftp \\${port:+-P \\${port}} \\"\\${ssh_opts[@]}\\" \\"\\${host}:/\\${x#*/}\\" \\"\\$1\\"" sftp "${DISTDIR}/${FILE}" "${URI}" "${PORTAGE_SSH_OPTS}"',
+ 'FETCHCOMMAND_SSH' : 'bash -c "x=\\${2#ssh://} ; host=\\${x%%/*} ; port=\\${host##*:} ; host=\\${host%:*} ; [[ \\${host} = \\${port} ]] && port= ; exec rsync --rsh=\\"ssh \\${port:+-p\\${port}} \\${3}\\" -avP \\"\\${host}:/\\${x#*/}\\" \\"\\$1\\"" rsync "${DISTDIR}/${FILE}" "${URI}" "${PORTAGE_SSH_OPTS}"',
+ 'PORTAGE_ELOG_MAILSUBJECT' : '[portage] ebuild log for ${PACKAGE} on ${HOST}'
+ }
+
+ def testGetConfig(self):
+ make_globals_file = os.path.join(self.cnf_path, "make.globals")
+ d = getconfig(make_globals_file)
+ for k, v in self._cases.items():
+ self.assertEqual(d[k], v)
+
+ def testGetConfigSourceLex(self):
+ try:
+ tempdir = tempfile.mkdtemp()
+ make_conf_file = os.path.join(tempdir, 'make.conf')
+ with open(make_conf_file, 'w') as f:
+ f.write('source "${DIR}/sourced_file"\n')
+ sourced_file = os.path.join(tempdir, 'sourced_file')
+ with open(sourced_file, 'w') as f:
+ f.write('PASSES_SOURCING_TEST="True"\n')
+
+ d = getconfig(make_conf_file, allow_sourcing=True, expand={"DIR": tempdir})
+
+ # PASSES_SOURCING_TEST should exist in getconfig result.
+ self.assertTrue(d is not None)
+ self.assertEqual("True", d['PASSES_SOURCING_TEST'])
+
+ # With allow_sourcing=True and empty expand map, this should
+ # throw a ParseError exception.
+ self.assertRaisesMsg("An empty expand map should throw an exception",
+ ParseError, getconfig, make_conf_file, allow_sourcing=True, expand={})
+ finally:
+ shutil.rmtree(tempdir)
+
+ def testGetConfigProfileEnv(self):
+ # Test the mode which is used to parse /etc/env.d and /etc/profile.env.
+
+ cases = {
+ 'LESS_TERMCAP_mb': r"$\E[01;31m", # bug #410625
+ }
+
+ with tempfile.NamedTemporaryFile(mode='wb') as f:
+ # Format like env_update formats /etc/profile.env.
+ for k, v in cases.items():
+ if v.startswith('$') and not v.startswith('${'):
+ line = "export %s=$'%s'\n" % (k, v[1:])
+ else:
+ line = "export %s='%s'\n" % (k, v)
+ f.write(_unicode_encode(line))
+ f.flush()
+
+ d = getconfig(f.name, expand=False)
+ for k, v in cases.items():
+ self.assertEqual(d.get(k), v)
diff --git a/lib/portage/tests/util/test_grabdict.py b/lib/portage/tests/util/test_grabdict.py
new file mode 100644
index 000000000..e62a75dcc
--- /dev/null
+++ b/lib/portage/tests/util/test_grabdict.py
@@ -0,0 +1,11 @@
+# test_grabDict.py -- Portage Unit Testing Functionality
+# Copyright 2006-2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+#from portage.util import grabdict
+
+class GrabDictTestCase(TestCase):
+
+ def testGrabDictPass(self):
+ pass
diff --git a/lib/portage/tests/util/test_install_mask.py b/lib/portage/tests/util/test_install_mask.py
new file mode 100644
index 000000000..f651eb4b7
--- /dev/null
+++ b/lib/portage/tests/util/test_install_mask.py
@@ -0,0 +1,129 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.util.install_mask import InstallMask
+
+
+class InstallMaskTestCase(TestCase):
+
+ def testTrailingSlash(self):
+ """
+ Test that elements with a trailing slash match a directory
+ but not a regular file.
+ """
+ cases = (
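+ # Each case pairs an INSTALL_MASK string with (path, expected match)
+ # tuples that are checked against InstallMask.match() below.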
+ (
+ '/foo/bar/ -/foo/bar/*.foo -*.baz',
+ (
+ (
+ 'foo/bar/baz',
+ True,
+ ),
+ (
+ 'foo/bar/',
+ True,
+ ),
+ # /foo/bar/ does not match
+ (
+ 'foo/bar',
+ False,
+ ),
+ # this is excluded
+ (
+ 'foo/bar/baz.foo',
+ False,
+ ),
+ # this is excluded
+ (
+ 'foo/bar/baz.baz',
+ False,
+ ),
+ (
+ 'foo/bar/baz.bar',
+ True,
+ ),
+ )
+ ),
+ (
+ '/foo/bar -/foo/bar/*.foo -*.baz',
+ (
+ (
+ 'foo/bar/baz',
+ True,
+ ),
+ # /foo/bar matches both foo/bar/ and foo/bar
+ (
+ 'foo/bar/',
+ True,
+ ),
+ (
+ 'foo/bar',
+ True,
+ ),
+ # this is excluded
+ (
+ 'foo/bar/baz.foo',
+ False,
+ ),
+ # this is excluded
+ (
+ 'foo/bar/baz.baz',
+ False,
+ ),
+ (
+ 'foo/bar/baz.bar',
+ True,
+ ),
+ )
+ ),
+ (
+ '/foo*',
+ (
+ (
+ 'foo',
+ True,
+ ),
+ (
+ 'foo/',
+ True,
+ ),
+ (
+ 'foobar',
+ True,
+ ),
+ (
+ 'foobar/',
+ True,
+ ),
+ )
+ ),
+ (
+ '/foo*/',
+ (
+ (
+ 'foo',
+ False,
+ ),
+ (
+ 'foo/',
+ True,
+ ),
+ (
+ 'foobar',
+ False,
+ ),
+ (
+ 'foobar/',
+ True,
+ ),
+ )
+ ),
+ )
+
+ for install_mask_str, paths in cases:
+ install_mask = InstallMask(install_mask_str)
+ for path, expected in paths:
+ self.assertEqual(install_mask.match(path), expected,
+ 'unexpected match result for "{}" with path {}'.\
+ format(install_mask_str, path))
diff --git a/lib/portage/tests/util/test_normalizedPath.py b/lib/portage/tests/util/test_normalizedPath.py
new file mode 100644
index 000000000..f993886ac
--- /dev/null
+++ b/lib/portage/tests/util/test_normalizedPath.py
@@ -0,0 +1,14 @@
+# test_normalizePath.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+
+class NormalizePathTestCase(TestCase):
+
+ def testNormalizePath(self):
+
+ from portage.util import normalize_path
+ path = "///foo/bar/baz"
+ good = "/foo/bar/baz"
+ self.assertEqual(normalize_path(path), good)
diff --git a/lib/portage/tests/util/test_stackDictList.py b/lib/portage/tests/util/test_stackDictList.py
new file mode 100644
index 000000000..25a723c69
--- /dev/null
+++ b/lib/portage/tests/util/test_stackDictList.py
@@ -0,0 +1,19 @@
+# test_stackDictList.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+
+class StackDictListTestCase(TestCase):
+
+ def testStackDictList(self):
+ from portage.util import stack_dictlist
+
+ tests = [
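+ # each entry: (first dict, second dict, incremental, expected result)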
+ ({'a': 'b'}, {'x': 'y'}, False, {'a': ['b'], 'x': ['y']}),
+ ({'KEYWORDS': ['alpha', 'x86']}, {'KEYWORDS': ['-*']}, True, {}),
+ ({'KEYWORDS': ['alpha', 'x86']}, {'KEYWORDS': ['-x86']}, True, {'KEYWORDS': ['alpha']}),
+ ]
+ for test in tests:
+ self.assertEqual(
+ stack_dictlist([test[0], test[1]], incremental=test[2]), test[3])
diff --git a/lib/portage/tests/util/test_stackDicts.py b/lib/portage/tests/util/test_stackDicts.py
new file mode 100644
index 000000000..0c1dcdb78
--- /dev/null
+++ b/lib/portage/tests/util/test_stackDicts.py
@@ -0,0 +1,33 @@
+# test_stackDicts.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.util import stack_dicts
+
+
+class StackDictsTestCase(TestCase):
+
+ def testStackDictsPass(self):
+
+ tests = [
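+ # each entry: (input dicts, expected result, then the positional
+ # arguments passed to stack_dicts() below)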
+ ([{'a': 'b'}, {'b': 'c'}], {'a': 'b', 'b': 'c'}, False, [], False),
+ ([{'a': 'b'}, {'a': 'c'}], {'a': 'b c'}, True, [], False),
+ ([{'a': 'b'}, {'a': 'c'}], {'a': 'b c'}, False, ['a'], False),
+ ([{'a': 'b'}, None], {'a': 'b'}, False, [], True),
+ ([None], {}, False, [], False),
+ ([None, {}], {}, False, [], True)
+ ]
+ for test in tests:
+ result = stack_dicts(test[0], test[2], test[3], test[4])
+ self.assertEqual(result, test[1])
+
+ def testStackDictsFail(self):
+
+ tests = [
+ ([None, {}], None, False, [], True),
+ ([{'a': 'b'}, {'a': 'c'}], {'a': 'b c'}, False, [], False)
+ ]
+ for test in tests:
+ result = stack_dicts(test[0], test[2], test[3], test[4])
+ self.assertNotEqual(result, test[1])
diff --git a/lib/portage/tests/util/test_stackLists.py b/lib/portage/tests/util/test_stackLists.py
new file mode 100644
index 000000000..3ba69ecd2
--- /dev/null
+++ b/lib/portage/tests/util/test_stackLists.py
@@ -0,0 +1,21 @@
+# test_stackLists.py -- Portage Unit Testing Functionality
+# Copyright 2006-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.util import stack_lists
+
+class StackListsTestCase(TestCase):
+
+ def testStackLists(self):
+
+ tests = [
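+ # each entry: (input lists, expected result, incremental)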
+ ([['a', 'b', 'c'], ['d', 'e', 'f']], ['a', 'c', 'b', 'e', 'd', 'f'], False),
+ ([['a', 'x'], ['b', 'x']], ['a', 'x', 'b'], False),
+ ([['a', 'b', 'c'], ['-*']], [], True),
+ ([['a'], ['-a']], [], True)
+ ]
+
+ for test in tests:
+ result = stack_lists(test[0], test[2])
+ self.assertEqual(set(result), set(test[1]))
diff --git a/lib/portage/tests/util/test_uniqueArray.py b/lib/portage/tests/util/test_uniqueArray.py
new file mode 100644
index 000000000..aae88cce8
--- /dev/null
+++ b/lib/portage/tests/util/test_uniqueArray.py
@@ -0,0 +1,26 @@
+# test_uniqueArray.py -- Portage Unit Testing Functionality
+# Copyright 2006-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.tests import TestCase
+from portage.util import unique_array
+
+class UniqueArrayTestCase(TestCase):
+
+ def testUniqueArrayPass(self):
+ """
+ test portage.util.unique_array()
+ """
+
+ tests = [
+ (['a', 'a', 'a', os, os, [], [], []], ['a', os, []]),
+ ([1, 1, 1, 2, 3, 4, 4], [1, 2, 3, 4])
+ ]
+
+ for test in tests:
+ result = unique_array(test[0])
+ for item in test[1]:
+ number = result.count(item)
+ self.assertFalse(number != 1, msg=("%s contains %s of %s, "
+ "should be only 1") % (result, number, item))
diff --git a/lib/portage/tests/util/test_varExpand.py b/lib/portage/tests/util/test_varExpand.py
new file mode 100644
index 000000000..d8addf2de
--- /dev/null
+++ b/lib/portage/tests/util/test_varExpand.py
@@ -0,0 +1,92 @@
+# test_varExpand.py -- Portage Unit Testing Functionality
+# Copyright 2006-2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.util import varexpand
+
+class VarExpandTestCase(TestCase):
+
+ def testVarExpandPass(self):
+
+ varDict = {"a": "5", "b": "7", "c": "-5"}
+ for key in varDict:
+ result = varexpand("$%s" % key, varDict)
+
+ self.assertFalse(result != varDict[key],
+ msg="Got %s != %s, from varexpand(%s, %s)" %
+ (result, varDict[key], "$%s" % key, varDict))
+ result = varexpand("${%s}" % key, varDict)
+ self.assertFalse(result != varDict[key],
+ msg="Got %s != %s, from varexpand(%s, %s)" %
+ (result, varDict[key], "${%s}" % key, varDict))
+
+ def testVarExpandBackslashes(self):
+ r"""
+ We want to behave like bash does when expanding a variable
+ assignment in a sourced file, in which case it performs
+ backslash removal for \\ and \$ but nothing more. It also
+ removes escaped newline characters. Note that we don't
+ handle escaped quotes here, since getconfig() uses shlex
+ to handle that earlier.
+ """
+
+ varDict = {}
+ tests = [
+ ("\\", "\\"),
+ ("\\\\", "\\"),
+ ("\\\\\\", "\\\\"),
+ ("\\\\\\\\", "\\\\"),
+ ("\\$", "$"),
+ ("\\\\$", "\\$"),
+ ("\\a", "\\a"),
+ ("\\b", "\\b"),
+ ("\\n", "\\n"),
+ ("\\r", "\\r"),
+ ("\\t", "\\t"),
+ ("\\\n", ""),
+ ("\\\"", "\\\""),
+ ("\\'", "\\'"),
+ ]
+ for test in tests:
+ result = varexpand(test[0], varDict)
+ self.assertFalse(result != test[1],
+ msg="Got %s != %s from varexpand(%s, %s)"
+ % (result, test[1], test[0], varDict))
+
+ def testVarExpandDoubleQuotes(self):
+
+ varDict = {"a": "5"}
+ tests = [("\"${a}\"", "\"5\"")]
+ for test in tests:
+ result = varexpand(test[0], varDict)
+ self.assertFalse(result != test[1],
+ msg="Got %s != %s from varexpand(%s, %s)"
+ % (result, test[1], test[0], varDict))
+
+ def testVarExpandSingleQuotes(self):
+
+ varDict = {"a": "5"}
+ tests = [("\'${a}\'", "\'${a}\'")]
+ for test in tests:
+ result = varexpand(test[0], varDict)
+ self.assertFalse(result != test[1],
+ msg="Got %s != %s from varexpand(%s, %s)"
+ % (result, test[1], test[0], varDict))
+
+ def testVarExpandFail(self):
+
+ varDict = {"a": "5", "b": "7", "c": "15"}
+
+ testVars = ["fail"]
+
+ for var in testVars:
+ result = varexpand("$%s" % var, varDict)
+ self.assertFalse(len(result),
+ msg="Got %s == %s, from varexpand(%s, %s)"
+ % (result, var, "$%s" % var, varDict))
+
+ result = varexpand("${%s}" % var, varDict)
+ self.assertFalse(len(result),
+ msg="Got %s == %s, from varexpand(%s, %s)"
+ % (result, var, "${%s}" % var, varDict))
diff --git a/lib/portage/tests/util/test_whirlpool.py b/lib/portage/tests/util/test_whirlpool.py
new file mode 100644
index 000000000..fbe7cae56
--- /dev/null
+++ b/lib/portage/tests/util/test_whirlpool.py
@@ -0,0 +1,16 @@
+# Copyright 2011-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import subprocess
+
+import portage
+from portage import os
+from portage.const import PORTAGE_PYM_PATH
+from portage.tests import TestCase
+
+class WhirlpoolTestCase(TestCase):
+ def testBundledWhirlpool(self):
+ # execute the tests bundled with the whirlpool module
+ retval = subprocess.call([portage._python_interpreter, "-b", "-Wd",
+ os.path.join(PORTAGE_PYM_PATH, "portage/util/whirlpool.py")])
+ self.assertEqual(retval, os.EX_OK)
diff --git a/lib/portage/tests/util/test_xattr.py b/lib/portage/tests/util/test_xattr.py
new file mode 100644
index 000000000..2e2564a6e
--- /dev/null
+++ b/lib/portage/tests/util/test_xattr.py
@@ -0,0 +1,178 @@
+# Copyright 2010-2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Tests for the portage.util._xattr module"""
+
+from __future__ import print_function
+
+try:
+ # Try python-3.3 module first.
+ # pylint: disable=no-name-in-module
+ from unittest import mock
+except ImportError:
+ try:
+ # Try standalone module.
+ import mock
+ except ImportError:
+ mock = None
+
+import subprocess
+
+import portage
+from portage.tests import TestCase
+from portage.util._xattr import (xattr as _xattr, _XattrSystemCommands,
+ _XattrStub)
+
+
+orig_popen = subprocess.Popen
+def MockSubprocessPopen(stdin):
+ """Helper to mock (closely) a subprocess.Popen call
+
+ The module has minor tweaks in behavior when it comes to encoding and
+ python versions, so use a real subprocess.Popen call to fake out the
+ runtime behavior. This way we don't have to also implement different
+ encodings as that gets ugly real fast.
+ """
+ # pylint: disable=protected-access
+ proc = orig_popen(['cat'], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
+ proc.stdin.write(portage._unicode_encode(stdin, portage._encodings['stdio']))
+ return proc
+
+
+class SystemCommandsTest(TestCase):
+ """Test _XattrSystemCommands"""
+
+ OUTPUT = '\n'.join((
+ '# file: /bin/ping',
+ 'security.capability=0sAQAAAgAgAAAAAAAAAAAAAAAAAAA=',
+ 'user.foo="asdf"',
+ '',
+ ))
+
+ def _setUp(self):
+ if mock is None:
+ self.skipTest('need mock for testing')
+
+ return _XattrSystemCommands
+
+ def _testGetBasic(self):
+ """Verify the get() behavior"""
+ xattr = self._setUp()
+ with mock.patch.object(subprocess, 'Popen') as call_mock:
+ # Verify basic behavior, and namespace arg works as expected.
+ xattr.get('/some/file', 'user.foo')
+ xattr.get('/some/file', 'foo', namespace='user')
+ self.assertEqual(call_mock.call_args_list[0], call_mock.call_args_list[1])
+
+ # Verify nofollow behavior.
+ call_mock.reset()
+ xattr.get('/some/file', 'user.foo', nofollow=True)
+ self.assertIn('-h', call_mock.call_args[0][0])
+
+ def testGetParsing(self):
+ """Verify get() parses output sanely"""
+ xattr = self._setUp()
+ with mock.patch.object(subprocess, 'Popen') as call_mock:
+ # Verify output parsing.
+ call_mock.return_value = MockSubprocessPopen('\n'.join([
+ '# file: /some/file',
+ 'user.foo="asdf"',
+ '',
+ ]))
+ call_mock.reset()
+ self.assertEqual(xattr.get('/some/file', 'user.foo'), b'"asdf"')
+
+ def testGetAllBasic(self):
+ """Verify the get_all() behavior"""
+ xattr = self._setUp()
+ with mock.patch.object(subprocess, 'Popen') as call_mock:
+ # Verify basic behavior.
+ xattr.get_all('/some/file')
+
+ # Verify nofollow behavior.
+ call_mock.reset()
+ xattr.get_all('/some/file', nofollow=True)
+ self.assertIn('-h', call_mock.call_args[0][0])
+
+ def testGetAllParsing(self):
+ """Verify get_all() parses output sanely"""
+ xattr = self._setUp()
+ with mock.patch.object(subprocess, 'Popen') as call_mock:
+ # Verify output parsing.
+ call_mock.return_value = MockSubprocessPopen(self.OUTPUT)
+ exp = [
+ (b'security.capability', b'0sAQAAAgAgAAAAAAAAAAAAAAAAAAA='),
+ (b'user.foo', b'"asdf"'),
+ ]
+ self.assertEqual(exp, xattr.get_all('/some/file'))
+
+ def testSetBasic(self):
+ """Verify the set() behavior"""
+ xattr = self._setUp()
+ with mock.patch.object(subprocess, 'Popen') as call_mock:
+ # Verify basic behavior, and namespace arg works as expected.
+ xattr.set('/some/file', 'user.foo', 'bar')
+ xattr.set('/some/file', 'foo', 'bar', namespace='user')
+ self.assertEqual(call_mock.call_args_list[0], call_mock.call_args_list[1])
+
+ def testListBasic(self):
+ """Verify the list() behavior"""
+ xattr = self._setUp()
+ with mock.patch.object(subprocess, 'Popen') as call_mock:
+ # Verify basic behavior.
+ xattr.list('/some/file')
+
+ # Verify nofollow behavior.
+ call_mock.reset()
+ xattr.list('/some/file', nofollow=True)
+ self.assertIn('-h', call_mock.call_args[0][0])
+
+ def testListParsing(self):
+ """Verify list() parses output sanely"""
+ xattr = self._setUp()
+ with mock.patch.object(subprocess, 'Popen') as call_mock:
+ # Verify output parsing.
+ call_mock.return_value = MockSubprocessPopen(self.OUTPUT)
+ exp = [b'security.capability', b'user.foo']
+ self.assertEqual(exp, xattr.list('/some/file'))
+
+ def testRemoveBasic(self):
+ """Verify the remove() behavior"""
+ xattr = self._setUp()
+ with mock.patch.object(subprocess, 'Popen') as call_mock:
+ # Verify basic behavior, and namespace arg works as expected.
+ xattr.remove('/some/file', 'user.foo')
+ xattr.remove('/some/file', 'foo', namespace='user')
+ self.assertEqual(call_mock.call_args_list[0], call_mock.call_args_list[1])
+
+ # Verify nofollow behavior.
+ call_mock.reset()
+ xattr.remove('/some/file', 'user.foo', nofollow=True)
+ self.assertIn('-h', call_mock.call_args[0][0])
+
+
+class StubTest(TestCase):
+ """Test _XattrStub"""
+
+ def testBasic(self):
+ """Verify the stub is stubby"""
+ # Would be nice to verify raised errno is OperationNotSupported.
+ self.assertRaises(OSError, _XattrStub.get, '/', '')
+ self.assertRaises(OSError, _XattrStub.set, '/', '', '')
+ self.assertRaises(OSError, _XattrStub.get_all, '/')
+ self.assertRaises(OSError, _XattrStub.remove, '/', '')
+ self.assertRaises(OSError, _XattrStub.list, '/')
+
+
+class StandardTest(TestCase):
+ """Test basic xattr API"""
+
+ MODULES = (_xattr, _XattrSystemCommands, _XattrStub)
+ FUNCS = ('get', 'get_all', 'set', 'remove', 'list')
+
+ def testApi(self):
+ """Make sure the exported API matches"""
+ for mod in self.MODULES:
+ for f in self.FUNCS:
+ self.assertTrue(hasattr(mod, f),
+ '%s func missing in %s' % (f, mod))
diff --git a/lib/portage/tests/versions/__init__.py b/lib/portage/tests/versions/__init__.py
new file mode 100644
index 000000000..2b14180bc
--- /dev/null
+++ b/lib/portage/tests/versions/__init__.py
@@ -0,0 +1,3 @@
+# tests/portage.versions/__init__.py -- Portage Unit Test functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/tests/versions/__test__.py b/lib/portage/tests/versions/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/versions/__test__.py
diff --git a/lib/portage/tests/versions/test_cpv_sort_key.py b/lib/portage/tests/versions/test_cpv_sort_key.py
new file mode 100644
index 000000000..eeb0eae69
--- /dev/null
+++ b/lib/portage/tests/versions/test_cpv_sort_key.py
@@ -0,0 +1,17 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.versions import cpv_sort_key
+
+class CpvSortKeyTestCase(TestCase):
+
+ def testCpvSortKey(self):
+
+ tests = [
+ (("a/b-2_alpha", "a", "b", "a/b-2", "a/a-1", "a/b-1"),
+ ("a", "a/a-1", "a/b-1", "a/b-2_alpha", "a/b-2", "b")),
+ ]
+
+ for test in tests:
+ self.assertEqual(tuple(sorted(test[0], key=cpv_sort_key())), test[1])
diff --git a/lib/portage/tests/versions/test_vercmp.py b/lib/portage/tests/versions/test_vercmp.py
new file mode 100644
index 000000000..b55518f02
--- /dev/null
+++ b/lib/portage/tests/versions/test_vercmp.py
@@ -0,0 +1,81 @@
+# test_vercmp.py -- Portage Unit Testing Functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.tests import TestCase
+from portage.versions import vercmp
+
+class VerCmpTestCase(TestCase):
+ """ A simple testCase for portage.versions.vercmp()
+ """
+
+ def testVerCmpGreater(self):
+
+ tests = [
+ ("6.0", "5.0"), ("5.0", "5"),
+ ("1.0-r1", "1.0-r0"),
+ ("1.0-r1", "1.0"),
+ ("999999999999999999999999999999", "999999999999999999999999999998"),
+ ("1.0.0", "1.0"),
+ ("1.0.0", "1.0b"),
+ ("1b", "1"),
+ ("1b_p1", "1_p1"),
+ ("1.1b", "1.1"),
+ ("12.2.5", "12.2b"),
+ ]
+ for test in tests:
+ self.assertFalse(vercmp(test[0], test[1]) <= 0, msg="%s < %s? Wrong!" % (test[0], test[1]))
+
+ def testVerCmpLess(self):
+ """
+ alpha < beta < pre < rc < p -> test each of these transitions; the ordering should be transitive.
+ """
+ tests = [
+ ("4.0", "5.0"), ("5", "5.0"), ("1.0_pre2", "1.0_p2"),
+ ("1.0_alpha2", "1.0_p2"), ("1.0_alpha1", "1.0_beta1"), ("1.0_beta3", "1.0_rc3"),
+ ("1.001000000000000000001", "1.001000000000000000002"),
+ ("1.00100000000", "1.0010000000000000001"),
+ ("999999999999999999999999999998", "999999999999999999999999999999"),
+ ("1.01", "1.1"),
+ ("1.0-r0", "1.0-r1"),
+ ("1.0", "1.0-r1"),
+ ("1.0", "1.0.0"),
+ ("1.0b", "1.0.0"),
+ ("1_p1", "1b_p1"),
+ ("1", "1b"),
+ ("1.1", "1.1b"),
+ ("12.2b", "12.2.5"),
+ ]
+ for test in tests:
+ self.assertFalse(vercmp(test[0], test[1]) >= 0, msg="%s > %s? Wrong!" % (test[0], test[1]))
+
+ def testVerCmpEqual(self):
+
+ tests = [
+ ("4.0", "4.0"),
+ ("1.0", "1.0"),
+ ("1.0-r0", "1.0"),
+ ("1.0", "1.0-r0"),
+ ("1.0-r0", "1.0-r0"),
+ ("1.0-r1", "1.0-r1")
+ ]
+ for test in tests:
+ self.assertFalse(vercmp(test[0], test[1]) != 0, msg="%s != %s? Wrong!" % (test[0], test[1]))
+
+ def testVerNotEqual(self):
+
+ tests = [
+ ("1", "2"), ("1.0_alpha", "1.0_pre"), ("1.0_beta", "1.0_alpha"),
+ ("0", "0.0"),
+ ("1.0-r0", "1.0-r1"),
+ ("1.0-r1", "1.0-r0"),
+ ("1.0", "1.0-r1"),
+ ("1.0-r1", "1.0"),
+ ("1.0", "1.0.0"),
+ ("1_p1", "1b_p1"),
+ ("1b", "1"),
+ ("1.1b", "1.1"),
+ ("12.2b", "12.2"),
+ ]
+ for test in tests:
+ self.assertFalse(vercmp(test[0], test[1]) == 0, msg="%s == %s? Wrong!" % (test[0], test[1]))
diff --git a/lib/portage/tests/xpak/__init__.py b/lib/portage/tests/xpak/__init__.py
new file mode 100644
index 000000000..9c3f52476
--- /dev/null
+++ b/lib/portage/tests/xpak/__init__.py
@@ -0,0 +1,3 @@
+# tests/portage.xpak/__init__.py -- Portage Unit Test functionality
+# Copyright 2006 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/tests/xpak/__test__.py b/lib/portage/tests/xpak/__test__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/tests/xpak/__test__.py
diff --git a/lib/portage/tests/xpak/test_decodeint.py b/lib/portage/tests/xpak/test_decodeint.py
new file mode 100644
index 000000000..2da573598
--- /dev/null
+++ b/lib/portage/tests/xpak/test_decodeint.py
@@ -0,0 +1,16 @@
+# xpak/test_decodeint.py
+# Copyright Gentoo Foundation 2006
+# Portage Unit Testing Functionality
+
+from portage.tests import TestCase
+from portage.xpak import decodeint, encodeint
+
+class testDecodeIntTestCase(TestCase):
+
+ def testDecodeInt(self):
+
+ for n in range(1000):
+ self.assertEqual(decodeint(encodeint(n)), n)
+
+ for n in (2 ** 32 - 1,):
+ self.assertEqual(decodeint(encodeint(n)), n)
diff --git a/lib/portage/update.py b/lib/portage/update.py
new file mode 100644
index 000000000..83fc3d2b4
--- /dev/null
+++ b/lib/portage/update.py
@@ -0,0 +1,427 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import errno
+import io
+import re
+import stat
+import sys
+import warnings
+
+from portage import os
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.dep:Atom,dep_getkey,isvalidatom,match_from_list',
+ 'portage.util:ConfigProtect,new_protect_filename,' + \
+ 'normalize_path,write_atomic,writemsg',
+ 'portage.versions:_get_slot_re',
+)
+
+from portage.const import USER_CONFIG_PATH, VCS_DIRS
+from portage.eapi import _get_eapi_attrs
+from portage.exception import DirectoryNotFound, InvalidAtom, PortageException
+from portage.localization import _
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+ _unicode = str
+else:
+ _unicode = unicode
+
+ignored_dbentries = ("CONTENTS", "environment.bz2")
+
+def update_dbentry(update_cmd, mycontent, eapi=None, parent=None):
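+ # update_cmd is a single split command as produced by parse_updates(),
+ # e.g. ["move", <old atom>, <new atom>] or
+ # ["slotmove", <atom>, <origslot>, <newslot>]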
+
+ if parent is not None:
+ eapi = parent.eapi
+
+ if update_cmd[0] == "move":
+ old_value = _unicode(update_cmd[1])
+ new_value = _unicode(update_cmd[2])
+
+ # Use isvalidatom() to check if this move is valid for the
+ # EAPI (characters allowed in package names may vary).
+ if old_value in mycontent and isvalidatom(new_value, eapi=eapi):
+ # this split preserves existing whitespace
+ split_content = re.split(r'(\s+)', mycontent)
+ modified = False
+ for i, token in enumerate(split_content):
+ if old_value not in token:
+ continue
+ try:
+ atom = Atom(token, eapi=eapi)
+ except InvalidAtom:
+ continue
+ if atom.cp != old_value:
+ continue
+
+ new_atom = Atom(token.replace(old_value, new_value, 1),
+ eapi=eapi)
+
+ # Avoid creating self-blockers for bug #367215.
+ if new_atom.blocker and parent is not None and \
+ parent.cp == new_atom.cp and \
+ match_from_list(new_atom, [parent]):
+ continue
+
+ split_content[i] = _unicode(new_atom)
+ modified = True
+
+ if modified:
+ mycontent = "".join(split_content)
+
+ elif update_cmd[0] == "slotmove" and update_cmd[1].operator is None:
+ orig_atom, origslot, newslot = update_cmd[1:]
+ orig_cp = orig_atom.cp
+
+ # We don't support versioned slotmove atoms here, since it can be
+ # difficult to determine if the version constraints really match
+ # the atoms that we're trying to update.
+ if orig_atom.version is None and orig_cp in mycontent:
+ # this split preserves existing whitespace
+ split_content = re.split(r'(\s+)', mycontent)
+ modified = False
+ for i, token in enumerate(split_content):
+ if orig_cp not in token:
+ continue
+ try:
+ atom = Atom(token, eapi=eapi)
+ except InvalidAtom:
+ continue
+ if atom.cp != orig_cp:
+ continue
+ if atom.slot is None or atom.slot != origslot:
+ continue
+
+ slot_part = newslot
+ if atom.sub_slot is not None:
+ if atom.sub_slot == origslot:
+ sub_slot = newslot
+ else:
+ sub_slot = atom.sub_slot
+ slot_part += "/" + sub_slot
+ if atom.slot_operator is not None:
+ slot_part += atom.slot_operator
+
+ split_content[i] = atom.with_slot(slot_part)
+ modified = True
+
+ if modified:
+ mycontent = "".join(split_content)
+
+ return mycontent
+
+def update_dbentries(update_iter, mydata, eapi=None, parent=None):
+ """Performs update commands and returns a
+ dict containing only the updated items."""
+ updated_items = {}
+ for k, mycontent in mydata.items():
+ k_unicode = _unicode_decode(k,
+ encoding=_encodings['repo.content'], errors='replace')
+ if k_unicode not in ignored_dbentries:
+ orig_content = mycontent
+ mycontent = _unicode_decode(mycontent,
+ encoding=_encodings['repo.content'], errors='replace')
+ is_encoded = mycontent is not orig_content
+ orig_content = mycontent
+ for update_cmd in update_iter:
+ mycontent = update_dbentry(update_cmd, mycontent,
+ eapi=eapi, parent=parent)
+ if mycontent != orig_content:
+ if is_encoded:
+ mycontent = _unicode_encode(mycontent,
+ encoding=_encodings['repo.content'],
+ errors='backslashreplace')
+ updated_items[k] = mycontent
+ return updated_items
+
+def fixdbentries(update_iter, dbdir, eapi=None, parent=None):
+ """Performs update commands which result in search and replace operations
+ for each of the files in dbdir (excluding CONTENTS and environment.bz2).
+ Returns True if any modifications were made, and False otherwise."""
+
+ warnings.warn("portage.update.fixdbentries() is deprecated",
+ DeprecationWarning, stacklevel=2)
+
+ mydata = {}
+ for myfile in [f for f in os.listdir(dbdir) if f not in ignored_dbentries]:
+ file_path = os.path.join(dbdir, myfile)
+ with io.open(_unicode_encode(file_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace') as f:
+ mydata[myfile] = f.read()
+ updated_items = update_dbentries(update_iter, mydata,
+ eapi=eapi, parent=parent)
+ for myfile, mycontent in updated_items.items():
+ file_path = os.path.join(dbdir, myfile)
+ write_atomic(file_path, mycontent, encoding=_encodings['repo.content'])
+ return len(updated_items) > 0
+
+def grab_updates(updpath, prev_mtimes=None):
+ """Returns all the updates from the given directory as a sorted list of
+ tuples, each containing (file_path, statobj, content). If prev_mtimes is
+ given then updates are only returned if one or more files have different
+ mtimes. When a change is detected for a given file, updates will be
+ returned for that file and any files that come after it in the entire
+ sequence. This ensures that all relevant updates are returned for cases
+ in which the destination package of an earlier move corresponds to
+ the source package of a move that comes somewhere later in the entire
+ sequence of files.
+ """
+ try:
+ mylist = os.listdir(updpath)
+ except OSError as oe:
+ if oe.errno == errno.ENOENT:
+ raise DirectoryNotFound(updpath)
+ raise
+ if prev_mtimes is None:
+ prev_mtimes = {}
+ # validate the file name (filter out CVS directory, etc...)
+ mylist = [myfile for myfile in mylist if len(myfile) == 7 and myfile[1:3] == "Q-"]
+ if len(mylist) == 0:
+ return []
+
+ # update names are mangled to make them sort properly
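+ # e.g. "1Q-2018" becomes "2018-1Q" so that entries sort by year first;
+ # the original name is restored after sorting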
+ mylist = [myfile[3:]+"-"+myfile[:2] for myfile in mylist]
+ mylist.sort()
+ mylist = [myfile[5:]+"-"+myfile[:4] for myfile in mylist]
+
+ update_data = []
+ for myfile in mylist:
+ file_path = os.path.join(updpath, myfile)
+ mystat = os.stat(file_path)
+ if update_data or \
+ file_path not in prev_mtimes or \
+ long(prev_mtimes[file_path]) != mystat[stat.ST_MTIME]:
+ f = io.open(_unicode_encode(file_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace')
+ content = f.read()
+ f.close()
+ update_data.append((file_path, mystat, content))
+ return update_data
+
+def parse_updates(mycontent):
+ """Valid updates are returned as a list of split update commands."""
+ eapi_attrs = _get_eapi_attrs(None)
+ slot_re = _get_slot_re(eapi_attrs)
+ myupd = []
+ errors = []
+ mylines = mycontent.splitlines()
+ for myline in mylines:
+ mysplit = myline.split()
+ if len(mysplit) == 0:
+ continue
+ if mysplit[0] not in ("move", "slotmove"):
+ errors.append(_("ERROR: Update type not recognized '%s'") % myline)
+ continue
+ if mysplit[0] == "move":
+ if len(mysplit) != 3:
+ errors.append(_("ERROR: Update command invalid '%s'") % myline)
+ continue
+ valid = True
+ for i in (1, 2):
+ try:
+ atom = Atom(mysplit[i])
+ except InvalidAtom:
+ atom = None
+ else:
+ if atom.blocker or atom != atom.cp:
+ atom = None
+ if atom is not None:
+ mysplit[i] = atom
+ else:
+ errors.append(
+ _("ERROR: Malformed update entry '%s'") % myline)
+ valid = False
+ break
+ if not valid:
+ continue
+
+ if mysplit[0] == "slotmove":
+ if len(mysplit) != 4:
+ errors.append(_("ERROR: Update command invalid '%s'") % myline)
+ continue
+ pkg, origslot, newslot = mysplit[1], mysplit[2], mysplit[3]
+ try:
+ atom = Atom(pkg)
+ except InvalidAtom:
+ atom = None
+ else:
+ if atom.blocker:
+ atom = None
+ if atom is not None:
+ mysplit[1] = atom
+ else:
+ errors.append(_("ERROR: Malformed update entry '%s'") % myline)
+ continue
+
+ invalid_slot = False
+ for slot in (origslot, newslot):
+ m = slot_re.match(slot)
+ if m is None:
+ invalid_slot = True
+ break
+ if "/" in slot:
+ # EAPI 4-slot-abi style SLOT is currently not supported.
+ invalid_slot = True
+ break
+
+ if invalid_slot:
+ errors.append(_("ERROR: Malformed update entry '%s'") % myline)
+ continue
+
+ # The list of valid updates is filtered by continue statements above.
+ myupd.append(mysplit)
+ return myupd, errors
+
+def update_config_files(config_root, protect, protect_mask, update_iter,
+ match_callback=None, case_insensitive=False):
+ """Perform global updates on /etc/portage/package.*, /etc/portage/profile/package.*,
+ /etc/portage/profile/packages and /etc/portage/sets.
+ config_root - location of files to update
+ protect - list of paths from CONFIG_PROTECT
+ protect_mask - list of paths from CONFIG_PROTECT_MASK
+ update_iter - list of update commands as returned from parse_updates(),
+ or dict of {repo_name: list}
+ match_callback - a callback which will be called with three arguments:
+ match_callback(repo_name, old_atom, new_atom)
+ and should return a boolean value determining whether to perform the update"""
+
+ repo_dict = None
+ if isinstance(update_iter, dict):
+ repo_dict = update_iter
+ if match_callback is None:
+ def match_callback(repo_name, atoma, atomb):
+ return True
+ config_root = normalize_path(config_root)
+ update_files = {}
+ file_contents = {}
+ myxfiles = [
+ "package.accept_keywords", "package.env",
+ "package.keywords", "package.license",
+ "package.mask", "package.properties",
+ "package.unmask", "package.use", "sets"
+ ]
+ myxfiles += [os.path.join("profile", x) for x in (
+ "packages", "package.accept_keywords",
+ "package.keywords", "package.mask",
+ "package.unmask", "package.use",
+ "package.use.force", "package.use.mask",
+ "package.use.stable.force", "package.use.stable.mask"
+ )]
+ abs_user_config = os.path.join(config_root, USER_CONFIG_PATH)
+ recursivefiles = []
+ for x in myxfiles:
+ config_file = os.path.join(abs_user_config, x)
+ if os.path.isdir(config_file):
+ for parent, dirs, files in os.walk(config_file):
+ try:
+ parent = _unicode_decode(parent,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ for y_enc in list(dirs):
+ try:
+ y = _unicode_decode(y_enc,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ dirs.remove(y_enc)
+ continue
+ if y.startswith(".") or y in VCS_DIRS:
+ dirs.remove(y_enc)
+ for y in files:
+ try:
+ y = _unicode_decode(y,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ if y.startswith("."):
+ continue
+ recursivefiles.append(
+ os.path.join(parent, y)[len(abs_user_config) + 1:])
+ else:
+ recursivefiles.append(x)
+ myxfiles = recursivefiles
+ for x in myxfiles:
+ f = None
+ try:
+ f = io.open(
+ _unicode_encode(os.path.join(abs_user_config, x),
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'],
+ errors='replace')
+ file_contents[x] = f.readlines()
+ except IOError:
+ continue
+ finally:
+ if f is not None:
+ f.close()
+
+ ignore_line_re = re.compile(r'^#|^\s*$')
+ if repo_dict is None:
+ update_items = [(None, update_iter)]
+ else:
+ update_items = [x for x in repo_dict.items() if x[0] != 'DEFAULT']
+ for repo_name, update_iter in update_items:
+ for update_cmd in update_iter:
+ for x, contents in file_contents.items():
+ skip_next = False
+ for pos, line in enumerate(contents):
+ if skip_next:
+ skip_next = False
+ continue
+ if ignore_line_re.match(line):
+ continue
+ atom = line.split()[0]
+ if atom[:1] == "-":
+ # package.mask supports incrementals
+ atom = atom[1:]
+ if atom[:1] == "*":
+ # the packages file supports "*"-prefixed atoms as an indication of system packages.
+ atom = atom[1:]
+ if not isvalidatom(atom):
+ continue
+ new_atom = update_dbentry(update_cmd, atom)
+ if atom != new_atom:
+ if match_callback(repo_name, atom, new_atom):
+ # add a comment with the update command, so
+ # the user can clearly see what happened
+ contents[pos] = "# %s\n" % \
+ " ".join("%s" % (x,) for x in update_cmd)
+ contents.insert(pos + 1,
+ line.replace("%s" % (atom,),
+ "%s" % (new_atom,), 1))
+ # we've inserted an additional line, so we need to
+ # skip it when it's reached in the next iteration
+ skip_next = True
+ update_files[x] = 1
+ sys.stdout.write("p")
+ sys.stdout.flush()
+
+ protect_obj = ConfigProtect(
+ config_root, protect, protect_mask,
+ case_insensitive=case_insensitive)
+ for x in update_files:
+ updating_file = os.path.join(abs_user_config, x)
+ if protect_obj.isprotected(updating_file):
+ updating_file = new_protect_filename(updating_file)
+ try:
+ write_atomic(updating_file, "".join(file_contents[x]))
+ except PortageException as e:
+ writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
+ writemsg(_("!!! An error occurred while updating a config file:") + \
+ " '%s'\n" % updating_file, noiselevel=-1)
+ continue
+
+def dep_transform(mydep, oldkey, newkey):
+ if dep_getkey(mydep) == oldkey:
+ return mydep.replace(oldkey, newkey, 1)
+ return mydep
diff --git a/lib/portage/util/ExtractKernelVersion.py b/lib/portage/util/ExtractKernelVersion.py
new file mode 100644
index 000000000..af4a4fe63
--- /dev/null
+++ b/lib/portage/util/ExtractKernelVersion.py
@@ -0,0 +1,78 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['ExtractKernelVersion']
+
+import io
+
+from portage import os, _encodings, _unicode_encode
+from portage.util import getconfig, grabfile
+
+def ExtractKernelVersion(base_dir):
+ """
+ Try to figure out what kernel version the sources in base_dir correspond to
+ @param base_dir: Path to sources (usually /usr/src/linux)
+ @type base_dir: string
+ @rtype: tuple( version[string], error[string])
+ @return:
+ 1. tuple( version[string], error[string])
+ Either version or error is populated (but never both)
+
+ """
+ lines = []
+ pathname = os.path.join(base_dir, 'Makefile')
+ try:
+ f = io.open(_unicode_encode(pathname,
+ encoding=_encodings['fs'], errors='strict'), mode='r',
+ encoding=_encodings['content'], errors='replace')
+ except OSError as details:
+ return (None, str(details))
+ except IOError as details:
+ return (None, str(details))
+
+ try:
+ for i in range(4):
+ lines.append(f.readline())
+ except OSError as details:
+ return (None, str(details))
+ except IOError as details:
+ return (None, str(details))
+ finally:
+ f.close()
+
+ lines = [l.strip() for l in lines]
+
+ version = ''
+
+ #XXX: The following code relies on the ordering of vars within the Makefile
+ for line in lines:
+ # split on the '=' then remove annoying whitespace
+ items = line.split("=")
+ items = [i.strip() for i in items]
+ if items[0] == 'VERSION' or \
+ items[0] == 'PATCHLEVEL':
+ version += items[1]
+ version += "."
+ elif items[0] == 'SUBLEVEL':
+ version += items[1]
+ elif items[0] == 'EXTRAVERSION' and \
+ items[-1] != items[0]:
+ version += items[1]
+
+ # Grab a list of files named localversion* and sort them
+ localversions = os.listdir(base_dir)
+ for x in range(len(localversions) - 1, -1, -1):
+ if localversions[x][:12] != "localversion":
+ del localversions[x]
+ localversions.sort()
+
+ # Append the contents of each to the version string, stripping ALL whitespace
+ for lv in localversions:
+ version += "".join(" ".join(grabfile(base_dir + "/" + lv)).split())
+
+ # Check the .config for a CONFIG_LOCALVERSION and append that too, also stripping whitespace
+ kernelconfig = getconfig(base_dir+"/.config")
+ if kernelconfig and "CONFIG_LOCALVERSION" in kernelconfig:
+ version += "".join(kernelconfig["CONFIG_LOCALVERSION"].split())
+
+ return (version, None)
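+
+# Example usage (illustrative sketch; the path and the resulting version
+# string are assumptions, not output from a real system):
+#
+#     version, error = ExtractKernelVersion("/usr/src/linux")
+#     if error is None:
+#         print("kernel sources are version %s" % version)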
diff --git a/lib/portage/util/SlotObject.py b/lib/portage/util/SlotObject.py
new file mode 100644
index 000000000..ba6215874
--- /dev/null
+++ b/lib/portage/util/SlotObject.py
@@ -0,0 +1,57 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+class SlotObject(object):
+ __slots__ = ("__weakref__",)
+
+ def __init__(self, **kwargs):
+ classes = [self.__class__]
+ while classes:
+ c = classes.pop()
+ if c is SlotObject:
+ continue
+ classes.extend(c.__bases__)
+ slots = getattr(c, "__slots__", None)
+ if not slots:
+ continue
+ for myattr in slots:
+ myvalue = kwargs.pop(myattr, None)
+ if myvalue is None and getattr(self, myattr, None) is not None:
+ raise AssertionError(
+ "class '%s' duplicates '%s' value in __slots__ of base class '%s'" %
+ (self.__class__.__name__, myattr, c.__name__))
+ try:
+ setattr(self, myattr, myvalue)
+ except AttributeError:
+ # Allow a property to override a __slots__ value, but raise an
+ # error if the intended value is something other than None.
+ if not (myvalue is None and
+ isinstance(getattr(type(self), myattr, None), property)):
+ raise
+
+ if kwargs:
+ raise TypeError(
+ "'%s' is an invalid keyword argument for this constructor" %
+ (next(iter(kwargs)),))
+
+ def copy(self):
+ """
+ Create a new instance and copy all attributes
+ defined from __slots__ (including those from
+ inherited classes).
+ """
+ obj = self.__class__()
+
+ classes = [self.__class__]
+ while classes:
+ c = classes.pop()
+ if c is SlotObject:
+ continue
+ classes.extend(c.__bases__)
+ slots = getattr(c, "__slots__", None)
+ if not slots:
+ continue
+ for myattr in slots:
+ setattr(obj, myattr, getattr(self, myattr))
+
+ return obj
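+
+# Minimal usage sketch (hypothetical subclass): attributes are declared via
+# __slots__ and initialized through keyword arguments.
+#
+#     class Point(SlotObject):
+#         __slots__ = ("x", "y")
+#
+#     p = Point(x=1, y=2)
+#     q = p.copy()  # q.x == 1, q.y == 2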
diff --git a/lib/portage/util/_ShelveUnicodeWrapper.py b/lib/portage/util/_ShelveUnicodeWrapper.py
new file mode 100644
index 000000000..adbd5199f
--- /dev/null
+++ b/lib/portage/util/_ShelveUnicodeWrapper.py
@@ -0,0 +1,45 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+class ShelveUnicodeWrapper(object):
+ """
+ Convert unicode to str and back again, since python-2.x shelve
+ module doesn't support unicode.
+ """
+ def __init__(self, shelve_instance):
+ self._shelve = shelve_instance
+
+ def _encode(self, s):
+ if isinstance(s, unicode):
+ s = s.encode('utf_8')
+ return s
+
+ def __len__(self):
+ return len(self._shelve)
+
+ def __contains__(self, k):
+ return self._encode(k) in self._shelve
+
+ def __iter__(self):
+ return self._shelve.__iter__()
+
+ def items(self):
+ return self._shelve.iteritems()
+
+ def __setitem__(self, k, v):
+ self._shelve[self._encode(k)] = self._encode(v)
+
+ def __getitem__(self, k):
+ return self._shelve[self._encode(k)]
+
+ def __delitem__(self, k):
+ del self._shelve[self._encode(k)]
+
+ def get(self, k, *args):
+ return self._shelve.get(self._encode(k), *args)
+
+ def close(self):
+ self._shelve.close()
+
+ def clear(self):
+ self._shelve.clear()
diff --git a/lib/portage/util/__init__.py b/lib/portage/util/__init__.py
new file mode 100644
index 000000000..d63d5f156
--- /dev/null
+++ b/lib/portage/util/__init__.py
@@ -0,0 +1,1854 @@
+# Copyright 2004-2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = ['apply_permissions', 'apply_recursive_permissions',
+ 'apply_secpass_permissions', 'apply_stat_permissions', 'atomic_ofstream',
+ 'cmp_sort_key', 'ConfigProtect', 'dump_traceback', 'ensure_dirs',
+ 'find_updated_config_files', 'getconfig', 'getlibpaths', 'grabdict',
+ 'grabdict_package', 'grabfile', 'grabfile_package', 'grablines',
+ 'initialize_logger', 'LazyItemsDict', 'map_dictlist_vals',
+ 'new_protect_filename', 'normalize_path', 'pickle_read', 'stack_dictlist',
+ 'stack_dicts', 'stack_lists', 'unique_array', 'unique_everseen', 'varexpand',
+ 'write_atomic', 'writedict', 'writemsg', 'writemsg_level', 'writemsg_stdout']
+
+from copy import deepcopy
+import errno
+import io
+try:
+ from itertools import chain, filterfalse
+except ImportError:
+ from itertools import chain, ifilterfalse as filterfalse
+import logging
+import re
+import shlex
+import stat
+import string
+import sys
+import traceback
+import glob
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'pickle',
+ 'portage.dep:Atom',
+ 'subprocess',
+)
+
+from portage import os
+from portage import _encodings
+from portage import _os_merge
+from portage import _unicode_encode
+from portage import _unicode_decode
+from portage.const import VCS_DIRS
+from portage.exception import InvalidAtom, PortageException, FileNotFound, \
+ IsADirectory, OperationNotPermitted, ParseError, PermissionDenied, \
+ ReadOnlyFileSystem
+from portage.localization import _
+from portage.proxy.objectproxy import ObjectProxy
+from portage.cache.mappings import UserDict
+
+if sys.hexversion >= 0x3000000:
+ _unicode = str
+else:
+ _unicode = unicode
+
+noiselimit = 0
+
+def initialize_logger(level=logging.WARNING):
+ """Sets up basic logging of portage activities
+ Args:
+ level: the level to emit messages at ('info', 'debug', 'warning' ...)
+ Returns:
+ None
+ """
+ logging.basicConfig(level=level, format='[%(levelname)-4s] %(message)s')
+
+def writemsg(mystr, noiselevel=0, fd=None):
+ """Prints out warning and debug messages based on the noiselimit setting"""
+ global noiselimit
+ if fd is None:
+ fd = sys.stderr
+ if noiselevel <= noiselimit:
+ # avoid potential UnicodeEncodeError
+ if isinstance(fd, io.StringIO):
+ mystr = _unicode_decode(mystr,
+ encoding=_encodings['content'], errors='replace')
+ else:
+ mystr = _unicode_encode(mystr,
+ encoding=_encodings['stdio'], errors='backslashreplace')
+ if sys.hexversion >= 0x3000000 and fd in (sys.stdout, sys.stderr):
+ fd = fd.buffer
+ fd.write(mystr)
+ fd.flush()
+
+def writemsg_stdout(mystr, noiselevel=0):
+ """Prints messages stdout based on the noiselimit setting"""
+ writemsg(mystr, noiselevel=noiselevel, fd=sys.stdout)
+
+def writemsg_level(msg, level=0, noiselevel=0):
+ """
+ Show a message for the given level as defined by the logging module
+ (default is 0). When level >= logging.WARNING then the message is
+ sent to stderr, otherwise it is sent to stdout. The noiselevel is
+ passed directly to writemsg().
+
+ @type msg: str
+ @param msg: a message string, including newline if appropriate
+ @type level: int
+ @param level: a numeric logging level (see the logging module)
+ @type noiselevel: int
+ @param noiselevel: passed directly to writemsg
+ """
+ if level >= logging.WARNING:
+ fd = sys.stderr
+ else:
+ fd = sys.stdout
+ writemsg(msg, noiselevel=noiselevel, fd=fd)
+
+def normalize_path(mypath):
+ """
+ os.path.normpath("//foo") returns "//foo" instead of "/foo"
+ We dislike this behavior so we create our own normpath func
+ to fix it.
+ """
+ if sys.hexversion >= 0x3000000 and isinstance(mypath, bytes):
+ path_sep = os.path.sep.encode()
+ else:
+ path_sep = os.path.sep
+
+ if mypath.startswith(path_sep):
+ # posixpath.normpath collapses 3 or more leading slashes to just 1.
+ return os.path.normpath(2*path_sep + mypath)
+ else:
+ return os.path.normpath(mypath)
+
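+# Illustrative behavior (assumed inputs), following the docstring above:
+#
+#     normalize_path("//foo")      -> "/foo"
+#     normalize_path("/foo//bar/") -> "/foo/bar"
+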
+def grabfile(myfilename, compat_level=0, recursive=0, remember_source_file=False):
+ """This function grabs the lines in a file, normalizes whitespace and returns lines in a list; if a line
+ begins with a #, it is ignored, as are empty lines"""
+
+ mylines = grablines(myfilename, recursive, remember_source_file=True)
+ newlines = []
+
+ for x, source_file in mylines:
+ #the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
+ #into single spaces.
+ myline = x.split()
+ if x and x[0] != "#":
+ mylinetemp = []
+ for item in myline:
+ if item[:1] != "#":
+ mylinetemp.append(item)
+ else:
+ break
+ myline = mylinetemp
+
+ myline = " ".join(myline)
+ if not myline:
+ continue
+ if myline[0] == "#":
+ # Check if we have a compat-level string. BC-integration data.
+ # '##COMPAT==>N<==' 'some string attached to it'
+ mylinetest = myline.split("<==", 1)
+ if len(mylinetest) == 2:
+ myline_potential = mylinetest[1]
+ mylinetest = mylinetest[0].split("##COMPAT==>")
+ if len(mylinetest) == 2:
+ if compat_level >= int(mylinetest[1]):
+ # It's a compat line, and the key matches.
+ newlines.append(myline_potential)
+ continue
+ else:
+ continue
+ if remember_source_file:
+ newlines.append((myline, source_file))
+ else:
+ newlines.append(myline)
+ return newlines
+
+def map_dictlist_vals(func, myDict):
+ """Performs a function on each value of each key in a dictlist.
+ Returns a new dictlist."""
+ new_dl = {}
+ for key in myDict:
+ new_dl[key] = []
+ new_dl[key] = [func(x) for x in myDict[key]]
+ return new_dl
+
+def stack_dictlist(original_dicts, incremental=0, incrementals=[], ignore_none=0):
+ """
+ Stacks a list of dicts into a single dict, optionally merging or
+ overwriting matching key/value pairs for the dict[key]->list.
+ Returns a single dict; entries at higher indices take precedence.
+
+ Example usage:
+ >>> from portage.util import stack_dictlist
+ >>> print stack_dictlist( [{'a':'b'},{'x':'y'}])
+ >>> {'a':'b','x':'y'}
+ >>> print stack_dictlist( [{'a':'b'},{'a':'c'}], incremental = True )
+ >>> {'a':['b','c'] }
+ >>> a = {'KEYWORDS':['x86','alpha']}
+ >>> b = {'KEYWORDS':['-x86']}
+ >>> print stack_dictlist( [a,b] )
+ >>> { 'KEYWORDS':['x86','alpha','-x86']}
+ >>> print stack_dictlist( [a,b], incremental=True)
+ >>> { 'KEYWORDS':['alpha'] }
+ >>> print stack_dictlist( [a,b], incrementals=['KEYWORDS'])
+ >>> { 'KEYWORDS':['alpha'] }
+
+ @param original_dicts a list of (dictionary objects or None)
+ @type list
+ @param incremental True or false depending on whether new keys should overwrite
+ keys which already exist.
+ @type boolean
+ @param incrementals A list of items that should be incremental (-foo removes foo from
+ the returned dict).
+ @type list
+ @param ignore_none Appears to be ignored, but probably was used long long ago.
+ @type boolean
+
+ """
+ final_dict = {}
+ for mydict in original_dicts:
+ if mydict is None:
+ continue
+ for y in mydict:
+ if not y in final_dict:
+ final_dict[y] = []
+
+ for thing in mydict[y]:
+ if thing:
+ if incremental or y in incrementals:
+ if thing == "-*":
+ final_dict[y] = []
+ continue
+ elif thing[:1] == '-':
+ try:
+ final_dict[y].remove(thing[1:])
+ except ValueError:
+ pass
+ continue
+ if thing not in final_dict[y]:
+ final_dict[y].append(thing)
+ if y in final_dict and not final_dict[y]:
+ del final_dict[y]
+ return final_dict
+
+def stack_dicts(dicts, incremental=0, incrementals=[], ignore_none=0):
+ """Stacks an array of dict-types into one array. Optionally merging or
+ overwriting matching key/value pairs for the dict[key]->string.
+ Returns a single dict."""
+ final_dict = {}
+ for mydict in dicts:
+ if not mydict:
+ continue
+ for k, v in mydict.items():
+ if k in final_dict and (incremental or (k in incrementals)):
+ final_dict[k] += " " + v
+ else:
+ final_dict[k] = v
+ return final_dict
+
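+# Illustrative example (assumed values): when "USE" is listed as incremental,
+# matching keys are concatenated instead of overwritten.
+#
+#     stack_dicts([{"USE": "a"}, {"USE": "b"}], incrementals=["USE"])
+#         -> {"USE": "a b"}
+#     stack_dicts([{"USE": "a"}, {"USE": "b"}])
+#         -> {"USE": "b"}
+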
+def append_repo(atom_list, repo_name, remember_source_file=False):
+ """
+ Takes a list of valid atoms without repo spec and appends ::repo_name.
+ If an atom already has a repo part, then it is preserved (see bug #461948).
+ """
+ if remember_source_file:
+ return [(atom.repo is not None and atom or atom.with_repo(repo_name), source) \
+ for atom, source in atom_list]
+ else:
+ return [atom.repo is not None and atom or atom.with_repo(repo_name) \
+ for atom in atom_list]
+
+def stack_lists(lists, incremental=1, remember_source_file=False,
+ warn_for_unmatched_removal=False, strict_warn_for_unmatched_removal=False, ignore_repo=False):
+ """Stacks an array of list-types into one array. Optionally removing
+ distinct values using '-value' notation. Higher index is preferenced.
+
+ all elements must be hashable."""
+ matched_removals = set()
+ unmatched_removals = {}
+ new_list = {}
+ for sub_list in lists:
+ for token in sub_list:
+ token_key = token
+ if remember_source_file:
+ token, source_file = token
+ else:
+ source_file = False
+
+ if token is None:
+ continue
+
+ if incremental:
+ if token == "-*":
+ new_list.clear()
+ elif token[:1] == '-':
+ matched = False
+ if ignore_repo and not "::" in token:
+ #Let -cat/pkg remove cat/pkg::repo.
+ to_be_removed = []
+ token_slice = token[1:]
+ for atom in new_list:
+ atom_without_repo = atom
+ if atom.repo is not None:
+ # Atom.without_repo instantiates a new Atom,
+ # which is unnecessary here, so use string
+ # replacement instead.
+ atom_without_repo = \
+ atom.replace("::" + atom.repo, "", 1)
+ if atom_without_repo == token_slice:
+ to_be_removed.append(atom)
+ if to_be_removed:
+ matched = True
+ for atom in to_be_removed:
+ new_list.pop(atom)
+ else:
+ try:
+ new_list.pop(token[1:])
+ matched = True
+ except KeyError:
+ pass
+
+ if not matched:
+ if source_file and \
+ (strict_warn_for_unmatched_removal or \
+ token_key not in matched_removals):
+ unmatched_removals.setdefault(source_file, set()).add(token)
+ else:
+ matched_removals.add(token_key)
+ else:
+ new_list[token] = source_file
+ else:
+ new_list[token] = source_file
+
+ if warn_for_unmatched_removal:
+ for source_file, tokens in unmatched_removals.items():
+ if len(tokens) > 3:
+ selected = [tokens.pop(), tokens.pop(), tokens.pop()]
+ writemsg(_("--- Unmatched removal atoms in %s: %s and %s more\n") % \
+ (source_file, ", ".join(selected), len(tokens)),
+ noiselevel=-1)
+ else:
+ writemsg(_("--- Unmatched removal atom(s) in %s: %s\n") % (source_file, ", ".join(tokens)),
+ noiselevel=-1)
+
+ if remember_source_file:
+ return list(new_list.items())
+ else:
+ return list(new_list)
+
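+# Illustrative example (assumed tokens): with incremental stacking, a later
+# "-value" token removes a previously stacked "value", and "-*" clears
+# everything stacked so far.
+#
+#     stack_lists([["a", "b"], ["-a", "c"]])  -> ["b", "c"] (order may vary)
+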
+def grabdict(myfilename, juststrings=0, empty=0, recursive=0, incremental=1, newlines=0):
+ """
+ This function grabs the lines in a file, normalizes whitespace and returns lines in a dictionary
+
+ @param myfilename: file to process
+ @type myfilename: string (path)
+ @param juststrings: only return strings
+ @type juststrings: Boolean (integer)
+ @param empty: Allow key-only entries (lines with no values after the key)
+ @type empty: Boolean (integer)
+ @param recursive: Recursively grab ( support for /etc/portage/package.keywords/* and friends )
+ @type recursive: Boolean (integer)
+ @param incremental: Append to the return list, don't overwrite
+ @type incremental: Boolean (integer)
+ @param newlines: Append newlines
+ @type newlines: Boolean (integer)
+ @rtype: Dictionary
+ @return:
+ 1. Returns the lines in a file in a dictionary, for example:
+ 'sys-apps/portage x86 amd64 ppc'
+ would return
+ {"sys-apps/portage" : ['x86', 'amd64', 'ppc']}
+ """
+ newdict = {}
+ for x in grablines(myfilename, recursive):
+ #the split/join thing removes leading and trailing whitespace, and converts any whitespace in the line
+ #into single spaces.
+ if x[0] == "#":
+ continue
+ myline = x.split()
+ mylinetemp = []
+ for item in myline:
+ if item[:1] != "#":
+ mylinetemp.append(item)
+ else:
+ break
+ myline = mylinetemp
+ if len(myline) < 2 and empty == 0:
+ continue
+ if len(myline) < 1 and empty == 1:
+ continue
+ if newlines:
+ myline.append("\n")
+ if incremental:
+ newdict.setdefault(myline[0], []).extend(myline[1:])
+ else:
+ newdict[myline[0]] = myline[1:]
+ if juststrings:
+ for k, v in newdict.items():
+ newdict[k] = " ".join(v)
+ return newdict
+
+_eapi_cache = {}
+
+def read_corresponding_eapi_file(filename, default="0"):
+ """
+ Read the 'eapi' file from the directory 'filename' is in.
+ Returns "0" if the file is not present or invalid.
+ """
+ eapi_file = os.path.join(os.path.dirname(filename), "eapi")
+ try:
+ eapi = _eapi_cache[eapi_file]
+ except KeyError:
+ pass
+ else:
+ if eapi is None:
+ return default
+ return eapi
+
+ eapi = None
+ try:
+ with io.open(_unicode_encode(eapi_file,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'], errors='replace') as f:
+ lines = f.readlines()
+ if len(lines) == 1:
+ eapi = lines[0].rstrip("\n")
+ else:
+ writemsg(_("--- Invalid 'eapi' file (doesn't contain exactly one line): %s\n") % (eapi_file),
+ noiselevel=-1)
+ except IOError:
+ pass
+
+ _eapi_cache[eapi_file] = eapi
+ if eapi is None:
+ return default
+ return eapi
+
+def grabdict_package(myfilename, juststrings=0, recursive=0, newlines=0,
+ allow_wildcard=False, allow_repo=False, allow_build_id=False, allow_use=True,
+ verify_eapi=False, eapi=None, eapi_default="0"):
+ """ Does the same thing as grabdict except it validates keys
+ with isvalidatom()"""
+
+ if recursive:
+ file_list = _recursive_file_list(myfilename)
+ else:
+ file_list = [myfilename]
+
+ atoms = {}
+ for filename in file_list:
+ d = grabdict(filename, juststrings=False,
+ empty=True, recursive=False, incremental=True, newlines=newlines)
+ if not d:
+ continue
+ if verify_eapi and eapi is None:
+ eapi = read_corresponding_eapi_file(
+ myfilename, default=eapi_default)
+
+ for k, v in d.items():
+ try:
+ k = Atom(k, allow_wildcard=allow_wildcard,
+ allow_repo=allow_repo,
+ allow_build_id=allow_build_id, eapi=eapi)
+ except InvalidAtom as e:
+ writemsg(_("--- Invalid atom in %s: %s\n") % (filename, e),
+ noiselevel=-1)
+ else:
+ if not allow_use and k.use:
+ writemsg(_("--- Atom is not allowed to have USE flag(s) in %s: %s\n") % (filename, k),
+ noiselevel=-1)
+ continue
+ atoms.setdefault(k, []).extend(v)
+
+ if juststrings:
+ for k, v in atoms.items():
+ atoms[k] = " ".join(v)
+
+ return atoms
+
+def grabfile_package(myfilename, compatlevel=0, recursive=0,
+ allow_wildcard=False, allow_repo=False, allow_build_id=False,
+ remember_source_file=False, verify_eapi=False, eapi=None,
+ eapi_default="0"):
+
+ pkgs = grabfile(myfilename, compatlevel, recursive=recursive, remember_source_file=True)
+ if not pkgs:
+ return pkgs
+ if verify_eapi and eapi is None:
+ eapi = read_corresponding_eapi_file(
+ myfilename, default=eapi_default)
+ mybasename = os.path.basename(myfilename)
+ is_packages_file = mybasename == 'packages'
+ atoms = []
+ for pkg, source_file in pkgs:
+ pkg_orig = pkg
+ # for packages and package.mask files
+ if pkg[:1] == "-":
+ if is_packages_file and pkg == '-*':
+ if remember_source_file:
+ atoms.append((pkg, source_file))
+ else:
+ atoms.append(pkg)
+ continue
+ pkg = pkg[1:]
+ if pkg[:1] == '*' and is_packages_file:
+ pkg = pkg[1:]
+ try:
+ pkg = Atom(pkg, allow_wildcard=allow_wildcard,
+ allow_repo=allow_repo, allow_build_id=allow_build_id,
+ eapi=eapi)
+ except InvalidAtom as e:
+ writemsg(_("--- Invalid atom in %s: %s\n") % (source_file, e),
+ noiselevel=-1)
+ else:
+ if pkg_orig == _unicode(pkg):
+ # normal atom, so return as Atom instance
+ if remember_source_file:
+ atoms.append((pkg, source_file))
+ else:
+ atoms.append(pkg)
+ else:
+ # atom has special prefix, so return as string
+ if remember_source_file:
+ atoms.append((pkg_orig, source_file))
+ else:
+ atoms.append(pkg_orig)
+ return atoms
+
+def _recursive_basename_filter(f):
+ return not f.startswith(".") and not f.endswith("~")
+
+def _recursive_file_list(path):
+ # path may be a regular file or a directory
+
+ def onerror(e):
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(path)
+
+ stack = [os.path.split(path)]
+
+ while stack:
+ parent, fname = stack.pop()
+ fullpath = os.path.join(parent, fname)
+
+ try:
+ st = os.stat(fullpath)
+ except OSError as e:
+ onerror(e)
+ continue
+
+ if stat.S_ISDIR(st.st_mode):
+ if fname in VCS_DIRS or not _recursive_basename_filter(fname):
+ continue
+ try:
+ children = os.listdir(fullpath)
+ except OSError as e:
+ onerror(e)
+ continue
+
+ # Sort in reverse, since we pop from the end of the stack.
+ # Include regular files in the stack, so files are sorted
+ # together with directories.
+ children.sort(reverse=True)
+ stack.extend((fullpath, x) for x in children)
+
+ elif stat.S_ISREG(st.st_mode):
+ if _recursive_basename_filter(fname):
+ yield fullpath
+
+def grablines(myfilename, recursive=0, remember_source_file=False):
+ mylines = []
+ if recursive:
+ for f in _recursive_file_list(myfilename):
+ mylines.extend(grablines(f, recursive=False,
+ remember_source_file=remember_source_file))
+
+ else:
+ try:
+ with io.open(_unicode_encode(myfilename,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='replace') as myfile:
+ if remember_source_file:
+ mylines = [(line, myfilename) for line in myfile.readlines()]
+ else:
+ mylines = myfile.readlines()
+ except IOError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(myfilename)
+ elif e.errno in (errno.ENOENT, errno.ESTALE):
+ pass
+ else:
+ raise
+ return mylines
+
+def writedict(mydict, myfilename, writekey=True):
+ """Writes out a dict to a file; writekey=0 mode doesn't write out
+ the key and assumes all values are strings, not lists."""
+ lines = []
+ if not writekey:
+ for v in mydict.values():
+ lines.append(v + "\n")
+ else:
+ for k, v in mydict.items():
+ lines.append("%s %s\n" % (k, " ".join(v)))
+ write_atomic(myfilename, "".join(lines))
+
+def shlex_split(s):
+ """
+ This is equivalent to shlex.split, but if the current interpreter is
+ python2, it temporarily encodes unicode strings to bytes since python2's
+ shlex.split() doesn't handle unicode strings.
+ """
+ convert_to_bytes = sys.hexversion < 0x3000000 and not isinstance(s, bytes)
+ if convert_to_bytes:
+ s = _unicode_encode(s)
+ rval = shlex.split(s)
+ if convert_to_bytes:
+ rval = [_unicode_decode(x) for x in rval]
+ return rval
+
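+# Illustrative example (assumed input): quoting is honored just like
+# shlex.split(), on both Python 2 and Python 3.
+#
+#     shlex_split('CFLAGS="-O2 -pipe" MAKEOPTS=-j4')
+#         -> ['CFLAGS=-O2 -pipe', 'MAKEOPTS=-j4']
+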
+class _getconfig_shlex(shlex.shlex):
+
+ def __init__(self, portage_tolerant=False, **kwargs):
+ shlex.shlex.__init__(self, **kwargs)
+ self.__portage_tolerant = portage_tolerant
+
+ def allow_sourcing(self, var_expand_map):
+ self.source = portage._native_string("source")
+ self.var_expand_map = var_expand_map
+
+ def sourcehook(self, newfile):
+ try:
+ newfile = varexpand(newfile, self.var_expand_map)
+ return shlex.shlex.sourcehook(self, newfile)
+ except EnvironmentError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(newfile)
+ if e.errno not in (errno.ENOENT, errno.ENOTDIR):
+ writemsg("open('%s', 'r'): %s\n" % (newfile, e), noiselevel=-1)
+ raise
+
+ msg = self.error_leader()
+ if e.errno == errno.ENOTDIR:
+ msg += _("%s: Not a directory") % newfile
+ else:
+ msg += _("%s: No such file or directory") % newfile
+
+ if self.__portage_tolerant:
+ writemsg("%s\n" % msg, noiselevel=-1)
+ else:
+ raise ParseError(msg)
+ return (newfile, io.StringIO())
+
+_invalid_var_name_re = re.compile(r'^\d|\W')
+
+def getconfig(mycfg, tolerant=False, allow_sourcing=False, expand=True,
+ recursive=False):
+
+ if isinstance(expand, dict):
+ # Some existing variable definitions have been
+ # passed in, for use in substitutions.
+ expand_map = expand
+ expand = True
+ else:
+ expand_map = {}
+ mykeys = {}
+
+ if recursive:
+ # Emulate source commands so that syntax error messages
+ # can display real file names and line numbers.
+ if not expand:
+ expand_map = False
+ fname = None
+ for fname in _recursive_file_list(mycfg):
+ mykeys.update(getconfig(fname, tolerant=tolerant,
+ allow_sourcing=allow_sourcing, expand=expand_map,
+ recursive=False) or {})
+ if fname is None:
+ return None
+ return mykeys
+
+ f = None
+ try:
+ # NOTE: shlex doesn't support unicode objects with Python 2
+ # (produces spurious \0 characters).
+ if sys.hexversion < 0x3000000:
+ f = open(_unicode_encode(mycfg,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ else:
+ f = open(_unicode_encode(mycfg,
+ encoding=_encodings['fs'], errors='strict'), mode='r',
+ encoding=_encodings['content'], errors='replace')
+ content = f.read()
+ except IOError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(mycfg)
+ if e.errno != errno.ENOENT:
+ writemsg("open('%s', 'r'): %s\n" % (mycfg, e), noiselevel=-1)
+ if e.errno not in (errno.EISDIR,):
+ raise
+ return None
+ finally:
+ if f is not None:
+ f.close()
+
+ # Since this file has unicode_literals enabled, and Python 2's
+ # shlex implementation does not support unicode, the following code
+ # uses _native_string() to encode unicode literals when necessary.
+
+ # Workaround for avoiding a silent error in shlex that is
+ # triggered by a source statement at the end of the file
+ # without a trailing newline after the source statement.
+ if content and content[-1] != portage._native_string('\n'):
+ content += portage._native_string('\n')
+
+ # Warn about dos-style line endings since that prevents
+ # people from being able to source them with bash.
+ if portage._native_string('\r') in content:
+ writemsg(("!!! " + _("Please use dos2unix to convert line endings " + \
+ "in config file: '%s'") + "\n") % mycfg, noiselevel=-1)
+
+ lex = None
+ try:
+ # The default shlex.sourcehook() implementation
+ # only joins relative paths when the infile
+ # attribute is properly set.
+ lex = _getconfig_shlex(instream=content, infile=mycfg, posix=True,
+ portage_tolerant=tolerant)
+ lex.wordchars = portage._native_string(string.digits +
+ string.ascii_letters + r"~!@#$%*_\:;?,./-+{}")
+ lex.quotes = portage._native_string("\"'")
+ if allow_sourcing:
+ lex.allow_sourcing(expand_map)
+
+ while True:
+ key = _unicode_decode(lex.get_token())
+ if key == "export":
+ key = _unicode_decode(lex.get_token())
+ if key is None:
+ #normal end of file
+ break
+
+ equ = _unicode_decode(lex.get_token())
+ if not equ:
+ msg = lex.error_leader() + _("Unexpected EOF")
+ if not tolerant:
+ raise ParseError(msg)
+ else:
+ writemsg("%s\n" % msg, noiselevel=-1)
+ return mykeys
+
+ elif equ != "=":
+ msg = lex.error_leader() + \
+ _("Invalid token '%s' (not '=')") % (equ,)
+ if not tolerant:
+ raise ParseError(msg)
+ else:
+ writemsg("%s\n" % msg, noiselevel=-1)
+ return mykeys
+
+ val = _unicode_decode(lex.get_token())
+ if val is None:
+ msg = lex.error_leader() + \
+ _("Unexpected end of config file: variable '%s'") % (key,)
+ if not tolerant:
+ raise ParseError(msg)
+ else:
+ writemsg("%s\n" % msg, noiselevel=-1)
+ return mykeys
+
+ if _invalid_var_name_re.search(key) is not None:
+ msg = lex.error_leader() + \
+ _("Invalid variable name '%s'") % (key,)
+ if not tolerant:
+ raise ParseError(msg)
+ writemsg("%s\n" % msg, noiselevel=-1)
+ continue
+
+ if expand:
+ mykeys[key] = varexpand(val, mydict=expand_map,
+ error_leader=lex.error_leader)
+ expand_map[key] = mykeys[key]
+ else:
+ mykeys[key] = val
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ if isinstance(e, ParseError) or lex is None:
+ raise
+ msg = "%s%s" % (lex.error_leader(), e)
+ writemsg("%s\n" % msg, noiselevel=-1)
+ raise
+
+ return mykeys
+
+_varexpand_word_chars = frozenset(string.ascii_letters + string.digits + "_")
+_varexpand_unexpected_eof_msg = "unexpected EOF while looking for matching `}'"
+
+def varexpand(mystring, mydict=None, error_leader=None):
+ if mydict is None:
+ mydict = {}
+
+ """
+ new variable expansion code. Preserves quotes, handles \n, etc.
+ This code is used by the configfile code, as well as others (parser)
+ This would be a good bunch of code to port to C.
+ """
+ numvars = 0
+ # in single, double quotes
+ insing = 0
+ indoub = 0
+ pos = 0
+ length = len(mystring)
+ newstring = []
+ while pos < length:
+ current = mystring[pos]
+ if current == "'":
+ if (indoub):
+ newstring.append("'")
+ else:
+ newstring.append("'") # Quote removal is handled by shlex.
+ insing = not insing
+ pos += 1
+ continue
+ elif current == '"':
+ if (insing):
+ newstring.append('"')
+ else:
+ newstring.append('"') # Quote removal is handled by shlex.
+ indoub = not indoub
+ pos += 1
+ continue
+ if not insing:
+ #expansion time
+ if current == "\n":
+ #convert newlines to spaces
+ newstring.append(" ")
+ pos += 1
+ elif current == "\\":
+ # For backslash expansion, this function used to behave like
+ # echo -e, but that's not needed for our purposes. We want to
+ # behave like bash does when expanding a variable assignment
+ # in a sourced file, in which case it performs backslash
+ # removal for \\ and \$ but nothing more. It also removes
+ # escaped newline characters. Note that we don't handle
+ # escaped quotes here, since getconfig() uses shlex
+ # to handle that earlier.
+ if pos + 1 >= len(mystring):
+ newstring.append(current)
+ break
+ else:
+ current = mystring[pos + 1]
+ pos += 2
+ if current == "$":
+ newstring.append(current)
+ elif current == "\\":
+ newstring.append(current)
+ # BUG: This spot appears buggy, but it's intended to
+ # be bug-for-bug compatible with existing behavior.
+ if pos < length and \
+ mystring[pos] in ("'", '"', "$"):
+ newstring.append(mystring[pos])
+ pos += 1
+ elif current == "\n":
+ pass
+ else:
+ newstring.append(mystring[pos - 2:pos])
+ continue
+ elif current == "$":
+ pos += 1
+ if pos == length:
+ # shells handle this like \$
+ newstring.append(current)
+ continue
+
+ if mystring[pos] == "{":
+ pos += 1
+ if pos == length:
+ msg = _varexpand_unexpected_eof_msg
+ if error_leader is not None:
+ msg = error_leader() + msg
+ writemsg(msg + "\n", noiselevel=-1)
+ return ""
+
+ braced = True
+ else:
+ braced = False
+ myvstart = pos
+ while mystring[pos] in _varexpand_word_chars:
+ if pos + 1 >= len(mystring):
+ if braced:
+ msg = _varexpand_unexpected_eof_msg
+ if error_leader is not None:
+ msg = error_leader() + msg
+ writemsg(msg + "\n", noiselevel=-1)
+ return ""
+ else:
+ pos += 1
+ break
+ pos += 1
+ myvarname = mystring[myvstart:pos]
+ if braced:
+ if mystring[pos] != "}":
+ msg = _varexpand_unexpected_eof_msg
+ if error_leader is not None:
+ msg = error_leader() + msg
+ writemsg(msg + "\n", noiselevel=-1)
+ return ""
+ else:
+ pos += 1
+ if len(myvarname) == 0:
+ msg = "$"
+ if braced:
+ msg += "{}"
+ msg += ": bad substitution"
+ if error_leader is not None:
+ msg = error_leader() + msg
+ writemsg(msg + "\n", noiselevel=-1)
+ return ""
+ numvars += 1
+ if myvarname in mydict:
+ newstring.append(mydict[myvarname])
+ else:
+ newstring.append(current)
+ pos += 1
+ else:
+ newstring.append(current)
+ pos += 1
+
+ return "".join(newstring)
+
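+# Illustrative example (assumed variables): expansion uses the supplied
+# mapping, and both $VAR and ${VAR} forms are recognized.
+#
+#     varexpand("${PORTDIR}/profiles", {"PORTDIR": "/usr/portage"})
+#         -> "/usr/portage/profiles"
+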
+# broken and removed, but can still be imported
+pickle_write = None
+
+def pickle_read(filename, default=None, debug=0):
+ if not os.access(filename, os.R_OK):
+ writemsg(_("pickle_read(): File not readable. '") + filename + "'\n", 1)
+ return default
+ data = None
+ try:
+ myf = open(_unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ mypickle = pickle.Unpickler(myf)
+ data = mypickle.load()
+ myf.close()
+ del mypickle, myf
+ writemsg(_("pickle_read(): Loaded pickle. '") + filename + "'\n", 1)
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ writemsg(_("!!! Failed to load pickle: ") + str(e) + "\n", 1)
+ data = default
+ return data
+
+def dump_traceback(msg, noiselevel=1):
+ info = sys.exc_info()
+ if not info[2]:
+ stack = traceback.extract_stack()[:-1]
+ error = None
+ else:
+ stack = traceback.extract_tb(info[2])
+ error = str(info[1])
+ writemsg("\n====================================\n", noiselevel=noiselevel)
+ writemsg("%s\n\n" % msg, noiselevel=noiselevel)
+ for line in traceback.format_list(stack):
+ writemsg(line, noiselevel=noiselevel)
+ if error:
+ writemsg(error+"\n", noiselevel=noiselevel)
+ writemsg("====================================\n\n", noiselevel=noiselevel)
+
+class cmp_sort_key(object):
+ """
+ In python-3.0 the list.sort() method no longer has a "cmp" keyword
+ argument. This class acts as an adapter which converts a cmp function
+ into one that's suitable for use as the "key" keyword argument to
+ list.sort(), making it easier to port code for python-3.0 compatibility.
+ It works by generating key objects which use the given cmp function to
+ implement their __lt__ method.
+
+ Beginning with Python 2.7 and 3.2, equivalent functionality is provided
+ by functools.cmp_to_key().
+ """
+ __slots__ = ("_cmp_func",)
+
+ def __init__(self, cmp_func):
+ """
+ @type cmp_func: callable which takes 2 positional arguments
+ @param cmp_func: A cmp function.
+ """
+ self._cmp_func = cmp_func
+
+ def __call__(self, lhs):
+ return self._cmp_key(self._cmp_func, lhs)
+
+ class _cmp_key(object):
+ __slots__ = ("_cmp_func", "_obj")
+
+ def __init__(self, cmp_func, obj):
+ self._cmp_func = cmp_func
+ self._obj = obj
+
+ def __lt__(self, other):
+ if other.__class__ is not self.__class__:
+ raise TypeError("Expected type %s, got %s" % \
+ (self.__class__, other.__class__))
+ return self._cmp_func(self._obj, other._obj) < 0
+
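+# Illustrative usage (hypothetical cmp function): wrap a cmp-style callable
+# so it can serve as a sort key on both Python 2 and Python 3.
+#
+#     def cmp_len(a, b):
+#         return len(a) - len(b)
+#
+#     names = ["ccc", "a", "bb"]
+#     names.sort(key=cmp_sort_key(cmp_len))  # -> ["a", "bb", "ccc"]
+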
+def unique_array(s):
+ """lifted from python cookbook, credit: Tim Peters
+ Return a list of the elements in s in arbitrary order, sans duplicates"""
+ n = len(s)
+ # assume all elements are hashable, if so, it's linear
+ try:
+ return list(set(s))
+ except TypeError:
+ pass
+
+ # so much for linear. abuse sort.
+ try:
+ t = list(s)
+ t.sort()
+ except TypeError:
+ pass
+ else:
+ assert n > 0
+ last = t[0]
+ lasti = i = 1
+ while i < n:
+ if t[i] != last:
+ t[lasti] = last = t[i]
+ lasti += 1
+ i += 1
+ return t[:lasti]
+
+ # blah. back to original portage.unique_array
+ u = []
+ for x in s:
+ if x not in u:
+ u.append(x)
+ return u
+
+def unique_everseen(iterable, key=None):
+ """
+ List unique elements, preserving order. Remember all elements ever seen.
+ Taken from itertools documentation.
+ """
+ # unique_everseen('AAAABBBCCDAABBB') --> A B C D
+ # unique_everseen('ABBCcAD', str.lower) --> A B C D
+ seen = set()
+ seen_add = seen.add
+ if key is None:
+ for element in filterfalse(seen.__contains__, iterable):
+ seen_add(element)
+ yield element
+ else:
+ for element in iterable:
+ k = key(element)
+ if k not in seen:
+ seen_add(k)
+ yield element
+
+def _do_stat(filename, follow_links=True):
+ try:
+ if follow_links:
+ return os.stat(filename)
+ else:
+ return os.lstat(filename)
+ except OSError as oe:
+ func_call = "stat('%s')" % filename
+ if oe.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif oe.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif oe.errno == errno.ENOENT:
+ raise FileNotFound(filename)
+ else:
+ raise
+
+def apply_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
+ stat_cached=None, follow_links=True):
+ """Apply user, group, and mode bits to a file if the existing bits do not
+ already match. The default behavior is to force an exact match of mode
+ bits. When mask=0 is specified, mode bits on the target file are allowed
+ to be a superset of the mode argument (via logical OR). When mask>0, the
+ mode bits that the target file is allowed to have are restricted via
+ logical XOR.
+ Returns True if the permissions were modified and False otherwise."""
+
+ modified = False
+
+ # Since Python 3.4, chown requires int type (no proxies).
+ uid = int(uid)
+ gid = int(gid)
+
+ if stat_cached is None:
+ stat_cached = _do_stat(filename, follow_links=follow_links)
+
+ if (uid != -1 and uid != stat_cached.st_uid) or \
+ (gid != -1 and gid != stat_cached.st_gid):
+ try:
+ if follow_links:
+ os.chown(filename, uid, gid)
+ else:
+ portage.data.lchown(filename, uid, gid)
+ modified = True
+ except OSError as oe:
+ func_call = "chown('%s', %i, %i)" % (filename, uid, gid)
+ if oe.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif oe.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif oe.errno == errno.EROFS:
+ raise ReadOnlyFileSystem(func_call)
+ elif oe.errno == errno.ENOENT:
+ raise FileNotFound(filename)
+ else:
+ raise
+
+ new_mode = -1
+ st_mode = stat_cached.st_mode & 0o7777 # protect from unwanted bits
+ if mask >= 0:
+ if mode == -1:
+ mode = 0 # Don't add any mode bits when mode is unspecified.
+ else:
+ mode = mode & 0o7777
+ if (mode & st_mode != mode) or \
+ ((mask ^ st_mode) & st_mode != st_mode):
+ new_mode = mode | st_mode
+ new_mode = (mask ^ new_mode) & new_mode
+ elif mode != -1:
+ mode = mode & 0o7777 # protect from unwanted bits
+ if mode != st_mode:
+ new_mode = mode
+
+ # The chown system call may clear S_ISUID and S_ISGID
+ # bits, so those bits are restored if necessary.
+ if modified and new_mode == -1 and \
+ (st_mode & stat.S_ISUID or st_mode & stat.S_ISGID):
+ if mode == -1:
+ new_mode = st_mode
+ else:
+ mode = mode & 0o7777
+ if mask >= 0:
+ new_mode = mode | st_mode
+ new_mode = (mask ^ new_mode) & new_mode
+ else:
+ new_mode = mode
+ if not (new_mode & stat.S_ISUID or new_mode & stat.S_ISGID):
+ new_mode = -1
+
+ if not follow_links and stat.S_ISLNK(stat_cached.st_mode):
+ # Mode doesn't matter for symlinks.
+ new_mode = -1
+
+ if new_mode != -1:
+ try:
+ os.chmod(filename, new_mode)
+ modified = True
+ except OSError as oe:
+ func_call = "chmod('%s', %s)" % (filename, oct(new_mode))
+ if oe.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif oe.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif oe.errno == errno.EROFS:
+ raise ReadOnlyFileSystem(func_call)
+ elif oe.errno == errno.ENOENT:
+ raise FileNotFound(filename)
+ raise
+ return modified
+
+def apply_stat_permissions(filename, newstat, **kwargs):
+ """A wrapper around apply_secpass_permissions that gets
+ uid, gid, and mode from a stat object"""
+ return apply_secpass_permissions(filename, uid=newstat.st_uid, gid=newstat.st_gid,
+ mode=newstat.st_mode, **kwargs)
+
+def apply_recursive_permissions(top, uid=-1, gid=-1,
+ dirmode=-1, dirmask=-1, filemode=-1, filemask=-1, onerror=None):
+ """A wrapper around apply_secpass_permissions that applies permissions
+ recursively. If optional argument onerror is specified, it should be a
+ function; it will be called with one argument, a PortageException instance.
+ Returns True if all permissions are applied and False if some are left
+ unapplied."""
+
+ # Avoid issues with circular symbolic links, as in bug #339670.
+ follow_links = False
+
+ if onerror is None:
+ # Default behavior is to dump errors to stderr so they won't
+ # go unnoticed. Callers can pass in a quiet instance.
+ def onerror(e):
+ if isinstance(e, OperationNotPermitted):
+ writemsg(_("Operation Not Permitted: %s\n") % str(e),
+ noiselevel=-1)
+ elif isinstance(e, FileNotFound):
+ writemsg(_("File Not Found: '%s'\n") % str(e), noiselevel=-1)
+ else:
+ raise
+
+ # For bug 554084, always apply permissions to a directory before
+ # that directory is traversed.
+ all_applied = True
+
+ try:
+ stat_cached = _do_stat(top, follow_links=follow_links)
+ except FileNotFound:
+ # backward compatibility
+ return True
+
+ if stat.S_ISDIR(stat_cached.st_mode):
+ mode = dirmode
+ mask = dirmask
+ else:
+ mode = filemode
+ mask = filemask
+
+ try:
+ applied = apply_secpass_permissions(top,
+ uid=uid, gid=gid, mode=mode, mask=mask,
+ stat_cached=stat_cached, follow_links=follow_links)
+ if not applied:
+ all_applied = False
+ except PortageException as e:
+ all_applied = False
+ onerror(e)
+
+ for dirpath, dirnames, filenames in os.walk(top):
+ for name, mode, mask in chain(
+ ((x, filemode, filemask) for x in filenames),
+ ((x, dirmode, dirmask) for x in dirnames)):
+ try:
+ applied = apply_secpass_permissions(os.path.join(dirpath, name),
+ uid=uid, gid=gid, mode=mode, mask=mask,
+ follow_links=follow_links)
+ if not applied:
+ all_applied = False
+ except PortageException as e:
+ # Ignore InvalidLocation exceptions such as FileNotFound
+ # and DirectoryNotFound since sometimes things disappear,
+ # like when adjusting permissions on DISTCC_DIR.
+ if not isinstance(e, portage.exception.InvalidLocation):
+ all_applied = False
+ onerror(e)
+ return all_applied
+
+def apply_secpass_permissions(filename, uid=-1, gid=-1, mode=-1, mask=-1,
+ stat_cached=None, follow_links=True):
+ """A wrapper around apply_permissions that uses secpass and simple
+ logic to apply as much of the permissions as possible without
+ generating an obviously avoidable permission exception. Despite
+ attempts to avoid an exception, it's possible that one will be raised
+ anyway, so be prepared.
+ Returns True if all permissions are applied and False if some are left
+ unapplied."""
+
+ if stat_cached is None:
+ stat_cached = _do_stat(filename, follow_links=follow_links)
+
+ all_applied = True
+
+ # Avoid accessing portage.data.secpass when possible, since
+ # it triggers config loading (undesirable for chmod-lite).
+ if (uid != -1 or gid != -1) and portage.data.secpass < 2:
+
+ if uid != -1 and \
+ uid != stat_cached.st_uid:
+ all_applied = False
+ uid = -1
+
+ if gid != -1 and \
+ gid != stat_cached.st_gid and \
+ gid not in os.getgroups():
+ all_applied = False
+ gid = -1
+
+ apply_permissions(filename, uid=uid, gid=gid, mode=mode, mask=mask,
+ stat_cached=stat_cached, follow_links=follow_links)
+ return all_applied
+
+class atomic_ofstream(ObjectProxy):
+ """Write a file atomically via os.rename(). Atomic replacement prevents
+ interprocess interference and prevents corruption of the target
+ file when the write is interrupted (for example, when an 'out of space'
+ error occurs)."""
+
+ def __init__(self, filename, mode='w', follow_links=True, **kargs):
+ """Opens a temporary filename.pid in the same directory as filename."""
+ ObjectProxy.__init__(self)
+ object.__setattr__(self, '_aborted', False)
+ if 'b' in mode:
+ open_func = open
+ else:
+ open_func = io.open
+ kargs.setdefault('encoding', _encodings['content'])
+ kargs.setdefault('errors', 'backslashreplace')
+
+ if follow_links:
+ canonical_path = os.path.realpath(filename)
+ object.__setattr__(self, '_real_name', canonical_path)
+ tmp_name = "%s.%i" % (canonical_path, os.getpid())
+ try:
+ object.__setattr__(self, '_file',
+ open_func(_unicode_encode(tmp_name,
+ encoding=_encodings['fs'], errors='strict'),
+ mode=mode, **kargs))
+ return
+ except IOError as e:
+ if canonical_path == filename:
+ raise
+ # Ignore this error, since it's irrelevant
+ # and the below open call will produce a
+ # new error if necessary.
+
+ object.__setattr__(self, '_real_name', filename)
+ tmp_name = "%s.%i" % (filename, os.getpid())
+ object.__setattr__(self, '_file',
+ open_func(_unicode_encode(tmp_name,
+ encoding=_encodings['fs'], errors='strict'),
+ mode=mode, **kargs))
+
+ def _get_target(self):
+ return object.__getattribute__(self, '_file')
+
+ if sys.hexversion >= 0x3000000:
+
+ def __getattribute__(self, attr):
+ if attr in ('close', 'abort', '__del__'):
+ return object.__getattribute__(self, attr)
+ return getattr(object.__getattribute__(self, '_file'), attr)
+
+ else:
+
+ # For TextIOWrapper, automatically coerce write calls to
+ # unicode, in order to avoid TypeError when writing raw
+ # bytes with python2.
+
+ def __getattribute__(self, attr):
+ if attr in ('close', 'abort', 'write', '__del__'):
+ return object.__getattribute__(self, attr)
+ return getattr(object.__getattribute__(self, '_file'), attr)
+
+ def write(self, s):
+ f = object.__getattribute__(self, '_file')
+ if isinstance(f, io.TextIOWrapper):
+ s = _unicode_decode(s)
+ return f.write(s)
+
+ def close(self):
+ """Closes the temporary file, copies permissions (if possible),
+ and performs the atomic replacement via os.rename(). If the abort()
+ method has been called, then the temp file is closed and removed."""
+ f = object.__getattribute__(self, '_file')
+ real_name = object.__getattribute__(self, '_real_name')
+ if not f.closed:
+ try:
+ f.close()
+ if not object.__getattribute__(self, '_aborted'):
+ try:
+ apply_stat_permissions(f.name, os.stat(real_name))
+ except OperationNotPermitted:
+ pass
+ except FileNotFound:
+ pass
+ except OSError as oe: # from the above os.stat call
+ if oe.errno in (errno.ENOENT, errno.EPERM):
+ pass
+ else:
+ raise
+ os.rename(f.name, real_name)
+ finally:
+ # Make sure we cleanup the temp file
+ # even if an exception is raised.
+ try:
+ os.unlink(f.name)
+ except OSError as oe:
+ pass
+
+ def abort(self):
+ """If an error occurs while writing the file, the user should
+ call this method in order to leave the target file unchanged.
+ This will call close() automatically."""
+ if not object.__getattribute__(self, '_aborted'):
+ object.__setattr__(self, '_aborted', True)
+ self.close()
+
+ def __del__(self):
+ """If the user does not explicitly call close(), it is
+ assumed that an error has occurred, so we abort()."""
+ try:
+ f = object.__getattribute__(self, '_file')
+ except AttributeError:
+ pass
+ else:
+ if not f.closed:
+ self.abort()
+ # ensure destructor from the base class is called
+ base_destructor = getattr(ObjectProxy, '__del__', None)
+ if base_destructor is not None:
+ base_destructor(self)
+
+def write_atomic(file_path, content, **kwargs):
+ f = None
+ try:
+ f = atomic_ofstream(file_path, **kwargs)
+ f.write(content)
+ f.close()
+ except (IOError, OSError) as e:
+ if f:
+ f.abort()
+ func_call = "write_atomic('%s')" % file_path
+ if e.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif e.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif e.errno == errno.EROFS:
+ raise ReadOnlyFileSystem(func_call)
+ elif e.errno == errno.ENOENT:
+ raise FileNotFound(file_path)
+ else:
+ raise
+
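+# Illustrative usage (hypothetical path and content): the content is written
+# to a temporary file which then atomically replaces the target via rename.
+#
+#     write_atomic("/etc/portage/package.use", "app-editors/vim python\n")
+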
+def ensure_dirs(dir_path, **kwargs):
+ """Create a directory and call apply_permissions.
+ Returns True if a directory is created or the permissions needed to be
+ modified, and False otherwise.
+
+ This function's handling of EEXIST errors makes it useful for atomic
+ directory creation, in which multiple processes may be competing to
+ create the same directory.
+ """
+
+ created_dir = False
+
+ try:
+ os.makedirs(dir_path)
+ created_dir = True
+ except OSError as oe:
+ func_call = "makedirs('%s')" % dir_path
+ if oe.errno in (errno.EEXIST,):
+ pass
+ else:
+ if os.path.isdir(dir_path):
+ # NOTE: DragonFly raises EPERM for makedir('/')
+ # and that is supposed to be ignored here.
+ # Also, sometimes mkdir raises EISDIR on FreeBSD
+ # and we want to ignore that too (bug #187518).
+ pass
+ elif oe.errno == errno.EPERM:
+ raise OperationNotPermitted(func_call)
+ elif oe.errno == errno.EACCES:
+ raise PermissionDenied(func_call)
+ elif oe.errno == errno.EROFS:
+ raise ReadOnlyFileSystem(func_call)
+ else:
+ raise
+ if kwargs:
+ perms_modified = apply_permissions(dir_path, **kwargs)
+ else:
+ perms_modified = False
+ return created_dir or perms_modified
+
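+# Illustrative usage (path, mode and group id are assumptions): create a
+# directory if needed and apply permissions in a single call.
+#
+#     ensure_dirs("/var/tmp/portage/example", gid=250, mode=0o2770, mask=0)
+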
+class LazyItemsDict(UserDict):
+ """A mapping object that behaves like a standard dict except that it allows
+ for lazy initialization of values via callable objects. Lazy items can be
+ overwritten and deleted just as normal items."""
+
+ __slots__ = ('lazy_items',)
+
+ def __init__(self, *args, **kwargs):
+
+ self.lazy_items = {}
+ UserDict.__init__(self, *args, **kwargs)
+
+ def addLazyItem(self, item_key, value_callable, *pargs, **kwargs):
+ """Add a lazy item for the given key. When the item is requested,
+ value_callable will be called with *pargs and **kwargs arguments."""
+ self.lazy_items[item_key] = \
+ self._LazyItem(value_callable, pargs, kwargs, False)
+ # make it show up in self.keys(), etc...
+ UserDict.__setitem__(self, item_key, None)
+
+ def addLazySingleton(self, item_key, value_callable, *pargs, **kwargs):
+ """This is like addLazyItem except value_callable will only be called
+ a maximum of 1 time and the result will be cached for future requests."""
+ self.lazy_items[item_key] = \
+ self._LazyItem(value_callable, pargs, kwargs, True)
+ # make it show up in self.keys(), etc...
+ UserDict.__setitem__(self, item_key, None)
+
+ def update(self, *args, **kwargs):
+ if len(args) > 1:
+ raise TypeError(
+ "expected at most 1 positional argument, got " + \
+ repr(len(args)))
+ if args:
+ map_obj = args[0]
+ else:
+ map_obj = None
+ if map_obj is None:
+ pass
+ elif isinstance(map_obj, LazyItemsDict):
+ for k in map_obj:
+ if k in map_obj.lazy_items:
+ UserDict.__setitem__(self, k, None)
+ else:
+ UserDict.__setitem__(self, k, map_obj[k])
+ self.lazy_items.update(map_obj.lazy_items)
+ else:
+ UserDict.update(self, map_obj)
+ if kwargs:
+ UserDict.update(self, kwargs)
+
+ def __getitem__(self, item_key):
+ if item_key in self.lazy_items:
+ lazy_item = self.lazy_items[item_key]
+ pargs = lazy_item.pargs
+ if pargs is None:
+ pargs = ()
+ kwargs = lazy_item.kwargs
+ if kwargs is None:
+ kwargs = {}
+ result = lazy_item.func(*pargs, **kwargs)
+ if lazy_item.singleton:
+ self[item_key] = result
+ return result
+
+ else:
+ return UserDict.__getitem__(self, item_key)
+
+ def __setitem__(self, item_key, value):
+ if item_key in self.lazy_items:
+ del self.lazy_items[item_key]
+ UserDict.__setitem__(self, item_key, value)
+
+ def __delitem__(self, item_key):
+ if item_key in self.lazy_items:
+ del self.lazy_items[item_key]
+ UserDict.__delitem__(self, item_key)
+
+ def clear(self):
+ self.lazy_items.clear()
+ UserDict.clear(self)
+
+ def copy(self):
+ return self.__copy__()
+
+ def __copy__(self):
+ return self.__class__(self)
+
+ def __deepcopy__(self, memo=None):
+ """
+ This forces evaluation of each contained lazy item, and deepcopy of
+ the result. A TypeError is raised if any contained lazy item is not
+ a singleton, since it is not necessarily possible for the behavior
+ of this type of item to be safely preserved.
+ """
+ if memo is None:
+ memo = {}
+ result = self.__class__()
+ memo[id(self)] = result
+ for k in self:
+ k_copy = deepcopy(k, memo)
+ lazy_item = self.lazy_items.get(k)
+ if lazy_item is not None:
+ if not lazy_item.singleton:
+ raise TypeError("LazyItemsDict " + \
+ "deepcopy is unsafe with lazy items that are " + \
+ "not singletons: key=%s value=%s" % (k, lazy_item,))
+ UserDict.__setitem__(result, k_copy, deepcopy(self[k], memo))
+ return result
+
+ class _LazyItem(object):
+
+ __slots__ = ('func', 'pargs', 'kwargs', 'singleton')
+
+ def __init__(self, func, pargs, kwargs, singleton):
+
+ if not pargs:
+ pargs = None
+ if not kwargs:
+ kwargs = None
+
+ self.func = func
+ self.pargs = pargs
+ self.kwargs = kwargs
+ self.singleton = singleton
+
+ def __copy__(self):
+ return self.__class__(self.func, self.pargs,
+ self.kwargs, self.singleton)
+
+ def __deepcopy__(self, memo=None):
+ """
+ Override this since the default implementation can fail silently,
+ leaving some attributes unset.
+ """
+ if memo is None:
+ memo = {}
+ result = self.__copy__()
+ memo[id(self)] = result
+ result.func = deepcopy(self.func, memo)
+ result.pargs = deepcopy(self.pargs, memo)
+ result.kwargs = deepcopy(self.kwargs, memo)
+ result.singleton = deepcopy(self.singleton, memo)
+ return result
+
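+# Illustrative usage (hypothetical callable): the value is computed on first
+# access and, for singletons, cached for later lookups.
+#
+#     d = LazyItemsDict()
+#     d.addLazySingleton("answer", lambda: 6 * 7)
+#     d["answer"]  # -> 42, computed once and then stored
+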
+class ConfigProtect(object):
+ def __init__(self, myroot, protect_list, mask_list,
+ case_insensitive=False):
+ self.myroot = myroot
+ self.protect_list = protect_list
+ self.mask_list = mask_list
+ self.case_insensitive = case_insensitive
+ self.updateprotect()
+
+ def updateprotect(self):
+ """Update internal state for isprotected() calls. Nonexistent paths
+ are ignored."""
+
+ os = _os_merge
+
+ self.protect = []
+ self._dirs = set()
+ for x in self.protect_list:
+ ppath = normalize_path(
+ os.path.join(self.myroot, x.lstrip(os.path.sep)))
+ # Protect files that don't exist (bug #523684). If the
+ # parent directory doesn't exist, we can safely skip it.
+ if os.path.isdir(os.path.dirname(ppath)):
+ self.protect.append(ppath)
+ try:
+ if stat.S_ISDIR(os.stat(ppath).st_mode):
+ self._dirs.add(ppath)
+ except OSError:
+ pass
+
+ self.protectmask = []
+ for x in self.mask_list:
+ ppath = normalize_path(
+ os.path.join(self.myroot, x.lstrip(os.path.sep)))
+ if self.case_insensitive:
+ ppath = ppath.lower()
+ try:
+ """Use lstat so that anything, even a broken symlink can be
+ protected."""
+ if stat.S_ISDIR(os.lstat(ppath).st_mode):
+ self._dirs.add(ppath)
+ self.protectmask.append(ppath)
+ """Now use stat in case this is a symlink to a directory."""
+ if stat.S_ISDIR(os.stat(ppath).st_mode):
+ self._dirs.add(ppath)
+ except OSError:
+ # If it doesn't exist, there's no need to mask it.
+ pass
+
+ def isprotected(self, obj):
+ """Returns True if obj is protected, False otherwise. The caller must
+ ensure that obj is normalized with a single leading slash. A trailing
+ slash is optional for directories."""
+ masked = 0
+ protected = 0
+ sep = os.path.sep
+ if self.case_insensitive:
+ obj = obj.lower()
+ for ppath in self.protect:
+ if len(ppath) > masked and obj.startswith(ppath):
+ if ppath in self._dirs:
+ if obj != ppath and not obj.startswith(ppath + sep):
+ # /etc/foo does not match /etc/foobaz
+ continue
+ elif obj != ppath:
+ # force exact match when CONFIG_PROTECT lists a
+ # non-directory
+ continue
+ protected = len(ppath)
+ #config file management
+ for pmpath in self.protectmask:
+ if len(pmpath) >= protected and obj.startswith(pmpath):
+ if pmpath in self._dirs:
+ if obj != pmpath and \
+ not obj.startswith(pmpath + sep):
+ # /etc/foo does not match /etc/foobaz
+ continue
+ elif obj != pmpath:
+ # force exact match when CONFIG_PROTECT_MASK lists
+ # a non-directory
+ continue
+ #skip, it's in the mask
+ masked = len(pmpath)
+ return protected > masked
+
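A minimal usage sketch, assuming this hunk belongs to lib/portage/util/__init__.py so that ConfigProtect is importable from portage.util; the protect and mask paths are hypothetical:

from portage.util import ConfigProtect

# /etc is protected, /etc/env.d is masked (hypothetical settings).
# updateprotect() silently skips paths missing from the live filesystem,
# so the results below depend on the system this runs on.
cp = ConfigProtect("/", ["/etc"], ["/etc/env.d"])
print(cp.isprotected("/etc/conf.d/net"))   # True: longest match is a protect entry
print(cp.isprotected("/etc/env.d/10gcc"))  # False: the mask match is longer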
+def new_protect_filename(mydest, newmd5=None, force=False):
+ """Resolves a config-protect filename for merging, optionally
+ using the last filename if the md5 matches. If force is True,
+ then a new filename will be generated even if mydest does not
+ exist yet.
+ (dest,md5) ==> 'string' --- path_to_target_filename
+ (dest) ==> ('next', 'highest') --- next_target and most-recent_target
+ """
+
+ # config protection filename format:
+ # ._cfg0000_foo
+ # 0123456789012
+
+ os = _os_merge
+
+ prot_num = -1
+ last_pfile = ""
+
+ if not force and \
+ not os.path.exists(mydest):
+ return mydest
+
+ real_filename = os.path.basename(mydest)
+ real_dirname = os.path.dirname(mydest)
+ for pfile in os.listdir(real_dirname):
+ if pfile[0:5] != "._cfg":
+ continue
+ if pfile[10:] != real_filename:
+ continue
+ try:
+ new_prot_num = int(pfile[5:9])
+ if new_prot_num > prot_num:
+ prot_num = new_prot_num
+ last_pfile = pfile
+ except ValueError:
+ continue
+ prot_num = prot_num + 1
+
+ new_pfile = normalize_path(os.path.join(real_dirname,
+ "._cfg" + str(prot_num).zfill(4) + "_" + real_filename))
+ old_pfile = normalize_path(os.path.join(real_dirname, last_pfile))
+ if last_pfile and newmd5:
+ try:
+ old_pfile_st = os.lstat(old_pfile)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ else:
+ if stat.S_ISLNK(old_pfile_st.st_mode):
+ try:
+ # Read symlink target as bytes, in case the
+ # target path has a bad encoding.
+ pfile_link = os.readlink(_unicode_encode(old_pfile,
+ encoding=_encodings['merge'], errors='strict'))
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ else:
+ pfile_link = _unicode_decode(pfile_link,
+ encoding=_encodings['merge'], errors='replace')
+ if pfile_link == newmd5:
+ return old_pfile
+ else:
+ try:
+ last_pfile_md5 = \
+ portage.checksum._perform_md5_merge(old_pfile)
+ except FileNotFound:
+ # The file suddenly disappeared or it's a
+ # broken symlink.
+ pass
+ else:
+ if last_pfile_md5 == newmd5:
+ return old_pfile
+ return new_pfile
+
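A short sketch of the ._cfgNNNN_ naming scheme implemented above, assuming the function is importable from portage.util; paths and the md5 value are hypothetical:

from portage.util import new_protect_filename

# With /etc/hosts present and no ._cfg????_hosts files yet, the next
# candidate is /etc/._cfg0000_hosts; existing ._cfgNNNN_hosts files
# bump the counter to the next free number.
print(new_protect_filename("/etc/hosts"))

# Passing newmd5 lets the caller reuse the most recent ._cfg file when
# its md5 (or symlink target) already matches the content to be merged.
print(new_protect_filename("/etc/hosts",
    newmd5="d41d8cd98f00b204e9800998ecf8427e"))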
+def find_updated_config_files(target_root, config_protect):
+ """
+ Yield tuples of configuration files that need to be updated. Each
+ tuple is organized like this:
+ (protected_dir, file_list)
+ If the protected config isn't a protected_dir but a protected_file, the
+ tuple is: (protected_file, None)
+ If no configuration files need to be updated, nothing is yielded.
+ """
+
+ encoding = _encodings['fs']
+
+ if config_protect:
+ # directories with some protect files in them
+ for x in config_protect:
+ files = []
+
+ x = os.path.join(target_root, x.lstrip(os.path.sep))
+ if not os.access(x, os.W_OK):
+ continue
+ try:
+ mymode = os.lstat(x).st_mode
+ except OSError:
+ continue
+
+ if stat.S_ISLNK(mymode):
+ # We want to treat it like a directory if it
+ # is a symlink to an existing directory.
+ try:
+ real_mode = os.stat(x).st_mode
+ if stat.S_ISDIR(real_mode):
+ mymode = real_mode
+ except OSError:
+ pass
+
+ if stat.S_ISDIR(mymode):
+ mycommand = \
+ "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
+ else:
+ mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
+ os.path.split(x.rstrip(os.path.sep))
+ mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
+ cmd = shlex_split(mycommand)
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000:
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see https://bugs.python.org/issue8513).
+ fullname = portage.process.find_binary(cmd[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(cmd[0])
+ cmd[0] = fullname
+
+ cmd = [_unicode_encode(arg, encoding=encoding, errors='strict')
+ for arg in cmd]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ output = _unicode_decode(proc.communicate()[0], encoding=encoding)
+ status = proc.wait()
+ if os.WIFEXITED(status) and os.WEXITSTATUS(status) == os.EX_OK:
+ files = output.split('\0')
+ # split always produces an empty string as the last element
+ if files and not files[-1]:
+ del files[-1]
+ if files:
+ if stat.S_ISDIR(mymode):
+ yield (x, files)
+ else:
+ yield (x, None)
+
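The generator above shells out to find(1), so the sketch below assumes that utility is available; the target root and protect list are hypothetical:

from portage.util import find_updated_config_files

for path, files in find_updated_config_files("/", ["/etc"]):
    if files is None:
        print("pending update for protected file: %s" % path)
    else:
        print("pending updates under %s: %s" % (path, ", ".join(files)))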
+_ld_so_include_re = re.compile(r'^include\s+(\S.*)')
+
+def getlibpaths(root, env=None):
+ def read_ld_so_conf(path):
+ for l in grabfile(path):
+ include_match = _ld_so_include_re.match(l)
+ if include_match is not None:
+ subpath = os.path.join(os.path.dirname(path),
+ include_match.group(1))
+ for p in glob.glob(subpath):
+ for r in read_ld_so_conf(p):
+ yield r
+ else:
+ yield l
+
+ """ Return a list of paths that are used for library lookups """
+ if env is None:
+ env = os.environ
+ # the following is based on the information from ld.so(8)
+ rval = env.get("LD_LIBRARY_PATH", "").split(":")
+ rval.extend(read_ld_so_conf(os.path.join(root, "etc", "ld.so.conf")))
+ rval.append("/usr/lib")
+ rval.append("/lib")
+
+ return [normalize_path(x) for x in rval if x]
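A quick sketch of the resulting search order, with a hypothetical LD_LIBRARY_PATH:

from portage.util import getlibpaths

# Entries come from LD_LIBRARY_PATH first, then /etc/ld.so.conf (with
# 'include' globs expanded recursively), then the /usr/lib and /lib
# fallbacks; empty components are dropped and paths are normalized.
print(getlibpaths("/", env={"LD_LIBRARY_PATH": "/opt/foo/lib:/opt/bar/lib"}))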
diff --git a/lib/portage/util/_async/AsyncFunction.py b/lib/portage/util/_async/AsyncFunction.py
new file mode 100644
index 000000000..ad3d8333f
--- /dev/null
+++ b/lib/portage/util/_async/AsyncFunction.py
@@ -0,0 +1,73 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import pickle
+import traceback
+
+from portage import os
+from portage.util._async.ForkProcess import ForkProcess
+from _emerge.PipeReader import PipeReader
+
+class AsyncFunction(ForkProcess):
+ """
+ Execute a function call in a fork, and retrieve the function
+ return value via pickling/unpickling, accessible as the
+ "result" attribute after the forked process has exited.
+ """
+
+ # NOTE: This class overrides the meaning of the SpawnProcess 'args'
+ # attribute, and uses it to hold the positional arguments for the
+ # 'target' function.
+ __slots__ = ('kwargs', 'result', 'target',
+ '_async_func_reader', '_async_func_reader_pw')
+
+ def _start(self):
+ pr, pw = os.pipe()
+ self.fd_pipes = {}
+ self.fd_pipes[pw] = pw
+ self._async_func_reader_pw = pw
+ self._async_func_reader = PipeReader(
+ input_files={"input":pr},
+ scheduler=self.scheduler)
+ self._async_func_reader.addExitListener(self._async_func_reader_exit)
+ self._async_func_reader.start()
+ ForkProcess._start(self)
+ os.close(pw)
+
+ def _run(self):
+ try:
+ result = self.target(*(self.args or []), **(self.kwargs or {}))
+ os.write(self._async_func_reader_pw, pickle.dumps(result))
+ except Exception:
+ traceback.print_exc()
+ return 1
+
+ return os.EX_OK
+
+ def _pipe_logger_exit(self, pipe_logger):
+ # Ignore this event, since we want to ensure that we exit
+ # only after _async_func_reader_exit has reached EOF.
+ self._pipe_logger = None
+
+ def _async_func_reader_exit(self, pipe_reader):
+ try:
+ self.result = pickle.loads(pipe_reader.getvalue())
+ except Exception:
+ # The child process will have printed a traceback in this case,
+ # and returned an unsuccessful returncode.
+ pass
+ self._async_func_reader = None
+ if self.returncode is None:
+ self._async_waitpid()
+ else:
+ self._unregister()
+ self._async_wait()
+
+ def _unregister(self):
+ ForkProcess._unregister(self)
+
+ pipe_reader = self._async_func_reader
+ if pipe_reader is not None:
+ self._async_func_reader = None
+ pipe_reader.removeExitListener(self._async_func_reader_exit)
+ pipe_reader.cancel()
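A minimal sketch of driving AsyncFunction from a synchronous caller; the global_event_loop import and the wait() call reflect how other portage tasks are commonly driven and are assumptions here rather than part of this patch:

from portage.util._async.AsyncFunction import AsyncFunction
from portage.util._eventloop.global_event_loop import global_event_loop

def add(a, b):
    return a + b

task = AsyncFunction(target=add, args=(2, 3), scheduler=global_event_loop())
task.start()
task.wait()         # drive the loop until the forked child exits
print(task.result)  # 5, unpickled from the child's pipe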
diff --git a/lib/portage/util/_async/AsyncScheduler.py b/lib/portage/util/_async/AsyncScheduler.py
new file mode 100644
index 000000000..c6b523eaa
--- /dev/null
+++ b/lib/portage/util/_async/AsyncScheduler.py
@@ -0,0 +1,103 @@
+# Copyright 2012-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from _emerge.AsynchronousTask import AsynchronousTask
+from _emerge.PollScheduler import PollScheduler
+
+class AsyncScheduler(AsynchronousTask, PollScheduler):
+
+ def __init__(self, max_jobs=None, max_load=None, **kwargs):
+ AsynchronousTask.__init__(self)
+ PollScheduler.__init__(self, **kwargs)
+
+ if max_jobs is None:
+ max_jobs = 1
+ self._max_jobs = max_jobs
+ self._max_load = None if max_load is True else max_load
+ self._error_count = 0
+ self._running_tasks = set()
+ self._remaining_tasks = True
+ self._loadavg_check_id = None
+
+ @property
+ def scheduler(self):
+ """
+ Provides compatibility with the AsynchronousTask.scheduler attribute.
+ """
+ return self._event_loop
+
+ def _poll(self):
+ if not (self._is_work_scheduled() or self._keep_scheduling()):
+ if self._error_count > 0:
+ self.returncode = 1
+ else:
+ self.returncode = os.EX_OK
+ self._async_wait()
+ return self.returncode
+
+ def _cancel(self):
+ self._terminated.set()
+ self._termination_check()
+
+ def _terminate_tasks(self):
+ for task in list(self._running_tasks):
+ task.cancel()
+
+ def _next_task(self):
+ raise NotImplementedError(self)
+
+ def _keep_scheduling(self):
+ return self._remaining_tasks and not self._terminated.is_set()
+
+ def _running_job_count(self):
+ return len(self._running_tasks)
+
+ def _schedule_tasks(self):
+ while self._keep_scheduling() and self._can_add_job():
+ try:
+ task = self._next_task()
+ except StopIteration:
+ self._remaining_tasks = False
+ else:
+ self._running_tasks.add(task)
+ task.scheduler = self._sched_iface
+ task.addExitListener(self._task_exit)
+ task.start()
+
+ if self._loadavg_check_id is not None:
+ self._loadavg_check_id.cancel()
+ self._loadavg_check_id = self._event_loop.call_later(
+ self._loadavg_latency, self._schedule)
+
+ # Triggers cleanup and exit listeners if there's nothing left to do.
+ self.poll()
+
+ def _task_exit(self, task):
+ self._running_tasks.discard(task)
+ if task.returncode != os.EX_OK:
+ self._error_count += 1
+ self._schedule()
+
+ def _start(self):
+ if self._max_load is not None and \
+ self._loadavg_latency is not None and \
+ (self._max_jobs is True or self._max_jobs > 1):
+ # We have to schedule periodically, in case the load
+ # average has changed since the last call.
+ self._loadavg_check_id = self._event_loop.call_later(
+ self._loadavg_latency, self._schedule)
+ self._schedule()
+
+ def _cleanup(self):
+ super(AsyncScheduler, self)._cleanup()
+ if self._loadavg_check_id is not None:
+ self._loadavg_check_id.cancel()
+ self._loadavg_check_id = None
+
+ def _async_wait(self):
+ """
+ Override _async_wait to call self._cleanup().
+ """
+ self._cleanup()
+ super(AsyncScheduler, self)._async_wait()
diff --git a/lib/portage/util/_async/AsyncTaskFuture.py b/lib/portage/util/_async/AsyncTaskFuture.py
new file mode 100644
index 000000000..581f5ea66
--- /dev/null
+++ b/lib/portage/util/_async/AsyncTaskFuture.py
@@ -0,0 +1,31 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+import signal
+
+from _emerge.AsynchronousTask import AsynchronousTask
+
+
+class AsyncTaskFuture(AsynchronousTask):
+ """
+ Wraps a Future in an AsynchronousTask, which is useful for
+ scheduling with TaskScheduler.
+ """
+ __slots__ = ('future',)
+ def _start(self):
+ self.future.add_done_callback(self._done_callback)
+
+ def _cancel(self):
+ if not self.future.done():
+ self.future.cancel()
+
+ def _done_callback(self, future):
+ if future.cancelled():
+ self.cancelled = True
+ self.returncode = -signal.SIGINT
+ elif future.exception() is None:
+ self.returncode = os.EX_OK
+ else:
+ self.returncode = 1
+ self._async_wait()
diff --git a/lib/portage/util/_async/FileCopier.py b/lib/portage/util/_async/FileCopier.py
new file mode 100644
index 000000000..27e5ab4c0
--- /dev/null
+++ b/lib/portage/util/_async/FileCopier.py
@@ -0,0 +1,17 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage import shutil
+from portage.util._async.ForkProcess import ForkProcess
+
+class FileCopier(ForkProcess):
+ """
+ Asynchronously copy a file.
+ """
+
+ __slots__ = ('src_path', 'dest_path')
+
+ def _run(self):
+ shutil.copy(self.src_path, self.dest_path)
+ return os.EX_OK
diff --git a/lib/portage/util/_async/FileDigester.py b/lib/portage/util/_async/FileDigester.py
new file mode 100644
index 000000000..72f06759c
--- /dev/null
+++ b/lib/portage/util/_async/FileDigester.py
@@ -0,0 +1,76 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage import os
+from portage.checksum import perform_multiple_checksums
+from portage.util._async.ForkProcess import ForkProcess
+from _emerge.PipeReader import PipeReader
+
+class FileDigester(ForkProcess):
+ """
+ Asynchronously generate file digests. Pass in file_path and
+ hash_names, and after successful execution, the digests
+ attribute will be a dict containing all of the requested
+ digests.
+ """
+
+ __slots__ = ('file_path', 'digests', 'hash_names',
+ '_digest_pipe_reader', '_digest_pw')
+
+ def _start(self):
+ pr, pw = os.pipe()
+ self.fd_pipes = {}
+ self.fd_pipes[pw] = pw
+ self._digest_pw = pw
+ self._digest_pipe_reader = PipeReader(
+ input_files={"input":pr},
+ scheduler=self.scheduler)
+ self._digest_pipe_reader.addExitListener(self._digest_pipe_reader_exit)
+ self._digest_pipe_reader.start()
+ ForkProcess._start(self)
+ os.close(pw)
+
+ def _run(self):
+ digests = perform_multiple_checksums(self.file_path,
+ hashes=self.hash_names)
+
+ buf = "".join("%s=%s\n" % item
+ for item in digests.items()).encode('utf_8')
+
+ while buf:
+ buf = buf[os.write(self._digest_pw, buf):]
+
+ return os.EX_OK
+
+ def _parse_digests(self, data):
+
+ digests = {}
+ for line in data.decode('utf_8').splitlines():
+ parts = line.split('=', 1)
+ if len(parts) == 2:
+ digests[parts[0]] = parts[1]
+
+ self.digests = digests
+
+ def _pipe_logger_exit(self, pipe_logger):
+ # Ignore this event, since we want to ensure that we
+ # exit only after _digest_pipe_reader has reached EOF.
+ self._pipe_logger = None
+
+ def _digest_pipe_reader_exit(self, pipe_reader):
+ self._parse_digests(pipe_reader.getvalue())
+ self._digest_pipe_reader = None
+ if self.pid is None:
+ self._unregister()
+ self._async_wait()
+ else:
+ self._async_waitpid()
+
+ def _unregister(self):
+ ForkProcess._unregister(self)
+
+ pipe_reader = self._digest_pipe_reader
+ if pipe_reader is not None:
+ self._digest_pipe_reader = None
+ pipe_reader.removeExitListener(self._digest_pipe_reader_exit)
+ pipe_reader.cancel()
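FileDigester is used like the other ForkProcess tasks; a sketch with hypothetical inputs (the event-loop import is an assumption, and the hash names must be ones portage.checksum supports):

from portage.util._async.FileDigester import FileDigester
from portage.util._eventloop.global_event_loop import global_event_loop

digester = FileDigester(file_path="/etc/hosts",
    hash_names=("SHA256", "SHA512"),
    scheduler=global_event_loop())
digester.start()
digester.wait()
print(digester.digests)  # e.g. {'SHA256': '...', 'SHA512': '...'}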
diff --git a/lib/portage/util/_async/ForkProcess.py b/lib/portage/util/_async/ForkProcess.py
new file mode 100644
index 000000000..d84e93833
--- /dev/null
+++ b/lib/portage/util/_async/ForkProcess.py
@@ -0,0 +1,75 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import signal
+import sys
+import traceback
+
+import portage
+from portage import os
+from _emerge.SpawnProcess import SpawnProcess
+
+class ForkProcess(SpawnProcess):
+
+ __slots__ = ()
+
+ def _spawn(self, args, fd_pipes=None, **kwargs):
+ """
+ Fork a subprocess, apply local settings, and call _run().
+ """
+
+ parent_pid = os.getpid()
+ pid = None
+ try:
+ pid = os.fork()
+
+ if pid != 0:
+ if not isinstance(pid, int):
+ raise AssertionError(
+ "fork returned non-integer: %s" % (repr(pid),))
+ return [pid]
+
+ rval = 1
+ try:
+
+ # Use default signal handlers in order to avoid problems
+ # killing subprocesses as reported in bug #353239.
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ # Unregister SIGCHLD handler and wakeup_fd for the parent
+ # process's event loop (bug 655656).
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+ try:
+ wakeup_fd = signal.set_wakeup_fd(-1)
+ if wakeup_fd > 0:
+ os.close(wakeup_fd)
+ except (ValueError, OSError):
+ pass
+
+ portage.locks._close_fds()
+ # We don't exec, so use close_fds=False
+ # (see _setup_pipes docstring).
+ portage.process._setup_pipes(fd_pipes, close_fds=False)
+
+ rval = self._run()
+ except SystemExit:
+ raise
+ except:
+ traceback.print_exc()
+ # os._exit() skips stderr flush!
+ sys.stderr.flush()
+ finally:
+ os._exit(rval)
+
+ finally:
+ if pid == 0 or (pid is None and os.getpid() != parent_pid):
+ # Call os._exit() from a finally block in order
+ # to suppress any finally blocks from earlier
+ # in the call stack (see bug #345289). This
+ # finally block has to be setup before the fork
+ # in order to avoid a race condition.
+ os._exit(1)
+
+ def _run(self):
+ raise NotImplementedError(self)
diff --git a/lib/portage/util/_async/PipeLogger.py b/lib/portage/util/_async/PipeLogger.py
new file mode 100644
index 000000000..a4258f350
--- /dev/null
+++ b/lib/portage/util/_async/PipeLogger.py
@@ -0,0 +1,149 @@
+# Copyright 2008-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import fcntl
+import errno
+import gzip
+import sys
+
+import portage
+from portage import os, _encodings, _unicode_encode
+from _emerge.AbstractPollTask import AbstractPollTask
+
+class PipeLogger(AbstractPollTask):
+
+ """
+ This can be used for logging output of a child process,
+ optionally outputting to log_file_path and/or stdout_fd. It can
+ also monitor for EOF on input_fd, which may be used to detect
+ termination of a child process. If log_file_path ends with
+ '.gz' then the log file is written with compression.
+ """
+
+ __slots__ = ("input_fd", "log_file_path", "stdout_fd") + \
+ ("_log_file", "_log_file_real")
+
+ def _start(self):
+
+ log_file_path = self.log_file_path
+ if log_file_path is not None:
+
+ self._log_file = open(_unicode_encode(log_file_path,
+ encoding=_encodings['fs'], errors='strict'), mode='ab')
+ if log_file_path.endswith('.gz'):
+ self._log_file_real = self._log_file
+ self._log_file = gzip.GzipFile(filename='', mode='ab',
+ fileobj=self._log_file)
+
+ portage.util.apply_secpass_permissions(log_file_path,
+ uid=portage.portage_uid, gid=portage.portage_gid,
+ mode=0o660)
+
+ if isinstance(self.input_fd, int):
+ fd = self.input_fd
+ else:
+ fd = self.input_fd.fileno()
+
+ fcntl.fcntl(fd, fcntl.F_SETFL,
+ fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(fd, fcntl.F_SETFD,
+ fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self.scheduler.add_reader(fd, self._output_handler, fd)
+ self._registered = True
+
+ def _cancel(self):
+ self._unregister()
+ if self.returncode is None:
+ self.returncode = self._cancelled_returncode
+
+ def _output_handler(self, fd):
+
+ background = self.background
+ stdout_fd = self.stdout_fd
+ log_file = self._log_file
+
+ while True:
+ buf = self._read_buf(fd)
+
+ if buf is None:
+ # not a POLLIN event, EAGAIN, etc...
+ break
+
+ if not buf:
+ # EOF
+ self._unregister()
+ self.returncode = self.returncode or os.EX_OK
+ self._async_wait()
+ break
+
+ else:
+ if not background and stdout_fd is not None:
+ failures = 0
+ stdout_buf = buf
+ while stdout_buf:
+ try:
+ stdout_buf = \
+ stdout_buf[os.write(stdout_fd, stdout_buf):]
+ except OSError as e:
+ if e.errno != errno.EAGAIN:
+ raise
+ del e
+ failures += 1
+ if failures > 50:
+ # Avoid a potentially infinite loop. In
+ # most cases, the failure count is zero
+ # and it's unlikely to exceed 1.
+ raise
+
+ # This means that a subprocess has put an inherited
+ # stdio file descriptor (typically stdin) into
+ # O_NONBLOCK mode. This is not acceptable (see bug
+ # #264435), so revert it. We need to use a loop
+ # here since there's a race condition due to
+ # parallel processes being able to change the
+ # flags on the inherited file descriptor.
+ # TODO: When possible, avoid having child processes
+ # inherit stdio file descriptors from portage
+ # (maybe it can't be avoided with
+ # PROPERTIES=interactive).
+ fcntl.fcntl(stdout_fd, fcntl.F_SETFL,
+ fcntl.fcntl(stdout_fd,
+ fcntl.F_GETFL) ^ os.O_NONBLOCK)
+
+ if log_file is not None:
+ log_file.write(buf)
+ log_file.flush()
+
+ def _unregister(self):
+ if self.input_fd is not None:
+ if isinstance(self.input_fd, int):
+ self.scheduler.remove_reader(self.input_fd)
+ os.close(self.input_fd)
+ else:
+ self.scheduler.remove_reader(self.input_fd.fileno())
+ self.input_fd.close()
+ self.input_fd = None
+
+ if self.stdout_fd is not None:
+ os.close(self.stdout_fd)
+ self.stdout_fd = None
+
+ if self._log_file is not None:
+ self._log_file.close()
+ self._log_file = None
+
+ if self._log_file_real is not None:
+ # Avoid "ResourceWarning: unclosed file" since python 3.2.
+ self._log_file_real.close()
+ self._log_file_real = None
+
+ self._registered = False
diff --git a/lib/portage/util/_async/PipeReaderBlockingIO.py b/lib/portage/util/_async/PipeReaderBlockingIO.py
new file mode 100644
index 000000000..6933c9216
--- /dev/null
+++ b/lib/portage/util/_async/PipeReaderBlockingIO.py
@@ -0,0 +1,83 @@
+# Copyright 2012-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+try:
+ import threading
+except ImportError:
+ # dummy_threading will not suffice
+ threading = None
+
+from portage import os
+from _emerge.AbstractPollTask import AbstractPollTask
+
+class PipeReaderBlockingIO(AbstractPollTask):
+ """
+ Reads output from one or more files and saves it in memory, for
+ retrieval via the getvalue() method. This is driven by a thread
+ for each input file, in order to support blocking IO. This may
+ be useful for using threads to handle blocking IO with Jython,
+ since Jython lacks the fcntl module which is needed for
+ non-blocking IO (see http://bugs.jython.org/issue1074).
+ """
+
+ __slots__ = ("input_files", "_read_data", "_terminate",
+ "_threads", "_thread_rlock")
+
+ def _start(self):
+ self._terminate = threading.Event()
+ self._threads = {}
+ self._read_data = []
+
+ self._registered = True
+ self._thread_rlock = threading.RLock()
+ with self._thread_rlock:
+ for f in self.input_files.values():
+ t = threading.Thread(target=self._reader_thread, args=(f,))
+ t.daemon = True
+ t.start()
+ self._threads[f] = t
+
+ def _reader_thread(self, f):
+ try:
+ terminated = self._terminate.is_set
+ except AttributeError:
+ # Jython 2.7.0a2
+ terminated = self._terminate.isSet
+ bufsize = self._bufsize
+ while not terminated():
+ buf = f.read(bufsize)
+ with self._thread_rlock:
+ if terminated():
+ break
+ elif buf:
+ self._read_data.append(buf)
+ else:
+ del self._threads[f]
+ if not self._threads:
+ # Thread-safe callback to EventLoop
+ self.scheduler.call_soon_threadsafe(self._eof)
+ break
+ f.close()
+
+ def _eof(self):
+ self._registered = False
+ if self.returncode is None:
+ self.returncode = os.EX_OK
+ self._async_wait()
+
+ def _cancel(self):
+ self._terminate.set()
+ self._registered = False
+ if self.returncode is None:
+ self.returncode = self._cancelled_returncode
+ self._async_wait()
+
+ def getvalue(self):
+ """Retrieve the entire contents"""
+ with self._thread_rlock:
+ return b''.join(self._read_data)
+
+ def close(self):
+ """Free the memory buffer."""
+ with self._thread_rlock:
+ self._read_data = None
diff --git a/lib/portage/util/_async/PopenProcess.py b/lib/portage/util/_async/PopenProcess.py
new file mode 100644
index 000000000..c1931327a
--- /dev/null
+++ b/lib/portage/util/_async/PopenProcess.py
@@ -0,0 +1,33 @@
+# Copyright 2012-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.SubProcess import SubProcess
+
+class PopenProcess(SubProcess):
+
+ __slots__ = ("pipe_reader", "proc",)
+
+ def _start(self):
+
+ self.pid = self.proc.pid
+ self._registered = True
+
+ if self.pipe_reader is None:
+ self._async_waitpid()
+ else:
+ try:
+ self.pipe_reader.scheduler = self.scheduler
+ except AttributeError:
+ pass
+ self.pipe_reader.addExitListener(self._pipe_reader_exit)
+ self.pipe_reader.start()
+
+ def _pipe_reader_exit(self, pipe_reader):
+ self._async_waitpid()
+
+ def _async_waitpid_cb(self, *args, **kwargs):
+ SubProcess._async_waitpid_cb(self, *args, **kwargs)
+ if self.proc.returncode is None:
+ # Suppress warning messages like this:
+ # ResourceWarning: subprocess 1234 is still running
+ self.proc.returncode = self.returncode
diff --git a/lib/portage/util/_async/SchedulerInterface.py b/lib/portage/util/_async/SchedulerInterface.py
new file mode 100644
index 000000000..ec6417da1
--- /dev/null
+++ b/lib/portage/util/_async/SchedulerInterface.py
@@ -0,0 +1,101 @@
+# Copyright 2012-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import gzip
+import errno
+
+from portage import _encodings
+from portage import _unicode_encode
+from portage.util import writemsg_level
+from ..SlotObject import SlotObject
+
+class SchedulerInterface(SlotObject):
+
+ _event_loop_attrs = (
+ "add_reader",
+ "add_writer",
+ "call_at",
+ "call_exception_handler",
+ "call_later",
+ "call_soon",
+ "call_soon_threadsafe",
+ "close",
+ "create_future",
+ "default_exception_handler",
+ "get_debug",
+ "is_closed",
+ "is_running",
+ "remove_reader",
+ "remove_writer",
+ "run_in_executor",
+ "run_until_complete",
+ "set_debug",
+ "time",
+
+ "_asyncio_child_watcher",
+ # This attribute is used by _wrap_loop to detect if the
+ # loop already has a suitable wrapper.
+ "_asyncio_wrapper",
+ )
+
+ __slots__ = _event_loop_attrs + ("_event_loop", "_is_background")
+
+ def __init__(self, event_loop, is_background=None, **kwargs):
+ SlotObject.__init__(self, **kwargs)
+ self._event_loop = event_loop
+ if is_background is None:
+ is_background = self._return_false
+ self._is_background = is_background
+ for k in self._event_loop_attrs:
+ setattr(self, k, getattr(event_loop, k))
+
+ @staticmethod
+ def _return_false():
+ return False
+
+ def output(self, msg, log_path=None, background=None,
+ level=0, noiselevel=-1):
+ """
+ Output msg to stdout if not self._is_background(). If log_path
+ is not None then append msg to the log (appends with
+ compression if the filename extension of log_path corresponds
+ to a supported compression type).
+ """
+
+ global_background = self._is_background()
+ if background is None or global_background:
+ # Use the global value if the task does not have a local
+ # background value. For example, parallel-fetch tasks run
+ # in the background while other tasks concurrently run in
+ # the foreground.
+ background = global_background
+
+ msg_shown = False
+ if not background:
+ writemsg_level(msg, level=level, noiselevel=noiselevel)
+ msg_shown = True
+
+ if log_path is not None:
+ try:
+ f = open(_unicode_encode(log_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='ab')
+ f_real = f
+ except IOError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ raise
+ if not msg_shown:
+ writemsg_level(msg, level=level, noiselevel=noiselevel)
+ else:
+
+ if log_path.endswith('.gz'):
+ # NOTE: The empty filename argument prevents us from
+ # triggering a bug in python3 which causes GzipFile
+ # to raise AttributeError if fileobj.name is bytes
+ # instead of unicode.
+ f = gzip.GzipFile(filename='', mode='ab', fileobj=f)
+
+ f.write(_unicode_encode(msg))
+ f.close()
+ if f_real is not f:
+ f_real.close()
diff --git a/lib/portage/util/_async/TaskScheduler.py b/lib/portage/util/_async/TaskScheduler.py
new file mode 100644
index 000000000..35b3875a4
--- /dev/null
+++ b/lib/portage/util/_async/TaskScheduler.py
@@ -0,0 +1,20 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from .AsyncScheduler import AsyncScheduler
+
+class TaskScheduler(AsyncScheduler):
+
+ """
+ A simple way to handle scheduling of AbstractPollTask instances. Simply
+ pass a task iterator into the constructor and call start(). Use the
+ poll, wait, or addExitListener methods to be notified when all of the
+ tasks have completed.
+ """
+
+ def __init__(self, task_iter, **kwargs):
+ AsyncScheduler.__init__(self, **kwargs)
+ self._task_iter = task_iter
+
+ def _next_task(self):
+ return next(self._task_iter)
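A sketch of the intended usage, pairing TaskScheduler with the FileCopier task defined earlier in this patch; the event_loop keyword and the loop import are assumptions here:

from portage.util._async.FileCopier import FileCopier
from portage.util._async.TaskScheduler import TaskScheduler
from portage.util._eventloop.global_event_loop import global_event_loop

copies = iter([
    FileCopier(src_path="/etc/hosts", dest_path="/tmp/hosts.copy"),
    FileCopier(src_path="/etc/hostname", dest_path="/tmp/hostname.copy"),
])
sched = TaskScheduler(copies, max_jobs=2, event_loop=global_event_loop())
sched.start()
sched.wait()
print(sched.returncode)  # os.EX_OK only if every task succeeded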
diff --git a/lib/portage/util/_async/__init__.py b/lib/portage/util/_async/__init__.py
new file mode 100644
index 000000000..418ad862b
--- /dev/null
+++ b/lib/portage/util/_async/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/util/_async/run_main_scheduler.py b/lib/portage/util/_async/run_main_scheduler.py
new file mode 100644
index 000000000..10fed34b3
--- /dev/null
+++ b/lib/portage/util/_async/run_main_scheduler.py
@@ -0,0 +1,41 @@
+
+import signal
+
+def run_main_scheduler(scheduler):
+ """
+ Start and run an AsyncScheduler (or compatible object), and handle
+ SIGINT or SIGTERM by calling its terminate() method and waiting
+ for it to clean up after itself. If SIGINT or SIGTERM is received,
+ return signum, else return None. Any previous SIGINT or SIGTERM
+ signal handlers are automatically saved and restored before
+ returning.
+ """
+
+ received_signal = []
+
+ def sighandler(signum, frame):
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ received_signal.append(signum)
+ scheduler.terminate()
+
+ earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
+ earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)
+
+ try:
+ scheduler.start()
+ scheduler.wait()
+ finally:
+ # Restore previous handlers
+ if earlier_sigint_handler is not None:
+ signal.signal(signal.SIGINT, earlier_sigint_handler)
+ else:
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ if earlier_sigterm_handler is not None:
+ signal.signal(signal.SIGTERM, earlier_sigterm_handler)
+ else:
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+
+ if received_signal:
+ return received_signal[0]
+ return None
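A short sketch combining this helper with a TaskScheduler; the (empty) task iterator is hypothetical:

from portage.util._async.run_main_scheduler import run_main_scheduler
from portage.util._async.TaskScheduler import TaskScheduler

scheduler = TaskScheduler(iter([]), max_jobs=4)
signum = run_main_scheduler(scheduler)
if signum is not None:
    print("terminated by signal %d" % signum)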
diff --git a/lib/portage/util/_ctypes.py b/lib/portage/util/_ctypes.py
new file mode 100644
index 000000000..aeceebcca
--- /dev/null
+++ b/lib/portage/util/_ctypes.py
@@ -0,0 +1,47 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+try:
+ import ctypes
+ import ctypes.util
+except ImportError:
+ ctypes = None
+else:
+ try:
+ ctypes.cdll
+ except AttributeError:
+ ctypes = None
+
+_library_names = {}
+
+def find_library(name):
+ """
+ Calls ctypes.util.find_library() if the ctypes module is available,
+ and otherwise returns None. Results are cached for future invocations.
+ """
+ filename = _library_names.get(name)
+ if filename is None:
+ if ctypes is not None:
+ filename = ctypes.util.find_library(name)
+ if filename is None:
+ filename = False
+ _library_names[name] = filename
+
+ if filename is False:
+ return None
+ return filename
+
+_library_handles = {}
+
+def LoadLibrary(name):
+ """
+ Calls ctypes.CDLL(name, use_errno=True) if the ctypes module is available,
+ and otherwise returns None. Results are cached for future invocations.
+ """
+ handle = _library_handles.get(name)
+
+ if handle is None and ctypes is not None:
+ handle = ctypes.CDLL(name, use_errno=True)
+ _library_handles[name] = handle
+
+ return handle
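Both helpers degrade gracefully when ctypes is unavailable and cache their results across calls; a small sketch:

from portage.util._ctypes import LoadLibrary, find_library

path = find_library("c")      # e.g. 'libc.so.6' on glibc systems, else None
if path is not None:
    libc = LoadLibrary(path)  # ctypes.CDLL handle, cached for later calls
    print(path, libc)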
diff --git a/lib/portage/util/_desktop_entry.py b/lib/portage/util/_desktop_entry.py
new file mode 100644
index 000000000..45949215a
--- /dev/null
+++ b/lib/portage/util/_desktop_entry.py
@@ -0,0 +1,87 @@
+# Copyright 2012-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import re
+import subprocess
+import sys
+
+import portage
+from portage import _encodings, _unicode_encode, _unicode_decode
+from portage.util import writemsg
+from portage.util.configparser import (ConfigParserError, RawConfigParser,
+ read_configs)
+
+
+def parse_desktop_entry(path):
+ """
+ Parse the given file with RawConfigParser and return the
+ result. This may raise an IOError from io.open(), or a
+ ParsingError from RawConfigParser.
+ """
+ parser = RawConfigParser()
+
+ read_configs(parser, [path])
+
+ return parser
+
+_trivial_warnings = re.compile(r' looks redundant with value ')
+
+_ignored_errors = (
+ # Ignore error for emacs.desktop:
+ # https://bugs.freedesktop.org/show_bug.cgi?id=35844#c6
+ 'error: (will be fatal in the future): value "TextEditor" in key "Categories" in group "Desktop Entry" requires another category to be present among the following categories: Utility',
+ 'warning: key "Encoding" in group "Desktop Entry" is deprecated'
+)
+
+_ShowIn_exemptions = (
+ # See bug #480586.
+ 'contains an unregistered value "Pantheon"',
+)
+
+def validate_desktop_entry(path):
+ args = ["desktop-file-validate", path]
+
+ if sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000:
+ # Python 3.1 _execvp throws TypeError for non-absolute executable
+ # path passed as bytes (see https://bugs.python.org/issue8513).
+ fullname = portage.process.find_binary(args[0])
+ if fullname is None:
+ raise portage.exception.CommandNotFound(args[0])
+ args[0] = fullname
+
+ args = [_unicode_encode(x, errors='strict') for x in args]
+ proc = subprocess.Popen(args,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ output_lines = _unicode_decode(proc.communicate()[0]).splitlines()
+ proc.wait()
+
+ if output_lines:
+ filtered_output = []
+ for line in output_lines:
+ msg = line[len(path)+2:]
+ # "hint:" output is new in desktop-file-utils-0.21
+ if msg.startswith('hint: ') or msg in _ignored_errors:
+ continue
+ if 'for key "NotShowIn" in group "Desktop Entry"' in msg or \
+ 'for key "OnlyShowIn" in group "Desktop Entry"' in msg:
+ exempt = False
+ for s in _ShowIn_exemptions:
+ if s in msg:
+ exempt = True
+ break
+ if exempt:
+ continue
+ filtered_output.append(line)
+ output_lines = filtered_output
+
+ if output_lines:
+ output_lines = [line for line in output_lines
+ if _trivial_warnings.search(line) is None]
+
+ return output_lines
+
+if __name__ == "__main__":
+ for arg in sys.argv[1:]:
+ for line in validate_desktop_entry(arg):
+ writemsg(line + "\n", noiselevel=-1)
diff --git a/lib/portage/util/_dyn_libs/LinkageMapELF.py b/lib/portage/util/_dyn_libs/LinkageMapELF.py
new file mode 100644
index 000000000..a063621c1
--- /dev/null
+++ b/lib/portage/util/_dyn_libs/LinkageMapELF.py
@@ -0,0 +1,875 @@
+# Copyright 1998-2016 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+import subprocess
+import sys
+
+import portage
+from portage import _encodings
+from portage import _os_merge
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.cache.mappings import slot_dict_class
+from portage.const import EPREFIX
+from portage.dep.soname.multilib_category import compute_multilib_category
+from portage.exception import CommandNotFound, InvalidData
+from portage.localization import _
+from portage.util import getlibpaths
+from portage.util import grabfile
+from portage.util import normalize_path
+from portage.util import varexpand
+from portage.util import writemsg_level
+from portage.util._dyn_libs.NeededEntry import NeededEntry
+from portage.util.elf.header import ELFHeader
+
+if sys.hexversion >= 0x3000000:
+ _unicode = str
+else:
+ _unicode = unicode
+
+# Map ELF e_machine values from NEEDED.ELF.2 to approximate multilib
+# categories. This approximation will produce incorrect results on x32
+# and mips systems, but the result is not worse than using the raw
+# e_machine value which was used by earlier versions of portage.
+_approx_multilib_categories = {
+ "386": "x86_32",
+ "68K": "m68k_32",
+ "AARCH64": "arm_64",
+ "ALPHA": "alpha_64",
+ "ARM": "arm_32",
+ "IA_64": "ia64_64",
+ "MIPS": "mips_o32",
+ "PARISC": "hppa_64",
+ "PPC": "ppc_32",
+ "PPC64": "ppc_64",
+ "S390": "s390_64",
+ "SH": "sh_32",
+ "SPARC": "sparc_32",
+ "SPARC32PLUS": "sparc_32",
+ "SPARCV9": "sparc_64",
+ "X86_64": "x86_64",
+}
+
+class LinkageMapELF(object):
+
+ """Models dynamic linker dependencies."""
+
+ _needed_aux_key = "NEEDED.ELF.2"
+ _soname_map_class = slot_dict_class(
+ ("consumers", "providers"), prefix="")
+
+ class _obj_properties_class(object):
+
+ __slots__ = ("arch", "needed", "runpaths", "soname", "alt_paths",
+ "owner",)
+
+ def __init__(self, arch, needed, runpaths, soname, alt_paths, owner):
+ self.arch = arch
+ self.needed = needed
+ self.runpaths = runpaths
+ self.soname = soname
+ self.alt_paths = alt_paths
+ self.owner = owner
+
+ def __init__(self, vardbapi):
+ self._dbapi = vardbapi
+ self._root = self._dbapi.settings['ROOT']
+ self._libs = {}
+ self._obj_properties = {}
+ self._obj_key_cache = {}
+ self._defpath = set()
+ self._path_key_cache = {}
+
+ def _clear_cache(self):
+ self._libs.clear()
+ self._obj_properties.clear()
+ self._obj_key_cache.clear()
+ self._defpath.clear()
+ self._path_key_cache.clear()
+
+ def _path_key(self, path):
+ key = self._path_key_cache.get(path)
+ if key is None:
+ key = self._ObjectKey(path, self._root)
+ self._path_key_cache[path] = key
+ return key
+
+ def _obj_key(self, path):
+ key = self._obj_key_cache.get(path)
+ if key is None:
+ key = self._ObjectKey(path, self._root)
+ self._obj_key_cache[path] = key
+ return key
+
+ class _ObjectKey(object):
+
+ """Helper class used as _obj_properties keys for objects."""
+
+ __slots__ = ("_key",)
+
+ def __init__(self, obj, root):
+ """
+ This takes a path to an object.
+
+ @param obj: path to a file
+ @type obj: string (example: '/usr/bin/bar')
+
+ """
+ self._key = self._generate_object_key(obj, root)
+
+ def __hash__(self):
+ return hash(self._key)
+
+ def __eq__(self, other):
+ return self._key == other._key
+
+ def _generate_object_key(self, obj, root):
+ """
+ Generate object key for a given object.
+
+ @param obj: path to a file
+ @type obj: string (example: '/usr/bin/bar')
+ @rtype: 2-tuple of types (long, int) if object exists. string if
+ object does not exist.
+ @return:
+ 1. 2-tuple of object's device and inode from a stat call, if object
+ exists.
+ 2. realpath of object if object does not exist.
+
+ """
+
+ os = _os_merge
+
+ try:
+ _unicode_encode(obj,
+ encoding=_encodings['merge'], errors='strict')
+ except UnicodeEncodeError:
+ # The package appears to have been merged with a
+ # different value of sys.getfilesystemencoding(),
+ # so fall back to utf_8 if appropriate.
+ try:
+ _unicode_encode(obj,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeEncodeError:
+ pass
+ else:
+ os = portage.os
+
+ abs_path = os.path.join(root, obj.lstrip(os.sep))
+ try:
+ object_stat = os.stat(abs_path)
+ except OSError:
+ # Use the realpath as the key if the file does not exist on the
+ # filesystem.
+ return os.path.realpath(abs_path)
+ # Return a tuple of the device and inode.
+ return (object_stat.st_dev, object_stat.st_ino)
+
+ def file_exists(self):
+ """
+ Determine if the file for this key exists on the filesystem.
+
+ @rtype: Boolean
+ @return:
+ 1. True if the file exists.
+ 2. False if the file does not exist or is a broken symlink.
+
+ """
+ return isinstance(self._key, tuple)
+
+ class _LibGraphNode(_ObjectKey):
+ __slots__ = ("alt_paths",)
+
+ def __init__(self, key):
+ """
+ Create a _LibGraphNode from an existing _ObjectKey.
+ This re-uses the _key attribute in order to avoid repeating
+ any previous stat calls, which helps to avoid potential race
+ conditions due to inconsistent stat results when the
+ file system is being modified concurrently.
+ """
+ self._key = key._key
+ self.alt_paths = set()
+
+ def __str__(self):
+ return str(sorted(self.alt_paths))
+
+ def rebuild(self, exclude_pkgs=None, include_file=None,
+ preserve_paths=None):
+ """
+ Raises CommandNotFound if there are preserved libs
+ and the scanelf binary is not available.
+
+ @param exclude_pkgs: A set of packages that should be excluded from
+ the LinkageMap, since they are being unmerged and their NEEDED
+ entries are therefore irrelevant and would only serve to corrupt
+ the LinkageMap.
+ @type exclude_pkgs: set
+ @param include_file: The path of a file containing NEEDED entries for
+ a package which does not exist in the vardbapi yet because it is
+ currently being merged.
+ @type include_file: String
+ @param preserve_paths: Libraries preserved by a package instance that
+ is currently being merged. They need to be explicitly passed to the
+ LinkageMap, since they are not registered in the
+ PreservedLibsRegistry yet.
+ @type preserve_paths: set
+ """
+
+ os = _os_merge
+ root = self._root
+ root_len = len(root) - 1
+ self._clear_cache()
+ self._defpath.update(getlibpaths(self._root, env=self._dbapi.settings))
+ libs = self._libs
+ obj_properties = self._obj_properties
+
+ lines = []
+
+ # Data from include_file is processed first so that it
+ # overrides any data from previously installed files.
+ if include_file is not None:
+ for line in grabfile(include_file):
+ lines.append((None, include_file, line))
+
+ aux_keys = [self._needed_aux_key]
+ can_lock = os.access(os.path.dirname(self._dbapi._dbroot), os.W_OK)
+ if can_lock:
+ self._dbapi.lock()
+ try:
+ for cpv in self._dbapi.cpv_all():
+ if exclude_pkgs is not None and cpv in exclude_pkgs:
+ continue
+ needed_file = self._dbapi.getpath(cpv,
+ filename=self._needed_aux_key)
+ for line in self._dbapi.aux_get(cpv, aux_keys)[0].splitlines():
+ lines.append((cpv, needed_file, line))
+ finally:
+ if can_lock:
+ self._dbapi.unlock()
+
+ # have to call scanelf for preserved libs here as they aren't
+ # registered in NEEDED.ELF.2 files
+ plibs = {}
+ if preserve_paths is not None:
+ plibs.update((x, None) for x in preserve_paths)
+ if self._dbapi._plib_registry and \
+ self._dbapi._plib_registry.hasEntries():
+ for cpv, items in \
+ self._dbapi._plib_registry.getPreservedLibs().items():
+ if exclude_pkgs is not None and cpv in exclude_pkgs:
+ # These preserved libs will either be unmerged,
+ # rendering them irrelevant, or they will be
+ # preserved in the replacement package and are
+ # already represented via the preserve_paths
+ # parameter.
+ continue
+ plibs.update((x, cpv) for x in items)
+ if plibs:
+ args = [os.path.join(EPREFIX or "/", "usr/bin/scanelf"), "-qF", "%a;%F;%S;%r;%n"]
+ args.extend(os.path.join(root, x.lstrip("." + os.sep)) \
+ for x in plibs)
+ try:
+ proc = subprocess.Popen(args, stdout=subprocess.PIPE)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ raise CommandNotFound(args[0])
+ else:
+ for l in proc.stdout:
+ try:
+ l = _unicode_decode(l,
+ encoding=_encodings['content'], errors='strict')
+ except UnicodeDecodeError:
+ l = _unicode_decode(l,
+ encoding=_encodings['content'], errors='replace')
+ writemsg_level(_("\nError decoding characters " \
+ "returned from scanelf: %s\n\n") % (l,),
+ level=logging.ERROR, noiselevel=-1)
+ l = l[3:].rstrip("\n")
+ if not l:
+ continue
+ try:
+ entry = NeededEntry.parse("scanelf", l)
+ except InvalidData as e:
+ writemsg_level("\n%s\n\n" % (e,),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+ try:
+ with open(_unicode_encode(entry.filename,
+ encoding=_encodings['fs'],
+ errors='strict'), 'rb') as f:
+ elf_header = ELFHeader.read(f)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ # File removed concurrently.
+ continue
+ entry.multilib_category = compute_multilib_category(elf_header)
+ entry.filename = entry.filename[root_len:]
+ owner = plibs.pop(entry.filename, None)
+ lines.append((owner, "scanelf", _unicode(entry)))
+ proc.wait()
+ proc.stdout.close()
+
+ if plibs:
+ # Preserved libraries that did not appear in the scanelf output.
+ # This is known to happen with statically linked libraries.
+ # Generate dummy lines for these, so we can assume that every
+ # preserved library has an entry in self._obj_properties. This
+ # is important in order to prevent findConsumers from raising
+ # an unwanted KeyError.
+ for x, cpv in plibs.items():
+ lines.append((cpv, "plibs", ";".join(['', x, '', '', ''])))
+
+ # Share identical frozenset instances when available,
+ # in order to conserve memory.
+ frozensets = {}
+
+ for owner, location, l in lines:
+ l = l.rstrip("\n")
+ if not l:
+ continue
+ if '\0' in l:
+ # os.stat() will raise "TypeError: must be encoded string
+ # without NULL bytes, not str" in this case.
+ writemsg_level(_("\nLine contains null byte(s) " \
+ "in %s: %s\n\n") % (location, l),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+ try:
+ entry = NeededEntry.parse(location, l)
+ except InvalidData as e:
+ writemsg_level("\n%s\n\n" % (e,),
+ level=logging.ERROR, noiselevel=-1)
+ continue
+
+ # If NEEDED.ELF.2 contains the new multilib category field,
+ # then use that for categorization. Otherwise, if a mapping
+ # exists, map e_machine (entry.arch) to an approximate
+ # multilib category. If all else fails, use e_machine, just
+ # as older versions of portage did.
+ arch = entry.multilib_category
+ if arch is None:
+ arch = _approx_multilib_categories.get(
+ entry.arch, entry.arch)
+
+ obj = entry.filename
+ soname = entry.soname
+ expand = {"ORIGIN": os.path.dirname(entry.filename)}
+ path = frozenset(normalize_path(
+ varexpand(x, expand, error_leader=lambda: "%s: " % location))
+ for x in entry.runpaths)
+ path = frozensets.setdefault(path, path)
+ needed = frozenset(entry.needed)
+
+ needed = frozensets.setdefault(needed, needed)
+
+ obj_key = self._obj_key(obj)
+ indexed = True
+ myprops = obj_properties.get(obj_key)
+ if myprops is None:
+ indexed = False
+ myprops = self._obj_properties_class(
+ arch, needed, path, soname, [], owner)
+ obj_properties[obj_key] = myprops
+ # All object paths are added into the obj_properties tuple.
+ myprops.alt_paths.append(obj)
+
+ # Don't index the same file more than once since only one
+ # set of data can be correct and therefore mixing data
+ # may corrupt the index (include_file overrides previously
+ # installed).
+ if indexed:
+ continue
+
+ arch_map = libs.get(arch)
+ if arch_map is None:
+ arch_map = {}
+ libs[arch] = arch_map
+ if soname:
+ soname_map = arch_map.get(soname)
+ if soname_map is None:
+ soname_map = self._soname_map_class(
+ providers=[], consumers=[])
+ arch_map[soname] = soname_map
+ soname_map.providers.append(obj_key)
+ for needed_soname in needed:
+ soname_map = arch_map.get(needed_soname)
+ if soname_map is None:
+ soname_map = self._soname_map_class(
+ providers=[], consumers=[])
+ arch_map[needed_soname] = soname_map
+ soname_map.consumers.append(obj_key)
+
+ for arch, sonames in libs.items():
+ for soname_node in sonames.values():
+ soname_node.providers = tuple(set(soname_node.providers))
+ soname_node.consumers = tuple(set(soname_node.consumers))
+
+ def listBrokenBinaries(self, debug=False):
+ """
+ Find binaries and their needed sonames, which have no providers.
+
+ @param debug: Boolean to enable debug output
+ @type debug: Boolean
+ @rtype: dict (example: {'/usr/bin/foo': set(['libbar.so'])})
+ @return: The return value is an object -> set-of-sonames mapping, where
+ object is a broken binary and the set consists of sonames needed by
+ object that have no corresponding libraries to fulfill the dependency.
+
+ """
+
+ os = _os_merge
+
+ class _LibraryCache(object):
+
+ """
+ Caches properties associated with paths.
+
+ The purpose of this class is to prevent multiple instances of
+ _ObjectKey for the same paths.
+
+ """
+
+ def __init__(cache_self):
+ cache_self.cache = {}
+
+ def get(cache_self, obj):
+ """
+ Caches and returns properties associated with an object.
+
+ @param obj: absolute path (can be symlink)
+ @type obj: string (example: '/usr/lib/libfoo.so')
+ @rtype: 4-tuple with types
+ (string or None, string or None, 2-tuple, Boolean)
+ @return: 4-tuple with the following components:
+ 1. arch as a string or None if it does not exist,
+ 2. soname as a string or None if it does not exist,
+ 3. obj_key as 2-tuple,
+ 4. Boolean representing whether the object exists.
+ (example: ('x86_64', 'libfoo.so.1', (123L, 456L), True))
+
+ """
+ if obj in cache_self.cache:
+ return cache_self.cache[obj]
+ else:
+ obj_key = self._obj_key(obj)
+ # Check that the library exists on the filesystem.
+ if obj_key.file_exists():
+ # Get the arch and soname from LinkageMap._obj_properties if
+ # it exists. Otherwise, None.
+ obj_props = self._obj_properties.get(obj_key)
+ if obj_props is None:
+ arch = None
+ soname = None
+ else:
+ arch = obj_props.arch
+ soname = obj_props.soname
+ return cache_self.cache.setdefault(obj, \
+ (arch, soname, obj_key, True))
+ else:
+ return cache_self.cache.setdefault(obj, \
+ (None, None, obj_key, False))
+
+ rValue = {}
+ cache = _LibraryCache()
+ providers = self.listProviders()
+
+ # Iterate over all obj_keys and their providers.
+ for obj_key, sonames in providers.items():
+ obj_props = self._obj_properties[obj_key]
+ arch = obj_props.arch
+ path = obj_props.runpaths
+ objs = obj_props.alt_paths
+ path = path.union(self._defpath)
+ # Iterate over each needed soname and the set of library paths that
+ # fulfill the soname to determine if the dependency is broken.
+ for soname, libraries in sonames.items():
+ # validLibraries is used to store libraries, which satisfy soname,
+ # so if no valid libraries are found, the soname is not satisfied
+ # for obj_key. If unsatisfied, objects associated with obj_key
+ # must be emerged.
+ validLibraries = set()
+ # It could be the case that the library to satisfy the soname is
+ # not in the obj's runpath, but a symlink to the library is (eg
+ # libnvidia-tls.so.1 in nvidia-drivers). Also, since LinkageMap
+ # does not catalog symlinks, broken or missing symlinks may go
+ # unnoticed. As a result of these cases, check that a file with
+ # the same name as the soname exists in obj's runpath.
+ # XXX If we catalog symlinks in LinkageMap, this could be improved.
+ for directory in path:
+ cachedArch, cachedSoname, cachedKey, cachedExists = \
+ cache.get(os.path.join(directory, soname))
+ # Check that this library provides the needed soname. Doing
+ # this, however, will cause consumers of libraries missing
+ # sonames to be unnecessarily emerged. (eg libmix.so)
+ if cachedSoname == soname and cachedArch == arch:
+ validLibraries.add(cachedKey)
+ if debug and cachedKey not in \
+ set(map(self._obj_key_cache.get, libraries)):
+ # XXX This is most often due to soname symlinks not in
+ # a library's directory. We could catalog symlinks in
+ # LinkageMap to avoid checking for this edge case here.
+ writemsg_level(
+ _("Found provider outside of findProviders:") + \
+ (" %s -> %s %s\n" % (os.path.join(directory, soname),
+ self._obj_properties[cachedKey].alt_paths, libraries)),
+ level=logging.DEBUG,
+ noiselevel=-1)
+ # A valid library has been found, so there is no need to
+ # continue.
+ break
+ if debug and cachedArch == arch and \
+ cachedKey in self._obj_properties:
+ writemsg_level((_("Broken symlink or missing/bad soname: " + \
+ "%(dir_soname)s -> %(cachedKey)s " + \
+ "with soname %(cachedSoname)s but expecting %(soname)s") % \
+ {"dir_soname":os.path.join(directory, soname),
+ "cachedKey": self._obj_properties[cachedKey],
+ "cachedSoname": cachedSoname, "soname":soname}) + "\n",
+ level=logging.DEBUG,
+ noiselevel=-1)
+ # This conditional checks if there are no libraries to satisfy the
+ # soname (empty set).
+ if not validLibraries:
+ for obj in objs:
+ rValue.setdefault(obj, set()).add(soname)
+ # If no valid libraries have been found by this point, then
+ # there are no files named with the soname within obj's runpath,
+ # but if there are libraries (from the providers mapping), it is
+ # likely that soname symlinks or the actual libraries are
+ # missing or broken. Thus those libraries are added to rValue
+ # in order to emerge corrupt library packages.
+ for lib in libraries:
+ rValue.setdefault(lib, set()).add(soname)
+ if debug:
+ if not os.path.isfile(lib):
+ writemsg_level(_("Missing library:") + " %s\n" % (lib,),
+ level=logging.DEBUG,
+ noiselevel=-1)
+ else:
+ writemsg_level(_("Possibly missing symlink:") + \
+ "%s\n" % (os.path.join(os.path.dirname(lib), soname)),
+ level=logging.DEBUG,
+ noiselevel=-1)
+ return rValue
+
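A sketch of exercising listBrokenBinaries() against the installed-package database; the vardbapi lookup shown here is the conventional one and is an assumption, since in practice a LinkageMapELF instance is maintained by vardbapi itself:

import portage
from portage.util._dyn_libs.LinkageMapELF import LinkageMapELF

vardb = portage.db[portage.root]["vartree"].dbapi
linkmap = LinkageMapELF(vardb)
linkmap.rebuild()
for obj, sonames in linkmap.listBrokenBinaries().items():
    print("%s is missing: %s" % (obj, ", ".join(sorted(sonames))))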
+ def listProviders(self):
+ """
+ Find the providers for all object keys in LinkageMap.
+
+ @rtype: dict (example:
+ {(123L, 456L): {'libbar.so': set(['/lib/libbar.so.1.5'])}})
+ @return: The return value is an object key -> providers mapping, where
+ providers is a mapping of soname -> set-of-library-paths returned
+ from the findProviders method.
+
+ """
+ rValue = {}
+ if not self._libs:
+ self.rebuild()
+ # Iterate over all object keys within LinkageMap.
+ for obj_key in self._obj_properties:
+ rValue.setdefault(obj_key, self.findProviders(obj_key))
+ return rValue
+
+ def isMasterLink(self, obj):
+ """
+ Determine whether an object is a "master" symlink, which means
+ that its basename is the same as the beginning part of the
+ soname and it lacks the soname's version component.
+
+ Examples:
+
+ soname | master symlink name
+ --------------------------------------------
+ libarchive.so.2.8.4 | libarchive.so
+ libproc-3.2.8.so | libproc.so
+
+ @param obj: absolute path to an object
+ @type obj: string (example: '/usr/bin/foo')
+ @rtype: Boolean
+ @return:
+ 1. True if obj is a master link
+ 2. False if obj is not a master link
+
+ """
+ os = _os_merge
+ obj_key = self._obj_key(obj)
+ if obj_key not in self._obj_properties:
+ raise KeyError("%s (%s) not in object list" % (obj_key, obj))
+ basename = os.path.basename(obj)
+ soname = self._obj_properties[obj_key].soname
+ return len(basename) < len(soname) and \
+ basename.endswith(".so") and \
+ soname.startswith(basename[:-3])
+
+ def listLibraryObjects(self):
+ """
+ Return a list of library objects.
+
+ Known limitation: library objects lacking an soname are not included.
+
+ @rtype: list of strings
+ @return: list of paths to all providers
+
+ """
+ rValue = []
+ if not self._libs:
+ self.rebuild()
+ for arch_map in self._libs.values():
+ for soname_map in arch_map.values():
+ for obj_key in soname_map.providers:
+ rValue.extend(self._obj_properties[obj_key].alt_paths)
+ return rValue
+
+ def getOwners(self, obj):
+ """
+ Return the package(s) associated with an object. Raises KeyError
+ if the object is unknown. Returns an empty tuple if the owner(s)
+ are unknown.
+
+ NOTE: For preserved libraries, the owner(s) may have been
+ previously uninstalled, but these uninstalled owners can be
+ returned by this method since they are registered in the
+ PreservedLibsRegistry.
+
+ @param obj: absolute path to an object
+ @type obj: string (example: '/usr/bin/bar')
+ @rtype: tuple
+ @return: a tuple of cpv
+ """
+ if not self._libs:
+ self.rebuild()
+ if isinstance(obj, self._ObjectKey):
+ obj_key = obj
+ else:
+ obj_key = self._obj_key_cache.get(obj)
+ if obj_key is None:
+ raise KeyError("%s not in object list" % obj)
+ obj_props = self._obj_properties.get(obj_key)
+ if obj_props is None:
+ raise KeyError("%s not in object list" % obj_key)
+ if obj_props.owner is None:
+ return ()
+ return (obj_props.owner,)
+
+ def getSoname(self, obj):
+ """
+ Return the soname associated with an object.
+
+ @param obj: absolute path to an object
+ @type obj: string (example: '/usr/bin/bar')
+ @rtype: string
+ @return: soname as a string
+
+ """
+ if not self._libs:
+ self.rebuild()
+ if isinstance(obj, self._ObjectKey):
+ obj_key = obj
+ if obj_key not in self._obj_properties:
+ raise KeyError("%s not in object list" % obj_key)
+ return self._obj_properties[obj_key].soname
+ if obj not in self._obj_key_cache:
+ raise KeyError("%s not in object list" % obj)
+ return self._obj_properties[self._obj_key_cache[obj]].soname
+
+ def findProviders(self, obj):
+ """
+ Find providers for an object or object key.
+
+ This method may be called with a key from _obj_properties.
+
+ In some cases, not all valid libraries are returned. This may occur when
+ an soname symlink referencing a library is in an object's runpath while
+ the actual library is not. We should consider cataloging symlinks within
+ LinkageMap as this would avoid those cases and would be a better model of
+ library dependencies (since the dynamic linker actually searches for
+ files named with the soname in the runpaths).
+
+ @param obj: absolute path to an object or a key from _obj_properties
+ @type obj: string (example: '/usr/bin/bar') or _ObjectKey
+ @rtype: dict (example: {'libbar.so': set(['/lib/libbar.so.1.5'])})
+	@return: a mapping of soname -> set-of-library-paths, where each
+		set of library paths satisfies the corresponding soname.
+
+ """
+
+ os = _os_merge
+
+ rValue = {}
+
+ if not self._libs:
+ self.rebuild()
+
+ # Determine the obj_key from the arguments.
+ if isinstance(obj, self._ObjectKey):
+ obj_key = obj
+ if obj_key not in self._obj_properties:
+ raise KeyError("%s not in object list" % obj_key)
+ else:
+ obj_key = self._obj_key(obj)
+ if obj_key not in self._obj_properties:
+ raise KeyError("%s (%s) not in object list" % (obj_key, obj))
+
+ obj_props = self._obj_properties[obj_key]
+ arch = obj_props.arch
+ needed = obj_props.needed
+ path = obj_props.runpaths
+ path_keys = set(self._path_key(x) for x in path.union(self._defpath))
+ for soname in needed:
+ rValue[soname] = set()
+ if arch not in self._libs or soname not in self._libs[arch]:
+ continue
+ # For each potential provider of the soname, add it to rValue if it
+ # resides in the obj's runpath.
+ for provider_key in self._libs[arch][soname].providers:
+ providers = self._obj_properties[provider_key].alt_paths
+ for provider in providers:
+ if self._path_key(os.path.dirname(provider)) in path_keys:
+ rValue[soname].add(provider)
+ return rValue
+
+ def findConsumers(self, obj, exclude_providers=None, greedy=True):
+ """
+ Find consumers of an object or object key.
+
+	This method may be called with a key from _obj_properties. Note that
+	the shadowed-library check below is skipped when an object key is
+	passed, so to make sure shadowed libraries are detected, pass the obj
+	as a string rather than a new _ObjectKey instance.
+
+ In some cases, not all consumers are returned. This may occur when
+ an soname symlink referencing a library is in an object's runpath while
+ the actual library is not. For example, this problem is noticeable for
+	binutils since its libraries are added to the path via symlinks that
+	are generated in the /usr/$CHOST/lib/ directory by binutils-config.
+ Failure to recognize consumers of these symlinks makes preserve-libs
+ fail to preserve binutils libs that are needed by these unrecognized
+ consumers.
+
+ Note that library consumption via dlopen (common for kde plugins) is
+ currently undetected. However, it is possible to use the
+ corresponding libtool archive (*.la) files to detect such consumers
+ (revdep-rebuild is able to detect them).
+
+ The exclude_providers argument is useful for determining whether
+ removal of one or more packages will create unsatisfied consumers. When
+ this option is given, consumers are excluded from the results if there
+ is an alternative provider (which is not excluded) of the required
+ soname such that the consumers will remain satisfied if the files
+ owned by exclude_providers are removed.
+
+ @param obj: absolute path to an object or a key from _obj_properties
+ @type obj: string (example: '/usr/bin/bar') or _ObjectKey
+ @param exclude_providers: A collection of callables that each take a
+ single argument referring to the path of a library (example:
+ '/usr/lib/libssl.so.0.9.8'), and return True if the library is
+ owned by a provider which is planned for removal.
+ @type exclude_providers: collection
+ @param greedy: If True, then include consumers that are satisfied
+ by alternative providers, otherwise omit them. Default is True.
+ @type greedy: Boolean
+ @rtype: set of strings (example: set(['/bin/foo', '/usr/bin/bar']))
+	@return: a set of paths of objects that consume the given object.
+
+ """
+
+ os = _os_merge
+
+ if not self._libs:
+ self.rebuild()
+
+ # Determine the obj_key and the set of objects matching the arguments.
+ if isinstance(obj, self._ObjectKey):
+ obj_key = obj
+ if obj_key not in self._obj_properties:
+ raise KeyError("%s not in object list" % obj_key)
+ objs = self._obj_properties[obj_key].alt_paths
+ else:
+ objs = set([obj])
+ obj_key = self._obj_key(obj)
+ if obj_key not in self._obj_properties:
+ raise KeyError("%s (%s) not in object list" % (obj_key, obj))
+
+ # If there is another version of this lib with the
+ # same soname and the soname symlink points to that
+ # other version, this lib will be shadowed and won't
+ # have any consumers.
+ if not isinstance(obj, self._ObjectKey):
+ soname = self._obj_properties[obj_key].soname
+ soname_link = os.path.join(self._root,
+ os.path.dirname(obj).lstrip(os.path.sep), soname)
+ obj_path = os.path.join(self._root, obj.lstrip(os.sep))
+ try:
+ soname_st = os.stat(soname_link)
+ obj_st = os.stat(obj_path)
+ except OSError:
+ pass
+ else:
+ if (obj_st.st_dev, obj_st.st_ino) != \
+ (soname_st.st_dev, soname_st.st_ino):
+ return set()
+
+ obj_props = self._obj_properties[obj_key]
+ arch = obj_props.arch
+ soname = obj_props.soname
+
+ soname_node = None
+ arch_map = self._libs.get(arch)
+ if arch_map is not None:
+ soname_node = arch_map.get(soname)
+
+ defpath_keys = set(self._path_key(x) for x in self._defpath)
+ satisfied_consumer_keys = set()
+ if soname_node is not None:
+ if exclude_providers is not None or not greedy:
+ relevant_dir_keys = set()
+ for provider_key in soname_node.providers:
+ if not greedy and provider_key == obj_key:
+ continue
+ provider_objs = self._obj_properties[provider_key].alt_paths
+ for p in provider_objs:
+ provider_excluded = False
+ if exclude_providers is not None:
+ for excluded_provider_isowner in exclude_providers:
+ if excluded_provider_isowner(p):
+ provider_excluded = True
+ break
+ if not provider_excluded:
+ # This provider is not excluded. It will
+ # satisfy a consumer of this soname if it
+ # is in the default ld.so path or the
+ # consumer's runpath.
+ relevant_dir_keys.add(
+ self._path_key(os.path.dirname(p)))
+
+ if relevant_dir_keys:
+ for consumer_key in soname_node.consumers:
+ path = self._obj_properties[consumer_key].runpaths
+ path_keys = defpath_keys.copy()
+ path_keys.update(self._path_key(x) for x in path)
+ if relevant_dir_keys.intersection(path_keys):
+ satisfied_consumer_keys.add(consumer_key)
+
+ rValue = set()
+ if soname_node is not None:
+ # For each potential consumer, add it to rValue if an object from the
+ # arguments resides in the consumer's runpath.
+ objs_dir_keys = set(self._path_key(os.path.dirname(x))
+ for x in objs)
+ for consumer_key in soname_node.consumers:
+ if consumer_key in satisfied_consumer_keys:
+ continue
+ consumer_props = self._obj_properties[consumer_key]
+ path = consumer_props.runpaths
+ consumer_objs = consumer_props.alt_paths
+ path_keys = defpath_keys.union(self._path_key(x) for x in path)
+ if objs_dir_keys.intersection(path_keys):
+ rValue.update(consumer_objs)
+ return rValue
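
A minimal sketch of how the query methods above fit together, assuming a
LinkageMap is reached through the installed-package database the same way
the display_preserved_libs helper later in this diff does (the library path
below is hypothetical, and _linkmap is a private attribute):

    import portage

    # The vartree dbapi holds a LinkageMap instance as a private attribute.
    vardb = portage.db[portage.root]["vartree"].dbapi
    linkmap = vardb._linkmap
    linkmap.rebuild()                    # parse NEEDED.ELF.2 data of installed packages

    lib = "/usr/lib64/libssl.so.1.1"     # hypothetical library path
    print(linkmap.getSoname(lib))        # soname recorded for the object
    print(linkmap.findProviders(lib))    # {soname: set of provider paths} per DT_NEEDED
    print(linkmap.findConsumers(lib))    # set of object paths that can link to lib
    print(linkmap.getOwners(lib))        # tuple of owning package cpv(s)
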
diff --git a/lib/portage/util/_dyn_libs/NeededEntry.py b/lib/portage/util/_dyn_libs/NeededEntry.py
new file mode 100644
index 000000000..c52cfce3c
--- /dev/null
+++ b/lib/portage/util/_dyn_libs/NeededEntry.py
@@ -0,0 +1,82 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import sys
+
+from portage import _encodings, _unicode_encode
+from portage.exception import InvalidData
+from portage.localization import _
+
+class NeededEntry(object):
+ """
+ Represents one entry (line) from a NEEDED.ELF.2 file. The entry
+ must have 5 or more semicolon-delimited fields in order to be
+ considered valid. The sixth field is optional, corresponding
+ to the multilib category. The multilib_category attribute is
+ None if the corresponding field is either empty or missing.
+ """
+
+ __slots__ = ("arch", "filename", "multilib_category", "needed",
+ "runpaths", "soname")
+
+ _MIN_FIELDS = 5
+ _MULTILIB_CAT_INDEX = 5
+
+ @classmethod
+ def parse(cls, filename, line):
+ """
+		Parse a NEEDED.ELF.2 entry. Raises InvalidData if the entry is malformed.
+
+ @param filename: file name for use in exception messages
+ @type filename: str
+ @param line: a single line of text from a NEEDED.ELF.2 file,
+ without a trailing newline
+ @type line: str
+ @rtype: NeededEntry
+ @return: A new NeededEntry instance containing data from line
+ """
+ fields = line.split(";")
+ if len(fields) < cls._MIN_FIELDS:
+ raise InvalidData(_("Wrong number of fields "
+ "in %s: %s\n\n") % (filename, line))
+
+ obj = cls()
+ # Extra fields may exist (for future extensions).
+ if (len(fields) > cls._MULTILIB_CAT_INDEX and
+ fields[cls._MULTILIB_CAT_INDEX]):
+ obj.multilib_category = fields[cls._MULTILIB_CAT_INDEX]
+ else:
+ obj.multilib_category = None
+
+ del fields[cls._MIN_FIELDS:]
+ obj.arch, obj.filename, obj.soname, rpaths, needed = fields
+ obj.runpaths = tuple(filter(None, rpaths.split(":")))
+ obj.needed = tuple(filter(None, needed.split(",")))
+
+ return obj
+
+ def __str__(self):
+ """
+ Format this entry for writing to a NEEDED.ELF.2 file.
+ """
+ return ";".join([
+ self.arch,
+ self.filename,
+ self.soname,
+ ":".join(self.runpaths),
+ ",".join(self.needed),
+ (self.multilib_category if self.multilib_category
+ is not None else "")
+ ]) + "\n"
+
+ if sys.hexversion < 0x3000000:
+
+ __unicode__ = __str__
+
+ def __str__(self):
+ return _unicode_encode(self.__unicode__(),
+ encoding=_encodings['content'])
+
+ __str__.__doc__ = __unicode__.__doc__
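
A minimal sketch of parsing and re-serializing one entry with the class
above (the sample line and its field values are made up for illustration):

    from portage.util._dyn_libs.NeededEntry import NeededEntry

    # arch;filename;soname;runpaths(:-separated);needed(,-separated);multilib_category
    line = "x86_64;/usr/bin/foo;;/usr/lib64/foo;libc.so.6,libssl.so.1.1;x86_64"
    entry = NeededEntry.parse("NEEDED.ELF.2", line)

    print(entry.runpaths)           # ('/usr/lib64/foo',)
    print(entry.needed)             # ('libc.so.6', 'libssl.so.1.1')
    print(entry.multilib_category)  # 'x86_64'
    print(str(entry).rstrip())      # round-trips to the semicolon-delimited form
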
diff --git a/lib/portage/util/_dyn_libs/PreservedLibsRegistry.py b/lib/portage/util/_dyn_libs/PreservedLibsRegistry.py
new file mode 100644
index 000000000..f83b82a31
--- /dev/null
+++ b/lib/portage/util/_dyn_libs/PreservedLibsRegistry.py
@@ -0,0 +1,255 @@
+# Copyright 1998-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import json
+import logging
+import stat
+import sys
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+from portage import abssymlink
+from portage import os
+from portage import _encodings
+from portage import _os_merge
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.exception import PermissionDenied
+from portage.localization import _
+from portage.util import atomic_ofstream
+from portage.util import writemsg_level
+from portage.versions import cpv_getkey
+from portage.locks import lockfile, unlockfile
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+class PreservedLibsRegistry(object):
+ """ This class handles the tracking of preserved library objects """
+
+ # JSON read support has been available since portage-2.2.0_alpha89.
+ _json_write = True
+
+ _json_write_opts = {
+ "ensure_ascii": False,
+ "indent": "\t",
+ "sort_keys": True
+ }
+ if sys.hexversion < 0x30200F0:
+ # indent only supports int number of spaces
+ _json_write_opts["indent"] = 4
+
+ def __init__(self, root, filename):
+ """
+ @param root: root used to check existence of paths in pruneNonExisting
+ @type root: String
+ @param filename: absolute path for saving the preserved libs records
+ @type filename: String
+ """
+ self._root = root
+ self._filename = filename
+ self._data = None
+ self._lock = None
+
+ def lock(self):
+ """Grab an exclusive lock on the preserved libs registry."""
+ if self._lock is not None:
+ raise AssertionError("already locked")
+ self._lock = lockfile(self._filename)
+
+ def unlock(self):
+ """Release our exclusive lock on the preserved libs registry."""
+ if self._lock is None:
+ raise AssertionError("not locked")
+ unlockfile(self._lock)
+ self._lock = None
+
+ def load(self):
+ """ Reload the registry data from file """
+ self._data = None
+ f = None
+ content = None
+ try:
+ f = open(_unicode_encode(self._filename,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ content = f.read()
+ except EnvironmentError as e:
+ if not hasattr(e, 'errno'):
+ raise
+ elif e.errno == errno.ENOENT:
+ pass
+ elif e.errno == PermissionDenied.errno:
+ raise PermissionDenied(self._filename)
+ else:
+ raise
+ finally:
+ if f is not None:
+ f.close()
+
+ # content is empty if it's an empty lock file
+ if content:
+ try:
+ self._data = json.loads(_unicode_decode(content,
+ encoding=_encodings['repo.content'], errors='strict'))
+ except SystemExit:
+ raise
+ except Exception as e:
+ try:
+ self._data = pickle.loads(content)
+ except SystemExit:
+ raise
+ except Exception:
+ writemsg_level(_("!!! Error loading '%s': %s\n") %
+ (self._filename, e), level=logging.ERROR,
+ noiselevel=-1)
+
+ if self._data is None:
+ self._data = {}
+ else:
+ for k, v in self._data.items():
+ if isinstance(v, (list, tuple)) and len(v) == 3 and \
+ isinstance(v[2], set):
+ # convert set to list, for write with JSONEncoder
+ self._data[k] = (v[0], v[1], list(v[2]))
+
+ self._data_orig = self._data.copy()
+ self.pruneNonExisting()
+
+ def store(self):
+ """
+ Store the registry data to the file. The existing inode will be
+ replaced atomically, so if that inode is currently being used
+ for a lock then that lock will be rendered useless. Therefore,
+ it is important not to call this method until the current lock
+ is ready to be immediately released.
+ """
+ if os.environ.get("SANDBOX_ON") == "1" or \
+ self._data == self._data_orig:
+ return
+ try:
+ f = atomic_ofstream(self._filename, 'wb')
+ if self._json_write:
+ f.write(_unicode_encode(
+ json.dumps(self._data, **self._json_write_opts),
+ encoding=_encodings['repo.content'], errors='strict'))
+ else:
+ pickle.dump(self._data, f, protocol=2)
+ f.close()
+ except EnvironmentError as e:
+ if e.errno != PermissionDenied.errno:
+ writemsg_level("!!! %s %s\n" % (e, self._filename),
+ level=logging.ERROR, noiselevel=-1)
+ else:
+ self._data_orig = self._data.copy()
+
+ def _normalize_counter(self, counter):
+ """
+ For simplicity, normalize as a unicode string
+ and strip whitespace. This avoids the need for
+ int conversion and a possible ValueError resulting
+ from vardb corruption.
+ """
+ if not isinstance(counter, basestring):
+ counter = str(counter)
+ return _unicode_decode(counter).strip()
+
+ def register(self, cpv, slot, counter, paths):
+ """ Register new objects in the registry. If there is a record with the
+ same packagename (internally derived from cpv) and slot it is
+ overwritten with the new data.
+ @param cpv: package instance that owns the objects
+ @type cpv: CPV (as String)
+ @param slot: the value of SLOT of the given package instance
+ @type slot: String
+ @param counter: vdb counter value for the package instance
+ @type counter: String
+ @param paths: absolute paths of objects that got preserved during an update
+ @type paths: List
+ """
+ cp = cpv_getkey(cpv)
+ cps = cp+":"+slot
+ counter = self._normalize_counter(counter)
+ if len(paths) == 0 and cps in self._data \
+ and self._data[cps][0] == cpv and \
+ self._normalize_counter(self._data[cps][1]) == counter:
+ del self._data[cps]
+ elif len(paths) > 0:
+ if isinstance(paths, set):
+ # convert set to list, for write with JSONEncoder
+ paths = list(paths)
+ self._data[cps] = (cpv, counter, paths)
+
+ def unregister(self, cpv, slot, counter):
+ """ Remove a previous registration of preserved objects for the given package.
+ @param cpv: package instance whose records should be removed
+ @type cpv: CPV (as String)
+ @param slot: the value of SLOT of the given package instance
+		@type slot: String
+		@param counter: vdb counter value for the package instance
+		@type counter: String
+		"""
+ self.register(cpv, slot, counter, [])
+
+ def pruneNonExisting(self):
+ """ Remove all records for objects that no longer exist on the filesystem. """
+
+ os = _os_merge
+
+ for cps in list(self._data):
+ cpv, counter, _paths = self._data[cps]
+
+ paths = []
+ hardlinks = set()
+ symlinks = {}
+ for f in _paths:
+ f_abs = os.path.join(self._root, f.lstrip(os.sep))
+ try:
+ lst = os.lstat(f_abs)
+ except OSError:
+ continue
+ if stat.S_ISLNK(lst.st_mode):
+ try:
+ symlinks[f] = os.readlink(f_abs)
+ except OSError:
+ continue
+ elif stat.S_ISREG(lst.st_mode):
+ hardlinks.add(f)
+ paths.append(f)
+
+			# Only count symlinks as preserved if they still point to a hardlink
+ # in the same directory, in order to handle cases where a tool such
+ # as eselect-opengl has updated the symlink to point to a hardlink
+ # in a different directory (see bug #406837). The unused hardlink
+ # is automatically found by _find_unused_preserved_libs, since the
+ # soname symlink no longer points to it. After the hardlink is
+ # removed by _remove_preserved_libs, it calls pruneNonExisting
+ # which eliminates the irrelevant symlink from the registry here.
+ for f, target in symlinks.items():
+ if abssymlink(f, target=target) in hardlinks:
+ paths.append(f)
+
+ if len(paths) > 0:
+ self._data[cps] = (cpv, counter, paths)
+ else:
+ del self._data[cps]
+
+ def hasEntries(self):
+ """ Check if this registry contains any records. """
+ if self._data is None:
+ self.load()
+ return len(self._data) > 0
+
+ def getPreservedLibs(self):
+ """ Return a mapping of packages->preserved objects.
+ @return mapping of package instances to preserved objects
+ @rtype Dict cpv->list-of-paths
+ """
+ if self._data is None:
+ self.load()
+ rValue = {}
+ for cps in self._data:
+ rValue[self._data[cps][0]] = self._data[cps][2]
+ return rValue
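
A minimal sketch of the lock/load/store cycle described above, assuming the
conventional registry location under /var/lib/portage (the path is
illustrative):

    from portage.util._dyn_libs.PreservedLibsRegistry import PreservedLibsRegistry

    registry = PreservedLibsRegistry("/", "/var/lib/portage/preserved_libs_registry")
    registry.lock()
    try:
        registry.load()                      # also prunes records for vanished objects
        for cpv, paths in registry.getPreservedLibs().items():
            print(cpv, paths)
        registry.store()                     # atomic write, skipped if nothing changed
    finally:
        registry.unlock()
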
diff --git a/lib/portage/util/_dyn_libs/__init__.py b/lib/portage/util/_dyn_libs/__init__.py
new file mode 100644
index 000000000..21a391aee
--- /dev/null
+++ b/lib/portage/util/_dyn_libs/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/util/_dyn_libs/display_preserved_libs.py b/lib/portage/util/_dyn_libs/display_preserved_libs.py
new file mode 100644
index 000000000..b16478d2b
--- /dev/null
+++ b/lib/portage/util/_dyn_libs/display_preserved_libs.py
@@ -0,0 +1,98 @@
+# Copyright 2007-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import logging
+
+import portage
+from portage.output import colorize
+
+def display_preserved_libs(vardb):
+
+ MAX_DISPLAY = 3
+
+ plibdata = vardb._plib_registry.getPreservedLibs()
+ linkmap = vardb._linkmap
+ consumer_map = {}
+ owners = {}
+
+ try:
+ linkmap.rebuild()
+ except portage.exception.CommandNotFound as e:
+ portage.util.writemsg_level("!!! Command Not Found: %s\n" % (e,),
+ level=logging.ERROR, noiselevel=-1)
+ else:
+ search_for_owners = set()
+ for cpv in plibdata:
+ internal_plib_keys = set(linkmap._obj_key(f) \
+ for f in plibdata[cpv])
+ for f in plibdata[cpv]:
+ if f in consumer_map:
+ continue
+ consumers = []
+ for c in linkmap.findConsumers(f, greedy=False):
+ # Filter out any consumers that are also preserved libs
+ # belonging to the same package as the provider.
+ if linkmap._obj_key(c) not in internal_plib_keys:
+ consumers.append(c)
+ consumers.sort()
+ consumer_map[f] = consumers
+ search_for_owners.update(consumers[:MAX_DISPLAY+1])
+
+ owners = {}
+ for f in search_for_owners:
+ owner_set = set()
+ for owner in linkmap.getOwners(f):
+ owner_dblink = vardb._dblink(owner)
+ if owner_dblink.exists():
+ owner_set.add(owner_dblink)
+ if owner_set:
+ owners[f] = owner_set
+
+ all_preserved = set()
+ all_preserved.update(*plibdata.values())
+
+ for cpv in plibdata:
+ print(colorize("WARN", ">>>") + " package: %s" % cpv)
+ samefile_map = {}
+ for f in plibdata[cpv]:
+ obj_key = linkmap._obj_key(f)
+ alt_paths = samefile_map.get(obj_key)
+ if alt_paths is None:
+ alt_paths = set()
+ samefile_map[obj_key] = alt_paths
+ alt_paths.add(f)
+
+ for alt_paths in samefile_map.values():
+ alt_paths = sorted(alt_paths)
+ for p in alt_paths:
+ print(colorize("WARN", " * ") + " - %s" % (p,))
+ f = alt_paths[0]
+ consumers = consumer_map.get(f, [])
+ consumers_non_preserved = [c for c in consumers
+ if c not in all_preserved]
+ if consumers_non_preserved:
+ # Filter the consumers that are preserved libraries, since
+ # they don't need to be rebuilt (see bug #461908).
+ consumers = consumers_non_preserved
+
+ if len(consumers) == MAX_DISPLAY + 1:
+ # Display 1 extra consumer, instead of displaying
+ # "used by 1 other files".
+ max_display = MAX_DISPLAY + 1
+ else:
+ max_display = MAX_DISPLAY
+ for c in consumers[:max_display]:
+ if c in all_preserved:
+ # The owner is displayed elsewhere due to having
+ # its libs preserved, so distinguish this special
+ # case (see bug #461908).
+ owners_desc = "preserved"
+ else:
+ owners_desc = ", ".join(x.mycpv for x in owners.get(c, []))
+ print(colorize("WARN", " * ") + " used by %s (%s)" % \
+ (c, owners_desc))
+ if len(consumers) > max_display:
+ print(colorize("WARN", " * ") + " used by %d other files" %
+ (len(consumers) - max_display))
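
A minimal sketch of invoking the report above against the running system's
installed-package database, assuming the process has sufficient privileges
to read the registry (_plib_registry is a private attribute):

    import portage
    from portage.util._dyn_libs.display_preserved_libs import display_preserved_libs

    vardb = portage.db[portage.root]["vartree"].dbapi
    plib_registry = vardb._plib_registry     # None when the registry is unavailable
    if plib_registry is not None and plib_registry.hasEntries():
        display_preserved_libs(vardb)
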
diff --git a/lib/portage/util/_dyn_libs/soname_deps.py b/lib/portage/util/_dyn_libs/soname_deps.py
new file mode 100644
index 000000000..544cbc8f1
--- /dev/null
+++ b/lib/portage/util/_dyn_libs/soname_deps.py
@@ -0,0 +1,168 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+import collections
+import fnmatch
+import functools
+from itertools import chain
+import os
+import re
+
+from portage.util import shlex_split
+from portage.util import (
+ normalize_path,
+ varexpand,
+)
+
+
+class SonameDepsProcessor(object):
+ """
+ Processes NEEDED.ELF.2 entries for one package, in order to generate
+ REQUIRES and PROVIDES metadata.
+
+ Any sonames provided by the package will automatically be filtered
+ from the generated REQUIRES values.
+ """
+
+ def __init__(self, provides_exclude, requires_exclude):
+ """
+ @param provides_exclude: PROVIDES_EXCLUDE value
+ @type provides_exclude: str
+ @param requires_exclude: REQUIRES_EXCLUDE value
+ @type requires_exclude: str
+ """
+ self._provides_exclude = self._exclude_pattern(provides_exclude)
+ self._requires_exclude = self._exclude_pattern(requires_exclude)
+ self._requires_map = collections.defaultdict(
+ functools.partial(collections.defaultdict, set))
+ self._provides_map = {}
+ self._provides_unfiltered = {}
+ self._basename_map = {}
+ self._provides = None
+ self._requires = None
+ self._intersected = False
+
+ @staticmethod
+ def _exclude_pattern(s):
+ # shlex_split enables quoted whitespace inside patterns
+ if s:
+ pat = re.compile("|".join(
+ fnmatch.translate(x.lstrip(os.sep))
+ for x in shlex_split(s)))
+ else:
+ pat = None
+ return pat
+
+ def add(self, entry):
+ """
+ Add one NEEDED.ELF.2 entry, for inclusion in the generated
+ REQUIRES and PROVIDES values.
+
+ @param entry: NEEDED.ELF.2 entry
+ @type entry: NeededEntry
+ """
+
+ multilib_cat = entry.multilib_category
+ if multilib_cat is None:
+ # This usage is invalid. The caller must ensure that
+ # the multilib category data is supplied here.
+ raise AssertionError(
+ "Missing multilib category data: %s" % entry.filename)
+
+ self._basename_map.setdefault(
+ os.path.basename(entry.filename), []).append(entry)
+
+ if entry.needed and (
+ self._requires_exclude is None or
+ self._requires_exclude.match(
+ entry.filename.lstrip(os.sep)) is None):
+ runpaths = frozenset()
+ if entry.runpaths is not None:
+ expand = {"ORIGIN": os.path.dirname(entry.filename)}
+ runpaths = frozenset(normalize_path(varexpand(x, expand,
+ error_leader=lambda: "%s: DT_RUNPATH: " % entry.filename))
+ for x in entry.runpaths)
+ for x in entry.needed:
+ if (self._requires_exclude is None or
+ self._requires_exclude.match(x) is None):
+ self._requires_map[multilib_cat][x].add(runpaths)
+
+ if entry.soname:
+ self._provides_unfiltered.setdefault(
+ multilib_cat, set()).add(entry.soname)
+
+ if entry.soname and (
+ self._provides_exclude is None or
+ (self._provides_exclude.match(
+ entry.filename.lstrip(os.sep)) is None and
+ self._provides_exclude.match(entry.soname) is None)):
+ self._provides_map.setdefault(
+ multilib_cat, set()).add(entry.soname)
+
+ def _intersect(self):
+ requires_map = self._requires_map
+ provides_map = self._provides_map
+ provides_unfiltered = self._provides_unfiltered
+
+ for multilib_cat in set(chain(requires_map, provides_map)):
+ provides_map.setdefault(multilib_cat, set())
+ provides_unfiltered.setdefault(multilib_cat, set())
+ for soname, consumers in list(requires_map[multilib_cat].items()):
+ if soname in provides_unfiltered[multilib_cat]:
+ del requires_map[multilib_cat][soname]
+ elif soname in self._basename_map:
+ # Handle internal libraries that lack an soname, which
+ # are resolved via DT_RUNPATH, see ebtables for example
+ # (bug 646190).
+ for entry in self._basename_map[soname]:
+ if entry.multilib_category != multilib_cat:
+ continue
+ dirname = os.path.dirname(entry.filename)
+ for runpaths in list(consumers):
+ if dirname in runpaths:
+ consumers.remove(runpaths)
+ if not consumers:
+ del requires_map[multilib_cat][soname]
+ break
+
+ provides_data = []
+ for multilib_cat in sorted(provides_map):
+ if provides_map[multilib_cat]:
+ provides_data.append(multilib_cat + ":")
+ provides_data.extend(sorted(provides_map[multilib_cat]))
+
+ if provides_data:
+ self._provides = " ".join(provides_data) + "\n"
+
+ requires_data = []
+ for multilib_cat in sorted(requires_map):
+ if requires_map[multilib_cat]:
+ requires_data.append(multilib_cat + ":")
+ requires_data.extend(sorted(requires_map[multilib_cat]))
+
+ if requires_data:
+ self._requires = " ".join(requires_data) + "\n"
+
+ self._intersected = True
+
+ @property
+ def provides(self):
+ """
+ @rtype: str
+ @return: PROVIDES value generated from NEEDED.ELF.2 entries
+ """
+ if not self._intersected:
+ self._intersect()
+ return self._provides
+
+ @property
+ def requires(self):
+ """
+ @rtype: str
+ @return: REQUIRES value generated from NEEDED.ELF.2 entries
+ """
+ if not self._intersected:
+ self._intersect()
+ return self._requires
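
A minimal sketch of feeding NEEDED.ELF.2 entries through the processor
above to derive PROVIDES/REQUIRES; the file path is hypothetical, and every
entry must carry a multilib category, per the assertion in add():

    from portage.util._dyn_libs.NeededEntry import NeededEntry
    from portage.util._dyn_libs.soname_deps import SonameDepsProcessor

    processor = SonameDepsProcessor(provides_exclude="", requires_exclude="")
    with open("/path/to/image/NEEDED.ELF.2") as f:     # hypothetical location
        for line in f:
            # Each entry is assumed to include the multilib category field.
            processor.add(NeededEntry.parse(f.name, line.rstrip("\n")))

    print("PROVIDES:", processor.provides)   # None when nothing qualifies
    print("REQUIRES:", processor.requires)
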
diff --git a/lib/portage/util/_eventloop/EventLoop.py b/lib/portage/util/_eventloop/EventLoop.py
new file mode 100644
index 000000000..ffd12cff9
--- /dev/null
+++ b/lib/portage/util/_eventloop/EventLoop.py
@@ -0,0 +1,1184 @@
+# Copyright 1999-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division
+
+import collections
+import errno
+import functools
+import logging
+import os
+import select
+import signal
+import sys
+import traceback
+
+try:
+ import asyncio as _real_asyncio
+except ImportError:
+ _real_asyncio = None
+
+try:
+ import fcntl
+except ImportError:
+ # http://bugs.jython.org/issue1074
+ fcntl = None
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.util.futures:asyncio',
+ 'portage.util.futures.executor.fork:ForkExecutor',
+ 'portage.util.futures.unix_events:_PortageEventLoop,_PortageChildWatcher',
+)
+
+from portage.util import writemsg_level
+from portage.util.monotonic import monotonic
+from ..SlotObject import SlotObject
+from .PollConstants import PollConstants
+from .PollSelectAdapter import PollSelectAdapter
+
+class EventLoop(object):
+ """
+ An event loop, intended to be compatible with the GLib event loop.
+ Call the iteration method in order to execute one iteration of the
+ loop. The idle_add and timeout_add methods serve as thread-safe
+ means to interact with the loop's thread.
+ """
+
+ supports_multiprocessing = True
+
+ # TODO: Find out why SIGCHLD signals aren't delivered during poll
+ # calls, forcing us to wakeup in order to receive them.
+ _sigchld_interval = 250
+
+ class _child_callback_class(SlotObject):
+ __slots__ = ("callback", "data", "pid", "source_id")
+
+ class _idle_callback_class(SlotObject):
+ __slots__ = ("_args", "_callback", "_cancelled")
+
+ class _io_handler_class(SlotObject):
+ __slots__ = ("args", "callback", "f", "source_id")
+
+ class _timeout_handler_class(SlotObject):
+ __slots__ = ("args", "function", "calling", "interval", "source_id",
+ "timestamp")
+
+ class _handle(object):
+ """
+ A callback wrapper object, compatible with asyncio.Handle.
+ """
+ __slots__ = ("_callback_id", "_loop")
+
+ def __init__(self, callback_id, loop):
+ self._callback_id = callback_id
+ self._loop = loop
+
+ def cancel(self):
+ """
+ Cancel the call. If the callback is already canceled or executed,
+ this method has no effect.
+ """
+ self._loop.source_remove(self._callback_id)
+
+ class _call_soon_callback(object):
+ """
+ Wraps a call_soon callback, and always returns False, since these
+ callbacks are only supposed to run once.
+ """
+ __slots__ = ("_args", "_callback")
+
+ def __init__(self, callback, args):
+ self._callback = callback
+ self._args = args
+
+ def __call__(self):
+ self._callback(*self._args)
+ return False
+
+ class _selector_callback(object):
+ """
+		Wraps a callback, and always returns True, for callbacks that
+ are supposed to run repeatedly.
+ """
+ __slots__ = ("_args", "_callbacks")
+
+ def __init__(self, callbacks):
+ self._callbacks = callbacks
+
+ def __call__(self, fd, event):
+ for callback, mask in self._callbacks:
+ if event & mask:
+ callback()
+ return True
+
+ def __init__(self, main=True):
+ """
+ @param main: If True then this is a singleton instance for use
+ in the main thread, otherwise it is a local instance which
+			can safely be used in a non-main thread (default is True, so
+ that global_event_loop does not need constructor arguments)
+ @type main: bool
+ """
+ self._use_signal = main and fcntl is not None
+ self._debug = bool(os.environ.get('PYTHONASYNCIODEBUG'))
+ self._thread_rlock = threading.RLock()
+ self._thread_condition = threading.Condition(self._thread_rlock)
+ self._poll_event_queue = []
+ self._poll_event_handlers = {}
+ self._poll_event_handler_ids = {}
+ # Number of current calls to self.iteration(). A number greater
+ # than 1 indicates recursion, which is not supported by asyncio's
+ # default event loop.
+ self._iteration_depth = 0
+ # Increment id for each new handler.
+ self._event_handler_id = 0
+ # New call_soon callbacks must have an opportunity to
+ # execute before it's safe to wait on self._thread_condition
+ # without a timeout, since delaying its execution indefinitely
+ # could lead to a deadlock. The following attribute stores the
+ # event handler id of the most recently added call_soon callback.
+ # If this attribute has changed since the last time that the
+ # call_soon callbacks have been called, then it's not safe to
+ # wait on self._thread_condition without a timeout.
+ self._call_soon_id = None
+ # Use deque, with thread-safe append, in order to emulate the FIFO
+ # queue behavior of the AbstractEventLoop.call_soon method.
+ self._idle_callbacks = collections.deque()
+ self._idle_callbacks_remaining = 0
+ self._timeout_handlers = {}
+ self._timeout_interval = None
+ self._default_executor = None
+
+ self._poll_obj = None
+ try:
+ select.epoll
+ except AttributeError:
+ pass
+ else:
+ try:
+ epoll_obj = select.epoll()
+ except IOError:
+ # This happens with Linux 2.4 kernels:
+ # IOError: [Errno 38] Function not implemented
+ pass
+ else:
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000 and fcntl is not None:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(epoll_obj.fileno(), fcntl.F_SETFD,
+ fcntl.fcntl(epoll_obj.fileno(),
+ fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ self._poll_obj = _epoll_adapter(epoll_obj)
+ self.IO_ERR = select.EPOLLERR
+ self.IO_HUP = select.EPOLLHUP
+ self.IO_IN = select.EPOLLIN
+ self.IO_NVAL = 0
+ self.IO_OUT = select.EPOLLOUT
+ self.IO_PRI = select.EPOLLPRI
+
+ if self._poll_obj is None:
+ self._poll_obj = create_poll_instance()
+ self.IO_ERR = PollConstants.POLLERR
+ self.IO_HUP = PollConstants.POLLHUP
+ self.IO_IN = PollConstants.POLLIN
+ self.IO_NVAL = PollConstants.POLLNVAL
+ self.IO_OUT = PollConstants.POLLOUT
+ self.IO_PRI = PollConstants.POLLPRI
+
+ # These trigger both reader and writer callbacks.
+ EVENT_SHARED = self.IO_HUP | self.IO_ERR | self.IO_NVAL
+
+ self._EVENT_READ = self.IO_IN | EVENT_SHARED
+ self._EVENT_WRITE = self.IO_OUT | EVENT_SHARED
+
+ self._child_handlers = {}
+ self._sigchld_read = None
+ self._sigchld_write = None
+ self._sigchld_src_id = None
+ self._pid = os.getpid()
+ self._asyncio_wrapper = _PortageEventLoop(loop=self)
+ self._asyncio_child_watcher = _PortageChildWatcher(self)
+
+ def create_future(self):
+ """
+ Create a Future object attached to the loop.
+ """
+ return asyncio.Future(loop=self._asyncio_wrapper)
+
+ def _new_source_id(self):
+ """
+ Generate a new source id. This method is thread-safe.
+ """
+ with self._thread_rlock:
+ self._event_handler_id += 1
+ return self._event_handler_id
+
+ def _poll(self, timeout=None):
+ """
+ All poll() calls pass through here. The poll events
+ are added directly to self._poll_event_queue.
+ In order to avoid endless blocking, this raises
+ StopIteration if timeout is None and there are
+ no file descriptors to poll.
+ """
+
+ if timeout is None and \
+ not self._poll_event_handlers:
+ raise StopIteration(
+ "timeout is None and there are no poll() event handlers")
+
+ while True:
+ try:
+ self._poll_event_queue.extend(self._poll_obj.poll(timeout))
+ break
+ except (IOError, select.error) as e:
+ # Silently handle EINTR, which is normal when we have
+ # received a signal such as SIGINT (epoll objects may
+ # raise IOError rather than select.error, at least in
+ # Python 3.2).
+ if not (e.args and e.args[0] == errno.EINTR):
+ writemsg_level("\n!!! select error: %s\n" % (e,),
+ level=logging.ERROR, noiselevel=-1)
+ del e
+
+ # This typically means that we've received a SIGINT, so
+ # raise StopIteration in order to break out of our current
+ # iteration and respond appropriately to the signal as soon
+ # as possible.
+ raise StopIteration("interrupted")
+
+ def iteration(self, *args):
+ """
+ Like glib.MainContext.iteration(), runs a single iteration. In order
+ to avoid blocking forever when may_block is True (the default),
+ callers must be careful to ensure that at least one of the following
+ conditions is met:
+ 1) An event source or timeout is registered which is guaranteed
+			   to trigger at least one event (a call to an idle function
+ only counts as an event if it returns a False value which
+ causes it to stop being called)
+ 2) Another thread is guaranteed to call one of the thread-safe
+ methods which notify iteration to stop waiting (such as
+ idle_add or timeout_add).
+ These rules ensure that iteration is able to block until an event
+ arrives, without doing any busy waiting that would waste CPU time.
+ @type may_block: bool
+ @param may_block: if True the call may block waiting for an event
+ (default is True).
+ @rtype: bool
+ @return: True if events were dispatched.
+ """
+ self._iteration_depth += 1
+ try:
+ return self._iteration(*args)
+ finally:
+ self._iteration_depth -= 1
+
+ def _iteration(self, *args):
+ may_block = True
+
+ if args:
+ if len(args) > 1:
+ raise TypeError(
+ "expected at most 1 argument (%s given)" % len(args))
+ may_block = args[0]
+
+ event_queue = self._poll_event_queue
+ event_handlers = self._poll_event_handlers
+ events_handled = 0
+ timeouts_checked = False
+
+ if not event_handlers:
+ with self._thread_condition:
+ prev_call_soon_id = self._call_soon_id
+ if self._run_timeouts():
+ events_handled += 1
+ timeouts_checked = True
+
+ call_soon = prev_call_soon_id is not self._call_soon_id
+ if self._call_soon_id is not None and self._call_soon_id._cancelled:
+ # Allow garbage collection of cancelled callback.
+ self._call_soon_id = None
+
+ if (not call_soon and not event_handlers
+ and not events_handled and may_block):
+ # Block so that we don't waste cpu time by looping too
+ # quickly. This makes EventLoop useful for code that needs
+ # to wait for timeout callbacks regardless of whether or
+ # not any IO handlers are currently registered.
+ timeout = self._get_poll_timeout()
+ if timeout is None:
+ wait_timeout = None
+ else:
+ wait_timeout = timeout / 1000
+ # NOTE: In order to avoid a possible infinite wait when
+ # wait_timeout is None, the previous _run_timeouts()
+ # call must have returned False *with* _thread_condition
+ # acquired. Otherwise, we would risk going to sleep after
+ # our only notify event has already passed.
+ self._thread_condition.wait(wait_timeout)
+ if self._run_timeouts():
+ events_handled += 1
+ timeouts_checked = True
+
+ # If any timeouts have executed, then return immediately,
+ # in order to minimize latency in termination of iteration
+ # loops that they may control.
+ if events_handled or not event_handlers:
+ return bool(events_handled)
+
+ if not event_queue:
+
+ if may_block:
+ timeout = self._get_poll_timeout()
+
+ # Avoid blocking for IO if there are any timeout
+ # or idle callbacks available to process.
+ if timeout != 0 and not timeouts_checked:
+ if self._run_timeouts():
+ events_handled += 1
+ timeouts_checked = True
+ if events_handled:
+ # Minimize latency for loops controlled
+ # by timeout or idle callback events.
+ timeout = 0
+ else:
+ timeout = 0
+
+ try:
+ self._poll(timeout=timeout)
+ except StopIteration:
+ # This can be triggered by EINTR which is caused by signals.
+ pass
+
+ # NOTE: IO event handlers may be re-entrant, in case something
+ # like AbstractPollTask._wait_loop() needs to be called inside
+ # a handler for some reason.
+ while event_queue:
+ events_handled += 1
+ f, event = event_queue.pop()
+ try:
+ x = event_handlers[f]
+ except KeyError:
+ # This is known to be triggered by the epoll
+ # implementation in qemu-user-1.2.2, and appears
+ # to be harmless (see bug #451326).
+ continue
+ if not x.callback(f, event, *x.args):
+ self.source_remove(x.source_id)
+
+ if not timeouts_checked:
+ if self._run_timeouts():
+ events_handled += 1
+ timeouts_checked = True
+
+ return bool(events_handled)
+
+ def _get_poll_timeout(self):
+
+ with self._thread_rlock:
+ if self._child_handlers:
+ if self._timeout_interval is None:
+ timeout = self._sigchld_interval
+ else:
+ timeout = min(self._sigchld_interval,
+ self._timeout_interval)
+ else:
+ timeout = self._timeout_interval
+
+ return timeout
+
+ def child_watch_add(self, pid, callback, data=None):
+ """
+ Like glib.child_watch_add(), sets callback to be called with the
+ user data specified by data when the child indicated by pid exits.
+ The signature for the callback is:
+
+ def callback(pid, condition, user_data)
+
+		where pid is the child process id, condition is the status
+ information about the child process and user_data is data.
+
+		@type pid: int
+ @param pid: process id of a child process to watch
+ @type callback: callable
+ @param callback: a function to call
+ @type data: object
+ @param data: the optional data to pass to function
+ @rtype: int
+ @return: an integer ID
+ """
+ source_id = self._new_source_id()
+ self._child_handlers[source_id] = self._child_callback_class(
+ callback=callback, data=data, pid=pid, source_id=source_id)
+
+ if self._use_signal:
+ if self._sigchld_read is None:
+ self._sigchld_read, self._sigchld_write = os.pipe()
+
+ fcntl.fcntl(self._sigchld_read, fcntl.F_SETFL,
+ fcntl.fcntl(self._sigchld_read,
+ fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ # FD_CLOEXEC is enabled by default in Python >=3.4.
+ if sys.hexversion < 0x3040000:
+ try:
+ fcntl.FD_CLOEXEC
+ except AttributeError:
+ pass
+ else:
+ fcntl.fcntl(self._sigchld_read, fcntl.F_SETFD,
+ fcntl.fcntl(self._sigchld_read,
+ fcntl.F_GETFD) | fcntl.FD_CLOEXEC)
+
+ # The IO watch is dynamically registered and unregistered as
+ # needed, since we don't want to consider it as a valid source
+ # of events when there are no child listeners. It's important
+ # to distinguish when there are no valid sources of IO events,
+ # in order to avoid an endless poll call if there's no timeout.
+ if self._sigchld_src_id is None:
+ self._sigchld_src_id = self.io_add_watch(
+ self._sigchld_read, self.IO_IN, self._sigchld_io_cb)
+ signal.signal(signal.SIGCHLD, self._sigchld_sig_cb)
+
+ # poll soon, in case the SIGCHLD has already arrived
+ self.call_soon(self._poll_child_processes)
+ return source_id
+
+ def _sigchld_sig_cb(self, signum, frame):
+ # If this signal handler was not installed by the
+ # current process then the signal doesn't belong to
+ # this EventLoop instance.
+ if os.getpid() == self._pid:
+ os.write(self._sigchld_write, b'\0')
+
+ def _sigchld_io_cb(self, fd, events):
+ try:
+ while True:
+ os.read(self._sigchld_read, 4096)
+ except OSError:
+ # read until EAGAIN
+ pass
+ self._poll_child_processes()
+ return True
+
+ def _poll_child_processes(self):
+ if not self._child_handlers:
+ return False
+
+ calls = 0
+
+ for x in list(self._child_handlers.values()):
+ if x.source_id not in self._child_handlers:
+ # it's already been called via re-entrance
+ continue
+ try:
+ wait_retval = os.waitpid(x.pid, os.WNOHANG)
+ except OSError as e:
+ if e.errno != errno.ECHILD:
+ raise
+ del e
+ self.source_remove(x.source_id)
+ else:
+ # With waitpid and WNOHANG, only check the
+ # first element of the tuple since the second
+ # element may vary (bug #337465).
+ if wait_retval[0] != 0:
+ calls += 1
+ self.source_remove(x.source_id)
+ x.callback(x.pid, wait_retval[1], x.data)
+
+ return bool(calls)
+
+ def idle_add(self, callback, *args):
+ """
+ Like glib.idle_add(), if callback returns False it is
+ automatically removed from the list of event sources and will
+ not be called again. This method is thread-safe.
+
+ The idle_add method is deprecated. Use the call_soon and
+ call_soon_threadsafe methods instead.
+
+ @type callback: callable
+ @param callback: a function to call
+ @return: a handle which can be used to cancel the callback
+ via the source_remove method
+ @rtype: object
+ """
+ with self._thread_condition:
+ source_id = self._idle_add(callback, *args)
+ self._thread_condition.notify()
+ return source_id
+
+ def _idle_add(self, callback, *args):
+ """Like idle_add(), but without thread safety."""
+ # Hold self._thread_condition when assigning self._call_soon_id,
+ # since it might be modified via a thread-safe method.
+ with self._thread_condition:
+ handle = self._call_soon_id = self._idle_callback_class(
+ _args=args, _callback=callback)
+ # This deque append is thread-safe, but it does *not* notify the
+ # loop's thread, so the caller must notify if appropriate.
+ self._idle_callbacks.append(handle)
+ return handle
+
+ def _run_idle_callbacks(self):
+ # assumes caller has acquired self._thread_rlock
+ if not self._idle_callbacks:
+ return False
+ state_change = 0
+ reschedule = []
+ # Use remaining count to avoid calling any newly scheduled callbacks,
+		# since self._idle_callbacks can be modified during the execution of
+ # these callbacks. The remaining count can be reset by recursive
+ # calls to this method. Recursion must remain supported until all
+ # consumers of AsynchronousLock.unlock() have been migrated to the
+ # async_unlock() method, see bug 614108.
+ self._idle_callbacks_remaining = len(self._idle_callbacks)
+
+ while self._idle_callbacks_remaining:
+ self._idle_callbacks_remaining -= 1
+ try:
+ x = self._idle_callbacks.popleft() # thread-safe
+ except IndexError:
+ break
+ if x._cancelled:
+ # it got cancelled while executing another callback
+ continue
+ if x._callback(*x._args):
+ # Reschedule, but not until after it's called, since
+ # we don't want it to call itself in a recursive call
+ # to this method.
+ self._idle_callbacks.append(x)
+ else:
+ x._cancelled = True
+ state_change += 1
+
+ return bool(state_change)
+
+ def timeout_add(self, interval, function, *args):
+ """
+ Like glib.timeout_add(), interval argument is the number of
+ milliseconds between calls to your function, and your function
+ should return False to stop being called, or True to continue
+ being called. Any additional positional arguments given here
+ are passed to your function when it's called. This method is
+ thread-safe.
+ """
+ with self._thread_condition:
+ source_id = self._new_source_id()
+ self._timeout_handlers[source_id] = \
+ self._timeout_handler_class(
+ interval=interval, function=function, args=args,
+ source_id=source_id, timestamp=self.time())
+ if self._timeout_interval is None or \
+ self._timeout_interval > interval:
+ self._timeout_interval = interval
+ self._thread_condition.notify()
+ return source_id
+
+ def _run_timeouts(self):
+
+ calls = 0
+ if not self._use_signal:
+ if self._poll_child_processes():
+ calls += 1
+
+ with self._thread_rlock:
+
+ if self._run_idle_callbacks():
+ calls += 1
+
+ if not self._timeout_handlers:
+ return bool(calls)
+
+ ready_timeouts = []
+ current_time = self.time()
+ for x in self._timeout_handlers.values():
+ elapsed_seconds = current_time - x.timestamp
+ # elapsed_seconds < 0 means the system clock has been adjusted
+ if elapsed_seconds < 0 or \
+ (x.interval - 1000 * elapsed_seconds) <= 0:
+ ready_timeouts.append(x)
+
+			# Iterate over our local list, since self._timeout_handlers can be
+			# modified during the execution of these callbacks.
+ for x in ready_timeouts:
+ if x.source_id not in self._timeout_handlers:
+ # it got cancelled while executing another timeout
+ continue
+ if x.calling:
+ # don't call it recursively
+ continue
+ calls += 1
+ x.calling = True
+ try:
+ x.timestamp = self.time()
+ if not x.function(*x.args):
+ self.source_remove(x.source_id)
+ finally:
+ x.calling = False
+
+ return bool(calls)
+
+ def add_reader(self, fd, callback, *args):
+ """
+ Start watching the file descriptor for read availability and then
+ call the callback with specified arguments.
+
+ Use functools.partial to pass keywords to the callback.
+ """
+ handler = self._poll_event_handlers.get(fd)
+ callbacks = [(functools.partial(callback, *args), self._EVENT_READ)]
+ selector_mask = self._EVENT_READ
+ if handler is not None:
+ if not isinstance(handler.callback, self._selector_callback):
+ raise AssertionError("add_reader called with fd "
+ "registered directly via io_add_watch")
+ for item in handler.callback._callbacks:
+ callback, mask = item
+ if mask != self._EVENT_READ:
+ selector_mask |= mask
+ callbacks.append(item)
+ self.source_remove(handler.source_id)
+ self.io_add_watch(fd, selector_mask, self._selector_callback(callbacks))
+
+ def remove_reader(self, fd):
+ """
+ Stop watching the file descriptor for read availability.
+ """
+ handler = self._poll_event_handlers.get(fd)
+ if handler is not None:
+ if not isinstance(handler.callback, self._selector_callback):
+ raise AssertionError("remove_reader called with fd "
+ "registered directly via io_add_watch")
+ callbacks = []
+ selector_mask = 0
+ removed = False
+ for item in handler.callback._callbacks:
+ callback, mask = item
+ if mask == self._EVENT_READ:
+ removed = True
+ else:
+ selector_mask |= mask
+ callbacks.append(item)
+ self.source_remove(handler.source_id)
+ if callbacks:
+ self.io_add_watch(fd, selector_mask,
+ self._selector_callback(callbacks))
+ return removed
+ return False
+
+ def add_writer(self, fd, callback, *args):
+ """
+ Start watching the file descriptor for write availability and then
+ call the callback with specified arguments.
+
+ Use functools.partial to pass keywords to the callback.
+ """
+ handler = self._poll_event_handlers.get(fd)
+ callbacks = [(functools.partial(callback, *args), self._EVENT_WRITE)]
+ selector_mask = self._EVENT_WRITE
+ if handler is not None:
+ if not isinstance(handler.callback, self._selector_callback):
+ raise AssertionError("add_reader called with fd "
+ "registered directly via io_add_watch")
+ for item in handler.callback._callbacks:
+ callback, mask = item
+ if mask != self._EVENT_WRITE:
+ selector_mask |= mask
+ callbacks.append(item)
+ self.source_remove(handler.source_id)
+ self.io_add_watch(fd, selector_mask, self._selector_callback(callbacks))
+
+ def remove_writer(self, fd):
+ """
+ Stop watching the file descriptor for write availability.
+ """
+ handler = self._poll_event_handlers.get(fd)
+ if handler is not None:
+ if not isinstance(handler.callback, self._selector_callback):
+ raise AssertionError("remove_reader called with fd "
+ "registered directly via io_add_watch")
+ callbacks = []
+ selector_mask = 0
+ removed = False
+ for item in handler.callback._callbacks:
+ callback, mask = item
+ if mask == self._EVENT_WRITE:
+ removed = True
+ else:
+ selector_mask |= mask
+ callbacks.append(item)
+ self.source_remove(handler.source_id)
+ if callbacks:
+ self.io_add_watch(fd, selector_mask,
+ self._selector_callback(callbacks))
+ return removed
+ return False
+
+ def io_add_watch(self, f, condition, callback, *args):
+ """
+ Like glib.io_add_watch(), your function should return False to
+ stop being called, or True to continue being called. Any
+ additional positional arguments given here are passed to your
+ function when it's called.
+
+ @type f: int or object with fileno() method
+ @param f: a file descriptor to monitor
+ @type condition: int
+ @param condition: a condition mask
+ @type callback: callable
+ @param callback: a function to call
+ @rtype: int
+ @return: an integer ID of the event source
+ """
+ if f in self._poll_event_handlers:
+ raise AssertionError("fd %d is already registered" % f)
+ source_id = self._new_source_id()
+ self._poll_event_handler_ids[source_id] = f
+ self._poll_event_handlers[f] = self._io_handler_class(
+ args=args, callback=callback, f=f, source_id=source_id)
+ self._poll_obj.register(f, condition)
+ return source_id
+
+ def source_remove(self, reg_id):
+ """
+ Like glib.source_remove(), this returns True if the given reg_id
+ is found and removed, and False if the reg_id is invalid or has
+ already been removed.
+ """
+ if isinstance(reg_id, self._idle_callback_class):
+ if not reg_id._cancelled:
+ reg_id._cancelled = True
+ return True
+ return False
+
+ x = self._child_handlers.pop(reg_id, None)
+ if x is not None:
+ if not self._child_handlers and self._use_signal:
+ signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+ self.source_remove(self._sigchld_src_id)
+ self._sigchld_src_id = None
+ return True
+
+ with self._thread_rlock:
+ timeout_handler = self._timeout_handlers.pop(reg_id, None)
+ if timeout_handler is not None:
+ if timeout_handler.interval == self._timeout_interval:
+ if self._timeout_handlers:
+ self._timeout_interval = min(x.interval
+ for x in self._timeout_handlers.values())
+ else:
+ self._timeout_interval = None
+ return True
+
+ f = self._poll_event_handler_ids.pop(reg_id, None)
+ if f is None:
+ return False
+ self._poll_obj.unregister(f)
+ if self._poll_event_queue:
+ # Discard any unhandled events that belong to this file,
+ # in order to prevent these events from being erroneously
+ # delivered to a future handler that is using a reallocated
+ # file descriptor of the same numeric value (causing
+ # extremely confusing bugs).
+ remaining_events = []
+ discarded_events = False
+ for event in self._poll_event_queue:
+ if event[0] == f:
+ discarded_events = True
+ else:
+ remaining_events.append(event)
+
+ if discarded_events:
+ self._poll_event_queue[:] = remaining_events
+
+ del self._poll_event_handlers[f]
+ return True
+
+ def run_until_complete(self, future):
+ """
+ Run until the Future is done.
+
+ @type future: asyncio.Future
+ @param future: a Future to wait for
+ @rtype: object
+ @return: the Future's result
+ @raise: the Future's exception
+ """
+ future = asyncio.ensure_future(future, loop=self._asyncio_wrapper)
+
+ # Since done callbacks are executed via call_soon, it's desirable
+ # to continue iterating until those callbacks have executed, which
+ # is easily achieved by registering a done callback and waiting for
+ # it to execute.
+ waiter = self.create_future()
+ future.add_done_callback(waiter.set_result)
+ while not waiter.done():
+ self.iteration()
+
+ return future.result()
+
+ def call_soon(self, callback, *args, **kwargs):
+ """
+ Arrange for a callback to be called as soon as possible. The callback
+ is called after call_soon() returns, when control returns to the event
+ loop.
+
+ This operates as a FIFO queue, callbacks are called in the order in
+ which they are registered. Each callback will be called exactly once.
+
+ Any positional arguments after the callback will be passed to the
+ callback when it is called.
+
+ The context argument currently does nothing, but exists for minimal
+ interoperability with Future instances that require it for PEP 567.
+
+ An object compatible with asyncio.Handle is returned, which can
+ be used to cancel the callback.
+
+ @type callback: callable
+ @param callback: a function to call
+ @type context: contextvars.Context
+ @param context: An optional keyword-only context argument allows
+ specifying a custom contextvars.Context for the callback to run
+ in. The current context is used when no context is provided.
+ @return: a handle which can be used to cancel the callback
+ @rtype: asyncio.Handle (or compatible)
+ """
+ try:
+ unexpected = next(key for key in kwargs if key != 'context')
+ except StopIteration:
+ pass
+ else:
+ raise TypeError("call_soon() got an unexpected keyword argument '%s'" % unexpected)
+ return self._handle(self._idle_add(
+ self._call_soon_callback(callback, args)), self)
+
+ def call_soon_threadsafe(self, callback, *args, **kwargs):
+ """Like call_soon(), but thread safe."""
+ try:
+ unexpected = next(key for key in kwargs if key != 'context')
+ except StopIteration:
+ pass
+ else:
+ raise TypeError("call_soon_threadsafe() got an unexpected keyword argument '%s'" % unexpected)
+ # idle_add provides thread safety
+ return self._handle(self.idle_add(
+ self._call_soon_callback(callback, args)), self)
+
+ def time(self):
+ """Return the time according to the event loop's clock.
+
+ This is a float expressed in seconds since an epoch, but the
+ epoch, precision, accuracy and drift are unspecified and may
+ differ per event loop.
+ """
+ return monotonic()
+
+ def call_later(self, delay, callback, *args, **kwargs):
+ """
+ Arrange for the callback to be called after the given delay seconds
+ (either an int or float).
+
+ An instance of asyncio.Handle is returned, which can be used to cancel
+ the callback.
+
+ callback will be called exactly once per call to call_later(). If two
+ callbacks are scheduled for exactly the same time, it is undefined
+ which will be called first.
+
+ The optional positional args will be passed to the callback when
+ it is called. If you want the callback to be called with some named
+ arguments, use a closure or functools.partial().
+
+ The context argument currently does nothing, but exists for minimal
+ interoperability with Future instances that require it for PEP 567.
+
+ Use functools.partial to pass keywords to the callback.
+
+ @type delay: int or float
+ @param delay: delay seconds
+ @type callback: callable
+ @param callback: a function to call
+ @type context: contextvars.Context
+ @param context: An optional keyword-only context argument allows
+ specifying a custom contextvars.Context for the callback to run
+ in. The current context is used when no context is provided.
+ @return: a handle which can be used to cancel the callback
+ @rtype: asyncio.Handle (or compatible)
+ """
+ try:
+ unexpected = next(key for key in kwargs if key != 'context')
+ except StopIteration:
+ pass
+ else:
+ raise TypeError("call_later() got an unexpected keyword argument '%s'" % unexpected)
+ return self._handle(self.timeout_add(
+ delay * 1000, self._call_soon_callback(callback, args)), self)
+
+ def call_at(self, when, callback, *args, **kwargs):
+ """
+ Arrange for the callback to be called at the given absolute
+ timestamp when (an int or float), using the same time reference as
+ AbstractEventLoop.time().
+
+ This method's behavior is the same as call_later().
+
+ An instance of asyncio.Handle is returned, which can be used to
+ cancel the callback.
+
+ Use functools.partial to pass keywords to the callback.
+
+ @type when: int or float
+ @param when: absolute timestamp when to call callback
+ @type callback: callable
+ @param callback: a function to call
+ @type context: contextvars.Context
+ @param context: An optional keyword-only context argument allows
+ specifying a custom contextvars.Context for the callback to run
+ in. The current context is used when no context is provided.
+ @return: a handle which can be used to cancel the callback
+ @rtype: asyncio.Handle (or compatible)
+ """
+ try:
+ unexpected = next(key for key in kwargs if key != 'context')
+ except StopIteration:
+ pass
+ else:
+ raise TypeError("call_at() got an unexpected keyword argument '%s'" % unexpected)
+ delta = when - self.time()
+ return self.call_later(delta if delta > 0 else 0, callback, *args)
+
+ def run_in_executor(self, executor, func, *args):
+ """
+ Arrange for a func to be called in the specified executor.
+
+ The executor argument should be an Executor instance. The default
+ executor is used if executor is None.
+
+ Use functools.partial to pass keywords to the *func*.
+
+ @param executor: executor
+ @type executor: concurrent.futures.Executor or None
+ @param func: a function to call
+ @type func: callable
+ @return: a Future
+ @rtype: asyncio.Future (or compatible)
+ """
+ if executor is None:
+ executor = self._default_executor
+ if executor is None:
+ executor = ForkExecutor(loop=self)
+ self._default_executor = executor
+ future = executor.submit(func, *args)
+ if _real_asyncio is not None:
+ future = _real_asyncio.wrap_future(future,
+ loop=self._asyncio_wrapper)
+ return future
+
+ def is_running(self):
+ """Return whether the event loop is currently running."""
+ return self._iteration_depth > 0
+
+ def is_closed(self):
+ """Returns True if the event loop was closed."""
+ return self._poll_obj is None
+
+ def close(self):
+ """Close the event loop.
+
+ This clears the queues and shuts down the executor,
+ and waits for it to finish.
+ """
+ executor = self._default_executor
+ if executor is not None:
+ self._default_executor = None
+ executor.shutdown(wait=True)
+
+ if self._poll_obj is not None:
+ close = getattr(self._poll_obj, 'close', None)
+ if close is not None:
+ close()
+ self._poll_obj = None
+
+ def default_exception_handler(self, context):
+ """
+ Default exception handler.
+
+ This is called when an exception occurs and no exception
+ handler is set, and can be called by a custom exception
+ handler that wants to defer to the default behavior.
+
+ The context parameter has the same meaning as in
+ `call_exception_handler()`.
+
+ @param context: exception context
+ @type context: dict
+ """
+ message = context.get('message')
+ if not message:
+ message = 'Unhandled exception in event loop'
+
+ exception = context.get('exception')
+ if exception is not None:
+ exc_info = (type(exception), exception, exception.__traceback__)
+ else:
+ exc_info = False
+
+ log_lines = [message]
+ for key in sorted(context):
+ if key in {'message', 'exception'}:
+ continue
+ value = context[key]
+ if key == 'source_traceback':
+ tb = ''.join(traceback.format_list(value))
+ value = 'Object created at (most recent call last):\n'
+ value += tb.rstrip()
+ elif key == 'handle_traceback':
+ tb = ''.join(traceback.format_list(value))
+ value = 'Handle created at (most recent call last):\n'
+ value += tb.rstrip()
+ else:
+ value = repr(value)
+ log_lines.append('{}: {}'.format(key, value))
+
+ logging.error('\n'.join(log_lines), exc_info=exc_info)
+ os.kill(os.getpid(), signal.SIGTERM)
+
+ def call_exception_handler(self, context):
+ """
+ Call the current event loop's exception handler.
+
+ The context argument is a dict containing the following keys:
+
+ - 'message': Error message;
+ - 'exception' (optional): Exception object;
+ - 'future' (optional): Future instance;
+ - 'handle' (optional): Handle instance;
+ - 'protocol' (optional): Protocol instance;
+ - 'transport' (optional): Transport instance;
+ - 'socket' (optional): Socket instance;
+ - 'asyncgen' (optional): Asynchronous generator that caused
+ the exception.
+
+ New keys may be introduced in the future.
+
+ @param context: exception context
+ @type context: dict
+ """
+ self.default_exception_handler(context)
+
+ def get_debug(self):
+ """
+ Get the debug mode (bool) of the event loop.
+
+ The default value is True if the environment variable
+ PYTHONASYNCIODEBUG is set to a non-empty string, False otherwise.
+ """
+ return self._debug
+
+ def set_debug(self, enabled):
+ """Set the debug mode of the event loop."""
+ self._debug = enabled
+
+
+_can_poll_device = None
+
+def can_poll_device():
+ """
+ Test if it's possible to use poll() on a device such as a pty. This
+ is known to fail on Darwin.
+ @rtype: bool
+ @return: True if poll() on a device succeeds, False otherwise.
+ """
+
+ global _can_poll_device
+ if _can_poll_device is not None:
+ return _can_poll_device
+
+ if not hasattr(select, "poll"):
+ _can_poll_device = False
+ return _can_poll_device
+
+ try:
+ dev_null = open('/dev/null', 'rb')
+ except IOError:
+ _can_poll_device = False
+ return _can_poll_device
+
+ p = select.poll()
+ try:
+ p.register(dev_null.fileno(), PollConstants.POLLIN)
+ except TypeError:
+ # Jython: Object 'org.python.core.io.FileIO@f8f175' is not watchable
+ _can_poll_device = False
+ return _can_poll_device
+
+ invalid_request = False
+ for f, event in p.poll():
+ if event & PollConstants.POLLNVAL:
+ invalid_request = True
+ break
+ dev_null.close()
+
+ _can_poll_device = not invalid_request
+ return _can_poll_device
+
+def create_poll_instance():
+ """
+ Create an instance of select.poll, or an instance of
+ PollSelectAdapter if there is no poll() implementation or
+ it is broken somehow.
+ """
+ if can_poll_device():
+ return select.poll()
+ return PollSelectAdapter()
+
+class _epoll_adapter(object):
+ """
+ Wraps a select.epoll instance in order to make it compatible
+ with select.poll instances. This is necessary since epoll instances
+ interpret timeout arguments differently. Note that the file descriptor
+ that is associated with an epoll instance will close automatically when
+ it is garbage collected, so it's not necessary to close it explicitly.
+ """
+ __slots__ = ('_epoll_obj', 'close')
+
+ def __init__(self, epoll_obj):
+ self._epoll_obj = epoll_obj
+ self.close = epoll_obj.close
+
+ def register(self, fd, *args):
+ self._epoll_obj.register(fd, *args)
+
+ def unregister(self, fd):
+ self._epoll_obj.unregister(fd)
+
+ def poll(self, *args):
+ if len(args) > 1:
+ raise TypeError(
+ "poll expected at most 2 arguments, got " + \
+ repr(1 + len(args)))
+ timeout = -1
+ if args:
+ timeout = args[0]
+ if timeout is None or timeout < 0:
+ timeout = -1
+ elif timeout != 0:
+ timeout = timeout / 1000
+
+ return self._epoll_obj.poll(timeout)
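
A minimal usage sketch for the scheduling methods above (illustrative only, not
part of the patch; assumes a loop instance exposing this interface, such as the
one returned by global_event_loop() added later in this diff):

    import functools

    def notify(msg="tick"):
        pass  # hypothetical callback

    loop = global_event_loop()
    # Positional args are passed through; keywords need functools.partial.
    handle = loop.call_later(5, functools.partial(notify, msg="sync done"))
    loop.call_at(loop.time() + 10, notify)
    handle.cancel()  # the returned asyncio.Handle-compatible object can cancel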
diff --git a/lib/portage/util/_eventloop/PollConstants.py b/lib/portage/util/_eventloop/PollConstants.py
new file mode 100644
index 000000000..d0270a996
--- /dev/null
+++ b/lib/portage/util/_eventloop/PollConstants.py
@@ -0,0 +1,18 @@
+# Copyright 1999-2009 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import select
+class PollConstants(object):
+
+ """
+ Provides POLL* constants that are equivalent to those from the
+ select module, for use by PollSelectAdapter.
+ """
+
+ names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
+ v = 1
+ for k in names:
+ locals()[k] = getattr(select, k, v)
+ v *= 2
+ del k, v
+
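
For readability, the locals() loop above is roughly equivalent to the following
explicit assignments (illustrative only): each POLL* name is taken from the
select module when the platform defines it, otherwise a distinct power of two
is used as a stand-in.

    import select

    POLLIN = getattr(select, "POLLIN", 1)
    POLLPRI = getattr(select, "POLLPRI", 2)
    POLLOUT = getattr(select, "POLLOUT", 4)
    POLLERR = getattr(select, "POLLERR", 8)
    POLLHUP = getattr(select, "POLLHUP", 16)
    POLLNVAL = getattr(select, "POLLNVAL", 32)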
diff --git a/lib/portage/util/_eventloop/PollSelectAdapter.py b/lib/portage/util/_eventloop/PollSelectAdapter.py
new file mode 100644
index 000000000..32b404b67
--- /dev/null
+++ b/lib/portage/util/_eventloop/PollSelectAdapter.py
@@ -0,0 +1,76 @@
+# Copyright 1999-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division
+
+from .PollConstants import PollConstants
+import select
+
+class PollSelectAdapter(object):
+
+ """
+ Use select to emulate a poll object, for
+ systems that don't support poll().
+ """
+
+ def __init__(self):
+ self._registered = {}
+ self._select_args = [[], [], []]
+
+ def register(self, fd, *args):
+ """
+ Only POLLIN is currently supported!
+ """
+ if len(args) > 1:
+ raise TypeError(
+ "register expected at most 2 arguments, got " + \
+ repr(1 + len(args)))
+
+ eventmask = PollConstants.POLLIN | \
+ PollConstants.POLLPRI | PollConstants.POLLOUT
+ if args:
+ eventmask = args[0]
+
+ self._registered[fd] = eventmask
+ self._select_args = None
+
+ def unregister(self, fd):
+ self._select_args = None
+ del self._registered[fd]
+
+ def poll(self, *args):
+ if len(args) > 1:
+ raise TypeError(
+ "poll expected at most 2 arguments, got " + \
+ repr(1 + len(args)))
+
+ timeout = None
+ if args:
+ timeout = args[0]
+
+ select_args = self._select_args
+ if select_args is None:
+ select_args = [list(self._registered), [], []]
+
+ if timeout is not None:
+ select_args = select_args[:]
+ # Translate poll() timeout args to select() timeout args:
+ #
+ # | units | value(s) for indefinite block
+ # ---------|--------------|------------------------------
+ # poll | milliseconds | omitted, negative, or None
+ # ---------|--------------|------------------------------
+ # select | seconds | omitted
+ # ---------|--------------|------------------------------
+
+ if timeout is not None and timeout < 0:
+ timeout = None
+ if timeout is not None:
+ select_args.append(timeout / 1000)
+
+ select_events = select.select(*select_args)
+ poll_events = []
+ for fd in select_events[0]:
+ poll_events.append((fd, PollConstants.POLLIN))
+ return poll_events
+
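
A small usage sketch (not part of the patch) showing the poll()-style
millisecond timeout being honored by the select()-based emulation:

    import os
    from portage.util._eventloop.PollConstants import PollConstants
    from portage.util._eventloop.PollSelectAdapter import PollSelectAdapter

    r, w = os.pipe()
    p = PollSelectAdapter()
    p.register(r, PollConstants.POLLIN)
    os.write(w, b"x")
    # Wait at most 1000 ms; returns [(r, POLLIN)] once data is readable.
    events = p.poll(1000)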
diff --git a/lib/portage/util/_eventloop/__init__.py b/lib/portage/util/_eventloop/__init__.py
new file mode 100644
index 000000000..418ad862b
--- /dev/null
+++ b/lib/portage/util/_eventloop/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/util/_eventloop/asyncio_event_loop.py b/lib/portage/util/_eventloop/asyncio_event_loop.py
new file mode 100644
index 000000000..ea0e03b23
--- /dev/null
+++ b/lib/portage/util/_eventloop/asyncio_event_loop.py
@@ -0,0 +1,137 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+import pdb
+import signal
+import sys
+
+try:
+ import asyncio as _real_asyncio
+ from asyncio.events import AbstractEventLoop as _AbstractEventLoop
+except ImportError:
+ # Allow ImportModulesTestCase to succeed.
+ _real_asyncio = None
+ _AbstractEventLoop = object
+
+import portage
+
+
+class AsyncioEventLoop(_AbstractEventLoop):
+ """
+ Implementation of asyncio.AbstractEventLoop which wraps asyncio's
+ event loop and is minimally compatible with _PortageEventLoop.
+ """
+
+ # Use portage's internal event loop in subprocesses, as a workaround
+ # for https://bugs.python.org/issue22087, and also
+ # https://bugs.python.org/issue29703 which affects pypy3-5.10.1.
+ supports_multiprocessing = False
+
+ def __init__(self, loop=None):
+ loop = loop or _real_asyncio.get_event_loop()
+ self._loop = loop
+ self.run_until_complete = (self._run_until_complete
+ if portage._internal_caller else loop.run_until_complete)
+ self.call_soon = loop.call_soon
+ self.call_soon_threadsafe = loop.call_soon_threadsafe
+ self.call_later = loop.call_later
+ self.call_at = loop.call_at
+ self.is_running = loop.is_running
+ self.is_closed = loop.is_closed
+ self.close = loop.close
+ self.create_future = (loop.create_future
+ if hasattr(loop, 'create_future') else self._create_future)
+ self.create_task = loop.create_task
+ self.add_reader = loop.add_reader
+ self.remove_reader = loop.remove_reader
+ self.add_writer = loop.add_writer
+ self.remove_writer = loop.remove_writer
+ self.run_in_executor = loop.run_in_executor
+ self.time = loop.time
+ self.default_exception_handler = loop.default_exception_handler
+ self.call_exception_handler = loop.call_exception_handler
+ self.set_debug = loop.set_debug
+ self.get_debug = loop.get_debug
+ self._wakeup_fd = -1
+
+ if portage._internal_caller:
+ loop.set_exception_handler(self._internal_caller_exception_handler)
+
+ @staticmethod
+ def _internal_caller_exception_handler(loop, context):
+ """
+ An exception handler which drops to a pdb shell if std* streams
+ refer to a tty, and otherwise kills the process with SIGTERM.
+
+ In order to avoid potential interference with API consumers, this
+ implementation is only used when portage._internal_caller is True.
+ """
+ loop.default_exception_handler(context)
+ if 'exception' in context:
+ # If we have a tty then start the debugger, since it might
+ # aid in diagnosis of the problem. If there's no tty, then
+ # exit immediately.
+ if all(s.isatty() for s in (sys.stdout, sys.stderr, sys.stdin)):
+ pdb.set_trace()
+ else:
+ # Normally emerge will wait for all coroutines to complete
+ # after SIGTERM has been received. However, an unhandled
+ # exception will prevent the interrupted coroutine from
+ # completing, therefore use the default SIGTERM handler
+ # in order to ensure that emerge exits immediately (though
+ # uncleanly).
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+ os.kill(os.getpid(), signal.SIGTERM)
+
+ def _create_future(self):
+ """
+ Provide AbstractEventLoop.create_future() for python3.4.
+ """
+ return _real_asyncio.Future(loop=self._loop)
+
+ @property
+ def _asyncio_child_watcher(self):
+ """
+ Portage internals use this as a layer of indirection for
+ asyncio.get_child_watcher(), in order to support versions of
+ python where asyncio is not available.
+
+ @rtype: asyncio.AbstractChildWatcher
+ @return: the internal event loop's AbstractChildWatcher interface
+ """
+ return _real_asyncio.get_child_watcher()
+
+ @property
+ def _asyncio_wrapper(self):
+ """
+ Portage internals use this as a layer of indirection in cases
+ where a wrapper around an asyncio.AbstractEventLoop implementation
+ is needed for purposes of compatibility.
+
+ @rtype: asyncio.AbstractEventLoop
+ @return: the internal event loop's AbstractEventLoop interface
+ """
+ return self
+
+ def _run_until_complete(self, future):
+ """
+ An implementation of AbstractEventLoop.run_until_complete that suppresses
+ spurious error messages like the following reported in bug 655656:
+
+ Exception ignored when trying to write to the signal wakeup fd:
+ BlockingIOError: [Errno 11] Resource temporarily unavailable
+
+ In order to avoid potential interference with API consumers, this
+ implementation is only used when portage._internal_caller is True.
+ """
+ if self._wakeup_fd != -1:
+ signal.set_wakeup_fd(self._wakeup_fd)
+ self._wakeup_fd = -1
+ # Account for any signals that may have arrived between
+ # set_wakeup_fd calls.
+ os.kill(os.getpid(), signal.SIGCHLD)
+ try:
+ return self._loop.run_until_complete(future)
+ finally:
+ self._wakeup_fd = signal.set_wakeup_fd(-1)
diff --git a/lib/portage/util/_eventloop/global_event_loop.py b/lib/portage/util/_eventloop/global_event_loop.py
new file mode 100644
index 000000000..2f6371dc1
--- /dev/null
+++ b/lib/portage/util/_eventloop/global_event_loop.py
@@ -0,0 +1,40 @@
+# Copyright 2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+import sys
+
+from .EventLoop import EventLoop
+from portage.util._eventloop.asyncio_event_loop import AsyncioEventLoop
+
+_asyncio_enabled = sys.version_info >= (3, 4)
+_default_constructor = AsyncioEventLoop if _asyncio_enabled else EventLoop
+
+# If _default_constructor doesn't support multiprocessing,
+# then _multiprocessing_constructor is used in subprocesses.
+_multiprocessing_constructor = EventLoop
+
+_MAIN_PID = os.getpid()
+_instances = {}
+
+def global_event_loop():
+ """
+ Get a global EventLoop (or compatible object) instance which
+ belongs exclusively to the current process.
+ """
+
+ pid = os.getpid()
+ instance = _instances.get(pid)
+ if instance is not None:
+ return instance
+
+ constructor = _default_constructor
+ if not constructor.supports_multiprocessing and pid != _MAIN_PID:
+ constructor = _multiprocessing_constructor
+
+ # Use the _asyncio_wrapper attribute, so that unit tests can compare
+ # the reference to one returned from _wrap_loop(), since they should
+ # not close the loop if it refers to a global event loop.
+ instance = constructor()._asyncio_wrapper
+ _instances[pid] = instance
+ return instance
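
A usage sketch (not part of the patch): instances are cached per pid, so
repeated calls within one process return the same object. The coroutine below
assumes Python >= 3.5 and the asyncio-backed default constructor.

    from portage.util._eventloop.global_event_loop import global_event_loop

    loop = global_event_loop()
    assert loop is global_event_loop()

    async def greet():
        return "hello"

    result = loop.run_until_complete(greet())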
diff --git a/lib/portage/util/_get_vm_info.py b/lib/portage/util/_get_vm_info.py
new file mode 100644
index 000000000..e8ad93805
--- /dev/null
+++ b/lib/portage/util/_get_vm_info.py
@@ -0,0 +1,80 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+import platform
+import subprocess
+
+from portage import _unicode_decode
+
+def get_vm_info():
+
+ vm_info = {}
+
+ if platform.system() == 'Linux':
+ try:
+ proc = subprocess.Popen(["free"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ pass
+ else:
+ output = _unicode_decode(proc.communicate()[0])
+ if proc.wait() == os.EX_OK:
+ for line in output.splitlines():
+ line = line.split()
+ if len(line) < 2:
+ continue
+ if line[0] == "Mem:":
+ try:
+ vm_info["ram.total"] = int(line[1]) * 1024
+ except ValueError:
+ pass
+ if len(line) > 3:
+ try:
+ vm_info["ram.free"] = int(line[3]) * 1024
+ except ValueError:
+ pass
+ elif line[0] == "Swap:":
+ try:
+ vm_info["swap.total"] = int(line[1]) * 1024
+ except ValueError:
+ pass
+ if len(line) > 3:
+ try:
+ vm_info["swap.free"] = int(line[3]) * 1024
+ except ValueError:
+ pass
+
+ else:
+
+ try:
+ proc = subprocess.Popen(["sysctl", "-a"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ pass
+ else:
+ output = _unicode_decode(proc.communicate()[0])
+ if proc.wait() == os.EX_OK:
+ for line in output.splitlines():
+ line = line.split(":", 1)
+ if len(line) != 2:
+ continue
+ line[1] = line[1].strip()
+ if line[0] == "hw.physmem":
+ try:
+ vm_info["ram.total"] = int(line[1])
+ except ValueError:
+ pass
+ elif line[0] == "vm.swap_total":
+ try:
+ vm_info["swap.total"] = int(line[1])
+ except ValueError:
+ pass
+ elif line[0] == "Free Memory Pages":
+ if line[1][-1] == "K":
+ try:
+ vm_info["ram.free"] = int(line[1][:-1]) * 1024
+ except ValueError:
+ pass
+
+ return vm_info
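
A quick usage sketch (not part of the patch); keys are only present when the
underlying tool reports them, and values are in bytes:

    from portage.util._get_vm_info import get_vm_info

    info = get_vm_info()
    for key in ("ram.total", "ram.free", "swap.total", "swap.free"):
        print(key, info.get(key))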
diff --git a/lib/portage/util/_info_files.py b/lib/portage/util/_info_files.py
new file mode 100644
index 000000000..fabf74b0f
--- /dev/null
+++ b/lib/portage/util/_info_files.py
@@ -0,0 +1,138 @@
+# Copyright 1999-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import logging
+import re
+import stat
+import subprocess
+
+import portage
+from portage import os
+
+def chk_updated_info_files(root, infodirs, prev_mtimes):
+
+ if os.path.exists("/usr/bin/install-info"):
+ out = portage.output.EOutput()
+ regen_infodirs = []
+ for z in infodirs:
+ if z == '':
+ continue
+ inforoot = portage.util.normalize_path(root + z)
+ if os.path.isdir(inforoot) and \
+ not [x for x in os.listdir(inforoot) \
+ if x.startswith('.keepinfodir')]:
+ infomtime = os.stat(inforoot)[stat.ST_MTIME]
+ if inforoot not in prev_mtimes or \
+ prev_mtimes[inforoot] != infomtime:
+ regen_infodirs.append(inforoot)
+
+ if not regen_infodirs:
+ portage.util.writemsg_stdout("\n")
+ if portage.util.noiselimit >= 0:
+ out.einfo("GNU info directory index is up-to-date.")
+ else:
+ portage.util.writemsg_stdout("\n")
+ if portage.util.noiselimit >= 0:
+ out.einfo("Regenerating GNU info directory index...")
+
+ dir_extensions = ("", ".gz", ".bz2")
+ icount = 0
+ badcount = 0
+ errmsg = ""
+ for inforoot in regen_infodirs:
+ if inforoot == '':
+ continue
+
+ if not os.path.isdir(inforoot) or \
+ not os.access(inforoot, os.W_OK):
+ continue
+
+ file_list = os.listdir(inforoot)
+ file_list.sort()
+ dir_file = os.path.join(inforoot, "dir")
+ moved_old_dir = False
+ processed_count = 0
+ for x in file_list:
+ if x.startswith(".") or \
+ os.path.isdir(os.path.join(inforoot, x)):
+ continue
+ if x.startswith("dir"):
+ skip = False
+ for ext in dir_extensions:
+ if x == "dir" + ext or \
+ x == "dir" + ext + ".old":
+ skip = True
+ break
+ if skip:
+ continue
+ if processed_count == 0:
+ for ext in dir_extensions:
+ try:
+ os.rename(dir_file + ext, dir_file + ext + ".old")
+ moved_old_dir = True
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+ processed_count += 1
+ try:
+ proc = subprocess.Popen(
+ ['/usr/bin/install-info',
+ '--dir-file=%s' % os.path.join(inforoot, "dir"),
+ os.path.join(inforoot, x)],
+ env=dict(os.environ, LANG="C", LANGUAGE="C"),
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ myso = None
+ else:
+ myso = portage._unicode_decode(
+ proc.communicate()[0]).rstrip("\n")
+ proc.wait()
+ existsstr = "already exists, for file `"
+ if myso:
+ if re.search(existsstr, myso):
+ # Already exists... Don't increment the count for this.
+ pass
+ elif myso[:44] == "install-info: warning: no info dir entry in ":
+ # This info file doesn't contain a DIR-header: install-info produces this
+ # (harmless) warning (the --quiet switch doesn't seem to work).
+ # Don't increment the count for this.
+ pass
+ else:
+ badcount += 1
+ errmsg += myso + "\n"
+ icount += 1
+
+ if moved_old_dir and not os.path.exists(dir_file):
+ # We didn't generate a new dir file, so put the old file
+ # back where it was originally found.
+ for ext in dir_extensions:
+ try:
+ os.rename(dir_file + ext + ".old", dir_file + ext)
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+
+ # Clean dir.old cruft so that they don't prevent
+ # unmerge of otherwise empty directories.
+ for ext in dir_extensions:
+ try:
+ os.unlink(dir_file + ext + ".old")
+ except EnvironmentError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ del e
+
+ # Update the mtime so we can potentially avoid regenerating.
+ prev_mtimes[inforoot] = os.stat(inforoot)[stat.ST_MTIME]
+
+ if badcount:
+ out.eerror("Processed %d info files; %d errors." % \
+ (icount, badcount))
+ portage.util.writemsg_level(errmsg,
+ level=logging.ERROR, noiselevel=-1)
+ else:
+ if icount > 0 and portage.util.noiselimit >= 0:
+ out.einfo("Processed %d info files." % (icount,))
diff --git a/lib/portage/util/_path.py b/lib/portage/util/_path.py
new file mode 100644
index 000000000..6fbcb438c
--- /dev/null
+++ b/lib/portage/util/_path.py
@@ -0,0 +1,27 @@
+# Copyright 2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import stat
+
+from portage import os
+from portage.exception import PermissionDenied
+
+def exists_raise_eaccess(path):
+ try:
+ os.stat(path)
+ except OSError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied("stat('%s')" % path)
+ return False
+ else:
+ return True
+
+def isdir_raise_eaccess(path):
+ try:
+ st = os.stat(path)
+ except OSError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied("stat('%s')" % path)
+ return False
+ else:
+ return stat.S_ISDIR(st.st_mode)
diff --git a/lib/portage/util/_pty.py b/lib/portage/util/_pty.py
new file mode 100644
index 000000000..11c8b92af
--- /dev/null
+++ b/lib/portage/util/_pty.py
@@ -0,0 +1,78 @@
+# Copyright 2010-2011 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import platform
+import pty
+import termios
+
+from portage import os
+from portage.output import get_term_size, set_term_size
+from portage.util import writemsg
+
+# Disable the use of openpty on Solaris, since Python's openpty
+# implementation doesn't play nice with Portage's behaviour there,
+# causing hangs/deadlocks.
+# Additional note for the future: on Interix, pipes do NOT work, so
+# _disable_openpty on Interix must *never* be True
+_disable_openpty = platform.system() in ("SunOS",)
+
+_fbsd_test_pty = platform.system() == 'FreeBSD'
+
+def _create_pty_or_pipe(copy_term_size=None):
+ """
+ Try to create a pty, and if that fails, create a normal
+ pipe instead.
+
+ @param copy_term_size: If a tty file descriptor is given
+ then the term size will be copied to the pty.
+ @type copy_term_size: int
+ @rtype: tuple
+ @return: A tuple of (is_pty, master_fd, slave_fd) where
+ is_pty is True if a pty was successfully allocated, and
+ False if a normal pipe was allocated.
+ """
+
+ got_pty = False
+
+ global _disable_openpty, _fbsd_test_pty
+
+ if _fbsd_test_pty and not _disable_openpty:
+ # Test for python openpty breakage after freebsd7 to freebsd8
+ # upgrade, which results in a 'Function not implemented' error
+ # and the process being killed.
+ pid = os.fork()
+ if pid == 0:
+ pty.openpty()
+ os._exit(os.EX_OK)
+ pid, status = os.waitpid(pid, 0)
+ if (status & 0xff) == 140:
+ _disable_openpty = True
+ _fbsd_test_pty = False
+
+ if _disable_openpty:
+ master_fd, slave_fd = os.pipe()
+ else:
+ try:
+ master_fd, slave_fd = pty.openpty()
+ got_pty = True
+ except EnvironmentError as e:
+ _disable_openpty = True
+ writemsg("openpty failed: '%s'\n" % str(e),
+ noiselevel=-1)
+ del e
+ master_fd, slave_fd = os.pipe()
+
+ if got_pty:
+ # Disable post-processing of output since otherwise weird
+ # things like \n -> \r\n transformations may occur.
+ mode = termios.tcgetattr(slave_fd)
+ mode[1] &= ~termios.OPOST
+ termios.tcsetattr(slave_fd, termios.TCSANOW, mode)
+
+ if got_pty and \
+ copy_term_size is not None and \
+ os.isatty(copy_term_size):
+ rows, columns = get_term_size()
+ set_term_size(rows, columns, slave_fd)
+
+ return (got_pty, master_fd, slave_fd)
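
A minimal usage sketch (not part of the patch):

    import os
    import sys

    from portage.util._pty import _create_pty_or_pipe

    got_pty, master_fd, slave_fd = _create_pty_or_pipe(
        copy_term_size=sys.stdout.fileno())
    # got_pty is False when the fallback to os.pipe() was taken.
    os.close(slave_fd)
    os.close(master_fd)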
diff --git a/lib/portage/util/_urlopen.py b/lib/portage/util/_urlopen.py
new file mode 100644
index 000000000..fc9db74a0
--- /dev/null
+++ b/lib/portage/util/_urlopen.py
@@ -0,0 +1,104 @@
+# Copyright 2012-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import io
+import sys
+from datetime import datetime
+from time import mktime
+from email.utils import formatdate, parsedate
+
+try:
+ from urllib.request import urlopen as _urlopen
+ import urllib.parse as urllib_parse
+ import urllib.request as urllib_request
+ from urllib.parse import splituser as urllib_parse_splituser
+except ImportError:
+ from urllib import urlopen as _urlopen
+ import urlparse as urllib_parse
+ import urllib2 as urllib_request
+ from urllib import splituser as urllib_parse_splituser
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+# Tolerance, in seconds, to account for the difference between the TIMESTAMP
+# of the index's contents and the file's mtime.
+TIMESTAMP_TOLERANCE = 5
+
+
+def have_pep_476():
+ """
+ Test whether ssl certificate verification is enabled by default for
+ stdlib http clients (PEP 476).
+
+ @return: bool, True if ssl certificate verification is enabled by
+ default
+ """
+ return hasattr(__import__('ssl'), '_create_unverified_context')
+
+
+def urlopen(url, if_modified_since=None):
+ parse_result = urllib_parse.urlparse(url)
+ if parse_result.scheme not in ("http", "https"):
+ return _urlopen(url)
+ else:
+ netloc = urllib_parse_splituser(parse_result.netloc)[1]
+ url = urllib_parse.urlunparse((parse_result.scheme, netloc, parse_result.path, parse_result.params, parse_result.query, parse_result.fragment))
+ password_manager = urllib_request.HTTPPasswordMgrWithDefaultRealm()
+ request = urllib_request.Request(url)
+ request.add_header('User-Agent', 'Gentoo Portage')
+ if if_modified_since:
+ request.add_header('If-Modified-Since', _timestamp_to_http(if_modified_since))
+ if parse_result.username is not None:
+ password_manager.add_password(None, url, parse_result.username, parse_result.password)
+ auth_handler = CompressedResponseProcessor(password_manager)
+ opener = urllib_request.build_opener(auth_handler)
+ hdl = opener.open(request)
+ if hdl.headers.get('last-modified', ''):
+ try:
+ add_header = hdl.headers.add_header
+ except AttributeError:
+ # Python 2
+ add_header = hdl.headers.addheader
+ add_header('timestamp', _http_to_timestamp(hdl.headers.get('last-modified')))
+ return hdl
+
+def _timestamp_to_http(timestamp):
+ dt = datetime.fromtimestamp(float(long(timestamp)+TIMESTAMP_TOLERANCE))
+ stamp = mktime(dt.timetuple())
+ return formatdate(timeval=stamp, localtime=False, usegmt=True)
+
+def _http_to_timestamp(http_datetime_string):
+ tuple = parsedate(http_datetime_string)
+ timestamp = mktime(tuple)
+ return str(long(timestamp))
+
+class CompressedResponseProcessor(urllib_request.HTTPBasicAuthHandler):
+ # Handler for compressed responses.
+
+ def http_request(self, req):
+ req.add_header('Accept-Encoding', 'bzip2,gzip,deflate')
+ return req
+ https_request = http_request
+
+ def http_response(self, req, response):
+ decompressed = None
+ if response.headers.get('content-encoding') == 'bzip2':
+ import bz2
+ decompressed = io.BytesIO(bz2.decompress(response.read()))
+ elif response.headers.get('content-encoding') == 'gzip':
+ from gzip import GzipFile
+ decompressed = GzipFile(fileobj=io.BytesIO(response.read()), mode='r')
+ elif response.headers.get('content-encoding') == 'deflate':
+ import zlib
+ try:
+ decompressed = io.BytesIO(zlib.decompress(response.read()))
+ except zlib.error: # they ignored RFC1950
+ decompressed = io.BytesIO(zlib.decompress(response.read(), -zlib.MAX_WBITS))
+ if decompressed:
+ old_response = response
+ response = urllib_request.addinfourl(decompressed, old_response.headers, old_response.url, old_response.code)
+ response.msg = old_response.msg
+ return response
+ https_response = http_response
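
A usage sketch (not part of the patch) showing the If-Modified-Since round
trip; the 'timestamp' header is synthesized above from the server's
Last-Modified value. The URL is hypothetical.

    from portage.util._urlopen import urlopen

    hdl = urlopen("https://example.org/Manifest", if_modified_since="1500000000")
    new_timestamp = hdl.headers.get("timestamp")  # None if no Last-Modified
    data = hdl.read()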
diff --git a/lib/portage/util/_xattr.py b/lib/portage/util/_xattr.py
new file mode 100644
index 000000000..9a8704d70
--- /dev/null
+++ b/lib/portage/util/_xattr.py
@@ -0,0 +1,228 @@
+# Copyright 2010-2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Portability shim for xattr support
+
+Exported API is the xattr object with get/get_all/set/remove/list operations.
+We do not include the functions that Python 3.3+ provides in the os module,
+because their signatures differ from those of the xattr module.
+
+See the standard xattr module for more documentation:
+ https://pypi.python.org/pypi/pyxattr
+"""
+
+from __future__ import print_function
+
+import contextlib
+import os
+import subprocess
+
+from portage.exception import OperationNotSupported
+
+
+class _XattrGetAll(object):
+ """Implement get_all() using list()/get() if there is no easy bulk method"""
+
+ @classmethod
+ def get_all(cls, item, nofollow=False, namespace=None):
+ return [(name, cls.get(item, name, nofollow=nofollow, namespace=namespace))
+ for name in cls.list(item, nofollow=nofollow, namespace=namespace)]
+
+
+class _XattrSystemCommands(_XattrGetAll):
+ """Implement things with getfattr/setfattr"""
+
+ @staticmethod
+ def _parse_output(output):
+ for line in output.readlines():
+ if line.startswith(b'#'):
+ continue
+ line = line.rstrip()
+ if not line:
+ continue
+ # The lines will have the format:
+ # user.hex=0x12345
+ # user.base64=0sAQAAAgAgAAAAAAAAAAAAAAAAAAA=
+ # user.string="value0"
+ # But since we don't do interpretation on the value (we just
+ # save & restore it), don't bother with decoding here.
+ yield line.split(b'=', 1)
+
+ @staticmethod
+ def _call(*args, **kwargs):
+ proc = subprocess.Popen(*args, **kwargs)
+ if proc.stdin:
+ proc.stdin.close()
+ proc.wait()
+ return proc
+
+ @classmethod
+ def get(cls, item, name, nofollow=False, namespace=None):
+ if namespace:
+ name = '%s.%s' % (namespace, name)
+ cmd = ['getfattr', '--absolute-names', '-n', name, item]
+ if nofollow:
+ cmd += ['-h']
+ proc = cls._call(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ value = None
+ for _, value in cls._parse_output(proc.stdout):
+ break
+
+ proc.stdout.close()
+ return value
+
+ @classmethod
+ def set(cls, item, name, value, _flags=0, namespace=None):
+ if namespace:
+ name = '%s.%s' % (namespace, name)
+ cmd = ['setfattr', '-n', name, '-v', value, item]
+ cls._call(cmd)
+
+ @classmethod
+ def remove(cls, item, name, nofollow=False, namespace=None):
+ if namespace:
+ name = '%s.%s' % (namespace, name)
+ cmd = ['setfattr', '-x', name, item]
+ if nofollow:
+ cmd += ['-h']
+ cls._call(cmd)
+
+ @classmethod
+ def list(cls, item, nofollow=False, namespace=None, _names_only=True):
+ cmd = ['getfattr', '-d', '--absolute-names', item]
+ if nofollow:
+ cmd += ['-h']
+ cmd += ['-m', ('^%s[.]' % namespace) if namespace else '-']
+ proc = cls._call(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ ret = []
+ if namespace:
+ namespace = '%s.' % namespace
+ for name, value in cls._parse_output(proc.stdout):
+ if namespace:
+ if name.startswith(namespace):
+ name = name[len(namespace):]
+ else:
+ continue
+ if _names_only:
+ ret.append(name)
+ else:
+ ret.append((name, value))
+
+ proc.stdout.close()
+ return ret
+
+ @classmethod
+ def get_all(cls, item, nofollow=False, namespace=None):
+ return cls.list(item, nofollow=nofollow, namespace=namespace,
+ _names_only=False)
+
+
+class _XattrStub(_XattrGetAll):
+ """Fake object since system doesn't support xattrs"""
+
+ # pylint: disable=unused-argument
+
+ @staticmethod
+ def _raise():
+ e = OSError('stub')
+ e.errno = OperationNotSupported.errno
+ raise e
+
+ @classmethod
+ def get(cls, item, name, nofollow=False, namespace=None):
+ cls._raise()
+
+ @classmethod
+ def set(cls, item, name, value, flags=0, namespace=None):
+ cls._raise()
+
+ @classmethod
+ def remove(cls, item, name, nofollow=False, namespace=None):
+ cls._raise()
+
+ @classmethod
+ def list(cls, item, nofollow=False, namespace=None):
+ cls._raise()
+
+
+if hasattr(os, 'getxattr'):
+ # Easy as pie -- active python supports it.
+ class xattr(_XattrGetAll):
+ """Python >=3.3 and GNU/Linux"""
+
+ # pylint: disable=unused-argument
+
+ @staticmethod
+ def get(item, name, nofollow=False, namespace=None):
+ return os.getxattr(item, name, follow_symlinks=not nofollow)
+
+ @staticmethod
+ def set(item, name, value, flags=0, namespace=None):
+ return os.setxattr(item, name, value, flags=flags)
+
+ @staticmethod
+ def remove(item, name, nofollow=False, namespace=None):
+ return os.removexattr(item, name, follow_symlinks=not nofollow)
+
+ @staticmethod
+ def list(item, nofollow=False, namespace=None):
+ return os.listxattr(item, follow_symlinks=not nofollow)
+
+else:
+ try:
+ # Maybe we have the xattr module.
+ import xattr
+
+ except ImportError:
+ try:
+ # Maybe we have the attr package.
+ with open(os.devnull, 'wb') as f:
+ subprocess.call(['getfattr', '--version'], stdout=f)
+ subprocess.call(['setfattr', '--version'], stdout=f)
+ xattr = _XattrSystemCommands
+
+ except OSError:
+ # Stub it out completely.
+ xattr = _XattrStub
+
+
+# Add a knob so code can take evasive action as needed.
+XATTRS_WORKS = xattr != _XattrStub
+
+
+@contextlib.contextmanager
+def preserve_xattrs(path, nofollow=False, namespace=None):
+ """Context manager to save/restore extended attributes on |path|
+
+ If you want to rewrite a file (possibly replacing it with a new one), but
+ want to preserve the extended attributes, this will do the trick.
+
+ # First read all the extended attributes.
+ with preserve_xattrs('/some/file'):
+ ... rewrite the file ...
+ # Now the extended attributes are restored as needed.
+ """
+ kwargs = {'nofollow': nofollow,}
+ if namespace:
+ # Compiled xattr python module does not like it when namespace=None.
+ kwargs['namespace'] = namespace
+
+ old_attrs = dict(xattr.get_all(path, **kwargs))
+ try:
+ yield
+ finally:
+ new_attrs = dict(xattr.get_all(path, **kwargs))
+ for name, value in new_attrs.items():
+ if name not in old_attrs:
+ # Clear out new ones.
+ xattr.remove(path, name, **kwargs)
+ elif new_attrs[name] != old_attrs[name]:
+ # Update changed ones.
+ xattr.set(path, name, value, **kwargs)
+
+ for name, value in old_attrs.items():
+ if name not in new_attrs:
+ # Re-add missing ones.
+ xattr.set(path, name, value, **kwargs)
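
A minimal usage sketch (not part of the patch); the path is hypothetical:

    from portage.util._xattr import XATTRS_WORKS, preserve_xattrs

    if XATTRS_WORKS:
        with preserve_xattrs("/some/file"):
            pass  # rewrite /some/file here; its xattrs are restored afterwards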
diff --git a/lib/portage/util/backoff.py b/lib/portage/util/backoff.py
new file mode 100644
index 000000000..ee39007ef
--- /dev/null
+++ b/lib/portage/util/backoff.py
@@ -0,0 +1,53 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'ExponentialBackoff',
+ 'RandomExponentialBackoff',
+)
+
+import random
+import sys
+
+
+class ExponentialBackoff(object):
+ """
+ An object that when called with number of previous tries, calculates
+ an exponential delay for the next try.
+ """
+ def __init__(self, multiplier=1, base=2, limit=sys.maxsize):
+ """
+ @param multiplier: constant multiplier
+ @type multiplier: int or float
+ @param base: base of the exponential expression (delay grows as base ** tries)
+ @type base: int or float
+ @param limit: maximum number of seconds to delay
+ @type limit: int or float
+ """
+ self._multiplier = multiplier
+ self._base = base
+ self._limit = limit
+
+ def __call__(self, tries):
+ """
+ Given a number of previous tries, calculate the amount of time
+ to delay the next try.
+
+ @param tries: number of previous tries
+ @type tries: int
+ @return: amount of time to delay the next try
+ @rtype: int or float
+ """
+ try:
+ return min(self._limit, self._multiplier * (self._base ** tries))
+ except OverflowError:
+ return self._limit
+
+
+class RandomExponentialBackoff(ExponentialBackoff):
+ """
+ Equivalent to ExponentialBackoff, with an extra multiplier that uses
+ a random distribution between 0 and 1.
+ """
+ def __call__(self, tries):
+ return random.random() * super(RandomExponentialBackoff, self).__call__(tries)
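
A usage sketch (not part of the patch): delays grow as multiplier * base **
tries, capped at limit, and RandomExponentialBackoff scales each delay by a
random factor in [0, 1). The do_request() helper is hypothetical.

    import time

    from portage.util.backoff import RandomExponentialBackoff

    backoff = RandomExponentialBackoff(multiplier=2, base=2, limit=60)
    for tries in range(5):
        if do_request():
            break
        time.sleep(backoff(tries))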
diff --git a/lib/portage/util/changelog.py b/lib/portage/util/changelog.py
new file mode 100644
index 000000000..9fc5ab6df
--- /dev/null
+++ b/lib/portage/util/changelog.py
@@ -0,0 +1,69 @@
+#!/usr/bin/python -b
+# Copyright 2009-2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+
+from portage.manifest import guessManifestFileType
+from portage.versions import _unicode, pkgsplit, vercmp
+
+
+class ChangeLogTypeSort(_unicode):
+ """
+ Helps to sort file names by file type and other criteria.
+ """
+ def __new__(cls, status_change, file_name):
+ return _unicode.__new__(cls, status_change + file_name)
+
+ def __init__(self, status_change, file_name):
+ _unicode.__init__(status_change + file_name)
+ self.status_change = status_change
+ self.file_name = file_name
+ self.file_type = guessManifestFileType(file_name)
+
+ @staticmethod
+ def _file_type_lt(a, b):
+ """
+ Defines an ordering between file types.
+ """
+ first = a.file_type
+ second = b.file_type
+ if first == second:
+ return False
+
+ if first == "EBUILD":
+ return True
+ elif first == "MISC":
+ return second in ("EBUILD",)
+ elif first == "AUX":
+ return second in ("EBUILD", "MISC")
+ elif first == "DIST":
+ return second in ("EBUILD", "MISC", "AUX")
+ elif first is None:
+ return False
+ else:
+ raise ValueError("Unknown file type '%s'" % first)
+
+ def __lt__(self, other):
+ """
+ Compare different file names, first by file type and then
+ for ebuilds by version and lexicographically for others.
+ EBUILD < MISC < AUX < DIST < None
+ """
+ if self.__class__ != other.__class__:
+ raise NotImplementedError
+
+ # Sort by file type as defined by _file_type_lt().
+ if self._file_type_lt(self, other):
+ return True
+ elif self._file_type_lt(other, self):
+ return False
+
+ # Files have the same type.
+ if self.file_type == "EBUILD":
+ # Sort by version. Lowest first.
+ ver = "-".join(pkgsplit(self.file_name[:-7])[1:3])
+ other_ver = "-".join(pkgsplit(other.file_name[:-7])[1:3])
+ return vercmp(ver, other_ver) < 0
+ else:
+ # Sort lexicographically.
+ return self.file_name < other.file_name
diff --git a/lib/portage/util/compression_probe.py b/lib/portage/util/compression_probe.py
new file mode 100644
index 000000000..29d0eedff
--- /dev/null
+++ b/lib/portage/util/compression_probe.py
@@ -0,0 +1,111 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+import re
+import sys
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+from portage import _encodings, _unicode_encode
+from portage.exception import FileNotFound, PermissionDenied
+
+_compressors = {
+ "bzip2": {
+ "compress": "${PORTAGE_BZIP2_COMMAND} ${BINPKG_COMPRESS_FLAGS}",
+ "decompress": "${PORTAGE_BUNZIP2_COMMAND}",
+ "decompress_alt": "${PORTAGE_BZIP2_COMMAND} -d",
+ "package": "app-arch/bzip2",
+ },
+ "gzip": {
+ "compress": "gzip ${BINPKG_COMPRESS_FLAGS}",
+ "decompress": "gzip -d",
+ "package": "app-arch/gzip",
+ },
+ "lz4": {
+ "compress": "lz4 ${BINPKG_COMPRESS_FLAGS}",
+ "decompress": "lz4 -d",
+ "package": "app-arch/lz4",
+ },
+ "lzip": {
+ "compress": "lzip ${BINPKG_COMPRESS_FLAGS}",
+ "decompress": "lzip -d",
+ "package": "app-arch/lzip",
+ },
+ "lzop": {
+ "compress": "lzop ${BINPKG_COMPRESS_FLAGS}",
+ "decompress": "lzop -d",
+ "package": "app-arch/lzop",
+ },
+ "xz": {
+ "compress": "xz ${BINPKG_COMPRESS_FLAGS}",
+ "decompress": "xz -d",
+ "package": "app-arch/xz-utils",
+ },
+ "zstd": {
+ "compress": "zstd ${BINPKG_COMPRESS_FLAGS}",
+ "decompress": "zstd -d",
+ "package": "app-arch/zstd",
+ },
+}
+
+_compression_re = re.compile(b'^(' +
+ b'(?P<bzip2>\x42\x5a\x68\x39)|' +
+ b'(?P<gzip>\x1f\x8b)|' +
+ b'(?P<lz4>(?:\x04\x22\x4d\x18|\x02\x21\x4c\x18))|' +
+ b'(?P<lzip>LZIP)|' +
+ b'(?P<lzop>\x89LZO\x00\x0d\x0a\x1a\x0a)|' +
+ b'(?P<xz>\xfd\x37\x7a\x58\x5a\x00)|' +
+ b'(?P<zstd>([\x22-\x28]\xb5\x2f\xfd)))')
+
+_max_compression_re_len = 9
+
+def compression_probe(f):
+ """
+ Identify the compression type of a file. Returns one of the
+ following identifier strings:
+
+ bzip2
+ gzip
+ lz4
+ lzip
+ lzop
+ xz
+ zstd
+
+ @param f: a file path, or file-like object
+ @type f: str or file
+ @return: a string identifying the compression type, or None if the
+ compression type is unrecognized
+ @rtype: str or None
+ """
+
+ open_file = isinstance(f, basestring)
+ if open_file:
+ try:
+ f = open(_unicode_encode(f,
+ encoding=_encodings['fs'], errors='strict'), mode='rb')
+ except IOError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(f)
+ elif e.errno in (errno.ENOENT, errno.ESTALE):
+ raise FileNotFound(f)
+ else:
+ raise
+
+ try:
+ return _compression_probe_file(f)
+ finally:
+ if open_file:
+ f.close()
+
+def _compression_probe_file(f):
+
+ m = _compression_re.match(f.read(_max_compression_re_len))
+ if m is not None:
+ for k, v in m.groupdict().items():
+ if v is not None:
+ return k
+
+ return None
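
A usage sketch (not part of the patch): the returned key indexes the
_compressors table above to find a decompression command. The path is
hypothetical.

    from portage.util.compression_probe import _compressors, compression_probe

    kind = compression_probe("/var/cache/binpkgs/foo.tbz2")
    if kind is not None:
        print(kind, _compressors[kind]["decompress"])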
diff --git a/lib/portage/util/configparser.py b/lib/portage/util/configparser.py
new file mode 100644
index 000000000..c4c92a603
--- /dev/null
+++ b/lib/portage/util/configparser.py
@@ -0,0 +1,76 @@
+# Copyright 2016 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['ConfigParserError', 'NoOptionError', 'ParsingError',
+ 'RawConfigParser', 'SafeConfigParser', 'read_configs']
+
+# the following scary compatibility thing provides two classes:
+# - SafeConfigParser that provides safe interpolation for values,
+# - RawConfigParser that provides no interpolation for values.
+
+import io
+import sys
+
+try:
+ from configparser import (Error as ConfigParserError,
+ NoOptionError, ParsingError, RawConfigParser)
+ if sys.hexversion >= 0x3020000:
+ from configparser import ConfigParser as SafeConfigParser
+ else:
+ from configparser import SafeConfigParser
+except ImportError:
+ from ConfigParser import (Error as ConfigParserError,
+ NoOptionError, ParsingError, RawConfigParser, SafeConfigParser)
+
+from portage import _encodings
+from portage import _unicode_encode
+
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+
+def read_configs(parser, paths):
+ """
+ Read configuration files from given paths into the specified
+ ConfigParser, handling path encoding portably.
+ @param parser: target *ConfigParser instance
+ @type parser: SafeConfigParser or RawConfigParser
+ @param paths: list of paths to read
+ @type paths: iterable
+ """
+ # use read_file/readfp in order to control decoding of unicode
+ try:
+ # Python >=3.2
+ read_file = parser.read_file
+ source_kwarg = 'source'
+ except AttributeError:
+ read_file = parser.readfp
+ source_kwarg = 'filename'
+
+ for p in paths:
+ if isinstance(p, basestring):
+ f = None
+ try:
+ f = io.open(_unicode_encode(p,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['repo.content'],
+ errors='replace')
+ except EnvironmentError:
+ pass
+ else:
+ # The 'source' keyword argument is needed since otherwise
+ # ConfigParser in Python <3.3.3 may throw a TypeError
+ # because it assumes that f.name is a native string rather
+ # than binary when constructing error messages.
+ kwargs = {source_kwarg: p}
+ read_file(f, **kwargs)
+ finally:
+ if f is not None:
+ f.close()
+ elif isinstance(p, io.StringIO):
+ kwargs = {source_kwarg: "<io.StringIO>"}
+ read_file(p, **kwargs)
+ else:
+ raise TypeError("Unsupported type %r of element %r of 'paths' argument" % (type(p), p))
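
A usage sketch (not part of the patch); unreadable paths are silently skipped
by the code above:

    import io

    from portage.util.configparser import RawConfigParser, read_configs

    parser = RawConfigParser()
    read_configs(parser, [
        "/etc/portage/repos.conf",
        io.StringIO(u"[DEFAULT]\nmain-repo = gentoo\n"),
    ])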
diff --git a/lib/portage/util/cpuinfo.py b/lib/portage/util/cpuinfo.py
new file mode 100644
index 000000000..669e707b5
--- /dev/null
+++ b/lib/portage/util/cpuinfo.py
@@ -0,0 +1,18 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['get_cpu_count']
+
+
+def get_cpu_count():
+ """
+ Try to obtain the number of CPUs available.
+
+ @return: Number of CPUs or None if unable to obtain.
+ """
+
+ try:
+ import multiprocessing
+ return multiprocessing.cpu_count()
+ except (ImportError, NotImplementedError):
+ return None
diff --git a/lib/portage/util/digraph.py b/lib/portage/util/digraph.py
new file mode 100644
index 000000000..d279b7867
--- /dev/null
+++ b/lib/portage/util/digraph.py
@@ -0,0 +1,390 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = ['digraph']
+
+import bisect
+from collections import deque
+import sys
+
+from portage.util import writemsg
+
+class digraph(object):
+ """
+ A directed graph object.
+ """
+
+ def __init__(self):
+ """Create an empty digraph"""
+
+ # { node : ( { child : priority } , { parent : priority } ) }
+ self.nodes = {}
+ self.order = []
+
+ def add(self, node, parent, priority=0):
+ """Adds the specified node with the specified parent.
+
+ If the dep is a soft-dep and the node already has a hard
+ relationship to the parent, the relationship is left as hard."""
+
+ if node not in self.nodes:
+ self.nodes[node] = ({}, {}, node)
+ self.order.append(node)
+
+ if not parent:
+ return
+
+ if parent not in self.nodes:
+ self.nodes[parent] = ({}, {}, parent)
+ self.order.append(parent)
+
+ priorities = self.nodes[node][1].get(parent)
+ if priorities is None:
+ priorities = []
+ self.nodes[node][1][parent] = priorities
+ self.nodes[parent][0][node] = priorities
+
+ if not priorities or priorities[-1] is not priority:
+ bisect.insort(priorities, priority)
+
+ def discard(self, node):
+ """
+ Like remove(), except it doesn't raise KeyError if the
+ node doesn't exist.
+ """
+ try:
+ self.remove(node)
+ except KeyError:
+ pass
+
+ def remove(self, node):
+ """Removes the specified node from the digraph, also removing
+ any ties to other nodes in the digraph. Raises KeyError if the
+ node doesn't exist."""
+
+ if node not in self.nodes:
+ raise KeyError(node)
+
+ for parent in self.nodes[node][1]:
+ del self.nodes[parent][0][node]
+ for child in self.nodes[node][0]:
+ del self.nodes[child][1][node]
+
+ del self.nodes[node]
+ self.order.remove(node)
+
+ def update(self, other):
+ """
+ Add all nodes and edges from another digraph instance.
+ """
+ for node in other.order:
+ children, parents, node = other.nodes[node]
+ if parents:
+ for parent, priorities in parents.items():
+ for priority in priorities:
+ self.add(node, parent, priority=priority)
+ else:
+ self.add(node, None)
+
+ def clear(self):
+ """
+ Remove all nodes and edges.
+ """
+ self.nodes.clear()
+ del self.order[:]
+
+ def difference_update(self, t):
+ """
+ Remove all given nodes from the graph. This is more efficient
+ than multiple calls to the remove() method.
+ """
+ if isinstance(t, (list, tuple)) or \
+ not hasattr(t, "__contains__"):
+ t = frozenset(t)
+ order = []
+ for node in self.order:
+ if node not in t:
+ order.append(node)
+ continue
+ for parent in self.nodes[node][1]:
+ del self.nodes[parent][0][node]
+ for child in self.nodes[node][0]:
+ del self.nodes[child][1][node]
+ del self.nodes[node]
+ self.order = order
+
+ def has_edge(self, child, parent):
+ """
+ Return True if the given edge exists.
+ """
+ try:
+ return child in self.nodes[parent][0]
+ except KeyError:
+ return False
+
+ def remove_edge(self, child, parent):
+ """
+ Remove edge in the direction from child to parent. Note that it is
+ possible for a remaining edge to exist in the opposite direction.
+ Any endpoint vertices that become isolated will remain in the graph.
+ """
+
+ # Nothing should be modified when a KeyError is raised.
+ for k in parent, child:
+ if k not in self.nodes:
+ raise KeyError(k)
+
+ # Make sure the edge exists.
+ if child not in self.nodes[parent][0]:
+ raise KeyError(child)
+ if parent not in self.nodes[child][1]:
+ raise KeyError(parent)
+
+ # Remove the edge.
+ del self.nodes[child][1][parent]
+ del self.nodes[parent][0][child]
+
+ def __iter__(self):
+ return iter(self.order)
+
+ def contains(self, node):
+ """Checks if the digraph contains the given node"""
+ return node in self.nodes
+
+ def get(self, key, default=None):
+ node_data = self.nodes.get(key, self)
+ if node_data is self:
+ return default
+ return node_data[2]
+
+ def all_nodes(self):
+ """Return a list of all nodes in the graph"""
+ return self.order[:]
+
+ def child_nodes(self, node, ignore_priority=None):
+ """Return all children of the specified node"""
+ if ignore_priority is None:
+ return list(self.nodes[node][0])
+ children = []
+ if hasattr(ignore_priority, '__call__'):
+ for child, priorities in self.nodes[node][0].items():
+ for priority in reversed(priorities):
+ if not ignore_priority(priority):
+ children.append(child)
+ break
+ else:
+ for child, priorities in self.nodes[node][0].items():
+ if ignore_priority < priorities[-1]:
+ children.append(child)
+ return children
+
+ def parent_nodes(self, node, ignore_priority=None):
+ """Return all parents of the specified node"""
+ if ignore_priority is None:
+ return list(self.nodes[node][1])
+ parents = []
+ if hasattr(ignore_priority, '__call__'):
+ for parent, priorities in self.nodes[node][1].items():
+ for priority in reversed(priorities):
+ if not ignore_priority(priority):
+ parents.append(parent)
+ break
+ else:
+ for parent, priorities in self.nodes[node][1].items():
+ if ignore_priority < priorities[-1]:
+ parents.append(parent)
+ return parents
+
+ def leaf_nodes(self, ignore_priority=None):
+ """Return all nodes that have no children
+
+ Children reached only through edges filtered by ignore_priority
+ are not counted in this calculation."""
+
+ leaf_nodes = []
+ if ignore_priority is None:
+ for node in self.order:
+ if not self.nodes[node][0]:
+ leaf_nodes.append(node)
+ elif hasattr(ignore_priority, '__call__'):
+ for node in self.order:
+ is_leaf_node = True
+ for child, priorities in self.nodes[node][0].items():
+ for priority in reversed(priorities):
+ if not ignore_priority(priority):
+ is_leaf_node = False
+ break
+ if not is_leaf_node:
+ break
+ if is_leaf_node:
+ leaf_nodes.append(node)
+ else:
+ for node in self.order:
+ is_leaf_node = True
+ for child, priorities in self.nodes[node][0].items():
+ if ignore_priority < priorities[-1]:
+ is_leaf_node = False
+ break
+ if is_leaf_node:
+ leaf_nodes.append(node)
+ return leaf_nodes
+
+ def root_nodes(self, ignore_priority=None):
+ """Return all nodes that have no parents.
+
+ Parents reached only through edges filtered by ignore_priority
+ are not counted in this calculation."""
+
+ root_nodes = []
+ if ignore_priority is None:
+ for node in self.order:
+ if not self.nodes[node][1]:
+ root_nodes.append(node)
+ elif hasattr(ignore_priority, '__call__'):
+ for node in self.order:
+ is_root_node = True
+ for parent, priorities in self.nodes[node][1].items():
+ for priority in reversed(priorities):
+ if not ignore_priority(priority):
+ is_root_node = False
+ break
+ if not is_root_node:
+ break
+ if is_root_node:
+ root_nodes.append(node)
+ else:
+ for node in self.order:
+ is_root_node = True
+ for parent, priorities in self.nodes[node][1].items():
+ if ignore_priority < priorities[-1]:
+ is_root_node = False
+ break
+ if is_root_node:
+ root_nodes.append(node)
+ return root_nodes
+
+ def __bool__(self):
+ return bool(self.nodes)
+
+ def is_empty(self):
+ """Checks if the digraph is empty"""
+ return len(self.nodes) == 0
+
+ def clone(self):
+ clone = digraph()
+ clone.nodes = {}
+ memo = {}
+ for children, parents, node in self.nodes.values():
+ children_clone = {}
+ for child, priorities in children.items():
+ priorities_clone = memo.get(id(priorities))
+ if priorities_clone is None:
+ priorities_clone = priorities[:]
+ memo[id(priorities)] = priorities_clone
+ children_clone[child] = priorities_clone
+ parents_clone = {}
+ for parent, priorities in parents.items():
+ priorities_clone = memo.get(id(priorities))
+ if priorities_clone is None:
+ priorities_clone = priorities[:]
+ memo[id(priorities)] = priorities_clone
+ parents_clone[parent] = priorities_clone
+ clone.nodes[node] = (children_clone, parents_clone, node)
+ clone.order = self.order[:]
+ return clone
+
+ def delnode(self, node):
+ try:
+ self.remove(node)
+ except KeyError:
+ pass
+
+ def firstzero(self):
+ leaf_nodes = self.leaf_nodes()
+ if leaf_nodes:
+ return leaf_nodes[0]
+ return None
+
+ def hasallzeros(self, ignore_priority=None):
+ return len(self.leaf_nodes(ignore_priority=ignore_priority)) == \
+ len(self.order)
+
+ def debug_print(self):
+ def output(s):
+ writemsg(s, noiselevel=-1)
+ # Use unicode_literals to force unicode format
+ # strings for python-2.x safety, ensuring that
+ # node.__unicode__() is used when necessary.
+ for node in self.nodes:
+ output("%s " % (node,))
+ if self.nodes[node][0]:
+ output("depends on\n")
+ else:
+ output("(no children)\n")
+ for child, priorities in self.nodes[node][0].items():
+ output(" %s (%s)\n" % (child, priorities[-1],))
+
+ def bfs(self, start, ignore_priority=None):
+ if start not in self:
+ raise KeyError(start)
+
+ queue, enqueued = deque([(None, start)]), set([start])
+ while queue:
+ parent, n = queue.popleft()
+ yield parent, n
+ new = set(self.child_nodes(n, ignore_priority)) - enqueued
+ enqueued |= new
+ queue.extend([(n, child) for child in new])
+
+ def shortest_path(self, start, end, ignore_priority=None):
+ if start not in self:
+ raise KeyError(start)
+ elif end not in self:
+ raise KeyError(end)
+
+ paths = {None: []}
+ for parent, child in self.bfs(start, ignore_priority):
+ paths[child] = paths[parent] + [child]
+ if child == end:
+ return paths[child]
+ return None
+
+ def get_cycles(self, ignore_priority=None, max_length=None):
+ """
+ Returns all cycles that have at most length 'max_length'.
+ If 'max_length' is 'None', all cycles are returned.
+ """
+ all_cycles = []
+ for node in self.nodes:
+ # If we have multiple paths of the same length, we have to
+ # return them all, so that we always get the same results
+ # even with PYTHONHASHSEED="random" enabled.
+ shortest_path = None
+ candidates = []
+ for child in self.child_nodes(node, ignore_priority):
+ path = self.shortest_path(child, node, ignore_priority)
+ if path is None:
+ continue
+ if not shortest_path or len(shortest_path) >= len(path):
+ shortest_path = path
+ candidates.append(path)
+ if shortest_path and \
+ (not max_length or len(shortest_path) <= max_length):
+ for path in candidates:
+ if len(path) == len(shortest_path):
+ all_cycles.append(path)
+ return all_cycles
+
+ # Backward compatibility
+ addnode = add
+ allnodes = all_nodes
+ allzeros = leaf_nodes
+ hasnode = contains
+ __contains__ = contains
+ empty = is_empty
+ copy = clone
+
+ if sys.hexversion < 0x3000000:
+ __nonzero__ = __bool__
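
A small usage sketch (not part of the patch), using the add(), leaf_nodes(),
root_nodes() and shortest_path() API defined above:

    from portage.util.digraph import digraph

    g = digraph()
    g.add("child", "parent")        # registers the edge parent -> child
    print(g.leaf_nodes())           # ['child']  (it has no children)
    print(g.root_nodes())           # ['parent'] (it has no parents)
    print(g.shortest_path("parent", "child"))   # ['parent', 'child']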
diff --git a/lib/portage/util/elf/__init__.py b/lib/portage/util/elf/__init__.py
new file mode 100644
index 000000000..4725d3317
--- /dev/null
+++ b/lib/portage/util/elf/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/util/elf/constants.py b/lib/portage/util/elf/constants.py
new file mode 100644
index 000000000..2704e85c3
--- /dev/null
+++ b/lib/portage/util/elf/constants.py
@@ -0,0 +1,46 @@
+# Copyright 2015-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+#
+# These constants are available from elfutils:
+# https://sourceware.org/git/?p=elfutils.git;a=blob;f=libelf/elf.h;hb=HEAD
+
+EI_CLASS = 4
+ELFCLASS32 = 1
+ELFCLASS64 = 2
+
+EI_DATA = 5
+ELFDATA2LSB = 1
+ELFDATA2MSB = 2
+
+E_TYPE = 16
+ET_REL = 1
+ET_EXEC = 2
+ET_DYN = 3
+ET_CORE = 4
+
+E_MACHINE = 18
+EM_SPARC = 2
+EM_386 = 3
+EM_68K = 4
+EM_MIPS = 8
+EM_PARISC = 15
+EM_SPARC32PLUS = 18
+EM_PPC = 20
+EM_PPC64 = 21
+EM_S390 = 22
+EM_ARM = 40
+EM_SH = 42
+EM_SPARCV9 = 43
+EM_IA_64 = 50
+EM_X86_64 = 62
+EM_ALTERA_NIOS2 = 113
+EM_AARCH64 = 183
+EM_ALPHA = 0x9026
+
+E_ENTRY = 24
+EF_MIPS_ABI = 0x0000f000
+EF_MIPS_ABI2 = 0x00000020
+E_MIPS_ABI_O32 = 0x00001000
+E_MIPS_ABI_O64 = 0x00002000
+E_MIPS_ABI_EABI32 = 0x00003000
+E_MIPS_ABI_EABI64 = 0x00004000
diff --git a/lib/portage/util/elf/header.py b/lib/portage/util/elf/header.py
new file mode 100644
index 000000000..3d2307402
--- /dev/null
+++ b/lib/portage/util/elf/header.py
@@ -0,0 +1,65 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.util.endian.decode import (decode_uint16_le,
+ decode_uint32_le, decode_uint16_be, decode_uint32_be)
+from portage.util.elf.constants import (E_ENTRY, E_MACHINE, E_TYPE,
+ EI_CLASS, ELFCLASS32, ELFCLASS64, ELFDATA2LSB, ELFDATA2MSB)
+
+class ELFHeader(object):
+
+ __slots__ = ('e_flags', 'e_machine', 'e_type', 'ei_class',
+ 'ei_data')
+
+ @classmethod
+ def read(cls, f):
+ """
+ @param f: an open ELF file
+ @type f: file
+ @rtype: ELFHeader
+ @return: A new ELFHeader instance containing data from f
+ """
+ f.seek(EI_CLASS)
+ ei_class = ord(f.read(1))
+ ei_data = ord(f.read(1))
+
+ if ei_class == ELFCLASS32:
+ width = 32
+ elif ei_class == ELFCLASS64:
+ width = 64
+ else:
+ width = None
+
+ if ei_data == ELFDATA2LSB:
+ uint16 = decode_uint16_le
+ uint32 = decode_uint32_le
+ elif ei_data == ELFDATA2MSB:
+ uint16 = decode_uint16_be
+ uint32 = decode_uint32_be
+ else:
+ uint16 = None
+ uint32 = None
+
+ if width is None or uint16 is None:
+ e_flags = None
+ e_machine = None
+ e_type = None
+ else:
+ f.seek(E_TYPE)
+ e_type = uint16(f.read(2))
+ f.seek(E_MACHINE)
+ e_machine = uint16(f.read(2))
+
+ # E_ENTRY + 3 * sizeof(uintN)
+ e_flags_offset = E_ENTRY + 3 * width // 8
+ f.seek(e_flags_offset)
+ e_flags = uint32(f.read(4))
+
+ obj = cls()
+ obj.e_flags = e_flags
+ obj.e_machine = e_machine
+ obj.e_type = e_type
+ obj.ei_class = ei_class
+ obj.ei_data = ei_data
+
+ return obj
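
A short usage sketch for ELFHeader.read(); the binary path is illustrative and the file must be opened in binary mode:

    from portage.util.elf.constants import EM_X86_64, ET_DYN, ET_EXEC
    from portage.util.elf.header import ELFHeader

    with open("/bin/ls", "rb") as f:
        header = ELFHeader.read(f)

    print(header.ei_class, header.ei_data)      # e.g. 2 (64-bit), 1 (LSB)
    print(header.e_type in (ET_EXEC, ET_DYN))   # True for an executable
    print(header.e_machine == EM_X86_64)        # True on a typical amd64 host
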
diff --git a/lib/portage/util/endian/__init__.py b/lib/portage/util/endian/__init__.py
new file mode 100644
index 000000000..4725d3317
--- /dev/null
+++ b/lib/portage/util/endian/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/util/endian/decode.py b/lib/portage/util/endian/decode.py
new file mode 100644
index 000000000..9833b53ca
--- /dev/null
+++ b/lib/portage/util/endian/decode.py
@@ -0,0 +1,48 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import struct
+
+def decode_uint16_be(data):
+ """
+ Decode an unsigned 16-bit integer with big-endian encoding.
+
+ @param data: string of bytes of length 2
+ @type data: bytes
+ @rtype: int
+ @return: unsigned integer value of the decoded data
+ """
+ return struct.unpack_from(">H", data)[0]
+
+def decode_uint16_le(data):
+ """
+ Decode an unsigned 16-bit integer with little-endian encoding.
+
+ @param data: string of bytes of length 2
+ @type data: bytes
+ @rtype: int
+ @return: unsigned integer value of the decoded data
+ """
+ return struct.unpack_from("<H", data)[0]
+
+def decode_uint32_be(data):
+ """
+ Decode an unsigned 32-bit integer with big-endian encoding.
+
+ @param data: string of bytes of length 4
+ @type data: bytes
+ @rtype: int
+ @return: unsigned integer value of the decoded data
+ """
+ return struct.unpack_from(">I", data)[0]
+
+def decode_uint32_le(data):
+ """
+ Decode an unsigned 32-bit integer with little-endian encoding.
+
+ @param data: string of bytes of length 4
+ @type data: bytes
+ @rtype: int
+ @return: unsigned integer value of the decoded data
+ """
+ return struct.unpack_from("<I", data)[0]
diff --git a/lib/portage/util/env_update.py b/lib/portage/util/env_update.py
new file mode 100644
index 000000000..032101043
--- /dev/null
+++ b/lib/portage/util/env_update.py
@@ -0,0 +1,365 @@
+# Copyright 2010-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['env_update']
+
+import errno
+import glob
+import io
+import stat
+import sys
+import time
+
+import portage
+from portage import os, _encodings, _unicode_decode, _unicode_encode
+from portage.checksum import prelink_capable
+from portage.data import ostype
+from portage.exception import ParseError
+from portage.localization import _
+from portage.process import find_binary
+from portage.util import atomic_ofstream, ensure_dirs, getconfig, \
+ normalize_path, writemsg
+from portage.util.listdir import listdir
+from portage.dbapi.vartree import vartree
+from portage.package.ebuild.config import config
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ long = int
+
+def env_update(makelinks=1, target_root=None, prev_mtimes=None, contents=None,
+ env=None, writemsg_level=None, vardbapi=None):
+ """
+ Parse /etc/env.d and use it to generate /etc/profile.env, csh.env,
+ ld.so.conf, and prelink.conf. Finally, run ldconfig. When ldconfig is
+ called, its -X option will be used in order to avoid potential
+ interference with installed soname symlinks that are required for
+ correct operation of FEATURES=preserve-libs for downgrade operations.
+ It's not necessary for ldconfig to create soname symlinks, since
+ portage will use NEEDED.ELF.2 data to automatically create them
+ after src_install if they happen to be missing.
+ @param makelinks: True if ldconfig should be called, False otherwise
+ @param target_root: root that is passed to the ldconfig -r option,
+ defaults to portage.settings["ROOT"].
+ @type target_root: String (Path)
+ """
+ if vardbapi is None:
+ if isinstance(env, config):
+ vardbapi = vartree(settings=env).dbapi
+ else:
+ if target_root is None:
+ eprefix = portage.settings["EPREFIX"]
+ target_root = portage.settings["ROOT"]
+ target_eroot = portage.settings['EROOT']
+ else:
+ eprefix = portage.const.EPREFIX
+ target_eroot = os.path.join(target_root,
+ eprefix.lstrip(os.sep))
+ target_eroot = target_eroot.rstrip(os.sep) + os.sep
+ if hasattr(portage, "db") and target_eroot in portage.db:
+ vardbapi = portage.db[target_eroot]["vartree"].dbapi
+ else:
+ settings = config(config_root=target_root,
+ target_root=target_root, eprefix=eprefix)
+ target_root = settings["ROOT"]
+ if env is None:
+ env = settings
+ vardbapi = vartree(settings=settings).dbapi
+
+ # Lock the config memory file to prevent symlink creation
+ # in merge_contents from overlapping with env-update.
+ vardbapi._fs_lock()
+ try:
+ return _env_update(makelinks, target_root, prev_mtimes, contents,
+ env, writemsg_level)
+ finally:
+ vardbapi._fs_unlock()
+
+def _env_update(makelinks, target_root, prev_mtimes, contents, env,
+ writemsg_level):
+ if writemsg_level is None:
+ writemsg_level = portage.util.writemsg_level
+ if target_root is None:
+ target_root = portage.settings["ROOT"]
+ if prev_mtimes is None:
+ prev_mtimes = portage.mtimedb["ldpath"]
+ if env is None:
+ settings = portage.settings
+ else:
+ settings = env
+
+ eprefix = settings.get("EPREFIX", "")
+ eprefix_lstrip = eprefix.lstrip(os.sep)
+ eroot = normalize_path(os.path.join(target_root, eprefix_lstrip)).rstrip(os.sep) + os.sep
+ envd_dir = os.path.join(eroot, "etc", "env.d")
+ ensure_dirs(envd_dir, mode=0o755)
+ fns = listdir(envd_dir, EmptyOnError=1)
+ fns.sort()
+ templist = []
+ for x in fns:
+ if len(x) < 3:
+ continue
+ if not x[0].isdigit() or not x[1].isdigit():
+ continue
+ if x.startswith(".") or x.endswith("~") or x.endswith(".bak"):
+ continue
+ templist.append(x)
+ fns = templist
+ del templist
+
+ space_separated = set(["CONFIG_PROTECT", "CONFIG_PROTECT_MASK"])
+ colon_separated = set(["ADA_INCLUDE_PATH", "ADA_OBJECTS_PATH",
+ "CLASSPATH", "INFODIR", "INFOPATH", "KDEDIRS", "LDPATH", "MANPATH",
+ "PATH", "PKG_CONFIG_PATH", "PRELINK_PATH", "PRELINK_PATH_MASK",
+ "PYTHONPATH", "ROOTPATH"])
+
+ config_list = []
+
+ for x in fns:
+ file_path = os.path.join(envd_dir, x)
+ try:
+ myconfig = getconfig(file_path, expand=False)
+ except ParseError as e:
+ writemsg("!!! '%s'\n" % str(e), noiselevel=-1)
+ del e
+ continue
+ if myconfig is None:
+ # broken symlink or file removed by a concurrent process
+ writemsg("!!! File Not Found: '%s'\n" % file_path, noiselevel=-1)
+ continue
+
+ config_list.append(myconfig)
+ if "SPACE_SEPARATED" in myconfig:
+ space_separated.update(myconfig["SPACE_SEPARATED"].split())
+ del myconfig["SPACE_SEPARATED"]
+ if "COLON_SEPARATED" in myconfig:
+ colon_separated.update(myconfig["COLON_SEPARATED"].split())
+ del myconfig["COLON_SEPARATED"]
+
+ env = {}
+ specials = {}
+ for var in space_separated:
+ mylist = []
+ for myconfig in config_list:
+ if var in myconfig:
+ for item in myconfig[var].split():
+ if item and not item in mylist:
+ mylist.append(item)
+ del myconfig[var] # prepare for env.update(myconfig)
+ if mylist:
+ env[var] = " ".join(mylist)
+ specials[var] = mylist
+
+ for var in colon_separated:
+ mylist = []
+ for myconfig in config_list:
+ if var in myconfig:
+ for item in myconfig[var].split(":"):
+ if item and not item in mylist:
+ mylist.append(item)
+ del myconfig[var] # prepare for env.update(myconfig)
+ if mylist:
+ env[var] = ":".join(mylist)
+ specials[var] = mylist
+
+ for myconfig in config_list:
+ """Cumulative variables have already been deleted from myconfig so that
+ they won't be overwritten by this dict.update call."""
+ env.update(myconfig)
+
+ ldsoconf_path = os.path.join(eroot, "etc", "ld.so.conf")
+ try:
+ myld = io.open(_unicode_encode(ldsoconf_path,
+ encoding=_encodings['fs'], errors='strict'),
+ mode='r', encoding=_encodings['content'], errors='replace')
+ myldlines = myld.readlines()
+ myld.close()
+ oldld = []
+ for x in myldlines:
+ #each line has at least one char (a newline)
+ if x[:1] == "#":
+ continue
+ oldld.append(x[:-1])
+ except (IOError, OSError) as e:
+ if e.errno != errno.ENOENT:
+ raise
+ oldld = None
+
+ newld = specials["LDPATH"]
+ if (oldld != newld):
+ #ld.so.conf needs updating and ldconfig needs to be run
+ myfd = atomic_ofstream(ldsoconf_path)
+ myfd.write("# ld.so.conf autogenerated by env-update; make all changes to\n")
+ myfd.write("# contents of /etc/env.d directory\n")
+ for x in specials["LDPATH"]:
+ myfd.write(x + "\n")
+ myfd.close()
+
+ potential_lib_dirs = set()
+ for lib_dir_glob in ('usr/lib*', 'lib*'):
+ x = os.path.join(eroot, lib_dir_glob)
+ for y in glob.glob(_unicode_encode(x,
+ encoding=_encodings['fs'], errors='strict')):
+ try:
+ y = _unicode_decode(y,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ if os.path.basename(y) != 'libexec':
+ potential_lib_dirs.add(y[len(eroot):])
+
+ # Update prelink.conf if we are prelink-enabled
+ if prelink_capable:
+ prelink_d = os.path.join(eroot, 'etc', 'prelink.conf.d')
+ ensure_dirs(prelink_d)
+ newprelink = atomic_ofstream(os.path.join(prelink_d, 'portage.conf'))
+ newprelink.write("# prelink.conf autogenerated by env-update; make all changes to\n")
+ newprelink.write("# contents of /etc/env.d directory\n")
+
+ for x in sorted(potential_lib_dirs) + ['bin', 'sbin']:
+ newprelink.write('-l /%s\n' % (x,))
+ prelink_paths = set()
+ prelink_paths |= set(specials.get('LDPATH', []))
+ prelink_paths |= set(specials.get('PATH', []))
+ prelink_paths |= set(specials.get('PRELINK_PATH', []))
+ prelink_path_mask = specials.get('PRELINK_PATH_MASK', [])
+ for x in prelink_paths:
+ if not x:
+ continue
+ if x[-1:] != '/':
+ x += "/"
+ plmasked = 0
+ for y in prelink_path_mask:
+ if not y:
+ continue
+ if y[-1] != '/':
+ y += "/"
+ if y == x[0:len(y)]:
+ plmasked = 1
+ break
+ if not plmasked:
+ newprelink.write("-h %s\n" % (x,))
+ for x in prelink_path_mask:
+ newprelink.write("-b %s\n" % (x,))
+ newprelink.close()
+
+ # Migration code path. If /etc/prelink.conf was generated by us, then
+ # point it to the new stuff until the prelink package re-installs.
+ prelink_conf = os.path.join(eroot, 'etc', 'prelink.conf')
+ try:
+ with open(_unicode_encode(prelink_conf,
+ encoding=_encodings['fs'], errors='strict'), 'rb') as f:
+ if f.readline() == b'# prelink.conf autogenerated by env-update; make all changes to\n':
+ f = atomic_ofstream(prelink_conf)
+ f.write('-c /etc/prelink.conf.d/*.conf\n')
+ f.close()
+ except IOError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+ current_time = long(time.time())
+ mtime_changed = False
+
+ lib_dirs = set()
+ for lib_dir in set(specials['LDPATH']) | potential_lib_dirs:
+ x = os.path.join(eroot, lib_dir.lstrip(os.sep))
+ try:
+ newldpathtime = os.stat(x)[stat.ST_MTIME]
+ lib_dirs.add(normalize_path(x))
+ except OSError as oe:
+ if oe.errno == errno.ENOENT:
+ try:
+ del prev_mtimes[x]
+ except KeyError:
+ pass
+ # ignore this path because it doesn't exist
+ continue
+ raise
+ if newldpathtime == current_time:
+ # Reset mtime to avoid the potential ambiguity of times that
+ # differ by less than 1 second.
+ newldpathtime -= 1
+ os.utime(x, (newldpathtime, newldpathtime))
+ prev_mtimes[x] = newldpathtime
+ mtime_changed = True
+ elif x in prev_mtimes:
+ if prev_mtimes[x] == newldpathtime:
+ pass
+ else:
+ prev_mtimes[x] = newldpathtime
+ mtime_changed = True
+ else:
+ prev_mtimes[x] = newldpathtime
+ mtime_changed = True
+
+ if makelinks and \
+ not mtime_changed and \
+ contents is not None:
+ libdir_contents_changed = False
+ for mypath, mydata in contents.items():
+ if mydata[0] not in ("obj", "sym"):
+ continue
+ head, tail = os.path.split(mypath)
+ if head in lib_dirs:
+ libdir_contents_changed = True
+ break
+ if not libdir_contents_changed:
+ makelinks = False
+
+ if "CHOST" in settings and "CBUILD" in settings and \
+ settings["CHOST"] != settings["CBUILD"]:
+ ldconfig = find_binary("%s-ldconfig" % settings["CHOST"])
+ else:
+ ldconfig = os.path.join(eroot, "sbin", "ldconfig")
+
+ if ldconfig is None:
+ pass
+ elif not (os.access(ldconfig, os.X_OK) and os.path.isfile(ldconfig)):
+ ldconfig = None
+
+ # Only run ldconfig as needed
+ if makelinks and ldconfig:
+ # ldconfig has very different behaviour between FreeBSD and Linux
+ if ostype == "Linux" or ostype.lower().endswith("gnu"):
+ # We can't update links if we haven't cleaned other versions first, as
+ # an older package installed ON TOP of a newer version will cause ldconfig
+ # to overwrite the symlinks we just made. -X means no links. After 'clean'
+ # we can safely create links.
+ writemsg_level(_(">>> Regenerating %setc/ld.so.cache...\n") % \
+ (target_root,))
+ os.system("cd / ; %s -X -r '%s'" % (ldconfig, target_root))
+ elif ostype in ("FreeBSD", "DragonFly"):
+ writemsg_level(_(">>> Regenerating %svar/run/ld-elf.so.hints...\n") % \
+ target_root)
+ os.system(("cd / ; %s -elf -i " + \
+ "-f '%svar/run/ld-elf.so.hints' '%setc/ld.so.conf'") % \
+ (ldconfig, target_root, target_root))
+
+ del specials["LDPATH"]
+
+ penvnotice = "# THIS FILE IS AUTOMATICALLY GENERATED BY env-update.\n"
+ penvnotice += "# DO NOT EDIT THIS FILE. CHANGES TO STARTUP PROFILES\n"
+ cenvnotice = penvnotice[:]
+ penvnotice += "# GO INTO /etc/profile NOT /etc/profile.env\n\n"
+ cenvnotice += "# GO INTO /etc/csh.cshrc NOT /etc/csh.env\n\n"
+
+ #create /etc/profile.env for bash support
+ outfile = atomic_ofstream(os.path.join(eroot, "etc", "profile.env"))
+ outfile.write(penvnotice)
+
+ env_keys = [x for x in env if x != "LDPATH"]
+ env_keys.sort()
+ for k in env_keys:
+ v = env[k]
+ if v.startswith('$') and not v.startswith('${'):
+ outfile.write("export %s=$'%s'\n" % (k, v[1:]))
+ else:
+ outfile.write("export %s='%s'\n" % (k, v))
+ outfile.close()
+
+ #create /etc/csh.env for (t)csh support
+ outfile = atomic_ofstream(os.path.join(eroot, "etc", "csh.env"))
+ outfile.write(cenvnotice)
+ for x in env_keys:
+ outfile.write("setenv %s '%s'\n" % (x, env[x]))
+ outfile.close()
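
A sketch of the inputs this module consumes: a hypothetical /etc/env.d/60test containing

    LDPATH="/opt/test/lib"
    PATH="/opt/test/bin"
    COLON_SEPARATED="MY_SEARCH_PATH"
    MY_SEARCH_PATH="/opt/test/share"

would add /opt/test/lib to the generated ld.so.conf, append /opt/test/bin to the PATH exported from /etc/profile.env, and register MY_SEARCH_PATH as an additional colon-separated cumulative variable. Triggering the regeneration from Python is a one-liner (normally portage does this itself after merges, and it requires write access to /etc):

    import portage
    from portage.util.env_update import env_update

    env_update(makelinks=1, target_root=portage.settings["ROOT"])
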
diff --git a/lib/portage/util/file_copy/__init__.py b/lib/portage/util/file_copy/__init__.py
new file mode 100644
index 000000000..3d9b745be
--- /dev/null
+++ b/lib/portage/util/file_copy/__init__.py
@@ -0,0 +1,36 @@
+# Copyright 2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+import shutil
+import tempfile
+
+try:
+ from portage.util.file_copy.reflink_linux import file_copy as _file_copy
+except ImportError:
+ _file_copy = None
+
+
+def _optimized_copyfile(src, dst):
+ """
+ Copy the contents (no metadata) of the file named src to a file
+ named dst.
+
+ If possible, copying is done within the kernel, and uses
+ "copy acceleration" techniques (such as reflinks). This also
+ supports sparse files.
+
+ @param src: path of source file
+ @type src: str
+ @param dst: path of destination file
+ @type dst: str
+ """
+ with open(src, 'rb', buffering=0) as src_file, \
+ open(dst, 'wb', buffering=0) as dst_file:
+ _file_copy(src_file.fileno(), dst_file.fileno())
+
+
+if _file_copy is None:
+ copyfile = shutil.copyfile
+else:
+ copyfile = _optimized_copyfile
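
copyfile is intended as a drop-in replacement for shutil.copyfile whenever the optional reflink_linux extension is importable; the paths below are illustrative:

    from portage.util.file_copy import copyfile

    # Copies file contents only (no metadata), using reflink/sparse-aware
    # kernel copying when the native extension is available.
    copyfile("/tmp/src.bin", "/tmp/dst.bin")
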
diff --git a/lib/portage/util/formatter.py b/lib/portage/util/formatter.py
new file mode 100644
index 000000000..ce6799e3f
--- /dev/null
+++ b/lib/portage/util/formatter.py
@@ -0,0 +1,69 @@
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+#
+# This is a minimalistic derivation of Python's deprecated formatter module,
+# supporting only the methods related to style, literal data, and line breaks.
+
+import sys
+
+
+class AbstractFormatter(object):
+ """The standard formatter."""
+
+ def __init__(self, writer):
+ self.writer = writer # Output device
+ self.style_stack = [] # Other state, e.g. color
+ self.hard_break = True # Have a hard break
+
+ def add_line_break(self):
+ if not self.hard_break:
+ self.writer.send_line_break()
+ self.hard_break = True
+
+ def add_literal_data(self, data):
+ if not data: return
+ self.hard_break = data[-1:] == '\n'
+ self.writer.send_literal_data(data)
+
+ def push_style(self, *styles):
+ for style in styles:
+ self.style_stack.append(style)
+ self.writer.new_styles(tuple(self.style_stack))
+
+ def pop_style(self, n=1):
+ del self.style_stack[-n:]
+ self.writer.new_styles(tuple(self.style_stack))
+
+
+class NullWriter(object):
+ """Minimal writer interface to use in testing & inheritance.
+
+ A writer which only provides the interface definition; no actions are
+ taken on any methods. This should be the base class for all writers
+ which do not need to inherit any implementation methods.
+ """
+ def __init__(self): pass
+ def flush(self): pass
+ def new_styles(self, styles): pass
+ def send_line_break(self): pass
+ def send_literal_data(self, data): pass
+
+
+class DumbWriter(NullWriter):
+ """Simple writer class which writes output on the file object passed in
+ as the file parameter or, if file is omitted, on standard output.
+ """
+
+ def __init__(self, file=None, maxcol=None):
+ NullWriter.__init__(self)
+ self.file = file or sys.stdout
+
+ def flush(self):
+ self.file.flush()
+
+ def send_line_break(self):
+ self.file.write('\n')
+
+ def send_literal_data(self, data):
+ self.file.write(data)
+
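
A minimal sketch of the writer/formatter pair; DumbWriter ignores style changes and simply writes to stdout:

    from portage.util.formatter import AbstractFormatter, DumbWriter

    formatter = AbstractFormatter(DumbWriter())
    formatter.push_style("BAD")          # styles are tracked but ignored here
    formatter.add_literal_data("hello")
    formatter.pop_style()
    formatter.add_line_break()           # emits the trailing newline
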
diff --git a/lib/portage/util/futures/__init__.py b/lib/portage/util/futures/__init__.py
new file mode 100644
index 000000000..bdeac90d5
--- /dev/null
+++ b/lib/portage/util/futures/__init__.py
@@ -0,0 +1,8 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'asyncio',
+)
+
+from portage.util.futures import _asyncio as asyncio
diff --git a/lib/portage/util/futures/_asyncio/__init__.py b/lib/portage/util/futures/_asyncio/__init__.py
new file mode 100644
index 000000000..acfd59396
--- /dev/null
+++ b/lib/portage/util/futures/_asyncio/__init__.py
@@ -0,0 +1,185 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'ALL_COMPLETED',
+ 'FIRST_COMPLETED',
+ 'FIRST_EXCEPTION',
+ 'ensure_future',
+ 'CancelledError',
+ 'Future',
+ 'InvalidStateError',
+ 'TimeoutError',
+ 'get_child_watcher',
+ 'get_event_loop',
+ 'set_child_watcher',
+ 'get_event_loop_policy',
+ 'set_event_loop_policy',
+ 'sleep',
+ 'Task',
+ 'wait',
+)
+
+try:
+ import asyncio as _real_asyncio
+except ImportError:
+ _real_asyncio = None
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.util.futures.unix_events:_PortageEventLoopPolicy',
+)
+from portage.util._eventloop.asyncio_event_loop import AsyncioEventLoop as _AsyncioEventLoop
+from portage.util._eventloop.global_event_loop import (
+ _asyncio_enabled,
+ global_event_loop as _global_event_loop,
+)
+from portage.util.futures.futures import (
+ CancelledError,
+ Future,
+ InvalidStateError,
+ TimeoutError,
+)
+from portage.util.futures._asyncio.tasks import (
+ ALL_COMPLETED,
+ FIRST_COMPLETED,
+ FIRST_EXCEPTION,
+ wait,
+)
+
+
+_lock = threading.Lock()
+_policy = None
+
+
+def get_event_loop_policy():
+ """
+ Get the current event loop policy.
+
+ @rtype: asyncio.AbstractEventLoopPolicy (or compatible)
+ @return: the current event loop policy
+ """
+ global _lock, _policy
+ with _lock:
+ if _policy is None:
+ _policy = _PortageEventLoopPolicy()
+ return _policy
+
+
+def set_event_loop_policy(policy):
+ """
+ Set the current event loop policy. If policy is None, the default
+ policy is restored.
+
+ @type policy: asyncio.AbstractEventLoopPolicy or None
+ @param policy: new event loop policy
+ """
+ global _lock, _policy
+ with _lock:
+ _policy = policy or _PortageEventLoopPolicy()
+
+
+def get_event_loop():
+ """
+ Equivalent to calling get_event_loop_policy().get_event_loop().
+
+ @rtype: asyncio.AbstractEventLoop (or compatible)
+ @return: the event loop for the current context
+ """
+ return get_event_loop_policy().get_event_loop()
+
+
+def get_child_watcher():
+ """Equivalent to calling get_event_loop_policy().get_child_watcher()."""
+ return get_event_loop_policy().get_child_watcher()
+
+
+def set_child_watcher(watcher):
+ """Equivalent to calling
+ get_event_loop_policy().set_child_watcher(watcher)."""
+ return get_event_loop_policy().set_child_watcher(watcher)
+
+
+class Task(Future):
+ """
+ Schedule the execution of a coroutine: wrap it in a future. A task
+ is a subclass of Future.
+ """
+ def __init__(self, coro, loop=None):
+ raise NotImplementedError
+
+
+def ensure_future(coro_or_future, loop=None):
+ """
+ Wrap a coroutine or an awaitable in a future.
+
+ If the argument is a Future, it is returned directly.
+
+ @type coro_or_future: coroutine or Future
+ @param coro_or_future: coroutine or future to wrap
+ @type loop: asyncio.AbstractEventLoop (or compatible)
+ @param loop: event loop
+ @rtype: asyncio.Future (or compatible)
+ @return: an instance of Future
+ """
+ if isinstance(coro_or_future, Future):
+ return coro_or_future
+ raise NotImplementedError
+
+
+def sleep(delay, result=None, loop=None):
+ """
+ Create a future that completes after a given time (in seconds). If
+ result is provided, it is produced to the caller when the future
+ completes.
+
+ @type delay: int or float
+ @param delay: delay seconds
+ @type result: object
+ @param result: result of the future
+ @type loop: asyncio.AbstractEventLoop (or compatible)
+ @param loop: event loop
+ @rtype: asyncio.Future (or compatible)
+ @return: an instance of Future
+ """
+ loop = _wrap_loop(loop)
+ future = loop.create_future()
+ handle = loop.call_later(delay, future.set_result, result)
+ def cancel_callback(future):
+ if future.cancelled():
+ handle.cancel()
+ future.add_done_callback(cancel_callback)
+ return future
+
+
+def _wrap_loop(loop=None):
+ """
+ In order to deal with asyncio event loop compatibility issues,
+ use this function to wrap the loop parameter for functions
+ that support it. For example, since python3.4 does not have the
+ AbstractEventLoop.create_future() method, this helper function
+ can be used to add a wrapper that implements the create_future
+ method for python3.4.
+
+ @type loop: asyncio.AbstractEventLoop (or compatible)
+ @param loop: event loop
+ @rtype: asyncio.AbstractEventLoop (or compatible)
+ @return: event loop
+ """
+ return loop or _global_event_loop()
+
+
+if _asyncio_enabled:
+ # The default loop returned by _wrap_loop should be consistent
+ # with global_event_loop, in order to avoid accidental registration
+ # of callbacks with a loop that is not intended to run.
+
+ def _wrap_loop(loop=None):
+ loop = loop or _global_event_loop()
+ return (loop if hasattr(loop, '_asyncio_wrapper')
+ else _AsyncioEventLoop(loop=loop))
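
A usage sketch of the compatibility layer; _wrap_loop() is the internal helper the surrounding modules use to obtain a compatible loop, so its use here is only illustrative:

    from portage.util.futures import asyncio

    loop = asyncio._wrap_loop()
    future = asyncio.sleep(0.1, result="done", loop=loop)
    print(loop.run_until_complete(future))   # prints "done"
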
diff --git a/lib/portage/util/futures/_asyncio/tasks.py b/lib/portage/util/futures/_asyncio/tasks.py
new file mode 100644
index 000000000..b20765b7a
--- /dev/null
+++ b/lib/portage/util/futures/_asyncio/tasks.py
@@ -0,0 +1,105 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'ALL_COMPLETED',
+ 'FIRST_COMPLETED',
+ 'FIRST_EXCEPTION',
+ 'wait',
+)
+
+try:
+ from asyncio import ALL_COMPLETED, FIRST_COMPLETED, FIRST_EXCEPTION
+except ImportError:
+ ALL_COMPLETED = 'ALL_COMPLETED'
+ FIRST_COMPLETED = 'FIRST_COMPLETED'
+ FIRST_EXCEPTION = 'FIRST_EXCEPTION'
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.util.futures:asyncio',
+)
+from portage.util._eventloop.global_event_loop import (
+ global_event_loop as _global_event_loop,
+)
+
+
+def wait(futures, loop=None, timeout=None, return_when=ALL_COMPLETED):
+ """
+ Use portage's internal EventLoop to emulate asyncio.wait:
+ https://docs.python.org/3/library/asyncio-task.html#asyncio.wait
+
+ @param futures: futures to wait for
+ @type futures: asyncio.Future (or compatible)
+ @param timeout: number of seconds to wait (wait indefinitely if
+ not specified)
+ @type timeout: int or float
+ @param return_when: indicates when this function should return, must
+ be one of the constants ALL_COMPLETED, FIRST_COMPLETED, or
+ FIRST_EXCEPTION (default is ALL_COMPLETED)
+ @type return_when: object
+ @param loop: event loop
+ @type loop: EventLoop
+ @return: a future which, when done, holds a tuple of (done, pending) sets
+ @rtype: asyncio.Future (or compatible)
+ """
+ loop = asyncio._wrap_loop(loop)
+ result_future = loop.create_future()
+ _Waiter(futures, timeout, return_when, result_future, loop)
+ return result_future
+
+
+class _Waiter(object):
+ def __init__(self, futures, timeout, return_when, result_future, loop):
+ self._futures = futures
+ self._completed = set()
+ self._exceptions = set()
+ self._return_when = return_when
+ self._result_future = result_future
+ self._loop = loop
+ self._ready = False
+ self._timeout = None
+ result_future.add_done_callback(self._cancel_callback)
+ for future in self._futures:
+ future.add_done_callback(self._done_callback)
+ if timeout is not None:
+ self._timeout = loop.call_later(timeout, self._timeout_callback)
+
+ def _cancel_callback(self, future):
+ if future.cancelled():
+ self._ready_callback()
+
+ def _timeout_callback(self):
+ if not self._ready:
+ self._ready = True
+ self._ready_callback()
+
+ def _done_callback(self, future):
+ if future.cancelled() or future.exception() is None:
+ self._completed.add(id(future))
+ else:
+ self._exceptions.add(id(future))
+ if not self._ready and (
+ (self._return_when is FIRST_COMPLETED and self._completed) or
+ (self._return_when is FIRST_EXCEPTION and self._exceptions) or
+ (len(self._futures) == len(self._completed) + len(self._exceptions))):
+ self._ready = True
+ # use call_soon in case multiple callbacks complete in quick succession
+ self._loop.call_soon(self._ready_callback)
+
+ def _ready_callback(self):
+ if self._timeout is not None:
+ self._timeout.cancel()
+ self._timeout = None
+ if self._result_future.cancelled():
+ return
+ done = []
+ pending = []
+ done_ids = self._completed.union(self._exceptions)
+ for future in self._futures:
+ if id(future) in done_ids:
+ done.append(future)
+ else:
+ pending.append(future)
+ future.remove_done_callback(self._done_callback)
+ self._result_future.set_result((set(done), set(pending)))
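
A sketch of the wait() emulation with two pre-created futures, one of which is resolved via call_soon so the loop has work to do:

    from portage.util.futures import asyncio
    from portage.util.futures._asyncio.tasks import FIRST_COMPLETED, wait

    loop = asyncio._wrap_loop()
    f1, f2 = loop.create_future(), loop.create_future()
    loop.call_soon(f1.set_result, 1)

    done, pending = loop.run_until_complete(
        wait([f1, f2], return_when=FIRST_COMPLETED, loop=loop))
    assert f1 in done and f2 in pending
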
diff --git a/lib/portage/util/futures/compat_coroutine.py b/lib/portage/util/futures/compat_coroutine.py
new file mode 100644
index 000000000..17400b74d
--- /dev/null
+++ b/lib/portage/util/futures/compat_coroutine.py
@@ -0,0 +1,112 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from portage.util.futures import asyncio
+import functools
+
+
+def coroutine(generator_func):
+ """
+ A decorator for a generator function that behaves as coroutine function.
+ The generator should yield a Future instance in order to wait for it,
+ and the result becomes the result of the current yield-expression,
+ via the PEP 342 generator send() method.
+
+ The decorated function returns a Future which is done when the generator
+ is exhausted. The generator can return a value via the coroutine_return
+ function.
+
+ @param generator_func: A generator function that yields Futures, and
+ will receive the result of each Future as the result of the
+ corresponding yield-expression.
+ @type generator_func: function
+ @rtype: function
+ @return: A function which calls the given generator function and
+ returns a Future that is done when the generator is exhausted.
+ """
+ # Note that functools.partial does not work for decoration of
+ # methods, since it doesn't implement the descriptor protocol.
+ # This problem is solved by defining a wrapper function.
+ @functools.wraps(generator_func)
+ def wrapped(*args, **kwargs):
+ return _generator_future(generator_func, *args, **kwargs)
+ return wrapped
+
+
+def coroutine_return(result=None):
+ """
+ Terminate the current coroutine and set the result of the associated
+ Future.
+
+ @param result: result of the current coroutine's Future
+ @type result: object
+ """
+ raise _CoroutineReturnValue(result)
+
+
+def _generator_future(generator_func, *args, **kwargs):
+ """
+ Call generator_func with the given arguments, and return a Future
+ that is done when the resulting generator is exhausted. If a
+ keyword argument named 'loop' is given, then it is used instead of
+ the default event loop.
+ """
+ loop = asyncio._wrap_loop(kwargs.get('loop'))
+ result = loop.create_future()
+ _GeneratorTask(generator_func(*args, **kwargs), result, loop=loop)
+ return result
+
+
+class _CoroutineReturnValue(Exception):
+ def __init__(self, result):
+ self.result = result
+
+
+class _GeneratorTask(object):
+ """
+ Asynchronously executes the generator to completion, waiting for
+ the result of each Future that it yields, and sending the result
+ to the generator.
+ """
+ def __init__(self, generator, result, loop):
+ self._generator = generator
+ self._result = result
+ self._loop = loop
+ result.add_done_callback(self._cancel_callback)
+ loop.call_soon(self._next)
+
+ def _cancel_callback(self, result):
+ if result.cancelled():
+ self._generator.close()
+
+ def _next(self, previous=None):
+ if self._result.cancelled():
+ if previous is not None:
+ # Consume exceptions, in order to avoid triggering
+ # the event loop's exception handler.
+ previous.cancelled() or previous.exception()
+ return
+ try:
+ if previous is None:
+ future = next(self._generator)
+ elif previous.cancelled():
+ self._generator.throw(asyncio.CancelledError())
+ future = next(self._generator)
+ elif previous.exception() is None:
+ future = self._generator.send(previous.result())
+ else:
+ self._generator.throw(previous.exception())
+ future = next(self._generator)
+
+ except _CoroutineReturnValue as e:
+ if not self._result.cancelled():
+ self._result.set_result(e.result)
+ except StopIteration:
+ if not self._result.cancelled():
+ self._result.set_result(None)
+ except Exception as e:
+ if not self._result.cancelled():
+ self._result.set_exception(e)
+ else:
+ future = asyncio.ensure_future(future, loop=self._loop)
+ future.add_done_callback(self._next)
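
A sketch of the decorator in use: the generator yields futures to wait on and finishes through coroutine_return():

    from portage.util.futures import asyncio
    from portage.util.futures.compat_coroutine import (coroutine,
        coroutine_return)

    @coroutine
    def add_later(a, b, loop=None):
        yield asyncio.sleep(0.1, loop=loop)   # wait for a yielded future
        coroutine_return(a + b)               # becomes the Future's result

    loop = asyncio._wrap_loop()
    print(loop.run_until_complete(add_later(2, 3, loop=loop)))   # prints 5
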
diff --git a/lib/portage/util/futures/events.py b/lib/portage/util/futures/events.py
new file mode 100644
index 000000000..b772bc242
--- /dev/null
+++ b/lib/portage/util/futures/events.py
@@ -0,0 +1,191 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'AbstractEventLoopPolicy',
+ 'AbstractEventLoop',
+)
+
+import socket
+import subprocess
+
+try:
+ from asyncio.events import (
+ AbstractEventLoop as _AbstractEventLoop,
+ AbstractEventLoopPolicy as _AbstractEventLoopPolicy,
+ )
+except ImportError:
+ _AbstractEventLoop = object
+ _AbstractEventLoopPolicy = object
+
+
+class AbstractEventLoopPolicy(_AbstractEventLoopPolicy):
+ """Abstract policy for accessing the event loop."""
+
+ def get_event_loop(self):
+ raise NotImplementedError
+
+ def set_event_loop(self, loop):
+ raise NotImplementedError
+
+ def new_event_loop(self):
+ raise NotImplementedError
+
+ def get_child_watcher(self):
+ raise NotImplementedError
+
+ def set_child_watcher(self, watcher):
+ raise NotImplementedError
+
+
+class AbstractEventLoop(_AbstractEventLoop):
+ """Abstract event loop."""
+
+ def run_forever(self):
+ raise NotImplementedError
+
+ def run_until_complete(self, future):
+ raise NotImplementedError
+
+ def stop(self):
+ raise NotImplementedError
+
+ def is_running(self):
+ raise NotImplementedError
+
+ def is_closed(self):
+ raise NotImplementedError
+
+ def close(self):
+ raise NotImplementedError
+
+ def shutdown_asyncgens(self):
+ raise NotImplementedError
+
+ def _timer_handle_cancelled(self, handle):
+ raise NotImplementedError
+
+ def call_soon(self, callback, *args):
+ return self.call_later(0, callback, *args)
+
+ def call_later(self, delay, callback, *args):
+ raise NotImplementedError
+
+ def call_at(self, when, callback, *args):
+ raise NotImplementedError
+
+ def time(self):
+ raise NotImplementedError
+
+ def create_future(self):
+ raise NotImplementedError
+
+ def create_task(self, coro):
+ raise NotImplementedError
+
+ def call_soon_threadsafe(self, callback, *args):
+ raise NotImplementedError
+
+ def run_in_executor(self, executor, func, *args):
+ raise NotImplementedError
+
+ def set_default_executor(self, executor):
+ raise NotImplementedError
+
+ def getaddrinfo(self, host, port, family=0, type=0, proto=0, flags=0):
+ raise NotImplementedError
+
+ def getnameinfo(self, sockaddr, flags=0):
+ raise NotImplementedError
+
+ def create_connection(self, protocol_factory, host=None, port=None,
+ ssl=None, family=0, proto=0, flags=0, sock=None,
+ local_addr=None, server_hostname=None):
+ raise NotImplementedError
+
+ def create_server(self, protocol_factory, host=None, port=None,
+ family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE,
+ sock=None, backlog=100, ssl=None, reuse_address=None,
+ reuse_port=None):
+ raise NotImplementedError
+
+ def create_unix_connection(self, protocol_factory, path,
+ ssl=None, sock=None,
+ server_hostname=None):
+ raise NotImplementedError
+
+ def create_unix_server(self, protocol_factory, path,
+ sock=None, backlog=100, ssl=None):
+ raise NotImplementedError
+
+ def create_datagram_endpoint(self, protocol_factory,
+ local_addr=None, remote_addr=None,
+ family=0, proto=0, flags=0,
+ reuse_address=None, reuse_port=None,
+ allow_broadcast=None, sock=None):
+ raise NotImplementedError
+
+ def connect_read_pipe(self, protocol_factory, pipe):
+ raise NotImplementedError
+
+ def connect_write_pipe(self, protocol_factory, pipe):
+ raise NotImplementedError
+
+ def subprocess_shell(self, protocol_factory, cmd, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ **kwargs):
+ raise NotImplementedError
+
+ def subprocess_exec(self, protocol_factory, *args, **kwargs):
+ for k in ('stdin', 'stdout', 'stderr'):
+ kwargs.setdefault(k, subprocess.PIPE)
+ raise NotImplementedError
+
+ def add_writer(self, fd, callback, *args):
+ raise NotImplementedError
+
+ def remove_writer(self, fd):
+ raise NotImplementedError
+
+ def sock_recv(self, sock, nbytes):
+ raise NotImplementedError
+
+ def sock_sendall(self, sock, data):
+ raise NotImplementedError
+
+ def sock_connect(self, sock, address):
+ raise NotImplementedError
+
+ def sock_accept(self, sock):
+ raise NotImplementedError
+
+ def add_signal_handler(self, sig, callback, *args):
+ raise NotImplementedError
+
+ def remove_signal_handler(self, sig):
+ raise NotImplementedError
+
+ def set_task_factory(self, factory):
+ raise NotImplementedError
+
+ def get_task_factory(self):
+ raise NotImplementedError
+
+ def get_exception_handler(self):
+ raise NotImplementedError
+
+ def set_exception_handler(self, handler):
+ raise NotImplementedError
+
+ def default_exception_handler(self, context):
+ raise NotImplementedError
+
+ def call_exception_handler(self, context):
+ raise NotImplementedError
+
+ def get_debug(self):
+ raise NotImplementedError
+
+ def set_debug(self, enabled):
+ raise NotImplementedError
+
diff --git a/lib/portage/util/futures/executor/__init__.py b/lib/portage/util/futures/executor/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/lib/portage/util/futures/executor/__init__.py
diff --git a/lib/portage/util/futures/executor/fork.py b/lib/portage/util/futures/executor/fork.py
new file mode 100644
index 000000000..72844403c
--- /dev/null
+++ b/lib/portage/util/futures/executor/fork.py
@@ -0,0 +1,136 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'ForkExecutor',
+)
+
+import collections
+import functools
+import multiprocessing
+import os
+import sys
+import traceback
+
+from portage.util._async.AsyncFunction import AsyncFunction
+from portage.util.futures import asyncio
+
+
+class ForkExecutor(object):
+ """
+ An implementation of concurrent.futures.Executor that forks a
+ new process for each task, with support for cancellation of tasks.
+
+ This is entirely driven by an event loop.
+ """
+ def __init__(self, max_workers=None, loop=None):
+ self._max_workers = max_workers or multiprocessing.cpu_count()
+ self._loop = asyncio._wrap_loop(loop)
+ self._submit_queue = collections.deque()
+ self._running_tasks = {}
+ self._shutdown = False
+ self._shutdown_future = self._loop.create_future()
+
+ def submit(self, fn, *args, **kwargs):
+ """Submits a callable to be executed with the given arguments.
+
+ Schedules the callable to be executed as fn(*args, **kwargs) and returns
+ a Future instance representing the execution of the callable.
+
+ Returns:
+ A Future representing the given call.
+ """
+ future = self._loop.create_future()
+ proc = AsyncFunction(target=functools.partial(
+ self._guarded_fn_call, fn, args, kwargs))
+ self._submit_queue.append((future, proc))
+ self._schedule()
+ return future
+
+ def _schedule(self):
+ while (not self._shutdown and self._submit_queue and
+ len(self._running_tasks) < self._max_workers):
+ future, proc = self._submit_queue.popleft()
+ future.add_done_callback(functools.partial(self._cancel_cb, proc))
+ proc.addExitListener(functools.partial(self._proc_exit, future))
+ proc.scheduler = self._loop
+ proc.start()
+ self._running_tasks[id(proc)] = proc
+
+ def _cancel_cb(self, proc, future):
+ if future.cancelled():
+ # async, handle the rest in _proc_exit
+ proc.cancel()
+
+ @staticmethod
+ def _guarded_fn_call(fn, args, kwargs):
+ try:
+ result = fn(*args, **kwargs)
+ exception = None
+ except Exception as e:
+ result = None
+ exception = _ExceptionWithTraceback(e)
+
+ return result, exception
+
+ def _proc_exit(self, future, proc):
+ if not future.cancelled():
+ if proc.returncode == os.EX_OK:
+ result, exception = proc.result
+ if exception is not None:
+ future.set_exception(exception)
+ else:
+ future.set_result(result)
+ else:
+ # TODO: add special exception class for this, maybe
+ # distinguish between kill and crash
+ future.set_exception(
+ Exception('pid {} crashed or killed, exitcode {}'.\
+ format(proc.pid, proc.returncode)))
+
+ del self._running_tasks[id(proc)]
+ self._schedule()
+ if self._shutdown and not self._running_tasks:
+ self._shutdown_future.set_result(None)
+
+ def shutdown(self, wait=True):
+ self._shutdown = True
+ if not self._running_tasks and not self._shutdown_future.done():
+ self._shutdown_future.set_result(None)
+ if wait:
+ self._loop.run_until_complete(self._shutdown_future)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.shutdown(wait=True)
+ return False
+
+
+class _ExceptionWithTraceback:
+ def __init__(self, exc):
+ tb = traceback.format_exception(type(exc), exc, exc.__traceback__)
+ tb = ''.join(tb)
+ self.exc = exc
+ self.tb = '\n"""\n%s"""' % tb
+ def __reduce__(self):
+ return _rebuild_exc, (self.exc, self.tb)
+
+
+class _RemoteTraceback(Exception):
+ def __init__(self, tb):
+ self.tb = tb
+ def __str__(self):
+ return self.tb
+
+
+def _rebuild_exc(exc, tb):
+ exc.__cause__ = _RemoteTraceback(tb)
+ return exc
+
+
+if sys.version_info < (3,):
+ # Python 2 does not support exception chaining, so
+ # don't bother to preserve the traceback.
+ _ExceptionWithTraceback = lambda exc: exc
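
A sketch of the executor; each submitted callable runs in a forked child, and only the return value has to be picklable since it travels back over a pipe:

    from portage.util.futures import asyncio
    from portage.util.futures.executor.fork import ForkExecutor

    loop = asyncio._wrap_loop()
    with ForkExecutor(max_workers=2, loop=loop) as executor:
        future = executor.submit(pow, 2, 10)
        print(loop.run_until_complete(future))   # prints 1024
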
diff --git a/lib/portage/util/futures/extendedfutures.py b/lib/portage/util/futures/extendedfutures.py
new file mode 100644
index 000000000..af384c745
--- /dev/null
+++ b/lib/portage/util/futures/extendedfutures.py
@@ -0,0 +1,73 @@
+# Copyright 2016 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+#
+# This module provides an extended subset of the asyncio.futures.Futures
+# interface.
+
+from __future__ import unicode_literals
+
+__all__ = (
+ 'CancelledError',
+ 'ExtendedFuture',
+ 'InvalidStateError',
+)
+
+from portage.util.futures.futures import (Future, InvalidStateError,
+ CancelledError)
+
+# Create our one time settable unset constant
+UNSET_CONST = Future()
+UNSET_CONST.set_result(object())
+
+
+class ExtendedFuture(Future):
+ '''Extended Future class adding convenience get and set operations, with
+ default result capabilities for an unset result(). It also allows
+ duplicate set_result() calls to be ignored.
+ '''
+
+ def __init__(self, default_result=UNSET_CONST.result()):
+ '''Class init
+
+ @param default_result: Optional data type/value to return in the event
+ of a result() call when result has not yet been
+ set.
+ '''
+ self.default_result = default_result
+ super(ExtendedFuture, self).__init__()
+ self.set = self.set_result
+
+ def set_result(self, data, ignore_InvalidState=False):
+ '''Set the Future's result to the data, optionally suppressing any
+ InvalidStateError instead of raising it.
+
+ @param ignore_InvalidState: Boolean
+ '''
+ if ignore_InvalidState:
+ try:
+ super(ExtendedFuture, self).set_result(data)
+ except InvalidStateError:
+ pass
+ else:
+ super(ExtendedFuture, self).set_result(data)
+
+ def get(self, default=UNSET_CONST.result()):
+ '''Convenience function wrapping result(), but with an optional
+ default value to return rather than raising an InvalidStateError
+
+ @param default: Optional override for the classwide default_result
+ @returns: the result data or the default value; raises an exception
+ if the result is unset and no default is defined.
+ '''
+ if default is not UNSET_CONST.result():
+ pass
+ elif self.default_result is not UNSET_CONST.result():
+ default = self.default_result
+ if default is not UNSET_CONST.result():
+ try:
+ data = super(ExtendedFuture, self).result()
+ except InvalidStateError:
+ data = default
+ else:
+ data = super(ExtendedFuture, self).result()
+ return data
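
A sketch of the convenience behaviour ExtendedFuture layers on top of Future, assuming an event loop can be obtained in the calling thread (the base Future acquires one on construction):

    from portage.util.futures.extendedfutures import ExtendedFuture

    f = ExtendedFuture(default_result="fallback")
    print(f.get())                        # "fallback", no InvalidStateError
    f.set("real")                         # set() is an alias for set_result()
    f.set_result("ignored", ignore_InvalidState=True)  # duplicate set is swallowed
    print(f.get())                        # "real"
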
diff --git a/lib/portage/util/futures/futures.py b/lib/portage/util/futures/futures.py
new file mode 100644
index 000000000..9c9900d4c
--- /dev/null
+++ b/lib/portage/util/futures/futures.py
@@ -0,0 +1,199 @@
+# Copyright 2016-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+#
+# For compatibility with python versions which do not have the
+# asyncio module (Python 3.3 and earlier), this module provides a
+# subset of the asyncio.futures.Futures interface.
+
+from __future__ import unicode_literals
+
+__all__ = (
+ 'CancelledError',
+ 'Future',
+ 'InvalidStateError',
+ 'TimeoutError',
+)
+
+try:
+ from asyncio import (
+ CancelledError,
+ Future,
+ InvalidStateError,
+ TimeoutError,
+ )
+except ImportError:
+
+ from portage.exception import PortageException
+
+ class Error(PortageException):
+ pass
+
+ class CancelledError(Error):
+ def __init__(self):
+ Error.__init__(self, "cancelled")
+
+ class TimeoutError(Error):
+ def __init__(self):
+ Error.__init__(self, "timed out")
+
+ class InvalidStateError(Error):
+ pass
+
+ Future = None
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.util._eventloop.global_event_loop:global_event_loop@_global_event_loop',
+)
+
+_PENDING = 'PENDING'
+_CANCELLED = 'CANCELLED'
+_FINISHED = 'FINISHED'
+
+class _EventLoopFuture(object):
+ """
+ This class provides (a subset of) the asyncio.Future interface, for
+ use with the EventLoop class, because EventLoop is currently
+ missing some of the asyncio.AbstractEventLoop methods that
+ asyncio.Future requires.
+ """
+
+ # Class variables serving as defaults for instance variables.
+ _state = _PENDING
+ _result = None
+ _exception = None
+ _loop = None
+
+ def __init__(self, loop=None):
+ """Initialize the future.
+
+ The optional loop argument allows explicitly setting the event
+ loop object used by the future. If it's not provided, the future uses
+ the default event loop.
+ """
+ if loop is None:
+ self._loop = _global_event_loop()
+ else:
+ self._loop = loop
+ self._callbacks = []
+
+ def cancel(self):
+ """Cancel the future and schedule callbacks.
+
+ If the future is already done or cancelled, return False. Otherwise,
+ change the future's state to cancelled, schedule the callbacks and
+ return True.
+ """
+ if self._state != _PENDING:
+ return False
+ self._state = _CANCELLED
+ self._schedule_callbacks()
+ return True
+
+ def _schedule_callbacks(self):
+ """Internal: Ask the event loop to call all callbacks.
+
+ The callbacks are scheduled to be called as soon as possible. Also
+ clears the callback list.
+ """
+ callbacks = self._callbacks[:]
+ if not callbacks:
+ return
+
+ self._callbacks[:] = []
+ for callback in callbacks:
+ self._loop.call_soon(callback, self)
+
+ def cancelled(self):
+ """Return True if the future was cancelled."""
+ return self._state == _CANCELLED
+
+ def done(self):
+ """Return True if the future is done.
+
+ Done means either that a result / exception are available, or that the
+ future was cancelled.
+ """
+ return self._state != _PENDING
+
+ def result(self):
+ """Return the result this future represents.
+
+ If the future has been cancelled, raises CancelledError. If the
+ future's result isn't yet available, raises InvalidStateError. If
+ the future is done and has an exception set, this exception is raised.
+ """
+ if self._state == _CANCELLED:
+ raise CancelledError()
+ if self._state != _FINISHED:
+ raise InvalidStateError('Result is not ready.')
+ if self._exception is not None:
+ raise self._exception
+ return self._result
+
+ def exception(self):
+ """Return the exception that was set on this future.
+
+ The exception (or None if no exception was set) is returned only if
+ the future is done. If the future has been cancelled, raises
+ CancelledError. If the future isn't done yet, raises
+ InvalidStateError.
+ """
+ if self._state == _CANCELLED:
+ raise CancelledError
+ if self._state != _FINISHED:
+ raise InvalidStateError('Exception is not set.')
+ return self._exception
+
+ def add_done_callback(self, fn):
+ """Add a callback to be run when the future becomes done.
+
+ The callback is called with a single argument - the future object. If
+ the future is already done when this is called, the callback is
+ scheduled with call_soon.
+ """
+ if self._state != _PENDING:
+ self._loop.call_soon(fn, self)
+ else:
+ self._callbacks.append(fn)
+
+ def remove_done_callback(self, fn):
+ """Remove all instances of a callback from the "call when done" list.
+
+ Returns the number of callbacks removed.
+ """
+ filtered_callbacks = [f for f in self._callbacks if f != fn]
+ removed_count = len(self._callbacks) - len(filtered_callbacks)
+ if removed_count:
+ self._callbacks[:] = filtered_callbacks
+ return removed_count
+
+ def set_result(self, result):
+ """Mark the future done and set its result.
+
+ If the future is already done when this method is called, raises
+ InvalidStateError.
+ """
+ if self._state != _PENDING:
+ raise InvalidStateError('{}: {!r}'.format(self._state, self))
+ self._result = result
+ self._state = _FINISHED
+ self._schedule_callbacks()
+
+ def set_exception(self, exception):
+ """Mark the future done and set an exception.
+
+ If the future is already done when this method is called, raises
+ InvalidStateError.
+ """
+ if self._state != _PENDING:
+ raise InvalidStateError('{}: {!r}'.format(self._state, self))
+ if isinstance(exception, type):
+ exception = exception()
+ self._exception = exception
+ self._state = _FINISHED
+ self._schedule_callbacks()
+
+
+if Future is None:
+ Future = _EventLoopFuture
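
Whichever implementation ends up bound to Future, the subset documented above behaves the same; a small sketch using the loop's create_future():

    from portage.util.futures import asyncio

    loop = asyncio._wrap_loop()

    f = loop.create_future()
    assert not f.done()
    f.set_result(42)
    assert f.done() and f.result() == 42

    g = loop.create_future()
    g.cancel()
    assert g.cancelled()
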
diff --git a/lib/portage/util/futures/iter_completed.py b/lib/portage/util/futures/iter_completed.py
new file mode 100644
index 000000000..31b5e0c78
--- /dev/null
+++ b/lib/portage/util/futures/iter_completed.py
@@ -0,0 +1,183 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import functools
+import multiprocessing
+
+from portage.util._async.AsyncTaskFuture import AsyncTaskFuture
+from portage.util._async.TaskScheduler import TaskScheduler
+from portage.util.futures import asyncio
+
+
+def iter_completed(futures, max_jobs=None, max_load=None, loop=None):
+ """
+ This is similar to asyncio.as_completed, but takes an iterator of
+ futures as input, and includes support for max_jobs and max_load
+ parameters.
+
+ @param futures: iterator of asyncio.Future (or compatible)
+ @type futures: iterator
+ @param max_jobs: max number of futures to process concurrently (default
+ is multiprocessing.cpu_count())
+ @type max_jobs: int
+ @param max_load: max load allowed when scheduling a new future,
+ otherwise schedule no more than 1 future at a time (default
+ is multiprocessing.cpu_count())
+ @type max_load: int or float
+ @param loop: event loop
+ @type loop: EventLoop
+ @return: iterator of futures that are done
+ @rtype: iterator
+ """
+ loop = asyncio._wrap_loop(loop)
+
+ for future_done_set in async_iter_completed(futures,
+ max_jobs=max_jobs, max_load=max_load, loop=loop):
+ for future in loop.run_until_complete(future_done_set):
+ yield future
+
+
+def async_iter_completed(futures, max_jobs=None, max_load=None, loop=None):
+ """
+ An asynchronous version of iter_completed. This yields futures, which
+ when done, result in a set of input futures that are done. This serves
+ as a wrapper around portage's internal TaskScheduler class, using
+ standard asyncio interfaces.
+
+ @param futures: iterator of asyncio.Future (or compatible)
+ @type futures: iterator
+ @param max_jobs: max number of futures to process concurrently (default
+ is multiprocessing.cpu_count())
+ @type max_jobs: int
+ @param max_load: max load allowed when scheduling a new future,
+ otherwise schedule no more than 1 future at a time (default
+ is multiprocessing.cpu_count())
+ @type max_load: int or float
+ @param loop: event loop
+ @type loop: EventLoop
+ @return: iterator of futures, which when done, result in a set of
+ input futures that are done
+ @rtype: iterator
+ """
+ loop = asyncio._wrap_loop(loop)
+
+ max_jobs = max_jobs or multiprocessing.cpu_count()
+ max_load = max_load or multiprocessing.cpu_count()
+
+ future_map = {}
+ def task_generator():
+ for future in futures:
+ future_map[id(future)] = future
+ yield AsyncTaskFuture(future=future)
+
+ scheduler = TaskScheduler(
+ task_generator(),
+ max_jobs=max_jobs,
+ max_load=max_load,
+ event_loop=loop)
+
+ def done_callback(future_done_set, wait_result):
+ """Propagate results from wait_result to future_done_set."""
+ if future_done_set.cancelled():
+ return
+ done, pending = wait_result.result()
+ for future in done:
+ del future_map[id(future)]
+ future_done_set.set_result(done)
+
+ def cancel_callback(wait_result, future_done_set):
+ """Cancel wait_result if future_done_set has been cancelled."""
+ if future_done_set.cancelled() and not wait_result.done():
+ wait_result.cancel()
+
+ try:
+ scheduler.start()
+
+ # scheduler should ensure that future_map is non-empty until
+ # task_generator is exhausted
+ while future_map:
+ wait_result = asyncio.ensure_future(
+ asyncio.wait(list(future_map.values()),
+ return_when=asyncio.FIRST_COMPLETED, loop=loop), loop=loop)
+ future_done_set = loop.create_future()
+ future_done_set.add_done_callback(
+ functools.partial(cancel_callback, wait_result))
+ wait_result.add_done_callback(
+ functools.partial(done_callback, future_done_set))
+ yield future_done_set
+ finally:
+ # cleanup in case of interruption by SIGINT, etc
+ scheduler.cancel()
+ scheduler.wait()
+
+
+def iter_gather(futures, max_jobs=None, max_load=None, loop=None):
+ """
+ This is similar to asyncio.gather, but takes an iterator of
+ futures as input, and includes support for max_jobs and max_load
+ parameters.
+
+ @param futures: iterator of asyncio.Future (or compatible)
+ @type futures: iterator
+ @param max_jobs: max number of futures to process concurrently (default
+ is multiprocessing.cpu_count())
+ @type max_jobs: int
+ @param max_load: max load allowed when scheduling a new future,
+ otherwise schedule no more than 1 future at a time (default
+ is multiprocessing.cpu_count())
+ @type max_load: int or float
+ @param loop: event loop
+ @type loop: EventLoop
+ @return: a Future resulting in a list of done input futures, in the
+ same order that they were yielded from the input iterator
+ @rtype: asyncio.Future (or compatible)
+ """
+ loop = asyncio._wrap_loop(loop)
+ result = loop.create_future()
+ futures_list = []
+
+ def future_generator():
+ for future in futures:
+ futures_list.append(future)
+ yield future
+
+ completed_iter = async_iter_completed(
+ future_generator(),
+ max_jobs=max_jobs,
+ max_load=max_load,
+ loop=loop,
+ )
+
+ def handle_result(future_done_set):
+ if result.cancelled():
+ if not future_done_set.cancelled():
+ # All exceptions must be consumed from future_done_set, in order
+ # to avoid triggering the event loop's exception handler.
+ list(future.exception() for future in future_done_set.result()
+ if not future.cancelled())
+ return
+
+ try:
+ handle_result.current_task = next(completed_iter)
+ except StopIteration:
+ result.set_result(futures_list)
+ else:
+ handle_result.current_task.add_done_callback(handle_result)
+
+ try:
+ handle_result.current_task = next(completed_iter)
+ except StopIteration:
+ handle_result.current_task = None
+ result.set_result(futures_list)
+ else:
+ handle_result.current_task.add_done_callback(handle_result)
+
+ def cancel_callback(result):
+ if (result.cancelled() and
+ handle_result.current_task is not None and
+ not handle_result.current_task.done()):
+ handle_result.current_task.cancel()
+
+ result.add_done_callback(cancel_callback)
+
+ return result
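
A sketch of iter_completed(), feeding it futures that are already done so no real jobs have to run (max_jobs and max_load default to the CPU count):

    from portage.util.futures import asyncio
    from portage.util.futures.iter_completed import iter_completed

    loop = asyncio._wrap_loop()

    def make_futures():
        for i in range(3):
            f = loop.create_future()
            f.set_result(i)
            yield f

    for future in iter_completed(make_futures(), max_jobs=2, loop=loop):
        print(future.result())
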
diff --git a/lib/portage/util/futures/retry.py b/lib/portage/util/futures/retry.py
new file mode 100644
index 000000000..ccfc087ab
--- /dev/null
+++ b/lib/portage/util/futures/retry.py
@@ -0,0 +1,182 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'RetryError',
+ 'retry',
+)
+
+import functools
+
+from portage.exception import PortageException
+from portage.util.futures import asyncio
+
+
+class RetryError(PortageException):
+ """Raised when retry fails."""
+ def __init__(self):
+ PortageException.__init__(self, "retry error")
+
+
+def retry(try_max=None, try_timeout=None, overall_timeout=None,
+ delay_func=None, reraise=False, loop=None):
+ """
+ Create and return a retry decorator. The decorator is intended to
+ operate only on a coroutine function.
+
+ @param try_max: maximum number of tries
+ @type try_max: int or None
+ @param try_timeout: number of seconds to wait for a try to succeed
+ before cancelling it, which is only effective if func returns
+ tasks that support cancellation
+ @type try_timeout: float or None
+ @param overall_timeout: number of seconds to wait for retries to
+ succeed before aborting, which is only effective if func returns
+ tasks that support cancellation
+ @type overall_timeout: float or None
+ @param delay_func: function that takes an int argument corresponding
+ to the number of previous tries and returns a number of seconds
+ to wait before the next try
+ @type delay_func: callable
+ @param reraise: Reraise the last exception, instead of RetryError
+ @type reraise: bool
+ @param loop: event loop
+ @type loop: EventLoop
+ @return: func decorated with retry support
+ @rtype: callable
+ """
+ return functools.partial(_retry_wrapper, loop, try_max, try_timeout,
+ overall_timeout, delay_func, reraise)
+
+
+def _retry_wrapper(_loop, try_max, try_timeout, overall_timeout, delay_func,
+ reraise, func, loop=None):
+ """
+ Create and return a decorated function.
+ """
+ return functools.partial(_retry, loop or _loop, try_max, try_timeout,
+ overall_timeout, delay_func, reraise, func)
+
+
+def _retry(loop, try_max, try_timeout, overall_timeout, delay_func,
+ reraise, func, *args, **kwargs):
+ """
+ Retry coroutine, used to implement retry decorator.
+
+ @return: func return value
+ @rtype: asyncio.Future (or compatible)
+ """
+ loop = asyncio._wrap_loop(loop)
+ future = loop.create_future()
+ _Retry(future, loop, try_max, try_timeout, overall_timeout, delay_func,
+ reraise, functools.partial(func, *args, **kwargs))
+ return future
+
+
+class _Retry(object):
+ def __init__(self, future, loop, try_max, try_timeout, overall_timeout,
+ delay_func, reraise, func):
+ self._future = future
+ self._loop = loop
+ self._try_max = try_max
+ self._try_timeout = try_timeout
+ self._delay_func = delay_func
+ self._reraise = reraise
+ self._func = func
+
+ self._try_timeout_handle = None
+ self._overall_timeout_handle = None
+ self._overall_timeout_expired = None
+ self._tries = 0
+ self._current_task = None
+ self._previous_result = None
+
+ future.add_done_callback(self._cancel_callback)
+ if overall_timeout is not None:
+ self._overall_timeout_handle = loop.call_later(
+ overall_timeout, self._overall_timeout_callback)
+ self._begin_try()
+
+ def _cancel_callback(self, future):
+ if future.cancelled() and self._current_task is not None:
+ self._current_task.cancel()
+
+ def _try_timeout_callback(self):
+ self._try_timeout_handle = None
+ self._current_task.cancel()
+
+ def _overall_timeout_callback(self):
+ self._overall_timeout_handle = None
+ self._overall_timeout_expired = True
+ self._current_task.cancel()
+ self._retry_error()
+
+ def _begin_try(self):
+ self._tries += 1
+ self._current_task = self._func()
+ self._current_task.add_done_callback(self._try_done)
+ if self._try_timeout is not None:
+ self._try_timeout_handle = self._loop.call_later(
+ self._try_timeout, self._try_timeout_callback)
+
+ def _try_done(self, future):
+ self._current_task = None
+
+ if self._try_timeout_handle is not None:
+ self._try_timeout_handle.cancel()
+ self._try_timeout_handle = None
+
+ if not future.cancelled():
+ # consume exception, so that the event loop
+ # exception handler does not report it
+ future.exception()
+
+ if self._overall_timeout_expired:
+ return
+
+ try:
+ if self._future.cancelled():
+ return
+
+ self._previous_result = future
+ if not (future.cancelled() or future.exception() is not None):
+ # success
+ self._future.set_result(future.result())
+ return
+ finally:
+ if self._future.done() and self._overall_timeout_handle is not None:
+ self._overall_timeout_handle.cancel()
+ self._overall_timeout_handle = None
+
+ if self._try_max is not None and self._tries >= self._try_max:
+ self._retry_error()
+ return
+
+ if self._delay_func is not None:
+ delay = self._delay_func(self._tries)
+ self._current_task = self._loop.call_later(delay, self._delay_done)
+ return
+
+ self._begin_try()
+
+ def _delay_done(self):
+ self._current_task = None
+
+ if self._future.cancelled() or self._overall_timeout_expired:
+ return
+
+ self._begin_try()
+
+ def _retry_error(self):
+ if self._previous_result is None or self._previous_result.cancelled():
+ cause = asyncio.TimeoutError()
+ else:
+ cause = self._previous_result.exception()
+
+ if self._reraise:
+ e = cause
+ else:
+ e = RetryError()
+ e.__cause__ = cause
+
+ self._future.set_exception(e)
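
The retry() decorator above is used in two steps: retry(...) builds a decorator, and calling the decorated function returns a future that resolves once one try succeeds, or fails with RetryError (or the last exception when reraise=True). A minimal sketch, assuming the module-internal asyncio._wrap_loop() helper used elsewhere in this patch and a hypothetical flaky() function that returns a failed future twice before succeeding:

    from portage.util.futures import asyncio
    from portage.util.futures.retry import retry

    _calls = []

    def flaky():
        # Hypothetical helper: fails on the first two calls, then succeeds.
        loop = asyncio._wrap_loop()
        future = loop.create_future()
        _calls.append(None)
        if len(_calls) < 3:
            future.set_exception(RuntimeError('try %d failed' % len(_calls)))
        else:
            future.set_result('ok')
        return future

    loop = asyncio._wrap_loop()
    decorator = retry(try_max=5, delay_func=lambda tries: 0.1 * tries, loop=loop)
    decorated = decorator(flaky)
    print(loop.run_until_complete(decorated()))  # 'ok' after two failed tries
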
diff --git a/lib/portage/util/futures/transports.py b/lib/portage/util/futures/transports.py
new file mode 100644
index 000000000..60ea93073
--- /dev/null
+++ b/lib/portage/util/futures/transports.py
@@ -0,0 +1,90 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+try:
+ from asyncio.transports import Transport as _Transport
+except ImportError:
+ _Transport = object
+
+
+class _FlowControlMixin(_Transport):
+ """
+ This is identical to the standard library's private
+ asyncio.transports._FlowControlMixin class.
+
+ All the logic for (write) flow control in a mix-in base class.
+
+ The subclass must implement get_write_buffer_size(). It must call
+ _maybe_pause_protocol() whenever the write buffer size increases,
+ and _maybe_resume_protocol() whenever it decreases. It may also
+ override set_write_buffer_limits() (e.g. to specify different
+ defaults).
+
+ The subclass constructor must call super().__init__(extra). This
+ will call set_write_buffer_limits().
+
+ The user may call set_write_buffer_limits() and
+ get_write_buffer_size(), and their protocol's pause_writing() and
+ resume_writing() may be called.
+ """
+
+ def __init__(self, extra=None, loop=None):
+ super().__init__(extra)
+ assert loop is not None
+ self._loop = loop
+ self._protocol_paused = False
+ self._set_write_buffer_limits()
+
+ def _maybe_pause_protocol(self):
+ size = self.get_write_buffer_size()
+ if size <= self._high_water:
+ return
+ if not self._protocol_paused:
+ self._protocol_paused = True
+ try:
+ self._protocol.pause_writing()
+ except Exception as exc:
+ self._loop.call_exception_handler({
+ 'message': 'protocol.pause_writing() failed',
+ 'exception': exc,
+ 'transport': self,
+ 'protocol': self._protocol,
+ })
+
+ def _maybe_resume_protocol(self):
+ if (self._protocol_paused and
+ self.get_write_buffer_size() <= self._low_water):
+ self._protocol_paused = False
+ try:
+ self._protocol.resume_writing()
+ except Exception as exc:
+ self._loop.call_exception_handler({
+ 'message': 'protocol.resume_writing() failed',
+ 'exception': exc,
+ 'transport': self,
+ 'protocol': self._protocol,
+ })
+
+ def get_write_buffer_limits(self):
+ return (self._low_water, self._high_water)
+
+ def _set_write_buffer_limits(self, high=None, low=None):
+ if high is None:
+ if low is None:
+ high = 64*1024
+ else:
+ high = 4*low
+ if low is None:
+ low = high // 4
+ if not high >= low >= 0:
+ raise ValueError('high (%r) must be >= low (%r) must be >= 0' %
+ (high, low))
+ self._high_water = high
+ self._low_water = low
+
+ def set_write_buffer_limits(self, high=None, low=None):
+ self._set_write_buffer_limits(high=high, low=low)
+ self._maybe_pause_protocol()
+
+ def get_write_buffer_size(self):
+ raise NotImplementedError
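
A subclass only has to report its write buffer size and call the pause/resume hooks when that size changes; the concrete subclass in this patch is _UnixWritePipeTransport in unix_events.py below. The class here is a hypothetical sketch of that contract, not part of the patch:

    from portage.util.futures.transports import _FlowControlMixin

    class _BufferedWriteTransport(_FlowControlMixin):
        # Hypothetical subclass illustrating the _FlowControlMixin contract.
        def __init__(self, loop, protocol, extra=None):
            self._protocol = protocol      # used by the pause/resume hooks
            self._buffer = bytearray()
            super().__init__(extra, loop)  # calls _set_write_buffer_limits()

        def get_write_buffer_size(self):
            return len(self._buffer)

        def write(self, data):
            self._buffer += data
            self._maybe_pause_protocol()   # buffer grew

        def _on_bytes_flushed(self, nbytes):
            del self._buffer[:nbytes]
            self._maybe_resume_protocol()  # buffer shrank
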
diff --git a/lib/portage/util/futures/unix_events.py b/lib/portage/util/futures/unix_events.py
new file mode 100644
index 000000000..3381eaa7d
--- /dev/null
+++ b/lib/portage/util/futures/unix_events.py
@@ -0,0 +1,705 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = (
+ 'AbstractChildWatcher',
+ 'DefaultEventLoopPolicy',
+)
+
+try:
+ import asyncio as _real_asyncio
+ from asyncio.base_subprocess import BaseSubprocessTransport as _BaseSubprocessTransport
+ from asyncio.unix_events import AbstractChildWatcher as _AbstractChildWatcher
+ from asyncio.transports import (
+ ReadTransport as _ReadTransport,
+ WriteTransport as _WriteTransport,
+ )
+except ImportError:
+ _real_asyncio = None
+ _AbstractChildWatcher = object
+ _BaseSubprocessTransport = object
+ _ReadTransport = object
+ _WriteTransport = object
+
+import errno
+import fcntl
+import functools
+import logging
+import os
+import socket
+import stat
+import subprocess
+import sys
+
+from portage.util._eventloop.global_event_loop import (
+ _asyncio_enabled,
+ global_event_loop as _global_event_loop,
+)
+from portage.util.futures import (
+ asyncio,
+ events,
+)
+
+from portage.util.futures.transports import _FlowControlMixin
+
+
+class _PortageEventLoop(events.AbstractEventLoop):
+ """
+ Implementation of asyncio.AbstractEventLoop which wraps portage's
+ internal event loop.
+ """
+
+ def __init__(self, loop):
+ """
+ @type loop: EventLoop
+ @param loop: an instance of portage's internal event loop
+ """
+ self._loop = loop
+ self.run_until_complete = loop.run_until_complete
+ self.call_soon = loop.call_soon
+ self.call_soon_threadsafe = loop.call_soon_threadsafe
+ self.call_later = loop.call_later
+ self.call_at = loop.call_at
+ self.is_running = loop.is_running
+ self.is_closed = loop.is_closed
+ self.close = loop.close
+ self.create_future = loop.create_future
+ self.add_reader = loop.add_reader
+ self.remove_reader = loop.remove_reader
+ self.add_writer = loop.add_writer
+ self.remove_writer = loop.remove_writer
+ self.run_in_executor = loop.run_in_executor
+ self.time = loop.time
+ self.default_exception_handler = loop.default_exception_handler
+ self.call_exception_handler = loop.call_exception_handler
+ self.set_debug = loop.set_debug
+ self.get_debug = loop.get_debug
+
+ @property
+ def _asyncio_child_watcher(self):
+ """
+ In order to avoid accessing the internal _loop attribute, portage
+ internals should use this property when possible.
+
+ @rtype: asyncio.AbstractChildWatcher
+ @return: the internal event loop's AbstractChildWatcher interface
+ """
+ return self._loop._asyncio_child_watcher
+
+ @property
+ def _asyncio_wrapper(self):
+ """
+ In order to avoid accessing the internal _loop attribute, portage
+ internals should use this property when possible.
+
+ @rtype: asyncio.AbstractEventLoop
+ @return: the internal event loop's AbstractEventLoop interface
+ """
+ return self
+
+ def create_task(self, coro):
+ """
+ Schedule a coroutine object.
+
+ @type coro: coroutine
+ @param coro: a coroutine to schedule
+ @rtype: asyncio.Task
+ @return: a task object
+ """
+ return asyncio.Task(coro, loop=self)
+
+ def connect_read_pipe(self, protocol_factory, pipe):
+ """
+ Register read pipe in event loop. Set the pipe to non-blocking mode.
+
+ @type protocol_factory: callable
+ @param protocol_factory: must instantiate object with Protocol interface
+ @type pipe: file
+ @param pipe: a pipe to read from
+ @rtype: asyncio.Future
+ @return: Return pair (transport, protocol), where transport supports the
+ ReadTransport interface.
+ """
+ protocol = protocol_factory()
+ result = self.create_future()
+ waiter = self.create_future()
+ transport = self._make_read_pipe_transport(pipe, protocol, waiter=waiter)
+
+ def waiter_callback(waiter):
+ try:
+ waiter.result()
+ except Exception as e:
+ transport.close()
+ result.set_exception(e)
+ else:
+ result.set_result((transport, protocol))
+
+ waiter.add_done_callback(waiter_callback)
+ return result
+
+ def connect_write_pipe(self, protocol_factory, pipe):
+ """
+ Register write pipe in event loop. Set the pipe to non-blocking mode.
+
+ @type protocol_factory: callable
+ @param protocol_factory: must instantiate object with Protocol interface
+ @type pipe: file
+ @param pipe: a pipe to write to
+ @rtype: asyncio.Future
+ @return: Return pair (transport, protocol), where transport supports the
+ WriteTransport interface.
+ """
+ protocol = protocol_factory()
+ result = self.create_future()
+ waiter = self.create_future()
+ transport = self._make_write_pipe_transport(pipe, protocol, waiter)
+
+ def waiter_callback(waiter):
+ try:
+ waiter.result()
+ except Exception as e:
+ transport.close()
+ result.set_exception(e)
+ else:
+ result.set_result((transport, protocol))
+
+ waiter.add_done_callback(waiter_callback)
+ return result
+
+ def subprocess_exec(self, protocol_factory, program, *args, **kwargs):
+ """
+ Run subprocesses asynchronously using the subprocess module.
+
+ @type protocol_factory: callable
+ @param protocol_factory: must instantiate a subclass of the
+ asyncio.SubprocessProtocol class
+ @type program: str or bytes
+ @param program: the program to execute
+ @type args: str or bytes
+ @param args: program's arguments
+ @type kwargs: varies
+ @param kwargs: subprocess.Popen parameters
+ @rtype: asyncio.Future
+ @return: Returns a pair of (transport, protocol), where transport
+ is an instance of BaseSubprocessTransport
+ """
+
+ # python2.7 does not allow arguments with defaults after *args
+ stdin = kwargs.pop('stdin', subprocess.PIPE)
+ stdout = kwargs.pop('stdout', subprocess.PIPE)
+ stderr = kwargs.pop('stderr', subprocess.PIPE)
+
+ universal_newlines = kwargs.pop('universal_newlines', False)
+ shell = kwargs.pop('shell', False)
+ bufsize = kwargs.pop('bufsize', 0)
+
+ if universal_newlines:
+ raise ValueError("universal_newlines must be False")
+ if shell:
+ raise ValueError("shell must be False")
+ if bufsize != 0:
+ raise ValueError("bufsize must be 0")
+ popen_args = (program,) + args
+ for arg in popen_args:
+ if not isinstance(arg, (str, bytes)):
+ raise TypeError("program arguments must be "
+ "a bytes or text string, not %s"
+ % type(arg).__name__)
+ result = self.create_future()
+ self._make_subprocess_transport(
+ result, protocol_factory(), popen_args, False, stdin, stdout, stderr,
+ bufsize, **kwargs)
+ return result
+
+ def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
+ extra=None):
+ return _UnixReadPipeTransport(self, pipe, protocol, waiter, extra)
+
+ def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
+ extra=None):
+ return _UnixWritePipeTransport(self, pipe, protocol, waiter, extra)
+
+ def _make_subprocess_transport(self, result, protocol, args, shell,
+ stdin, stdout, stderr, bufsize, extra=None, **kwargs):
+ waiter = self.create_future()
+ transp = _UnixSubprocessTransport(self,
+ protocol, args, shell, stdin, stdout, stderr, bufsize,
+ waiter=waiter, extra=extra,
+ **kwargs)
+
+ self._loop._asyncio_child_watcher.add_child_handler(
+ transp.get_pid(), self._child_watcher_callback, transp)
+
+ waiter.add_done_callback(functools.partial(
+ self._subprocess_transport_callback, transp, protocol, result))
+
+ def _subprocess_transport_callback(self, transp, protocol, result, waiter):
+ if waiter.exception() is None:
+ result.set_result((transp, protocol))
+ else:
+ transp.close()
+ wait_transp = asyncio.ensure_future(transp._wait(), loop=self)
+ wait_transp.add_done_callback(
+ functools.partial(self._subprocess_transport_failure,
+ result, waiter.exception()))
+
+ def _child_watcher_callback(self, pid, returncode, transp):
+ self.call_soon_threadsafe(transp._process_exited, returncode)
+
+ def _subprocess_transport_failure(self, result, exception, wait_transp):
+ result.set_exception(wait_transp.exception() or exception)
+
+
+if hasattr(os, 'set_blocking'):
+ def _set_nonblocking(fd):
+ os.set_blocking(fd, False)
+else:
+ def _set_nonblocking(fd):
+ flags = fcntl.fcntl(fd, fcntl.F_GETFL)
+ flags = flags | os.O_NONBLOCK
+ fcntl.fcntl(fd, fcntl.F_SETFL, flags)
+
+
+class _UnixReadPipeTransport(_ReadTransport):
+ """
+ This is identical to the standard library's private
+ asyncio.unix_events._UnixReadPipeTransport class, except that it
+ only calls public AbstractEventLoop methods.
+ """
+
+ max_size = 256 * 1024 # max bytes we read in one event loop iteration
+
+ def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
+ super().__init__(extra)
+ self._extra['pipe'] = pipe
+ self._loop = loop
+ self._pipe = pipe
+ self._fileno = pipe.fileno()
+ self._protocol = protocol
+ self._closing = False
+
+ mode = os.fstat(self._fileno).st_mode
+ if not (stat.S_ISFIFO(mode) or
+ stat.S_ISSOCK(mode) or
+ stat.S_ISCHR(mode)):
+ self._pipe = None
+ self._fileno = None
+ self._protocol = None
+ raise ValueError("Pipe transport is for pipes/sockets only.")
+
+ _set_nonblocking(self._fileno)
+
+ self._loop.call_soon(self._protocol.connection_made, self)
+ # only start reading when connection_made() has been called
+ self._loop.call_soon(self._loop.add_reader,
+ self._fileno, self._read_ready)
+ if waiter is not None:
+ # only wake up the waiter when connection_made() has been called
+ self._loop.call_soon(
+ lambda: None if waiter.cancelled() else waiter.set_result(None))
+
+ def _read_ready(self):
+ try:
+ data = os.read(self._fileno, self.max_size)
+ except (BlockingIOError, InterruptedError):
+ pass
+ except OSError as exc:
+ self._fatal_error(exc, 'Fatal read error on pipe transport')
+ else:
+ if data:
+ self._protocol.data_received(data)
+ else:
+ self._closing = True
+ self._loop.remove_reader(self._fileno)
+ self._loop.call_soon(self._protocol.eof_received)
+ self._loop.call_soon(self._call_connection_lost, None)
+
+ def pause_reading(self):
+ self._loop.remove_reader(self._fileno)
+
+ def resume_reading(self):
+ self._loop.add_reader(self._fileno, self._read_ready)
+
+ def set_protocol(self, protocol):
+ self._protocol = protocol
+
+ def get_protocol(self):
+ return self._protocol
+
+ def is_closing(self):
+ return self._closing
+
+ def close(self):
+ if not self._closing:
+ self._close(None)
+
+ def _fatal_error(self, exc, message='Fatal error on pipe transport'):
+ # should be called by exception handler only
+ if (isinstance(exc, OSError) and exc.errno == errno.EIO):
+ if self._loop.get_debug():
+ logging.debug("%r: %s", self, message, exc_info=True)
+ else:
+ self._loop.call_exception_handler({
+ 'message': message,
+ 'exception': exc,
+ 'transport': self,
+ 'protocol': self._protocol,
+ })
+ self._close(exc)
+
+ def _close(self, exc):
+ self._closing = True
+ self._loop.remove_reader(self._fileno)
+ self._loop.call_soon(self._call_connection_lost, exc)
+
+ def _call_connection_lost(self, exc):
+ try:
+ self._protocol.connection_lost(exc)
+ finally:
+ self._pipe.close()
+ self._pipe = None
+ self._protocol = None
+ self._loop = None
+
+
+class _UnixWritePipeTransport(_FlowControlMixin, _WriteTransport):
+ """
+ This is identical to the standard library's private
+ asyncio.unix_events._UnixWritePipeTransport class, except that it
+ only calls public AbstractEventLoop methods.
+ """
+
+ def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
+ super().__init__(extra, loop)
+ self._extra['pipe'] = pipe
+ self._pipe = pipe
+ self._fileno = pipe.fileno()
+ self._protocol = protocol
+ self._buffer = bytearray()
+ self._conn_lost = 0
+ self._closing = False # Set when close() or write_eof() called.
+
+ mode = os.fstat(self._fileno).st_mode
+ is_char = stat.S_ISCHR(mode)
+ is_fifo = stat.S_ISFIFO(mode)
+ is_socket = stat.S_ISSOCK(mode)
+ if not (is_char or is_fifo or is_socket):
+ self._pipe = None
+ self._fileno = None
+ self._protocol = None
+ raise ValueError("Pipe transport is only for "
+ "pipes, sockets and character devices")
+
+ _set_nonblocking(self._fileno)
+ self._loop.call_soon(self._protocol.connection_made, self)
+
+ # On AIX, the reader trick (to be notified when the read end of the
+ # socket is closed) only works for sockets. On other platforms it
+ # works for pipes and sockets. (Exception: OS X 10.4? Issue #19294.)
+ if is_socket or (is_fifo and not sys.platform.startswith("aix")):
+ # only start reading when connection_made() has been called
+ self._loop.call_soon(self._loop.add_reader,
+ self._fileno, self._read_ready)
+
+ if waiter is not None:
+ # only wake up the waiter when connection_made() has been called
+ self._loop.call_soon(
+ lambda: None if waiter.cancelled() else waiter.set_result(None))
+
+ def get_write_buffer_size(self):
+ return len(self._buffer)
+
+ def _read_ready(self):
+ # Pipe was closed by peer.
+ if self._loop.get_debug():
+ logging.info("%r was closed by peer", self)
+ if self._buffer:
+ self._close(BrokenPipeError())
+ else:
+ self._close()
+
+ def write(self, data):
+ assert isinstance(data, (bytes, bytearray, memoryview)), repr(data)
+ if isinstance(data, bytearray):
+ data = memoryview(data)
+ if not data:
+ return
+
+ if self._conn_lost or self._closing:
+ self._conn_lost += 1
+ return
+
+ if not self._buffer:
+ # Attempt to send it right away first.
+ try:
+ n = os.write(self._fileno, data)
+ except (BlockingIOError, InterruptedError):
+ n = 0
+ except Exception as exc:
+ self._conn_lost += 1
+ self._fatal_error(exc, 'Fatal write error on pipe transport')
+ return
+ if n == len(data):
+ return
+ elif n > 0:
+ data = memoryview(data)[n:]
+ self._loop.add_writer(self._fileno, self._write_ready)
+
+ self._buffer += data
+ self._maybe_pause_protocol()
+
+ def _write_ready(self):
+ assert self._buffer, 'Data should not be empty'
+
+ try:
+ n = os.write(self._fileno, self._buffer)
+ except (BlockingIOError, InterruptedError):
+ pass
+ except Exception as exc:
+ self._buffer.clear()
+ self._conn_lost += 1
+ # Remove the writer here; _fatal_error() doesn't do it
+ # because _buffer is empty.
+ self._loop.remove_writer(self._fileno)
+ self._fatal_error(exc, 'Fatal write error on pipe transport')
+ else:
+ if n == len(self._buffer):
+ self._buffer.clear()
+ self._loop.remove_writer(self._fileno)
+ self._maybe_resume_protocol() # May append to buffer.
+ if self._closing:
+ self._loop.remove_reader(self._fileno)
+ self._call_connection_lost(None)
+ return
+ elif n > 0:
+ del self._buffer[:n]
+
+ def can_write_eof(self):
+ return True
+
+ def write_eof(self):
+ if self._closing:
+ return
+ assert self._pipe
+ self._closing = True
+ if not self._buffer:
+ self._loop.remove_reader(self._fileno)
+ self._loop.call_soon(self._call_connection_lost, None)
+
+ def set_protocol(self, protocol):
+ self._protocol = protocol
+
+ def get_protocol(self):
+ return self._protocol
+
+ def is_closing(self):
+ return self._closing
+
+ def close(self):
+ if self._pipe is not None and not self._closing:
+ # write_eof is all we need to close the write pipe
+ self.write_eof()
+
+ def abort(self):
+ self._close(None)
+
+ def _fatal_error(self, exc, message='Fatal error on pipe transport'):
+ # should be called by exception handler only
+ if isinstance(exc,
+ (BrokenPipeError, ConnectionResetError, ConnectionAbortedError)):
+ if self._loop.get_debug():
+ logging.debug("%r: %s", self, message, exc_info=True)
+ else:
+ self._loop.call_exception_handler({
+ 'message': message,
+ 'exception': exc,
+ 'transport': self,
+ 'protocol': self._protocol,
+ })
+ self._close(exc)
+
+ def _close(self, exc=None):
+ self._closing = True
+ if self._buffer:
+ self._loop.remove_writer(self._fileno)
+ self._buffer.clear()
+ self._loop.remove_reader(self._fileno)
+ self._loop.call_soon(self._call_connection_lost, exc)
+
+ def _call_connection_lost(self, exc):
+ try:
+ self._protocol.connection_lost(exc)
+ finally:
+ self._pipe.close()
+ self._pipe = None
+ self._protocol = None
+ self._loop = None
+
+
+if hasattr(os, 'set_inheritable'):
+ # Python 3.4 and newer
+ _set_inheritable = os.set_inheritable
+else:
+ def _set_inheritable(fd, inheritable):
+ cloexec_flag = getattr(fcntl, 'FD_CLOEXEC', 1)
+
+ old = fcntl.fcntl(fd, fcntl.F_GETFD)
+ if not inheritable:
+ fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
+ else:
+ fcntl.fcntl(fd, fcntl.F_SETFD, old & ~cloexec_flag)
+
+
+class _UnixSubprocessTransport(_BaseSubprocessTransport):
+ """
+ This is identical to the standard library's private
+ asyncio.unix_events._UnixSubprocessTransport class, except that it
+ only calls public AbstractEventLoop methods.
+ """
+ def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
+ stdin_w = None
+ if stdin == subprocess.PIPE:
+ # Use a socket pair for stdin, since not all platforms
+ # support selecting read events on the write end of a
+ # socket (which we use in order to detect closing of the
+ # other end). Notably this is needed on AIX, and works
+ # just fine on other platforms.
+ stdin, stdin_w = socket.socketpair()
+
+ # Mark the write end of the stdin pipe as non-inheritable,
+ # needed by close_fds=False on Python 3.3 and older
+ # (Python 3.4 implements the PEP 446, socketpair returns
+ # non-inheritable sockets)
+ _set_inheritable(stdin_w.fileno(), False)
+ self._proc = subprocess.Popen(
+ args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
+ universal_newlines=False, bufsize=bufsize, **kwargs)
+ if stdin_w is not None:
+ stdin.close()
+ self._proc.stdin = os.fdopen(stdin_w.detach(), 'wb', bufsize)
+
+
+class AbstractChildWatcher(_AbstractChildWatcher):
+ def add_child_handler(self, pid, callback, *args):
+ raise NotImplementedError()
+
+ def remove_child_handler(self, pid):
+ raise NotImplementedError()
+
+ def attach_loop(self, loop):
+ raise NotImplementedError()
+
+ def close(self):
+ raise NotImplementedError()
+
+ def __enter__(self):
+ raise NotImplementedError()
+
+ def __exit__(self, a, b, c):
+ raise NotImplementedError()
+
+
+class _PortageChildWatcher(_AbstractChildWatcher):
+ def __init__(self, loop):
+ """
+ @type loop: EventLoop
+ @param loop: an instance of portage's internal event loop
+ """
+ self._loop = loop
+ self._callbacks = {}
+
+ def close(self):
+ pass
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, a, b, c):
+ pass
+
+ def _child_exit(self, pid, status, data):
+ self._callbacks.pop(pid)
+ callback, args = data
+ callback(pid, self._compute_returncode(status), *args)
+
+ def _compute_returncode(self, status):
+ if os.WIFSIGNALED(status):
+ return -os.WTERMSIG(status)
+ elif os.WIFEXITED(status):
+ return os.WEXITSTATUS(status)
+ else:
+ return status
+
+ def add_child_handler(self, pid, callback, *args):
+ """
+ Register a new child handler.
+
+ Arrange for callback(pid, returncode, *args) to be called when
+ process 'pid' terminates. Specifying another callback for the same
+ process replaces the previous handler.
+ """
+ source_id = self._callbacks.get(pid)
+ if source_id is not None:
+ self._loop.source_remove(source_id)
+ self._callbacks[pid] = self._loop.child_watch_add(
+ pid, self._child_exit, data=(callback, args))
+
+ def remove_child_handler(self, pid):
+ """
+ Removes the handler for process 'pid'.
+
+ The function returns True if the handler was successfully removed,
+ False if there was nothing to remove.
+ """
+ source_id = self._callbacks.pop(pid, None)
+ if source_id is not None:
+ return self._loop.source_remove(source_id)
+ return False
+
+
+class _PortageEventLoopPolicy(events.AbstractEventLoopPolicy):
+ """
+ Implementation of asyncio.AbstractEventLoopPolicy based on portage's
+ internal event loop. This supports running event loops in forks,
+ which is not supported by the default asyncio event loop policy,
+ see https://bugs.python.org/issue22087.
+ """
+ def get_event_loop(self):
+ """
+ Get the event loop for the current context.
+
+ Returns an event loop object implementing the AbstractEventLoop
+ interface.
+
+ @rtype: asyncio.AbstractEventLoop (or compatible)
+ @return: the current event loop
+ """
+ return _global_event_loop()._asyncio_wrapper
+
+ def get_child_watcher(self):
+ """Get the watcher for child processes."""
+ return _global_event_loop()._asyncio_child_watcher
+
+
+class _AsyncioEventLoopPolicy(_PortageEventLoopPolicy):
+ """
+ A subclass of _PortageEventLoopPolicy which raises
+ NotImplementedError if it is set as the real asyncio event loop
+ policy, since this class is intended to *wrap* the real asyncio
+ event loop policy.
+ """
+ def _check_recursion(self):
+ if _real_asyncio.get_event_loop_policy() is self:
+ raise NotImplementedError('this class is only a wrapper')
+
+ def get_event_loop(self):
+ self._check_recursion()
+ return super(_AsyncioEventLoopPolicy, self).get_event_loop()
+
+ def get_child_watcher(self):
+ self._check_recursion()
+ return super(_AsyncioEventLoopPolicy, self).get_child_watcher()
+
+
+DefaultEventLoopPolicy = (_AsyncioEventLoopPolicy if _asyncio_enabled
+ else _PortageEventLoopPolicy)
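
In practice the policy's get_event_loop() hands callers the asyncio-compatible wrapper of portage's internal event loop, and all work goes through the public AbstractEventLoop methods. A minimal sketch (the scheduled value is illustrative):

    from portage.util._eventloop.global_event_loop import global_event_loop

    loop = global_event_loop()._asyncio_wrapper  # a _PortageEventLoop instance
    future = loop.create_future()
    loop.call_soon(future.set_result, 'done')
    print(loop.run_until_complete(future))       # 'done'
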
diff --git a/lib/portage/util/install_mask.py b/lib/portage/util/install_mask.py
new file mode 100644
index 000000000..32627eb05
--- /dev/null
+++ b/lib/portage/util/install_mask.py
@@ -0,0 +1,128 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['install_mask_dir', 'InstallMask']
+
+import errno
+import fnmatch
+import sys
+
+from portage import os, _unicode_decode
+from portage.exception import (
+ OperationNotPermitted, PermissionDenied, FileNotFound)
+from portage.util import normalize_path
+
+if sys.hexversion >= 0x3000000:
+ _unicode = str
+else:
+ _unicode = unicode
+
+
+class InstallMask(object):
+ def __init__(self, install_mask):
+ """
+ @param install_mask: INSTALL_MASK value
+ @type install_mask: str
+ """
+ self._install_mask = install_mask.split()
+
+ def match(self, path):
+ """
+ @param path: file path relative to ${ED}
+ @type path: str
+ @rtype: bool
+ @return: True if path matches INSTALL_MASK, False otherwise
+ """
+ ret = False
+ for pattern in self._install_mask:
+ # if pattern starts with -, possibly exclude this path
+ is_inclusive = not pattern.startswith('-')
+ if not is_inclusive:
+ pattern = pattern[1:]
+ # absolute path pattern
+ if pattern.startswith('/'):
+ # handle trailing slash for explicit directory match
+ if path.endswith('/'):
+ pattern = pattern.rstrip('/') + '/'
+ # match either the exact path, or treat the pattern as one of the
+ # path's parent dirs (the latter is done by matching pattern/*)
+ if (fnmatch.fnmatch(path, pattern[1:])
+ or fnmatch.fnmatch(path, pattern[1:].rstrip('/') + '/*')):
+ ret = is_inclusive
+ # filename
+ else:
+ if fnmatch.fnmatch(os.path.basename(path), pattern):
+ ret = is_inclusive
+ return ret
+
+
+_exc_map = {
+ errno.ENOENT: FileNotFound,
+ errno.EPERM: OperationNotPermitted,
+ errno.EACCES: PermissionDenied,
+}
+
+
+def _raise_exc(e):
+ """
+ Wrap OSError with portage.exception wrapper exceptions, with
+ __cause__ chaining when python supports it.
+
+ @param e: os exception
+ @type e: OSError
+ @raise PortageException: portage.exception wrapper exception
+ """
+ wrapper_cls = _exc_map.get(e.errno)
+ if wrapper_cls is None:
+ raise
+ wrapper = wrapper_cls(_unicode(e))
+ wrapper.__cause__ = e
+ raise wrapper
+
+
+def install_mask_dir(base_dir, install_mask, onerror=None):
+ """
+ Remove files and directories matched by INSTALL_MASK.
+
+ @param base_dir: directory path corresponding to ${ED}
+ @type base_dir: str
+ @param install_mask: INSTALL_MASK configuration
+ @type install_mask: InstallMask
+ """
+ onerror = onerror or _raise_exc
+ base_dir = normalize_path(base_dir)
+ base_dir_len = len(base_dir) + 1
+ dir_stack = []
+
+ # Remove masked files.
+ for parent, dirs, files in os.walk(base_dir, onerror=onerror):
+ try:
+ parent = _unicode_decode(parent, errors='strict')
+ except UnicodeDecodeError:
+ continue
+ dir_stack.append(parent)
+ for fname in files:
+ try:
+ fname = _unicode_decode(fname, errors='strict')
+ except UnicodeDecodeError:
+ continue
+ abs_path = os.path.join(parent, fname)
+ relative_path = abs_path[base_dir_len:]
+ if install_mask.match(relative_path):
+ try:
+ os.unlink(abs_path)
+ except OSError as e:
+ onerror(e)
+
+ # Remove masked dirs (unless non-empty due to exclusions).
+ while True:
+ try:
+ dir_path = dir_stack.pop()
+ except IndexError:
+ break
+
+ if install_mask.match(dir_path[base_dir_len:] + '/'):
+ try:
+ os.rmdir(dir_path)
+ except OSError:
+ pass
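
A short sketch of the matching rules implemented by InstallMask.match(); the INSTALL_MASK value and the paths (relative to ${ED}, as the docstring requires) are illustrative:

    from portage.util.install_mask import InstallMask

    mask = InstallMask('/usr/share/doc -/usr/share/doc/mypkg charset.alias')

    mask.match('usr/share/doc/other/README')  # True: under /usr/share/doc
    mask.match('usr/share/doc/mypkg/README')  # False: excluded by the - pattern
    mask.match('usr/lib/charset.alias')       # True: bare filename pattern
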
diff --git a/lib/portage/util/iterators/MultiIterGroupBy.py b/lib/portage/util/iterators/MultiIterGroupBy.py
new file mode 100644
index 000000000..2c31f269f
--- /dev/null
+++ b/lib/portage/util/iterators/MultiIterGroupBy.py
@@ -0,0 +1,94 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import bisect
+
+class MultiIterGroupBy(object):
+ """
+ This class functions similarly to the itertools.groupby function,
+ except that it takes multiple source iterators as input. The source
+ iterators must yield objects in sorted order. A group is yielded as
+ soon as the progress of all iterators reaches a state which
+ guarantees that there can not be any remaining (unseen) elements of
+ the group. This is useful for incremental display of grouped search
+ results.
+ """
+
+ def __init__(self, iterators, key=None):
+ self._iterators = iterators
+ self._key = key
+
+ def __iter__(self):
+
+ trackers = []
+ for iterator in self._iterators:
+ trackers.append(_IteratorTracker(iterator))
+
+ key_map = {}
+ key_list = []
+ eof = []
+ key_getter = self._key
+ if key_getter is None:
+ key_getter = lambda x: x
+ min_progress = None
+
+ while trackers:
+
+ for tracker in trackers:
+
+ if tracker.current is not None and \
+ tracker.current != min_progress:
+ # The trackers are sorted by progress, so the
+ # remaining trackers are guaranteed to have
+ # sufficient progress.
+ break
+
+ # In order to avoid over-buffering (waste of memory),
+ # only grab a single entry.
+ try:
+ entry = next(tracker.iterator)
+ except StopIteration:
+ eof.append(tracker)
+ else:
+ tracker.current = key_getter(entry)
+ key_group = key_map.get(tracker.current)
+ if key_group is None:
+ key_group = []
+ key_map[tracker.current] = key_group
+ bisect.insort(key_list, tracker.current)
+ key_group.append(entry)
+
+ if eof:
+ for tracker in eof:
+ trackers.remove(tracker)
+ del eof[:]
+
+ if trackers:
+ trackers.sort()
+ min_progress = trackers[0].current
+ # yield if key <= min_progress
+ i = bisect.bisect_right(key_list, min_progress)
+ yield_these = key_list[:i]
+ del key_list[:i]
+ else:
+ yield_these = key_list
+ key_list = []
+
+ if yield_these:
+ for k in yield_these:
+ yield key_map.pop(k)
+
+class _IteratorTracker(object):
+
+ __slots__ = ('current', 'iterator')
+
+ def __init__(self, iterator):
+
+ self.iterator = iterator
+ self.current = None
+
+ def __lt__(self, other):
+ if self.current is None:
+ return other.current is not None
+ return other.current is not None and \
+ self.current < other.current
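
A small sketch of the incremental grouping, using two pre-sorted iterators and the default identity key:

    from portage.util.iterators.MultiIterGroupBy import MultiIterGroupBy

    a = iter([1, 2, 4])
    b = iter([1, 3, 4])
    for group in MultiIterGroupBy([a, b]):
        print(group)
    # prints [1, 1], then [2], then [3], then [4, 4]
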
diff --git a/lib/portage/util/iterators/__init__.py b/lib/portage/util/iterators/__init__.py
new file mode 100644
index 000000000..7cd880e11
--- /dev/null
+++ b/lib/portage/util/iterators/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/util/lafilefixer.py b/lib/portage/util/lafilefixer.py
new file mode 100644
index 000000000..110010363
--- /dev/null
+++ b/lib/portage/util/lafilefixer.py
@@ -0,0 +1,185 @@
+# Copyright 2010-2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os as _os
+import re
+
+from portage import _unicode_decode
+from portage.exception import InvalidData
+
+#########################################################
+ # This is a re-implementation of dev-util/lafilefixer-0.5.
+ # rewrite_lafile() takes the contents of an .la file as a byte string.
+# It then parses the dependency_libs and inherited_linker_flags
+# entries.
+# We insist on dependency_libs being present. inherited_linker_flags
+# is optional.
+# There are strict rules about the syntax imposed by libtool's libltdl.
+# See 'parse_dotla_file' and 'trim' functions in libltdl/ltdl.c.
+# Note that duplicated entries of dependency_libs and inherited_linker_flags
+ # are ignored by libtool (last one wins), but we treat it as an error (like
+# lafilefixer does).
+# What it does:
+ # * Replaces all .la files with absolute paths in dependency_libs with
+# corresponding -l* and -L* entries
+# (/usr/lib64/libfoo.la -> -L/usr/lib64 -lfoo)
+# * Moves various flags (see flag_re below) to inherited_linker_flags,
+# if such an entry was present.
+# * Reorders dependency_libs such that all -R* entries precede -L* entries
+# and these precede all other entries.
+ # * Removes duplicated entries from dependency_libs
+ # * Takes care that no entry is added to inherited_linker_flags if it is
+ # already there.
+#########################################################
+
+#These regexes are used to parse the interesting entries in the la file
+dep_libs_re = re.compile(b"dependency_libs='(?P<value>[^']*)'$")
+inh_link_flags_re = re.compile(b"inherited_linker_flags='(?P<value>[^']*)'$")
+
+#regexes for replacing stuff in -L entries.
+ #replace 'X11R6/lib' and 'local/lib' with 'lib'; no idea what this is about.
+X11_local_sub = re.compile(b"X11R6/lib|local/lib")
+#get rid of the '..'
+pkgconfig_sub1 = re.compile(br"usr/lib[^/]*/pkgconfig/\.\./\.\.")
+pkgconfig_sub2 = re.compile(br"(?P<usrlib>usr/lib[^/]*)/pkgconfig/\.\.")
+
+#detect flags that should go into inherited_linker_flags instead of dependency_libs
+flag_re = re.compile(b"-mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe|-threads")
+
+def _parse_lafile_contents(contents):
+ """
+ Parses 'dependency_libs' and 'inherited_linker_flags' lines.
+ """
+
+ dep_libs = None
+ inh_link_flags = None
+
+ for line in contents.split(b"\n"):
+ m = dep_libs_re.match(line)
+ if m:
+ if dep_libs is not None:
+ raise InvalidData("duplicated dependency_libs entry")
+ dep_libs = m.group("value")
+ continue
+
+ m = inh_link_flags_re.match(line)
+ if m:
+ if inh_link_flags is not None:
+ raise InvalidData("duplicated inherited_linker_flags entry")
+ inh_link_flags = m.group("value")
+ continue
+
+ return dep_libs, inh_link_flags
+
+def rewrite_lafile(contents):
+ """
+ Given the contents of an .la file, parse and fix it.
+ This operates with strings of raw bytes (assumed to contain some ascii
+ characters), in order to avoid any potential character encoding issues.
+ Raises 'InvalidData' if the .la file is invalid.
+ @param contents: the contents of a libtool archive file
+ @type contents: bytes
+ @rtype: tuple
+ @return: (True, fixed_contents) if something needed to be
+ fixed, (False, None) otherwise.
+ """
+ #Parse the 'dependency_libs' and 'inherited_linker_flags' lines.
+ dep_libs, inh_link_flags = \
+ _parse_lafile_contents(contents)
+
+ if dep_libs is None:
+ raise InvalidData("missing or invalid dependency_libs")
+
+ new_dep_libs = []
+ new_inh_link_flags = []
+ librpath = []
+ libladir = []
+
+ if inh_link_flags is not None:
+ new_inh_link_flags = inh_link_flags.split()
+
+ #Check entries in 'dependency_libs'.
+ for dep_libs_entry in dep_libs.split():
+ if dep_libs_entry.startswith(b"-l"):
+ #-lfoo, keep it
+ if dep_libs_entry not in new_dep_libs:
+ new_dep_libs.append(dep_libs_entry)
+
+ elif dep_libs_entry.endswith(b".la"):
+ #Two cases:
+ #1) /usr/lib64/libfoo.la, turn it into -lfoo and append -L/usr/lib64 to libladir
+ #2) libfoo.la, keep it
+ dir, file = _os.path.split(dep_libs_entry)
+
+ if not dir or not file.startswith(b"lib"):
+ if dep_libs_entry not in new_dep_libs:
+ new_dep_libs.append(dep_libs_entry)
+ else:
+ #/usr/lib64/libfoo.la -> -lfoo
+ lib = b"-l" + file[3:-3]
+ if lib not in new_dep_libs:
+ new_dep_libs.append(lib)
+ #/usr/lib64/libfoo.la -> -L/usr/lib64
+ ladir = b"-L" + dir
+ if ladir not in libladir:
+ libladir.append(ladir)
+
+ elif dep_libs_entry.startswith(b"-L"):
+ #Do some replacement magic and store them in 'libladir'.
+ #This allows us to place all -L entries at the beginning
+ #of 'dependency_libs'.
+ ladir = dep_libs_entry
+
+ ladir = X11_local_sub.sub(b"lib", ladir)
+ ladir = pkgconfig_sub1.sub(b"usr", ladir)
+ ladir = pkgconfig_sub2.sub(br"\g<usrlib>", ladir)
+
+ if ladir not in libladir:
+ libladir.append(ladir)
+
+ elif dep_libs_entry.startswith(b"-R"):
+ if dep_libs_entry not in librpath:
+ librpath.append(dep_libs_entry)
+
+ elif flag_re.match(dep_libs_entry):
+ #All this stuff goes into inh_link_flags, if the la file has such an entry.
+ #If it doesn't, they stay in 'dependency_libs'.
+ if inh_link_flags is not None:
+ if dep_libs_entry not in new_inh_link_flags:
+ new_inh_link_flags.append(dep_libs_entry)
+ else:
+ if dep_libs_entry not in new_dep_libs:
+ new_dep_libs.append(dep_libs_entry)
+
+ else:
+ raise InvalidData("Error: Unexpected entry '%s' in 'dependency_libs'" \
+ % _unicode_decode(dep_libs_entry))
+
+ #What should 'dependency_libs' and 'inherited_linker_flags' look like?
+ expected_dep_libs = b""
+ for x in (librpath, libladir, new_dep_libs):
+ if x:
+ expected_dep_libs += b" " + b" ".join(x)
+
+ expected_inh_link_flags = b""
+ if new_inh_link_flags:
+ expected_inh_link_flags += b" " + b" ".join(new_inh_link_flags)
+
+ #Don't touch the file if we don't need to, otherwise put the expected values into
+ #'contents' and write it into the la file.
+
+ changed = False
+ if dep_libs != expected_dep_libs:
+ contents = contents.replace(b"dependency_libs='" + dep_libs + b"'", \
+ b"dependency_libs='" + expected_dep_libs + b"'")
+ changed = True
+
+ if inh_link_flags is not None and expected_inh_link_flags != inh_link_flags:
+ contents = contents.replace(b"inherited_linker_flags='" + inh_link_flags + b"'", \
+ b"inherited_linker_flags='" + expected_inh_link_flags + b"'")
+ changed = True
+
+ if changed:
+ return True, contents
+ else:
+ return False, None
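
A sketch of rewrite_lafile() on a fabricated .la fragment, showing the absolute .la reference being rewritten and the -pthread flag migrating to inherited_linker_flags:

    from portage.util.lafilefixer import rewrite_lafile

    contents = (b"dependency_libs=' /usr/lib64/libbar.la -pthread'\n"
                b"inherited_linker_flags=''\n")
    changed, fixed = rewrite_lafile(contents)
    # changed is True, and fixed now contains:
    #   dependency_libs=' -L/usr/lib64 -lbar'
    #   inherited_linker_flags=' -pthread'
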
diff --git a/lib/portage/util/listdir.py b/lib/portage/util/listdir.py
new file mode 100644
index 000000000..2012e145f
--- /dev/null
+++ b/lib/portage/util/listdir.py
@@ -0,0 +1,139 @@
+# Copyright 2010-2013 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['cacheddir', 'listdir']
+
+import errno
+import stat
+import sys
+
+if sys.hexversion < 0x3000000:
+ from itertools import izip as zip
+
+from portage import os
+from portage.const import VCS_DIRS
+from portage.exception import DirectoryNotFound, PermissionDenied, PortageException
+from portage.util import normalize_path
+
+# The global dircache is no longer supported, since it could
+# be a memory leak for API consumers. Any cacheddir callers
+# should use higher-level caches instead, when necessary.
+# TODO: Remove dircache variable after stable portage does
+ # not use it (keep it for now, in case API consumers clear
+# it manually).
+dircache = {}
+
+def cacheddir(my_original_path, ignorecvs, ignorelist, EmptyOnError, followSymlinks=True):
+ mypath = normalize_path(my_original_path)
+ try:
+ pathstat = os.stat(mypath)
+ if not stat.S_ISDIR(pathstat.st_mode):
+ raise DirectoryNotFound(mypath)
+ except EnvironmentError as e:
+ if e.errno == PermissionDenied.errno:
+ raise PermissionDenied(mypath)
+ del e
+ return [], []
+ except PortageException:
+ return [], []
+ else:
+ try:
+ fpaths = os.listdir(mypath)
+ except EnvironmentError as e:
+ if e.errno != errno.EACCES:
+ raise
+ del e
+ raise PermissionDenied(mypath)
+ ftype = []
+ for x in fpaths:
+ try:
+ if followSymlinks:
+ pathstat = os.stat(mypath+"/"+x)
+ else:
+ pathstat = os.lstat(mypath+"/"+x)
+
+ if stat.S_ISREG(pathstat[stat.ST_MODE]):
+ ftype.append(0)
+ elif stat.S_ISDIR(pathstat[stat.ST_MODE]):
+ ftype.append(1)
+ elif stat.S_ISLNK(pathstat[stat.ST_MODE]):
+ ftype.append(2)
+ else:
+ ftype.append(3)
+ except (IOError, OSError):
+ ftype.append(3)
+
+ if ignorelist or ignorecvs:
+ ret_list = []
+ ret_ftype = []
+ for file_path, file_type in zip(fpaths, ftype):
+ if file_path in ignorelist:
+ pass
+ elif ignorecvs:
+ if file_path[:2] != ".#" and \
+ not (file_type == 1 and file_path in VCS_DIRS):
+ ret_list.append(file_path)
+ ret_ftype.append(file_type)
+ else:
+ ret_list = fpaths
+ ret_ftype = ftype
+
+ return ret_list, ret_ftype
+
+def listdir(mypath, recursive=False, filesonly=False, ignorecvs=False, ignorelist=[], followSymlinks=True,
+ EmptyOnError=False, dirsonly=False):
+ """
+ Portage-specific implementation of os.listdir
+
+ @param mypath: Path whose contents you wish to list
+ @type mypath: String
+ @param recursive: Recursively scan directories contained within mypath
+ @type recursive: Boolean
+ @param filesonly: Only return files, not directories
+ @type filesonly: Boolean
+ @param ignorecvs: Ignore VCS directories
+ @type ignorecvs: Boolean
+ @param ignorelist: List of filenames/directories to exclude
+ @type ignorelist: List
+ @param followSymlinks: Follow Symlink'd files and directories
+ @type followSymlinks: Boolean
+ @param EmptyOnError: Return [] if an error occurs (deprecated, always True)
+ @type EmptyOnError: Boolean
+ @param dirsonly: Only return directories.
+ @type dirsonly: Boolean
+ @rtype: List
+ @return: A list of files and directories (or just files or just directories) or an empty list.
+ """
+
+ fpaths, ftype = cacheddir(mypath, ignorecvs, ignorelist, EmptyOnError, followSymlinks)
+
+ if fpaths is None:
+ fpaths = []
+ if ftype is None:
+ ftype = []
+
+ if not (filesonly or dirsonly or recursive):
+ return fpaths
+
+ if recursive:
+ stack = list(zip(fpaths, ftype))
+ fpaths = []
+ ftype = []
+ while stack:
+ file_path, file_type = stack.pop()
+ fpaths.append(file_path)
+ ftype.append(file_type)
+ if file_type == 1:
+ subdir_list, subdir_types = cacheddir(
+ os.path.join(mypath, file_path), ignorecvs,
+ ignorelist, EmptyOnError, followSymlinks)
+ stack.extend((os.path.join(file_path, x), x_type)
+ for x, x_type in zip(subdir_list, subdir_types))
+
+ if filesonly:
+ fpaths = [x for x, x_type in zip(fpaths, ftype) if x_type == 0]
+
+ elif dirsonly:
+ fpaths = [x for x, x_type in zip(fpaths, ftype) if x_type == 1]
+
+ return fpaths
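
Typical calls look like the following sketch (the path is a placeholder):

    from portage.util.listdir import listdir

    # top-level entries, files and directories mixed
    entries = listdir('/etc/portage')

    # regular files only, recursing into subdirectories and skipping VCS dirs
    files = listdir('/etc/portage', recursive=True, filesonly=True, ignorecvs=True)
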
diff --git a/lib/portage/util/locale.py b/lib/portage/util/locale.py
new file mode 100644
index 000000000..5b09945d6
--- /dev/null
+++ b/lib/portage/util/locale.py
@@ -0,0 +1,144 @@
+#-*- coding:utf-8 -*-
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+"""
+Function to check whether the current used LC_CTYPE handles case
+transformations of ASCII characters in a way compatible with the POSIX
+locale.
+"""
+from __future__ import absolute_import, unicode_literals
+
+import locale
+import logging
+import os
+import textwrap
+import traceback
+
+import portage
+from portage.util import _unicode_decode, writemsg_level
+from portage.util._ctypes import find_library, LoadLibrary
+
+
+locale_categories = (
+ 'LC_COLLATE', 'LC_CTYPE', 'LC_MONETARY', 'LC_MESSAGES',
+ 'LC_NUMERIC', 'LC_TIME',
+ # GNU extensions
+ 'LC_ADDRESS', 'LC_IDENTIFICATION', 'LC_MEASUREMENT', 'LC_NAME',
+ 'LC_PAPER', 'LC_TELEPHONE',
+)
+
+_check_locale_cache = {}
+
+
+def _check_locale(silent):
+ """
+ The inner locale check function.
+ """
+ try:
+ from portage.util import libc
+ except ImportError:
+ libc_fn = find_library("c")
+ if libc_fn is None:
+ return None
+ libc = LoadLibrary(libc_fn)
+ if libc is None:
+ return None
+
+ lc = list(range(ord('a'), ord('z')+1))
+ uc = list(range(ord('A'), ord('Z')+1))
+ rlc = [libc.tolower(c) for c in uc]
+ ruc = [libc.toupper(c) for c in lc]
+
+ if lc != rlc or uc != ruc:
+ if silent:
+ return False
+
+ msg = ("WARNING: The LC_CTYPE variable is set to a locale " +
+ "that specifies transformation between lowercase " +
+ "and uppercase ASCII characters that is different than " +
+ "the one specified by POSIX locale. This can break " +
+ "ebuilds and cause issues in programs that rely on " +
+ "the common character conversion scheme. " +
+ "Please consider enabling another locale (such as " +
+ "en_US.UTF-8) in /etc/locale.gen and setting it " +
+ "as LC_CTYPE in make.conf.")
+ msg = [l for l in textwrap.wrap(msg, 70)]
+ msg.append("")
+ chars = lambda l: ''.join(_unicode_decode(chr(x)) for x in l)
+ if uc != ruc:
+ msg.extend([
+ " %s -> %s" % (chars(lc), chars(ruc)),
+ " %28s: %s" % ('expected', chars(uc))])
+ if lc != rlc:
+ msg.extend([
+ " %s -> %s" % (chars(uc), chars(rlc)),
+ " %28s: %s" % ('expected', chars(lc))])
+ writemsg_level("".join(["!!! %s\n" % l for l in msg]),
+ level=logging.ERROR, noiselevel=-1)
+ return False
+
+ return True
+
+
+def check_locale(silent=False, env=None):
+ """
+ Check whether the locale is sane. Returns True if it is, prints
+ a warning and returns False if it is not. Returns None if the check
+ cannot be executed due to platform limitations.
+ """
+
+ if env is not None:
+ for v in ("LC_ALL", "LC_CTYPE", "LANG"):
+ if v in env:
+ mylocale = env[v]
+ break
+ else:
+ mylocale = "C"
+
+ try:
+ return _check_locale_cache[mylocale]
+ except KeyError:
+ pass
+
+ pid = os.fork()
+ if pid == 0:
+ try:
+ if env is not None:
+ try:
+ locale.setlocale(locale.LC_CTYPE,
+ portage._native_string(mylocale))
+ except locale.Error:
+ os._exit(2)
+
+ ret = _check_locale(silent)
+ if ret is None:
+ os._exit(2)
+ else:
+ os._exit(0 if ret else 1)
+ except Exception:
+ traceback.print_exc()
+ os._exit(2)
+
+ pid2, ret = os.waitpid(pid, 0)
+ assert pid == pid2
+ pyret = None
+ if os.WIFEXITED(ret):
+ ret = os.WEXITSTATUS(ret)
+ if ret != 2:
+ pyret = ret == 0
+
+ if env is not None:
+ _check_locale_cache[mylocale] = pyret
+ return pyret
+
+
+def split_LC_ALL(env):
+ """
+ Replace LC_ALL with split-up LC_* variables if it is defined.
+ Works on the passed environment (or settings instance).
+ """
+ lc_all = env.get("LC_ALL")
+ if lc_all is not None:
+ for c in locale_categories:
+ env[c] = lc_all
+ del env["LC_ALL"]
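
A sketch of how these helpers are combined (the environment values are illustrative):

    from portage.util.locale import check_locale, split_LC_ALL

    env = {'LC_ALL': 'en_US.UTF-8'}
    split_LC_ALL(env)  # env now carries LC_CTYPE, LC_COLLATE, ... and no LC_ALL

    ok = check_locale(silent=True, env=env)
    # True if LC_CTYPE case transformations match the POSIX locale,
    # False if they do not, None if the check could not be run
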
diff --git a/lib/portage/util/monotonic.py b/lib/portage/util/monotonic.py
new file mode 100644
index 000000000..e50564851
--- /dev/null
+++ b/lib/portage/util/monotonic.py
@@ -0,0 +1,34 @@
+# Copyright 2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['monotonic']
+
+import time
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+monotonic = getattr(time, 'monotonic', None)
+
+if monotonic is None:
+ def monotonic():
+ """
+ Emulate time.monotonic() which is available in Python 3.3 and later.
+
+ @return: A float expressed in seconds since an epoch.
+ """
+ with monotonic._lock:
+ current = time.time() + monotonic._offset
+ delta = current - monotonic._previous
+ if delta < 0:
+ monotonic._offset -= delta
+ current = monotonic._previous
+ else:
+ monotonic._previous = current
+ return current
+
+ # offset is used to counteract any backward movements
+ monotonic._offset = 0
+ monotonic._previous = time.time()
+ monotonic._lock = threading.Lock()
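
Callers use it exactly like time.monotonic(); only differences between readings are meaningful:

    import time
    from portage.util.monotonic import monotonic

    start = monotonic()
    time.sleep(0.1)                # stand-in for the timed operation
    elapsed = monotonic() - start  # never negative, even if the clock steps back
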
diff --git a/lib/portage/util/movefile.py b/lib/portage/util/movefile.py
new file mode 100644
index 000000000..5477a669f
--- /dev/null
+++ b/lib/portage/util/movefile.py
@@ -0,0 +1,369 @@
+# Copyright 2010-2018 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import absolute_import, unicode_literals
+
+__all__ = ['movefile']
+
+import errno
+import fnmatch
+import os as _os
+import stat
+import sys
+import textwrap
+
+import portage
+from portage import bsd_chflags, _encodings, _os_overrides, _selinux, \
+ _unicode_decode, _unicode_encode, _unicode_func_wrapper, \
+ _unicode_module_wrapper
+from portage.const import MOVE_BINARY
+from portage.exception import OperationNotSupported
+from portage.localization import _
+from portage.process import spawn
+from portage.util import writemsg
+from portage.util._xattr import xattr
+from portage.util.file_copy import copyfile
+
+
+def _apply_stat(src_stat, dest):
+ _os.chown(dest, src_stat.st_uid, src_stat.st_gid)
+ _os.chmod(dest, stat.S_IMODE(src_stat.st_mode))
+
+_xattr_excluder_cache = {}
+
+def _get_xattr_excluder(pattern):
+
+ try:
+ value = _xattr_excluder_cache[pattern]
+ except KeyError:
+ value = _xattr_excluder(pattern)
+ _xattr_excluder_cache[pattern] = value
+
+ return value
+
+class _xattr_excluder(object):
+
+ __slots__ = ('_pattern_split',)
+
+ def __init__(self, pattern):
+
+ if pattern is None:
+ self._pattern_split = None
+ else:
+ pattern = pattern.split()
+ if not pattern:
+ self._pattern_split = None
+ else:
+ pattern.sort()
+ self._pattern_split = tuple(pattern)
+
+ def __call__(self, attr):
+
+ if self._pattern_split is None:
+ return False
+
+ match = fnmatch.fnmatch
+ for x in self._pattern_split:
+ if match(attr, x):
+ return True
+
+ return False
+
+def _copyxattr(src, dest, exclude=None):
+ """Copy the extended attributes from |src| to |dest|"""
+ try:
+ attrs = xattr.list(src)
+ except (OSError, IOError) as e:
+ if e.errno != OperationNotSupported.errno:
+ raise
+ attrs = ()
+
+ if attrs:
+ if exclude is not None and isinstance(attrs[0], bytes):
+ exclude = exclude.encode(_encodings['fs'])
+ exclude = _get_xattr_excluder(exclude)
+
+ for attr in attrs:
+ if exclude(attr):
+ continue
+ try:
+ xattr.set(dest, attr, xattr.get(src, attr))
+ raise_exception = False
+ except (OSError, IOError):
+ raise_exception = True
+ if raise_exception:
+ raise OperationNotSupported(_("Filesystem containing file '%s' "
+ "does not support extended attribute '%s'") %
+ (_unicode_decode(dest), _unicode_decode(attr)))
+
+def movefile(src, dest, newmtime=None, sstat=None, mysettings=None,
+ hardlink_candidates=None, encoding=_encodings['fs']):
+ """moves a file from src to dest, preserving all permissions and attributes; mtime will
+ be preserved even when moving across filesystems. Returns mtime as integer on success
+ and None on failure. mtime is expressed in seconds in Python <3.3 and nanoseconds in
+ Python >=3.3. Move is atomic."""
+
+ if mysettings is None:
+ mysettings = portage.settings
+
+ src_bytes = _unicode_encode(src, encoding=encoding, errors='strict')
+ dest_bytes = _unicode_encode(dest, encoding=encoding, errors='strict')
+ xattr_enabled = "xattr" in mysettings.features
+ selinux_enabled = mysettings.selinux_enabled()
+ if selinux_enabled:
+ selinux = _unicode_module_wrapper(_selinux, encoding=encoding)
+ _copyfile = selinux.copyfile
+ _rename = selinux.rename
+ else:
+ _copyfile = copyfile
+ _rename = _os.rename
+
+ lchown = _unicode_func_wrapper(portage.data.lchown, encoding=encoding)
+ os = _unicode_module_wrapper(_os,
+ encoding=encoding, overrides=_os_overrides)
+
+ try:
+ if not sstat:
+ sstat = os.lstat(src)
+
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ writemsg("!!! %s\n" % _("Stating source file failed... movefile()"),
+ noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
+ return None
+
+ destexists = 1
+ try:
+ dstat = os.lstat(dest)
+ except (OSError, IOError):
+ dstat = os.lstat(os.path.dirname(dest))
+ destexists = 0
+
+ if bsd_chflags:
+ if destexists and dstat.st_flags != 0:
+ bsd_chflags.lchflags(dest, 0)
+ # Use normal stat/chflags for the parent since we want to
+ # follow any symlinks to the real parent directory.
+ pflags = os.stat(os.path.dirname(dest)).st_flags
+ if pflags != 0:
+ bsd_chflags.chflags(os.path.dirname(dest), 0)
+
+ if destexists:
+ if stat.S_ISLNK(dstat[stat.ST_MODE]):
+ try:
+ os.unlink(dest)
+ destexists = 0
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ pass
+
+ if stat.S_ISLNK(sstat[stat.ST_MODE]):
+ try:
+ target = os.readlink(src)
+ if mysettings and "D" in mysettings and \
+ target.startswith(mysettings["D"]):
+ target = target[len(mysettings["D"])-1:]
+ if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
+ os.unlink(dest)
+ try:
+ if selinux_enabled:
+ selinux.symlink(target, dest, src)
+ else:
+ os.symlink(target, dest)
+ except OSError as e:
+ # Some programs will create symlinks automatically, so we have
+ # to tolerate these links being recreated during the merge
+ # process. In any case, if the link is pointing at the right
+ # place, we're in good shape.
+ if e.errno not in (errno.ENOENT, errno.EEXIST) or \
+ target != os.readlink(dest):
+ raise
+ lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
+
+ try:
+ _os.unlink(src_bytes)
+ except OSError:
+ pass
+
+ if sys.hexversion >= 0x3030000:
+ try:
+ os.utime(dest, ns=(sstat.st_mtime_ns, sstat.st_mtime_ns), follow_symlinks=False)
+ except NotImplementedError:
+ # utimensat() and lutimes() missing in libc.
+ return os.stat(dest, follow_symlinks=False).st_mtime_ns
+ else:
+ return sstat.st_mtime_ns
+ else:
+ # utime() in Python <3.3 only works on the target of a symlink, so it's not
+ # possible to preserve mtime on symlinks.
+ return os.lstat(dest)[stat.ST_MTIME]
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ writemsg("!!! %s\n" % _("failed to properly create symlink:"),
+ noiselevel=-1)
+ writemsg("!!! %s -> %s\n" % (dest, target), noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
+ return None
+
+ hardlinked = False
+ # Since identical files might be merged to multiple filesystems,
+ # os.link() calls might fail for some paths, so try them all.
+ # For atomic replacement, first create the link as a temp file
+ # and then use os.rename() to replace the destination.
+ if hardlink_candidates:
+ head, tail = os.path.split(dest)
+ hardlink_tmp = os.path.join(head, ".%s._portage_merge_.%s" % \
+ (tail, os.getpid()))
+ try:
+ os.unlink(hardlink_tmp)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ writemsg(_("!!! Failed to remove hardlink temp file: %s\n") % \
+ (hardlink_tmp,), noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
+ return None
+ del e
+ for hardlink_src in hardlink_candidates:
+ try:
+ os.link(hardlink_src, hardlink_tmp)
+ except OSError:
+ continue
+ else:
+ try:
+ os.rename(hardlink_tmp, dest)
+ except OSError as e:
+ writemsg(_("!!! Failed to rename %s to %s\n") % \
+ (hardlink_tmp, dest), noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
+ return None
+ hardlinked = True
+ try:
+ _os.unlink(src_bytes)
+ except OSError:
+ pass
+ break
+
+ renamefailed = 1
+ if hardlinked:
+ renamefailed = False
+ if not hardlinked and (selinux_enabled or sstat.st_dev == dstat.st_dev):
+ try:
+ if selinux_enabled:
+ selinux.rename(src, dest)
+ else:
+ os.rename(src, dest)
+ renamefailed = 0
+ except OSError as e:
+ if e.errno != errno.EXDEV:
+ # Some random error.
+ writemsg("!!! %s\n" % _("Failed to move %(src)s to %(dest)s") %
+ {"src": src, "dest": dest}, noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
+ return None
+ # EXDEV: the rename failed across a 'bind' mount or an actual cross-device boundary
+ if renamefailed:
+ if stat.S_ISREG(sstat[stat.ST_MODE]):
+ dest_tmp = dest + "#new"
+ dest_tmp_bytes = _unicode_encode(dest_tmp, encoding=encoding,
+ errors='strict')
+ try: # For safety copy then move it over.
+ _copyfile(src_bytes, dest_tmp_bytes)
+ _apply_stat(sstat, dest_tmp_bytes)
+ if xattr_enabled:
+ try:
+ _copyxattr(src_bytes, dest_tmp_bytes,
+ exclude=mysettings.get("PORTAGE_XATTR_EXCLUDE", ""))
+ except SystemExit:
+ raise
+ except:
+ msg = _("Failed to copy extended attributes. "
+ "In order to avoid this error, set "
+ "FEATURES=\"-xattr\" in make.conf.")
+ msg = textwrap.wrap(msg, 65)
+ for line in msg:
+ writemsg("!!! %s\n" % (line,), noiselevel=-1)
+ raise
+ _rename(dest_tmp_bytes, dest_bytes)
+ _os.unlink(src_bytes)
+ except SystemExit as e:
+ raise
+ except Exception as e:
+ writemsg("!!! %s\n" % _('copy %(src)s -> %(dest)s failed.') %
+ {"src": src, "dest": dest}, noiselevel=-1)
+ writemsg("!!! %s\n" % (e,), noiselevel=-1)
+ return None
+ else:
+ # We don't yet handle special files, so we need to fall back to /bin/mv.
+ a = spawn([MOVE_BINARY, '-f', src, dest], env=os.environ)
+ if a != os.EX_OK:
+ writemsg(_("!!! Failed to move special file:\n"), noiselevel=-1)
+ writemsg(_("!!! '%(src)s' to '%(dest)s'\n") % \
+ {"src": _unicode_decode(src, encoding=encoding),
+ "dest": _unicode_decode(dest, encoding=encoding)}, noiselevel=-1)
+ writemsg("!!! %s\n" % a, noiselevel=-1)
+ return None # failure
+
+ # In Python <3.3 always use stat_obj[stat.ST_MTIME] for the integral timestamp
+ # which is returned, since the stat_obj.st_mtime float attribute rounds *up*
+ # if the nanosecond part of the timestamp is 999999881 ns or greater.
+ try:
+ if hardlinked:
+ if sys.hexversion >= 0x3030000:
+ newmtime = os.stat(dest).st_mtime_ns
+ else:
+ newmtime = os.stat(dest)[stat.ST_MTIME]
+ else:
+ # Note: It is not possible to preserve nanosecond precision
+ # (supported in POSIX.1-2008 via utimensat) with the IEEE 754
+ # double precision float which only has a 53 bit significand.
+ if newmtime is not None:
+ if sys.hexversion >= 0x3030000:
+ os.utime(dest, ns=(newmtime, newmtime))
+ else:
+ os.utime(dest, (newmtime, newmtime))
+ else:
+ if sys.hexversion >= 0x3030000:
+ newmtime = sstat.st_mtime_ns
+ else:
+ newmtime = sstat[stat.ST_MTIME]
+ if renamefailed:
+ if sys.hexversion >= 0x3030000:
+ # If rename succeeded then timestamps are automatically
+ # preserved with complete precision because the source
+ # and destination inodes are the same. Otherwise, manually
+ # update timestamps with nanosecond precision.
+ os.utime(dest, ns=(newmtime, newmtime))
+ else:
+ # If rename succeeded then timestamps are automatically
+ # preserved with complete precision because the source
+ # and destination inodes are the same. Otherwise, round
+ # down to the nearest whole second since python's float
+ # st_mtime cannot be used to preserve the st_mtim.tv_nsec
+ # field with complete precision. Note that we have to use
+ # stat_obj[stat.ST_MTIME] here because the float
+ # stat_obj.st_mtime rounds *up* sometimes.
+ os.utime(dest, (newmtime, newmtime))
+ except OSError:
+ # The utime can fail here with EPERM even though the move succeeded.
+ # Instead of failing, use stat to return the mtime if possible.
+ try:
+ if sys.hexversion >= 0x3030000:
+ newmtime = os.stat(dest).st_mtime_ns
+ else:
+ newmtime = os.stat(dest)[stat.ST_MTIME]
+ except OSError as e:
+ writemsg(_("!!! Failed to stat in movefile()\n"), noiselevel=-1)
+ writemsg("!!! %s\n" % dest, noiselevel=-1)
+ writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ return None
+
+ if bsd_chflags:
+ # Restore the flags we saved before moving
+ if pflags:
+ bsd_chflags.chflags(os.path.dirname(dest), pflags)
+
+ return newmtime
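
The EXDEV branch above boils down to a copy-then-rename pattern. A minimal
standalone sketch of that idea follows, using plain shutil/os instead of
Portage's internal _copyfile/_apply_stat/_copyxattr helpers, so it preserves
only basic metadata and is not a drop-in replacement for movefile():

    import errno
    import os
    import shutil

    def move_across_devices(src, dest):
        """Move src to dest even if they live on different filesystems."""
        try:
            os.rename(src, dest)
            return
        except OSError as e:
            if e.errno != errno.EXDEV:
                raise
        # Cross-device: copy to a temp name on the destination filesystem,
        # then rename into place so readers never observe a partial file.
        dest_tmp = dest + "#new"
        shutil.copy2(src, dest_tmp)
        os.rename(dest_tmp, dest)
        os.unlink(src)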
diff --git a/lib/portage/util/mtimedb.py b/lib/portage/util/mtimedb.py
new file mode 100644
index 000000000..30922a901
--- /dev/null
+++ b/lib/portage/util/mtimedb.py
@@ -0,0 +1,128 @@
+# Copyright 2010-2012 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+__all__ = ['MtimeDB']
+
+import copy
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+import errno
+import io
+import json
+import sys
+
+import portage
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+from portage.data import portage_gid, uid
+from portage.localization import _
+from portage.util import apply_secpass_permissions, atomic_ofstream, writemsg
+
+class MtimeDB(dict):
+
+ # JSON read support has been available since portage-2.1.10.49.
+ _json_write = True
+
+ _json_write_opts = {
+ "ensure_ascii": False,
+ "indent": "\t",
+ "sort_keys": True
+ }
+ if sys.hexversion < 0x30200F0:
+ # indent only supports int number of spaces
+ _json_write_opts["indent"] = 4
+
+ def __init__(self, filename):
+ dict.__init__(self)
+ self.filename = filename
+ self._load(filename)
+
+ def _load(self, filename):
+ f = None
+ content = None
+ try:
+ f = open(_unicode_encode(filename), 'rb')
+ content = f.read()
+ except EnvironmentError as e:
+ if getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES):
+ pass
+ else:
+ writemsg(_("!!! Error loading '%s': %s\n") % \
+ (filename, e), noiselevel=-1)
+ finally:
+ if f is not None:
+ f.close()
+
+ d = None
+ if content:
+ try:
+ d = json.loads(_unicode_decode(content,
+ encoding=_encodings['repo.content'], errors='strict'))
+ except SystemExit:
+ raise
+ except Exception as e:
+ try:
+ mypickle = pickle.Unpickler(io.BytesIO(content))
+ try:
+ mypickle.find_global = None
+ except AttributeError:
+ # Python >=3
+ pass
+ d = mypickle.load()
+ except SystemExit:
+ raise
+ except Exception:
+ writemsg(_("!!! Error loading '%s': %s\n") % \
+ (filename, e), noiselevel=-1)
+
+ if d is None:
+ d = {}
+
+ if "old" in d:
+ d["updates"] = d["old"]
+ del d["old"]
+ if "cur" in d:
+ del d["cur"]
+
+ d.setdefault("starttime", 0)
+ d.setdefault("version", "")
+ for k in ("info", "ldpath", "updates"):
+ d.setdefault(k, {})
+
+ mtimedbkeys = set(("info", "ldpath", "resume", "resume_backup",
+ "starttime", "updates", "version"))
+
+ for k in list(d):
+ if k not in mtimedbkeys:
+ writemsg(_("Deleting invalid mtimedb key: %s\n") % str(k))
+ del d[k]
+ self.update(d)
+ self._clean_data = copy.deepcopy(d)
+
+ def commit(self):
+ if not self.filename:
+ return
+ d = {}
+ d.update(self)
+ # Only commit if the internal state has changed.
+ if d != self._clean_data:
+ d["version"] = str(portage.VERSION)
+ try:
+ f = atomic_ofstream(self.filename, mode='wb')
+ except EnvironmentError:
+ pass
+ else:
+ if self._json_write:
+ f.write(_unicode_encode(
+ json.dumps(d, **self._json_write_opts),
+ encoding=_encodings['repo.content'], errors='strict'))
+ else:
+ pickle.dump(d, f, protocol=2)
+ f.close()
+ apply_secpass_permissions(self.filename,
+ uid=uid, gid=portage_gid, mode=0o644)
+ self._clean_data = copy.deepcopy(d)
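
A short usage sketch of MtimeDB; the filename and resume payload below are
illustrative only, since in practice Portage constructs the instance itself
with its configured mtimedb path:

    from portage.util.mtimedb import MtimeDB

    mtimedb = MtimeDB("/var/cache/edb/mtimedb")  # illustrative path
    mtimedb["resume"] = {"mergelist": [], "myopts": {}}
    mtimedb.commit()  # written (JSON by default) only if the contents changed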
diff --git a/lib/portage/util/path.py b/lib/portage/util/path.py
new file mode 100644
index 000000000..a0b96c7f3
--- /dev/null
+++ b/lib/portage/util/path.py
@@ -0,0 +1,48 @@
+# Copyright 2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import errno
+
+from portage import os
+
+def first_existing(path):
+ """
+ Returns the first existing path element, traversing from the given
+ path to the root directory. A path is considered to exist if lstat
+ either succeeds or raises an error other than ENOENT or ESTALE.
+
+ This can be particularly useful to check if there is permission to
+ create a particular file or directory, without actually creating
+ anything.
+
+ @param path: a filesystem path
+ @type path: str
+ @rtype: str
+ @return: the element that exists
+ """
+ existing = False
+ for path in iter_parents(path):
+ try:
+ os.lstat(path)
+ existing = True
+ except OSError as e:
+ if e.errno not in (errno.ENOENT, errno.ESTALE):
+ existing = True
+
+ if existing:
+ return path
+
+ return os.sep
+
+def iter_parents(path):
+ """
+ @param path: a filesystem path
+ @type path: str
+ @rtype: iterator
+ @return: an iterator which yields path and all parents of path,
+ ending with the root directory
+ """
+ yield path
+ while path != os.sep:
+ path = os.path.dirname(path)
+ yield path
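
A brief example of the two helpers; the image directory below is hypothetical
and only some of its leading components are assumed to exist:

    from portage.util.path import first_existing, iter_parents

    image_dir = "/var/tmp/portage/app-misc/foo-1.0/image"
    print(first_existing(image_dir))    # deepest existing ancestor, or os.sep
    print(list(iter_parents("/a/b")))   # ['/a/b', '/a', '/']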
diff --git a/lib/portage/util/socks5.py b/lib/portage/util/socks5.py
new file mode 100644
index 000000000..74b0714eb
--- /dev/null
+++ b/lib/portage/util/socks5.py
@@ -0,0 +1,81 @@
+# SOCKSv5 proxy manager for network-sandbox
+# Copyright 2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+import signal
+
+from portage import _python_interpreter
+from portage.data import portage_gid, portage_uid, userpriv_groups
+from portage.process import atexit_register, spawn
+
+
+class ProxyManager(object):
+ """
+ A class to start and control a single running SOCKSv5 server process
+ for Portage.
+ """
+
+ def __init__(self):
+ self.socket_path = None
+ self._pids = []
+
+ def start(self, settings):
+ """
+ Start the SOCKSv5 server.
+
+ @param settings: Portage settings instance (used to determine
+ paths)
+ @type settings: portage.config
+ """
+ try:
+ import asyncio # NOQA
+ except ImportError:
+ raise NotImplementedError('SOCKSv5 proxy requires asyncio module')
+
+ self.socket_path = os.path.join(settings['PORTAGE_TMPDIR'],
+ '.portage.%d.net.sock' % os.getpid())
+ server_bin = os.path.join(settings['PORTAGE_BIN_PATH'], 'socks5-server.py')
+ self._pids = spawn([_python_interpreter, server_bin, self.socket_path],
+ returnpid=True, uid=portage_uid, gid=portage_gid,
+ groups=userpriv_groups, umask=0o077)
+
+ def stop(self):
+ """
+ Stop the SOCKSv5 server.
+ """
+ for p in self._pids:
+ os.kill(p, signal.SIGINT)
+ os.waitpid(p, 0)
+
+ self.socket_path = None
+ self._pids = []
+
+ def is_running(self):
+ """
+ Check whether the SOCKSv5 server is running.
+
+ @return: True if the server is running, False otherwise
+ """
+ return self.socket_path is not None
+
+
+proxy = ProxyManager()
+
+
+def get_socks5_proxy(settings):
+ """
+ Get UNIX socket path for a SOCKSv5 proxy. A new proxy is started if
+ one isn't running yet, and an atexit event is added to stop the proxy
+ on exit.
+
+ @param settings: Portage settings instance (used to determine paths)
+ @type settings: portage.config
+ @return: (string) UNIX socket path
+ """
+
+ if not proxy.is_running():
+ proxy.start(settings)
+ atexit_register(proxy.stop)
+
+ return proxy.socket_path
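
A sketch of a typical call site, assuming a normal Portage environment where
portage.settings provides PORTAGE_TMPDIR and PORTAGE_BIN_PATH:

    import portage
    from portage.util.socks5 import get_socks5_proxy

    socket_path = get_socks5_proxy(portage.settings)  # starts the server on first call
    print("SOCKSv5 proxy socket:", socket_path)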
diff --git a/lib/portage/util/whirlpool.py b/lib/portage/util/whirlpool.py
new file mode 100644
index 000000000..170ae73f8
--- /dev/null
+++ b/lib/portage/util/whirlpool.py
@@ -0,0 +1,796 @@
+## whirlpool.py - pure Python implementation of the Whirlpool algorithm.
+## Bjorn Edstrom <be@bjrn.se>, 16 December 2007.
+##
+## Copyrights
+## ==========
+##
+## This code is based on the reference implementation by
+## Paulo S.L.M. Barreto and Vincent Rijmen. The reference implementation
+## is placed in the public domain but has the following headers:
+##
+## * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS
+## * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+## * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+## * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
+## * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+## * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+## * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+## * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+## * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+## * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+## * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+## *
+## */
+## /* The code contained in this file (Whirlpool.c) is in the public domain. */
+##
+## This Python implementation is therefore also placed in the public domain.
+
+import sys
+if sys.hexversion >= 0x3000000:
+ xrange = range
+
+#block_size = 64
+digest_size = 64
+digestsize = 64
+
+class Whirlpool:
+ """Return a new Whirlpool object. An optional string argument
+ may be provided; if present, this string will be automatically
+ hashed."""
+ def __init__(self, arg=None):
+ self.ctx = WhirlpoolStruct()
+ if arg:
+ self.update(arg)
+ self.digest_status = 0
+
+ def update(self, arg):
+ """update(arg)"""
+ WhirlpoolAdd(arg, len(arg)*8, self.ctx)
+ self.digest_status = 0
+
+ def digest(self):
+ """digest()"""
+ if self.digest_status == 0:
+ self.dig = WhirlpoolFinalize(self.ctx)
+ self.digest_status = 1
+ return self.dig
+
+ def hexdigest(self):
+ """hexdigest()"""
+ dig = self.digest()
+ tempstr = ''
+ for d in dig:
+ xxx = '%02x' % (ord(d))
+ tempstr = tempstr + xxx
+ return tempstr
+
+ def copy(self):
+ """copy()"""
+ import copy
+ return copy.deepcopy(self)
+
+
+def new(init=None):
+ """Return a new Whirlpool object. An optional string argument
+ may be provided; if present, this string will be automatically
+ hashed."""
+ return Whirlpool(init)
+
+#
+# Private.
+#
+
+R = 10
+
+C0 = [
+0x18186018c07830d8, 0x23238c2305af4626, 0xc6c63fc67ef991b8, 0xe8e887e8136fcdfb,
+0x878726874ca113cb, 0xb8b8dab8a9626d11, 0x0101040108050209, 0x4f4f214f426e9e0d,
+0x3636d836adee6c9b, 0xa6a6a2a6590451ff, 0xd2d26fd2debdb90c, 0xf5f5f3f5fb06f70e,
+0x7979f979ef80f296, 0x6f6fa16f5fcede30, 0x91917e91fcef3f6d, 0x52525552aa07a4f8,
+0x60609d6027fdc047, 0xbcbccabc89766535, 0x9b9b569baccd2b37, 0x8e8e028e048c018a,
+0xa3a3b6a371155bd2, 0x0c0c300c603c186c, 0x7b7bf17bff8af684, 0x3535d435b5e16a80,
+0x1d1d741de8693af5, 0xe0e0a7e05347ddb3, 0xd7d77bd7f6acb321, 0xc2c22fc25eed999c,
+0x2e2eb82e6d965c43, 0x4b4b314b627a9629, 0xfefedffea321e15d, 0x575741578216aed5,
+0x15155415a8412abd, 0x7777c1779fb6eee8, 0x3737dc37a5eb6e92, 0xe5e5b3e57b56d79e,
+0x9f9f469f8cd92313, 0xf0f0e7f0d317fd23, 0x4a4a354a6a7f9420, 0xdada4fda9e95a944,
+0x58587d58fa25b0a2, 0xc9c903c906ca8fcf, 0x2929a429558d527c, 0x0a0a280a5022145a,
+0xb1b1feb1e14f7f50, 0xa0a0baa0691a5dc9, 0x6b6bb16b7fdad614, 0x85852e855cab17d9,
+0xbdbdcebd8173673c, 0x5d5d695dd234ba8f, 0x1010401080502090, 0xf4f4f7f4f303f507,
+0xcbcb0bcb16c08bdd, 0x3e3ef83eedc67cd3, 0x0505140528110a2d, 0x676781671fe6ce78,
+0xe4e4b7e47353d597, 0x27279c2725bb4e02, 0x4141194132588273, 0x8b8b168b2c9d0ba7,
+0xa7a7a6a7510153f6, 0x7d7de97dcf94fab2, 0x95956e95dcfb3749, 0xd8d847d88e9fad56,
+0xfbfbcbfb8b30eb70, 0xeeee9fee2371c1cd, 0x7c7ced7cc791f8bb, 0x6666856617e3cc71,
+0xdddd53dda68ea77b, 0x17175c17b84b2eaf, 0x4747014702468e45, 0x9e9e429e84dc211a,
+0xcaca0fca1ec589d4, 0x2d2db42d75995a58, 0xbfbfc6bf9179632e, 0x07071c07381b0e3f,
+0xadad8ead012347ac, 0x5a5a755aea2fb4b0, 0x838336836cb51bef, 0x3333cc3385ff66b6,
+0x636391633ff2c65c, 0x02020802100a0412, 0xaaaa92aa39384993, 0x7171d971afa8e2de,
+0xc8c807c80ecf8dc6, 0x19196419c87d32d1, 0x494939497270923b, 0xd9d943d9869aaf5f,
+0xf2f2eff2c31df931, 0xe3e3abe34b48dba8, 0x5b5b715be22ab6b9, 0x88881a8834920dbc,
+0x9a9a529aa4c8293e, 0x262698262dbe4c0b, 0x3232c8328dfa64bf, 0xb0b0fab0e94a7d59,
+0xe9e983e91b6acff2, 0x0f0f3c0f78331e77, 0xd5d573d5e6a6b733, 0x80803a8074ba1df4,
+0xbebec2be997c6127, 0xcdcd13cd26de87eb, 0x3434d034bde46889, 0x48483d487a759032,
+0xffffdbffab24e354, 0x7a7af57af78ff48d, 0x90907a90f4ea3d64, 0x5f5f615fc23ebe9d,
+0x202080201da0403d, 0x6868bd6867d5d00f, 0x1a1a681ad07234ca, 0xaeae82ae192c41b7,
+0xb4b4eab4c95e757d, 0x54544d549a19a8ce, 0x93937693ece53b7f, 0x222288220daa442f,
+0x64648d6407e9c863, 0xf1f1e3f1db12ff2a, 0x7373d173bfa2e6cc, 0x12124812905a2482,
+0x40401d403a5d807a, 0x0808200840281048, 0xc3c32bc356e89b95, 0xecec97ec337bc5df,
+0xdbdb4bdb9690ab4d, 0xa1a1bea1611f5fc0, 0x8d8d0e8d1c830791, 0x3d3df43df5c97ac8,
+0x97976697ccf1335b, 0x0000000000000000, 0xcfcf1bcf36d483f9, 0x2b2bac2b4587566e,
+0x7676c57697b3ece1, 0x8282328264b019e6, 0xd6d67fd6fea9b128, 0x1b1b6c1bd87736c3,
+0xb5b5eeb5c15b7774, 0xafaf86af112943be, 0x6a6ab56a77dfd41d, 0x50505d50ba0da0ea,
+0x45450945124c8a57, 0xf3f3ebf3cb18fb38, 0x3030c0309df060ad, 0xefef9bef2b74c3c4,
+0x3f3ffc3fe5c37eda, 0x55554955921caac7, 0xa2a2b2a2791059db, 0xeaea8fea0365c9e9,
+0x656589650fecca6a, 0xbabad2bab9686903, 0x2f2fbc2f65935e4a, 0xc0c027c04ee79d8e,
+0xdede5fdebe81a160, 0x1c1c701ce06c38fc, 0xfdfdd3fdbb2ee746, 0x4d4d294d52649a1f,
+0x92927292e4e03976, 0x7575c9758fbceafa, 0x06061806301e0c36, 0x8a8a128a249809ae,
+0xb2b2f2b2f940794b, 0xe6e6bfe66359d185, 0x0e0e380e70361c7e, 0x1f1f7c1ff8633ee7,
+0x6262956237f7c455, 0xd4d477d4eea3b53a, 0xa8a89aa829324d81, 0x96966296c4f43152,
+0xf9f9c3f99b3aef62, 0xc5c533c566f697a3, 0x2525942535b14a10, 0x59597959f220b2ab,
+0x84842a8454ae15d0, 0x7272d572b7a7e4c5, 0x3939e439d5dd72ec, 0x4c4c2d4c5a619816,
+0x5e5e655eca3bbc94, 0x7878fd78e785f09f, 0x3838e038ddd870e5, 0x8c8c0a8c14860598,
+0xd1d163d1c6b2bf17, 0xa5a5aea5410b57e4, 0xe2e2afe2434dd9a1, 0x616199612ff8c24e,
+0xb3b3f6b3f1457b42, 0x2121842115a54234, 0x9c9c4a9c94d62508, 0x1e1e781ef0663cee,
+0x4343114322528661, 0xc7c73bc776fc93b1, 0xfcfcd7fcb32be54f, 0x0404100420140824,
+0x51515951b208a2e3, 0x99995e99bcc72f25, 0x6d6da96d4fc4da22, 0x0d0d340d68391a65,
+0xfafacffa8335e979, 0xdfdf5bdfb684a369, 0x7e7ee57ed79bfca9, 0x242490243db44819,
+0x3b3bec3bc5d776fe, 0xabab96ab313d4b9a, 0xcece1fce3ed181f0, 0x1111441188552299,
+0x8f8f068f0c890383, 0x4e4e254e4a6b9c04, 0xb7b7e6b7d1517366, 0xebeb8beb0b60cbe0,
+0x3c3cf03cfdcc78c1, 0x81813e817cbf1ffd, 0x94946a94d4fe3540, 0xf7f7fbf7eb0cf31c,
+0xb9b9deb9a1676f18, 0x13134c13985f268b, 0x2c2cb02c7d9c5851, 0xd3d36bd3d6b8bb05,
+0xe7e7bbe76b5cd38c, 0x6e6ea56e57cbdc39, 0xc4c437c46ef395aa, 0x03030c03180f061b,
+0x565645568a13acdc, 0x44440d441a49885e, 0x7f7fe17fdf9efea0, 0xa9a99ea921374f88,
+0x2a2aa82a4d825467, 0xbbbbd6bbb16d6b0a, 0xc1c123c146e29f87, 0x53535153a202a6f1,
+0xdcdc57dcae8ba572, 0x0b0b2c0b58271653, 0x9d9d4e9d9cd32701, 0x6c6cad6c47c1d82b,
+0x3131c43195f562a4, 0x7474cd7487b9e8f3, 0xf6f6fff6e309f115, 0x464605460a438c4c,
+0xacac8aac092645a5, 0x89891e893c970fb5, 0x14145014a04428b4, 0xe1e1a3e15b42dfba,
+0x16165816b04e2ca6, 0x3a3ae83acdd274f7, 0x6969b9696fd0d206, 0x09092409482d1241,
+0x7070dd70a7ade0d7, 0xb6b6e2b6d954716f, 0xd0d067d0ceb7bd1e, 0xeded93ed3b7ec7d6,
+0xcccc17cc2edb85e2, 0x424215422a578468, 0x98985a98b4c22d2c, 0xa4a4aaa4490e55ed,
+0x2828a0285d885075, 0x5c5c6d5cda31b886, 0xf8f8c7f8933fed6b, 0x8686228644a411c2,
+]
+C1 = [
+0xd818186018c07830, 0x2623238c2305af46, 0xb8c6c63fc67ef991, 0xfbe8e887e8136fcd,
+0xcb878726874ca113, 0x11b8b8dab8a9626d, 0x0901010401080502, 0x0d4f4f214f426e9e,
+0x9b3636d836adee6c, 0xffa6a6a2a6590451, 0x0cd2d26fd2debdb9, 0x0ef5f5f3f5fb06f7,
+0x967979f979ef80f2, 0x306f6fa16f5fcede, 0x6d91917e91fcef3f, 0xf852525552aa07a4,
+0x4760609d6027fdc0, 0x35bcbccabc897665, 0x379b9b569baccd2b, 0x8a8e8e028e048c01,
+0xd2a3a3b6a371155b, 0x6c0c0c300c603c18, 0x847b7bf17bff8af6, 0x803535d435b5e16a,
+0xf51d1d741de8693a, 0xb3e0e0a7e05347dd, 0x21d7d77bd7f6acb3, 0x9cc2c22fc25eed99,
+0x432e2eb82e6d965c, 0x294b4b314b627a96, 0x5dfefedffea321e1, 0xd5575741578216ae,
+0xbd15155415a8412a, 0xe87777c1779fb6ee, 0x923737dc37a5eb6e, 0x9ee5e5b3e57b56d7,
+0x139f9f469f8cd923, 0x23f0f0e7f0d317fd, 0x204a4a354a6a7f94, 0x44dada4fda9e95a9,
+0xa258587d58fa25b0, 0xcfc9c903c906ca8f, 0x7c2929a429558d52, 0x5a0a0a280a502214,
+0x50b1b1feb1e14f7f, 0xc9a0a0baa0691a5d, 0x146b6bb16b7fdad6, 0xd985852e855cab17,
+0x3cbdbdcebd817367, 0x8f5d5d695dd234ba, 0x9010104010805020, 0x07f4f4f7f4f303f5,
+0xddcbcb0bcb16c08b, 0xd33e3ef83eedc67c, 0x2d0505140528110a, 0x78676781671fe6ce,
+0x97e4e4b7e47353d5, 0x0227279c2725bb4e, 0x7341411941325882, 0xa78b8b168b2c9d0b,
+0xf6a7a7a6a7510153, 0xb27d7de97dcf94fa, 0x4995956e95dcfb37, 0x56d8d847d88e9fad,
+0x70fbfbcbfb8b30eb, 0xcdeeee9fee2371c1, 0xbb7c7ced7cc791f8, 0x716666856617e3cc,
+0x7bdddd53dda68ea7, 0xaf17175c17b84b2e, 0x454747014702468e, 0x1a9e9e429e84dc21,
+0xd4caca0fca1ec589, 0x582d2db42d75995a, 0x2ebfbfc6bf917963, 0x3f07071c07381b0e,
+0xacadad8ead012347, 0xb05a5a755aea2fb4, 0xef838336836cb51b, 0xb63333cc3385ff66,
+0x5c636391633ff2c6, 0x1202020802100a04, 0x93aaaa92aa393849, 0xde7171d971afa8e2,
+0xc6c8c807c80ecf8d, 0xd119196419c87d32, 0x3b49493949727092, 0x5fd9d943d9869aaf,
+0x31f2f2eff2c31df9, 0xa8e3e3abe34b48db, 0xb95b5b715be22ab6, 0xbc88881a8834920d,
+0x3e9a9a529aa4c829, 0x0b262698262dbe4c, 0xbf3232c8328dfa64, 0x59b0b0fab0e94a7d,
+0xf2e9e983e91b6acf, 0x770f0f3c0f78331e, 0x33d5d573d5e6a6b7, 0xf480803a8074ba1d,
+0x27bebec2be997c61, 0xebcdcd13cd26de87, 0x893434d034bde468, 0x3248483d487a7590,
+0x54ffffdbffab24e3, 0x8d7a7af57af78ff4, 0x6490907a90f4ea3d, 0x9d5f5f615fc23ebe,
+0x3d202080201da040, 0x0f6868bd6867d5d0, 0xca1a1a681ad07234, 0xb7aeae82ae192c41,
+0x7db4b4eab4c95e75, 0xce54544d549a19a8, 0x7f93937693ece53b, 0x2f222288220daa44,
+0x6364648d6407e9c8, 0x2af1f1e3f1db12ff, 0xcc7373d173bfa2e6, 0x8212124812905a24,
+0x7a40401d403a5d80, 0x4808082008402810, 0x95c3c32bc356e89b, 0xdfecec97ec337bc5,
+0x4ddbdb4bdb9690ab, 0xc0a1a1bea1611f5f, 0x918d8d0e8d1c8307, 0xc83d3df43df5c97a,
+0x5b97976697ccf133, 0x0000000000000000, 0xf9cfcf1bcf36d483, 0x6e2b2bac2b458756,
+0xe17676c57697b3ec, 0xe68282328264b019, 0x28d6d67fd6fea9b1, 0xc31b1b6c1bd87736,
+0x74b5b5eeb5c15b77, 0xbeafaf86af112943, 0x1d6a6ab56a77dfd4, 0xea50505d50ba0da0,
+0x5745450945124c8a, 0x38f3f3ebf3cb18fb, 0xad3030c0309df060, 0xc4efef9bef2b74c3,
+0xda3f3ffc3fe5c37e, 0xc755554955921caa, 0xdba2a2b2a2791059, 0xe9eaea8fea0365c9,
+0x6a656589650fecca, 0x03babad2bab96869, 0x4a2f2fbc2f65935e, 0x8ec0c027c04ee79d,
+0x60dede5fdebe81a1, 0xfc1c1c701ce06c38, 0x46fdfdd3fdbb2ee7, 0x1f4d4d294d52649a,
+0x7692927292e4e039, 0xfa7575c9758fbcea, 0x3606061806301e0c, 0xae8a8a128a249809,
+0x4bb2b2f2b2f94079, 0x85e6e6bfe66359d1, 0x7e0e0e380e70361c, 0xe71f1f7c1ff8633e,
+0x556262956237f7c4, 0x3ad4d477d4eea3b5, 0x81a8a89aa829324d, 0x5296966296c4f431,
+0x62f9f9c3f99b3aef, 0xa3c5c533c566f697, 0x102525942535b14a, 0xab59597959f220b2,
+0xd084842a8454ae15, 0xc57272d572b7a7e4, 0xec3939e439d5dd72, 0x164c4c2d4c5a6198,
+0x945e5e655eca3bbc, 0x9f7878fd78e785f0, 0xe53838e038ddd870, 0x988c8c0a8c148605,
+0x17d1d163d1c6b2bf, 0xe4a5a5aea5410b57, 0xa1e2e2afe2434dd9, 0x4e616199612ff8c2,
+0x42b3b3f6b3f1457b, 0x342121842115a542, 0x089c9c4a9c94d625, 0xee1e1e781ef0663c,
+0x6143431143225286, 0xb1c7c73bc776fc93, 0x4ffcfcd7fcb32be5, 0x2404041004201408,
+0xe351515951b208a2, 0x2599995e99bcc72f, 0x226d6da96d4fc4da, 0x650d0d340d68391a,
+0x79fafacffa8335e9, 0x69dfdf5bdfb684a3, 0xa97e7ee57ed79bfc, 0x19242490243db448,
+0xfe3b3bec3bc5d776, 0x9aabab96ab313d4b, 0xf0cece1fce3ed181, 0x9911114411885522,
+0x838f8f068f0c8903, 0x044e4e254e4a6b9c, 0x66b7b7e6b7d15173, 0xe0ebeb8beb0b60cb,
+0xc13c3cf03cfdcc78, 0xfd81813e817cbf1f, 0x4094946a94d4fe35, 0x1cf7f7fbf7eb0cf3,
+0x18b9b9deb9a1676f, 0x8b13134c13985f26, 0x512c2cb02c7d9c58, 0x05d3d36bd3d6b8bb,
+0x8ce7e7bbe76b5cd3, 0x396e6ea56e57cbdc, 0xaac4c437c46ef395, 0x1b03030c03180f06,
+0xdc565645568a13ac, 0x5e44440d441a4988, 0xa07f7fe17fdf9efe, 0x88a9a99ea921374f,
+0x672a2aa82a4d8254, 0x0abbbbd6bbb16d6b, 0x87c1c123c146e29f, 0xf153535153a202a6,
+0x72dcdc57dcae8ba5, 0x530b0b2c0b582716, 0x019d9d4e9d9cd327, 0x2b6c6cad6c47c1d8,
+0xa43131c43195f562, 0xf37474cd7487b9e8, 0x15f6f6fff6e309f1, 0x4c464605460a438c,
+0xa5acac8aac092645, 0xb589891e893c970f, 0xb414145014a04428, 0xbae1e1a3e15b42df,
+0xa616165816b04e2c, 0xf73a3ae83acdd274, 0x066969b9696fd0d2, 0x4109092409482d12,
+0xd77070dd70a7ade0, 0x6fb6b6e2b6d95471, 0x1ed0d067d0ceb7bd, 0xd6eded93ed3b7ec7,
+0xe2cccc17cc2edb85, 0x68424215422a5784, 0x2c98985a98b4c22d, 0xeda4a4aaa4490e55,
+0x752828a0285d8850, 0x865c5c6d5cda31b8, 0x6bf8f8c7f8933fed, 0xc28686228644a411,
+]
+C2 = [
+0x30d818186018c078, 0x462623238c2305af, 0x91b8c6c63fc67ef9, 0xcdfbe8e887e8136f,
+0x13cb878726874ca1, 0x6d11b8b8dab8a962, 0x0209010104010805, 0x9e0d4f4f214f426e,
+0x6c9b3636d836adee, 0x51ffa6a6a2a65904, 0xb90cd2d26fd2debd, 0xf70ef5f5f3f5fb06,
+0xf2967979f979ef80, 0xde306f6fa16f5fce, 0x3f6d91917e91fcef, 0xa4f852525552aa07,
+0xc04760609d6027fd, 0x6535bcbccabc8976, 0x2b379b9b569baccd, 0x018a8e8e028e048c,
+0x5bd2a3a3b6a37115, 0x186c0c0c300c603c, 0xf6847b7bf17bff8a, 0x6a803535d435b5e1,
+0x3af51d1d741de869, 0xddb3e0e0a7e05347, 0xb321d7d77bd7f6ac, 0x999cc2c22fc25eed,
+0x5c432e2eb82e6d96, 0x96294b4b314b627a, 0xe15dfefedffea321, 0xaed5575741578216,
+0x2abd15155415a841, 0xeee87777c1779fb6, 0x6e923737dc37a5eb, 0xd79ee5e5b3e57b56,
+0x23139f9f469f8cd9, 0xfd23f0f0e7f0d317, 0x94204a4a354a6a7f, 0xa944dada4fda9e95,
+0xb0a258587d58fa25, 0x8fcfc9c903c906ca, 0x527c2929a429558d, 0x145a0a0a280a5022,
+0x7f50b1b1feb1e14f, 0x5dc9a0a0baa0691a, 0xd6146b6bb16b7fda, 0x17d985852e855cab,
+0x673cbdbdcebd8173, 0xba8f5d5d695dd234, 0x2090101040108050, 0xf507f4f4f7f4f303,
+0x8bddcbcb0bcb16c0, 0x7cd33e3ef83eedc6, 0x0a2d050514052811, 0xce78676781671fe6,
+0xd597e4e4b7e47353, 0x4e0227279c2725bb, 0x8273414119413258, 0x0ba78b8b168b2c9d,
+0x53f6a7a7a6a75101, 0xfab27d7de97dcf94, 0x374995956e95dcfb, 0xad56d8d847d88e9f,
+0xeb70fbfbcbfb8b30, 0xc1cdeeee9fee2371, 0xf8bb7c7ced7cc791, 0xcc716666856617e3,
+0xa77bdddd53dda68e, 0x2eaf17175c17b84b, 0x8e45474701470246, 0x211a9e9e429e84dc,
+0x89d4caca0fca1ec5, 0x5a582d2db42d7599, 0x632ebfbfc6bf9179, 0x0e3f07071c07381b,
+0x47acadad8ead0123, 0xb4b05a5a755aea2f, 0x1bef838336836cb5, 0x66b63333cc3385ff,
+0xc65c636391633ff2, 0x041202020802100a, 0x4993aaaa92aa3938, 0xe2de7171d971afa8,
+0x8dc6c8c807c80ecf, 0x32d119196419c87d, 0x923b494939497270, 0xaf5fd9d943d9869a,
+0xf931f2f2eff2c31d, 0xdba8e3e3abe34b48, 0xb6b95b5b715be22a, 0x0dbc88881a883492,
+0x293e9a9a529aa4c8, 0x4c0b262698262dbe, 0x64bf3232c8328dfa, 0x7d59b0b0fab0e94a,
+0xcff2e9e983e91b6a, 0x1e770f0f3c0f7833, 0xb733d5d573d5e6a6, 0x1df480803a8074ba,
+0x6127bebec2be997c, 0x87ebcdcd13cd26de, 0x68893434d034bde4, 0x903248483d487a75,
+0xe354ffffdbffab24, 0xf48d7a7af57af78f, 0x3d6490907a90f4ea, 0xbe9d5f5f615fc23e,
+0x403d202080201da0, 0xd00f6868bd6867d5, 0x34ca1a1a681ad072, 0x41b7aeae82ae192c,
+0x757db4b4eab4c95e, 0xa8ce54544d549a19, 0x3b7f93937693ece5, 0x442f222288220daa,
+0xc86364648d6407e9, 0xff2af1f1e3f1db12, 0xe6cc7373d173bfa2, 0x248212124812905a,
+0x807a40401d403a5d, 0x1048080820084028, 0x9b95c3c32bc356e8, 0xc5dfecec97ec337b,
+0xab4ddbdb4bdb9690, 0x5fc0a1a1bea1611f, 0x07918d8d0e8d1c83, 0x7ac83d3df43df5c9,
+0x335b97976697ccf1, 0x0000000000000000, 0x83f9cfcf1bcf36d4, 0x566e2b2bac2b4587,
+0xece17676c57697b3, 0x19e68282328264b0, 0xb128d6d67fd6fea9, 0x36c31b1b6c1bd877,
+0x7774b5b5eeb5c15b, 0x43beafaf86af1129, 0xd41d6a6ab56a77df, 0xa0ea50505d50ba0d,
+0x8a5745450945124c, 0xfb38f3f3ebf3cb18, 0x60ad3030c0309df0, 0xc3c4efef9bef2b74,
+0x7eda3f3ffc3fe5c3, 0xaac755554955921c, 0x59dba2a2b2a27910, 0xc9e9eaea8fea0365,
+0xca6a656589650fec, 0x6903babad2bab968, 0x5e4a2f2fbc2f6593, 0x9d8ec0c027c04ee7,
+0xa160dede5fdebe81, 0x38fc1c1c701ce06c, 0xe746fdfdd3fdbb2e, 0x9a1f4d4d294d5264,
+0x397692927292e4e0, 0xeafa7575c9758fbc, 0x0c3606061806301e, 0x09ae8a8a128a2498,
+0x794bb2b2f2b2f940, 0xd185e6e6bfe66359, 0x1c7e0e0e380e7036, 0x3ee71f1f7c1ff863,
+0xc4556262956237f7, 0xb53ad4d477d4eea3, 0x4d81a8a89aa82932, 0x315296966296c4f4,
+0xef62f9f9c3f99b3a, 0x97a3c5c533c566f6, 0x4a102525942535b1, 0xb2ab59597959f220,
+0x15d084842a8454ae, 0xe4c57272d572b7a7, 0x72ec3939e439d5dd, 0x98164c4c2d4c5a61,
+0xbc945e5e655eca3b, 0xf09f7878fd78e785, 0x70e53838e038ddd8, 0x05988c8c0a8c1486,
+0xbf17d1d163d1c6b2, 0x57e4a5a5aea5410b, 0xd9a1e2e2afe2434d, 0xc24e616199612ff8,
+0x7b42b3b3f6b3f145, 0x42342121842115a5, 0x25089c9c4a9c94d6, 0x3cee1e1e781ef066,
+0x8661434311432252, 0x93b1c7c73bc776fc, 0xe54ffcfcd7fcb32b, 0x0824040410042014,
+0xa2e351515951b208, 0x2f2599995e99bcc7, 0xda226d6da96d4fc4, 0x1a650d0d340d6839,
+0xe979fafacffa8335, 0xa369dfdf5bdfb684, 0xfca97e7ee57ed79b, 0x4819242490243db4,
+0x76fe3b3bec3bc5d7, 0x4b9aabab96ab313d, 0x81f0cece1fce3ed1, 0x2299111144118855,
+0x03838f8f068f0c89, 0x9c044e4e254e4a6b, 0x7366b7b7e6b7d151, 0xcbe0ebeb8beb0b60,
+0x78c13c3cf03cfdcc, 0x1ffd81813e817cbf, 0x354094946a94d4fe, 0xf31cf7f7fbf7eb0c,
+0x6f18b9b9deb9a167, 0x268b13134c13985f, 0x58512c2cb02c7d9c, 0xbb05d3d36bd3d6b8,
+0xd38ce7e7bbe76b5c, 0xdc396e6ea56e57cb, 0x95aac4c437c46ef3, 0x061b03030c03180f,
+0xacdc565645568a13, 0x885e44440d441a49, 0xfea07f7fe17fdf9e, 0x4f88a9a99ea92137,
+0x54672a2aa82a4d82, 0x6b0abbbbd6bbb16d, 0x9f87c1c123c146e2, 0xa6f153535153a202,
+0xa572dcdc57dcae8b, 0x16530b0b2c0b5827, 0x27019d9d4e9d9cd3, 0xd82b6c6cad6c47c1,
+0x62a43131c43195f5, 0xe8f37474cd7487b9, 0xf115f6f6fff6e309, 0x8c4c464605460a43,
+0x45a5acac8aac0926, 0x0fb589891e893c97, 0x28b414145014a044, 0xdfbae1e1a3e15b42,
+0x2ca616165816b04e, 0x74f73a3ae83acdd2, 0xd2066969b9696fd0, 0x124109092409482d,
+0xe0d77070dd70a7ad, 0x716fb6b6e2b6d954, 0xbd1ed0d067d0ceb7, 0xc7d6eded93ed3b7e,
+0x85e2cccc17cc2edb, 0x8468424215422a57, 0x2d2c98985a98b4c2, 0x55eda4a4aaa4490e,
+0x50752828a0285d88, 0xb8865c5c6d5cda31, 0xed6bf8f8c7f8933f, 0x11c28686228644a4,
+]
+C3 = [
+0x7830d818186018c0, 0xaf462623238c2305, 0xf991b8c6c63fc67e, 0x6fcdfbe8e887e813,
+0xa113cb878726874c, 0x626d11b8b8dab8a9, 0x0502090101040108, 0x6e9e0d4f4f214f42,
+0xee6c9b3636d836ad, 0x0451ffa6a6a2a659, 0xbdb90cd2d26fd2de, 0x06f70ef5f5f3f5fb,
+0x80f2967979f979ef, 0xcede306f6fa16f5f, 0xef3f6d91917e91fc, 0x07a4f852525552aa,
+0xfdc04760609d6027, 0x766535bcbccabc89, 0xcd2b379b9b569bac, 0x8c018a8e8e028e04,
+0x155bd2a3a3b6a371, 0x3c186c0c0c300c60, 0x8af6847b7bf17bff, 0xe16a803535d435b5,
+0x693af51d1d741de8, 0x47ddb3e0e0a7e053, 0xacb321d7d77bd7f6, 0xed999cc2c22fc25e,
+0x965c432e2eb82e6d, 0x7a96294b4b314b62, 0x21e15dfefedffea3, 0x16aed55757415782,
+0x412abd15155415a8, 0xb6eee87777c1779f, 0xeb6e923737dc37a5, 0x56d79ee5e5b3e57b,
+0xd923139f9f469f8c, 0x17fd23f0f0e7f0d3, 0x7f94204a4a354a6a, 0x95a944dada4fda9e,
+0x25b0a258587d58fa, 0xca8fcfc9c903c906, 0x8d527c2929a42955, 0x22145a0a0a280a50,
+0x4f7f50b1b1feb1e1, 0x1a5dc9a0a0baa069, 0xdad6146b6bb16b7f, 0xab17d985852e855c,
+0x73673cbdbdcebd81, 0x34ba8f5d5d695dd2, 0x5020901010401080, 0x03f507f4f4f7f4f3,
+0xc08bddcbcb0bcb16, 0xc67cd33e3ef83eed, 0x110a2d0505140528, 0xe6ce78676781671f,
+0x53d597e4e4b7e473, 0xbb4e0227279c2725, 0x5882734141194132, 0x9d0ba78b8b168b2c,
+0x0153f6a7a7a6a751, 0x94fab27d7de97dcf, 0xfb374995956e95dc, 0x9fad56d8d847d88e,
+0x30eb70fbfbcbfb8b, 0x71c1cdeeee9fee23, 0x91f8bb7c7ced7cc7, 0xe3cc716666856617,
+0x8ea77bdddd53dda6, 0x4b2eaf17175c17b8, 0x468e454747014702, 0xdc211a9e9e429e84,
+0xc589d4caca0fca1e, 0x995a582d2db42d75, 0x79632ebfbfc6bf91, 0x1b0e3f07071c0738,
+0x2347acadad8ead01, 0x2fb4b05a5a755aea, 0xb51bef838336836c, 0xff66b63333cc3385,
+0xf2c65c636391633f, 0x0a04120202080210, 0x384993aaaa92aa39, 0xa8e2de7171d971af,
+0xcf8dc6c8c807c80e, 0x7d32d119196419c8, 0x70923b4949394972, 0x9aaf5fd9d943d986,
+0x1df931f2f2eff2c3, 0x48dba8e3e3abe34b, 0x2ab6b95b5b715be2, 0x920dbc88881a8834,
+0xc8293e9a9a529aa4, 0xbe4c0b262698262d, 0xfa64bf3232c8328d, 0x4a7d59b0b0fab0e9,
+0x6acff2e9e983e91b, 0x331e770f0f3c0f78, 0xa6b733d5d573d5e6, 0xba1df480803a8074,
+0x7c6127bebec2be99, 0xde87ebcdcd13cd26, 0xe468893434d034bd, 0x75903248483d487a,
+0x24e354ffffdbffab, 0x8ff48d7a7af57af7, 0xea3d6490907a90f4, 0x3ebe9d5f5f615fc2,
+0xa0403d202080201d, 0xd5d00f6868bd6867, 0x7234ca1a1a681ad0, 0x2c41b7aeae82ae19,
+0x5e757db4b4eab4c9, 0x19a8ce54544d549a, 0xe53b7f93937693ec, 0xaa442f222288220d,
+0xe9c86364648d6407, 0x12ff2af1f1e3f1db, 0xa2e6cc7373d173bf, 0x5a24821212481290,
+0x5d807a40401d403a, 0x2810480808200840, 0xe89b95c3c32bc356, 0x7bc5dfecec97ec33,
+0x90ab4ddbdb4bdb96, 0x1f5fc0a1a1bea161, 0x8307918d8d0e8d1c, 0xc97ac83d3df43df5,
+0xf1335b97976697cc, 0x0000000000000000, 0xd483f9cfcf1bcf36, 0x87566e2b2bac2b45,
+0xb3ece17676c57697, 0xb019e68282328264, 0xa9b128d6d67fd6fe, 0x7736c31b1b6c1bd8,
+0x5b7774b5b5eeb5c1, 0x2943beafaf86af11, 0xdfd41d6a6ab56a77, 0x0da0ea50505d50ba,
+0x4c8a574545094512, 0x18fb38f3f3ebf3cb, 0xf060ad3030c0309d, 0x74c3c4efef9bef2b,
+0xc37eda3f3ffc3fe5, 0x1caac75555495592, 0x1059dba2a2b2a279, 0x65c9e9eaea8fea03,
+0xecca6a656589650f, 0x686903babad2bab9, 0x935e4a2f2fbc2f65, 0xe79d8ec0c027c04e,
+0x81a160dede5fdebe, 0x6c38fc1c1c701ce0, 0x2ee746fdfdd3fdbb, 0x649a1f4d4d294d52,
+0xe0397692927292e4, 0xbceafa7575c9758f, 0x1e0c360606180630, 0x9809ae8a8a128a24,
+0x40794bb2b2f2b2f9, 0x59d185e6e6bfe663, 0x361c7e0e0e380e70, 0x633ee71f1f7c1ff8,
+0xf7c4556262956237, 0xa3b53ad4d477d4ee, 0x324d81a8a89aa829, 0xf4315296966296c4,
+0x3aef62f9f9c3f99b, 0xf697a3c5c533c566, 0xb14a102525942535, 0x20b2ab59597959f2,
+0xae15d084842a8454, 0xa7e4c57272d572b7, 0xdd72ec3939e439d5, 0x6198164c4c2d4c5a,
+0x3bbc945e5e655eca, 0x85f09f7878fd78e7, 0xd870e53838e038dd, 0x8605988c8c0a8c14,
+0xb2bf17d1d163d1c6, 0x0b57e4a5a5aea541, 0x4dd9a1e2e2afe243, 0xf8c24e616199612f,
+0x457b42b3b3f6b3f1, 0xa542342121842115, 0xd625089c9c4a9c94, 0x663cee1e1e781ef0,
+0x5286614343114322, 0xfc93b1c7c73bc776, 0x2be54ffcfcd7fcb3, 0x1408240404100420,
+0x08a2e351515951b2, 0xc72f2599995e99bc, 0xc4da226d6da96d4f, 0x391a650d0d340d68,
+0x35e979fafacffa83, 0x84a369dfdf5bdfb6, 0x9bfca97e7ee57ed7, 0xb44819242490243d,
+0xd776fe3b3bec3bc5, 0x3d4b9aabab96ab31, 0xd181f0cece1fce3e, 0x5522991111441188,
+0x8903838f8f068f0c, 0x6b9c044e4e254e4a, 0x517366b7b7e6b7d1, 0x60cbe0ebeb8beb0b,
+0xcc78c13c3cf03cfd, 0xbf1ffd81813e817c, 0xfe354094946a94d4, 0x0cf31cf7f7fbf7eb,
+0x676f18b9b9deb9a1, 0x5f268b13134c1398, 0x9c58512c2cb02c7d, 0xb8bb05d3d36bd3d6,
+0x5cd38ce7e7bbe76b, 0xcbdc396e6ea56e57, 0xf395aac4c437c46e, 0x0f061b03030c0318,
+0x13acdc565645568a, 0x49885e44440d441a, 0x9efea07f7fe17fdf, 0x374f88a9a99ea921,
+0x8254672a2aa82a4d, 0x6d6b0abbbbd6bbb1, 0xe29f87c1c123c146, 0x02a6f153535153a2,
+0x8ba572dcdc57dcae, 0x2716530b0b2c0b58, 0xd327019d9d4e9d9c, 0xc1d82b6c6cad6c47,
+0xf562a43131c43195, 0xb9e8f37474cd7487, 0x09f115f6f6fff6e3, 0x438c4c464605460a,
+0x2645a5acac8aac09, 0x970fb589891e893c, 0x4428b414145014a0, 0x42dfbae1e1a3e15b,
+0x4e2ca616165816b0, 0xd274f73a3ae83acd, 0xd0d2066969b9696f, 0x2d12410909240948,
+0xade0d77070dd70a7, 0x54716fb6b6e2b6d9, 0xb7bd1ed0d067d0ce, 0x7ec7d6eded93ed3b,
+0xdb85e2cccc17cc2e, 0x578468424215422a, 0xc22d2c98985a98b4, 0x0e55eda4a4aaa449,
+0x8850752828a0285d, 0x31b8865c5c6d5cda, 0x3fed6bf8f8c7f893, 0xa411c28686228644,
+]
+C4 = [
+0xc07830d818186018, 0x05af462623238c23, 0x7ef991b8c6c63fc6, 0x136fcdfbe8e887e8,
+0x4ca113cb87872687, 0xa9626d11b8b8dab8, 0x0805020901010401, 0x426e9e0d4f4f214f,
+0xadee6c9b3636d836, 0x590451ffa6a6a2a6, 0xdebdb90cd2d26fd2, 0xfb06f70ef5f5f3f5,
+0xef80f2967979f979, 0x5fcede306f6fa16f, 0xfcef3f6d91917e91, 0xaa07a4f852525552,
+0x27fdc04760609d60, 0x89766535bcbccabc, 0xaccd2b379b9b569b, 0x048c018a8e8e028e,
+0x71155bd2a3a3b6a3, 0x603c186c0c0c300c, 0xff8af6847b7bf17b, 0xb5e16a803535d435,
+0xe8693af51d1d741d, 0x5347ddb3e0e0a7e0, 0xf6acb321d7d77bd7, 0x5eed999cc2c22fc2,
+0x6d965c432e2eb82e, 0x627a96294b4b314b, 0xa321e15dfefedffe, 0x8216aed557574157,
+0xa8412abd15155415, 0x9fb6eee87777c177, 0xa5eb6e923737dc37, 0x7b56d79ee5e5b3e5,
+0x8cd923139f9f469f, 0xd317fd23f0f0e7f0, 0x6a7f94204a4a354a, 0x9e95a944dada4fda,
+0xfa25b0a258587d58, 0x06ca8fcfc9c903c9, 0x558d527c2929a429, 0x5022145a0a0a280a,
+0xe14f7f50b1b1feb1, 0x691a5dc9a0a0baa0, 0x7fdad6146b6bb16b, 0x5cab17d985852e85,
+0x8173673cbdbdcebd, 0xd234ba8f5d5d695d, 0x8050209010104010, 0xf303f507f4f4f7f4,
+0x16c08bddcbcb0bcb, 0xedc67cd33e3ef83e, 0x28110a2d05051405, 0x1fe6ce7867678167,
+0x7353d597e4e4b7e4, 0x25bb4e0227279c27, 0x3258827341411941, 0x2c9d0ba78b8b168b,
+0x510153f6a7a7a6a7, 0xcf94fab27d7de97d, 0xdcfb374995956e95, 0x8e9fad56d8d847d8,
+0x8b30eb70fbfbcbfb, 0x2371c1cdeeee9fee, 0xc791f8bb7c7ced7c, 0x17e3cc7166668566,
+0xa68ea77bdddd53dd, 0xb84b2eaf17175c17, 0x02468e4547470147, 0x84dc211a9e9e429e,
+0x1ec589d4caca0fca, 0x75995a582d2db42d, 0x9179632ebfbfc6bf, 0x381b0e3f07071c07,
+0x012347acadad8ead, 0xea2fb4b05a5a755a, 0x6cb51bef83833683, 0x85ff66b63333cc33,
+0x3ff2c65c63639163, 0x100a041202020802, 0x39384993aaaa92aa, 0xafa8e2de7171d971,
+0x0ecf8dc6c8c807c8, 0xc87d32d119196419, 0x7270923b49493949, 0x869aaf5fd9d943d9,
+0xc31df931f2f2eff2, 0x4b48dba8e3e3abe3, 0xe22ab6b95b5b715b, 0x34920dbc88881a88,
+0xa4c8293e9a9a529a, 0x2dbe4c0b26269826, 0x8dfa64bf3232c832, 0xe94a7d59b0b0fab0,
+0x1b6acff2e9e983e9, 0x78331e770f0f3c0f, 0xe6a6b733d5d573d5, 0x74ba1df480803a80,
+0x997c6127bebec2be, 0x26de87ebcdcd13cd, 0xbde468893434d034, 0x7a75903248483d48,
+0xab24e354ffffdbff, 0xf78ff48d7a7af57a, 0xf4ea3d6490907a90, 0xc23ebe9d5f5f615f,
+0x1da0403d20208020, 0x67d5d00f6868bd68, 0xd07234ca1a1a681a, 0x192c41b7aeae82ae,
+0xc95e757db4b4eab4, 0x9a19a8ce54544d54, 0xece53b7f93937693, 0x0daa442f22228822,
+0x07e9c86364648d64, 0xdb12ff2af1f1e3f1, 0xbfa2e6cc7373d173, 0x905a248212124812,
+0x3a5d807a40401d40, 0x4028104808082008, 0x56e89b95c3c32bc3, 0x337bc5dfecec97ec,
+0x9690ab4ddbdb4bdb, 0x611f5fc0a1a1bea1, 0x1c8307918d8d0e8d, 0xf5c97ac83d3df43d,
+0xccf1335b97976697, 0x0000000000000000, 0x36d483f9cfcf1bcf, 0x4587566e2b2bac2b,
+0x97b3ece17676c576, 0x64b019e682823282, 0xfea9b128d6d67fd6, 0xd87736c31b1b6c1b,
+0xc15b7774b5b5eeb5, 0x112943beafaf86af, 0x77dfd41d6a6ab56a, 0xba0da0ea50505d50,
+0x124c8a5745450945, 0xcb18fb38f3f3ebf3, 0x9df060ad3030c030, 0x2b74c3c4efef9bef,
+0xe5c37eda3f3ffc3f, 0x921caac755554955, 0x791059dba2a2b2a2, 0x0365c9e9eaea8fea,
+0x0fecca6a65658965, 0xb9686903babad2ba, 0x65935e4a2f2fbc2f, 0x4ee79d8ec0c027c0,
+0xbe81a160dede5fde, 0xe06c38fc1c1c701c, 0xbb2ee746fdfdd3fd, 0x52649a1f4d4d294d,
+0xe4e0397692927292, 0x8fbceafa7575c975, 0x301e0c3606061806, 0x249809ae8a8a128a,
+0xf940794bb2b2f2b2, 0x6359d185e6e6bfe6, 0x70361c7e0e0e380e, 0xf8633ee71f1f7c1f,
+0x37f7c45562629562, 0xeea3b53ad4d477d4, 0x29324d81a8a89aa8, 0xc4f4315296966296,
+0x9b3aef62f9f9c3f9, 0x66f697a3c5c533c5, 0x35b14a1025259425, 0xf220b2ab59597959,
+0x54ae15d084842a84, 0xb7a7e4c57272d572, 0xd5dd72ec3939e439, 0x5a6198164c4c2d4c,
+0xca3bbc945e5e655e, 0xe785f09f7878fd78, 0xddd870e53838e038, 0x148605988c8c0a8c,
+0xc6b2bf17d1d163d1, 0x410b57e4a5a5aea5, 0x434dd9a1e2e2afe2, 0x2ff8c24e61619961,
+0xf1457b42b3b3f6b3, 0x15a5423421218421, 0x94d625089c9c4a9c, 0xf0663cee1e1e781e,
+0x2252866143431143, 0x76fc93b1c7c73bc7, 0xb32be54ffcfcd7fc, 0x2014082404041004,
+0xb208a2e351515951, 0xbcc72f2599995e99, 0x4fc4da226d6da96d, 0x68391a650d0d340d,
+0x8335e979fafacffa, 0xb684a369dfdf5bdf, 0xd79bfca97e7ee57e, 0x3db4481924249024,
+0xc5d776fe3b3bec3b, 0x313d4b9aabab96ab, 0x3ed181f0cece1fce, 0x8855229911114411,
+0x0c8903838f8f068f, 0x4a6b9c044e4e254e, 0xd1517366b7b7e6b7, 0x0b60cbe0ebeb8beb,
+0xfdcc78c13c3cf03c, 0x7cbf1ffd81813e81, 0xd4fe354094946a94, 0xeb0cf31cf7f7fbf7,
+0xa1676f18b9b9deb9, 0x985f268b13134c13, 0x7d9c58512c2cb02c, 0xd6b8bb05d3d36bd3,
+0x6b5cd38ce7e7bbe7, 0x57cbdc396e6ea56e, 0x6ef395aac4c437c4, 0x180f061b03030c03,
+0x8a13acdc56564556, 0x1a49885e44440d44, 0xdf9efea07f7fe17f, 0x21374f88a9a99ea9,
+0x4d8254672a2aa82a, 0xb16d6b0abbbbd6bb, 0x46e29f87c1c123c1, 0xa202a6f153535153,
+0xae8ba572dcdc57dc, 0x582716530b0b2c0b, 0x9cd327019d9d4e9d, 0x47c1d82b6c6cad6c,
+0x95f562a43131c431, 0x87b9e8f37474cd74, 0xe309f115f6f6fff6, 0x0a438c4c46460546,
+0x092645a5acac8aac, 0x3c970fb589891e89, 0xa04428b414145014, 0x5b42dfbae1e1a3e1,
+0xb04e2ca616165816, 0xcdd274f73a3ae83a, 0x6fd0d2066969b969, 0x482d124109092409,
+0xa7ade0d77070dd70, 0xd954716fb6b6e2b6, 0xceb7bd1ed0d067d0, 0x3b7ec7d6eded93ed,
+0x2edb85e2cccc17cc, 0x2a57846842421542, 0xb4c22d2c98985a98, 0x490e55eda4a4aaa4,
+0x5d8850752828a028, 0xda31b8865c5c6d5c, 0x933fed6bf8f8c7f8, 0x44a411c286862286,
+]
+C5 = [
+0x18c07830d8181860, 0x2305af462623238c, 0xc67ef991b8c6c63f, 0xe8136fcdfbe8e887,
+0x874ca113cb878726, 0xb8a9626d11b8b8da, 0x0108050209010104, 0x4f426e9e0d4f4f21,
+0x36adee6c9b3636d8, 0xa6590451ffa6a6a2, 0xd2debdb90cd2d26f, 0xf5fb06f70ef5f5f3,
+0x79ef80f2967979f9, 0x6f5fcede306f6fa1, 0x91fcef3f6d91917e, 0x52aa07a4f8525255,
+0x6027fdc04760609d, 0xbc89766535bcbcca, 0x9baccd2b379b9b56, 0x8e048c018a8e8e02,
+0xa371155bd2a3a3b6, 0x0c603c186c0c0c30, 0x7bff8af6847b7bf1, 0x35b5e16a803535d4,
+0x1de8693af51d1d74, 0xe05347ddb3e0e0a7, 0xd7f6acb321d7d77b, 0xc25eed999cc2c22f,
+0x2e6d965c432e2eb8, 0x4b627a96294b4b31, 0xfea321e15dfefedf, 0x578216aed5575741,
+0x15a8412abd151554, 0x779fb6eee87777c1, 0x37a5eb6e923737dc, 0xe57b56d79ee5e5b3,
+0x9f8cd923139f9f46, 0xf0d317fd23f0f0e7, 0x4a6a7f94204a4a35, 0xda9e95a944dada4f,
+0x58fa25b0a258587d, 0xc906ca8fcfc9c903, 0x29558d527c2929a4, 0x0a5022145a0a0a28,
+0xb1e14f7f50b1b1fe, 0xa0691a5dc9a0a0ba, 0x6b7fdad6146b6bb1, 0x855cab17d985852e,
+0xbd8173673cbdbdce, 0x5dd234ba8f5d5d69, 0x1080502090101040, 0xf4f303f507f4f4f7,
+0xcb16c08bddcbcb0b, 0x3eedc67cd33e3ef8, 0x0528110a2d050514, 0x671fe6ce78676781,
+0xe47353d597e4e4b7, 0x2725bb4e0227279c, 0x4132588273414119, 0x8b2c9d0ba78b8b16,
+0xa7510153f6a7a7a6, 0x7dcf94fab27d7de9, 0x95dcfb374995956e, 0xd88e9fad56d8d847,
+0xfb8b30eb70fbfbcb, 0xee2371c1cdeeee9f, 0x7cc791f8bb7c7ced, 0x6617e3cc71666685,
+0xdda68ea77bdddd53, 0x17b84b2eaf17175c, 0x4702468e45474701, 0x9e84dc211a9e9e42,
+0xca1ec589d4caca0f, 0x2d75995a582d2db4, 0xbf9179632ebfbfc6, 0x07381b0e3f07071c,
+0xad012347acadad8e, 0x5aea2fb4b05a5a75, 0x836cb51bef838336, 0x3385ff66b63333cc,
+0x633ff2c65c636391, 0x02100a0412020208, 0xaa39384993aaaa92, 0x71afa8e2de7171d9,
+0xc80ecf8dc6c8c807, 0x19c87d32d1191964, 0x497270923b494939, 0xd9869aaf5fd9d943,
+0xf2c31df931f2f2ef, 0xe34b48dba8e3e3ab, 0x5be22ab6b95b5b71, 0x8834920dbc88881a,
+0x9aa4c8293e9a9a52, 0x262dbe4c0b262698, 0x328dfa64bf3232c8, 0xb0e94a7d59b0b0fa,
+0xe91b6acff2e9e983, 0x0f78331e770f0f3c, 0xd5e6a6b733d5d573, 0x8074ba1df480803a,
+0xbe997c6127bebec2, 0xcd26de87ebcdcd13, 0x34bde468893434d0, 0x487a75903248483d,
+0xffab24e354ffffdb, 0x7af78ff48d7a7af5, 0x90f4ea3d6490907a, 0x5fc23ebe9d5f5f61,
+0x201da0403d202080, 0x6867d5d00f6868bd, 0x1ad07234ca1a1a68, 0xae192c41b7aeae82,
+0xb4c95e757db4b4ea, 0x549a19a8ce54544d, 0x93ece53b7f939376, 0x220daa442f222288,
+0x6407e9c86364648d, 0xf1db12ff2af1f1e3, 0x73bfa2e6cc7373d1, 0x12905a2482121248,
+0x403a5d807a40401d, 0x0840281048080820, 0xc356e89b95c3c32b, 0xec337bc5dfecec97,
+0xdb9690ab4ddbdb4b, 0xa1611f5fc0a1a1be, 0x8d1c8307918d8d0e, 0x3df5c97ac83d3df4,
+0x97ccf1335b979766, 0x0000000000000000, 0xcf36d483f9cfcf1b, 0x2b4587566e2b2bac,
+0x7697b3ece17676c5, 0x8264b019e6828232, 0xd6fea9b128d6d67f, 0x1bd87736c31b1b6c,
+0xb5c15b7774b5b5ee, 0xaf112943beafaf86, 0x6a77dfd41d6a6ab5, 0x50ba0da0ea50505d,
+0x45124c8a57454509, 0xf3cb18fb38f3f3eb, 0x309df060ad3030c0, 0xef2b74c3c4efef9b,
+0x3fe5c37eda3f3ffc, 0x55921caac7555549, 0xa2791059dba2a2b2, 0xea0365c9e9eaea8f,
+0x650fecca6a656589, 0xbab9686903babad2, 0x2f65935e4a2f2fbc, 0xc04ee79d8ec0c027,
+0xdebe81a160dede5f, 0x1ce06c38fc1c1c70, 0xfdbb2ee746fdfdd3, 0x4d52649a1f4d4d29,
+0x92e4e03976929272, 0x758fbceafa7575c9, 0x06301e0c36060618, 0x8a249809ae8a8a12,
+0xb2f940794bb2b2f2, 0xe66359d185e6e6bf, 0x0e70361c7e0e0e38, 0x1ff8633ee71f1f7c,
+0x6237f7c455626295, 0xd4eea3b53ad4d477, 0xa829324d81a8a89a, 0x96c4f43152969662,
+0xf99b3aef62f9f9c3, 0xc566f697a3c5c533, 0x2535b14a10252594, 0x59f220b2ab595979,
+0x8454ae15d084842a, 0x72b7a7e4c57272d5, 0x39d5dd72ec3939e4, 0x4c5a6198164c4c2d,
+0x5eca3bbc945e5e65, 0x78e785f09f7878fd, 0x38ddd870e53838e0, 0x8c148605988c8c0a,
+0xd1c6b2bf17d1d163, 0xa5410b57e4a5a5ae, 0xe2434dd9a1e2e2af, 0x612ff8c24e616199,
+0xb3f1457b42b3b3f6, 0x2115a54234212184, 0x9c94d625089c9c4a, 0x1ef0663cee1e1e78,
+0x4322528661434311, 0xc776fc93b1c7c73b, 0xfcb32be54ffcfcd7, 0x0420140824040410,
+0x51b208a2e3515159, 0x99bcc72f2599995e, 0x6d4fc4da226d6da9, 0x0d68391a650d0d34,
+0xfa8335e979fafacf, 0xdfb684a369dfdf5b, 0x7ed79bfca97e7ee5, 0x243db44819242490,
+0x3bc5d776fe3b3bec, 0xab313d4b9aabab96, 0xce3ed181f0cece1f, 0x1188552299111144,
+0x8f0c8903838f8f06, 0x4e4a6b9c044e4e25, 0xb7d1517366b7b7e6, 0xeb0b60cbe0ebeb8b,
+0x3cfdcc78c13c3cf0, 0x817cbf1ffd81813e, 0x94d4fe354094946a, 0xf7eb0cf31cf7f7fb,
+0xb9a1676f18b9b9de, 0x13985f268b13134c, 0x2c7d9c58512c2cb0, 0xd3d6b8bb05d3d36b,
+0xe76b5cd38ce7e7bb, 0x6e57cbdc396e6ea5, 0xc46ef395aac4c437, 0x03180f061b03030c,
+0x568a13acdc565645, 0x441a49885e44440d, 0x7fdf9efea07f7fe1, 0xa921374f88a9a99e,
+0x2a4d8254672a2aa8, 0xbbb16d6b0abbbbd6, 0xc146e29f87c1c123, 0x53a202a6f1535351,
+0xdcae8ba572dcdc57, 0x0b582716530b0b2c, 0x9d9cd327019d9d4e, 0x6c47c1d82b6c6cad,
+0x3195f562a43131c4, 0x7487b9e8f37474cd, 0xf6e309f115f6f6ff, 0x460a438c4c464605,
+0xac092645a5acac8a, 0x893c970fb589891e, 0x14a04428b4141450, 0xe15b42dfbae1e1a3,
+0x16b04e2ca6161658, 0x3acdd274f73a3ae8, 0x696fd0d2066969b9, 0x09482d1241090924,
+0x70a7ade0d77070dd, 0xb6d954716fb6b6e2, 0xd0ceb7bd1ed0d067, 0xed3b7ec7d6eded93,
+0xcc2edb85e2cccc17, 0x422a578468424215, 0x98b4c22d2c98985a, 0xa4490e55eda4a4aa,
+0x285d8850752828a0, 0x5cda31b8865c5c6d, 0xf8933fed6bf8f8c7, 0x8644a411c2868622,
+]
+C6 = [
+0x6018c07830d81818, 0x8c2305af46262323, 0x3fc67ef991b8c6c6, 0x87e8136fcdfbe8e8,
+0x26874ca113cb8787, 0xdab8a9626d11b8b8, 0x0401080502090101, 0x214f426e9e0d4f4f,
+0xd836adee6c9b3636, 0xa2a6590451ffa6a6, 0x6fd2debdb90cd2d2, 0xf3f5fb06f70ef5f5,
+0xf979ef80f2967979, 0xa16f5fcede306f6f, 0x7e91fcef3f6d9191, 0x5552aa07a4f85252,
+0x9d6027fdc0476060, 0xcabc89766535bcbc, 0x569baccd2b379b9b, 0x028e048c018a8e8e,
+0xb6a371155bd2a3a3, 0x300c603c186c0c0c, 0xf17bff8af6847b7b, 0xd435b5e16a803535,
+0x741de8693af51d1d, 0xa7e05347ddb3e0e0, 0x7bd7f6acb321d7d7, 0x2fc25eed999cc2c2,
+0xb82e6d965c432e2e, 0x314b627a96294b4b, 0xdffea321e15dfefe, 0x41578216aed55757,
+0x5415a8412abd1515, 0xc1779fb6eee87777, 0xdc37a5eb6e923737, 0xb3e57b56d79ee5e5,
+0x469f8cd923139f9f, 0xe7f0d317fd23f0f0, 0x354a6a7f94204a4a, 0x4fda9e95a944dada,
+0x7d58fa25b0a25858, 0x03c906ca8fcfc9c9, 0xa429558d527c2929, 0x280a5022145a0a0a,
+0xfeb1e14f7f50b1b1, 0xbaa0691a5dc9a0a0, 0xb16b7fdad6146b6b, 0x2e855cab17d98585,
+0xcebd8173673cbdbd, 0x695dd234ba8f5d5d, 0x4010805020901010, 0xf7f4f303f507f4f4,
+0x0bcb16c08bddcbcb, 0xf83eedc67cd33e3e, 0x140528110a2d0505, 0x81671fe6ce786767,
+0xb7e47353d597e4e4, 0x9c2725bb4e022727, 0x1941325882734141, 0x168b2c9d0ba78b8b,
+0xa6a7510153f6a7a7, 0xe97dcf94fab27d7d, 0x6e95dcfb37499595, 0x47d88e9fad56d8d8,
+0xcbfb8b30eb70fbfb, 0x9fee2371c1cdeeee, 0xed7cc791f8bb7c7c, 0x856617e3cc716666,
+0x53dda68ea77bdddd, 0x5c17b84b2eaf1717, 0x014702468e454747, 0x429e84dc211a9e9e,
+0x0fca1ec589d4caca, 0xb42d75995a582d2d, 0xc6bf9179632ebfbf, 0x1c07381b0e3f0707,
+0x8ead012347acadad, 0x755aea2fb4b05a5a, 0x36836cb51bef8383, 0xcc3385ff66b63333,
+0x91633ff2c65c6363, 0x0802100a04120202, 0x92aa39384993aaaa, 0xd971afa8e2de7171,
+0x07c80ecf8dc6c8c8, 0x6419c87d32d11919, 0x39497270923b4949, 0x43d9869aaf5fd9d9,
+0xeff2c31df931f2f2, 0xabe34b48dba8e3e3, 0x715be22ab6b95b5b, 0x1a8834920dbc8888,
+0x529aa4c8293e9a9a, 0x98262dbe4c0b2626, 0xc8328dfa64bf3232, 0xfab0e94a7d59b0b0,
+0x83e91b6acff2e9e9, 0x3c0f78331e770f0f, 0x73d5e6a6b733d5d5, 0x3a8074ba1df48080,
+0xc2be997c6127bebe, 0x13cd26de87ebcdcd, 0xd034bde468893434, 0x3d487a7590324848,
+0xdbffab24e354ffff, 0xf57af78ff48d7a7a, 0x7a90f4ea3d649090, 0x615fc23ebe9d5f5f,
+0x80201da0403d2020, 0xbd6867d5d00f6868, 0x681ad07234ca1a1a, 0x82ae192c41b7aeae,
+0xeab4c95e757db4b4, 0x4d549a19a8ce5454, 0x7693ece53b7f9393, 0x88220daa442f2222,
+0x8d6407e9c8636464, 0xe3f1db12ff2af1f1, 0xd173bfa2e6cc7373, 0x4812905a24821212,
+0x1d403a5d807a4040, 0x2008402810480808, 0x2bc356e89b95c3c3, 0x97ec337bc5dfecec,
+0x4bdb9690ab4ddbdb, 0xbea1611f5fc0a1a1, 0x0e8d1c8307918d8d, 0xf43df5c97ac83d3d,
+0x6697ccf1335b9797, 0x0000000000000000, 0x1bcf36d483f9cfcf, 0xac2b4587566e2b2b,
+0xc57697b3ece17676, 0x328264b019e68282, 0x7fd6fea9b128d6d6, 0x6c1bd87736c31b1b,
+0xeeb5c15b7774b5b5, 0x86af112943beafaf, 0xb56a77dfd41d6a6a, 0x5d50ba0da0ea5050,
+0x0945124c8a574545, 0xebf3cb18fb38f3f3, 0xc0309df060ad3030, 0x9bef2b74c3c4efef,
+0xfc3fe5c37eda3f3f, 0x4955921caac75555, 0xb2a2791059dba2a2, 0x8fea0365c9e9eaea,
+0x89650fecca6a6565, 0xd2bab9686903baba, 0xbc2f65935e4a2f2f, 0x27c04ee79d8ec0c0,
+0x5fdebe81a160dede, 0x701ce06c38fc1c1c, 0xd3fdbb2ee746fdfd, 0x294d52649a1f4d4d,
+0x7292e4e039769292, 0xc9758fbceafa7575, 0x1806301e0c360606, 0x128a249809ae8a8a,
+0xf2b2f940794bb2b2, 0xbfe66359d185e6e6, 0x380e70361c7e0e0e, 0x7c1ff8633ee71f1f,
+0x956237f7c4556262, 0x77d4eea3b53ad4d4, 0x9aa829324d81a8a8, 0x6296c4f431529696,
+0xc3f99b3aef62f9f9, 0x33c566f697a3c5c5, 0x942535b14a102525, 0x7959f220b2ab5959,
+0x2a8454ae15d08484, 0xd572b7a7e4c57272, 0xe439d5dd72ec3939, 0x2d4c5a6198164c4c,
+0x655eca3bbc945e5e, 0xfd78e785f09f7878, 0xe038ddd870e53838, 0x0a8c148605988c8c,
+0x63d1c6b2bf17d1d1, 0xaea5410b57e4a5a5, 0xafe2434dd9a1e2e2, 0x99612ff8c24e6161,
+0xf6b3f1457b42b3b3, 0x842115a542342121, 0x4a9c94d625089c9c, 0x781ef0663cee1e1e,
+0x1143225286614343, 0x3bc776fc93b1c7c7, 0xd7fcb32be54ffcfc, 0x1004201408240404,
+0x5951b208a2e35151, 0x5e99bcc72f259999, 0xa96d4fc4da226d6d, 0x340d68391a650d0d,
+0xcffa8335e979fafa, 0x5bdfb684a369dfdf, 0xe57ed79bfca97e7e, 0x90243db448192424,
+0xec3bc5d776fe3b3b, 0x96ab313d4b9aabab, 0x1fce3ed181f0cece, 0x4411885522991111,
+0x068f0c8903838f8f, 0x254e4a6b9c044e4e, 0xe6b7d1517366b7b7, 0x8beb0b60cbe0ebeb,
+0xf03cfdcc78c13c3c, 0x3e817cbf1ffd8181, 0x6a94d4fe35409494, 0xfbf7eb0cf31cf7f7,
+0xdeb9a1676f18b9b9, 0x4c13985f268b1313, 0xb02c7d9c58512c2c, 0x6bd3d6b8bb05d3d3,
+0xbbe76b5cd38ce7e7, 0xa56e57cbdc396e6e, 0x37c46ef395aac4c4, 0x0c03180f061b0303,
+0x45568a13acdc5656, 0x0d441a49885e4444, 0xe17fdf9efea07f7f, 0x9ea921374f88a9a9,
+0xa82a4d8254672a2a, 0xd6bbb16d6b0abbbb, 0x23c146e29f87c1c1, 0x5153a202a6f15353,
+0x57dcae8ba572dcdc, 0x2c0b582716530b0b, 0x4e9d9cd327019d9d, 0xad6c47c1d82b6c6c,
+0xc43195f562a43131, 0xcd7487b9e8f37474, 0xfff6e309f115f6f6, 0x05460a438c4c4646,
+0x8aac092645a5acac, 0x1e893c970fb58989, 0x5014a04428b41414, 0xa3e15b42dfbae1e1,
+0x5816b04e2ca61616, 0xe83acdd274f73a3a, 0xb9696fd0d2066969, 0x2409482d12410909,
+0xdd70a7ade0d77070, 0xe2b6d954716fb6b6, 0x67d0ceb7bd1ed0d0, 0x93ed3b7ec7d6eded,
+0x17cc2edb85e2cccc, 0x15422a5784684242, 0x5a98b4c22d2c9898, 0xaaa4490e55eda4a4,
+0xa0285d8850752828, 0x6d5cda31b8865c5c, 0xc7f8933fed6bf8f8, 0x228644a411c28686,
+]
+C7 = [
+0x186018c07830d818, 0x238c2305af462623, 0xc63fc67ef991b8c6, 0xe887e8136fcdfbe8,
+0x8726874ca113cb87, 0xb8dab8a9626d11b8, 0x0104010805020901, 0x4f214f426e9e0d4f,
+0x36d836adee6c9b36, 0xa6a2a6590451ffa6, 0xd26fd2debdb90cd2, 0xf5f3f5fb06f70ef5,
+0x79f979ef80f29679, 0x6fa16f5fcede306f, 0x917e91fcef3f6d91, 0x525552aa07a4f852,
+0x609d6027fdc04760, 0xbccabc89766535bc, 0x9b569baccd2b379b, 0x8e028e048c018a8e,
+0xa3b6a371155bd2a3, 0x0c300c603c186c0c, 0x7bf17bff8af6847b, 0x35d435b5e16a8035,
+0x1d741de8693af51d, 0xe0a7e05347ddb3e0, 0xd77bd7f6acb321d7, 0xc22fc25eed999cc2,
+0x2eb82e6d965c432e, 0x4b314b627a96294b, 0xfedffea321e15dfe, 0x5741578216aed557,
+0x155415a8412abd15, 0x77c1779fb6eee877, 0x37dc37a5eb6e9237, 0xe5b3e57b56d79ee5,
+0x9f469f8cd923139f, 0xf0e7f0d317fd23f0, 0x4a354a6a7f94204a, 0xda4fda9e95a944da,
+0x587d58fa25b0a258, 0xc903c906ca8fcfc9, 0x29a429558d527c29, 0x0a280a5022145a0a,
+0xb1feb1e14f7f50b1, 0xa0baa0691a5dc9a0, 0x6bb16b7fdad6146b, 0x852e855cab17d985,
+0xbdcebd8173673cbd, 0x5d695dd234ba8f5d, 0x1040108050209010, 0xf4f7f4f303f507f4,
+0xcb0bcb16c08bddcb, 0x3ef83eedc67cd33e, 0x05140528110a2d05, 0x6781671fe6ce7867,
+0xe4b7e47353d597e4, 0x279c2725bb4e0227, 0x4119413258827341, 0x8b168b2c9d0ba78b,
+0xa7a6a7510153f6a7, 0x7de97dcf94fab27d, 0x956e95dcfb374995, 0xd847d88e9fad56d8,
+0xfbcbfb8b30eb70fb, 0xee9fee2371c1cdee, 0x7ced7cc791f8bb7c, 0x66856617e3cc7166,
+0xdd53dda68ea77bdd, 0x175c17b84b2eaf17, 0x47014702468e4547, 0x9e429e84dc211a9e,
+0xca0fca1ec589d4ca, 0x2db42d75995a582d, 0xbfc6bf9179632ebf, 0x071c07381b0e3f07,
+0xad8ead012347acad, 0x5a755aea2fb4b05a, 0x8336836cb51bef83, 0x33cc3385ff66b633,
+0x6391633ff2c65c63, 0x020802100a041202, 0xaa92aa39384993aa, 0x71d971afa8e2de71,
+0xc807c80ecf8dc6c8, 0x196419c87d32d119, 0x4939497270923b49, 0xd943d9869aaf5fd9,
+0xf2eff2c31df931f2, 0xe3abe34b48dba8e3, 0x5b715be22ab6b95b, 0x881a8834920dbc88,
+0x9a529aa4c8293e9a, 0x2698262dbe4c0b26, 0x32c8328dfa64bf32, 0xb0fab0e94a7d59b0,
+0xe983e91b6acff2e9, 0x0f3c0f78331e770f, 0xd573d5e6a6b733d5, 0x803a8074ba1df480,
+0xbec2be997c6127be, 0xcd13cd26de87ebcd, 0x34d034bde4688934, 0x483d487a75903248,
+0xffdbffab24e354ff, 0x7af57af78ff48d7a, 0x907a90f4ea3d6490, 0x5f615fc23ebe9d5f,
+0x2080201da0403d20, 0x68bd6867d5d00f68, 0x1a681ad07234ca1a, 0xae82ae192c41b7ae,
+0xb4eab4c95e757db4, 0x544d549a19a8ce54, 0x937693ece53b7f93, 0x2288220daa442f22,
+0x648d6407e9c86364, 0xf1e3f1db12ff2af1, 0x73d173bfa2e6cc73, 0x124812905a248212,
+0x401d403a5d807a40, 0x0820084028104808, 0xc32bc356e89b95c3, 0xec97ec337bc5dfec,
+0xdb4bdb9690ab4ddb, 0xa1bea1611f5fc0a1, 0x8d0e8d1c8307918d, 0x3df43df5c97ac83d,
+0x976697ccf1335b97, 0x0000000000000000, 0xcf1bcf36d483f9cf, 0x2bac2b4587566e2b,
+0x76c57697b3ece176, 0x82328264b019e682, 0xd67fd6fea9b128d6, 0x1b6c1bd87736c31b,
+0xb5eeb5c15b7774b5, 0xaf86af112943beaf, 0x6ab56a77dfd41d6a, 0x505d50ba0da0ea50,
+0x450945124c8a5745, 0xf3ebf3cb18fb38f3, 0x30c0309df060ad30, 0xef9bef2b74c3c4ef,
+0x3ffc3fe5c37eda3f, 0x554955921caac755, 0xa2b2a2791059dba2, 0xea8fea0365c9e9ea,
+0x6589650fecca6a65, 0xbad2bab9686903ba, 0x2fbc2f65935e4a2f, 0xc027c04ee79d8ec0,
+0xde5fdebe81a160de, 0x1c701ce06c38fc1c, 0xfdd3fdbb2ee746fd, 0x4d294d52649a1f4d,
+0x927292e4e0397692, 0x75c9758fbceafa75, 0x061806301e0c3606, 0x8a128a249809ae8a,
+0xb2f2b2f940794bb2, 0xe6bfe66359d185e6, 0x0e380e70361c7e0e, 0x1f7c1ff8633ee71f,
+0x62956237f7c45562, 0xd477d4eea3b53ad4, 0xa89aa829324d81a8, 0x966296c4f4315296,
+0xf9c3f99b3aef62f9, 0xc533c566f697a3c5, 0x25942535b14a1025, 0x597959f220b2ab59,
+0x842a8454ae15d084, 0x72d572b7a7e4c572, 0x39e439d5dd72ec39, 0x4c2d4c5a6198164c,
+0x5e655eca3bbc945e, 0x78fd78e785f09f78, 0x38e038ddd870e538, 0x8c0a8c148605988c,
+0xd163d1c6b2bf17d1, 0xa5aea5410b57e4a5, 0xe2afe2434dd9a1e2, 0x6199612ff8c24e61,
+0xb3f6b3f1457b42b3, 0x21842115a5423421, 0x9c4a9c94d625089c, 0x1e781ef0663cee1e,
+0x4311432252866143, 0xc73bc776fc93b1c7, 0xfcd7fcb32be54ffc, 0x0410042014082404,
+0x515951b208a2e351, 0x995e99bcc72f2599, 0x6da96d4fc4da226d, 0x0d340d68391a650d,
+0xfacffa8335e979fa, 0xdf5bdfb684a369df, 0x7ee57ed79bfca97e, 0x2490243db4481924,
+0x3bec3bc5d776fe3b, 0xab96ab313d4b9aab, 0xce1fce3ed181f0ce, 0x1144118855229911,
+0x8f068f0c8903838f, 0x4e254e4a6b9c044e, 0xb7e6b7d1517366b7, 0xeb8beb0b60cbe0eb,
+0x3cf03cfdcc78c13c, 0x813e817cbf1ffd81, 0x946a94d4fe354094, 0xf7fbf7eb0cf31cf7,
+0xb9deb9a1676f18b9, 0x134c13985f268b13, 0x2cb02c7d9c58512c, 0xd36bd3d6b8bb05d3,
+0xe7bbe76b5cd38ce7, 0x6ea56e57cbdc396e, 0xc437c46ef395aac4, 0x030c03180f061b03,
+0x5645568a13acdc56, 0x440d441a49885e44, 0x7fe17fdf9efea07f, 0xa99ea921374f88a9,
+0x2aa82a4d8254672a, 0xbbd6bbb16d6b0abb, 0xc123c146e29f87c1, 0x535153a202a6f153,
+0xdc57dcae8ba572dc, 0x0b2c0b582716530b, 0x9d4e9d9cd327019d, 0x6cad6c47c1d82b6c,
+0x31c43195f562a431, 0x74cd7487b9e8f374, 0xf6fff6e309f115f6, 0x4605460a438c4c46,
+0xac8aac092645a5ac, 0x891e893c970fb589, 0x145014a04428b414, 0xe1a3e15b42dfbae1,
+0x165816b04e2ca616, 0x3ae83acdd274f73a, 0x69b9696fd0d20669, 0x092409482d124109,
+0x70dd70a7ade0d770, 0xb6e2b6d954716fb6, 0xd067d0ceb7bd1ed0, 0xed93ed3b7ec7d6ed,
+0xcc17cc2edb85e2cc, 0x4215422a57846842, 0x985a98b4c22d2c98, 0xa4aaa4490e55eda4,
+0x28a0285d88507528, 0x5c6d5cda31b8865c, 0xf8c7f8933fed6bf8, 0x86228644a411c286,
+]
+
+rc = [
+0x0000000000000000,
+0x1823c6e887b8014f,
+0x36a6d2f5796f9152,
+0x60bc9b8ea30c7b35,
+0x1de0d7c22e4bfe57,
+0x157737e59ff04ada,
+0x58c9290ab1a06b85,
+0xbd5d10f4cb3e0567,
+0xe427418ba77d95d8,
+0xfbee7c66dd17479e,
+0xca2dbf07ad5a8333
+]
+
+DIGESTBYTES = 64
+class WhirlpoolStruct:
+ def __init__(self):
+ self.bitLength = [0]*32
+ self.buffer = [0]*64
+ self.bufferBits = 0
+ self.bufferPos = 0
+ self.hash = [0]*8
+
+def WhirlpoolInit(ctx):
+ # Note: this rebinds only the local name; callers construct
+ # WhirlpoolStruct() directly (see Whirlpool.__init__ above).
+ ctx = WhirlpoolStruct()
+ return
+
+def WhirlpoolAdd(source, sourceBits, ctx):
+ if not isinstance(source, bytes):
+ raise TypeError("Expected %s, got %s" % (bytes, type(source)))
+ if sys.hexversion < 0x3000000:
+ source = [ord(s)&0xff for s in source]
+
+ carry = 0
+ value = sourceBits
+ i = 31
+ while i >= 0 and (carry != 0 or value != 0):
+ carry += ctx.bitLength[i] + ((value % 0x100000000) & 0xff)
+ ctx.bitLength[i] = carry % 0x100
+ carry >>= 8
+ value >>= 8
+ i -= 1
+
+ bufferBits = ctx.bufferBits
+ bufferPos = ctx.bufferPos
+ sourcePos = 0
+ sourceGap = (8 - (sourceBits & 7)) & 7
+ bufferRem = ctx.bufferBits & 7
+ buffr = ctx.buffer
+
+ while sourceBits > 8:
+ b = ((source[sourcePos] << sourceGap) & 0xff) | ((source[sourcePos + 1] & 0xff) >> (8 - sourceGap))
+ buffr[bufferPos] |= (b >> bufferRem) % 0x100
+ bufferPos += 1
+ bufferBits += 8 - bufferRem
+ if bufferBits == 512:
+ processBuffer(ctx)
+ bufferBits = 0
+ bufferPos = 0
+
+ buffr[bufferPos] = b << (8 - bufferRem)
+ bufferBits += bufferRem
+
+ sourceBits -= 8
+ sourcePos += 1
+
+ b = (source[sourcePos] << sourceGap) & 0xff
+ buffr[bufferPos] |= b >> bufferRem
+ if bufferRem + sourceBits < 8:
+ bufferBits += sourceBits
+ else:
+ bufferPos += 1
+ bufferBits += 8 - bufferRem
+ sourceBits -= 8 - bufferRem
+ if bufferBits == 512:
+ processBuffer(ctx)
+ bufferBits = 0
+ bufferPos = 0
+ buffr[bufferPos] = b << (8 - bufferRem)
+ bufferBits += sourceBits
+ ctx.bufferBits = bufferBits
+ ctx.bufferPos = bufferPos
+
+def WhirlpoolFinalize(ctx):
+ bufferPos = ctx.bufferPos
+ ctx.buffer[bufferPos] |= 0x80 >> (ctx.bufferBits & 7)
+ bufferPos += 1
+ if bufferPos > 32:
+ if bufferPos < 64:
+ for i in xrange(64 - bufferPos):
+ ctx.buffer[bufferPos+i] = 0
+ processBuffer(ctx)
+ bufferPos = 0
+ if bufferPos < 32:
+ for i in xrange(32 - bufferPos):
+ ctx.buffer[bufferPos+i] = 0
+ bufferPos = 32
+ for i in xrange(32):
+ ctx.buffer[32+i] = ctx.bitLength[i]
+ processBuffer(ctx)
+ digest = ''
+ for i in xrange(8):
+ digest += chr((ctx.hash[i] >> 56) % 0x100)
+ digest += chr((ctx.hash[i] >> 48) % 0x100)
+ digest += chr((ctx.hash[i] >> 40) % 0x100)
+ digest += chr((ctx.hash[i] >> 32) % 0x100)
+ digest += chr((ctx.hash[i] >> 24) % 0x100)
+ digest += chr((ctx.hash[i] >> 16) % 0x100)
+ digest += chr((ctx.hash[i] >> 8) % 0x100)
+ digest += chr((ctx.hash[i]) % 0x100)
+ ctx.bufferPos = bufferPos
+ return digest
+
+def CDo(buf, a0, a1, a2, a3, a4, a5, a6, a7):
+ return C0[((buf[a0] >> 56) % 0x100000000) & 0xff] ^ \
+ C1[((buf[a1] >> 48) % 0x100000000) & 0xff] ^ \
+ C2[((buf[a2] >> 40) % 0x100000000) & 0xff] ^ \
+ C3[((buf[a3] >> 32) % 0x100000000) & 0xff] ^ \
+ C4[((buf[a4] >> 24) % 0x100000000) & 0xff] ^ \
+ C5[((buf[a5] >> 16) % 0x100000000) & 0xff] ^ \
+ C6[((buf[a6] >> 8) % 0x100000000) & 0xff] ^ \
+ C7[((buf[a7] >> 0) % 0x100000000) & 0xff]
+
+def processBuffer(ctx):
+ i, r = 0, 0
+ K = [0]*8
+ block = [0]*8
+ state = [0]*8
+ L = [0]*8
+ buffr = ctx.buffer
+
+ buf_cnt = 0
+ for i in xrange(8):
+ block[i] = ((buffr[buf_cnt+0] & 0xff) << 56) ^ \
+ ((buffr[buf_cnt+1] & 0xff) << 48) ^ \
+ ((buffr[buf_cnt+2] & 0xff) << 40) ^ \
+ ((buffr[buf_cnt+3] & 0xff) << 32) ^ \
+ ((buffr[buf_cnt+4] & 0xff) << 24) ^ \
+ ((buffr[buf_cnt+5] & 0xff) << 16) ^ \
+ ((buffr[buf_cnt+6] & 0xff) << 8) ^ \
+ ((buffr[buf_cnt+7] & 0xff) << 0)
+ buf_cnt += 8
+ for i in xrange(8):
+ K[i] = ctx.hash[i]
+ state[i] = block[i] ^ K[i]
+
+ for r in xrange(1, R+1):
+ L[0] = CDo(K, 0, 7, 6, 5, 4, 3, 2, 1) ^ rc[r]
+ L[1] = CDo(K, 1, 0, 7, 6, 5, 4, 3, 2)
+ L[2] = CDo(K, 2, 1, 0, 7, 6, 5, 4, 3)
+ L[3] = CDo(K, 3, 2, 1, 0, 7, 6, 5, 4)
+ L[4] = CDo(K, 4, 3, 2, 1, 0, 7, 6, 5)
+ L[5] = CDo(K, 5, 4, 3, 2, 1, 0, 7, 6)
+ L[6] = CDo(K, 6, 5, 4, 3, 2, 1, 0, 7)
+ L[7] = CDo(K, 7, 6, 5, 4, 3, 2, 1, 0)
+ for i in xrange(8):
+ K[i] = L[i]
+ L[0] = CDo(state, 0, 7, 6, 5, 4, 3, 2, 1) ^ K[0]
+ L[1] = CDo(state, 1, 0, 7, 6, 5, 4, 3, 2) ^ K[1]
+ L[2] = CDo(state, 2, 1, 0, 7, 6, 5, 4, 3) ^ K[2]
+ L[3] = CDo(state, 3, 2, 1, 0, 7, 6, 5, 4) ^ K[3]
+ L[4] = CDo(state, 4, 3, 2, 1, 0, 7, 6, 5) ^ K[4]
+ L[5] = CDo(state, 5, 4, 3, 2, 1, 0, 7, 6) ^ K[5]
+ L[6] = CDo(state, 6, 5, 4, 3, 2, 1, 0, 7) ^ K[6]
+ L[7] = CDo(state, 7, 6, 5, 4, 3, 2, 1, 0) ^ K[7]
+ for i in xrange(8):
+ state[i] = L[i]
+ # apply the Miyaguchi-Preneel compression function
+ for i in xrange(8):
+ ctx.hash[i] ^= state[i] ^ block[i]
+ return
+
+#
+# Tests.
+#
+
+if __name__ == '__main__':
+ assert Whirlpool(b'The quick brown fox jumps over the lazy dog').hexdigest() == \
+ 'b97de512e91e3828b40d2b0fdce9ceb3c4a71f9bea8d88e75c4fa854df36725fd2b52eb6544edcacd6f8beddfea403cb55ae31f03ad62a5ef54e42ee82c3fb35'
+ assert Whirlpool(b'The quick brown fox jumps over the lazy eog').hexdigest() == \
+ 'c27ba124205f72e6847f3e19834f925cc666d0974167af915bb462420ed40cc50900d85a1f923219d832357750492d5c143011a76988344c2635e69d06f2d38c'
+ assert Whirlpool(b'').hexdigest() == \
+ '19fa61d75522a4669b44e39c1d2e1726c530232130d407f89afee0964997f7a73e83be698b288febcf88e3e03c4f0757ea8964e59b63d93708b138cc42a66eb3'
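A minimal usage sketch for the digest API exercised by the test vectors above. The portage.util.whirlpool import path is an assumption (the module header lies outside this hunk); only the Whirlpool(data).hexdigest() call demonstrated by the tests is relied on.

# Sketch: hash arbitrary bytes with the Whirlpool class tested above.
# The import path is assumed, not taken from this hunk.
from portage.util.whirlpool import Whirlpool

data = b'The quick brown fox jumps over the lazy dog'
digest = Whirlpool(data).hexdigest()
# Matches the first test vector asserted above.
assert digest.startswith('b97de512e91e3828')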
diff --git a/lib/portage/util/writeable_check.py b/lib/portage/util/writeable_check.py
new file mode 100644
index 000000000..e5b14c023
--- /dev/null
+++ b/lib/portage/util/writeable_check.py
@@ -0,0 +1,130 @@
+#-*- coding:utf-8 -*-
+# Copyright 2014-2015 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+"""
+Methods to check whether Portage is going to write to read-only filesystems.
+Since the methods are not portable across different OSes, each OS needs its
+own method. To expand RO checking for different OSes, add a method which
+accepts a list of directories and returns a list of mounts which need to be
+remounted RW, then register it for that OS in the _CHECKERS mapping used by
+get_ro_checker().
+"""
+from __future__ import unicode_literals
+
+import io
+import logging
+import os
+
+from portage import _encodings
+from portage.util import writemsg_level
+from portage.localization import _
+from portage.data import ostype
+
+
+def get_ro_checker():
+ """
+ Uses the system type to find an appropriate method for testing whether Portage
+ is going to write to any read-only filesystems.
+
+ @return:
+ 1. A method for testing for RO filesystems appropriate to the current system.
+ """
+ return _CHECKERS.get(ostype, empty_ro_checker)
+
+
+def linux_ro_checker(dir_list):
+ """
+ Use /proc/self/mountinfo to check that no directories installed by the
+ ebuild are set to be installed to a read-only filesystem.
+
+ @param dir_list: A list of directories installed by the ebuild.
+ @type dir_list: List
+ @return:
+ 1. A list of filesystems which are both set to be written to and are mounted
+ read-only, may be empty.
+ """
+ ro_filesystems = set()
+ invalids = []
+
+ try:
+ with io.open("/proc/self/mountinfo", mode='r',
+ encoding=_encodings['content'], errors='replace') as f:
+ for line in f:
+ # we're interested in dir and both attr fields which always
+ # start with either 'ro' or 'rw'
+ # example line:
+ # 14 1 8:3 / / rw,noatime - ext3 /dev/root rw,errors=continue,commit=5,barrier=1,data=writeback
+ # _dir ^ ^ attr1 ^ attr2
+ # there can be a variable number of fields
+ # to the left of the ' - ', after the attr's, so split it there
+ mount = line.split(' - ', 1)
+ try:
+ _dir, attr1 = mount[0].split()[4:6]
+ except ValueError:
+ # If it raises ValueError we can simply ignore the line.
+ invalids.append(line)
+ continue
+ # check for situation with invalid entries for /home and /root in /proc/self/mountinfo
+ # root path is missing sometimes on WSL
+ # for example: 16 1 0:16 / /root rw,noatime - lxfs rw
+ if len(mount) > 1:
+ try:
+ attr2 = mount[1].split()[2]
+ except IndexError:
+ try:
+ attr2 = mount[1].split()[1]
+ except IndexError:
+ invalids.append(line)
+ continue
+ else:
+ invalids.append(line)
+ continue
+ if attr1.startswith('ro') or attr2.startswith('ro'):
+ ro_filesystems.add(_dir)
+
+ # If /proc/self/mountinfo can't be read, assume that there are no RO
+ # filesystems and return.
+ except EnvironmentError:
+ writemsg_level(_("!!! /proc/self/mountinfo cannot be read"),
+ level=logging.WARNING, noiselevel=-1)
+ return []
+
+ for line in invalids:
+ writemsg_level(_("!!! /proc/self/mountinfo contains unrecognized line: %s\n")
+ % line.rstrip(), level=logging.WARNING, noiselevel=-1)
+
+ ro_devs = {}
+ for x in ro_filesystems:
+ try:
+ ro_devs[os.stat(x).st_dev] = x
+ except OSError:
+ pass
+
+ ro_filesystems.clear()
+ for x in set(dir_list):
+ try:
+ dev = os.stat(x).st_dev
+ except OSError:
+ pass
+ else:
+ try:
+ ro_filesystems.add(ro_devs[dev])
+ except KeyError:
+ pass
+
+ return ro_filesystems
+
+
+def empty_ro_checker(dir_list):
+ """
+ Always returns []; this is the fallback function if the system does not have
+ an ro_checker method defined.
+ """
+ return []
+
+
+# _CHECKERS is a map from ostype output to the appropriate function to return
+# in get_ro_checker.
+_CHECKERS = {
+ "Linux": linux_ro_checker,
+}
diff --git a/lib/portage/versions.py b/lib/portage/versions.py
new file mode 100644
index 000000000..0c21373cc
--- /dev/null
+++ b/lib/portage/versions.py
@@ -0,0 +1,588 @@
+# versions.py -- core Portage functionality
+# Copyright 1998-2016 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import unicode_literals
+
+__all__ = [
+ 'best', 'catpkgsplit', 'catsplit',
+ 'cpv_getkey', 'cpv_getversion', 'cpv_sort_key', 'pkgcmp', 'pkgsplit',
+ 'ververify', 'vercmp'
+]
+
+import re
+import sys
+import warnings
+
+if sys.hexversion < 0x3000000:
+ _unicode = unicode
+else:
+ _unicode = str
+ long = int
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.repository.config:_gen_valid_repo',
+ 'portage.util:cmp_sort_key',
+)
+from portage import _unicode_decode
+from portage.eapi import _get_eapi_attrs
+from portage.exception import InvalidData
+from portage.localization import _
+
+_unknown_repo = "__unknown__"
+
+# \w is [a-zA-Z0-9_]
+
+# PMS 3.1.3: A slot name may contain any of the characters [A-Za-z0-9+_.-].
+# It must not begin with a hyphen or a dot.
+_slot = r'([\w+][\w+.-]*)'
+
+# 2.1.1 A category name may contain any of the characters [A-Za-z0-9+_.-].
+# It must not begin with a hyphen or a dot.
+_cat = r'[\w+][\w+.-]*'
+
+# 2.1.2 A package name may contain any of the characters [A-Za-z0-9+_-].
+# It must not begin with a hyphen,
+# and must not end in a hyphen followed by one or more digits.
+_pkg = {
+ "dots_disallowed_in_PN": r'[\w+][\w+-]*?',
+ "dots_allowed_in_PN": r'[\w+][\w+.-]*?',
+}
+
+_v = r'(\d+)((\.\d+)*)([a-z]?)((_(pre|p|beta|alpha|rc)\d*)*)'
+_rev = r'\d+'
+_vr = _v + '(-r(' + _rev + '))?'
+
+_cp = {
+ "dots_disallowed_in_PN": '(' + _cat + '/' + _pkg['dots_disallowed_in_PN'] + '(-' + _vr + ')?)',
+ "dots_allowed_in_PN": '(' + _cat + '/' + _pkg['dots_allowed_in_PN'] + '(-' + _vr + ')?)',
+}
+_cpv = {
+ "dots_disallowed_in_PN": '(' + _cp['dots_disallowed_in_PN'] + '-' + _vr + ')',
+ "dots_allowed_in_PN": '(' + _cp['dots_allowed_in_PN'] + '-' + _vr + ')',
+}
+_pv = {
+ "dots_disallowed_in_PN": '(?P<pn>' + _pkg['dots_disallowed_in_PN'] + '(?P<pn_inval>-' + _vr + ')?)' + '-(?P<ver>' + _v + ')(-r(?P<rev>' + _rev + '))?',
+ "dots_allowed_in_PN": '(?P<pn>' + _pkg['dots_allowed_in_PN'] + '(?P<pn_inval>-' + _vr + ')?)' + '-(?P<ver>' + _v + ')(-r(?P<rev>' + _rev + '))?',
+}
+
+ver_regexp = re.compile("^" + _vr + "$")
+suffix_regexp = re.compile("^(alpha|beta|rc|pre|p)(\\d*)$")
+suffix_value = {"pre": -2, "p": 0, "alpha": -4, "beta": -3, "rc": -1}
+endversion_keys = ["pre", "p", "alpha", "beta", "rc"]
+
+_slot_re_cache = {}
+
+def _get_slot_re(eapi_attrs):
+ cache_key = eapi_attrs.slot_operator
+ slot_re = _slot_re_cache.get(cache_key)
+ if slot_re is not None:
+ return slot_re
+
+ if eapi_attrs.slot_operator:
+ slot_re = _slot + r'(/' + _slot + r')?'
+ else:
+ slot_re = _slot
+
+ slot_re = re.compile('^' + slot_re + '$', re.VERBOSE | re.UNICODE)
+
+ _slot_re_cache[cache_key] = slot_re
+ return slot_re
+
+_pv_re_cache = {}
+
+def _get_pv_re(eapi_attrs):
+ cache_key = eapi_attrs.dots_in_PN
+ pv_re = _pv_re_cache.get(cache_key)
+ if pv_re is not None:
+ return pv_re
+
+ if eapi_attrs.dots_in_PN:
+ pv_re = _pv['dots_allowed_in_PN']
+ else:
+ pv_re = _pv['dots_disallowed_in_PN']
+
+ pv_re = re.compile(r'^' + pv_re + r'$', re.VERBOSE | re.UNICODE)
+
+ _pv_re_cache[cache_key] = pv_re
+ return pv_re
+
+def ververify(myver, silent=1):
+ if ver_regexp.match(myver):
+ return True
+ else:
+ if not silent:
+ print(_("!!! syntax error in version: %s") % myver)
+ return False
+
+def vercmp(ver1, ver2, silent=1):
+ """
+ Compare two versions
+ Example usage:
+ >>> from portage.versions import vercmp
+ >>> vercmp('1.0-r1','1.2-r3')
+ negative number
+ >>> vercmp('1.3','1.2-r3')
+ positive number
+ >>> vercmp('1.0_p3','1.0_p3')
+ 0
+
+ @param ver1: version to compare with (see ver_regexp in portage.versions.py)
+ @type ver1: string (example: "2.1.2-r3")
+ @param ver2: version to compare against (see ver_regexp in portage.versions.py)
+ @type ver2: string (example: "2.1.2_rc5")
+ @rtype: None or int
+ @return:
+ 1. positive if ver1 is greater than ver2
+ 2. negative if ver1 is less than ver2
+ 3. 0 if ver1 equals ver2
+ 4. None if ver1 or ver2 are invalid (see ver_regexp in portage.versions.py)
+ """
+
+ if ver1 == ver2:
+ return 0
+
+ match1 = ver_regexp.match(ver1)
+ match2 = ver_regexp.match(ver2)
+
+ # checking that the versions are valid
+ if not match1 or not match1.groups():
+ if not silent:
+ print(_("!!! syntax error in version: %s") % ver1)
+ return None
+ if not match2 or not match2.groups():
+ if not silent:
+ print(_("!!! syntax error in version: %s") % ver2)
+ return None
+
+ # building lists of the version parts before the suffix
+ # first part is simple
+ list1 = [int(match1.group(1))]
+ list2 = [int(match2.group(1))]
+
+ # this part would greatly benefit from a fixed-length version pattern
+ if match1.group(2) or match2.group(2):
+ vlist1 = match1.group(2)[1:].split(".")
+ vlist2 = match2.group(2)[1:].split(".")
+
+ for i in range(0, max(len(vlist1), len(vlist2))):
+ # Implicit .0 is given a value of -1, so that 1.0.0 > 1.0, since it
+ # would be ambiguous if two versions that aren't literally equal
+ # are given the same value (in sorting, for example).
+ if len(vlist1) <= i or len(vlist1[i]) == 0:
+ list1.append(-1)
+ list2.append(int(vlist2[i]))
+ elif len(vlist2) <= i or len(vlist2[i]) == 0:
+ list1.append(int(vlist1[i]))
+ list2.append(-1)
+ # Let's make life easy and use integers unless we're forced to use floats
+ elif (vlist1[i][0] != "0" and vlist2[i][0] != "0"):
+ list1.append(int(vlist1[i]))
+ list2.append(int(vlist2[i]))
+ # now we have to use floats so 1.02 compares correctly against 1.1
+ else:
+ # list1.append(float("0."+vlist1[i]))
+ # list2.append(float("0."+vlist2[i]))
+ # Since python floats have limited range, we multiply both
+ # floating point representations by a constant so that they are
+ # transformed into whole numbers. This allows the practically
+ # infinite range of a python int to be exploited. The
+ # multiplication is done by padding both literal strings with
+ # zeros as necessary to ensure equal length.
+ max_len = max(len(vlist1[i]), len(vlist2[i]))
+ list1.append(int(vlist1[i].ljust(max_len, "0")))
+ list2.append(int(vlist2[i].ljust(max_len, "0")))
+
+ # and now the final letter
+ # NOTE: Behavior changed in r2309 (between portage-2.0.x and portage-2.1).
+ # The new behavior is 12.2.5 > 12.2b which, depending on how you look at it,
+ # may seem counter-intuitive. However, if you really think about it, it
+ # seems like it's probably safe to assume that this is the behavior that
+ # is intended by anyone who would use versions such as these.
+ if len(match1.group(4)):
+ list1.append(ord(match1.group(4)))
+ if len(match2.group(4)):
+ list2.append(ord(match2.group(4)))
+
+ for i in range(0, max(len(list1), len(list2))):
+ if len(list1) <= i:
+ return -1
+ elif len(list2) <= i:
+ return 1
+ elif list1[i] != list2[i]:
+ a = list1[i]
+ b = list2[i]
+ rval = (a > b) - (a < b)
+ return rval
+
+ # main version is equal, so now compare the _suffix part
+ list1 = match1.group(5).split("_")[1:]
+ list2 = match2.group(5).split("_")[1:]
+
+ for i in range(0, max(len(list1), len(list2))):
+ # Implicit _p0 is given a value of -1, so that 1 < 1_p0
+ if len(list1) <= i:
+ s1 = ("p","-1")
+ else:
+ s1 = suffix_regexp.match(list1[i]).groups()
+ if len(list2) <= i:
+ s2 = ("p","-1")
+ else:
+ s2 = suffix_regexp.match(list2[i]).groups()
+ if s1[0] != s2[0]:
+ a = suffix_value[s1[0]]
+ b = suffix_value[s2[0]]
+ rval = (a > b) - (a < b)
+ return rval
+ if s1[1] != s2[1]:
+ # it's possible that the s(1|2)[1] == ''
+ # in such a case, fudge it.
+ try:
+ r1 = int(s1[1])
+ except ValueError:
+ r1 = 0
+ try:
+ r2 = int(s2[1])
+ except ValueError:
+ r2 = 0
+ rval = (r1 > r2) - (r1 < r2)
+ if rval:
+ return rval
+
+ # the suffix part is equal, so finally check the revision
+ if match1.group(9):
+ r1 = int(match1.group(9))
+ else:
+ r1 = 0
+ if match2.group(9):
+ r2 = int(match2.group(9))
+ else:
+ r2 = 0
+ rval = (r1 > r2) - (r1 < r2)
+ return rval
+
+def pkgcmp(pkg1, pkg2):
+ """
+ Compare 2 package versions created in pkgsplit format.
+
+ Example usage:
+ >>> from portage.versions import *
+ >>> pkgcmp(pkgsplit('test-1.0-r1'),pkgsplit('test-1.2-r3'))
+ -1
+ >>> pkgcmp(pkgsplit('test-1.3'),pkgsplit('test-1.2-r3'))
+ 1
+
+ @param pkg1: package to compare with
+ @type pkg1: list (example: ['test', '1.0', 'r1'])
+ @param pkg2: package to compare against
+ @type pkg2: list (example: ['test', '1.0', 'r1'])
+ @rtype: None or integer
+ @return:
+ 1. None if package names are not the same
+ 2. 1 if pkg1 is greater than pkg2
+ 3. -1 if pkg1 is less than pkg2
+ 4. 0 if pkg1 equals pkg2
+ """
+ if pkg1[0] != pkg2[0]:
+ return None
+ return vercmp("-".join(pkg1[1:]), "-".join(pkg2[1:]))
+
+def _pkgsplit(mypkg, eapi=None):
+ """
+ @param mypkg: pv
+ @return:
+ 1. None if input is invalid.
+ 2. (pn, ver, rev) if input is pv
+ """
+ m = _get_pv_re(_get_eapi_attrs(eapi)).match(mypkg)
+ if m is None:
+ return None
+
+ if m.group('pn_inval') is not None:
+ # package name appears to have a version-like suffix
+ return None
+
+ rev = m.group('rev')
+ if rev is None:
+ rev = '0'
+ rev = 'r' + rev
+
+ return (m.group('pn'), m.group('ver'), rev)
+
+_cat_re = re.compile('^%s$' % _cat, re.UNICODE)
+_missing_cat = 'null'
+
+def catpkgsplit(mydata, silent=1, eapi=None):
+ """
+ Takes a Category/Package-Version-Rev and returns a list of each.
+
+ @param mydata: Data to split
+ @type mydata: string
+ @param silent: suppress error messages
+ @type silent: Boolean (integer)
+ @rtype: list
+ @return:
+ 1. If each exists, it returns [cat, pkgname, version, rev]
+ 2. If cat is not specified in mydata, cat will be "null"
+ 3. If rev does not exist it will be 'r0'
+ """
+ try:
+ return mydata.cpv_split
+ except AttributeError:
+ pass
+ mysplit = mydata.split('/', 1)
+ p_split = None
+ if len(mysplit) == 1:
+ cat = _missing_cat
+ p_split = _pkgsplit(mydata, eapi=eapi)
+ elif len(mysplit) == 2:
+ cat = mysplit[0]
+ if _cat_re.match(cat) is not None:
+ p_split = _pkgsplit(mysplit[1], eapi=eapi)
+ if not p_split:
+ return None
+ retval = (cat, p_split[0], p_split[1], p_split[2])
+ return retval
+
+class _pkg_str(_unicode):
+ """
+ This class represents a cpv. It inherits from str (unicode in python2) and
+ has attributes that cache results for use by functions like catpkgsplit and
+ cpv_getkey which are called frequently (especially in match_from_list).
+ Instances are typically created in dbapi.cp_list() or the Atom constructor,
+ and propagate from there. Generally, code that pickles these objects will
+ manually convert them to a plain unicode object first.
+
+ Instances of this class will have missing attributes for metadata that
+ has not been passed into the constructor. The missing attributes are
+ used to distinguish missing metadata values from undefined metadata values.
+ For example, the repo attribute will be missing if the 'repository' key
+ is missing from the metadata dictionary.
+ """
+
+ def __new__(cls, cpv, metadata=None, settings=None, eapi=None,
+ repo=None, slot=None, build_time=None, build_id=None,
+ file_size=None, mtime=None, db=None):
+ return _unicode.__new__(cls, cpv)
+
+ def __init__(self, cpv, metadata=None, settings=None, eapi=None,
+ repo=None, slot=None, build_time=None, build_id=None,
+ file_size=None, mtime=None, db=None):
+ if not isinstance(cpv, _unicode):
+ # Avoid TypeError from _unicode.__init__ with PyPy.
+ cpv = _unicode_decode(cpv)
+ _unicode.__init__(cpv)
+ if metadata is not None:
+ self.__dict__['_metadata'] = metadata
+ slot = metadata.get('SLOT', slot)
+ repo = metadata.get('repository', repo)
+ eapi = metadata.get('EAPI', eapi)
+ build_time = metadata.get('BUILD_TIME', build_time)
+ file_size = metadata.get('SIZE', file_size)
+ build_id = metadata.get('BUILD_ID', build_id)
+ mtime = metadata.get('_mtime_', mtime)
+ if settings is not None:
+ self.__dict__['_settings'] = settings
+ if db is not None:
+ self.__dict__['_db'] = db
+ if eapi is not None:
+ self.__dict__['eapi'] = eapi
+
+ self.__dict__['build_time'] = self._long(build_time, 0)
+ self.__dict__['file_size'] = self._long(file_size, None)
+ self.__dict__['build_id'] = self._long(build_id, None)
+ self.__dict__['mtime'] = self._long(mtime, None)
+ self.__dict__['cpv_split'] = catpkgsplit(cpv, eapi=eapi)
+ if self.cpv_split is None:
+ raise InvalidData(cpv)
+ self.__dict__['cp'] = self.cpv_split[0] + '/' + self.cpv_split[1]
+ if self.cpv_split[-1] == "r0" and cpv[-3:] != "-r0":
+ self.__dict__['version'] = "-".join(self.cpv_split[2:-1])
+ else:
+ self.__dict__['version'] = "-".join(self.cpv_split[2:])
+ # for match_from_list introspection
+ self.__dict__['cpv'] = self
+ if slot is not None:
+ eapi_attrs = _get_eapi_attrs(eapi)
+ slot_match = _get_slot_re(eapi_attrs).match(slot)
+ if slot_match is None:
+ # Avoid an InvalidAtom exception when creating SLOT atoms
+ self.__dict__['slot'] = '0'
+ self.__dict__['sub_slot'] = '0'
+ self.__dict__['slot_invalid'] = slot
+ else:
+ if eapi_attrs.slot_operator:
+ slot_split = slot.split("/")
+ self.__dict__['slot'] = slot_split[0]
+ if len(slot_split) > 1:
+ self.__dict__['sub_slot'] = slot_split[1]
+ else:
+ self.__dict__['sub_slot'] = slot_split[0]
+ else:
+ self.__dict__['slot'] = slot
+ self.__dict__['sub_slot'] = slot
+
+ if repo is not None:
+ repo = _gen_valid_repo(repo)
+ if not repo:
+ repo = _unknown_repo
+ self.__dict__['repo'] = repo
+
+ def __setattr__(self, name, value):
+ raise AttributeError("_pkg_str instances are immutable",
+ self.__class__, name, value)
+
+ @staticmethod
+ def _long(var, default):
+ if var is not None:
+ try:
+ var = long(var)
+ except ValueError:
+ if var:
+ var = -1
+ else:
+ var = default
+ return var
+
+ @property
+ def stable(self):
+ try:
+ return self._stable
+ except AttributeError:
+ try:
+ settings = self._settings
+ except AttributeError:
+ raise AttributeError('stable')
+ if not settings.local_config:
+ # Since repoman uses different config instances for
+ # different profiles, our local instance does not
+ # refer to the correct profile.
+ raise AssertionError('invalid context')
+ stable = settings._isStable(self)
+ self.__dict__['_stable'] = stable
+ return stable
+
+def pkgsplit(mypkg, silent=1, eapi=None):
+ """
+ @param mypkg: either a pv or cpv
+ @return:
+ 1. None if input is invalid.
+ 2. (pn, ver, rev) if input is pv
+ 3. (cp, ver, rev) if input is a cpv
+ """
+ catpsplit = catpkgsplit(mypkg, eapi=eapi)
+ if catpsplit is None:
+ return None
+ cat, pn, ver, rev = catpsplit
+ if cat is _missing_cat and '/' not in mypkg:
+ return (pn, ver, rev)
+ else:
+ return (cat + '/' + pn, ver, rev)
+
+def cpv_getkey(mycpv, eapi=None):
+ """Calls catpkgsplit on a cpv and returns only the cp."""
+ try:
+ return mycpv.cp
+ except AttributeError:
+ pass
+ mysplit = catpkgsplit(mycpv, eapi=eapi)
+ if mysplit is not None:
+ return mysplit[0] + '/' + mysplit[1]
+
+ warnings.warn("portage.versions.cpv_getkey() " + \
+ "called with invalid cpv: '%s'" % (mycpv,),
+ DeprecationWarning, stacklevel=2)
+
+ myslash = mycpv.split("/", 1)
+ mysplit = _pkgsplit(myslash[-1], eapi=eapi)
+ if mysplit is None:
+ return None
+ mylen = len(myslash)
+ if mylen == 2:
+ return myslash[0] + "/" + mysplit[0]
+ else:
+ return mysplit[0]
+
+def cpv_getversion(mycpv, eapi=None):
+ """Returns the v (including revision) from an cpv."""
+ try:
+ return mycpv.version
+ except AttributeError:
+ pass
+ cp = cpv_getkey(mycpv, eapi=eapi)
+ if cp is None:
+ return None
+ return mycpv[len(cp+"-"):]
+
+def cpv_sort_key(eapi=None):
+ """
+ Create an object for sorting cpvs, to be used as the 'key' parameter
+ in places like list.sort() or sorted(). This calls catpkgsplit() once for
+ each cpv and caches the result. If a given cpv is invalid or two cpvs
+ have different category/package names, then plain string (> and <)
+ comparison is used.
+
+ @rtype: key object for sorting
+ @return: object for use as the 'key' parameter in places like
+ list.sort() or sorted()
+ """
+
+ split_cache = {}
+
+ def cmp_cpv(cpv1, cpv2):
+
+ split1 = split_cache.get(cpv1, False)
+ if split1 is False:
+ split1 = None
+ try:
+ split1 = cpv1.cpv
+ except AttributeError:
+ try:
+ split1 = _pkg_str(cpv1, eapi=eapi)
+ except InvalidData:
+ pass
+ split_cache[cpv1] = split1
+
+ split2 = split_cache.get(cpv2, False)
+ if split2 is False:
+ split2 = None
+ try:
+ split2 = cpv2.cpv
+ except AttributeError:
+ try:
+ split2 = _pkg_str(cpv2, eapi=eapi)
+ except InvalidData:
+ pass
+ split_cache[cpv2] = split2
+
+ if split1 is None or split2 is None or split1.cp != split2.cp:
+ return (cpv1 > cpv2) - (cpv1 < cpv2)
+
+ return vercmp(split1.version, split2.version)
+
+ return cmp_sort_key(cmp_cpv)
+
+def catsplit(mydep):
+ return mydep.split("/", 1)
+
+def best(mymatches, eapi=None):
+ """Accepts None arguments; assumes matches are valid."""
+ if not mymatches:
+ return ""
+ if len(mymatches) == 1:
+ return mymatches[0]
+ bestmatch = mymatches[0]
+ try:
+ v2 = bestmatch.version
+ except AttributeError:
+ v2 = _pkg_str(bestmatch, eapi=eapi).version
+ for x in mymatches[1:]:
+ try:
+ v1 = x.version
+ except AttributeError:
+ v1 = _pkg_str(x, eapi=eapi).version
+ if vercmp(v1, v2) > 0:
+ bestmatch = x
+ v2 = v1
+ return bestmatch
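A short usage sketch of the helpers defined above, following the examples in the vercmp() and pkgcmp() docstrings; the package names are illustrative.

from portage.versions import (
	_pkg_str, catpkgsplit, cpv_sort_key, pkgcmp, pkgsplit, vercmp)

# vercmp() returns a negative number, zero, or a positive number.
assert vercmp('1.0-r1', '1.2-r3') < 0
assert vercmp('1.0_p3', '1.0_p3') == 0
# 1.0.0 sorts above 1.0 because an implicit trailing component counts as -1.
assert vercmp('1.0.0', '1.0') > 0

# pkgcmp() compares pkgsplit() results and returns None for different names.
assert pkgcmp(pkgsplit('test-1.3'), pkgsplit('test-1.2-r3')) == 1
assert pkgcmp(pkgsplit('foo-1.0'), pkgsplit('bar-1.0')) is None

# catpkgsplit() yields (cat, pn, ver, rev); a missing revision becomes 'r0'.
assert catpkgsplit('app-misc/foo-1.2.3') == ('app-misc', 'foo', '1.2.3', 'r0')

# _pkg_str (internal) caches the split results on the string object itself.
p = _pkg_str('app-misc/foo-1.2.3-r1')
assert (p.cp, p.version) == ('app-misc/foo', '1.2.3-r1')

# cpv_sort_key() builds a key object for sorted() that uses vercmp().
cpvs = ['app-misc/foo-1.10', 'app-misc/foo-1.2-r1', 'app-misc/foo-1.2']
assert sorted(cpvs, key=cpv_sort_key())[0] == 'app-misc/foo-1.2'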
diff --git a/lib/portage/xml/__init__.py b/lib/portage/xml/__init__.py
new file mode 100644
index 000000000..21a391aee
--- /dev/null
+++ b/lib/portage/xml/__init__.py
@@ -0,0 +1,2 @@
+# Copyright 2010 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
diff --git a/lib/portage/xml/metadata.py b/lib/portage/xml/metadata.py
new file mode 100644
index 000000000..9e48dddde
--- /dev/null
+++ b/lib/portage/xml/metadata.py
@@ -0,0 +1,505 @@
+# Copyright 2010-2017 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+"""Provides an easy-to-use python interface to Gentoo's metadata.xml file.
+
+ Example usage:
+ >>> from portage.xml.metadata import MetaDataXML
+ >>> pkg_md = MetaDataXML('/usr/portage/app-misc/gourmet/metadata.xml')
+ >>> pkg_md
+ <MetaDataXML '/usr/portage/app-misc/gourmet/metadata.xml'>
+ >>> pkg_md.herds()
+ ['no-herd']
+ >>> for maint in pkg_md.maintainers():
+ ... print "{0} ({1})".format(maint.email, maint.name)
+ ...
+ nixphoeni@gentoo.org (Joe Sapp)
+ >>> for flag in pkg_md.use():
+ ... print flag.name, "->", flag.description
+ ...
+ rtf -> Enable export to RTF
+ gnome-print -> Enable printing support using gnome-print
+ >>> upstream = pkg_md.upstream()
+ >>> upstream
+ [<_Upstream {'docs': [], 'remoteid': [], 'maintainer':
+ [<_Maintainer 'Thomas_Hinkle@alumni.brown.edu'>], 'bugtracker': [],
+ 'changelog': []}>]
+ >>> upstream[0].maintainer[0].name
+ 'Thomas Mills Hinkle'
+"""
+
+from __future__ import unicode_literals
+
+__all__ = ('MetaDataXML', 'parse_metadata_use')
+
+import sys
+
+if sys.hexversion < 0x2070000 or \
+ (sys.hexversion < 0x3020000 and sys.hexversion >= 0x3000000):
+ # Our _MetadataTreeBuilder usage is incompatible with
+ # cElementTree in Python 2.6, 3.0, and 3.1:
+ # File "/usr/lib/python2.6/xml/etree/ElementTree.py", line 644, in findall
+ # assert self._root is not None
+ import xml.etree.ElementTree as etree
+else:
+ try:
+ import xml.etree.cElementTree as etree
+ except (SystemExit, KeyboardInterrupt):
+ raise
+ except (ImportError, SystemError, RuntimeError, Exception):
+ # broken or missing xml support
+ # https://bugs.python.org/issue14988
+ import xml.etree.ElementTree as etree
+
+try:
+ from xml.parsers.expat import ExpatError
+except (SystemExit, KeyboardInterrupt):
+ raise
+except (ImportError, SystemError, RuntimeError, Exception):
+ ExpatError = SyntaxError
+
+import re
+import xml.etree.ElementTree
+from portage import _encodings, _unicode_encode
+from portage.util import cmp_sort_key, unique_everseen
+
+if sys.hexversion >= 0x3000000:
+ # pylint: disable=W0622
+ basestring = str
+
+
+class _MetadataTreeBuilder(xml.etree.ElementTree.TreeBuilder):
+ """
+ Implements doctype() as required to avoid deprecation warnings with
+ Python >=2.7.
+ """
+ def doctype(self, name, pubid, system):
+ pass
+
+class _Maintainer(object):
+ """An object for representing one maintainer.
+
+ @type email: str or None
+ @ivar email: Maintainer's email address. Used for both Gentoo and upstream.
+ @type name: str or None
+ @ivar name: Maintainer's name. Used for both Gentoo and upstream.
+ @type description: str or None
+ @ivar description: Description of what a maintainer does. Gentoo only.
+ @type maint_type: str or None
+ @ivar maint_type: GLEP67 maintainer type (project or person). Gentoo only.
+ @type restrict: str or None
+ @ivar restrict: e.g. &gt;=portage-2.2 means only maintains versions
+ of Portage greater than 2.2. Should be DEPEND string with < and >
+ converted to &lt; and &gt; respectively.
+ @type status: str or None
+ @ivar status: If set, either 'active' or 'inactive'. Upstream only.
+ """
+
+ def __init__(self, node):
+ self.email = None
+ self.name = None
+ self.description = None
+ self.maint_type = node.get('type')
+ self.restrict = node.get('restrict')
+ self.status = node.get('status')
+ for attr in node:
+ setattr(self, attr.tag, attr.text)
+
+ def __repr__(self):
+ return "<%s %r>" % (self.__class__.__name__, self.email)
+
+
+class _Useflag(object):
+ """An object for representing one USE flag.
+
+ @todo: Is there any way to have a keyword option to leave in
+ <pkg> and <cat> for later processing?
+ @type name: str or None
+ @ivar name: USE flag
+ @type restrict: str or None
+ @ivar restrict: e.g. &gt;=portage-2.2 means flag is only available in
+ versions greater than 2.2
+ @type description: str
+ @ivar description: description of the USE flag
+ """
+
+ def __init__(self, node):
+ self.name = node.get('name')
+ self.restrict = node.get('restrict')
+ _desc = ''
+ if node.text:
+ _desc = node.text
+ for child in node.getchildren():
+ _desc += child.text if child.text else ''
+ _desc += child.tail if child.tail else ''
+ # This takes care of tabs and newlines left from the file
+ self.description = re.sub(r'\s+', ' ', _desc)
+
+ def __repr__(self):
+ return "<%s %r>" % (self.__class__.__name__, self.name)
+
+
+class _Upstream(object):
+ """An object for representing one package's upstream.
+
+ @type maintainers: list
+ @ivar maintainers: L{_Maintainer} objects for each upstream maintainer
+ @type changelogs: list
+ @ivar changelogs: URLs to upstream's ChangeLog file in str format
+ @type docs: list
+ @ivar docs: Sequence of tuples containing URLs to upstream documentation
+ in the first slot and 'lang' attribute in the second, e.g.,
+ [('http.../docs/en/tut.html', None), ('http.../doc/fr/tut.html', 'fr')]
+ @type bugtrackers: list
+ @ivar bugtrackers: URLs to upstream's bugtracker. May also contain an email
+ address if prepended with 'mailto:'
+ @type remoteids: list
+ @ivar remoteids: Sequence of tuples containing the project's hosting site
+ name in the first slot and the project's ID name or number for that
+ site in the second, e.g., [('sourceforge', 'systemrescuecd')]
+ """
+
+ def __init__(self, node):
+ self.node = node
+ self.maintainers = self.upstream_maintainers()
+ self.changelogs = self.upstream_changelogs()
+ self.docs = self.upstream_documentation()
+ self.bugtrackers = self.upstream_bugtrackers()
+ self.remoteids = self.upstream_remoteids()
+
+ def __repr__(self):
+ return "<%s %r>" % (self.__class__.__name__, self.__dict__)
+
+ def upstream_bugtrackers(self):
+ """Retrieve upstream bugtracker location from xml node."""
+ return [e.text for e in self.node.findall('bugs-to')]
+
+ def upstream_changelogs(self):
+ """Retrieve upstream changelog location from xml node."""
+ return [e.text for e in self.node.findall('changelog')]
+
+ def upstream_documentation(self):
+ """Retrieve upstream documentation location from xml node."""
+ result = []
+ for elem in self.node.findall('doc'):
+ lang = elem.get('lang')
+ result.append((elem.text, lang))
+ return result
+
+ def upstream_maintainers(self):
+ """Retrieve upstream maintainer information from xml node."""
+ return [_Maintainer(m) for m in self.node.findall('maintainer')]
+
+ def upstream_remoteids(self):
+ """Retrieve upstream remote ID from xml node."""
+ return [(e.text, e.get('type')) for e in self.node.findall('remote-id')]
+
+
+class MetaDataXML(object):
+ """Access metadata.xml"""
+
+ def __init__(self, metadata_xml_path, herds):
+ """Parse a valid metadata.xml file.
+
+ @type metadata_xml_path: str
+ @param metadata_xml_path: path to a valid metadata.xml file
+ @type herds: str or ElementTree
+ @param herds: path to a herds.xml, or a pre-parsed ElementTree
+ @raise IOError: if C{metadata_xml_path} can not be read
+ """
+
+ self.metadata_xml_path = metadata_xml_path
+ self._xml_tree = None
+
+ try:
+ self._xml_tree = etree.parse(_unicode_encode(metadata_xml_path,
+ encoding=_encodings['fs'], errors='strict'),
+ parser=etree.XMLParser(target=_MetadataTreeBuilder()))
+ except ImportError:
+ pass
+ except ExpatError as e:
+ raise SyntaxError("%s" % (e,))
+
+ if isinstance(herds, etree.ElementTree):
+ herds_etree = herds
+ herds_path = None
+ else:
+ herds_etree = None
+ herds_path = herds
+
+ # Used for caching
+ self._herdstree = herds_etree
+ self._herds_path = herds_path
+ self._descriptions = None
+ self._maintainers = None
+ self._herds = None
+ self._useflags = None
+ self._upstream = None
+
+ def __repr__(self):
+ return "<%s %r>" % (self.__class__.__name__, self.metadata_xml_path)
+
+ def _get_herd_email(self, herd):
+ """Get a herd's email address.
+
+ @type herd: str
+ @param herd: herd whose email you want
+ @rtype: str or None
+ @return: email address or None if herd is not in herds.xml
+ @raise IOError: if $PORTDIR/metadata/herds.xml can not be read
+ """
+
+ if self._herdstree is None:
+ try:
+ self._herdstree = etree.parse(_unicode_encode(self._herds_path,
+ encoding=_encodings['fs'], errors='strict'),
+ parser=etree.XMLParser(target=_MetadataTreeBuilder()))
+ except (ImportError, IOError, SyntaxError):
+ return None
+
+ # Some special herds are not listed in herds.xml
+ if herd in ('no-herd', 'maintainer-wanted', 'maintainer-needed'):
+ return None
+
+ try:
+ # Python 2.7 or >=3.2
+ iterate = self._herdstree.iter
+ except AttributeError:
+ iterate = self._herdstree.getiterator
+
+ for node in iterate('herd'):
+ if node.findtext('name') == herd:
+ return node.findtext('email')
+
+ def herds(self, include_email=False):
+ """Return a list of text nodes for <herd>.
+
+ @type include_email: bool
+ @keyword include_email: if True, also look up the herd's email
+ @rtype: tuple
+ @return: if include_email is False, return a list of strings;
+ if include_email is True, return a list of tuples containing:
+ [('herd1', 'herd1@gentoo.org'), ('no-herd', None)]
+ """
+ if self._herds is None:
+ if self._xml_tree is None:
+ self._herds = tuple()
+ else:
+ herds = []
+ for elem in self._xml_tree.findall('herd'):
+ text = elem.text
+ if text is None:
+ text = ''
+ if include_email:
+ herd_mail = self._get_herd_email(text)
+ herds.append((text, herd_mail))
+ else:
+ herds.append(text)
+ self._herds = tuple(herds)
+
+ return self._herds
+
+ def descriptions(self):
+ """Return a list of text nodes for <longdescription>.
+
+ @rtype: list
+ @return: package description in string format
+ @todo: Support the C{lang} attribute
+ """
+ if self._descriptions is None:
+ if self._xml_tree is None:
+ self._descriptions = tuple()
+ else:
+ self._descriptions = tuple(e.text \
+ for e in self._xml_tree.findall("longdescription"))
+
+ return self._descriptions
+
+ def maintainers(self):
+ """Get maintainers' name, email and description.
+
+ @rtype: list
+ @return: a sequence of L{_Maintainer} objects in document order.
+ """
+
+ if self._maintainers is None:
+ if self._xml_tree is None:
+ self._maintainers = tuple()
+ else:
+ self._maintainers = tuple(_Maintainer(node) \
+ for node in self._xml_tree.findall('maintainer'))
+
+ return self._maintainers
+
+ def use(self):
+ """Get names and descriptions for USE flags defined in metadata.
+
+ @rtype: list
+ @return: a sequence of L{_Useflag} objects in document order.
+ """
+
+ if self._useflags is None:
+ if self._xml_tree is None:
+ self._useflags = tuple()
+ else:
+ try:
+ # Python 2.7 or >=3.2
+ iterate = self._xml_tree.iter
+ except AttributeError:
+ iterate = self._xml_tree.getiterator
+ self._useflags = tuple(_Useflag(node) \
+ for node in iterate('flag'))
+
+ return self._useflags
+
+ def upstream(self):
+ """Get upstream contact information.
+
+ @rtype: list
+ @return: a sequence of L{_Upstream} objects in document order.
+ """
+
+ if self._upstream is None:
+ if self._xml_tree is None:
+ self._upstream = tuple()
+ else:
+ self._upstream = tuple(_Upstream(node) \
+ for node in self._xml_tree.findall('upstream'))
+
+ return self._upstream
+
+ def format_maintainer_string(self):
+ """Format string containing maintainers and herds (emails if possible).
+ Used by emerge to display maintainer information.
+ Entries are sorted according to the rules stated on the bug wranglers page.
+
+ @rtype: String
+ @return: a string containing maintainers and herds
+ """
+ maintainers = []
+ for maintainer in self.maintainers():
+ if maintainer.email is None or not maintainer.email.strip():
+ if maintainer.name and maintainer.name.strip():
+ maintainers.append(maintainer.name)
+ else:
+ maintainers.append(maintainer.email)
+
+ for herd, email in self.herds(include_email=True):
+ if herd == "no-herd":
+ continue
+ if email is None or not email.strip():
+ if herd and herd.strip():
+ maintainers.append(herd)
+ else:
+ maintainers.append(email)
+
+ maintainers = list(unique_everseen(maintainers))
+
+ maint_str = ""
+ if maintainers:
+ maint_str = maintainers[0]
+ maintainers = maintainers[1:]
+ if maintainers:
+ maint_str += " " + ",".join(maintainers)
+
+ return maint_str
+
+ def format_upstream_string(self):
+ """Format string containing upstream maintainers and bugtrackers.
+ Used by emerge to display upstream information.
+
+ @rtype: String
+ @return: a string containing upstream maintainers and bugtrackers
+ """
+ maintainers = []
+ for upstream in self.upstream():
+ for maintainer in upstream.maintainers:
+ if maintainer.email is None or not maintainer.email.strip():
+ if maintainer.name and maintainer.name.strip():
+ maintainers.append(maintainer.name)
+ else:
+ maintainers.append(maintainer.email)
+
+ for bugtracker in upstream.bugtrackers:
+ if bugtracker.startswith("mailto:"):
+ bugtracker = bugtracker[7:]
+ maintainers.append(bugtracker)
+
+
+ maintainers = list(unique_everseen(maintainers))
+ maint_str = " ".join(maintainers)
+ return maint_str
+
+# lang with higher value is preferred
+_lang_pref = {
+ "" : 0,
+ "en": 1,
+}
+
+
+def _cmp_lang(a, b):
+ a_score = _lang_pref.get(a.get("lang", ""), -1)
+ b_score = _lang_pref.get(b.get("lang", ""), -1)
+
+ return a_score - b_score
+
+
+def parse_metadata_use(xml_tree):
+ """
+ Records are wrapped in XML as per GLEP 56
+ Records are wrapped in XML as per GLEP 56.
+ Returns a dict with keys consisting of USE flag names and values
+ """
+ uselist = {}
+
+ usetags = xml_tree.findall("use")
+ if not usetags:
+ return uselist
+
+ # Sort by language preference in descending order.
+ usetags.sort(key=cmp_sort_key(_cmp_lang), reverse=True)
+
+ # It's possible to have multiple 'use' elements.
+ for usetag in usetags:
+ flags = usetag.findall("flag")
+ if not flags:
+ # DTD allows use elements containing no flag elements.
+ continue
+
+ for flag in flags:
+ pkg_flag = flag.get("name")
+ if pkg_flag is not None:
+ flag_restrict = flag.get("restrict")
+
+ # Descriptions may exist for multiple languages, so
+ # ignore all except the first description found for a
+ # particular value of restrict (see bug 599060).
+ try:
+ uselist[pkg_flag][flag_restrict]
+ except KeyError:
+ pass
+ else:
+ continue
+
+ # emulate the Element.itertext() method from python-2.7
+ inner_text = []
+ stack = []
+ stack.append(flag)
+ while stack:
+ obj = stack.pop()
+ if isinstance(obj, basestring):
+ inner_text.append(obj)
+ continue
+ if isinstance(obj.text, basestring):
+ inner_text.append(obj.text)
+ if isinstance(obj.tail, basestring):
+ stack.append(obj.tail)
+ stack.extend(reversed(obj))
+
+ if flag.get("name") not in uselist:
+ uselist[flag.get("name")] = {}
+
+ # (flag_restrict can be None)
+ uselist[flag.get("name")][flag_restrict] = " ".join("".join(inner_text).split())
+ return uselist
+
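A small sketch of parse_metadata_use(), which collects the GLEP 56 USE-flag descriptions keyed by flag name and then by the (possibly None) restrict attribute; the metadata.xml fragment is illustrative.

import xml.etree.ElementTree as etree

from portage.xml.metadata import parse_metadata_use

xml_text = '''<pkgmetadata>
	<use>
		<flag name="rtf">Enable export to RTF</flag>
		<flag name="gnome-print">Enable printing support using gnome-print</flag>
	</use>
</pkgmetadata>'''

tree = etree.ElementTree(etree.fromstring(xml_text))
uses = parse_metadata_use(tree)
# Flag name -> {restrict value (None here) -> description}
assert uses['rtf'][None] == 'Enable export to RTF'
assert sorted(uses) == ['gnome-print', 'rtf']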
diff --git a/lib/portage/xpak.py b/lib/portage/xpak.py
new file mode 100644
index 000000000..e11f26e6c
--- /dev/null
+++ b/lib/portage/xpak.py
@@ -0,0 +1,499 @@
+# Copyright 2001-2014 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+
+# The format for a tbz2/xpak:
+#
+# tbz2: tar.bz2 + xpak + (xpak_offset) + "STOP"
+# xpak: "XPAKPACK" + (index_len) + (data_len) + index + data + "XPAKSTOP"
+# index: (pathname_len) + pathname + (data_offset) + (data_len)
+# index entries are concatenated end-to-end.
+# data: concatenated data chunks, end-to-end.
+#
+# [tarball]XPAKPACKIIIIDDDD[index][data]XPAKSTOPOOOOSTOP
+#
+# (integer) == encodeint(integer) ===> 4 characters (big-endian copy)
+# '+' means concatenate the fields ===> All chunks are strings
+
+__all__ = [
+ 'addtolist', 'decodeint', 'encodeint', 'getboth',
+ 'getindex', 'getindex_mem', 'getitem', 'listindex',
+ 'searchindex', 'tbz2', 'xpak_mem', 'xpak', 'xpand',
+ 'xsplit', 'xsplit_mem',
+]
+
+import array
+import errno
+import sys
+
+import portage
+from portage import os
+from portage import shutil
+from portage import normalize_path
+from portage import _encodings
+from portage import _unicode_decode
+from portage import _unicode_encode
+
+def addtolist(mylist, curdir):
+ """(list, dir) --- Takes an array(list) and appends all files from dir down
+ the directory tree. Returns nothing. list is modified."""
+ curdir = normalize_path(_unicode_decode(curdir,
+ encoding=_encodings['fs'], errors='strict'))
+ for parent, dirs, files in os.walk(curdir):
+
+ parent = _unicode_decode(parent,
+ encoding=_encodings['fs'], errors='strict')
+ if parent != curdir:
+ mylist.append(parent[len(curdir) + 1:] + os.sep)
+
+ for x in dirs:
+ try:
+ _unicode_decode(x, encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ dirs.remove(x)
+
+ for x in files:
+ try:
+ x = _unicode_decode(x,
+ encoding=_encodings['fs'], errors='strict')
+ except UnicodeDecodeError:
+ continue
+ mylist.append(os.path.join(parent, x)[len(curdir) + 1:])
+
+def encodeint(myint):
+ """Takes a 4 byte integer and converts it into a string of 4 characters.
+ Returns the characters in a string."""
+ a = array.array('B')
+ a.append((myint >> 24) & 0xff)
+ a.append((myint >> 16) & 0xff)
+ a.append((myint >> 8) & 0xff)
+ a.append(myint & 0xff)
+ try:
+ # Python >= 3.2
+ return a.tobytes()
+ except AttributeError:
+ return a.tostring()
+
+def decodeint(mystring):
+ """Takes a 4 byte string and converts it into a 4 byte integer.
+ Returns an integer."""
+ if sys.hexversion < 0x3000000:
+ mystring = [ord(x) for x in mystring]
+ myint = 0
+ myint += mystring[3]
+ myint += mystring[2] << 8
+ myint += mystring[1] << 16
+ myint += mystring[0] << 24
+ return myint
+
+def xpak(rootdir, outfile=None):
+ """(rootdir, outfile) -- creates an xpak segment of the directory 'rootdir'
+ and under the name 'outfile' if it is specified. Otherwise it returns the
+ xpak segment."""
+
+ mylist = []
+
+ addtolist(mylist, rootdir)
+ mylist.sort()
+ mydata = {}
+ for x in mylist:
+ if x == 'CONTENTS':
+ # CONTENTS is generated during the merge process.
+ continue
+ x = _unicode_encode(x, encoding=_encodings['fs'], errors='strict')
+ with open(os.path.join(rootdir, x), 'rb') as f:
+ mydata[x] = f.read()
+
+ xpak_segment = xpak_mem(mydata)
+ if outfile:
+ outf = open(_unicode_encode(outfile,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ outf.write(xpak_segment)
+ outf.close()
+ else:
+ return xpak_segment
+
+def xpak_mem(mydata):
+ """Create an xpak segment from a map object."""
+
+ mydata_encoded = {}
+ for k, v in mydata.items():
+ k = _unicode_encode(k,
+ encoding=_encodings['repo.content'], errors='backslashreplace')
+ v = _unicode_encode(v,
+ encoding=_encodings['repo.content'], errors='backslashreplace')
+ mydata_encoded[k] = v
+ mydata = mydata_encoded
+ del mydata_encoded
+
+ indexglob = b''
+ indexpos = 0
+ dataglob = b''
+ datapos = 0
+ for x, newglob in mydata.items():
+ mydatasize = len(newglob)
+ indexglob = indexglob + encodeint(len(x)) + x + encodeint(datapos) + encodeint(mydatasize)
+ indexpos = indexpos + 4 + len(x) + 4 + 4
+ dataglob = dataglob + newglob
+ datapos = datapos + mydatasize
+ return b'XPAKPACK' \
+ + encodeint(len(indexglob)) \
+ + encodeint(len(dataglob)) \
+ + indexglob \
+ + dataglob \
+ + b'XPAKSTOP'
+
+def xsplit(infile):
+ """(infile) -- Splits the infile into two files.
+ 'infile.index' contains the index segment.
+ 'infile.dat' contains the data segment."""
+ infile = _unicode_decode(infile,
+ encoding=_encodings['fs'], errors='strict')
+ myfile = open(_unicode_encode(infile,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ mydat = myfile.read()
+ myfile.close()
+
+ splits = xsplit_mem(mydat)
+ if not splits:
+ return False
+
+ myfile = open(_unicode_encode(infile + '.index',
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ myfile.write(splits[0])
+ myfile.close()
+ myfile = open(_unicode_encode(infile + '.dat',
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ myfile.write(splits[1])
+ myfile.close()
+ return True
+
+def xsplit_mem(mydat):
+ if mydat[0:8] != b'XPAKPACK':
+ return None
+ if mydat[-8:] != b'XPAKSTOP':
+ return None
+ indexsize = decodeint(mydat[8:12])
+ return (mydat[16:indexsize + 16], mydat[indexsize + 16:-8])
+
+def getindex(infile):
+ """(infile) -- grabs the index segment from the infile and returns it."""
+ myfile = open(_unicode_encode(infile,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ myheader = myfile.read(16)
+ if myheader[0:8] != b'XPAKPACK':
+ myfile.close()
+ return
+ indexsize = decodeint(myheader[8:12])
+ myindex = myfile.read(indexsize)
+ myfile.close()
+ return myindex
+
+def getboth(infile):
+ """(infile) -- grabs the index and data segments from the infile.
+ Returns an array [indexSegment, dataSegment]"""
+ myfile = open(_unicode_encode(infile,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ myheader = myfile.read(16)
+ if myheader[0:8] != b'XPAKPACK':
+ myfile.close()
+ return
+ indexsize = decodeint(myheader[8:12])
+ datasize = decodeint(myheader[12:16])
+ myindex = myfile.read(indexsize)
+ mydata = myfile.read(datasize)
+ myfile.close()
+ return myindex, mydata
+
+def listindex(myindex):
+ """Print to the terminal the filenames listed in the indexglob passed in."""
+ for x in getindex_mem(myindex):
+ print(x)
+
+def getindex_mem(myindex):
+ """Returns the filenames listed in the indexglob passed in."""
+ myindexlen = len(myindex)
+ startpos = 0
+ myret = []
+ while ((startpos + 8) < myindexlen):
+ mytestlen = decodeint(myindex[startpos:startpos + 4])
+ myret = myret + [myindex[startpos + 4:startpos + 4 + mytestlen]]
+ startpos = startpos + mytestlen + 12
+ return myret
+
+def searchindex(myindex, myitem):
+ """(index, item) -- Finds the offset and length of the file 'item' in the
+ datasegment via the index 'index' provided."""
+ myitem = _unicode_encode(myitem,
+ encoding=_encodings['repo.content'], errors='backslashreplace')
+ mylen = len(myitem)
+ myindexlen = len(myindex)
+ startpos = 0
+ while ((startpos + 8) < myindexlen):
+ mytestlen = decodeint(myindex[startpos:startpos + 4])
+ if mytestlen == mylen:
+ if myitem == myindex[startpos + 4:startpos + 4 + mytestlen]:
+ #found
+ datapos = decodeint(myindex[startpos + 4 + mytestlen:startpos + 8 + mytestlen])
+ datalen = decodeint(myindex[startpos + 8 + mytestlen:startpos + 12 + mytestlen])
+ return datapos, datalen
+ startpos = startpos + mytestlen + 12
+
+def getitem(myid, myitem):
+ myindex = myid[0]
+ mydata = myid[1]
+ myloc = searchindex(myindex, myitem)
+ if not myloc:
+ return None
+ return mydata[myloc[0]:myloc[0] + myloc[1]]
+
+def xpand(myid, mydest):
+ mydest = normalize_path(mydest) + os.sep
+ myindex = myid[0]
+ mydata = myid[1]
+ myindexlen = len(myindex)
+ startpos = 0
+ while ((startpos + 8) < myindexlen):
+ namelen = decodeint(myindex[startpos:startpos + 4])
+ datapos = decodeint(myindex[startpos + 4 + namelen:startpos + 8 + namelen])
+ datalen = decodeint(myindex[startpos + 8 + namelen:startpos + 12 + namelen])
+ myname = myindex[startpos + 4:startpos + 4 + namelen]
+ myname = _unicode_decode(myname,
+ encoding=_encodings['repo.content'], errors='replace')
+ filename = os.path.join(mydest, myname.lstrip(os.sep))
+ filename = normalize_path(filename)
+ if not filename.startswith(mydest):
+ # myname contains invalid ../ component(s)
+ continue
+ dirname = os.path.dirname(filename)
+ if dirname:
+ if not os.path.exists(dirname):
+ os.makedirs(dirname)
+ mydat = open(_unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ mydat.write(mydata[datapos:datapos + datalen])
+ mydat.close()
+ startpos = startpos + namelen + 12
+
+class tbz2(object):
+ def __init__(self, myfile):
+ self.file = myfile
+ self.filestat = None
+ self.index = b''
+ self.infosize = 0
+ self.xpaksize = 0
+ self.indexsize = None
+ self.datasize = None
+ self.indexpos = None
+ self.datapos = None
+
+ def decompose(self, datadir, cleanup=1):
+ """Alias for unpackinfo() --- Complement to recompose() but optionally
+ deletes the destination directory. Extracts the xpak from the tbz2 into
+ the directory provided. Raises IOError if scan() fails.
+ Returns result of unpackinfo()."""
+ if not self.scan():
+ raise IOError
+ if cleanup:
+ self.cleanup(datadir)
+ if not os.path.exists(datadir):
+ os.makedirs(datadir)
+ return self.unpackinfo(datadir)
+ def compose(self, datadir, cleanup=0):
+ """Alias for recompose()."""
+ return self.recompose(datadir, cleanup)
+
+ def recompose(self, datadir, cleanup=0, break_hardlinks=True):
+ """Creates an xpak segment from the datadir provided, truncates the tbz2
+ to the end of regular data if an xpak segment already exists, and adds
+ the new segment to the file with terminating info."""
+ xpdata = xpak(datadir)
+ self.recompose_mem(xpdata, break_hardlinks=break_hardlinks)
+ if cleanup:
+ self.cleanup(datadir)
+
+ def recompose_mem(self, xpdata, break_hardlinks=True):
+ """
+ Update the xpak segment.
+ @param xpdata: A new xpak segment to be written, like that returned
+ from the xpak_mem() function.
+ @param break_hardlinks: If hardlinks exist, create a copy in order
+ to break them. This makes it safe to use hardlinks to create
+ cheap snapshots of the repository, which is useful for solving
+ race conditions on binhosts as described here:
+ https://crbug.com/185031
+ Default is True.
+ """
+ self.scan() # Don't care about condition... We'll rewrite the data anyway.
+
+ if break_hardlinks and self.filestat and self.filestat.st_nlink > 1:
+ tmp_fname = "%s.%d" % (self.file, os.getpid())
+ shutil.copyfile(self.file, tmp_fname)
+ try:
+ portage.util.apply_stat_permissions(self.file, self.filestat)
+ except portage.exception.OperationNotPermitted:
+ pass
+ os.rename(tmp_fname, self.file)
+
+ myfile = open(_unicode_encode(self.file,
+ encoding=_encodings['fs'], errors='strict'), 'ab+')
+ if not myfile:
+ raise IOError
+ myfile.seek(-self.xpaksize, 2) # 0,2 or -0,2 just mean EOF.
+ myfile.truncate()
+ myfile.write(xpdata + encodeint(len(xpdata)) + b'STOP')
+ myfile.flush()
+ myfile.close()
+ return 1
+
+ def cleanup(self, datadir):
+ datadir_split = os.path.split(datadir)
+ if len(datadir_split) >= 2 and len(datadir_split[1]) > 0:
+ # This is potentially dangerous,
+ # thus the above sanity check.
+ try:
+ shutil.rmtree(datadir)
+ except OSError as oe:
+ if oe.errno == errno.ENOENT:
+ pass
+ else:
+ raise oe
+
+ def scan(self):
+ """Scans the tbz2 to locate the xpak segment and setup internal values.
+ This function is called by relevant functions already."""
+ a = None
+ try:
+ mystat = os.stat(self.file)
+ if self.filestat:
+ changed = 0
+ if mystat.st_size != self.filestat.st_size \
+ or mystat.st_mtime != self.filestat.st_mtime \
+ or mystat.st_ctime != self.filestat.st_ctime:
+ changed = True
+ if not changed:
+ return 1
+ self.filestat = mystat
+ a = open(_unicode_encode(self.file,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ a.seek(-16, 2)
+ trailer = a.read()
+ self.infosize = 0
+ self.xpaksize = 0
+ if trailer[-4:] != b'STOP':
+ return 0
+ if trailer[0:8] != b'XPAKSTOP':
+ return 0
+ self.infosize = decodeint(trailer[8:12])
+ self.xpaksize = self.infosize + 8
+ a.seek(-(self.xpaksize), 2)
+ header = a.read(16)
+ if header[0:8] != b'XPAKPACK':
+ return 0
+ self.indexsize = decodeint(header[8:12])
+ self.datasize = decodeint(header[12:16])
+ self.indexpos = a.tell()
+ self.index = a.read(self.indexsize)
+ self.datapos = a.tell()
+ return 2
+ except SystemExit:
+ raise
+ except:
+ return 0
+ finally:
+ if a is not None:
+ a.close()
+
+ def filelist(self):
+ """Return an array of each file listed in the index."""
+ if not self.scan():
+ return None
+ return getindex_mem(self.index)
+
+ def getfile(self, myfile, mydefault=None):
+ """Finds 'myfile' in the data segment and returns it."""
+ if not self.scan():
+ return None
+ myresult = searchindex(self.index, myfile)
+ if not myresult:
+ return mydefault
+ a = open(_unicode_encode(self.file,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ a.seek(self.datapos + myresult[0], 0)
+ myreturn = a.read(myresult[1])
+ a.close()
+ return myreturn
+
+ def getelements(self, myfile):
+ """A split/array representation of tbz2.getfile()"""
+ mydat = self.getfile(myfile)
+ if not mydat:
+ return []
+ return mydat.split()
+
+ def unpackinfo(self, mydest):
+ """Unpacks all the files from the dataSegment into 'mydest'."""
+ if not self.scan():
+ return 0
+ mydest = normalize_path(mydest) + os.sep
+ a = open(_unicode_encode(self.file,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ if not os.path.exists(mydest):
+ os.makedirs(mydest)
+ startpos = 0
+ while ((startpos + 8) < self.indexsize):
+ namelen = decodeint(self.index[startpos:startpos + 4])
+ datapos = decodeint(self.index[startpos + 4 + namelen:startpos + 8 + namelen])
+ datalen = decodeint(self.index[startpos + 8 + namelen:startpos + 12 + namelen])
+ myname = self.index[startpos + 4:startpos + 4 + namelen]
+ myname = _unicode_decode(myname,
+ encoding=_encodings['repo.content'], errors='replace')
+ filename = os.path.join(mydest, myname.lstrip(os.sep))
+ filename = normalize_path(filename)
+ if not filename.startswith(mydest):
+ # myname contains invalid ../ component(s)
+ continue
+ dirname = os.path.dirname(filename)
+ if dirname:
+ if not os.path.exists(dirname):
+ os.makedirs(dirname)
+ mydat = open(_unicode_encode(filename,
+ encoding=_encodings['fs'], errors='strict'), 'wb')
+ a.seek(self.datapos + datapos)
+ mydat.write(a.read(datalen))
+ mydat.close()
+ startpos = startpos + namelen + 12
+ a.close()
+ return 1
+
+ def get_data(self):
+ """Returns all the files from the dataSegment as a map object."""
+ if not self.scan():
+ return {}
+ a = open(_unicode_encode(self.file,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ mydata = {}
+ startpos = 0
+ while ((startpos + 8) < self.indexsize):
+ namelen = decodeint(self.index[startpos:startpos + 4])
+ datapos = decodeint(self.index[startpos + 4 + namelen:startpos + 8 + namelen])
+ datalen = decodeint(self.index[startpos + 8 + namelen:startpos + 12 + namelen])
+ myname = self.index[startpos + 4:startpos + 4 + namelen]
+ a.seek(self.datapos + datapos)
+ mydata[myname] = a.read(datalen)
+ startpos = startpos + namelen + 12
+ a.close()
+ return mydata
+
+ def getboth(self):
+ """Returns an array [indexSegment, dataSegment]"""
+ if not self.scan():
+ return None
+
+ a = open(_unicode_encode(self.file,
+ encoding=_encodings['fs'], errors='strict'), 'rb')
+ a.seek(self.datapos)
+ mydata = a.read(self.datasize)
+ a.close()
+
+ return self.index, mydata
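A round-trip sketch of the in-memory helpers defined above (xpak_mem, xsplit_mem, getindex_mem, getitem), following the segment layout described in the comment at the top of this file; the metadata keys and values are illustrative.

from portage.xpak import getindex_mem, getitem, xpak_mem, xsplit_mem

# Build an xpak segment from a simple map, as done for a binary package's
# metadata directory.
segment = xpak_mem({'CATEGORY': 'app-misc\n', 'PF': 'foo-1.2.3\n'})
assert segment.startswith(b'XPAKPACK') and segment.endswith(b'XPAKSTOP')

# Split it back into index and data segments, list the recorded names,
# and fetch one entry by name.
index, data = xsplit_mem(segment)
assert sorted(getindex_mem(index)) == [b'CATEGORY', b'PF']
assert getitem((index, data), 'PF') == b'foo-1.2.3\n'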