summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--sci-libs/vtk/files/vtk-9.0.3-TBB-2021.04-fix.patch229
-rw-r--r--sci-libs/vtk/files/vtk-9.0.3-cuda-11.5.0.patch25
-rw-r--r--sci-libs/vtk/files/vtk-9.0.3-fix-gcc10-return-local-addr-in-wrappers-upstream-commit-55c74ed3.patch88
-rw-r--r--sci-libs/vtk/files/vtk-9.0.3-pegtl-3.patch75
-rw-r--r--sci-libs/vtk/files/vtk-9.0.3-proj-api-fix-upstream-commit-03256388.patch163
-rw-r--r--sci-libs/vtk/files/vtk-9.0.3-tbb-fix-for-bundled-vtkm.patch504
-rw-r--r--sci-libs/vtk/files/vtk-9.0.3-use-std-atomic-part1.patch811
-rw-r--r--sci-libs/vtk/files/vtk-9.0.3-use-std-atomic-part2.patch851
-rw-r--r--sci-libs/vtk/vtk-9.0.3-r4.ebuild557
9 files changed, 3303 insertions, 0 deletions
diff --git a/sci-libs/vtk/files/vtk-9.0.3-TBB-2021.04-fix.patch b/sci-libs/vtk/files/vtk-9.0.3-TBB-2021.04-fix.patch
new file mode 100644
index 00000000000..2927368a7fd
--- /dev/null
+++ b/sci-libs/vtk/files/vtk-9.0.3-TBB-2021.04-fix.patch
@@ -0,0 +1,229 @@
+Patch is based on following upstream commits:
+
+56cf03534c58a21f3cea608e4f53b7e7e00670fc by Mickael PHILIT <mickey.phy@gmail.com>
+ded89c1e3a025874bfd69feb6c67fa21d6af36d4 by Mickael PHILIT <mickey.phy@gmail.com>
+3e5313e71c35aaeabfd4e65e090031716460d35e by Timothée Couble <timothee.couble@kitware.com>
+
+Backported by Vyacheslav Perestoronin <perestoronin@gmail.com>
+
+Adapted by Vadim Misbakh-Soloviov <mva@gentoo.org>
+
+--- a/CMake/FindTBB.cmake
++++ b/CMake/FindTBB.cmake
+@@ -424,12 +424,18 @@ findpkg_finish(TBB_MALLOC_PROXY tbbmalloc_proxy)
+ #=============================================================================
+ #parse all the version numbers from tbb
+ if(NOT TBB_VERSION)
+-
+- #only read the start of the file
+- file(STRINGS
++ if (EXISTS "${TBB_INCLUDE_DIR}/oneapi/tbb/version.h")
++ file(STRINGS
++ "${TBB_INCLUDE_DIR}/oneapi/tbb/version.h"
++ TBB_VERSION_CONTENTS
++ REGEX "VERSION")
++ else()
++ #only read the start of the file
++ file(STRINGS
+ "${TBB_INCLUDE_DIR}/tbb/tbb_stddef.h"
+ TBB_VERSION_CONTENTS
+ REGEX "VERSION")
++ endif()
+
+ string(REGEX REPLACE
+ ".*#define TBB_VERSION_MAJOR ([0-9]+).*" "\\1"
+--- a/Common/Core/SMP/TBB/vtkSMPTools.cxx
++++ b/Common/Core/SMP/TBB/vtkSMPTools.cxx
+@@ -22,47 +22,50 @@
+ #define __TBB_NO_IMPLICIT_LINKAGE 1
+ #endif
+
+-#include <tbb/task_scheduler_init.h>
++#include <tbb/task_arena.h>
+
+ #ifdef _MSC_VER
+ #pragma pop_macro("__TBB_NO_IMPLICIT_LINKAGE")
+ #endif
+
+-struct vtkSMPToolsInit
+-{
+- tbb::task_scheduler_init Init;
+-
+- vtkSMPToolsInit(int numThreads)
+- : Init(numThreads)
+- {
+- }
+-};
+-
+-static bool vtkSMPToolsInitialized = 0;
+-static int vtkTBBNumSpecifiedThreads = 0;
++static tbb::task_arena taskArena;
+ static vtkSimpleCriticalSection vtkSMPToolsCS;
+
+ //--------------------------------------------------------------------------------
+ void vtkSMPTools::Initialize(int numThreads)
+ {
+ vtkSMPToolsCS.Lock();
+- if (!vtkSMPToolsInitialized)
++
++ // If numThreads <= 0, don't create a task_arena
++ // and let TBB do the default thing.
++ if (numThreads > 0 && numThreads != taskArena.max_concurrency())
+ {
+- // If numThreads <= 0, don't create a task_scheduler_init
+- // and let TBB do the default thing.
+- if (numThreads > 0)
++ if (taskArena.is_active())
+ {
+- static vtkSMPToolsInit aInit(numThreads);
+- vtkTBBNumSpecifiedThreads = numThreads;
++ taskArena.terminate();
+ }
+- vtkSMPToolsInitialized = true;
++ taskArena.initialize(numThreads);
+ }
++
+ vtkSMPToolsCS.Unlock();
+ }
+
+ //--------------------------------------------------------------------------------
+ int vtkSMPTools::GetEstimatedNumberOfThreads()
+ {
+- return vtkTBBNumSpecifiedThreads ? vtkTBBNumSpecifiedThreads
+- : tbb::task_scheduler_init::default_num_threads();
++ return taskArena.max_concurrency();
++}
++
++//------------------------------------------------------------------------------
++void vtk::detail::smp::vtkSMPTools_Impl_For_TBB(vtkIdType first, vtkIdType last, vtkIdType grain,
++ ExecuteFunctorPtrType functorExecuter, void* functor)
++{
++ if (taskArena.is_active())
++ {
++ taskArena.execute([&] { functorExecuter(functor, first, last, grain); });
++ }
++ else
++ {
++ functorExecuter(functor, first, last, grain);
++ }
+ }
+--- a/Common/Core/SMP/TBB/vtkSMPToolsInternal.h.in
++++ b/Common/Core/SMP/TBB/vtkSMPToolsInternal.h.in
+@@ -12,7 +12,8 @@
+ PURPOSE. See the above copyright notice for more information.
+
+ =========================================================================*/
+-#include "vtkNew.h"
++
++#include "vtkCommonCoreModule.h" // For export macro
+
+ #ifdef _MSC_VER
+ # pragma push_macro("__TBB_NO_IMPLICIT_LINKAGE")
+@@ -34,6 +35,10 @@
+ namespace smp
+ {
+
++typedef void (*ExecuteFunctorPtrType)(void*, vtkIdType, vtkIdType, vtkIdType);
++void VTKCOMMONCORE_EXPORT vtkSMPTools_Impl_For_TBB(vtkIdType first, vtkIdType last, vtkIdType grain,
++ ExecuteFunctorPtrType functorExecuter, void* functor);
++
+ //--------------------------------------------------------------------------------
+ template <typename T>
+ class FuncCall
+@@ -43,22 +48,22 @@
+ void operator=(const FuncCall&) = delete;
+
+ public:
+- void operator() (const tbb::blocked_range<vtkIdType>& r) const
+- {
+- o.Execute(r.begin(), r.end());
++ void operator()(const tbb::blocked_range<vtkIdType>& r) const {
++ o.Execute(r.begin(), r.end());
+ }
+
+- FuncCall (T& _o) : o(_o)
++ FuncCall(T& _o)
++ : o(_o)
+ {
+ }
+ };
+
+ //--------------------------------------------------------------------------------
+ template <typename FunctorInternal>
+-void vtkSMPTools_Impl_For(
+- vtkIdType first, vtkIdType last, vtkIdType grain,
+- FunctorInternal& fi)
++void ExecuteFunctor(void* functor, vtkIdType first, vtkIdType last, vtkIdType grain)
+ {
++ FunctorInternal& fi = *reinterpret_cast<FunctorInternal*>(functor);
++
+ vtkIdType n = last - first;
+ if (!n)
+ {
+@@ -66,32 +71,37 @@
+ }
+ if (grain > 0)
+ {
+- tbb::parallel_for(tbb::blocked_range<vtkIdType>(first, last, grain), FuncCall<FunctorInternal>(fi));
++ tbb::parallel_for(
++ tbb::blocked_range<vtkIdType>(first, last, grain), FuncCall<FunctorInternal>(fi));
+ }
+ else
+ {
+- tbb::parallel_for(tbb::blocked_range<vtkIdType>(first, last), FuncCall<FunctorInternal>(fi));
++ tbb::parallel_for(
++ tbb::blocked_range<vtkIdType>(first, last), FuncCall<FunctorInternal>(fi));
+ }
+ }
+
+ //--------------------------------------------------------------------------------
+-template<typename RandomAccessIterator>
+-void vtkSMPTools_Impl_Sort(RandomAccessIterator begin,
+- RandomAccessIterator end)
++template <typename FunctorInternal>
++void vtkSMPTools_Impl_For(vtkIdType first, vtkIdType last, vtkIdType grain, FunctorInternal& fi)
++{
++ vtkSMPTools_Impl_For_TBB(first, last, grain, ExecuteFunctor<FunctorInternal>, &fi);
++}
++
++//--------------------------------------------------------------------------------
++template <typename RandomAccessIterator>
++void vtkSMPTools_Impl_Sort(RandomAccessIterator begin, RandomAccessIterator end)
+ {
+ tbb::parallel_sort(begin, end);
+ }
+
+ //--------------------------------------------------------------------------------
+-template<typename RandomAccessIterator, typename Compare>
+-void vtkSMPTools_Impl_Sort(RandomAccessIterator begin,
+- RandomAccessIterator end,
+- Compare comp)
++template <typename RandomAccessIterator, typename Compare>
++void vtkSMPTools_Impl_Sort(RandomAccessIterator begin, RandomAccessIterator end, Compare comp)
+ {
+ tbb::parallel_sort(begin, end, comp);
+ }
+
+-
+-}//namespace smp
+-}//namespace detail
+-}//namespace vtk
++} // namespace smp
++} // namespace detail
++} // namespace vtk
+--- a/Common/Core/vtkSMPTools.h
++++ b/Common/Core/vtkSMPTools.h
+@@ -228,11 +228,8 @@ public:
+ * Initialize the underlying libraries for execution. This is
+ * not required as it is automatically called before the first
+ * execution of any parallel code. However, it can be used to
+- * control the maximum number of threads used when the back-end
+- * supports it (currently Simple and TBB only). Make sure to call
+- * it before any other parallel operation.
+- * When using Kaapi, use the KAAPI_CPUCOUNT env. variable to control
+- * the number of threads used in the thread pool.
++ * control the maximum number of threads used. Make sure to call
++ * it before the parallel operation.
+ */
+ static void Initialize(int numThreads = 0);
+
diff --git a/sci-libs/vtk/files/vtk-9.0.3-cuda-11.5.0.patch b/sci-libs/vtk/files/vtk-9.0.3-cuda-11.5.0.patch
new file mode 100644
index 00000000000..dc0094516f2
--- /dev/null
+++ b/sci-libs/vtk/files/vtk-9.0.3-cuda-11.5.0.patch
@@ -0,0 +1,25 @@
+This patch was sent by Vyacheslav Perestoronin <perestoronin@gmail.com>
+
+Declared purpose is to make VTK build with the CUDA-11.5 release.
+
+--- a/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/internal/brigand.hpp
++++ b/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/internal/brigand.hpp
+@@ -1057,18 +1057,6 @@
+ {
+ using type = S<L, Ls...>;
+ };
+- template <typename L1, typename L2, typename... Ls>
+- struct find<true, false, L1, L2, Ls...> : find<true, F<Ts..., L2>::value, L2, Ls...>
+- {
+- };
+- template <typename L0, typename L1, typename L2, typename L3, typename L4, typename L5,
+- typename L6, typename L7, typename L8,
+- typename... Ls>
+- struct find<false, false, L0, L1, L2, L3, L4, L5, L6, L7, L8, Ls...>
+- : find<true, F<Ts..., L8>::value, L8, Ls...>
+- {
+- };
+-
+ #ifndef BRIGAND_COMP_CUDA
+ template <typename L1, typename L2, typename L3, typename L4, typename L5, typename L6,
+ typename L7, typename L8, typename L9, typename L10, typename L11, typename L12,
diff --git a/sci-libs/vtk/files/vtk-9.0.3-fix-gcc10-return-local-addr-in-wrappers-upstream-commit-55c74ed3.patch b/sci-libs/vtk/files/vtk-9.0.3-fix-gcc10-return-local-addr-in-wrappers-upstream-commit-55c74ed3.patch
new file mode 100644
index 00000000000..6ac2f7c2a6b
--- /dev/null
+++ b/sci-libs/vtk/files/vtk-9.0.3-fix-gcc10-return-local-addr-in-wrappers-upstream-commit-55c74ed3.patch
@@ -0,0 +1,88 @@
+From 55c74ed3854736cc6cb43fd242f1db5249aea5e4 Mon Sep 17 00:00:00 2001
+From: David Gobbi <david.gobbi@gmail.com>
+Date: Fri, 29 May 2020 10:24:16 -0600
+Subject: [PATCH] Fix gcc10 return-local-addr warning in wrappers
+
+There was a warning from gcc10 because it thought a stack-allocated
+array variable was being returned.
+---
+ Wrapping/Tools/vtkParseExtras.c | 10 +++++-----
+ Wrapping/Tools/vtkParsePreprocess.c | 27 ++++++++++++++++-----------
+ 2 files changed, 21 insertions(+), 16 deletions(-)
+
+diff --git a/Wrapping/Tools/vtkParseExtras.c b/Wrapping/Tools/vtkParseExtras.c
+index 88a73c15d8d..853ed135f6f 100644
+--- a/Wrapping/Tools/vtkParseExtras.c
++++ b/Wrapping/Tools/vtkParseExtras.c
+@@ -251,13 +251,13 @@ static const char* vtkparse_string_replace(
+ if (any_replaced)
+ {
+ /* return a string that was allocated with malloc */
+- if (result == result_store)
++ tmp = (char*)malloc(strlen(result) + 1);
++ strcpy(tmp, result);
++ cp = tmp;
++ if (result != result_store)
+ {
+- tmp = (char*)malloc(strlen(result) + 1);
+- strcpy(tmp, result);
+- result = tmp;
++ free(result);
+ }
+- cp = result;
+ }
+ }
+
+diff --git a/Wrapping/Tools/vtkParsePreprocess.c b/Wrapping/Tools/vtkParsePreprocess.c
+index 4f30cfa2cb5..bd2e7cbb116 100644
+--- a/Wrapping/Tools/vtkParsePreprocess.c
++++ b/Wrapping/Tools/vtkParsePreprocess.c
+@@ -4405,14 +4405,16 @@ const char* vtkParsePreprocess_ExpandMacro(
+ }
+ return macro->Definition;
+ }
+-
+- if (rp == stack_rp)
++ else
+ {
+- rp = (char*)malloc(strlen(stack_rp) + 1);
+- strcpy(rp, stack_rp);
++ char* tmp = (char*)malloc(strlen(rp) + 1);
++ strcpy(tmp, rp);
++ if (rp != stack_rp)
++ {
++ free(rp);
++ }
++ return tmp;
+ }
+-
+- return rp;
+ }
+
+ /**
+@@ -4631,14 +4633,17 @@ const char* vtkParsePreprocess_ProcessString(PreprocessInfo* info, const char* t
+ }
+ return tp;
+ }
+- if (rp == stack_rp)
++ else
+ {
+- rp = (char*)malloc(strlen(stack_rp) + 1);
+- strcpy(rp, stack_rp);
++ char* tmp = (char*)malloc(strlen(rp) + 1);
++ strcpy(tmp, rp);
++ if (rp != stack_rp)
++ {
++ free(rp);
++ }
++ return tmp;
+ }
+ }
+-
+- return rp;
+ }
+
+ /**
+--
+GitLab
+
diff --git a/sci-libs/vtk/files/vtk-9.0.3-pegtl-3.patch b/sci-libs/vtk/files/vtk-9.0.3-pegtl-3.patch
new file mode 100644
index 00000000000..a7c41231f4b
--- /dev/null
+++ b/sci-libs/vtk/files/vtk-9.0.3-pegtl-3.patch
@@ -0,0 +1,75 @@
+Based on patch from Zoltán Fridrich <zfridric@redhat.com>
+Reworked by Vyacheslav Perestoronin <perestoronin@gmail.com>
+Little fixes by Vadim Misbakh-Soloviov <mva@gentoo.org>
+
+--- a/IO/MotionFX/vtkMotionFXCFGGrammar.h
++++ b/IO/MotionFX/vtkMotionFXCFGGrammar.h
+@@ -23,7 +23,12 @@
+
+ // for debugging
+ // clang-format off
++#if TAO_PEGTL_VERSION_MAJOR >= 3
++#include VTK_PEGTL(pegtl/contrib/trace.hpp)
++#else
+ #include VTK_PEGTL(pegtl/contrib/tracer.hpp)
++#endif
++
+ // clang-format on
+
+ namespace MotionFX
+--- a/IO/MotionFX/vtkMotionFXCFGReader.cxx
++++ b/IO/MotionFX/vtkMotionFXCFGReader.cxx
+@@ -1014,14 +1014,24 @@
+ if (this->isOrientation)
+ {
+ std::vector<double> numbers;
++#if TAO_PEGTL_VERSION_MAJOR >= 3
++ tao::pegtl::complete_trace<MotionFX::OrientationsPositionFile::Grammar,
++ Actions::PositionFile::action>(in, numbers, this->positions);
++#else
+ tao::pegtl::parse<MotionFX::OrientationsPositionFile::Grammar,
+ Actions::PositionFile::action /*, tao::pegtl::tracer*/>(in, numbers, this->positions);
++#endif
+ }
+ else
+ {
+ std::vector<double> numbers;
++#if TAO_PEGTL_VERSION_MAJOR >= 3
++ tao::pegtl::complete_trace<MotionFX::LegacyPositionFile::Grammar,
++ Actions::PositionFile::action>(in, numbers, this->positions);
++#else
+ tao::pegtl::parse<MotionFX::LegacyPositionFile::Grammar,
+ Actions::PositionFile::action /*, tao::pegtl::tracer*/>(in, numbers, this->positions);
++#endif
+ }
+ return true;
+ }
+- catch (const tao::pegtl::input_error& e)
++ catch (const std::filesystem::filesystem_error& e)
+ {
+ vtkGenericWarningMacro("PositionFileMotion::read_position_file failed: " << e.what());
+ }
+@@ -1050,7 +1060,11 @@
+ {
+ tao::pegtl::read_input<> in(filename);
+ Actions::CFG::ActiveState state(this->Motions);
++#if TAO_PEGTL_VERSION_MAJOR >= 3
++ tao::pegtl::complete_trace<MotionFX::CFG::Grammar, Actions::CFG::action>(in, state);
++#else
+ tao::pegtl::parse<MotionFX::CFG::Grammar, Actions::CFG::action>(in, state);
++#endif
+ if (this->Motions.size() == 0)
+ {
+ vtkGenericWarningMacro(
+@@ -1061,7 +1075,11 @@
+ if (getenv("MOTIONFX_DEBUG_GRAMMAR") != nullptr)
+ {
+ tao::pegtl::read_input<> in2(filename);
++#if TAO_PEGTL_VERSION_MAJOR >= 3
++ tao::pegtl::complete_trace<MotionFX::CFG::Grammar, tao::pegtl::nothing>(in2);
++#else
+ tao::pegtl::parse<MotionFX::CFG::Grammar, tao::pegtl::nothing, tao::pegtl::tracer>(in2);
++#endif
+ }
+ return false;
+ }
diff --git a/sci-libs/vtk/files/vtk-9.0.3-proj-api-fix-upstream-commit-03256388.patch b/sci-libs/vtk/files/vtk-9.0.3-proj-api-fix-upstream-commit-03256388.patch
new file mode 100644
index 00000000000..1431ae04a5a
--- /dev/null
+++ b/sci-libs/vtk/files/vtk-9.0.3-proj-api-fix-upstream-commit-03256388.patch
@@ -0,0 +1,163 @@
+Almost original commit patch.
+Slightly modified (to make it apply on top of 9.0.3 release tarball) by
+Vadim Misbakh-Soloviov <mva@gentoo.org>
+
+The modified part is the last 3 lines in the second hunk of the first file:
+```
+}
+else
+{
+```
+
+Original commit patch had a bit different code there, while 9.0.3 has this.
+
+---
+From 0325638832e35c8c8c6fc96e2c1d887aeea3dd43 Mon Sep 17 00:00:00 2001
+From: Julien Schueller <schueller@phimeca.com>
+Date: Mon, 8 Mar 2021 10:57:46 +0100
+Subject: [PATCH] Geovis: Use proj>=5 api
+
+Closes #18130
+---
+ Geovis/Core/vtkGeoProjection.cxx | 17 ++++++++++++++++-
+ Geovis/Core/vtkGeoTransform.cxx | 28 ++++++++++++++--------------
+ ThirdParty/libproj/vtk_libproj.h.in | 7 +------
+ 3 files changed, 31 insertions(+), 21 deletions(-)
+
+diff --git a/Geovis/Core/vtkGeoProjection.cxx b/Geovis/Core/vtkGeoProjection.cxx
+index 7ff6526a5d3..0a0d06eba19 100644
+--- a/Geovis/Core/vtkGeoProjection.cxx
++++ b/Geovis/Core/vtkGeoProjection.cxx
+@@ -121,7 +121,11 @@ vtkGeoProjection::~vtkGeoProjection()
+ this->SetPROJ4String(nullptr);
+ if (this->Projection)
+ {
++#if PROJ_VERSION_MAJOR >= 5
++ proj_destroy(this->Projection);
++#else
+ pj_free(this->Projection);
++#endif
+ }
+ delete this->Internals;
+ this->Internals = nullptr;
+@@ -185,13 +189,21 @@ int vtkGeoProjection::UpdateProjection()
+
+ if (this->Projection)
+ {
++#if PROJ_VERSION_MAJOR >= 5
++ proj_destroy(this->Projection);
++#else
+ pj_free(this->Projection);
++#endif
+ this->Projection = nullptr;
+ }
+
+ if (this->PROJ4String && strlen(this->PROJ4String))
+ {
++#if PROJ_VERSION_MAJOR >= 5
++ this->Projection = proj_create(PJ_DEFAULT_CTX, this->PROJ4String);
++#else
+ this->Projection = pj_init_plus(this->PROJ4String);
++#endif
+ }
+ else
+ {
+@@ -234,8 +246,11 @@ int vtkGeoProjection::UpdateProjection()
+ stringHolder[i] = param.str();
+ pjArgs[3 + i] = stringHolder[i].c_str();
+ }
+-
++#if PROJ_VERSION_MAJOR >= 5
++ this->Projection = proj_create_argv(PJ_DEFAULT_CTX, argSize, const_cast<char**>(pjArgs));
++#else
+ this->Projection = pj_init(argSize, const_cast<char**>(pjArgs));
++#endif
+ delete[] pjArgs;
+ }
+ this->ProjectionMTime = this->GetMTime();
+diff --git a/Geovis/Core/vtkGeoTransform.cxx b/Geovis/Core/vtkGeoTransform.cxx
+index 5c2c74279de..1c99b6b11be 100644
+--- a/Geovis/Core/vtkGeoTransform.cxx
++++ b/Geovis/Core/vtkGeoTransform.cxx
+@@ -163,8 +163,12 @@ void vtkGeoTransform::InternalTransformPoints(double* x, vtkIdType numPts, int s
+ projPJ src = this->SourceProjection ? this->SourceProjection->GetProjection() : nullptr;
+ projPJ dst = this->DestinationProjection ? this->DestinationProjection->GetProjection() : nullptr;
+ int delta = stride - 2;
++#if PROJ_VERSION_MAJOR >= 5
++ PJ_COORD c, c_out;
++#else
+ projLP lp;
+ projXY xy;
++#endif
+ if (src)
+ {
+ // Convert from src system to lat/long using inverse of src transform
+@@ -172,17 +176,15 @@ void vtkGeoTransform::InternalTransformPoints(double* x, vtkIdType numPts, int s
+ for (vtkIdType i = 0; i < numPts; ++i)
+ {
+ #if PROJ_VERSION_MAJOR >= 5
+- xy.x = coord[0];
+- xy.y = coord[1];
++ c.xy.x = coord[0];
++ c.xy.y = coord[1];
++ c_out = proj_trans(src, PJ_INV, c);
++ coord[0] = c_out.lp.lam;
++ coord[1] = c_out.lp.phi;
+ #else
+ xy.u = coord[0];
+ xy.v = coord[1];
+-#endif
+ lp = pj_inv(xy, src);
+-#if PROJ_VERSION_MAJOR >= 5
+- coord[0] = lp.lam;
+- coord[1] = lp.phi;
+-#else
+ coord[0] = lp.u;
+ coord[1] = lp.v;
+ #endif
+@@ -208,17 +210,15 @@ void vtkGeoTransform::InternalTransformPoints(double* x, vtkIdType numPts, int s
+ for (vtkIdType i = 0; i < numPts; ++i)
+ {
+ #if PROJ_VERSION_MAJOR >= 5
+- lp.lam = coord[0];
+- lp.phi = coord[1];
++ c.lp.lam = coord[0];
++ c.lp.phi = coord[1];
++ c_out = proj_trans(src, PJ_FWD, c);
++ coord[0] = c_out.xy.x;
++ coord[1] = c_out.xy.y;
+ #else
+ lp.u = coord[0];
+ lp.v = coord[1];
+-#endif
+ xy = pj_fwd(lp, dst);
+-#if PROJ_VERSION_MAJOR >= 5
+- coord[0] = xy.x;
+- coord[1] = xy.y;
+-#else
+ coord[0] = xy.u;
+ coord[1] = xy.v;
+ #endif
+diff --git a/ThirdParty/libproj/vtk_libproj.h.in b/ThirdParty/libproj/vtk_libproj.h.in
+index 4d8ffc3c5d5..c4182c4db2b 100644
+--- a/ThirdParty/libproj/vtk_libproj.h.in
++++ b/ThirdParty/libproj/vtk_libproj.h.in
+@@ -28,14 +28,9 @@
+ #if VTK_MODULE_USE_EXTERNAL_vtklibproj
+ # if VTK_LibPROJ_MAJOR_VERSION >= 5
+ # include <proj.h>
+-# endif
+-# if VTK_LibPROJ_MAJOR_VERSION < 6
++# else
+ # include <projects.h>
+ # endif
+-# if VTK_LibPROJ_MAJOR_VERSION >= 6
+-# define ACCEPT_USE_OF_DEPRECATED_PROJ_API_H 1
+-# endif
+-# include <proj_api.h>
+ # include <geodesic.h>
+ #else
+ # include <vtklibproj/src/projects.h>
+--
+GitLab
+
diff --git a/sci-libs/vtk/files/vtk-9.0.3-tbb-fix-for-bundled-vtkm.patch b/sci-libs/vtk/files/vtk-9.0.3-tbb-fix-for-bundled-vtkm.patch
new file mode 100644
index 00000000000..f2e1e39d631
--- /dev/null
+++ b/sci-libs/vtk/files/vtk-9.0.3-tbb-fix-for-bundled-vtkm.patch
@@ -0,0 +1,504 @@
+Patch was backported by Vadim Misbakh-Soloviov <mva@gentoo.org> from vtk-m repo.
+
+Paths were changed to be relative to ${S}.
+
+Also, some spaces were changed in the removed header (so it is not detected as reversed).
+
+---
+From 904e784e895229d675cd7b3b43a963fdc5813ac8 Mon Sep 17 00:00:00 2001
+From: Kenneth Moreland <morelandkd@ornl.gov>
+Date: Fri, 4 Jun 2021 09:30:52 -0600
+Subject: [PATCH] Remove TBB parallel_sort patch
+
+Years ago we discovered a problem with TBB's parallel sort, which we
+patch in our local repo and submitted a change to TBB, which has been
+accepted.
+
+The code to decide whether to use our parallel_sort patch does not work
+with the latest versions of TBB because it requires including a header
+that changed names to get the TBB version.
+
+We no longer support any TBB version with this bug, so just remove the
+patch from VTK-m.
+---
+ vtkm/cont/tbb/internal/CMakeLists.txt | 1 -
+ vtkm/cont/tbb/internal/FunctorsTBB.h | 10 +-
+ vtkm/cont/tbb/internal/parallel_sort.h | 273 -------------------------
+ 3 files changed, 1 insertion(+), 283 deletions(-)
+ delete mode 100644 vtkm/cont/tbb/internal/parallel_sort.h
+
+diff --git a/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/tbb/internal/CMakeLists.txt b/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/tbb/internal/CMakeLists.txt
+index 1283307be..ffbf1e845 100644
+--- a/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/tbb/internal/CMakeLists.txt
++++ b/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/tbb/internal/CMakeLists.txt
+@@ -25,7 +25,6 @@ endif()
+
+ vtkm_declare_headers(${headers}
+ ParallelSortTBB.hxx
+- parallel_sort.h
+ )
+
+ #These sources need to always be built
+diff --git a/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/tbb/internal/FunctorsTBB.h b/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/tbb/internal/FunctorsTBB.h
+index dc988f7f8..c538c2240 100644
+--- a/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/tbb/internal/FunctorsTBB.h
++++ b/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/tbb/internal/FunctorsTBB.h
+@@ -38,15 +38,6 @@ VTKM_THIRDPARTY_PRE_INCLUDE
+ // correct settings so that we don't clobber any existing function
+ #include <vtkm/internal/Windows.h>
+
+-#include <tbb/tbb_stddef.h>
+-#if (TBB_VERSION_MAJOR == 4) && (TBB_VERSION_MINOR == 2)
+-//we provide an patched implementation of tbb parallel_sort
+-//that fixes ADL for std::swap. This patch has been submitted to Intel
+-//and is fixed in TBB 4.2 update 2.
+-#include <vtkm/cont/tbb/internal/parallel_sort.h>
+-#else
+-#include <tbb/parallel_sort.h>
+-#endif
+
+ #include <numeric>
+ #include <tbb/blocked_range.h>
+@@ -54,6 +45,7 @@ VTKM_THIRDPARTY_PRE_INCLUDE
+ #include <tbb/parallel_for.h>
+ #include <tbb/parallel_reduce.h>
+ #include <tbb/parallel_scan.h>
++#include <tbb/parallel_sort.h>
+ #include <tbb/partitioner.h>
+ #include <tbb/tick_count.h>
+
+diff --git a/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/tbb/internal/parallel_sort.h b/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/tbb/internal/parallel_sort.h
+deleted file mode 100644
+index 3451a369f..000000000
+--- a/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/tbb/internal/parallel_sort.h
++++ /dev/null
+@@ -1,273 +0,0 @@
+-/*
+- Copyright 2005-2013 Intel Corporation. All Rights Reserved.
+-
+- This file is part of Threading Building Blocks.
+-
+- Threading Building Blocks is free software; you can redistribute it
+- and/or modify it under the terms of the GNU General Public License
+- version 2 as published by the Free Software Foundation.
+-
+- Threading Building Blocks is distributed in the hope that it will be
+- useful, but WITHOUT ANY WARRANTY; without even the implied warranty
+- of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- GNU General Public License for more details.
+-
+- You should have received a copy of the GNU General Public License
+- along with Threading Building Blocks; if not, write to the Free Software
+- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+-
+- As a special exception, you may use this file as part of a free software
+- library without restriction. Specifically, if other files instantiate
+- templates or use macros or inline functions from this file, or you compile
+- this file and link it with other files to produce an executable, this
+- file does not by itself cause the resulting executable to be covered by
+- the GNU General Public License. This exception does not however
+- invalidate any other reasons why the executable file might be covered by
+- the GNU General Public License.
+-*/
+-
+-#ifndef __TBB_parallel_sort_H
+-#define __TBB_parallel_sort_H
+-
+-#include <tbb/blocked_range.h>
+-#include <tbb/parallel_for.h>
+-
+-#include <algorithm>
+-#include <functional>
+-#include <iterator>
+-
+-namespace tbb
+-{
+-
+-//! @cond INTERNAL
+-namespace internal
+-{
+-
+-//! Range used in quicksort to split elements into subranges based on a value.
+-/** The split operation selects a splitter and places all elements less than or equal
+- to the value in the first range and the remaining elements in the second range.
+- @ingroup algorithms */
+-template <typename RandomAccessIterator, typename Compare>
+-class quick_sort_range : private no_assign
+-{
+-
+- inline size_t median_of_three(const RandomAccessIterator& array,
+- size_t l,
+- size_t m,
+- size_t r) const
+- {
+- return comp(array[l], array[m])
+- ? (comp(array[m], array[r]) ? m : (comp(array[l], array[r]) ? r : l))
+- : (comp(array[r], array[m]) ? m : (comp(array[r], array[l]) ? r : l));
+- }
+-
+- inline size_t pseudo_median_of_nine(const RandomAccessIterator& array,
+- const quick_sort_range& range) const
+- {
+- size_t offset = range.size / 8u;
+- return median_of_three(array,
+- median_of_three(array, 0, offset, offset * 2),
+- median_of_three(array, offset * 3, offset * 4, offset * 5),
+- median_of_three(array, offset * 6, offset * 7, range.size - 1));
+- }
+-
+-public:
+- static const size_t grainsize = 500;
+- const Compare& comp;
+- RandomAccessIterator begin;
+- size_t size;
+-
+- quick_sort_range(RandomAccessIterator begin_, size_t size_, const Compare& comp_)
+- : comp(comp_)
+- , begin(begin_)
+- , size(size_)
+- {
+- }
+-
+- bool empty() const { return size == 0; }
+- bool is_divisible() const { return size >= grainsize; }
+-
+- quick_sort_range(quick_sort_range& range, split)
+- : comp(range.comp)
+- {
+- using std::swap;
+- RandomAccessIterator array = range.begin;
+- RandomAccessIterator key0 = range.begin;
+- size_t m = pseudo_median_of_nine(array, range);
+- if (m)
+- swap(array[0], array[m]);
+-
+- size_t i = 0;
+- size_t j = range.size;
+- // Partition interval [i+1,j-1] with key *key0.
+- for (;;)
+- {
+- __TBB_ASSERT(i < j, nullptr);
+- // Loop must terminate since array[l]==*key0.
+- do
+- {
+- --j;
+- __TBB_ASSERT(i <= j, "bad ordering relation?");
+- } while (comp(*key0, array[j]));
+- do
+- {
+- __TBB_ASSERT(i <= j, nullptr);
+- if (i == j)
+- goto partition;
+- ++i;
+- } while (comp(array[i], *key0));
+- if (i == j)
+- goto partition;
+- swap(array[i], array[j]);
+- }
+- partition:
+- // Put the partition key were it belongs
+- swap(array[j], *key0);
+- // array[l..j) is less or equal to key.
+- // array(j..r) is greater or equal to key.
+- // array[j] is equal to key
+- i = j + 1;
+- begin = array + i;
+- size = range.size - i;
+- range.size = j;
+- }
+-};
+-
+-#if __TBB_TASK_GROUP_CONTEXT
+-//! Body class used to test if elements in a range are presorted
+-/** @ingroup algorithms */
+-template <typename RandomAccessIterator, typename Compare>
+-class quick_sort_pretest_body : internal::no_assign
+-{
+- const Compare& comp;
+-
+-public:
+- quick_sort_pretest_body(const Compare& _comp)
+- : comp(_comp)
+- {
+- }
+-
+- void operator()(const blocked_range<RandomAccessIterator>& range) const
+- {
+- task& my_task = task::self();
+- RandomAccessIterator my_end = range.end();
+-
+- int i = 0;
+- for (RandomAccessIterator k = range.begin(); k != my_end; ++k, ++i)
+- {
+- if (i % 64 == 0 && my_task.is_cancelled())
+- break;
+-
+- // The k-1 is never out-of-range because the first chunk starts at begin+serial_cutoff+1
+- if (comp(*(k), *(k - 1)))
+- {
+- my_task.cancel_group_execution();
+- break;
+- }
+- }
+- }
+-};
+-#endif /* __TBB_TASK_GROUP_CONTEXT */
+-
+-//! Body class used to sort elements in a range that is smaller than the grainsize.
+-/** @ingroup algorithms */
+-template <typename RandomAccessIterator, typename Compare>
+-struct quick_sort_body
+-{
+- void operator()(const quick_sort_range<RandomAccessIterator, Compare>& range) const
+- {
+- //SerialQuickSort( range.begin, range.size, range.comp );
+- std::sort(range.begin, range.begin + range.size, range.comp);
+- }
+-};
+-
+-//! Wrapper method to initiate the sort by calling parallel_for.
+-/** @ingroup algorithms */
+-template <typename RandomAccessIterator, typename Compare>
+-void parallel_quick_sort(RandomAccessIterator begin, RandomAccessIterator end, const Compare& comp)
+-{
+-#if __TBB_TASK_GROUP_CONTEXT
+- task_group_context my_context;
+- const int serial_cutoff = 9;
+-
+- __TBB_ASSERT(begin + serial_cutoff < end, "min_parallel_size is smaller than serial cutoff?");
+- RandomAccessIterator k;
+- for (k = begin; k != begin + serial_cutoff; ++k)
+- {
+- if (comp(*(k + 1), *k))
+- {
+- goto do_parallel_quick_sort;
+- }
+- }
+-
+- parallel_for(blocked_range<RandomAccessIterator>(k + 1, end),
+- quick_sort_pretest_body<RandomAccessIterator, Compare>(comp),
+- auto_partitioner(),
+- my_context);
+-
+- if (my_context.is_group_execution_cancelled())
+- do_parallel_quick_sort:
+-#endif /* __TBB_TASK_GROUP_CONTEXT */
+- parallel_for(quick_sort_range<RandomAccessIterator, Compare>(begin, end - begin, comp),
+- quick_sort_body<RandomAccessIterator, Compare>(),
+- auto_partitioner());
+-}
+-
+-} // namespace internal
+-//! @endcond
+-
+-//! @cond INTERNAL
+-/** \page parallel_sort_iter_req Requirements on iterators for parallel_sort
+- Requirements on value type \c T of \c RandomAccessIterator for \c parallel_sort:
+- - \code void swap( T& x, T& y ) \endcode Swaps \c x and \c y
+- - \code bool Compare::operator()( const T& x, const T& y ) \endcode
+- True if x comes before y;
+-**/
+-
+-/** \name parallel_sort
+- See also requirements on \ref parallel_sort_iter_req "iterators for parallel_sort". **/
+-//@{
+-
+-//! Sorts the data in [begin,end) using the given comparator
+-/** The compare function object is used for all comparisons between elements during sorting.
+- The compare object must define a bool operator() function.
+- @ingroup algorithms **/
+-//! @endcond
+-template <typename RandomAccessIterator, typename Compare>
+-void parallel_sort(RandomAccessIterator begin, RandomAccessIterator end, const Compare& comp)
+-{
+- const int min_parallel_size = 500;
+- if (end > begin)
+- {
+- if (end - begin < min_parallel_size)
+- {
+- std::sort(begin, end, comp);
+- }
+- else
+- {
+- internal::parallel_quick_sort(begin, end, comp);
+- }
+- }
+-}
+-
+-//! Sorts the data in [begin,end) with a default comparator \c std::less<RandomAccessIterator>
+-/** @ingroup algorithms **/
+-template <typename RandomAccessIterator>
+-inline void parallel_sort(RandomAccessIterator begin, RandomAccessIterator end)
+-{
+- parallel_sort(
+- begin, end, std::less<typename std::iterator_traits<RandomAccessIterator>::value_type>());
+-}
+-
+-//! Sorts the data in the range \c [begin,end) with a default comparator \c std::less<T>
+-/** @ingroup algorithms **/
+-template <typename T>
+-inline void parallel_sort(T* begin, T* end)
+-{
+- parallel_sort(begin, end, std::less<T>());
+-}
+-//@}
+-
+-} // namespace tbb
+-
+-#endif
+--
+GitLab
+
+From 97a2408fc4a24eed8eaeb38f36315b4306e54f58 Mon Sep 17 00:00:00 2001
+From: Robert Maynard <robert.maynard@kitware.com>
+Date: Mon, 1 Jun 2020 16:58:02 -0400
+Subject: [PATCH] Correct warnings for using TBB 2020 deprecated functions
+
+---
+ vtkm/cont/tbb/internal/ParallelSortTBB.cxx | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/tbb/internal/ParallelSortTBB.cxx b/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/tbb/internal/ParallelSortTBB.cxx
+index 3076b3a9c..9243017e6 100644
+--- a/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/tbb/internal/ParallelSortTBB.cxx
++++ b/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/tbb/internal/ParallelSortTBB.cxx
+@@ -54,7 +54,7 @@
+ #include <vtkm/internal/Windows.h>
+
+ #include <tbb/task.h>
+-#include <tbb/tbb_thread.h>
++#include <thread>
+
+ #if defined(VTKM_MSVC)
+ #pragma pop_macro("__TBB_NO_IMPLICITLINKAGE")
+@@ -69,7 +69,7 @@ namespace tbb
+ namespace sort
+ {
+
+-const size_t MAX_CORES = ::tbb::tbb_thread::hardware_concurrency();
++const size_t MAX_CORES = std::thread::hardware_concurrency();
+
+ // Simple TBB task wrapper around a generic functor.
+ template <typename FunctorType>
+--
+GitLab
+
+From 1eea0bee122fa3ef47cc488d97f82e008d69c8bd Mon Sep 17 00:00:00 2001
+From: Kenneth Moreland <morelandkd@ornl.gov>
+Date: Fri, 4 Jun 2021 09:34:30 -0600
+Subject: [PATCH] Use TBB task_group for radix sort
+
+TBB 2020 introduced a new class called `task_group`. TBB 2021 removed
+the old class `task` as its functionality was replaced by `task_group`.
+Our parallel radix sort for TBB was implemented using `task`s, so change
+it to use `task_group` (which actually cleans up the code a bit).
+---
+ vtkm/cont/internal/ParallelRadixSort.h | 2 +-
+ vtkm/cont/tbb/internal/ParallelSortTBB.cxx | 35 ++++++++++++++++++++--
+ 2 files changed, 33 insertions(+), 4 deletions(-)
+
+diff --git a/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/internal/ParallelRadixSort.h b/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/internal/ParallelRadixSort.h
+index 7604aacd7..cca8f6b72 100644
+--- a/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/internal/ParallelRadixSort.h
++++ b/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/internal/ParallelRadixSort.h
+@@ -485,7 +485,7 @@ struct RunTask
+ }
+
+ template <typename ThreaderData = void*>
+- void operator()(ThreaderData tData = nullptr)
++ void operator()(ThreaderData tData = nullptr) const
+ {
+ size_t num_nodes_at_current_height = (size_t)pow(2, (double)binary_tree_height_);
+ if (num_threads_ <= num_nodes_at_current_height)
+diff --git a/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/tbb/internal/ParallelSortTBB.cxx b/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/tbb/internal/ParallelSortTBB.cxx
+index 9243017e6..d99406954 100644
+--- a/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/tbb/internal/ParallelSortTBB.cxx
++++ b/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/tbb/internal/ParallelSortTBB.cxx
+@@ -53,7 +53,7 @@
+ // correct settings so that we don't clobber any existing function
+ #include <vtkm/internal/Windows.h>
+
+-#include <tbb/task.h>
++#include <tbb/tbb.h>
+ #include <thread>
+
+ #if defined(VTKM_MSVC)
+@@ -71,6 +71,7 @@ namespace sort
+
+ const size_t MAX_CORES = std::thread::hardware_concurrency();
+
++#if TBB_VERSION_MAJOR < 2020
+ // Simple TBB task wrapper around a generic functor.
+ template <typename FunctorType>
+ struct TaskWrapper : public ::tbb::task
+@@ -94,7 +95,7 @@ struct RadixThreaderTBB
+ size_t GetAvailableCores() const { return MAX_CORES; }
+
+ template <typename TaskType>
+- void RunParentTask(TaskType task)
++ void RunParentTask(TaskType task) const
+ {
+ using Task = TaskWrapper<TaskType>;
+ Task& root = *new (::tbb::task::allocate_root()) Task(task);
+@@ -102,7 +103,7 @@ struct RadixThreaderTBB
+ }
+
+ template <typename TaskType>
+- void RunChildTasks(TaskWrapper<TaskType>* wrapper, TaskType left, TaskType right)
++ void RunChildTasks(TaskWrapper<TaskType>* wrapper, TaskType left, TaskType right) const
+ {
+ using Task = TaskWrapper<TaskType>;
+ ::tbb::empty_task& p = *new (wrapper->allocate_continuation())::tbb::empty_task();
+@@ -115,6 +116,34 @@ struct RadixThreaderTBB
+ }
+ };
+
++#else // TBB_VERSION_MAJOR >= 2020
++
++// In TBB version 2020, the task class was deprecated. Instead, we use the simpler task_group.
++
++struct RadixThreaderTBB
++{
++ std::shared_ptr<::tbb::task_group> TaskGroup =
++ std::shared_ptr<::tbb::task_group>(new ::tbb::task_group);
++
++ size_t GetAvailableCores() const { return MAX_CORES; }
++
++ template <typename TaskType>
++ void RunParentTask(TaskType task) const
++ {
++ this->TaskGroup->run_and_wait(task);
++ // All tasks should be complete at this point.
++ }
++
++ template <typename TaskType>
++ void RunChildTasks(void*, TaskType left, TaskType right) const
++ {
++ this->TaskGroup->run(left);
++ this->TaskGroup->run(right);
++ }
++};
++
++#endif
++
+ VTKM_INSTANTIATE_RADIX_SORT_FOR_THREADER(RadixThreaderTBB)
+ }
+ }
+diff --git a/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/tbb/internal/ParallelSortTBB.cxx b/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/tbb/internal/ParallelSortTBB.cxx
+index d99406954..73db80ed6 100644
+--- a/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/tbb/internal/ParallelSortTBB.cxx
++++ b/ThirdParty/vtkm/vtkvtkm/vtk-m/vtkm/cont/tbb/internal/ParallelSortTBB.cxx
+@@ -53,7 +53,13 @@
+ // correct settings so that we don't clobber any existing function
+ #include <vtkm/internal/Windows.h>
+
++#if TBB_VERSION_MAJOR >= 2020
++#include <tbb/task.h>
++#include <tbb/task_group.h>
++#else
+ #include <tbb/tbb.h>
++#endif
++
+ #include <thread>
+
+ #if defined(VTKM_MSVC)
diff --git a/sci-libs/vtk/files/vtk-9.0.3-use-std-atomic-part1.patch b/sci-libs/vtk/files/vtk-9.0.3-use-std-atomic-part1.patch
new file mode 100644
index 00000000000..ee52c32e53f
--- /dev/null
+++ b/sci-libs/vtk/files/vtk-9.0.3-use-std-atomic-part1.patch
@@ -0,0 +1,811 @@
+This is a backported upstream commit patch that removes vtkAtomic
+and switches the codebase to use std::atomic. This patch is required
+by the patchset supporting modern versions of dev-cpp/tbb.
+
+Patch backported by Vadim Misbakh-Soloviov <mva@gentoo.org>
+
+Original patch url: https://gitlab.kitware.com/vtk/vtk/-/commit/4ca62d8c73.patch
+
+--- a/Common/Core/CMakeLists.txt
++++ b/Common/Core/CMakeLists.txt
+@@ -266,7 +266,6 @@ set(headers
+ vtkArchiver.h
+ vtkArrayIteratorIncludes.h
+ vtkAssume.h
+- vtkAtomicTypeConcepts.h
+ vtkAutoInit.h
+ vtkBuffer.h
+ vtkCollectionRange.h
+--- a/Common/Core/SMP/OpenMP/vtkAtomic.cxx
++++ /dev/null
+@@ -1,173 +0,0 @@
+-/*=========================================================================
+-
+- Program: Visualization Toolkit
+- Module: vtkAtomic.cxx
+-
+- Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
+- All rights reserved.
+- See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
+-
+- This software is distributed WITHOUT ANY WARRANTY; without even
+- the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+- PURPOSE. See the above copyright notice for more information.
+-
+-=========================================================================*/
+-
+-#include "vtkAtomic.h"
+-
+-namespace detail
+-{
+-
+-vtkTypeInt64 AtomicOps<8>::AddAndFetch(vtkTypeInt64* ref, vtkTypeInt64 val)
+-{
+- vtkTypeInt64 result;
+-#pragma omp atomic capture
+- {
+- (*ref) += val;
+- result = *ref;
+- }
+-#pragma omp flush
+- return result;
+-}
+-
+-vtkTypeInt64 AtomicOps<8>::SubAndFetch(vtkTypeInt64* ref, vtkTypeInt64 val)
+-{
+- vtkTypeInt64 result;
+-#pragma omp atomic capture
+- {
+- (*ref) -= val;
+- result = *ref;
+- }
+-#pragma omp flush
+- return result;
+-}
+-
+-vtkTypeInt64 AtomicOps<8>::PreIncrement(vtkTypeInt64* ref)
+-{
+- vtkTypeInt64 result;
+-#pragma omp atomic capture
+- result = ++(*ref);
+-#pragma omp flush
+- return result;
+-}
+-
+-vtkTypeInt64 AtomicOps<8>::PreDecrement(vtkTypeInt64* ref)
+-{
+- vtkTypeInt64 result;
+-#pragma omp atomic capture
+- result = --(*ref);
+-#pragma omp flush
+- return result;
+-}
+-
+-vtkTypeInt64 AtomicOps<8>::PostIncrement(vtkTypeInt64* ref)
+-{
+- vtkTypeInt64 result;
+-#pragma omp atomic capture
+- result = (*ref)++;
+-#pragma omp flush
+- return result;
+-}
+-
+-vtkTypeInt64 AtomicOps<8>::PostDecrement(vtkTypeInt64* ref)
+-{
+- vtkTypeInt64 result;
+-#pragma omp atomic capture
+- result = (*ref)--;
+-#pragma omp flush
+- return result;
+-}
+-
+-vtkTypeInt64 AtomicOps<8>::Load(const vtkTypeInt64* ref)
+-{
+- vtkTypeInt64 result;
+-#pragma omp flush
+-#pragma omp atomic read
+- result = *ref;
+- return result;
+-}
+-
+-void AtomicOps<8>::Store(vtkTypeInt64* ref, vtkTypeInt64 val)
+-{
+-#pragma omp atomic write
+- *ref = val;
+-#pragma omp flush
+-}
+-
+-vtkTypeInt32 AtomicOps<4>::AddAndFetch(vtkTypeInt32* ref, vtkTypeInt32 val)
+-{
+- vtkTypeInt32 result;
+-#pragma omp atomic capture
+- {
+- (*ref) += val;
+- result = *ref;
+- }
+-#pragma omp flush
+- return result;
+-}
+-
+-vtkTypeInt32 AtomicOps<4>::SubAndFetch(vtkTypeInt32* ref, vtkTypeInt32 val)
+-{
+- vtkTypeInt32 result;
+-#pragma omp atomic capture
+- {
+- (*ref) -= val;
+- result = *ref;
+- }
+-#pragma omp flush
+- return result;
+-}
+-
+-vtkTypeInt32 AtomicOps<4>::PreIncrement(vtkTypeInt32* ref)
+-{
+- vtkTypeInt32 result;
+-#pragma omp atomic capture
+- result = ++(*ref);
+-#pragma omp flush
+- return result;
+-}
+-
+-vtkTypeInt32 AtomicOps<4>::PreDecrement(vtkTypeInt32* ref)
+-{
+- vtkTypeInt32 result;
+-#pragma omp atomic capture
+- result = --(*ref);
+-#pragma omp flush
+- return result;
+-}
+-
+-vtkTypeInt32 AtomicOps<4>::PostIncrement(vtkTypeInt32* ref)
+-{
+- vtkTypeInt32 result;
+-#pragma omp atomic capture
+- result = (*ref)++;
+-#pragma omp flush
+- return result;
+-}
+-
+-vtkTypeInt32 AtomicOps<4>::PostDecrement(vtkTypeInt32* ref)
+-{
+- vtkTypeInt32 result;
+-#pragma omp atomic capture
+- result = (*ref)--;
+-#pragma omp flush
+- return result;
+-}
+-
+-vtkTypeInt32 AtomicOps<4>::Load(const vtkTypeInt32* ref)
+-{
+- vtkTypeInt32 result;
+-#pragma omp flush
+-#pragma omp atomic read
+- result = *ref;
+- return result;
+-}
+-
+-void AtomicOps<4>::Store(vtkTypeInt32* ref, vtkTypeInt32 val)
+-{
+-#pragma omp atomic write
+- *ref = val;
+-#pragma omp flush
+-}
+-
+-}
+--- a/Common/Core/SMP/OpenMP/vtkAtomic.h.in
++++ /dev/null
+@@ -1,293 +0,0 @@
+-/*=========================================================================
+-
+- Program: Visualization Toolkit
+- Module: vtkAtomic.h
+-
+- Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
+- All rights reserved.
+- See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
+-
+- This software is distributed WITHOUT ANY WARRANTY; without even
+- the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+- PURPOSE. See the above copyright notice for more information.
+-
+-=========================================================================*/
+-// .NAME vtkAtomic -
+-// .SECTION Description
+-
+-#ifndef vtkAtomic_h
+-#define vtkAtomic_h
+-
+-#include "vtkCommonCoreModule.h" // For export macro
+-#include "vtkAtomicTypeConcepts.h"
+-#include "vtkSystemIncludes.h"
+-
+-#include <cstddef>
+-
+-
+-#ifndef __VTK_WRAP__
+-namespace detail
+-{
+-
+-template <size_t size> class AtomicOps;
+-
+-template <> class VTKCOMMONCORE_EXPORT AtomicOps<8>
+-{
+-public:
+- typedef vtkTypeInt64 atomic_type;
+-
+- static vtkTypeInt64 AddAndFetch(vtkTypeInt64 *ref, vtkTypeInt64 val);
+- static vtkTypeInt64 SubAndFetch(vtkTypeInt64 *ref, vtkTypeInt64 val);
+- static vtkTypeInt64 PreIncrement(vtkTypeInt64 *ref);
+- static vtkTypeInt64 PreDecrement(vtkTypeInt64 *ref);
+- static vtkTypeInt64 PostIncrement(vtkTypeInt64 *ref);
+- static vtkTypeInt64 PostDecrement(vtkTypeInt64 *ref);
+- static vtkTypeInt64 Load(const vtkTypeInt64 *ref);
+- static void Store(vtkTypeInt64 *ref, vtkTypeInt64 val);
+-};
+-
+-template <> class VTKCOMMONCORE_EXPORT AtomicOps<4>
+-{
+-public:
+- typedef vtkTypeInt32 atomic_type;
+-
+- static vtkTypeInt32 AddAndFetch(vtkTypeInt32 *ref, vtkTypeInt32 val);
+- static vtkTypeInt32 SubAndFetch(vtkTypeInt32 *ref, vtkTypeInt32 val);
+- static vtkTypeInt32 PreIncrement(vtkTypeInt32 *ref);
+- static vtkTypeInt32 PreDecrement(vtkTypeInt32 *ref);
+- static vtkTypeInt32 PostIncrement(vtkTypeInt32 *ref);
+- static vtkTypeInt32 PostDecrement(vtkTypeInt32 *ref);
+- static vtkTypeInt32 Load(const vtkTypeInt32 *ref);
+- static void Store(vtkTypeInt32 *ref, vtkTypeInt32 val);
+-};
+-
+-} // detail
+-#endif // __VTK_WRAP__
+-
+-
+-template <typename T> class vtkAtomic : private vtk::atomic::detail::IntegralType<T>
+-{
+-private:
+- typedef detail::AtomicOps<sizeof(T)> Impl;
+-
+-public:
+- vtkAtomic() : Atomic(0)
+- {
+- }
+-
+- vtkAtomic(T val) : Atomic(static_cast<typename Impl::atomic_type>(val))
+- {
+- }
+-
+- vtkAtomic(const vtkAtomic<T> &atomic)
+- : Atomic(static_cast<typename Impl::atomic_type>(atomic.load()))
+- {
+- }
+-
+- T operator++()
+- {
+- return static_cast<T>(Impl::PreIncrement(&this->Atomic));
+- }
+-
+- T operator++(int)
+- {
+- return static_cast<T>(Impl::PostIncrement(&this->Atomic));
+- }
+-
+- T operator--()
+- {
+- return static_cast<T>(Impl::PreDecrement(&this->Atomic));
+- }
+-
+- T operator--(int)
+- {
+- return static_cast<T>(Impl::PostDecrement(&this->Atomic));
+- }
+-
+- T operator+=(T val)
+- {
+- return static_cast<T>(Impl::AddAndFetch(&this->Atomic,
+- static_cast<typename Impl::atomic_type>(val)));
+- }
+-
+- T operator-=(T val)
+- {
+- return static_cast<T>(Impl::SubAndFetch(&this->Atomic,
+- static_cast<typename Impl::atomic_type>(val)));
+- }
+-
+- operator T() const
+- {
+- return static_cast<T>(Impl::Load(&this->Atomic));
+- }
+-
+- T operator=(T val)
+- {
+- Impl::Store(&this->Atomic, static_cast<typename Impl::atomic_type>(val));
+- return val;
+- }
+-
+- vtkAtomic<T>& operator=(const vtkAtomic<T> &atomic)
+- {
+- this->store(atomic.load());
+- return *this;
+- }
+-
+- T load() const
+- {
+- return static_cast<T>(Impl::Load(&this->Atomic));
+- }
+-
+- void store(T val)
+- {
+- Impl::Store(&this->Atomic, static_cast<typename Impl::atomic_type>(val));
+- }
+-
+-private:
+- typename Impl::atomic_type Atomic;
+-};
+-
+-
+-template <typename T> class vtkAtomic<T*>
+-{
+-private:
+- typedef detail::AtomicOps<sizeof(T*)> Impl;
+-
+-public:
+- vtkAtomic() : Atomic(0)
+- {
+- }
+-
+- vtkAtomic(T* val)
+- : Atomic(reinterpret_cast<typename Impl::atomic_type>(val))
+- {
+- }
+-
+- vtkAtomic(const vtkAtomic<T*> &atomic)
+- : Atomic(reinterpret_cast<typename Impl::atomic_type>(atomic.load()))
+- {
+- }
+-
+- T* operator++()
+- {
+- return reinterpret_cast<T*>(Impl::AddAndFetch(&this->Atomic, sizeof(T)));
+- }
+-
+- T* operator++(int)
+- {
+- T* val = reinterpret_cast<T*>(Impl::AddAndFetch(&this->Atomic, sizeof(T)));
+- return --val;
+- }
+-
+- T* operator--()
+- {
+- return reinterpret_cast<T*>(Impl::SubAndFetch(&this->Atomic, sizeof(T)));
+- }
+-
+- T* operator--(int)
+- {
+- T* val = reinterpret_cast<T*>(Impl::AddAndFetch(&this->Atomic, sizeof(T)));
+- return ++val;
+- }
+-
+- T* operator+=(std::ptrdiff_t val)
+- {
+- return reinterpret_cast<T*>(Impl::AddAndFetch(&this->Atomic,
+- val * sizeof(T)));
+- }
+-
+- T* operator-=(std::ptrdiff_t val)
+- {
+- return reinterpret_cast<T*>(Impl::SubAndFetch(&this->Atomic,
+- val * sizeof(T)));
+- }
+-
+- operator T*() const
+- {
+- return reinterpret_cast<T*>(Impl::Load(&this->Atomic));
+- }
+-
+- T* operator=(T* val)
+- {
+- Impl::Store(&this->Atomic,
+- reinterpret_cast<typename Impl::atomic_type>(val));
+- return val;
+- }
+-
+- vtkAtomic<T*>& operator=(const vtkAtomic<T*> &atomic)
+- {
+- this->store(atomic.load());
+- return *this;
+- }
+-
+- T* load() const
+- {
+- return reinterpret_cast<T*>(Impl::Load(&this->Atomic));
+- }
+-
+- void store(T* val)
+- {
+- Impl::Store(&this->Atomic,
+- reinterpret_cast<typename Impl::atomic_type>(val));
+- }
+-
+-private:
+- typename Impl::atomic_type Atomic;
+-};
+-
+-
+-template <> class vtkAtomic<void*>
+-{
+-private:
+- typedef detail::AtomicOps<sizeof(void*)> Impl;
+-
+-public:
+- vtkAtomic() : Atomic(0)
+- {
+- }
+-
+- vtkAtomic(void* val)
+- : Atomic(reinterpret_cast<Impl::atomic_type>(val))
+- {
+- }
+-
+- vtkAtomic(const vtkAtomic<void*> &atomic)
+- : Atomic(reinterpret_cast<Impl::atomic_type>(atomic.load()))
+- {
+- }
+-
+- operator void*() const
+- {
+- return reinterpret_cast<void*>(Impl::Load(&this->Atomic));
+- }
+-
+- void* operator=(void* val)
+- {
+- Impl::Store(&this->Atomic,
+- reinterpret_cast<Impl::atomic_type>(val));
+- return val;
+- }
+-
+- vtkAtomic<void*>& operator=(const vtkAtomic<void*> &atomic)
+- {
+- this->store(atomic.load());
+- return *this;
+- }
+-
+- void* load() const
+- {
+- return reinterpret_cast<void*>(Impl::Load(&this->Atomic));
+- }
+-
+- void store(void* val)
+- {
+- Impl::Store(&this->Atomic,
+- reinterpret_cast<Impl::atomic_type>(val));
+- }
+-
+-private:
+- Impl::atomic_type Atomic;
+-};
+-
+-#endif
+-// VTK-HeaderTest-Exclude: vtkAtomic.h
+--- a/Common/Core/SMP/OpenMP/vtkSMPThreadLocalImpl.h.in
++++ b/Common/Core/SMP/OpenMP/vtkSMPThreadLocalImpl.h.in
+@@ -33,7 +33,6 @@
+ #define vtkSMPThreadLocalImpl_h
+
+ #include "vtkCommonCoreModule.h" // For export macro
+-#include "vtkAtomic.h"
+ #include "vtkConfigure.h"
+ #include "vtkSystemIncludes.h"
+
+ #include <atomic>
+@@ -49,7 +49,7 @@ typedef void* StoragePointerType;
+
+ struct Slot
+ {
+- vtkAtomic<ThreadIdType> ThreadId;
++ std::atomic<ThreadIdType> ThreadId;
+ omp_lock_t ModifyLock;
+ StoragePointerType Storage;
+
+@@ -66,7 +66,7 @@ private:
+ struct HashTableArray
+ {
+ size_t Size, SizeLg;
+- vtkAtomic<size_t> NumberOfEntries;
++ std::atomic<size_t> NumberOfEntries;
+ Slot *Slots;
+ HashTableArray *Prev;
+
+@@ -90,8 +90,8 @@ public:
+ size_t Size() const;
+
+ private:
+- vtkAtomic<HashTableArray*> Root;
+- vtkAtomic<size_t> Count;
++ std::atomic<HashTableArray*> Root;
++ std::atomic<size_t> Count;
+
+ friend class ThreadSpecificStorageIterator;
+ };
+--- a/Common/Core/SMP/Sequential/vtkAtomic.cxx
++++ /dev/null
+@@ -1,278 +0,0 @@
+-/*=========================================================================
+-
+- Program: Visualization Toolkit
+- Module: vtkAtomic.cxx
+-
+- Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
+- All rights reserved.
+- See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
+-
+- This software is distributed WITHOUT ANY WARRANTY; without even
+- the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+- PURPOSE. See the above copyright notice for more information.
+-
+-=========================================================================*/
+-
+-#include "vtkAtomic.h"
+-
+-#if !defined(VTK_GCC_ATOMICS_32) && !defined(VTK_APPLE_ATOMICS_32) && \
+- !defined(VTK_WINDOWS_ATOMICS_32)
+-#define VTK_LOCK_BASED_ATOMICS_32
+-#endif
+-
+-#if !defined(VTK_GCC_ATOMICS_64) && !defined(VTK_APPLE_ATOMICS_64) && \
+- !defined(VTK_WINDOWS_ATOMICS_64)
+-#define VTK_LOCK_BASED_ATOMICS_64
+-#endif
+-
+-#if defined(VTK_WINDOWS_ATOMICS_32) || defined(VTK_WINDOWS_ATOMICS_64)
+-#include "vtkWindows.h"
+-#endif
+-
+-#if defined(VTK_LOCK_BASED_ATOMICS_32) || defined(VTK_LOCK_BASED_ATOMICS_64)
+-
+-#include "vtkSimpleCriticalSection.h"
+-
+-class CriticalSectionGuard
+-{
+-public:
+- CriticalSectionGuard(vtkSimpleCriticalSection& cs)
+- : CriticalSection(cs)
+- {
+- this->CriticalSection.Lock();
+- }
+-
+- ~CriticalSectionGuard() { this->CriticalSection.Unlock(); }
+-
+-private:
+- // not copyable
+- CriticalSectionGuard(const CriticalSectionGuard&);
+- void operator=(const CriticalSectionGuard&);
+-
+- vtkSimpleCriticalSection& CriticalSection;
+-};
+-
+-#if defined(VTK_LOCK_BASED_ATOMICS_64)
+-detail::AtomicOps<8>::atomic_type::atomic_type(vtkTypeInt64 init)
+- : var(init)
+-{
+- this->csec = new vtkSimpleCriticalSection;
+-}
+-
+-detail::AtomicOps<8>::atomic_type::~atomic_type()
+-{
+- delete this->csec;
+-}
+-#endif
+-
+-#if defined(VTK_LOCK_BASED_ATOMICS_32)
+-detail::AtomicOps<4>::atomic_type::atomic_type(vtkTypeInt32 init)
+- : var(init)
+-{
+- this->csec = new vtkSimpleCriticalSection;
+-}
+-
+-detail::AtomicOps<4>::atomic_type::~atomic_type()
+-{
+- delete this->csec;
+-}
+-#endif
+-
+-#endif // VTK_LOCK_BASED_ATOMICS
+-
+-namespace detail
+-{
+-
+-#if defined(VTK_WINDOWS_ATOMICS_64) || defined(VTK_LOCK_BASED_ATOMICS_64)
+-
+-vtkTypeInt64 AtomicOps<8>::AddAndFetch(atomic_type* ref, vtkTypeInt64 val)
+-{
+-#if defined(VTK_WINDOWS_ATOMICS_64)
+-#if defined(VTK_HAS_INTERLOCKEDADD)
+- return InterlockedAdd64(ref, val);
+-#else
+- return InterlockedExchangeAdd64(ref, val) + val;
+-#endif
+-#else
+- CriticalSectionGuard csg(*ref->csec);
+- return ref->var += val;
+-#endif
+-}
+-
+-vtkTypeInt64 AtomicOps<8>::SubAndFetch(atomic_type* ref, vtkTypeInt64 val)
+-{
+-#if defined(VTK_WINDOWS_ATOMICS_64)
+-#if defined(VTK_HAS_INTERLOCKEDADD)
+- return InterlockedAdd64(ref, -val);
+-#else
+- return InterlockedExchangeAdd64(ref, -val) - val;
+-#endif
+-#else
+- CriticalSectionGuard csg(*ref->csec);
+- return ref->var -= val;
+-#endif
+-}
+-
+-vtkTypeInt64 AtomicOps<8>::PreIncrement(atomic_type* ref)
+-{
+-#if defined(VTK_WINDOWS_ATOMICS_64)
+- return InterlockedIncrement64(ref);
+-#else
+- CriticalSectionGuard csg(*ref->csec);
+- return ++(ref->var);
+-#endif
+-}
+-
+-vtkTypeInt64 AtomicOps<8>::PreDecrement(atomic_type* ref)
+-{
+-#if defined(VTK_WINDOWS_ATOMICS_64)
+- return InterlockedDecrement64(ref);
+-#else
+- CriticalSectionGuard csg(*ref->csec);
+- return --(ref->var);
+-#endif
+-}
+-
+-vtkTypeInt64 AtomicOps<8>::PostIncrement(atomic_type* ref)
+-{
+-#if defined(VTK_WINDOWS_ATOMICS_64)
+- vtkTypeInt64 val = InterlockedIncrement64(ref);
+- return --val;
+-#else
+- CriticalSectionGuard csg(*ref->csec);
+- return (ref->var)++;
+-#endif
+-}
+-
+-vtkTypeInt64 AtomicOps<8>::PostDecrement(atomic_type* ref)
+-{
+-#if defined(VTK_WINDOWS_ATOMICS_64)
+- vtkTypeInt64 val = InterlockedDecrement64(ref);
+- return ++val;
+-#else
+- CriticalSectionGuard csg(*ref->csec);
+- return (ref->var)--;
+-#endif
+-}
+-
+-vtkTypeInt64 AtomicOps<8>::Load(const atomic_type* ref)
+-{
+-#if defined(VTK_WINDOWS_ATOMICS_64)
+- vtkTypeInt64 val;
+- InterlockedExchange64(&val, *ref);
+- return val;
+-#else
+- CriticalSectionGuard csg(*ref->csec);
+- return ref->var;
+-#endif
+-}
+-
+-void AtomicOps<8>::Store(atomic_type* ref, vtkTypeInt64 val)
+-{
+-#if defined(VTK_WINDOWS_ATOMICS_64)
+- InterlockedExchange64(ref, val);
+-#else
+- CriticalSectionGuard csg(*ref->csec);
+- ref->var = val;
+-#endif
+-}
+-
+-#endif // defined(VTK_WINDOWS_ATOMICS_64) || defined(VTK_LOCK_BASED_ATOMICS_64)
+-
+-#if defined(VTK_WINDOWS_ATOMICS_32) || defined(VTK_LOCK_BASED_ATOMICS_32)
+-
+-vtkTypeInt32 AtomicOps<4>::AddAndFetch(atomic_type* ref, vtkTypeInt32 val)
+-{
+-#if defined(VTK_WINDOWS_ATOMICS_32)
+-#if defined(VTK_HAS_INTERLOCKEDADD)
+- return InterlockedAdd(reinterpret_cast<long*>(ref), val);
+-#else
+- return InterlockedExchangeAdd(reinterpret_cast<long*>(ref), val) + val;
+-#endif
+-#else
+- CriticalSectionGuard csg(*ref->csec);
+- return ref->var += val;
+-#endif
+-}
+-
+-vtkTypeInt32 AtomicOps<4>::SubAndFetch(atomic_type* ref, vtkTypeInt32 val)
+-{
+-#if defined(VTK_WINDOWS_ATOMICS_32)
+-#if defined(VTK_HAS_INTERLOCKEDADD)
+- return InterlockedAdd(reinterpret_cast<long*>(ref), -val);
+-#else
+- return InterlockedExchangeAdd(reinterpret_cast<long*>(ref), -val) - val;
+-#endif
+-#else
+- CriticalSectionGuard csg(*ref->csec);
+- return ref->var -= val;
+-#endif
+-}
+-
+-vtkTypeInt32 AtomicOps<4>::PreIncrement(atomic_type* ref)
+-{
+-#if defined(VTK_WINDOWS_ATOMICS_32)
+- return InterlockedIncrement(reinterpret_cast<long*>(ref));
+-#else
+- CriticalSectionGuard csg(*ref->csec);
+- return ++(ref->var);
+-#endif
+-}
+-
+-vtkTypeInt32 AtomicOps<4>::PreDecrement(atomic_type* ref)
+-{
+-#if defined(VTK_WINDOWS_ATOMICS_32)
+- return InterlockedDecrement(reinterpret_cast<long*>(ref));
+-#else
+- CriticalSectionGuard csg(*ref->csec);
+- return --(ref->var);
+-#endif
+-}
+-
+-vtkTypeInt32 AtomicOps<4>::PostIncrement(atomic_type* ref)
+-{
+-#if defined(VTK_WINDOWS_ATOMICS_32)
+- vtkTypeInt32 val = InterlockedIncrement(reinterpret_cast<long*>(ref));
+- return --val;
+-#else
+- CriticalSectionGuard csg(*ref->csec);
+- return (ref->var)++;
+-#endif
+-}
+-
+-vtkTypeInt32 AtomicOps<4>::PostDecrement(atomic_type* ref)
+-{
+-#if defined(VTK_WINDOWS_ATOMICS_32)
+- vtkTypeInt32 val = InterlockedDecrement(reinterpret_cast<long*>(ref));
+- return ++val;
+-#else
+- CriticalSectionGuard csg(*ref->csec);
+- return (ref->var)--;
+-#endif
+-}
+-
+-vtkTypeInt32 AtomicOps<4>::Load(const atomic_type* ref)
+-{
+-#if defined(VTK_WINDOWS_ATOMICS_32)
+- long val;
+- InterlockedExchange(&val, *ref);
+- return val;
+-#else
+- CriticalSectionGuard csg(*ref->csec);
+- return ref->var;
+-#endif
+-}
+-
+-void AtomicOps<4>::Store(atomic_type* ref, vtkTypeInt32 val)
+-{
+-#if defined(VTK_WINDOWS_ATOMICS_32)
+- InterlockedExchange(reinterpret_cast<long*>(ref), val);
+-#else
+- CriticalSectionGuard csg(*ref->csec);
+- ref->var = val;
+-#endif
+-}
+-
+-#endif // defined(VTK_WINDOWS_ATOMICS_32) || defined(VTK_LOCK_BASED_ATOMICS_32)
+-
+-} // namespace detail
diff --git a/sci-libs/vtk/files/vtk-9.0.3-use-std-atomic-part2.patch b/sci-libs/vtk/files/vtk-9.0.3-use-std-atomic-part2.patch
new file mode 100644
index 00000000000..e2520af7748
--- /dev/null
+++ b/sci-libs/vtk/files/vtk-9.0.3-use-std-atomic-part2.patch
@@ -0,0 +1,851 @@
+--- a/Common/Core/SMP/Sequential/vtkAtomic.h.in
++++ /dev/null
+@@ -1,467 +0,0 @@
+-/*=========================================================================
+-
+- Program: Visualization Toolkit
+- Module: vtkAtomic.h
+-
+- Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
+- All rights reserved.
+- See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
+-
+- This software is distributed WITHOUT ANY WARRANTY; without even
+- the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+- PURPOSE. See the above copyright notice for more information.
+-
+-=========================================================================*/
+-// .NAME vtkAtomic - Provides support for atomic integers
+-// .SECTION Description
+-// Objects of atomic types are C++ objects that are free from data races;
+-// that is, if one thread writes to an atomic object while another thread
+-// reads from it, the behavior is well-defined. vtkAtomic provides
+-// a subset of the std::atomic API and implementation for the following types,
+-// 32 bit signed and unsigned integers, 64 bit signed and unsigned integers,
+-// and pointers. For these types, vtkAtomic defines a
+-// number of operations that happen atomically - without interruption
+-// by another thread. Furthermore, these operations happen in a
+-// sequentially-consistent way and use full memory fences. This means
+-// that operations relating to atomic variables happen in the specified
+-// order and the results are made visible to other processing cores to
+-// guarantee proper sequential operation. Other memory access patterns
+-// supported by std::atomic are not currently supported.
+-//
+-// Note that when atomic operations are not available on a particular
+-// platform or compiler, mutexes, which are significantly slower, are used
+-// as a fallback.
+-
+-#ifndef vtkAtomic_h
+-#define vtkAtomic_h
+-
+-#include "vtkCommonCoreModule.h" // For export macro
+-#include "vtkAtomicTypeConcepts.h"
+-#include "vtkConfigure.h"
+-#include "vtkSystemIncludes.h"
+-#include "vtkType.h"
+-
+-#include <cstddef>
+-
+-#cmakedefine VTK_HAVE_ATOMIC_BUILTINS
+-
+-// Assume 64-bit atomic operations are not available on 32 bit platforms
+-#if defined(VTK_HAVE_ATOMIC_BUILTINS)
+-# define VTK_GCC_ATOMICS_32
+-# if VTK_SIZEOF_VOID_P == 8
+-# define VTK_GCC_ATOMICS_64
+-# endif
+-#elif defined(_WIN32) && defined(_MSC_VER)
+-# define VTK_WINDOWS_ATOMICS_32
+-# if VTK_SIZEOF_VOID_P == 8
+-# define VTK_WINDOWS_ATOMICS_64
+-# endif
+-#endif
+-
+-
+-#if defined(_WIN32) && defined(_MSC_VER)
+-// disable warning about the padding due to alignment
+-# pragma warning(disable:4324)
+-# define VTK_ALIGN(X) __declspec(align(X))
+-#elif defined(__GNUC__) // gcc compatible compiler
+-# define VTK_ALIGN(X) __attribute__ ((aligned (X)))
+-#else
+-# define VTK_ALIGN(X)
+-#endif
+-
+-
+-class vtkSimpleCriticalSection;
+-
+-
+-#ifndef __VTK_WRAP__
+-namespace detail
+-{
+-
+-template <size_t size> class AtomicOps;
+-
+-#if defined(VTK_GCC_ATOMICS_64)
+-template <> class AtomicOps<8>
+-{
+-public:
+- typedef vtkTypeInt64 VTK_ALIGN(8) atomic_type;
+- typedef vtkTypeInt64 value_type;
+-
+- static value_type AddAndFetch(value_type *ref, value_type val)
+- {
+- return __atomic_add_fetch(ref, val, __ATOMIC_SEQ_CST);
+- }
+-
+- static value_type SubAndFetch(value_type *ref, value_type val)
+- {
+- return __atomic_sub_fetch(ref, val, __ATOMIC_SEQ_CST);
+- }
+-
+- static value_type PreIncrement(value_type *ref)
+- {
+- return __atomic_add_fetch(ref, 1, __ATOMIC_SEQ_CST);
+- }
+-
+- static value_type PreDecrement(value_type *ref)
+- {
+- return __atomic_sub_fetch(ref, 1, __ATOMIC_SEQ_CST);
+- }
+-
+- static value_type PostIncrement(value_type *ref)
+- {
+- return __atomic_fetch_add(ref, 1, __ATOMIC_SEQ_CST);
+- }
+-
+- static value_type PostDecrement(value_type *ref)
+- {
+- return __atomic_fetch_sub(ref, 1, __ATOMIC_SEQ_CST);
+- }
+-
+- static value_type Load(const value_type *ref)
+- {
+- return __atomic_load_n(ref, __ATOMIC_SEQ_CST);
+- }
+-
+- static void Store(value_type *ref, value_type val)
+- {
+- __atomic_store_n(ref, val, __ATOMIC_SEQ_CST);
+- }
+-};
+-
+-#else
+-
+-template <> class VTKCOMMONCORE_EXPORT AtomicOps<8>
+-{
+-public:
+-#if defined(VTK_WINDOWS_ATOMICS_64)
+- typedef vtkTypeInt64 VTK_ALIGN(8) atomic_type;
+-#else
+- struct VTKCOMMONCORE_EXPORT atomic_type
+- {
+- vtkTypeInt64 var;
+- vtkSimpleCriticalSection *csec;
+-
+- atomic_type(vtkTypeInt64 init);
+- ~atomic_type();
+- };
+-#endif
+- typedef vtkTypeInt64 value_type;
+-
+- static vtkTypeInt64 AddAndFetch(atomic_type *ref, vtkTypeInt64 val);
+- static vtkTypeInt64 SubAndFetch(atomic_type *ref, vtkTypeInt64 val);
+- static vtkTypeInt64 PreIncrement(atomic_type *ref);
+- static vtkTypeInt64 PreDecrement(atomic_type *ref);
+- static vtkTypeInt64 PostIncrement(atomic_type *ref);
+- static vtkTypeInt64 PostDecrement(atomic_type *ref);
+- static vtkTypeInt64 Load(const atomic_type *ref);
+- static void Store(atomic_type *ref, vtkTypeInt64 val);
+-};
+-
+-#endif
+-
+-#if defined(VTK_GCC_ATOMICS_32)
+-template <> class AtomicOps<4>
+-{
+-public:
+- typedef vtkTypeInt32 VTK_ALIGN(4) atomic_type;
+- typedef vtkTypeInt32 value_type;
+-
+- static value_type AddAndFetch(value_type *ref, value_type val)
+- {
+- return __atomic_add_fetch(ref, val, __ATOMIC_SEQ_CST);
+- }
+-
+- static value_type SubAndFetch(value_type *ref, value_type val)
+- {
+- return __atomic_sub_fetch(ref, val, __ATOMIC_SEQ_CST);
+- }
+-
+- static value_type PreIncrement(value_type *ref)
+- {
+- return __atomic_add_fetch(ref, 1, __ATOMIC_SEQ_CST);
+- }
+-
+- static value_type PreDecrement(value_type *ref)
+- {
+- return __atomic_sub_fetch(ref, 1, __ATOMIC_SEQ_CST);
+- }
+-
+- static value_type PostIncrement(value_type *ref)
+- {
+- return __atomic_fetch_add(ref, 1, __ATOMIC_SEQ_CST);
+- }
+-
+- static value_type PostDecrement(value_type *ref)
+- {
+- return __atomic_fetch_sub(ref, 1, __ATOMIC_SEQ_CST);
+- }
+-
+- static value_type Load(const value_type *ref)
+- {
+- return __atomic_load_n(ref, __ATOMIC_SEQ_CST);
+- }
+-
+- static void Store(value_type *ref, value_type val)
+- {
+- __atomic_store_n(ref, val, __ATOMIC_SEQ_CST);
+- }
+-};
+-
+-#else
+-
+-template <> class VTKCOMMONCORE_EXPORT AtomicOps<4>
+-{
+-public:
+-#if defined(VTK_WINDOWS_ATOMICS_32)
+- typedef vtkTypeInt32 VTK_ALIGN(4) atomic_type;
+-#else
+- struct VTKCOMMONCORE_EXPORT atomic_type
+- {
+- vtkTypeInt32 var;
+- vtkSimpleCriticalSection *csec;
+-
+- atomic_type(vtkTypeInt32 init);
+- ~atomic_type();
+- };
+-#endif
+- typedef vtkTypeInt32 value_type;
+-
+- static vtkTypeInt32 AddAndFetch(atomic_type *ref, vtkTypeInt32 val);
+- static vtkTypeInt32 SubAndFetch(atomic_type *ref, vtkTypeInt32 val);
+- static vtkTypeInt32 PreIncrement(atomic_type *ref);
+- static vtkTypeInt32 PreDecrement(atomic_type *ref);
+- static vtkTypeInt32 PostIncrement(atomic_type *ref);
+- static vtkTypeInt32 PostDecrement(atomic_type *ref);
+- static vtkTypeInt32 Load(const atomic_type *ref);
+- static void Store(atomic_type *ref, vtkTypeInt32 val);
+-};
+-
+-#endif
+-}
+-#endif // __VTK_WRAP__
+-
+-template <typename T> class vtkAtomic : private vtk::atomic::detail::IntegralType<T>
+-{
+-private:
+- typedef detail::AtomicOps<sizeof(T)> Impl;
+-
+-public:
+- vtkAtomic() : Atomic(0)
+- {
+- }
+-
+- vtkAtomic(T val) : Atomic(static_cast<typename Impl::value_type>(val))
+- {
+- }
+-
+- vtkAtomic(const vtkAtomic<T> &atomic)
+- : Atomic(static_cast<typename Impl::value_type>(atomic.load()))
+- {
+- }
+-
+- T operator++()
+- {
+- return static_cast<T>(Impl::PreIncrement(&this->Atomic));
+- }
+-
+- T operator++(int)
+- {
+- return static_cast<T>(Impl::PostIncrement(&this->Atomic));
+- }
+-
+- T operator--()
+- {
+- return static_cast<T>(Impl::PreDecrement(&this->Atomic));
+- }
+-
+- T operator--(int)
+- {
+- return static_cast<T>(Impl::PostDecrement(&this->Atomic));
+- }
+-
+- T operator+=(T val)
+- {
+- return static_cast<T>(Impl::AddAndFetch(&this->Atomic,
+- static_cast<typename Impl::value_type>(val)));
+- }
+-
+- T operator-=(T val)
+- {
+- return static_cast<T>(Impl::SubAndFetch(&this->Atomic,
+- static_cast<typename Impl::value_type>(val)));
+- }
+-
+- operator T() const
+- {
+- return static_cast<T>(Impl::Load(&this->Atomic));
+- }
+-
+- T operator=(T val)
+- {
+- Impl::Store(&this->Atomic, static_cast<typename Impl::value_type>(val));
+- return val;
+- }
+-
+- vtkAtomic<T>& operator=(const vtkAtomic<T> &atomic)
+- {
+- this->store(atomic.load());
+- return *this;
+- }
+-
+- T load() const
+- {
+- return static_cast<T>(Impl::Load(&this->Atomic));
+- }
+-
+- void store(T val)
+- {
+- Impl::Store(&this->Atomic, static_cast<typename Impl::value_type>(val));
+- }
+-
+-private:
+- typename Impl::atomic_type Atomic;
+-};
+-
+-
+-template <typename T> class vtkAtomic<T*>
+-{
+-private:
+- typedef detail::AtomicOps<sizeof(T*)> Impl;
+-
+-public:
+- vtkAtomic() : Atomic(0)
+- {
+- }
+-
+- vtkAtomic(T* val)
+- : Atomic(reinterpret_cast<typename Impl::value_type>(val))
+- {
+- }
+-
+- vtkAtomic(const vtkAtomic<T*> &atomic)
+- : Atomic(reinterpret_cast<typename Impl::value_type>(atomic.load()))
+- {
+- }
+-
+- T* operator++()
+- {
+- return reinterpret_cast<T*>(Impl::AddAndFetch(&this->Atomic, sizeof(T)));
+- }
+-
+- T* operator++(int)
+- {
+- T* val = reinterpret_cast<T*>(Impl::AddAndFetch(&this->Atomic, sizeof(T)));
+- return --val;
+- }
+-
+- T* operator--()
+- {
+- return reinterpret_cast<T*>(Impl::SubAndFetch(&this->Atomic, sizeof(T)));
+- }
+-
+- T* operator--(int)
+- {
+- T* val = reinterpret_cast<T*>(Impl::AddAndFetch(&this->Atomic, sizeof(T)));
+- return ++val;
+- }
+-
+- T* operator+=(std::ptrdiff_t val)
+- {
+- return reinterpret_cast<T*>(Impl::AddAndFetch(&this->Atomic,
+- val * sizeof(T)));
+- }
+-
+- T* operator-=(std::ptrdiff_t val)
+- {
+- return reinterpret_cast<T*>(Impl::SubAndFetch(&this->Atomic,
+- val * sizeof(T)));
+- }
+-
+- operator T*() const
+- {
+- return reinterpret_cast<T*>(Impl::Load(&this->Atomic));
+- }
+-
+- T* operator=(T* val)
+- {
+- Impl::Store(&this->Atomic,
+- reinterpret_cast<typename Impl::value_type>(val));
+- return val;
+- }
+-
+- vtkAtomic<T*>& operator=(const vtkAtomic<T*> &atomic)
+- {
+- this->store(atomic.load());
+- return *this;
+- }
+-
+- T* load() const
+- {
+- return reinterpret_cast<T*>(Impl::Load(&this->Atomic));
+- }
+-
+- void store(T* val)
+- {
+- Impl::Store(&this->Atomic,
+- reinterpret_cast<typename Impl::value_type>(val));
+- }
+-
+-private:
+- typename Impl::atomic_type Atomic;
+-};
+-
+-
+-template <> class vtkAtomic<void*>
+-{
+-private:
+- typedef detail::AtomicOps<sizeof(void*)> Impl;
+-
+-public:
+- vtkAtomic() : Atomic(0)
+- {
+- }
+-
+- vtkAtomic(void* val)
+- : Atomic(reinterpret_cast<Impl::value_type>(val))
+- {
+- }
+-
+- vtkAtomic(const vtkAtomic<void*> &atomic)
+- : Atomic(reinterpret_cast<Impl::value_type>(atomic.load()))
+- {
+- }
+-
+- operator void*() const
+- {
+- return reinterpret_cast<void*>(Impl::Load(&this->Atomic));
+- }
+-
+- void* operator=(void* val)
+- {
+- Impl::Store(&this->Atomic,
+- reinterpret_cast<Impl::value_type>(val));
+- return val;
+- }
+-
+- vtkAtomic<void*>& operator=(const vtkAtomic<void*> &atomic)
+- {
+- this->store(atomic.load());
+- return *this;
+- }
+-
+- void* load() const
+- {
+- return reinterpret_cast<void*>(Impl::Load(&this->Atomic));
+- }
+-
+- void store(void* val)
+- {
+- Impl::Store(&this->Atomic,
+- reinterpret_cast<Impl::value_type>(val));
+- }
+-
+-private:
+- Impl::atomic_type Atomic;
+-};
+-
+-#endif
+-// VTK-HeaderTest-Exclude: vtkAtomic.h
+--- a/Common/Core/SMP/TBB/vtkAtomic.h.in
++++ /dev/null
+@@ -1,247 +0,0 @@
+- /*=========================================================================
+-
+- Program: Visualization Toolkit
+- Module: vtkAtomic.h
+-
+- Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
+- All rights reserved.
+- See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
+-
+- This software is distributed WITHOUT ANY WARRANTY; without even
+- the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+- PURPOSE. See the above copyright notice for more information.
+-
+-=========================================================================*/
+-// .NAME vtkAtomic -
+-// .SECTION Description
+-
+-#ifndef vtkAtomic_h
+-#define vtkAtomic_h
+-
+-#include "vtkAtomicTypeConcepts.h"
+-
+-#ifdef _MSC_VER
+-# pragma push_macro("__TBB_NO_IMPLICIT_LINKAGE")
+-# define __TBB_NO_IMPLICIT_LINKAGE 1
+-#endif
+-
+-#include <tbb/atomic.h>
+-
+-#ifdef _MSC_VER
+-# pragma pop_macro("__TBB_NO_IMPLICIT_LINKAGE")
+-#endif
+-
+-#include <cstddef>
+-
+-
+-template <typename T> class vtkAtomic : private vtk::atomic::detail::IntegralType<T>
+-{
+-public:
+- vtkAtomic()
+- {
+- this->Atomic = 0;
+- }
+-
+- vtkAtomic(T val)
+- {
+- this->Atomic = val;
+- }
+-
+- vtkAtomic(const vtkAtomic<T> &atomic)
+- {
+- this->Atomic = atomic.Atomic;
+- }
+-
+- T operator++()
+- {
+- return ++this->Atomic;
+- }
+-
+- T operator++(int)
+- {
+- return this->Atomic++;
+- }
+-
+- T operator--()
+- {
+- return --this->Atomic;
+- }
+-
+- T operator--(int)
+- {
+- return this->Atomic--;
+- }
+-
+- T operator+=(T val)
+- {
+- return this->Atomic += val;
+- }
+-
+- T operator-=(T val)
+- {
+- return this->Atomic -= val;
+- }
+-
+- operator T() const
+- {
+- return this->Atomic;
+- }
+-
+- T operator=(T val)
+- {
+- this->Atomic = val;
+- return val;
+- }
+-
+- vtkAtomic<T>& operator=(const vtkAtomic<T> &atomic)
+- {
+- this->Atomic = atomic.Atomic;
+- return *this;
+- }
+-
+- T load() const
+- {
+- return this->Atomic;
+- }
+-
+- void store(T val)
+- {
+- this->Atomic = val;
+- }
+-
+-private:
+- tbb::atomic<T> Atomic;
+-};
+-
+-
+-template <typename T> class vtkAtomic<T*>
+-{
+-public:
+- vtkAtomic()
+- {
+- this->Atomic = 0;
+- }
+-
+- vtkAtomic(T* val)
+- {
+- this->Atomic = val;
+- }
+-
+- vtkAtomic(const vtkAtomic<T*> &atomic)
+- {
+- this->Atomic = atomic.Atomic;
+- }
+-
+- T* operator++()
+- {
+- return ++this->Atomic;
+- }
+-
+- T* operator++(int)
+- {
+- return this->Atomic++;
+- }
+-
+- T* operator--()
+- {
+- return --this->Atomic;
+- }
+-
+- T* operator--(int)
+- {
+- return this->Atomic--;
+- }
+-
+- T* operator+=(std::ptrdiff_t val)
+- {
+- return this->Atomic += val;
+- }
+-
+- T* operator-=(std::ptrdiff_t val)
+- {
+- return this->Atomic -= val;
+- }
+-
+- operator T*() const
+- {
+- return this->Atomic;
+- }
+-
+- T* operator=(T* val)
+- {
+- this->Atomic = val;
+- return val;
+- }
+-
+- vtkAtomic<T*>& operator=(const vtkAtomic<T*> &atomic)
+- {
+- this->Atomic = atomic.Atomic;
+- return *this;
+- }
+-
+- T* load() const
+- {
+- return this->Atomic;
+- }
+-
+- void store(T* val)
+- {
+- this->Atomic = val;
+- }
+-
+-private:
+- tbb::atomic<T*> Atomic;
+-};
+-
+-
+-template <> class vtkAtomic<void*>
+-{
+-public:
+- vtkAtomic()
+- {
+- this->Atomic = 0;
+- }
+-
+- vtkAtomic(void* val)
+- {
+- this->Atomic = val;
+- }
+-
+- vtkAtomic(const vtkAtomic<void*> &atomic)
+- {
+- this->Atomic = atomic.Atomic;
+- }
+-
+- operator void*() const
+- {
+- return this->Atomic;
+- }
+-
+- void* operator=(void* val)
+- {
+- this->Atomic = val;
+- return val;
+- }
+-
+- vtkAtomic<void*>& operator=(const vtkAtomic<void*> &atomic)
+- {
+- this->Atomic = atomic.Atomic;
+- return *this;
+- }
+-
+- void* load() const
+- {
+- return this->Atomic;
+- }
+-
+- void store(void* val)
+- {
+- this->Atomic = val;
+- }
+-
+-private:
+- tbb::atomic<void*> Atomic;
+-};
+-
+-#endif
+-// VTK-HeaderTest-Exclude: vtkAtomic.h
+--- a/Common/Core/vtkAtomicTypeConcepts.h
++++ /dev/null
+@@ -1,48 +0,0 @@
+-/*=========================================================================
+-
+- Program: Visualization Toolkit
+- Module: vtkAtomicTypeConcepts.h
+-
+- Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
+- All rights reserved.
+- See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
+-
+- This software is distributed WITHOUT ANY WARRANTY; without even
+- the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+- PURPOSE. See the above copyright notice for more information.
+-
+-=========================================================================*/
+-
+-#ifndef vtkAtomicTypeConcepts_h
+-#define vtkAtomicTypeConcepts_h
+-
+-#include <limits>
+-
+-namespace vtk
+-{
+-namespace atomic
+-{
+-namespace detail
+-{
+-
+-template <bool>
+-struct CompileTimeCheck;
+-template <>
+-struct CompileTimeCheck<true>
+-{
+-};
+-
+-template <typename T>
+-struct IntegralType
+-{
+- CompileTimeCheck<std::numeric_limits<T>::is_specialized && std::numeric_limits<T>::is_integer &&
+- (sizeof(T) == 4 || sizeof(T) == 8)>
+- c;
+-};
+-
+-} // detail
+-} // atomic
+-} // vtk
+-
+-#endif
+-// VTK-HeaderTest-Exclude: vtkAtomicTypeConcepts.h
+--- a/Common/Core/vtkSMPSelection.cmake
++++ b/Common/Core/vtkSMPSelection.cmake
+@@ -25,7 +25,6 @@ if (VTK_SMP_IMPLEMENTATION_TYPE STREQUAL "TBB")
+ list(APPEND vtk_smp_sources
+ "${vtk_smp_implementation_dir}/vtkSMPTools.cxx")
+ list(APPEND vtk_smp_headers_to_configure
+- vtkAtomic.h
+ vtkSMPToolsInternal.h
+ vtkSMPThreadLocal.h)
+
+@@ -46,14 +45,6 @@ elseif (VTK_SMP_IMPLEMENTATION_TYPE STREQUAL "OpenMP")
+
+ if (OpenMP_CXX_SPEC_DATE AND NOT "${OpenMP_CXX_SPEC_DATE}" LESS "201107")
+ set(vtk_smp_use_default_atomics OFF)
+- list(APPEND vtk_smp_sources
+- "${vtk_smp_implementation_dir}/vtkAtomic.cxx")
+- list(APPEND vtk_smp_headers_to_configure
+- vtkAtomic.h)
+-
+- set_source_files_properties(vtkAtomic.cxx
+- PROPERITES
+- COMPILE_FLAGS "${OpenMP_CXX_FLAGS}")
+ else()
+ message(WARNING
+ "Required OpenMP version (3.1) for atomics not detected. Using default "
+@@ -74,29 +65,7 @@ if (vtk_smp_use_default_atomics)
+
+ include("${CMAKE_CURRENT_SOURCE_DIR}/vtkTestBuiltins.cmake")
+
+- set(vtkAtomic_defines)
+-
+- # Check for atomic functions
+- if (WIN32)
+- check_symbol_exists(InterlockedAdd "windows.h" VTK_HAS_INTERLOCKEDADD)
+-
+- if (VTK_HAS_INTERLOCKEDADD)
+- list(APPEND vtkAtomic_defines "VTK_HAS_INTERLOCKEDADD")
+- endif ()
+- endif()
+-
+- set_source_files_properties(vtkAtomic.cxx
+- PROPERITES
+- COMPILE_DEFINITIONS "${vtkAtomic_defines}")
+-
+ set(vtk_atomics_default_impl_dir "${CMAKE_CURRENT_SOURCE_DIR}/SMP/Sequential")
+- list(APPEND vtk_smp_sources
+- "${vtk_atomics_default_impl_dir}/vtkAtomic.cxx")
+- configure_file(
+- "${vtk_atomics_default_impl_dir}/vtkAtomic.h.in"
+- "${CMAKE_CURRENT_BINARY_DIR}/vtkAtomic.h")
+- list(APPEND vtk_smp_headers
+- "${CMAKE_CURRENT_BINARY_DIR}/vtkAtomic.h")
+ endif()
+
+ foreach (vtk_smp_header IN LISTS vtk_smp_headers_to_configure)
+--- a/Common/Core/vtkTimeStamp.cxx
++++ b/Common/Core/vtkTimeStamp.cxx
+@@ -47,10 +47,8 @@ void vtkTimeStamp::Modified()
+ //
+ // The last solution has been decided to have the smallest downside of these.
+ //
+- // static const vtkAtomicUIntXX* GlobalTimeStamp = new vtkAtomicUIntXX(0);
+- //
+ // Good luck!
+-#if defined(VTK_USE_64BIT_TIMESTAMPS) || VTK_SIZEOF_VOID_P == 8
++#if defined(VTK_USE_64BIT_TIMESTAMPS) || (VTK_SIZEOF_VOID_P == 8)
+ static std::atomic<uint64_t> GlobalTimeStamp(0U);
+ #else
+ static std::atomic<uint32_t> GlobalTimeStamp(0U);
+--- /dev/null
++++ b/Documentation/release/dev/vtkAtomicRemoved.md
+@@ -0,0 +1,4 @@
++## Removed vtkAtomic in favor of C++11 std::atomic
++
++- Removed vtkAtomic in favor of C++11 std::atomic
++- vtkAtomic.h and vtkAtomicTypeConcepts.h are no longer installed
+--
+GitLab
+
+
diff --git a/sci-libs/vtk/vtk-9.0.3-r4.ebuild b/sci-libs/vtk/vtk-9.0.3-r4.ebuild
new file mode 100644
index 00000000000..02c39405f38
--- /dev/null
+++ b/sci-libs/vtk/vtk-9.0.3-r4.ebuild
@@ -0,0 +1,557 @@
+# Copyright 1999-2021 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+EAPI=7
+
+# TODO:
+# - add USE flag for remote modules? Those modules can be downloaded
+# properly before building.
+
+PYTHON_COMPAT=( python3_{8..10} )
+WEBAPP_OPTIONAL=yes
+WEBAPP_MANUAL_SLOT=yes
+
+inherit check-reqs cmake cuda flag-o-matic java-pkg-opt-2 python-single-r1 toolchain-funcs virtualx webapp
+
+# Short package version
+MY_PV="$(ver_cut 1-2)"
+
+DESCRIPTION="The Visualization Toolkit"
+HOMEPAGE="https://www.vtk.org/"
+SRC_URI="
+ https://www.vtk.org/files/release/${MY_PV}/VTK-${PV}.tar.gz
+ https://www.vtk.org/files/release/${MY_PV}/VTKData-${PV}.tar.gz
+ doc? ( https://www.vtk.org/files/release/${MY_PV}/vtkDocHtml-${PV}.tar.gz )
+ examples? ( https://www.vtk.org/files/release/${MY_PV}/VTKLargeData-${PV}.tar.gz )
+ test? (
+ https://www.vtk.org/files/release/${MY_PV}/VTKLargeData-${PV}.tar.gz
+ )
+"
+S="${WORKDIR}/VTK-${PV}"
+
+LICENSE="BSD LGPL-2"
+SLOT="0/${MY_PV}"
+KEYWORDS="~amd64 ~arm ~arm64 ~x86 ~amd64-linux ~x86-linux"
+# Note: external xdmf2 has no recognized target
+IUSE="+X all-modules boost cuda doc examples ffmpeg gdal imaging java
+ +json kits mpi mysql odbc offscreen openmp pegtl postgres python
+ qt5 +rendering tbb theora tk video_cards_nvidia views web"
+
+RESTRICT="!test? ( test )"
+
+REQUIRED_USE="
+ all-modules? ( boost ffmpeg gdal imaging mysql odbc postgres qt5 rendering theora views )
+ cuda? ( X video_cards_nvidia )
+ java? ( rendering )
+ python? ( ${PYTHON_REQUIRED_USE} )
+ qt5? ( X rendering )
+ tk? ( X rendering python )
+ web? ( python )
+"
+
+RDEPEND="
+ app-arch/lz4
+ app-arch/xz-utils
+ dev-db/sqlite
+ dev-cpp/eigen[cuda?,openmp?]
+ dev-libs/double-conversion:=
+ dev-libs/expat
+ dev-libs/icu:=
+ dev-libs/libxml2:2
+ dev-libs/pugixml
+ media-libs/freetype
+ media-libs/libogg
+ media-libs/libpng
+ media-libs/libtheora
+ media-libs/tiff
+ <sci-libs/hdf5-1.12:=[mpi=]
+ sci-libs/kissfft[openmp?]
+ sci-libs/netcdf:=[mpi=]
+ sys-libs/zlib
+ virtual/jpeg
+ all-modules? ( sci-geosciences/liblas[gdal] )
+ boost? ( dev-libs/boost:=[mpi?] )
+ cuda? ( dev-util/nvidia-cuda-toolkit:= )
+ ffmpeg? ( media-video/ffmpeg:= )
+ gdal? ( sci-libs/gdal:= )
+ java? ( >=virtual/jdk-1.8:* )
+ json? ( dev-libs/jsoncpp:= )
+ mpi? (
+ sci-libs/h5part
+ sys-cluster/openmpi[cxx,romio]
+ )
+ mysql? ( dev-db/mariadb-connector-c )
+ odbc? ( dev-db/unixODBC )
+ offscreen? ( media-libs/mesa[osmesa] )
+ postgres? ( dev-db/postgresql:= )
+ python? ( ${PYTHON_DEPS} )
+ qt5? (
+ dev-qt/qtcore:5
+ dev-qt/qtsql:5
+ dev-qt/qtwidgets:5
+ )
+ rendering? (
+ media-libs/freeglut
+ media-libs/glew:=
+ sci-libs/proj:=
+ virtual/opengl
+ x11-libs/gl2ps
+ )
+ tbb? ( <dev-cpp/tbb-2022 )
+ tk? ( dev-lang/tk:= )
+ video_cards_nvidia? ( x11-drivers/nvidia-drivers[tools,static-libs] )
+ views? (
+ x11-libs/libICE
+ x11-libs/libXext
+ )
+ web? ( ${WEBAPP_DEPEND} )
+ $(python_gen_cond_dep '
+ python? (
+ boost? ( dev-libs/boost:=[mpi?,python?,${PYTHON_USEDEP}] )
+ gdal? ( sci-libs/gdal:=[python?,${PYTHON_USEDEP}] )
+ mpi? ( dev-python/mpi4py[${PYTHON_USEDEP}] )
+ )
+ ')
+"
+DEPEND="
+ ${RDEPEND}
+ dev-libs/jsoncpp
+ dev-libs/utfcpp
+ pegtl? ( dev-libs/pegtl )
+"
+BDEPEND="
+ mpi? ( app-admin/chrpath )
+ openmp? (
+ || (
+ sys-devel/gcc[openmp(+)]
+ sys-devel/clang-runtime[openmp(+)]
+ )
+ )
+"
+
+PATCHES=(
+ "${FILESDIR}"/${PN}-9.0.1-0001-fix-kepler-compute_arch-if-CUDA-toolkit-11-is-used.patch
+ "${FILESDIR}"/${PN}-8.2.0-freetype-2.10.3-provide-FT_CALLBACK_DEF.patch
+ "${FILESDIR}"/${PN}-9.0.1-limits-include-gcc11.patch
+ "${FILESDIR}"/${P}-TBB-2021.04-fix.patch
+ "${FILESDIR}"/${P}-proj-api-fix-upstream-commit-03256388.patch
+ "${FILESDIR}"/${P}-pegtl-3.patch
+ "${FILESDIR}"/${P}-cuda-11.5.0.patch
+ "${FILESDIR}"/${P}-fix-gcc10-return-local-addr-in-wrappers-upstream-commit-55c74ed3.patch
+ "${FILESDIR}"/${P}-use-std-atomic-part1.patch
+ "${FILESDIR}"/${P}-use-std-atomic-part2.patch
+ "${FILESDIR}"/${P}-tbb-fix-for-bundled-vtkm.patch
+)
+
+DOCS=( CONTRIBUTING.md README.md )
+
+CHECKREQS_DISK_BUILD="3G"
+
+pkg_pretend() {
+ if use examples; then
+ CHECKREQS_DISK_BUILD="4G"
+ fi
+ if use cuda; then
+ # NOTE: This should actually equal to (number of build jobs)*7G,
+ # as any of the cuda compile tasks can take up 7G!
+ # 10.2 GiB install directory, 6.4 GiB build directory with max. USE flags
+ CHECKREQS_MEMORY="7G"
+ CHECKREQS_DISK_BUILD="14G"
+ fi
+ check-reqs_pkg_setup
+}
+
+pkg_setup() {
+ if use examples; then
+ CHECKREQS_DISK_BUILD="4G"
+ fi
+ if use cuda; then
+ CHECKREQS_MEMORY="7G"
+ CHECKREQS_DISK_BUILD="14G"
+ fi
+ check-reqs_pkg_setup
+
+ use java && java-pkg-opt-2_pkg_setup
+ use python && python-single-r1_pkg_setup
+ use web && webapp_pkg_setup
+}
+
+src_prepare() {
+ # If we have system libraries available use these and delete
+	# the respective files in ${S}/ThirdParty to save some space.
+ # Note: libharu is omitted: vtk needs an updated version (2.4.0)
+ # Note: no valid xdmf2 targets are found for system xdmf2
+ # Note: no valid target found for h5part and mpi4py
+ # TODO: diy2 exodusII h5part libharu verdict vpic vtkm xdmf2 xdmf3 zfp
+ local -a DROPS=( doubleconversion eigen expat freetype gl2ps glew
+ hdf5 jpeg jsoncpp libproj libxml2 lz4 lzma netcdf ogg png pugixml
+ sqlite theora tiff utf8 zlib )
+ use pegtl && DROPS+=( pegtl )
+
+ local x
+ for x in ${DROPS[@]}; do
+ ebegin "Dropping bundled ${x}"
+ rm -r ThirdParty/${x}/vtk${x} || die
+ eend $?
+ done
+ unset x
+
+ if use doc; then
+ einfo "Removing .md5 files from documents."
+ rm -f "${WORKDIR}"/html/*.md5 || die "Failed to remove superfluous hashes"
+ sed -e "s|\${VTK_BINARY_DIR}/Utilities/Doxygen/doc|${WORKDIR}|" \
+ -i Utilities/Doxygen/CMakeLists.txt || die
+ fi
+
+ cmake_src_prepare
+
+ if use cuda; then
+ cuda_add_sandbox -w
+ cuda_src_prepare
+ fi
+
+ if use test; then
+ ebegin "Copying data files to ${BUILD_DIR}"
+ mkdir -p "${BUILD_DIR}/ExternalData" || die
+ pushd "${BUILD_DIR}/ExternalData" >/dev/null || die
+ ln -sf ../../VTK-${PV}/.ExternalData/README.rst . || die
+ ln -sf ../../VTK-${PV}/.ExternalData/SHA512 . || die
+ popd >/dev/null || die
+ eend "$?"
+ fi
+}
+
+src_configure() {
+ local mycmakeargs=(
+# TODO: defaults for some variables to consider as USE flags
+# -DVTK_ANDROID_BUILD=OFF
+# -DVTK_BUILD_COMPILE_TOOLS_ONLY=OFF
+# -DVTK_ENABLE_LOGGING=ON
+# -DVTK_ENABLE_REMOTE_MODULES=ON
+# -DVTK_INSTALL_SDK=ON
+# -DVTK_IOS_BUILD=OFF
+# -DVTK_LEGACY_REMOVE=OFF
+# -DVTK_LEGACY_SILENT=OFF
+# -DVTK_WHEEL_BUILD=OFF
+
+ -DVTK_BUILD_ALL_MODULES=$(usex all-modules ON OFF)
+ # we use the pre-built documentation and install these with USE=doc
+ -DVTK_BUILD_DOCUMENTATION=OFF
+ -DVTK_BUILD_EXAMPLES=$(usex examples ON OFF)
+
+ -DVTK_ENABLE_KITS=$(usex kits ON OFF)
+ # default to ON: USE flag for this?
+ -DVTK_ENABLE_REMOTE_MODULES=OFF
+
+ -DVTK_DATA_STORE="${S}/.ExternalData"
+
+ # Use upstream default, where USE flags are not given.
+ # Passing "DONT_WANT" will restrict building of modules from
+	# those groups and will severely limit the built libraries.
+ # Exceptions are MPI, where the default is "DONT_WANT" and
+ # StandAlone using "WANT".
+ -DVTK_GROUP_ENABLE_Imaging=$(usex imaging "WANT" "DEFAULT")
+ -DVTK_GROUP_ENABLE_Qt=$(usex qt5 "WANT" "DEFAULT")
+ -DVTK_GROUP_ENABLE_Rendering=$(usex rendering "WANT" "DEFAULT")
+ -DVTK_GROUP_ENABLE_StandAlone="WANT"
+ -DVTK_GROUP_ENABLE_Views=$(usex views "WANT" "DEFAULT")
+ -DVTK_GROUP_ENABLE_Web=$(usex web "WANT" "DEFAULT")
+
+ -DVTK_MODULE_ENABLE_VTK_vtkm="WANT"
+ -DVTK_MODULE_ENABLE_VTK_AcceleratorsVTKm="WANT"
+
+ -DVTK_PYTHON_VERSION="3"
+ -DVTK_RELOCATABLE_INSTALL=ON
+
+ -DVTK_USE_CUDA=$(usex cuda ON OFF)
+ # use system libraries where possible
+ -DVTK_USE_EXTERNAL=ON
+ -DVTK_USE_MPI=$(usex mpi ON OFF)
+ -DVTK_USE_TK=$(usex tk ON OFF)
+ -DVTK_USE_X=$(usex X ON OFF)
+
+ -DVTK_VERSIONED_INSTALL=ON
+
+ -DVTK_WRAP_JAVA=$(usex java ON OFF)
+ -DVTK_WRAP_PYTHON=$(usex python ON OFF)
+ )
+
+ if use examples || use test; then
+ mycmakeargs+=( -DVTK_USE_LARGE_DATA=ON )
+ fi
+
+ if ! use java && ! use python; then
+ # defaults to ON
+ mycmakeargs+=( -DVTK_ENABLE_WRAPPING=OFF )
+ fi
+
+ if use boost; then
+ mycmakeargs+=(
+ -DVTK_MODULE_ENABLE_VTK_InfovisBoost="WANT"
+ -DVTK_MODULE_ENABLE_VTK_InfovisBoostGraphAlgorithms="WANT"
+ )
+ fi
+
+ if use cuda; then
+ local cuda_arch=
+ case ${VTK_CUDA_ARCH:-native} in
+ # we ignore fermi arch, because current nvidia-cuda-toolkit-11*
+ # no longer supports it
+ kepler|maxwell|pascal|volta|turing|ampere|all)
+ cuda_arch=${VTK_CUDA_ARCH}
+ ;;
+ native)
+ ewarn "If auto detection fails for you, please try and export the"
+ ewarn "VTK_CUDA_ARCH environment variable to one of the common arch"
+ ewarn "names: kepler, maxwell, pascal, volta, turing, ampere or all."
+ cuda_arch=native
+ ;;
+ *)
+ eerror "Please properly set the VTK_CUDA_ARCH environment variable to"
+ eerror "one of: kepler, maxwell, pascal, volta, turing, ampere, all"
+ die "Invalid CUDA architecture given: '${VTK_CUDA_ARCH}'!"
+ ;;
+ esac
+ ewarn "Using CUDA architecture '${cuda_arch}'"
+
+ mycmakeargs+=( -DVTKm_CUDA_Architecture=${cuda_arch} )
+ fi
+
+ if use ffmpeg; then
+ mycmakeargs+=( -DVTK_MODULE_ENABLE_VTK_IOFFMPEG="WANT" )
+ fi
+
+ if use gdal; then
+ mycmakeargs+=( -DVTK_MODULE_ENABLE_VTK_GeovisGDAL="WANT" )
+ fi
+
+ if use java; then
+ mycmakeargs+=(
+ -DCMAKE_INSTALL_JARDIR="share/${PN}"
+ -DVTK_ENABLE_WRAPPING=ON
+ )
+ fi
+
+ if use json; then
+ mycmakeargs+=( -DVTK_MODULE_ENABLE_VTK_IOGeoJSON="WANT" )
+ fi
+
+ if use mpi; then
+ mycmakeargs+=(
+ -DVTK_GROUP_ENABLE_MPI="WANT"
+ -DVTK_MODULE_ENABLE_VTK_IOH5part="WANT"
+ -DVTK_MODULE_ENABLE_VTK_IOParallel="WANT"
+ -DVTK_MODULE_ENABLE_VTK_IOParallelNetCDF="WANT"
+ -DVTK_MODULE_ENABLE_VTK_IOParallelXML="WANT"
+ -DVTK_MODULE_ENABLE_VTK_ParallelMPI="WANT"
+ -DVTK_MODULE_ENABLE_VTK_RenderingParallel="WANT"
+ -DVTK_MODULE_ENABLE_VTK_h5part="WANT"
+ -DVTKm_ENABLE_MPI=ON
+ )
+ if use python; then
+ mycmakeargs+=( -DVTK_MODULE_ENABLE_VTK_ParallelMPI4Py="WANT" )
+ fi
+ fi
+
+ if use mysql; then
+ mycmakeargs+=(
+ -DVTK_MODULE_ENABLE_VTK_IOMySQL="WANT"
+ -DVTK_MODULE_ENABLE_VTK_IOSQL="WANT"
+ )
+ fi
+
+ if use odbc; then
+ mycmakeargs+=( -DVTK_MODULE_ENABLE_VTK_IOODBC="WANT" )
+ fi
+
+ if use offscreen; then
+ mycmakeargs+=(
+ -DVTK_OPENGL_HAS_OSMESA=ON
+ -DVTK_DEFAULT_RENDER_WINDOW_OFFSCREEN=ON
+ -DVTK_DEFAULT_RENDER_WINDOW_HEADLESS=ON
+ )
+ fi
+
+ if use openmp; then
+ if use tbb; then
+ einfo "NOTE: You have specified both openmp and tbb USE flags."
+ einfo "NOTE: Tbb will take precedence. Disabling OpenMP"
+ # Sequential is default SMP implementation, nothing special to do
+ else
+ mycmakeargs+=(
+ -DVTK_SMP_IMPLEMENTATION_TYPE="OpenMP"
+ -DVTKm_ENABLE_OPENMP=ON
+ )
+ fi
+ fi
+
+ if use pegtl; then
+ mycmakeargs+=( -DVTK_MODULE_USE_EXTERNAL_VTK_pegtl=ON )
+ else
+ mycmakeargs+=( -DVTK_MODULE_USE_EXTERNAL_VTK_pegtl=OFF )
+ fi
+
+ if use postgres; then
+ mycmakeargs+=(
+ -DVTK_MODULE_ENABLE_VTK_IOPostgreSQL="WANT"
+ -DVTK_MODULE_ENABLE_VTK_IOSQL="WANT"
+ )
+ fi
+
+ if use python; then
+ mycmakeargs+=(
+ -DVTK_ENABLE_WRAPPING=ON
+ -DPython3_EXECUTABLE="${PYTHON}"
+ -DVTK_PYTHON_SITE_PACKAGES_SUFFIX="lib/${EPYTHON}/site-packages"
+ )
+ fi
+
+ if use qt5; then
+ mycmakeargs+=( -DVTK_MODULE_ENABLE_VTK_GUISupportQt="WANT" )
+ if use mysql || use postgres; then
+ mycmakeargs+=( -DVTK_MODULE_ENABLE_VTK_GUISupportQtSQL="WANT" )
+ fi
+ if use rendering; then
+ mycmakeargs+=( -DVTK_MODULE_ENABLE_VTK_RenderingQt="WANT" )
+ fi
+ if use views; then
+ mycmakeargs+=( -DVTK_MODULE_ENABLE_VTK_ViewsQt="WANT" )
+ fi
+ fi
+
+ if use rendering || use web || use all-modules; then
+ # needs patched version
+ mycmakeargs+=( -DVTK_MODULE_USE_EXTERNAL_VTK_libharu=OFF )
+ fi
+
+ if use rendering; then
+ mycmakeargs+=(
+ -DVTK_MODULE_ENABLE_VTK_IOExportGL2PS="WANT"
+ -DVTK_MODULE_USE_EXTERNAL_VTK_gl2ps=ON
+ -DVTK_MODULE_USE_EXTERNAL_VTK_glew=ON
+ -DVTK_MODULE_USE_EXTERNAL_VTK_libproj=ON
+ )
+ fi
+
+ if use tbb; then
+ mycmakeargs+=(
+ -DVTK_SMP_IMPLEMENTATION_TYPE="TBB"
+ -DVTKm_ENABLE_TBB=ON
+ )
+ fi
+
+ if use test; then
+ ewarn "Testing requires VTK_FORBID_DOWNLOADS=OFF by upstream."
+ ewarn "Care has been taken to pre-download all required files."
+ ewarn "In case you find missing files, please inform me."
+ mycmakeargs+=(
+ -DVTK_BUILD_TESTING=ON
+ -DVTK_DATA_EXCLUDE_FROM_ALL=ON
+ -DVTK_FORBID_DOWNLOADS=OFF
+ )
+ else
+ mycmakeargs+=(
+ -DVTK_BUILD_TESTING=OFF
+ -DVTK_FORBID_DOWNLOADS=ON
+ )
+ fi
+
+ if use theora; then
+ mycmakeargs+=( -DVTK_MODULE_ENABLE_VTK_IOOggTheora="WANT" )
+ fi
+
+ if use all-modules; then
+ mycmakeargs+=(
+ -DVTK_ENABLE_OSPRAY=OFF
+ -DVTK_MODULE_ENABLE_VTK_DomainsMicroscopy="DONT_WANT"
+ -DVTK_MODULE_ENABLE_VTK_FiltersOpenTURNS="DONT_WANT"
+ -DVTK_MODULE_ENABLE_VTK_IOADIOS2="DONT_WANT"
+ -DVTK_MODULE_ENABLE_VTK_IOPDAL="DONT_WANT"
+ -DVTK_MODULE_ENABLE_VTK_MomentInvariants="DONT_WANT"
+ -DVTK_MODULE_ENABLE_VTK_PoissonReconstruction="DONT_WANT"
+ -DVTK_MODULE_ENABLE_VTK_Powercrust="DONT_WANT"
+ -DVTK_MODULE_ENABLE_VTK_RenderingOpenVR="DONT_WANT"
+ -DVTK_MODULE_ENABLE_VTK_SignedTensor="DONT_WANT"
+ -DVTK_MODULE_ENABLE_VTK_SplineDrivenImageSlicer="DONT_WANT"
+ -DVTK_MODULE_ENABLE_VTK_vtkDICOM="DONT_WANT"
+ -DVTK_MODULE_USE_EXTERNAL_vtkkissfft=ON
+ )
+ fi
+
+ use java && export JAVA_HOME="${EPREFIX}/etc/java-config-2/current-system-vm"
+
+ if use mpi; then
+ export CC=mpicc
+ export CXX=mpicxx
+ export FC=mpif90
+ export F90=mpif90
+ export F77=mpif77
+ fi
+
+ cmake_src_configure
+}
+
+src_test() {
+ nonfatal virtx cmake_src_test
+}
+
+src_install() {
+ use web && webapp_src_preinst
+
+ # Stop web page images from being compressed
+ if use doc; then
+ HTML_DOCS=( "${WORKDIR}/html/." )
+ fi
+
+ cmake_src_install
+
+ use java && java-pkg_regjar "${ED}"/usr/share/${PN}/${PN}.jar
+
+ # install examples
+ if use examples; then
+ einfo "Installing examples"
+ mv -v {E,e}xamples || die
+ dodoc -r examples
+ docompress -x /usr/share/doc/${PF}/examples
+
+ einfo "Installing datafiles"
+ insinto /usr/share/${PN}/data
+ doins -r "${S}/.ExternalData"
+ fi
+
+ # with MPI runpath's are not deleted properly
+ if use mpi; then
+ chrpath -d "${ED}"/usr/$(get_libdir)/*.so.${PV} || die
+ fi
+
+ use python && python_optimize
+
+ # environment
+ cat >> "${T}"/40${PN} <<- EOF || die
+ VTK_DATA_ROOT=${EPREFIX}/usr/share/${PN}/data
+ VTK_DIR=${EPREFIX}/usr/$(get_libdir)/${PN}
+ VTKHOME=${EPREFIX}/usr
+ EOF
+ doenvd "${T}"/40${PN}
+
+ use web && webapp_src_install
+
+ # Temporary!
+ # Avoid collision with paraview.
+ # bug #793221
+ rm -rf "${ED}"/usr/share/vtkm-1.5/VTKm{LICENSE.txt,README.md} || die
+}
+
+# webapp.eclass exports these but we want it optional #534036
+pkg_postinst() {
+ use web && webapp_pkg_postinst
+
+ if use examples; then
+ einfo "You can get more and updated examples at"
+ einfo "https://kitware.github.io/vtk-examples/site/"
+ fi
+}
+
+pkg_prerm() {
+ use web && webapp_pkg_prerm
+}